From 174e0a9f77c2b8da10140b3381070089e4b8a08f Mon Sep 17 00:00:00 2001 From: Mehdi Dogguy <mehdi@debian.org> Date: Mon, 8 Sep 2014 21:31:18 +0200 Subject: [PATCH] Imported Upstream version 1.3.3 --- AUTHORS | 5 +- BUILD.NOTES | 9 +- COPYING | 34 +- DISCLAIMER | 105 +- META | 8 +- Makefile.am | 1 + Makefile.in | 47 +- NEWS | 353 +- README | 71 +- RELEASE_NOTES | 350 +- aclocal.m4 | 255 +- auxdir/Makefile.am | 3 +- auxdir/Makefile.in | 18 +- auxdir/config.guess | 6 +- auxdir/config.sub | 10 +- auxdir/depcomp | 33 +- auxdir/install-sh | 228 +- auxdir/ltmain.sh | 58 +- auxdir/slurm.m4 | 20 +- auxdir/x_ac_aix.m4 | 2 +- auxdir/x_ac_bluegene.m4 | 8 +- auxdir/x_ac_databases.m4 | 111 + auxdir/x_ac_gtk.m4 | 4 +- auxdir/x_ac_slurm_ssl.m4 | 61 +- config.h.in | 18 + configure | 1312 +++- configure.ac | 55 +- contribs/Makefile.am | 2 +- contribs/Makefile.in | 32 +- contribs/env_cache_builder.c | 95 +- contribs/perlapi/Makefile.in | 17 +- contribs/perlapi/libslurm-perl/Slurm.xs | 10 +- contribs/perlapi/libslurm-perl/alloc.c | 10 +- contribs/perlapi/libslurm-perl/conf.c | 49 +- contribs/perlapi/libslurm-perl/job.c | 3 +- contribs/perlapi/libslurm-perl/launch.c | 4 +- contribs/perlapi/libslurm-perl/partition.c | 4 +- contribs/perlapi/libslurm-perl/trigger.c | 2 +- contribs/phpext/Makefile.am | 48 + contribs/phpext/Makefile.in | 445 ++ contribs/phpext/README | 21 + contribs/phpext/slurm_php/config.m4.in | 59 + contribs/phpext/slurm_php/slurm_php.c | 100 + .../phpext/slurm_php/slurm_php.h | 46 +- contribs/time_login.c | 2 +- contribs/torque/Makefile.in | 17 +- contribs/torque/mpiexec.pl | 2 +- contribs/torque/pbsnodes.pl | 2 +- contribs/torque/qdel.pl | 2 +- contribs/torque/qhold.pl | 2 +- contribs/torque/qrls.pl | 2 +- contribs/torque/qstat.pl | 2 +- contribs/torque/qsub.pl | 2 +- doc/Makefile.in | 30 +- doc/html/Makefile.am | 8 +- doc/html/Makefile.in | 25 +- doc/html/accounting.shtml | 478 ++ doc/html/arch.gif | Bin 28772 -> 39628 bytes doc/html/big_sys.shtml | 87 +- doc/html/bluegene.shtml | 22 +- doc/html/checkpoint_plugins.shtml | 12 +- doc/html/configurator.html.in | 334 +- doc/html/cons_res.shtml | 1 + doc/html/cons_res_share.shtml | 222 + doc/html/crypto_plugins.shtml | 151 + doc/html/documentation.shtml | 35 +- doc/html/download.shtml | 68 +- doc/html/faq.shtml | 268 +- doc/html/footer.txt | 8 +- doc/html/gang_scheduling.shtml | 472 ++ doc/html/header.txt | 2 +- doc/html/jobacct_gatherplugins.shtml | 262 + doc/html/jobacct_storageplugins.shtml | 204 + doc/html/jobacctplugins.shtml | 359 -- doc/html/jobcompplugins.shtml | 57 +- doc/html/maui.shtml | 13 +- doc/html/moab.shtml | 72 +- doc/html/news.shtml | 29 +- doc/html/overview.shtml | 70 +- doc/html/power_save.shtml | 28 +- doc/html/preempt.shtml | 247 + doc/html/programmer_guide.shtml | 22 +- doc/html/publications.shtml | 17 +- doc/html/quickstart.shtml | 61 +- doc/html/quickstart_admin.shtml | 169 +- doc/html/review_release.html | 78 +- doc/html/schedplugins.shtml | 29 +- doc/html/selectplugins.shtml | 35 +- doc/html/slurm.shtml | 31 +- doc/html/slurm_moab.pdf | Bin 0 -> 759031 bytes doc/html/slurm_v1.3.pdf | Bin 0 -> 845840 bytes doc/html/taskplugins.shtml | 24 +- doc/html/team.shtml | 13 +- doc/man/Makefile.am | 10 +- doc/man/Makefile.in | 27 +- doc/man/man1/sacctmgr.1 | 363 ++ doc/man/man1/salloc.1 | 229 +- doc/man/man1/sattach.1 | 2 +- doc/man/man1/sbatch.1 | 262 +- doc/man/man1/sbcast.1 | 2 +- doc/man/man1/scancel.1 | 13 +- doc/man/man1/scontrol.1 | 89 +- doc/man/man1/sinfo.1 | 65 +- doc/man/man1/slaunch.1 | 734 --- doc/man/man1/slurm.1 | 4 
+- doc/man/man1/smap.1 | 36 +- doc/man/man1/squeue.1 | 12 +- doc/man/man1/sreport.1 | 111 + doc/man/man1/srun.1 | 385 +- doc/man/man1/sstat.1 | 262 + doc/man/man1/strigger.1 | 42 +- doc/man/man1/sview.1 | 2 +- doc/man/man3/slurm_allocate_resources.3 | 2 +- doc/man/man3/slurm_checkpoint_error.3 | 2 +- doc/man/man3/slurm_complete_job.3 | 2 +- doc/man/man3/slurm_free_ctl_conf.3 | 2 +- doc/man/man3/slurm_free_job_info_msg.3 | 2 +- .../slurm_free_job_step_info_response_msg.3 | 2 +- doc/man/man3/slurm_free_node_info.3 | 2 +- doc/man/man3/slurm_free_partition_info.3 | 2 +- doc/man/man3/slurm_get_errno.3 | 2 +- doc/man/man3/slurm_hostlist_create.3 | 2 +- doc/man/man3/slurm_job_step_create.3 | 2 +- doc/man/man3/slurm_kill_job.3 | 2 +- doc/man/man3/slurm_reconfigure.3 | 12 +- doc/man/man3/slurm_resume.3 | 2 +- doc/man/man3/slurm_step_ctx_create.3 | 2 +- doc/man/man3/slurm_step_launch.3 | 2 +- doc/man/man5/bluegene.conf.5 | 106 +- doc/man/man5/slurm.conf.5 | 764 ++- doc/man/man5/slurmdbd.conf.5 | 216 + doc/man/man5/wiki.conf.5 | 11 +- doc/man/man8/slurmctld.8 | 2 +- doc/man/man8/slurmd.8 | 2 +- doc/man/man8/slurmdbd.8 | 55 + doc/man/man8/slurmstepd.8 | 2 +- doc/man/man8/spank.8 | 37 +- etc/bluegene.conf.example | 3 +- etc/init.d.slurm | 6 +- etc/init.d.slurmdbd | 146 + etc/slurm.conf.example | 7 +- slurm.spec | 203 +- slurm/slurm.h.in | 542 +- slurm/slurm_errno.h | 9 +- slurm/spank.h | 2 +- src/Makefile.am | 7 +- src/Makefile.in | 37 +- src/api/Makefile.am | 6 + src/api/Makefile.in | 60 +- src/api/allocate.c | 51 +- src/{salloc/msg.c => api/allocate_msg.c} | 173 +- src/api/cancel.c | 4 +- src/api/checkpoint.c | 125 +- src/api/complete.c | 4 +- src/api/config_info.c | 220 +- src/api/init_msg.c | 21 +- src/api/job_info.c | 64 +- src/api/job_info.h | 4 +- src/api/job_step_info.c | 19 +- src/api/node_info.c | 16 +- src/api/node_select_info.c | 4 +- src/api/node_select_info.h | 4 +- src/api/partition_info.c | 43 +- src/api/pmi.c | 8 +- src/api/pmi_server.c | 6 +- src/api/pmi_server.h | 4 +- src/api/reconfigure.c | 42 +- src/api/signal.c | 38 +- src/api/slurm_pmi.c | 2 +- src/api/slurm_pmi.h | 2 +- src/api/step_ctx.c | 150 +- src/api/step_ctx.h | 6 +- src/api/step_io.c | 4 +- src/api/step_io.h | 4 +- src/api/step_launch.c | 459 +- src/api/step_launch.h | 11 +- src/api/submit.c | 4 +- src/api/suspend.c | 4 +- src/api/triggers.c | 2 +- src/api/update_config.c | 4 +- src/common/Makefile.am | 34 +- src/common/Makefile.in | 188 +- src/common/arg_desc.c | 2 +- src/common/arg_desc.h | 2 +- src/common/assoc_mgr.c | 675 ++ src/common/assoc_mgr.h | 123 + src/common/bitstring.c | 2 +- src/common/bitstring.h | 2 +- src/common/checkpoint.c | 26 +- src/common/checkpoint.h | 7 +- src/common/daemonize.c | 4 +- src/common/daemonize.h | 4 +- src/common/eio.c | 2 +- src/common/eio.h | 2 +- src/common/env.c | 92 +- src/common/env.h | 9 +- src/common/forward.c | 2 +- src/common/forward.h | 2 +- src/common/hostlist.c | 6 +- src/common/hostlist.h | 4 +- src/common/io_hdr.c | 11 +- src/common/io_hdr.h | 4 +- src/common/job_options.c | 28 +- src/common/job_options.h | 2 +- src/common/jobacct_common.c | 886 +++ src/common/jobacct_common.h | 267 + src/common/list.c | 74 +- src/common/list.h | 26 +- src/common/log.c | 4 +- src/common/log.h | 2 +- src/common/macros.h | 4 +- src/common/mpi.c | 2 +- src/common/mpi.h | 2 +- src/common/net.c | 2 +- src/common/net.h | 2 +- src/common/node_select.c | 192 +- src/common/node_select.h | 69 +- src/common/optz.c | 2 +- src/common/optz.h | 2 +- src/common/pack.c | 44 +- src/common/pack.h | 50 +- 
src/common/parse_config.c | 12 +- src/common/parse_config.h | 4 +- src/common/parse_spec.c | 4 +- src/common/parse_spec.h | 2 +- src/common/parse_time.c | 38 +- src/common/parse_time.h | 21 +- src/common/plugin.c | 10 +- src/common/plugin.h | 2 +- src/common/plugrack.c | 2 +- src/common/plugrack.h | 2 +- src/common/plugstack.c | 4 +- src/common/plugstack.h | 2 +- src/common/proc_args.c | 653 ++ src/common/proc_args.h | 106 + src/common/read_config.c | 438 +- src/common/read_config.h | 37 +- src/common/safeopen.c | 4 +- src/common/safeopen.h | 4 +- src/common/slurm_accounting_storage.c | 2029 ++++++ src/common/slurm_accounting_storage.h | 563 ++ src/common/slurm_auth.c | 61 +- src/common/slurm_auth.h | 15 +- src/common/slurm_cred.c | 560 +- src/common/slurm_cred.h | 14 +- src/common/slurm_errno.c | 41 +- src/common/slurm_jobacct.c | 692 --- src/common/slurm_jobacct_gather.c | 524 ++ ...slurm_jobacct.h => slurm_jobacct_gather.h} | 107 +- src/common/slurm_jobcomp.c | 62 +- src/common/slurm_jobcomp.h | 43 +- src/common/slurm_protocol_api.c | 820 ++- src/common/slurm_protocol_api.h | 144 +- src/common/slurm_protocol_common.h | 3 +- src/common/slurm_protocol_defs.c | 173 +- src/common/slurm_protocol_defs.h | 143 +- src/common/slurm_protocol_interface.h | 2 +- src/common/slurm_protocol_mongo_common.h | 2 +- src/common/slurm_protocol_pack.c | 1076 +++- src/common/slurm_protocol_pack.h | 2 +- src/common/slurm_protocol_socket_common.h | 2 +- .../slurm_protocol_socket_implementation.c | 13 +- src/common/slurm_protocol_util.c | 7 +- src/common/slurm_protocol_util.h | 2 +- src/common/slurm_resource_info.c | 103 +- src/common/slurm_resource_info.h | 4 +- src/common/slurm_selecttype_info.c | 2 +- src/common/slurm_selecttype_info.h | 2 +- src/common/slurm_step_layout.c | 22 +- src/common/slurm_step_layout.h | 6 +- src/common/slurm_xlator.h | 4 +- src/common/slurmdbd_defs.c | 2299 +++++++ src/common/slurmdbd_defs.h | 424 ++ src/common/stepd_api.c | 95 +- src/common/stepd_api.h | 15 +- src/common/switch.c | 2 +- src/common/switch.h | 2 +- src/common/timers.c | 2 +- src/common/timers.h | 2 +- src/common/uid.c | 4 +- src/common/uid.h | 4 +- src/common/unsetenv.c | 2 +- src/common/unsetenv.h | 2 +- src/common/xassert.c | 4 +- src/common/xassert.h | 4 +- src/common/xmalloc.c | 4 +- src/common/xmalloc.h | 4 +- src/common/xsignal.c | 2 +- src/common/xsignal.h | 4 +- src/common/xstring.c | 38 +- src/common/xstring.h | 7 +- src/database/Makefile.am | 39 + src/database/Makefile.in | 604 ++ .../jobacct/gold => database}/base64.c | 8 +- .../jobacct/gold => database}/base64.h | 0 .../gold => database}/gold_interface.c | 103 +- .../gold => database}/gold_interface.h | 34 +- src/database/mysql_common.c | 428 ++ src/database/mysql_common.h | 98 + src/database/pgsql_common.c | 390 ++ src/database/pgsql_common.h | 100 + src/plugins/Makefile.am | 3 +- src/plugins/Makefile.in | 32 +- src/plugins/accounting_storage/Makefile.am | 3 + src/plugins/accounting_storage/Makefile.in | 565 ++ .../accounting_storage/filetxt/Makefile.am | 14 + .../accounting_storage/filetxt/Makefile.in | 559 ++ .../filetxt/accounting_storage_filetxt.c | 830 +++ .../filetxt/filetxt_jobacct_process.c | 1450 +++++ .../filetxt/filetxt_jobacct_process.h | 55 + .../accounting_storage/gold/Makefile.am | 31 + .../accounting_storage/gold/Makefile.in | 575 ++ .../gold/accounting_storage_gold.c | 3259 ++++++++++ .../accounting_storage/mysql/Makefile.am | 21 + .../accounting_storage/mysql/Makefile.in | 593 ++ .../mysql/accounting_storage_mysql.c | 5468 
+++++++++++++++++ .../mysql/mysql_jobacct_process.c | 463 ++ .../mysql/mysql_jobacct_process.h | 82 + .../accounting_storage/mysql/mysql_rollup.c | 762 +++ .../accounting_storage/mysql/mysql_rollup.h} | 59 +- .../accounting_storage/none/Makefile.am | 12 + .../accounting_storage/none/Makefile.in | 555 ++ .../none/accounting_storage_none.c | 349 ++ .../accounting_storage/pgsql/Makefile.am | 20 + .../accounting_storage/pgsql/Makefile.in | 583 ++ .../pgsql/accounting_storage_pgsql.c | 1566 +++++ .../pgsql/pgsql_jobacct_process.c | 491 ++ .../pgsql/pgsql_jobacct_process.h | 71 + .../accounting_storage/slurmdbd/Makefile.am | 16 + .../accounting_storage/slurmdbd/Makefile.in | 558 ++ .../slurmdbd/accounting_storage_slurmdbd.c | 1233 ++++ src/plugins/auth/Makefile.in | 30 +- src/plugins/auth/authd/Makefile.in | 40 +- src/plugins/auth/authd/auth_authd.c | 48 +- src/plugins/auth/munge/Makefile.in | 40 +- src/plugins/auth/munge/auth_munge.c | 69 +- src/plugins/auth/none/Makefile.in | 40 +- src/plugins/auth/none/auth_none.c | 49 +- src/plugins/checkpoint/Makefile.am | 2 +- src/plugins/checkpoint/Makefile.in | 32 +- src/plugins/checkpoint/aix/Makefile.in | 40 +- src/plugins/checkpoint/aix/checkpoint_aix.c | 17 +- src/plugins/checkpoint/none/Makefile.in | 40 +- src/plugins/checkpoint/none/checkpoint_none.c | 9 +- src/plugins/checkpoint/ompi/Makefile.in | 40 +- src/plugins/checkpoint/ompi/checkpoint_ompi.c | 13 +- src/plugins/checkpoint/xlch/Makefile.am | 24 + src/plugins/checkpoint/xlch/Makefile.in | 565 ++ src/plugins/checkpoint/xlch/checkpoint_xlch.c | 696 +++ src/plugins/crypto/Makefile.am | 3 + src/plugins/crypto/Makefile.in | 565 ++ src/plugins/crypto/munge/Makefile.am | 19 + src/plugins/crypto/munge/Makefile.in | 561 ++ src/plugins/crypto/munge/crypto_munge.c | 211 + src/plugins/crypto/openssl/Makefile.am | 25 + src/plugins/crypto/openssl/Makefile.in | 564 ++ src/plugins/crypto/openssl/crypto_openssl.c | 230 + src/plugins/jobacct/aix/Makefile.am | 19 - src/plugins/jobacct/common/common_slurmctld.c | 537 -- .../jobacct/common/common_slurmstepd.c | 170 - src/plugins/jobacct/common/jobacct_common.c | 461 -- src/plugins/jobacct/common/jobacct_common.h | 139 - src/plugins/jobacct/gold/Makefile.am | 18 - src/plugins/jobacct/gold/agent.c | 675 -- src/plugins/jobacct/gold/agent.h | 170 - src/plugins/jobacct/gold/jobacct_gold.c | 1133 ---- src/plugins/jobacct/linux/Makefile.am | 19 - src/plugins/jobacct/none/Makefile.am | 13 - .../{jobacct => jobacct_gather}/Makefile.am | 2 +- .../{jobacct => jobacct_gather}/Makefile.in | 38 +- src/plugins/jobacct_gather/aix/Makefile.am | 17 + .../aix/Makefile.in | 102 +- .../aix/jobacct_gather_aix.c} | 444 +- src/plugins/jobacct_gather/linux/Makefile.am | 17 + .../linux/Makefile.in | 104 +- .../linux/jobacct_gather_linux.c} | 570 +- src/plugins/jobacct_gather/none/Makefile.am | 13 + .../gold => jobacct_gather/none}/Makefile.in | 87 +- .../none/jobacct_gather_none.c} | 133 +- src/plugins/jobcomp/Makefile.am | 2 +- src/plugins/jobcomp/Makefile.in | 32 +- src/plugins/jobcomp/filetxt/Makefile.am | 4 +- src/plugins/jobcomp/filetxt/Makefile.in | 48 +- .../jobcomp/filetxt/filetxt_jobcomp_process.c | 300 + .../jobcomp/filetxt/filetxt_jobcomp_process.h | 55 + src/plugins/jobcomp/filetxt/jobcomp_filetxt.c | 234 +- src/plugins/jobcomp/mysql/Makefile.am | 19 + src/plugins/jobcomp/mysql/Makefile.in | 578 ++ src/plugins/jobcomp/mysql/jobcomp_mysql.c | 459 ++ .../jobcomp/mysql/mysql_jobcomp_process.c | 207 + .../jobcomp/mysql/mysql_jobcomp_process.h | 94 + 
src/plugins/jobcomp/none/Makefile.in | 40 +- src/plugins/jobcomp/none/jobcomp_none.c | 18 +- src/plugins/jobcomp/pgsql/Makefile.am | 19 + src/plugins/jobcomp/pgsql/Makefile.in | 578 ++ src/plugins/jobcomp/pgsql/jobcomp_pgsql.c | 482 ++ .../jobcomp/pgsql/pgsql_jobcomp_process.c | 223 + .../jobcomp/pgsql/pgsql_jobcomp_process.h | 92 + src/plugins/jobcomp/script/Makefile.in | 40 +- src/plugins/jobcomp/script/jobcomp_script.c | 39 +- src/plugins/jobcomp/slurmdbd/Makefile.am | 13 + .../none => jobcomp/slurmdbd}/Makefile.in | 74 +- .../jobcomp/slurmdbd/jobcomp_slurmdbd.c | 139 + src/plugins/mpi/Makefile.in | 30 +- src/plugins/mpi/lam/Makefile.in | 40 +- src/plugins/mpi/lam/lam.h | 2 +- src/plugins/mpi/lam/mpi_lam.c | 2 +- src/plugins/mpi/mpich1_p4/Makefile.in | 40 +- src/plugins/mpi/mpich1_p4/mpich1_p4.c | 2 +- src/plugins/mpi/mpich1_shmem/Makefile.in | 40 +- src/plugins/mpi/mpich1_shmem/mpich1_shmem.c | 2 +- src/plugins/mpi/mpichgm/Makefile.in | 40 +- src/plugins/mpi/mpichgm/mpi_mpichgm.c | 2 +- src/plugins/mpi/mpichgm/mpichgm.c | 2 +- src/plugins/mpi/mpichgm/mpichgm.h | 2 +- src/plugins/mpi/mpichmx/Makefile.in | 40 +- src/plugins/mpi/mpichmx/mpi_mpichmx.c | 2 +- src/plugins/mpi/mpichmx/mpichmx.c | 2 +- src/plugins/mpi/mpichmx/mpichmx.h | 2 +- src/plugins/mpi/mvapich/Makefile.in | 40 +- src/plugins/mpi/mvapich/mpi_mvapich.c | 2 +- src/plugins/mpi/mvapich/mvapich.c | 2 +- src/plugins/mpi/mvapich/mvapich.h | 2 +- src/plugins/mpi/none/Makefile.in | 40 +- src/plugins/mpi/none/mpi_none.c | 2 +- src/plugins/mpi/openmpi/Makefile.in | 40 +- src/plugins/mpi/openmpi/mpi_openmpi.c | 2 +- src/plugins/proctrack/Makefile.in | 30 +- src/plugins/proctrack/aix/Makefile.in | 40 +- src/plugins/proctrack/aix/proctrack_aix.c | 2 +- src/plugins/proctrack/linuxproc/Makefile.in | 40 +- src/plugins/proctrack/linuxproc/kill_tree.c | 2 +- src/plugins/proctrack/linuxproc/kill_tree.h | 2 +- .../proctrack/linuxproc/proctrack_linuxproc.c | 2 +- src/plugins/proctrack/pgid/Makefile.in | 40 +- src/plugins/proctrack/pgid/proctrack_pgid.c | 6 +- src/plugins/proctrack/rms/Makefile.in | 40 +- src/plugins/proctrack/rms/proctrack_rms.c | 2 +- src/plugins/proctrack/sgi_job/Makefile.in | 40 +- .../proctrack/sgi_job/proctrack_sgi_job.c | 2 +- src/plugins/sched/Makefile.in | 30 +- src/plugins/sched/backfill/Makefile.in | 40 +- src/plugins/sched/backfill/backfill.c | 828 +-- src/plugins/sched/backfill/backfill.h | 2 +- src/plugins/sched/backfill/backfill_wrapper.c | 35 +- src/plugins/sched/builtin/Makefile.in | 40 +- src/plugins/sched/builtin/builtin_wrapper.c | 35 +- src/plugins/sched/gang/Makefile.in | 40 +- src/plugins/sched/gang/gang.c | 1466 ++++- src/plugins/sched/gang/gang.h | 36 +- src/plugins/sched/gang/sched_gang.c | 82 +- src/plugins/sched/hold/Makefile.in | 40 +- src/plugins/sched/hold/hold_wrapper.c | 35 +- src/plugins/sched/wiki/Makefile.in | 40 +- src/plugins/sched/wiki/cancel_job.c | 2 +- src/plugins/sched/wiki/get_jobs.c | 59 +- src/plugins/sched/wiki/get_nodes.c | 71 +- src/plugins/sched/wiki/hostlist.c | 5 +- src/plugins/sched/wiki/job_modify.c | 58 +- src/plugins/sched/wiki/msg.c | 68 +- src/plugins/sched/wiki/msg.h | 10 +- src/plugins/sched/wiki/resume_job.c | 2 +- src/plugins/sched/wiki/sched_wiki.c | 33 +- src/plugins/sched/wiki/start_job.c | 8 +- src/plugins/sched/wiki/suspend_job.c | 2 +- src/plugins/sched/wiki2/Makefile.in | 40 +- src/plugins/sched/wiki2/cancel_job.c | 2 +- src/plugins/sched/wiki2/event.c | 2 +- src/plugins/sched/wiki2/get_jobs.c | 132 +- src/plugins/sched/wiki2/get_nodes.c | 248 +- 
src/plugins/sched/wiki2/hostlist.c | 5 +- src/plugins/sched/wiki2/initialize.c | 19 +- src/plugins/sched/wiki2/job_add_task.c | 2 +- src/plugins/sched/wiki2/job_modify.c | 104 +- src/plugins/sched/wiki2/job_notify.c | 2 +- src/plugins/sched/wiki2/job_release_task.c | 2 +- src/plugins/sched/wiki2/job_requeue.c | 2 +- src/plugins/sched/wiki2/job_signal.c | 2 +- src/plugins/sched/wiki2/job_will_run.c | 433 +- src/plugins/sched/wiki2/msg.c | 76 +- src/plugins/sched/wiki2/msg.h | 48 +- src/plugins/sched/wiki2/resume_job.c | 2 +- src/plugins/sched/wiki2/sched_wiki.c | 33 +- src/plugins/sched/wiki2/start_job.c | 96 +- src/plugins/sched/wiki2/suspend_job.c | 2 +- src/plugins/select/Makefile.in | 30 +- src/plugins/select/bluegene/Makefile.in | 30 +- .../bluegene/block_allocator/Makefile.am | 7 - .../bluegene/block_allocator/Makefile.in | 41 +- .../block_allocator/block_allocator.c | 157 +- .../block_allocator/block_allocator.h | 12 +- .../select/bluegene/plugin/Makefile.am | 12 +- .../select/bluegene/plugin/Makefile.in | 76 +- .../select/bluegene/plugin/bg_job_place.c | 1749 ++++-- .../select/bluegene/plugin/bg_job_place.h | 11 +- .../select/bluegene/plugin/bg_job_run.c | 88 +- .../select/bluegene/plugin/bg_job_run.h | 27 +- .../bluegene/plugin/bg_record_functions.c | 867 +++ .../bluegene/plugin/bg_record_functions.h | 136 + .../select/bluegene/plugin/block_sys.c | 4 +- src/plugins/select/bluegene/plugin/bluegene.c | 1919 +----- src/plugins/select/bluegene/plugin/bluegene.h | 101 +- .../select/bluegene/plugin/defined_block.c | 370 ++ .../select/bluegene/plugin/defined_block.h | 48 + .../select/bluegene/plugin/dynamic_block.c | 715 +++ .../select/bluegene/plugin/dynamic_block.h | 49 + src/plugins/select/bluegene/plugin/opts.c | 4 +- .../select/bluegene/plugin/select_bluegene.c | 180 +- src/plugins/select/bluegene/plugin/sfree.c | 4 +- src/plugins/select/bluegene/plugin/sfree.h | 2 +- .../select/bluegene/plugin/slurm_epilog.c | 4 +- .../select/bluegene/plugin/slurm_prolog.c | 18 +- .../select/bluegene/plugin/state_test.c | 3 +- src/plugins/select/cons_res/Makefile.am | 1 - src/plugins/select/cons_res/Makefile.in | 44 +- src/plugins/select/cons_res/dist_tasks.c | 933 ++- src/plugins/select/cons_res/dist_tasks.h | 9 +- src/plugins/select/cons_res/select_cons_res.c | 3597 ++++++----- src/plugins/select/cons_res/select_cons_res.h | 109 +- src/plugins/select/linear/Makefile.am | 3 +- src/plugins/select/linear/Makefile.in | 46 +- src/plugins/select/linear/select_linear.c | 1084 +++- src/plugins/select/linear/select_linear.h | 81 + src/plugins/switch/Makefile.in | 30 +- src/plugins/switch/elan/Makefile.in | 40 +- src/plugins/switch/elan/qsw.c | 6 +- src/plugins/switch/elan/qsw.h | 2 +- src/plugins/switch/elan/switch_elan.c | 10 +- src/plugins/switch/federation/Makefile.am | 5 +- src/plugins/switch/federation/Makefile.in | 48 +- src/plugins/switch/federation/federation.c | 25 +- src/plugins/switch/federation/federation.h | 4 +- .../switch/federation/federation_keys.h | 4 +- .../switch/federation/switch_federation.c | 4 +- src/plugins/switch/none/Makefile.am | 4 - src/plugins/switch/none/Makefile.in | 44 +- src/plugins/switch/none/switch_none.c | 2 +- src/plugins/task/Makefile.in | 30 +- src/plugins/task/affinity/Makefile.am | 2 +- src/plugins/task/affinity/Makefile.in | 43 +- src/plugins/task/affinity/affinity.c | 41 +- src/plugins/task/affinity/affinity.h | 10 +- src/plugins/task/affinity/cpuset.c | 2 +- src/plugins/task/affinity/dist_tasks.c | 234 +- src/plugins/task/affinity/dist_tasks.h | 2 +- 
src/plugins/task/affinity/numa.c | 2 +- src/plugins/task/affinity/task_affinity.c | 162 +- src/plugins/task/none/Makefile.am | 4 - src/plugins/task/none/Makefile.in | 44 +- src/plugins/task/none/task_none.c | 52 +- src/sacct/Makefile.am | 3 +- src/sacct/Makefile.in | 43 +- src/sacct/options.c | 1243 +--- src/sacct/print.c | 970 ++- src/sacct/process.c | 532 +- src/sacct/sacct.c | 96 +- src/sacct/sacct.h | 260 +- src/sacct/sacct_stat.c | 40 +- src/sacctmgr/Makefile.am | 27 + src/sacctmgr/Makefile.in | 576 ++ src/sacctmgr/account_functions.c | 1174 ++++ src/sacctmgr/association_functions.c | 402 ++ src/sacctmgr/cluster_functions.c | 646 ++ src/sacctmgr/common.c | 605 ++ src/sacctmgr/print.c | 195 + src/sacctmgr/print.h | 93 + src/sacctmgr/sacctmgr.c | 1482 +++++ src/sacctmgr/sacctmgr.h | 185 + src/sacctmgr/user_functions.c | 1224 ++++ src/salloc/Makefile.am | 4 +- src/salloc/Makefile.in | 43 +- src/salloc/msg.h | 39 - src/salloc/opt.c | 610 +- src/salloc/opt.h | 21 +- src/salloc/salloc.c | 130 +- src/salloc/salloc.h | 2 +- src/sattach/Makefile.am | 2 +- src/sattach/Makefile.in | 38 +- src/sattach/attach.c | 4 +- src/sattach/opt.c | 4 +- src/sattach/opt.h | 4 +- src/sattach/sattach.c | 10 +- src/sbatch/Makefile.am | 5 +- src/sbatch/Makefile.in | 42 +- src/sbatch/opt.c | 815 ++- src/sbatch/opt.h | 30 +- src/sbatch/sbatch.c | 75 +- src/sbcast/Makefile.in | 36 +- src/sbcast/agent.c | 2 +- src/sbcast/opts.c | 2 +- src/sbcast/sbcast.c | 52 +- src/sbcast/sbcast.h | 2 +- src/scancel/Makefile.in | 36 +- src/scancel/opt.c | 2 +- src/scancel/scancel.c | 11 +- src/scancel/scancel.h | 2 +- src/scontrol/Makefile.in | 36 +- src/scontrol/info_job.c | 2 +- src/scontrol/info_node.c | 2 +- src/scontrol/info_part.c | 2 +- src/scontrol/scontrol.c | 69 +- src/scontrol/scontrol.h | 3 +- src/scontrol/update_job.c | 65 +- src/scontrol/update_node.c | 15 +- src/scontrol/update_part.c | 109 +- src/sinfo/Makefile.in | 36 +- src/sinfo/opts.c | 13 +- src/sinfo/print.c | 47 +- src/sinfo/print.h | 6 +- src/sinfo/sinfo.c | 22 +- src/sinfo/sinfo.h | 7 +- src/sinfo/sort.c | 27 +- src/slaunch/Makefile.am | 47 - src/slaunch/attach.c | 57 - src/slaunch/core-format.c | 152 - src/slaunch/core-format.h | 42 - src/slaunch/fname.c | 160 - src/slaunch/fname.h | 56 - src/slaunch/multi_prog.c | 207 - src/slaunch/multi_prog.h | 37 - src/slaunch/opt.c | 2138 ------- src/slaunch/opt.h | 165 - src/slaunch/sigstr.c | 54 - src/slaunch/sigstr.h | 39 - src/slaunch/slaunch.c | 906 --- src/slaunch/slaunch.h | 34 - src/slaunch/slaunch.wrapper.c | 17 - src/slurmctld/Makefile.am | 13 +- src/slurmctld/Makefile.in | 65 +- src/slurmctld/agent.c | 77 +- src/slurmctld/agent.h | 10 +- src/slurmctld/backup.c | 4 +- src/slurmctld/controller.c | 259 +- src/slurmctld/job_mgr.c | 1127 ++-- src/slurmctld/job_scheduler.c | 518 +- src/slurmctld/job_scheduler.h | 135 + src/slurmctld/licenses.c | 394 ++ src/slurmctld/licenses.h | 94 + src/slurmctld/locks.c | 2 +- src/slurmctld/locks.h | 2 +- src/slurmctld/node_mgr.c | 403 +- src/slurmctld/node_scheduler.c | 1321 ++-- src/slurmctld/node_scheduler.h | 21 +- src/slurmctld/partition_mgr.c | 236 +- src/slurmctld/ping_nodes.c | 79 +- src/slurmctld/ping_nodes.h | 2 +- src/slurmctld/power_save.c | 88 +- src/slurmctld/proc_req.c | 713 ++- src/slurmctld/proc_req.h | 2 +- src/slurmctld/read_config.c | 336 +- src/slurmctld/read_config.h | 6 +- src/slurmctld/sched_plugin.c | 63 +- src/slurmctld/sched_plugin.h | 23 +- src/slurmctld/slurmctld.h | 329 +- src/slurmctld/srun_comm.c | 80 +- src/slurmctld/srun_comm.h | 2 +- 
src/slurmctld/state_save.c | 2 +- src/slurmctld/state_save.h | 2 +- src/slurmctld/step_mgr.c | 571 +- src/slurmctld/trigger_mgr.c | 228 +- src/slurmctld/trigger_mgr.h | 5 +- src/slurmd/Makefile.in | 30 +- src/slurmd/common/proctrack.c | 2 +- src/slurmd/common/proctrack.h | 2 +- src/slurmd/common/reverse_tree.h | 2 +- src/slurmd/common/run_script.c | 8 +- src/slurmd/common/run_script.h | 2 +- src/slurmd/common/setproctitle.c | 4 +- src/slurmd/common/setproctitle.h | 4 +- src/slurmd/common/slurmstepd_init.c | 20 +- src/slurmd/common/slurmstepd_init.h | 2 +- src/slurmd/common/task_plugin.c | 91 +- src/slurmd/common/task_plugin.h | 29 +- src/slurmd/slurmd/Makefile.am | 10 +- src/slurmd/slurmd/Makefile.in | 60 +- src/slurmd/slurmd/get_mach_stat.c | 2 +- src/slurmd/slurmd/get_mach_stat.h | 2 +- src/slurmd/slurmd/read_proc.c | 8 +- src/slurmd/slurmd/req.c | 655 +- src/slurmd/slurmd/req.h | 4 +- src/slurmd/slurmd/reverse_tree_math.c | 2 +- src/slurmd/slurmd/reverse_tree_math.h | 2 +- src/slurmd/slurmd/slurmd.c | 49 +- src/slurmd/slurmd/slurmd.h | 8 +- src/slurmd/slurmd/xcpu.c | 2 +- src/slurmd/slurmd/xcpu.h | 2 +- src/slurmd/slurmstepd/Makefile.am | 10 +- src/slurmd/slurmstepd/Makefile.in | 52 +- src/slurmd/slurmstepd/fname.c | 68 +- src/slurmd/slurmstepd/fname.h | 2 +- src/slurmd/slurmstepd/io.c | 246 +- src/slurmd/slurmstepd/io.h | 4 +- src/slurmd/slurmstepd/mgr.c | 100 +- src/slurmd/slurmstepd/mgr.h | 2 +- src/slurmd/slurmstepd/multi_prog.c | 8 +- src/slurmd/slurmstepd/multi_prog.h | 2 +- src/slurmd/slurmstepd/pam_ses.c | 2 +- src/slurmd/slurmstepd/pam_ses.h | 2 +- src/slurmd/slurmstepd/pdebug.c | 2 +- src/slurmd/slurmstepd/pdebug.h | 2 +- src/slurmd/slurmstepd/req.c | 153 +- src/slurmd/slurmstepd/req.h | 4 +- src/slurmd/slurmstepd/slurmstepd.c | 28 +- src/slurmd/slurmstepd/slurmstepd.h | 4 +- src/slurmd/slurmstepd/slurmstepd_job.c | 60 +- src/slurmd/slurmstepd/slurmstepd_job.h | 10 +- .../slurmstepd/step_terminate_monitor.c | 7 +- .../slurmstepd/step_terminate_monitor.h | 2 +- src/slurmd/slurmstepd/task.c | 35 +- src/slurmd/slurmstepd/task.h | 4 +- src/slurmd/slurmstepd/ulimits.c | 40 +- src/slurmd/slurmstepd/ulimits.h | 2 +- src/slurmdbd/Makefile.am | 32 + src/slurmdbd/Makefile.in | 576 ++ src/slurmdbd/agent.c | 37 + src/{srun/reattach.h => slurmdbd/agent.h} | 18 +- src/slurmdbd/proc_req.c | 1800 ++++++ .../sacct_stat.h => slurmdbd/proc_req.h} | 53 +- src/slurmdbd/read_config.c | 283 + src/slurmdbd/read_config.h | 110 + src/slurmdbd/rpc_mgr.c | 523 ++ src/{srun/msg.h => slurmdbd/rpc_mgr.h} | 36 +- src/slurmdbd/slurmdbd.c | 465 ++ src/{srun/sigstr.h => slurmdbd/slurmdbd.h} | 24 +- src/smap/Makefile.am | 3 +- src/smap/Makefile.in | 42 +- src/smap/configure_functions.c | 43 +- src/smap/grid_functions.c | 2 +- src/smap/job_functions.c | 2 +- src/smap/opts.c | 2 +- src/smap/partition_functions.c | 2 +- src/smap/smap.c | 4 +- src/smap/smap.h | 2 +- src/squeue/Makefile.in | 36 +- src/squeue/opts.c | 9 +- src/squeue/print.c | 36 +- src/squeue/print.h | 6 +- src/squeue/sort.c | 2 +- src/squeue/squeue.c | 4 +- src/squeue/squeue.h | 2 +- src/sreport/Makefile.am | 21 + src/{slaunch => sreport}/Makefile.in | 143 +- src/sreport/sreport.c | 379 ++ src/sreport/sreport.h | 88 + src/srun/Makefile.am | 15 +- src/srun/Makefile.in | 66 +- src/srun/allocate.c | 656 +- src/srun/allocate.h | 21 +- src/srun/attach.h | 84 - src/srun/core-format.c | 4 +- src/srun/core-format.h | 4 +- src/srun/{attach.c => debugger.c} | 12 +- src/{slaunch/attach.h => srun/debugger.h} | 10 +- src/srun/fname.c | 10 +- src/srun/fname.h | 
25 +- src/srun/launch.c | 377 -- src/srun/msg.c | 1565 ----- src/srun/multi_prog.c | 61 +- src/srun/multi_prog.h | 10 +- src/srun/opt.c | 1510 ++--- src/srun/opt.h | 39 +- src/srun/reattach.c | 545 -- src/srun/signals.c | 214 - src/srun/srun.c | 1160 ++-- src/srun/srun.h | 4 +- src/srun/srun_job.c | 457 +- src/srun/srun_job.h | 141 +- src/srun/srun_pty.c | 171 + src/srun/{signals.h => srun_pty.h} | 14 +- src/sstat/Makefile.am | 20 + src/sstat/Makefile.in | 563 ++ src/sstat/options.c | 396 ++ src/sstat/print.c | 438 ++ src/sstat/process.c | 90 + src/sstat/sstat.c | 273 + src/sstat/sstat.h | 119 + src/strigger/Makefile.in | 36 +- src/strigger/opts.c | 96 +- src/strigger/strigger.c | 25 +- src/strigger/strigger.h | 5 +- src/sview/Makefile.am | 4 +- src/sview/Makefile.in | 42 +- src/sview/admin_info.c | 2 +- src/sview/block_info.c | 2 +- src/sview/common.c | 2 +- src/sview/grid.c | 3 +- src/sview/job_info.c | 32 +- src/sview/node_info.c | 2 +- src/sview/part_info.c | 90 +- src/sview/popups.c | 315 +- src/sview/submit_info.c | 2 +- src/sview/sview.c | 2 +- src/sview/sview.h | 2 +- testsuite/Makefile.in | 30 +- testsuite/expect/Makefile.am | 76 +- testsuite/expect/Makefile.in | 93 +- testsuite/expect/README | 123 +- testsuite/expect/globals | 13 +- testsuite/expect/pkill | 2 +- testsuite/expect/regression | 6 +- testsuite/expect/regression.py | 66 +- testsuite/expect/test1.1 | 2 +- testsuite/expect/test1.10 | 2 +- testsuite/expect/test1.11 | 127 +- testsuite/expect/test1.12 | 109 +- testsuite/expect/test1.13 | 8 +- testsuite/expect/test1.14 | 224 +- testsuite/expect/test1.15 | 6 +- testsuite/expect/test1.16 | 2 +- testsuite/expect/test1.17 | 196 +- testsuite/expect/test1.18 | 120 +- testsuite/expect/test1.18.prog.c | 53 - testsuite/expect/test1.19 | 8 +- testsuite/expect/test1.2 | 2 +- testsuite/expect/test1.20 | 2 +- testsuite/expect/test1.21 | 10 +- testsuite/expect/test1.22 | 6 +- testsuite/expect/test1.23 | 2 +- testsuite/expect/test1.24 | 2 +- testsuite/expect/test1.25 | 2 +- testsuite/expect/test1.26 | 12 +- testsuite/expect/test1.27 | 2 +- testsuite/expect/test1.28 | 2 +- testsuite/expect/test1.29 | 6 +- testsuite/expect/test1.29.prog.c | 2 +- testsuite/expect/test1.3 | 2 +- testsuite/expect/test1.30 | 2 +- testsuite/expect/test1.31 | 2 +- testsuite/expect/test1.32 | 12 +- testsuite/expect/test1.32.prog.c | 3 +- testsuite/expect/test1.33 | 2 +- testsuite/expect/test1.34 | 116 - testsuite/expect/test1.35 | 7 +- testsuite/expect/test1.36 | 2 +- testsuite/expect/test1.37 | 155 - testsuite/expect/test1.38 | 3 +- testsuite/expect/test1.39 | 2 +- testsuite/expect/test1.39.prog.c | 2 +- testsuite/expect/test1.4 | 2 +- testsuite/expect/test1.40 | 152 - testsuite/expect/test1.41 | 2 +- testsuite/expect/test1.42 | 110 +- testsuite/expect/test1.43 | 2 +- testsuite/expect/test1.44 | 2 +- testsuite/expect/test1.45 | 213 - testsuite/expect/test1.46 | 2 +- testsuite/expect/test1.47 | 160 - testsuite/expect/test1.48 | 2 +- testsuite/expect/test1.49 | 2 +- testsuite/expect/test1.5 | 2 +- testsuite/expect/test1.50 | 2 +- testsuite/expect/test1.51 | 2 +- testsuite/expect/test1.52 | 2 +- testsuite/expect/test1.53 | 189 - testsuite/expect/test1.54 | 15 +- testsuite/expect/test1.55 | 33 +- testsuite/expect/test1.56 | 4 +- testsuite/expect/test1.57 | 2 +- testsuite/expect/test1.58 | 8 +- testsuite/expect/test1.59 | 8 +- testsuite/expect/test1.6 | 2 +- testsuite/expect/test1.7 | 2 +- testsuite/expect/test1.8 | 2 +- testsuite/expect/test1.80 | 2 +- testsuite/expect/test1.81 | 2 +- testsuite/expect/test1.82 | 2 +- 
testsuite/expect/test1.83 | 6 +- testsuite/expect/test1.84 | 2 +- testsuite/expect/test1.85 | 182 - testsuite/expect/test1.86 | 30 +- testsuite/expect/test1.87 | 4 +- testsuite/expect/test1.88 | 12 +- testsuite/expect/test1.88.prog.c | 2 +- testsuite/expect/test1.89 | 36 +- testsuite/expect/test1.89.prog.c | 4 +- testsuite/expect/test1.9 | 2 +- testsuite/expect/test1.90 | 35 +- testsuite/expect/test1.90.prog.c | 2 +- testsuite/expect/test1.91 | 7 +- testsuite/expect/test1.91.prog.c | 6 +- testsuite/expect/test1.92 | 2 +- testsuite/expect/test1.93 | 156 + testsuite/expect/test10.1 | 2 +- testsuite/expect/test10.10 | 2 +- testsuite/expect/test10.11 | 2 +- testsuite/expect/test10.12 | 4 +- testsuite/expect/test10.13 | 2 +- testsuite/expect/test10.2 | 2 +- testsuite/expect/test10.3 | 2 +- testsuite/expect/test10.4 | 2 +- testsuite/expect/test10.5 | 2 +- testsuite/expect/test10.6 | 2 +- testsuite/expect/test10.7 | 2 +- testsuite/expect/test10.8 | 2 +- testsuite/expect/test10.9 | 2 +- testsuite/expect/test11.1 | 2 +- testsuite/expect/test11.2 | 2 +- testsuite/expect/test11.3 | 2 +- testsuite/expect/test11.4 | 2 +- testsuite/expect/test11.5 | 8 +- testsuite/expect/test11.6 | 2 +- testsuite/expect/test11.7 | 2 +- testsuite/expect/test12.1 | 4 +- testsuite/expect/test12.2 | 38 +- testsuite/expect/test12.2.prog.c | 13 +- testsuite/expect/test13.1 | 10 +- testsuite/expect/test14.1 | 2 +- testsuite/expect/test14.2 | 2 +- testsuite/expect/test14.3 | 2 +- testsuite/expect/test14.4 | 10 +- testsuite/expect/test14.5 | 10 +- testsuite/expect/test14.6 | 10 +- testsuite/expect/test14.7 | 10 +- testsuite/expect/test14.8 | 10 +- testsuite/expect/test15.1 | 2 +- testsuite/expect/test15.10 | 2 +- testsuite/expect/test15.11 | 2 +- testsuite/expect/test15.12 | 2 +- testsuite/expect/test15.13 | 2 +- testsuite/expect/test15.14 | 8 +- testsuite/expect/test15.15 | 2 +- testsuite/expect/test15.16 | 2 +- testsuite/expect/test15.17 | 2 +- testsuite/expect/test15.18 | 2 +- testsuite/expect/test15.19 | 20 +- testsuite/expect/test15.2 | 2 +- testsuite/expect/test15.20 | 14 +- testsuite/expect/test15.21 | 14 +- testsuite/expect/test15.22 | 2 +- testsuite/expect/test15.23 | 13 +- testsuite/expect/test15.24 | 4 +- testsuite/expect/test15.3 | 2 +- testsuite/expect/test15.4 | 4 +- testsuite/expect/test15.5 | 2 +- testsuite/expect/test15.6 | 2 +- testsuite/expect/test15.7 | 2 +- testsuite/expect/test15.8 | 2 +- testsuite/expect/test15.9 | 8 +- testsuite/expect/test16.1 | 2 +- testsuite/expect/test16.2 | 2 +- testsuite/expect/test16.3 | 2 +- testsuite/expect/test16.4 | 4 +- testsuite/expect/test16.4.prog.c | 2 +- testsuite/expect/test17.1 | 2 +- testsuite/expect/test17.10 | 2 +- testsuite/expect/test17.11 | 2 +- testsuite/expect/test17.12 | 2 +- testsuite/expect/test17.13 | 2 +- testsuite/expect/test17.14 | 2 +- testsuite/expect/test17.15 | 14 +- testsuite/expect/test17.15.prog.c | 2 +- testsuite/expect/test17.16 | 2 +- testsuite/expect/test17.17 | 10 +- testsuite/expect/test17.18 | 16 +- testsuite/expect/test17.19 | 10 +- testsuite/expect/test17.2 | 2 +- testsuite/expect/test17.20 | 7 +- testsuite/expect/test17.21 | 2 +- testsuite/expect/test17.22 | 2 +- testsuite/expect/test17.23 | 2 +- testsuite/expect/test17.24 | 2 +- testsuite/expect/test17.25 | 5 +- testsuite/expect/test17.26 | 4 +- testsuite/expect/test17.27 | 17 +- testsuite/expect/test17.28 | 24 +- testsuite/expect/test17.29 | 3 +- testsuite/expect/test17.3 | 2 +- testsuite/expect/test17.31 | 10 +- testsuite/expect/test17.32 | 3 +- testsuite/expect/{test18.5 => test17.33} 
| 178 +- testsuite/expect/test17.4 | 2 +- testsuite/expect/test17.5 | 60 +- testsuite/expect/test17.6 | 6 +- testsuite/expect/test17.7 | 22 +- testsuite/expect/test17.8 | 14 +- testsuite/expect/test17.9 | 2 +- testsuite/expect/test18.10 | 116 - testsuite/expect/test18.11 | 304 - testsuite/expect/test18.12 | 90 - testsuite/expect/test18.14 | 120 - testsuite/expect/test18.15 | 84 - testsuite/expect/test18.16 | 194 - testsuite/expect/test18.16.prog.c | 59 - testsuite/expect/test18.17 | 117 - testsuite/expect/test18.18 | 94 - testsuite/expect/test18.19 | 148 - testsuite/expect/test18.19.prog.c | 81 - testsuite/expect/test18.20 | 116 - testsuite/expect/test18.21 | 114 - testsuite/expect/test18.22 | 181 - testsuite/expect/test18.23 | 86 - testsuite/expect/test18.24 | 93 - testsuite/expect/test18.25 | 155 - testsuite/expect/test18.26 | 173 - testsuite/expect/test18.27 | 141 - testsuite/expect/test18.28 | 136 - testsuite/expect/test18.29 | 207 - testsuite/expect/test18.30 | 204 - testsuite/expect/test18.31 | 333 - testsuite/expect/test18.32 | 218 - testsuite/expect/test18.32.prog.c | 62 - testsuite/expect/test18.33 | 102 - testsuite/expect/test18.34 | 83 - testsuite/expect/test18.35 | 352 -- testsuite/expect/test18.36 | 499 -- testsuite/expect/test18.36.prog.c | 79 - testsuite/expect/test18.37 | 538 -- testsuite/expect/test18.37.prog.c | 75 - testsuite/expect/test18.38 | 117 - testsuite/expect/test18.4 | 85 - testsuite/expect/test18.7 | 75 - testsuite/expect/test18.8 | 88 - testsuite/expect/test18.9 | 94 - testsuite/expect/test19.1 | 2 +- testsuite/expect/test19.2 | 2 +- testsuite/expect/test19.3 | 2 +- testsuite/expect/test19.4 | 2 +- testsuite/expect/test19.5 | 6 +- testsuite/expect/test19.6 | 6 +- testsuite/expect/test19.7 | 6 +- testsuite/expect/test2.1 | 2 +- testsuite/expect/test2.10 | 2 +- testsuite/expect/test2.11 | 8 +- testsuite/expect/test2.2 | 2 +- testsuite/expect/test2.3 | 2 +- testsuite/expect/test2.4 | 2 +- testsuite/expect/test2.5 | 10 +- testsuite/expect/test2.6 | 2 +- testsuite/expect/test2.7 | 8 +- testsuite/expect/test2.8 | 14 +- testsuite/expect/test2.9 | 2 +- testsuite/expect/test20.1 | 2 +- testsuite/expect/test20.2 | 2 +- testsuite/expect/test20.3 | 2 +- testsuite/expect/test20.4 | 2 +- testsuite/expect/{test18.6 => test21.1} | 48 +- testsuite/expect/{test18.3 => test21.2} | 41 +- testsuite/expect/{test18.1 => test21.3} | 24 +- testsuite/expect/{test18.2 => test21.4} | 30 +- testsuite/expect/test21.5 | 157 + testsuite/expect/test21.6 | 169 + testsuite/expect/test3.1 | 2 +- testsuite/expect/{test18.13 => test3.10} | 83 +- testsuite/expect/test3.2 | 2 +- testsuite/expect/test3.3 | 2 +- testsuite/expect/test3.4 | 6 +- testsuite/expect/test3.5 | 2 +- testsuite/expect/test3.6 | 2 +- testsuite/expect/test3.7 | 10 +- testsuite/expect/test3.7.prog.c | 2 +- testsuite/expect/test3.8 | 14 +- testsuite/expect/test3.9 | 2 +- testsuite/expect/test4.1 | 2 +- testsuite/expect/test4.10 | 2 +- testsuite/expect/test4.11 | 2 +- testsuite/expect/test4.2 | 2 +- testsuite/expect/test4.3 | 2 +- testsuite/expect/test4.4 | 2 +- testsuite/expect/test4.5 | 2 +- testsuite/expect/test4.6 | 2 +- testsuite/expect/test4.7 | 2 +- testsuite/expect/test4.8 | 2 +- testsuite/expect/test4.9 | 2 +- testsuite/expect/test5.1 | 2 +- testsuite/expect/test5.2 | 2 +- testsuite/expect/test5.3 | 2 +- testsuite/expect/test5.4 | 14 +- testsuite/expect/test5.5 | 14 +- testsuite/expect/test5.6 | 35 +- testsuite/expect/test5.7 | 2 +- testsuite/expect/test5.8 | 2 +- testsuite/expect/test6.1 | 2 +- testsuite/expect/test6.10 | 
10 +- testsuite/expect/test6.11 | 11 +- testsuite/expect/test6.12 | 22 +- testsuite/expect/test6.13 | 17 +- testsuite/expect/test6.2 | 2 +- testsuite/expect/test6.3 | 8 +- testsuite/expect/test6.4 | 14 +- testsuite/expect/test6.5 | 14 +- testsuite/expect/test6.6 | 2 +- testsuite/expect/test6.7 | 19 +- testsuite/expect/test6.8 | 16 +- testsuite/expect/test6.9 | 10 +- testsuite/expect/test7.1 | 25 +- testsuite/expect/test7.10 | 2 +- testsuite/expect/test7.2 | 45 +- testsuite/expect/test7.2.prog.c | 2 +- testsuite/expect/test7.3 | 5 +- testsuite/expect/test7.3.io.c | 2 +- testsuite/expect/test7.3.prog.c | 4 +- testsuite/expect/test7.4 | 2 +- testsuite/expect/test7.4.prog.c | 2 +- testsuite/expect/test7.5 | 304 - testsuite/expect/test7.5.prog.c | 62 - testsuite/expect/test7.6 | 4 +- testsuite/expect/test7.6.prog.c | 2 +- testsuite/expect/test7.7 | 26 +- testsuite/expect/test7.7.prog.c | 53 +- testsuite/expect/test7.8 | 2 +- testsuite/expect/test7.8.prog.c | 2 +- testsuite/expect/test7.9 | 12 +- testsuite/expect/test7.9.prog.c | 16 +- testsuite/expect/test8.1 | 22 +- testsuite/expect/test8.2 | 18 +- testsuite/expect/test8.3 | 26 +- testsuite/expect/test8.4 | 12 +- testsuite/expect/test8.4.prog.c | 2 +- testsuite/expect/test8.5 | 29 +- testsuite/expect/test8.6 | 88 +- testsuite/expect/test8.7 | 256 + testsuite/expect/test8.7.crypto.c | 134 + testsuite/expect/test8.7.prog.c | 329 + testsuite/expect/test9.1 | 6 +- testsuite/expect/test9.2 | 6 +- testsuite/expect/test9.3 | 10 +- testsuite/expect/test9.4 | 2 +- testsuite/expect/test9.5 | 2 +- testsuite/expect/test9.6 | 6 +- testsuite/expect/test9.7 | 2 +- testsuite/expect/test9.7.bash | 2 +- testsuite/expect/test9.8 | 12 +- testsuite/expect/usleep | 2 +- testsuite/slurm_unit/Makefile.in | 30 +- testsuite/slurm_unit/api/Makefile.in | 32 +- testsuite/slurm_unit/api/manual/Makefile.in | 32 +- testsuite/slurm_unit/api/manual/cancel-tst.c | 2 +- .../slurm_unit/api/manual/complete-tst.c | 2 +- .../slurm_unit/api/manual/job_info-tst.c | 2 +- .../slurm_unit/api/manual/node_info-tst.c | 2 +- .../api/manual/partition_info-tst.c | 2 +- .../slurm_unit/api/manual/reconfigure-tst.c | 2 +- testsuite/slurm_unit/api/manual/submit-tst.c | 2 +- .../slurm_unit/api/manual/update_config-tst.c | 2 +- testsuite/slurm_unit/common/Makefile.in | 32 +- testsuite/slurm_unit/common/pack-test.c | 6 +- testsuite/slurm_unit/slurmctld/Makefile.in | 17 +- .../slurm_unit/slurmctld/security_2_1.py | 11 +- .../slurm_unit/slurmctld/security_2_2a.sh | 10 + .../{security_2_2.sh => security_2_2b.sh} | 7 +- testsuite/slurm_unit/slurmd/Makefile.in | 17 +- 1141 files changed, 92055 insertions(+), 42278 deletions(-) create mode 100644 auxdir/x_ac_databases.m4 create mode 100644 contribs/phpext/Makefile.am create mode 100644 contribs/phpext/Makefile.in create mode 100644 contribs/phpext/README create mode 100644 contribs/phpext/slurm_php/config.m4.in create mode 100644 contribs/phpext/slurm_php/slurm_php.c rename src/srun/sigstr.c => contribs/phpext/slurm_php/slurm_php.h (74%) create mode 100644 doc/html/accounting.shtml create mode 100644 doc/html/cons_res_share.shtml create mode 100644 doc/html/crypto_plugins.shtml create mode 100644 doc/html/gang_scheduling.shtml create mode 100644 doc/html/jobacct_gatherplugins.shtml create mode 100644 doc/html/jobacct_storageplugins.shtml delete mode 100644 doc/html/jobacctplugins.shtml create mode 100644 doc/html/preempt.shtml create mode 100644 doc/html/slurm_moab.pdf create mode 100644 doc/html/slurm_v1.3.pdf create mode 100644 doc/man/man1/sacctmgr.1 
delete mode 100644 doc/man/man1/slaunch.1 create mode 100644 doc/man/man1/sreport.1 create mode 100644 doc/man/man1/sstat.1 create mode 100644 doc/man/man5/slurmdbd.conf.5 create mode 100644 doc/man/man8/slurmdbd.8 create mode 100755 etc/init.d.slurmdbd rename src/{salloc/msg.c => api/allocate_msg.c} (60%) create mode 100644 src/common/assoc_mgr.c create mode 100644 src/common/assoc_mgr.h create mode 100644 src/common/jobacct_common.c create mode 100644 src/common/jobacct_common.h create mode 100644 src/common/proc_args.c create mode 100644 src/common/proc_args.h create mode 100644 src/common/slurm_accounting_storage.c create mode 100644 src/common/slurm_accounting_storage.h delete mode 100644 src/common/slurm_jobacct.c create mode 100644 src/common/slurm_jobacct_gather.c rename src/common/{slurm_jobacct.h => slurm_jobacct_gather.h} (50%) create mode 100644 src/common/slurmdbd_defs.c create mode 100644 src/common/slurmdbd_defs.h create mode 100644 src/database/Makefile.am create mode 100644 src/database/Makefile.in rename src/{plugins/jobacct/gold => database}/base64.c (97%) rename src/{plugins/jobacct/gold => database}/base64.h (100%) rename src/{plugins/jobacct/gold => database}/gold_interface.c (90%) rename src/{plugins/jobacct/gold => database}/gold_interface.h (82%) create mode 100644 src/database/mysql_common.c create mode 100644 src/database/mysql_common.h create mode 100644 src/database/pgsql_common.c create mode 100644 src/database/pgsql_common.h create mode 100644 src/plugins/accounting_storage/Makefile.am create mode 100644 src/plugins/accounting_storage/Makefile.in create mode 100644 src/plugins/accounting_storage/filetxt/Makefile.am create mode 100644 src/plugins/accounting_storage/filetxt/Makefile.in create mode 100644 src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c create mode 100644 src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c create mode 100644 src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h create mode 100644 src/plugins/accounting_storage/gold/Makefile.am create mode 100644 src/plugins/accounting_storage/gold/Makefile.in create mode 100644 src/plugins/accounting_storage/gold/accounting_storage_gold.c create mode 100644 src/plugins/accounting_storage/mysql/Makefile.am create mode 100644 src/plugins/accounting_storage/mysql/Makefile.in create mode 100644 src/plugins/accounting_storage/mysql/accounting_storage_mysql.c create mode 100644 src/plugins/accounting_storage/mysql/mysql_jobacct_process.c create mode 100644 src/plugins/accounting_storage/mysql/mysql_jobacct_process.h create mode 100644 src/plugins/accounting_storage/mysql/mysql_rollup.c rename src/{srun/launch.h => plugins/accounting_storage/mysql/mysql_rollup.h} (65%) create mode 100644 src/plugins/accounting_storage/none/Makefile.am create mode 100644 src/plugins/accounting_storage/none/Makefile.in create mode 100644 src/plugins/accounting_storage/none/accounting_storage_none.c create mode 100644 src/plugins/accounting_storage/pgsql/Makefile.am create mode 100644 src/plugins/accounting_storage/pgsql/Makefile.in create mode 100644 src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c create mode 100644 src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c create mode 100644 src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h create mode 100644 src/plugins/accounting_storage/slurmdbd/Makefile.am create mode 100644 src/plugins/accounting_storage/slurmdbd/Makefile.in create mode 100644 
src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c create mode 100644 src/plugins/checkpoint/xlch/Makefile.am create mode 100644 src/plugins/checkpoint/xlch/Makefile.in create mode 100644 src/plugins/checkpoint/xlch/checkpoint_xlch.c create mode 100644 src/plugins/crypto/Makefile.am create mode 100644 src/plugins/crypto/Makefile.in create mode 100644 src/plugins/crypto/munge/Makefile.am create mode 100644 src/plugins/crypto/munge/Makefile.in create mode 100644 src/plugins/crypto/munge/crypto_munge.c create mode 100644 src/plugins/crypto/openssl/Makefile.am create mode 100644 src/plugins/crypto/openssl/Makefile.in create mode 100644 src/plugins/crypto/openssl/crypto_openssl.c delete mode 100644 src/plugins/jobacct/aix/Makefile.am delete mode 100644 src/plugins/jobacct/common/common_slurmctld.c delete mode 100644 src/plugins/jobacct/common/common_slurmstepd.c delete mode 100644 src/plugins/jobacct/common/jobacct_common.c delete mode 100644 src/plugins/jobacct/common/jobacct_common.h delete mode 100644 src/plugins/jobacct/gold/Makefile.am delete mode 100644 src/plugins/jobacct/gold/agent.c delete mode 100644 src/plugins/jobacct/gold/agent.h delete mode 100644 src/plugins/jobacct/gold/jobacct_gold.c delete mode 100644 src/plugins/jobacct/linux/Makefile.am delete mode 100644 src/plugins/jobacct/none/Makefile.am rename src/plugins/{jobacct => jobacct_gather}/Makefile.am (51%) rename src/plugins/{jobacct => jobacct_gather}/Makefile.in (94%) create mode 100644 src/plugins/jobacct_gather/aix/Makefile.am rename src/plugins/{jobacct => jobacct_gather}/aix/Makefile.in (74%) rename src/plugins/{jobacct/aix/jobacct_aix.c => jobacct_gather/aix/jobacct_gather_aix.c} (75%) create mode 100644 src/plugins/jobacct_gather/linux/Makefile.am rename src/plugins/{jobacct => jobacct_gather}/linux/Makefile.in (74%) rename src/plugins/{jobacct/linux/jobacct_linux.c => jobacct_gather/linux/jobacct_gather_linux.c} (66%) create mode 100644 src/plugins/jobacct_gather/none/Makefile.am rename src/plugins/{jobacct/gold => jobacct_gather/none}/Makefile.in (86%) rename src/plugins/{jobacct/none/jobacct_none.c => jobacct_gather/none/jobacct_gather_none.c} (61%) create mode 100644 src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c create mode 100644 src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h create mode 100644 src/plugins/jobcomp/mysql/Makefile.am create mode 100644 src/plugins/jobcomp/mysql/Makefile.in create mode 100644 src/plugins/jobcomp/mysql/jobcomp_mysql.c create mode 100644 src/plugins/jobcomp/mysql/mysql_jobcomp_process.c create mode 100644 src/plugins/jobcomp/mysql/mysql_jobcomp_process.h create mode 100644 src/plugins/jobcomp/pgsql/Makefile.am create mode 100644 src/plugins/jobcomp/pgsql/Makefile.in create mode 100644 src/plugins/jobcomp/pgsql/jobcomp_pgsql.c create mode 100644 src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c create mode 100644 src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h create mode 100644 src/plugins/jobcomp/slurmdbd/Makefile.am rename src/plugins/{jobacct/none => jobcomp/slurmdbd}/Makefile.in (86%) create mode 100644 src/plugins/jobcomp/slurmdbd/jobcomp_slurmdbd.c create mode 100644 src/plugins/select/bluegene/plugin/bg_record_functions.c create mode 100644 src/plugins/select/bluegene/plugin/bg_record_functions.h create mode 100644 src/plugins/select/bluegene/plugin/defined_block.c create mode 100644 src/plugins/select/bluegene/plugin/defined_block.h create mode 100644 src/plugins/select/bluegene/plugin/dynamic_block.c create mode 100644 
src/plugins/select/bluegene/plugin/dynamic_block.h create mode 100644 src/plugins/select/linear/select_linear.h create mode 100644 src/sacctmgr/Makefile.am create mode 100644 src/sacctmgr/Makefile.in create mode 100644 src/sacctmgr/account_functions.c create mode 100644 src/sacctmgr/association_functions.c create mode 100644 src/sacctmgr/cluster_functions.c create mode 100644 src/sacctmgr/common.c create mode 100644 src/sacctmgr/print.c create mode 100644 src/sacctmgr/print.h create mode 100644 src/sacctmgr/sacctmgr.c create mode 100644 src/sacctmgr/sacctmgr.h create mode 100644 src/sacctmgr/user_functions.c delete mode 100644 src/salloc/msg.h delete mode 100644 src/slaunch/Makefile.am delete mode 100644 src/slaunch/attach.c delete mode 100644 src/slaunch/core-format.c delete mode 100644 src/slaunch/core-format.h delete mode 100644 src/slaunch/fname.c delete mode 100644 src/slaunch/fname.h delete mode 100644 src/slaunch/multi_prog.c delete mode 100644 src/slaunch/multi_prog.h delete mode 100644 src/slaunch/opt.c delete mode 100644 src/slaunch/opt.h delete mode 100644 src/slaunch/sigstr.c delete mode 100644 src/slaunch/sigstr.h delete mode 100644 src/slaunch/slaunch.c delete mode 100644 src/slaunch/slaunch.h delete mode 100644 src/slaunch/slaunch.wrapper.c create mode 100644 src/slurmctld/job_scheduler.h create mode 100644 src/slurmctld/licenses.c create mode 100644 src/slurmctld/licenses.h create mode 100644 src/slurmdbd/Makefile.am create mode 100644 src/slurmdbd/Makefile.in create mode 100644 src/slurmdbd/agent.c rename src/{srun/reattach.h => slurmdbd/agent.h} (82%) create mode 100644 src/slurmdbd/proc_req.c rename src/{sacct/sacct_stat.h => slurmdbd/proc_req.h} (68%) create mode 100644 src/slurmdbd/read_config.c create mode 100644 src/slurmdbd/read_config.h create mode 100644 src/slurmdbd/rpc_mgr.c rename src/{srun/msg.h => slurmdbd/rpc_mgr.h} (73%) create mode 100644 src/slurmdbd/slurmdbd.c rename src/{srun/sigstr.h => slurmdbd/slurmdbd.h} (80%) create mode 100644 src/sreport/Makefile.am rename src/{slaunch => sreport}/Makefile.in (80%) create mode 100644 src/sreport/sreport.c create mode 100644 src/sreport/sreport.h delete mode 100644 src/srun/attach.h rename src/srun/{attach.c => debugger.c} (91%) rename src/{slaunch/attach.h => srun/debugger.h} (92%) delete mode 100644 src/srun/launch.c delete mode 100644 src/srun/msg.c delete mode 100644 src/srun/reattach.c delete mode 100644 src/srun/signals.c create mode 100644 src/srun/srun_pty.c rename src/srun/{signals.h => srun_pty.h} (89%) create mode 100644 src/sstat/Makefile.am create mode 100644 src/sstat/Makefile.in create mode 100644 src/sstat/options.c create mode 100644 src/sstat/print.c create mode 100644 src/sstat/process.c create mode 100644 src/sstat/sstat.c create mode 100644 src/sstat/sstat.h delete mode 100644 testsuite/expect/test1.18.prog.c delete mode 100755 testsuite/expect/test1.34 delete mode 100755 testsuite/expect/test1.37 delete mode 100755 testsuite/expect/test1.40 delete mode 100755 testsuite/expect/test1.45 delete mode 100755 testsuite/expect/test1.47 delete mode 100755 testsuite/expect/test1.53 delete mode 100755 testsuite/expect/test1.85 create mode 100755 testsuite/expect/test1.93 rename testsuite/expect/{test18.5 => test17.33} (54%) delete mode 100755 testsuite/expect/test18.10 delete mode 100755 testsuite/expect/test18.11 delete mode 100755 testsuite/expect/test18.12 delete mode 100755 testsuite/expect/test18.14 delete mode 100755 testsuite/expect/test18.15 delete mode 100755 testsuite/expect/test18.16 delete 
mode 100644 testsuite/expect/test18.16.prog.c delete mode 100755 testsuite/expect/test18.17 delete mode 100755 testsuite/expect/test18.18 delete mode 100755 testsuite/expect/test18.19 delete mode 100644 testsuite/expect/test18.19.prog.c delete mode 100755 testsuite/expect/test18.20 delete mode 100755 testsuite/expect/test18.21 delete mode 100755 testsuite/expect/test18.22 delete mode 100755 testsuite/expect/test18.23 delete mode 100755 testsuite/expect/test18.24 delete mode 100755 testsuite/expect/test18.25 delete mode 100755 testsuite/expect/test18.26 delete mode 100755 testsuite/expect/test18.27 delete mode 100755 testsuite/expect/test18.28 delete mode 100755 testsuite/expect/test18.29 delete mode 100755 testsuite/expect/test18.30 delete mode 100755 testsuite/expect/test18.31 delete mode 100755 testsuite/expect/test18.32 delete mode 100644 testsuite/expect/test18.32.prog.c delete mode 100755 testsuite/expect/test18.33 delete mode 100755 testsuite/expect/test18.34 delete mode 100755 testsuite/expect/test18.35 delete mode 100755 testsuite/expect/test18.36 delete mode 100644 testsuite/expect/test18.36.prog.c delete mode 100755 testsuite/expect/test18.37 delete mode 100644 testsuite/expect/test18.37.prog.c delete mode 100755 testsuite/expect/test18.38 delete mode 100755 testsuite/expect/test18.4 delete mode 100755 testsuite/expect/test18.7 delete mode 100755 testsuite/expect/test18.8 delete mode 100755 testsuite/expect/test18.9 rename testsuite/expect/{test18.6 => test21.1} (63%) rename testsuite/expect/{test18.3 => test21.2} (71%) rename testsuite/expect/{test18.1 => test21.3} (77%) rename testsuite/expect/{test18.2 => test21.4} (74%) create mode 100755 testsuite/expect/test21.5 create mode 100755 testsuite/expect/test21.6 rename testsuite/expect/{test18.13 => test3.10} (61%) delete mode 100755 testsuite/expect/test7.5 delete mode 100644 testsuite/expect/test7.5.prog.c create mode 100755 testsuite/expect/test8.7 create mode 100644 testsuite/expect/test8.7.crypto.c create mode 100644 testsuite/expect/test8.7.prog.c create mode 100755 testsuite/slurm_unit/slurmctld/security_2_2a.sh rename testsuite/slurm_unit/slurmctld/{security_2_2.sh => security_2_2b.sh} (79%) diff --git a/AUTHORS b/AUTHORS index e97f4e4aa..f12487efb 100644 --- a/AUTHORS +++ b/AUTHORS @@ -3,7 +3,7 @@ Ernest Artiaga <ernest.artiaga(at)bsc.es> Danny Auble <auble1(at)llnl.gov> Susanne Balle <susanne.balle(at)hp.com> Anton Blanchard <anton(at)samba.org> -Hongjia Cao <hgcao(at)nudt.edu.cn> +Hongjia Cao <hjcao(at)nudt.edu.cn> Chuck Clouston <Chuck.Clouston(at)bull.com> Daniel Christians <Daniel.Christians(at)hp.com> Gilles Civario <gilles.civario(at)bull.net> @@ -11,8 +11,9 @@ Chris Dunlap <cdunlap(at)llnl.gov> Joey Ekstrom <ekstrom1(at)llnl.gov> Jim Garlick <garlick(at)llnl.gov> Mark Grondona <mgrondona(at)llnl.gov> -Christopher Holmes <cholmes(at)hp.com> Takao Hatazaki <takao.hatazaki(at)hp.com> +Matthieu Hautreux <matthieu.hautreux(at)cea.fr> +Christopher Holmes <cholmes(at)hp.com> Nathan Huff <nhuff(at)geekshanty.com> David Jackson <jacksond(at)clusterresources.com> Greg Johnson <gjohnson(at)lanl.gov> diff --git a/BUILD.NOTES b/BUILD.NOTES index 2d52a948d..b8eff4b63 100644 --- a/BUILD.NOTES +++ b/BUILD.NOTES @@ -65,12 +65,15 @@ Linux cluster (See BlueGene and AIX specific notes below for some differences). BlueGene build notes: 3. Use the rpm make target to create the new RPMs. 
This requires a .rpmmacros (.rpmrc for newer versions of rpmbuild) file containing: + %_prefix /usr %_slurm_sysconfdir /etc/slurm - %_with_debug 1 %_with_bluegene 1 + %_with_debug 1 %with_cflags CFLAGS=-m64 Build on Service Node with using the following syntax rpmbuild -ta slurm-...bz2 + The RPM files get written to the directory + /usr/src/packages/RPMS/ppc64 To build and run on AIX: 0. svn co https://eris.llnl.gov/svn/slurm/trunk slurm @@ -151,7 +154,7 @@ Some RPM commands: rpm -i --ignoresize slurm-1.1.9-1.rpm (install a new rpm) For main SLURM plugin installation on BGL service node: rpm -i --force --nodeps --ignoresize slurm-1.1.9-1.rpm - + rpm -U --force --nodeps --ignoresize slurm-1.1.9-1.rpm (upgrade option) To clear a wedged job: /bgl/startMMCSconsole @@ -183,7 +186,7 @@ Before new major release: - Test on ia64, i386, x86_64, BGL, AIX, OSX, XCPU - Test on Elan and IB switches - Test fail-over of slurmctld - - Test for memory leaks in slurmctld and slurmd + - Test for memory leaks in slurmctld, slurmd and slurmdbd with various plugins - Change API version number - Review and release web pages - Review and release code diff --git a/COPYING b/COPYING index 55269c8e3..afd7942ba 100644 --- a/COPYING +++ b/COPYING @@ -17,27 +17,27 @@ also delete it here. OUR NOTICE AND TERMS OF AND CONDITIONS OF THE GNU GENERAL PUBLIC LICENSE -Our Preamble Notice +Auspices -A. This notice is required to be provided under our contract with the U.S. -Department of Energy (DOE). This work was produced at the University -of California, Lawrence Livermore National Laboratory under Contract -No. W-7405-ENG-48 with the DOE. +This work performed under the auspices of the U.S. Department of Energy by +Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344. -B. Neither the United States Government nor the University of California -nor any of their employees, makes any warranty, express or implied, or -assumes any liability or responsibility for the accuracy, completeness, or -usefulness of any information, apparatus, product, or process disclosed, or -represents that its use would not infringe privately-owned rights. +Disclaimer -C. Also, reference herein to any specific commercial products, process, or -services by trade names, trademark, manufacturer or otherwise does not +This work was sponsored by an agency of the United States government. +Neither the United States Government nor Lawrence Livermore National +Security, LLC, nor any of their employees, makes any warranty, express +or implied, or assumes any liability or responsibility for the accuracy, +completeness, or usefulness of any information, apparatus, product, or +process disclosed, or represents that its use would not infringe privately +owned rights. References herein to any specific commercial products, process, +or services by trade names, trademark, manufacturer or otherwise does not necessarily constitute or imply its endorsement, recommendation, or -favoring by the United States Government or the University of California. -The views and opinions of authors expressed herein do not necessarily -state or reflect those of the United States Government or the University of -California, and shall not be used for advertising or product endorsement -purposes. +favoring by the United States Government or the Lawrence Livermore National +Security, LLC. 
The views and opinions of authors expressed herein do not +necessarily state or reflect those of the United States government or +Lawrence Livermore National Security, LLC, and shall not be used for +advertising or product endorsement purposes. ============================================================================= diff --git a/DISCLAIMER b/DISCLAIMER index 024277266..ea55a6dba 100644 --- a/DISCLAIMER +++ b/DISCLAIMER @@ -1,33 +1,50 @@ -Copyright (C) 2002-2006 The Regents of the University of California. +Copyright (C) 2008 Lawrence Livermore National Security and Hewlett-Packard. +Copyright (C) 2002-2007 The Regents of the University of California, + Linux NetworX, Hewlett-Packard and Bull. Produced at Lawrence Livermore National Laboratory, Hewlett-Packard, -Linux NetworX, and other sites. +Bull, Linux NetworX, and others. Written by: -Ernest Artiaga <ernest.artiaga@bsc.es> -Danny Auble <auble1@llnl.gov> -Susanne Balle <susanne.balle@hp.com> -Daniel Christians <Daniel.Christians@hp.com> -Chris Dunlap <cdunlap@llnl.gov> -Joey Ekstrom <ekstrom1@llnl.gov> -Jim Garlick <garlick@llnl.gov> -Mark Grondona <grondona1@llnl.gov> -Christopher Holmes <cholmes@hp.com> -Takae Hatazaki <takao.hatazaki@hp.com> -Nathan Huff <nhuff@geekshanty.com> -David Jackson <jacksond@clusterresources.com> -Greg Johnson <gjohnson@lanl.gov> -Morris Jette <jette1@llnl.gov> -Jason King <king49@llnl.gov> -Chris Morrone <morrone2@llnl.gov> -Brian O'Sullivan <bos@pathscale.com> -Daniel Palermo <dan.palermo@hp.com> -Dan Phung <phung4@llnl.gov> -Andy Riebs <Andy.Riebs@hp.com> -Jeff Squyres <jsquyres@lam-mpi.org> -Keven Tew <tew1@llnl.gov> -Jay Windley <jwindley@lnxi.com> +Don Albert <Don.Albert(at)bull.com> +Ernest Artiaga <ernest.artiaga(at)bsc.es> +Danny Auble <auble1(at)llnl.gov> +Susanne Balle <susanne.balle(at)hp.com> +Anton Blanchard <anton(at)samba.org> +Hongjia Cao <hgcao(at)nudt.edu.cn> +Chuck Clouston <Chuck.Clouston(at)bull.com> +Daniel Christians <Daniel.Christians(at)hp.com> +Gilles Civario <gilles.civario(at)bull.net> +Chris Dunlap <cdunlap(at)llnl.gov> +Joey Ekstrom <ekstrom1(at)llnl.gov> +Jim Garlick <garlick(at)llnl.gov> +Mark Grondona <mgrondona(at)llnl.gov> +Christopher Holmes <cholmes(at)hp.com> +Takao Hatazaki <takao.hatazaki(at)hp.com> +Nathan Huff <nhuff(at)geekshanty.com> +David Jackson <jacksond(at)clusterresources.com> +Greg Johnson <gjohnson(at)lanl.gov> +Morris Jette <jette1(at)llnl.gov> +Jason King <king49(at)llnl.gov> +Nancy Kritkausky <Nancy.Kritkausky(at)bull.com> +Bernard Li <bli(at)bcgsc.ca> +Puenlap Lee <Puen-Lap.Lee(at)bull.com> +Donna Mecozzi <mecozzi1(at)llnl.gov> +Chris Morrone <morrone2(at)llnl.gov> +Bryan O'Sullivan <bos(at)pathscale.com> +Gennaro Oliva <oliva.g(at)na.icar.cnr.it> +Daniel Palermo <dan.palermo(at)hp.com> +Dan Phung <phung4(at)llnl.gov> +Ashley Pitman <ashley(at)quadrics.com> +Andy Riebs <Andy.Riebs(at)hp.com> +Asier Roa <asier.roa(at)bsc.es> +Federico Sacerdoti <Federico.Sacerdoti(at)deshaw.com> +Jeff Squyres <jsquyres(at)lam-mpi.org> +Keven Tew <tew1(at)llnl.gov> +Prashanth Tamraparni <prashanth.tamraparni(at)hp.com> +Jay Windley <jwindley(at)lnxi.com> +Ann-Marie Wunderlin<Anne-Marie.Wunderlin(at)Bull.com> -UCRL-CODE-226842. +LLNL-CODE-402394. This file is part of SLURM, a resource management program. For details, see <http://www.llnl.gov/linux/slurm/>. @@ -51,25 +68,27 @@ OUR NOTICE AND TERMS OF AND CONDITIONS OF THE GNU GENERAL PUBLIC LICENSE Our Preamble Notice -A. This notice is required to be provided under our contract with the U.S. 
-Department of Energy (DOE). This work was produced at the University -of California, Lawrence Livermore National Laboratory under Contract -No. W-7405-ENG-48 with the DOE. +Auspices -B. Neither the United States Government nor the University of California -nor any of their employees, makes any warranty, express or implied, or -assumes any liability or responsibility for the accuracy, completeness, or -usefulness of any information, apparatus, product, or process disclosed, or -represents that its use would not infringe privately-owned rights. +This work performed under the auspices of the U.S. Department of Energy by +Lawrence Livermore National Laboratory under Contract DE-AC52-07NA27344. -C. Also, reference herein to any specific commercial products, process, or -services by trade names, trademark, manufacturer or otherwise does not -necessarily constitute or imply its endorsement, recommendation, or -favoring by the United States Government or the University of California. -The views and opinions of authors expressed herein do not necessarily -state or reflect those of the United States Government or the University of -California, and shall not be used for advertising or product endorsement -purposes. +Disclaimer + +This work was sponsored by an agency of the United States government. +Neither the United States Government nor Lawrence Livermore National +Security, LLC, nor any of their employees, makes any warranty, express +or implied, or assumes any liability or responsibility for the accuracy, +completeness, or usefulness of any information, apparatus, product, or +process disclosed, or represents that its use would not infringe privately +owned rights. References herein to any specific commercial products, process, +or services by trade names, trademark, manufacturer or otherwise does not +necessarily constitute or imply its endorsement, recommendation, or +favoring by the United States Government or the Lawrence Livermore National +Security, LLC. The views and opinions of authors expressed herein do not +necessarily state or reflect those of the United States government or +Lawrence Livermore National Security, LLC, and shall not be used for +advertising or product endorsement purposes. The precise terms and conditions for copying, distribution and modification is provided in the file named "COPYING" in this directory. diff --git a/META b/META index 50b7e7dbe..21ee366df 100644 --- a/META +++ b/META @@ -1,11 +1,11 @@ Api_age: 0 - Api_current: 11 + Api_current: 13 Api_revision: 0 Major: 1 Meta: 1 - Micro: 27 - Minor: 2 + Micro: 3 + Minor: 3 Name: slurm Release: 1 Release_tags: - Version: 1.2.27 + Version: 1.3.3 diff --git a/Makefile.am b/Makefile.am index 50c334895..33a35825d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -9,6 +9,7 @@ EXTRA_DIST = \ etc/slurm.conf.example \ etc/slurm.epilog.clean \ etc/init.d.slurm \ + etc/init.d.slurmdbd \ autogen.sh \ slurm.spec \ README \ diff --git a/Makefile.in b/Makefile.in index 010f7d2a8..b06636b44 100644 --- a/Makefile.in +++ b/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -39,6 +39,7 @@ DIST_COMMON = README $(am__configure_deps) $(pkginclude_HEADERS) \ $(srcdir)/config.h.in $(srcdir)/config.xml.in \ $(top_srcdir)/configure \ $(top_srcdir)/contribs/perlapi/libslurm-perl/Makefile.PL.in \ + $(top_srcdir)/contribs/phpext/slurm_php/config.m4.in \ $(top_srcdir)/slurm/slurm.h.in AUTHORS COPYING ChangeLog \ INSTALL NEWS ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 @@ -48,6 +49,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -70,7 +73,8 @@ am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ mkinstalldirs = $(install_sh) -d CONFIG_HEADER = config.h $(top_builddir)/slurm/slurm.h CONFIG_CLEAN_FILES = config.xml \ - contribs/perlapi/libslurm-perl/Makefile.PL + contribs/perlapi/libslurm-perl/Makefile.PL \ + contribs/phpext/slurm_php/config.m4 SOURCES = DIST_SOURCES = RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ @@ -128,6 +132,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -141,10 +146,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -164,7 +172,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -175,6 +186,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -190,6 +203,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -205,6 +219,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -270,6 +285,7 @@ EXTRA_DIST = \ etc/slurm.conf.example \ etc/slurm.epilog.clean \ etc/init.d.slurm \ + etc/init.d.slurmdbd \ autogen.sh \ slurm.spec \ README \ @@ -365,6 +381,8 @@ config.xml: $(top_builddir)/config.status $(srcdir)/config.xml.in cd $(top_builddir) && $(SHELL) ./config.status $@ contribs/perlapi/libslurm-perl/Makefile.PL: $(top_builddir)/config.status $(top_srcdir)/contribs/perlapi/libslurm-perl/Makefile.PL.in cd $(top_builddir) && $(SHELL) ./config.status $@ 
+contribs/phpext/slurm_php/config.m4: $(top_builddir)/config.status $(top_srcdir)/contribs/phpext/slurm_php/config.m4.in + cd $(top_builddir) && $(SHELL) ./config.status $@ mostlyclean-libtool: -rm -f *.lo @@ -467,8 +485,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -493,8 +511,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -504,13 +522,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique @@ -581,6 +598,10 @@ dist-bzip2: distdir tardir=$(distdir) && $(am__tar) | bzip2 -9 -c >$(distdir).tar.bz2 $(am__remove_distdir) +dist-lzma: distdir + tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma + $(am__remove_distdir) + dist-tarZ: distdir tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z $(am__remove_distdir) @@ -607,6 +628,8 @@ distcheck: dist GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(am__untar) ;;\ *.tar.bz2*) \ bunzip2 -c $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lzma*) \ + unlzma -c $(distdir).tar.lzma | $(am__untar) ;;\ *.tar.Z*) \ uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ *.shar.gz*) \ @@ -762,8 +785,8 @@ uninstall-am: uninstall-pkgincludeHEADERS .PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ all all-am am--refresh check check-am clean clean-generic \ clean-libtool ctags ctags-recursive dist dist-all dist-bzip2 \ - dist-gzip dist-shar dist-tarZ dist-zip distcheck distclean \ - distclean-generic distclean-hdr distclean-libtool \ + dist-gzip dist-lzma dist-shar dist-tarZ dist-zip distcheck \ + distclean distclean-generic distclean-hdr distclean-libtool \ distclean-local distclean-tags distcleancheck distdir \ distuninstallcheck dvi dvi-am html html-am info info-am \ install install-am install-data install-data-am install-dvi \ diff --git a/NEWS b/NEWS index 3dc5b0fd1..83bf170af 100644 --- a/NEWS +++ b/NEWS @@ -1,6 +1,354 @@ This file describes changes in recent versions of SLURM. It primarily documents those changes that are of interest to users and admins. +* Changes in SLURM 1.3.3 +======================== + -- Add mpi_openmpi plugin to the main SLURM RPM. 
+ -- Prevent invalid memory reference when using srun's --cpu_bind=cores option + (slurm-1.3.2-1.cea1.patch from Matthieu Hautreux, CEA). + -- Task affinity plugin modified to support a particular cpu bind type: cores, + sockets, threads, or none. Accomplished by setting an environment variable + SLURM_ENFORCE_CPU_TYPE (slurm-1.3.2-1.cea2.patch from Matthieu Hautreux, + CEA). + -- For BlueGene only, log "Prolog failure" once per job not once per node. + -- Reopen slurmctld log file after reconfigure or SIGHUP is received. + -- In TaskPlugin=task/affinity, fix possible infinite loop for slurmd. + -- Accounting rollup works for mysql plugin. Automatic rollup when using + slurmdbd. + -- Copied job stat logic out of sacct into sstat in the future sacct -stat + will be deprecated. + -- Correct sbatch processing of --nice option with negative values. + -- Add squeue formatted print option %Q to print a job's integer priority. + -- In sched/backfill, fix bug that was changing a pending job's shared value + to zero (possibly changing a pending job's resource requirements from a + processor on some node to the full node). + +* Changes in SLURM 1.3.2 +======================== + -- Get --ntasks-per-node option working for sbatch command. + -- BLUEGENE: Added logic to give back a best block on overlapped mode + in test_only mode + -- BLUEGENE: Updated debug info and man pages for better help with the + numpsets option and to fail correctly with bad image request for building + blocks. + -- In sched/wiki and sched/wiki2 properly support Slurm license consumption + (job state reported as "Hold" when required licenses are not available). + -- In sched/wiki2 JobWillRun command, don't return an error code if the job(s) + can not be started at that time. Just return an error message (from + Doug Wightman, CRI). + -- Fix bug if sched/wiki or sched/wiki2 are configured and no job comment is + set. + -- scontrol modified to report partition partition's "DisableRootJobs" value. + -- Fix bug in setting host address for PMI communications (mpich2 only). + -- Fix for memory size accounting on some architectures. + -- In sbatch and salloc, change --dependency's one letter option from "-d" + to "-P" (continue to accept "-d", but change the documentation). + -- Only check that task_epilog and task_prolog are runable by the job's + user, not as root. + -- In sbatch, if specifying an alternate directory (--workdir/-D), then + input, output and error files are in that directory rather than the + directory from which the command is executed + -- NOTE: Fully operational with Moab version 5.2.3+. Change SUBMITCMD in + moab.cfg to be the location of sbatch rather than srun. Also set + HostFormat=2 in SLURM's wiki.conf for improved performance. + -- NOTE: We needed to change an RPC from version 1.3.1. You must upgrade + all nodes in a cluster from v1.3.1 to v1.3.2 at the same time. + -- Postgres plugin will work from job accounting, not for association + management yet. + -- For srun/sbatch --get-user-env option (Moab use only) look for "env" + command in both /bin and /usr/sbin (for Suse Linux). + -- Fix bug in processing job feature requests with node counts (could fail + to schedule job if some nodes have not associated features). + -- Added nodecnt and gid to jobcomp/script + -- Insure that nodes select in "srun --will-run" command or the equivalent in + sched/wiki2 are in the job's partition. 
+ -- BLUGENE - changed partition Min|MaxNodes to represent c-node counts + instead of base partitions + -- In sched/gang only, prevent possible invalid memory reference when + slurmctld is reconfigured, e.g. "scontrol reconfig". + -- In select/linear only, prevent invalid memory reference in log message when + nodes are added to slurm.conf and then "scontrol reconfig" is executed. + +* Changes in SLURM 1.3.1 +======================== + -- Correct logic for processing batch job's memory limit enforcement. + -- Fix bug that was setting a job's requeue value on any update of the + job using the "scontrol update" command. The invalid value of an + updated job prevents it's recovery when slurmctld restarts. + -- Add support for cluster-wide consumable resources. See "Licenses" + parameter in slurm.conf man page and "--licenses" option in salloc, + sbatch and srun man pages. + -- Major changes in select/cons_res to support FastSchedule=2 with more + resources configured than actually exist (useful for testing purposes). + -- Modify srun --test-only response to include expected initiation time + for a job as well as the nodes to be allocated and processor count + (for use by Moab). + -- Correct sched/backfill to properly honor job dependencies. + -- Correct select/cons_res logic to allocate CPUs properly if there is + more than one thread per core (previously failed to allocate all cores). + -- Correct select/linear logic in shared job count (was off by 1). + -- Add support for job preeption based upon partition priority (in sched/gang, + preempt.patch from Chris Holmes, HP). + -- Added much better logic for mysql accounting. + -- Finished all basic functionality for sacctmgr. + -- Added load file logic to sacctmgr for setting up a cluster in one step. + -- NOTE: We needed to change an RPC from version 1.3.0. You must upgrade + all nodes in a cluster from v1.3.0 to v1.3.1 at the same time. + -- NOTE: Work is currently underway to improve placement of jobs for gang + scheduling and preemption. + -- NOTE: Work is underway to provide additional tools for reporting + accounting information. + +* Changes in SLURM 1.3.0 +======================== + -- In sched/wiki2, add processor count to JOBWILLRUN response. + -- Add event trigger for node entering DRAINED state. + -- Build properly without OpenSSL installed (OpenSSL is recommended, but not + required). + -- Added slurmdbd, and modified accounting_storage plugin to talk to it. + Allowing multiple slurm systems to securly store and gather information + not only about jobs, but the system also. See accounting web page for more + information. + +* Changes in SLURM 1.3.0-pre11 +============================== + -- Restructure the sbcast RPC to take advantage of larger buffers available + in Slurm v1.3 RPCs. + -- Fix several memory leaks. + -- In scontrol, show job's Requeue value, permit change of Requeue and Comment + values. + -- In slurmctld job record, add QOS (quality of service) value for accounting + purposes with Maui and Moab. + -- Log to a job's stderr when it is being cancelled explicitly or upon reaching + it's time limit. + -- Only permit a job's account to be changed while that job is PENDING. + -- Fix race condition in job suspend/resume (slurmd.sus_res.patch from HP). + +* Changes in SLURM 1.3.0-pre10 +============================== + -- Add support for node-specific "arch" (architecture) and "os" (operating + system) fields. 
These fields are set based upon values reported by the + slurmd daemon on each compute node using SLURM_ARCH and SLURM_OS environment + variables (if set, the uname function otherwise) and are intended to support + changes in real time changes in operating system. These values are reported + by "scontrol show node" plus the sched/wiki and sched/wiki2 plugins for Maui + and Moab respectively. + -- In sched/wiki and sched/wiki2: add HostFormat and HidePartitionJobs to + "scontrol show config" SCHEDULER_CONF output. + -- In sched/wiki2: accept hostname expression as input for GETNODES command. + -- Add JobRequeue configuration parameter and --requeue option to the sbatch + command. + -- Add HealthCheckInterval and HealthCheckProgram configuration parameters. + -- Add SlurmDbdAddr, SlurmDbdAuthInfo and SlurmDbdPort configuration parameters. + -- Modify select/linear to achieve better load leveling with gang scheduler. + -- Develop the sched/gang plugin to support select/linear and + select/cons_res. If sched/gang is enabled and Shared=FORCE is configured + for a partition, this plugin will gang-schedule or "timeslice" jobs that + share common resources within the partition. Note that resources that are + shared across partitions are not gang-scheduled. + -- Add EpilogMsgTime configuration parameter. See "man slurm.conf" for details. + -- Increase default MaxJobCount configuration parameter from 2000 to 5000. + -- Move all database common files from src/common to new lib in src/database. + -- Move sacct to src/accounting added sacctmgr for scontrol like operations + to accounting infrastructure. + -- Basic functions of sacctmgr in place to make for administration of + accounting. + -- Moved clusteracct_storage plugin to accounting_storage plugin, + jobacct_storage is still it's own plugin for now. + -- Added template for slurm php extention. + -- Add infrastructure to support allocation of cluster-wide licenses to jobs. + Full support will be added some time after version 1.3.0 is released. + -- In sched/wiki2 with select/bluegene, add support for WILLRUN command + to accept multiple jobs with start time specifications. + +* Changes in SLURM 1.3.0-pre9 +============================= + -- Add spank support to sbatch. Note that spank_local_user() will be called + with step_layout=NULL and gid=SLURM_BATCH_SCRIPT and spank_fini() will + be called immediately afterwards. + -- Made configure use mysql_config to find location of mysql database install + Removed bluegene specific information from the general database tables. + -- Re-write sched/backfill to utilize new will-run logic in the select + plugins. It now supports select/cons_res and all job options (required + nodes, excluded nodes, contiguous, etc.). + -- Modify scheduling logic to better support overlapping partitions. + -- Add --task-mem option and remove --job-mem option from srun, salloc, and + sbatch commands. Enforce step memory limit, if specified and there is + no job memory limit specified (--mem). Also see DefMemPerTask and + MaxMemPerTask in "man slurm.conf". Enforcement is dependent upon job + accounting being enabled with non-zero value for JoabAcctGatherFrequency. + -- Change default node tmp_disk size to zero (for diskless nodes). + +* Changes in SLURM 1.3.0-pre8 +============================= + -- Modify how strings are packed in the RPCs, Maximum string size + increased from 64KB (16-bit size field) to 4GB (32-bit size field). + -- Fix bug that prevented time value of "INFINITE" from being processed. 
+ -- Added new srun/sbatch option "--open-mode" to control how output/error + files are opened ("t" for truncate, "a" for append). + -- Added checkpoint/xlch plugin for use with XLCH (Hongjia Cao, NUDT). + -- Added srun option --checkpoint-path for use with XLCH (Hongjia Cao, NUDT). + -- Added new srun/salloc/sbatch option "--acctg-freq" for user control over + accounting data collection polling interval. + -- In sched/wiki2 add support for hostlist expression use in GETNODES command + with HostFormat=2 in the wiki.conf file. + -- Added new scontrol option "setdebug" that can change the slurmctld daemons + debug level at any time (Hongjia Cao, NUDT). + -- Track total total suspend time for jobs and steps for accounting purposes. + -- Add version information to partition state file. + -- Added 'will-run' functionality to all of the select plugins (bluegene, + linear, and cons_res) to return node list and time job can start based + on other jobs running. + -- Major restructuring of node selection logic. select/linear now supports + partition max_share parameter and tries to match like size jobs on the + same nodes to improve gang scheduling performance. Also supports treating + memory as consumable resource for job preemption and gang scheduling if + SelectTypeParameter=CR_Memory in slurm.conf. + -- BLUEGENE: Reorganized bluegene plugin for maintainability sake. + -- Major restructuring of data structures in select/cons_res. + -- Support job, node and partition names of arbitrary size. + -- Fix bug that caused slurmd to hang when using select/linear with + task/affinity. + +* Changes in SLURM 1.3.0-pre7 +============================= + -- Fix a bug in the processing of srun's --exclusive option for a job step. + +* Changes in SLURM 1.3.0-pre6 +============================= + -- Add support for configurable number of jobs to share resources using the + partition Shared parameter in slurm.conf (e.g. "Shared=FORCE:3" for two + jobs to share the resources). From Chris Holmes, HP. + -- Made salloc use api instead of local code for message handling. + +* Changes in SLURM 1.3.0-pre5 +============================= + -- Add select_g_reconfigure() function to node changes in slurmctld configuration + that can impact node scheduling. + -- scontrol to set/get partition's MaxTime and job's Timelimit in minutes plus + new formats: min:sec, hr:min:sec, days-hr:min:sec, days-hr, etc. + -- scontrol "notify" command added to send message to stdout of srun for + specified job id. + -- For BlueGene, make alpha part of node location specification be case insensitive. + -- Report scheduler-plugin specific configuration information with the + "scontrol show configuration" command on the SCHEDULER_CONF line. This + information is not found in the "slurm.conf" file, but a scheduler plugin + specific configuration (e.g. "wiki.conf"). + -- sview partition information reported now includes partition priority. + -- Expand job dependency specification to support concurrent execution, + testing of job exit status and multiple job IDs. + +* Changes in SLURM 1.3.0-pre4 +============================= + -- Job step launch in srun is now done from the slurm api's all further + modifications to job launch should be done there. + -- Add new partition configuration parameter Priority. Add job count to + Shared parameter. + -- Add new configuration parameters DefMemPerTask, MaxMemPerTask, and + SchedulerTimeSlice. + -- In sched/wiki2, return REJMESSAGE with details on why a job was + requeued (e.g. what node failed). 
+ +* Changes in SLURM 1.3.0-pre3 +============================= + -- Remove slaunch command + -- Added srun option "--checkpoint=time" for job step to automatically be + checkpointed on a period basis. + -- Change behavior of "scancel -s KILL <jobid>" to send SIGKILL to all job + steps rather than cancelling the job. This now matches the behavior of + all other signals. "scancel <jobid>" still cancels the job and all steps. + -- Add support for new job step options --exclusive and --immediate. Permit + job steps to be queued when resources are not available within an existing + job allocation to dedicate the resources to the job step. Useful for + executing simultaneous job steps. Provides resource management both at + the level of jobs and job steps. + -- Add support for feature count in job constraints, for example + srun --nodes=16 --constraint=graphics*4 ... + Based upon work by Kumar Krishna (HP, India). + -- Add multi-core options to salloc and sbatch commands (sbatch.patch and + cleanup.patch from Chris Holmes, HP). + -- In select/cons_res properly release resources allocated to job being + suspended (rmbreak.patch, from Chris Holmes, HP). + -- Removed database and jobacct plugin replaced with jobacct_storage + and jobacct_gather for easier hooks for further expansion of the + jobacct plugin. + +* Changes in SLURM 1.3.0-pre2 +============================= + -- Added new srun option --pty to start job with pseudo terminal attached + to task 0 (all other tasks have I/O discarded) + -- Disable user specifying jobid when sched/wiki2 configured (needed for + Moab releases until early 2007). + -- Report command, args and working directory for batch jobs with + "scontrol show job". + +* Changes in SLURM 1.3.0-pre1 +============================= + -- !!! SRUN CHANGES !!! + The srun options -A/--allocate, -b/--batch, and -a/--attach have been + removed! That functionality is now available in the separate commands + salloc, sbatch, and sattach, respectively. + -- Add new node state FAILING plus trigger for when node enters that state. + -- Add new configuration paramter "PrivateData". This can be used to + prevent a user from seeing jobs or job steps belonging to other users. + -- Added configuration parameters for node power save mode: ResumeProgram + ResumeRate, SuspendExcNodes, SuspendExcParts, SuspendProgram and + SuspendRate. + -- Slurmctld maintains the IP address (rather than hostname) for srun + communications. This fixes some possible network routing issues. + -- Added global database plugin. Job accounting and Job completion are the + first to use it. Follow documentation to add more to the plugin. + -- Removed no-longer-needed jobacct/common/common_slurmctld.c since that is + replaced by the database plugin. + -- Added new configuration parameter: CryptoType. + Moved existing digital signature logic into new plugin: crypto/openssl. + Added new support for crypto/munge (available with GPL license). + +* Changes in SLURM 1.2.31 +========================= + +* Changes in SLURM 1.2.30 +========================= + -- Fix for gold not to print out 720 error messages since they are + potentally harmful. + -- In sched/wiki2 (Moab), permit changes to a pending job's required features: + CMD=CHANGEJOB ARG=<jobid> RFEATURES=<features> + -- Fix for not aborting when node selection doesn't load, fatal error instead + -- In sched/wiki and sched/wiki2 DO NOT report a job's state as "Hold" if it's + dependencies have not been satisfied. 
This reverses a changed made in SLURM + version 1.2.29 (which was requested by Cluster Resources, but places jobs + in a HELD state indefinitely). + +* Changes in SLURM 1.2.29 +========================= + -- Modified global configuration option "DisableRootJobs" from number (0 or 1) + to boolean (YES or NO) to match partition parameter. + -- Set "DisableRootJobs" for a partition to match the global parameters value + for newly created partitions. + -- In sched/wiki and sched/wiki2 report a node's updated features if changed + after startup using "scontrol update ..." command. + -- In sched/wiki and sched/wiki2 report a job's state as "Hold" if it's + dependencies have not been satisfied. + -- In sched/wiki and sched/wiki2 do not process incoming requests until + slurm configuration is completely loaded. + -- In sched/wiki and sched/wiki2 do not report a job's node count after it + has completed (slurm decrements the allocated node count when the nodes + transition from completing to idle state). + -- If job prolog or epilog fail, log the program's exit code. + -- In jobacct/gold map job names containing any non-alphanumeric characters + to '_' to avoid MySQL parsing problems. + -- In jobacct/linux correct parsing if command name contains spaces. + -- In sched/wiki and sched/wiki2 report make job info TASK count reflect the + actual task allocation (not requested tasks) even after job terminates. + Useful for accounting purposes only. + +* Changes in SLURM 1.2.28 +========================= + -- Added configuration option "DisableRootJobs" for parameter + "PartitionName". See "man slurm.conf" for details. + -- Fix for faking a large system to correctly handle node_id in the task + afffinity plugin for ia64 systems. + * Changes in SLURM 1.2.27 ========================= -- Record job eligible time in accounting database (for jobacct/gold only). @@ -15,6 +363,7 @@ documents those changes that are of interest to users and admins. -- Enhance job requeue on node failure to be more robust. -- Added configuration parameter "DisableRootJobs". See "man slurm.conf" for details. + -- Fixed issue with account = NULL in Gold job accounting plugin * Changes in SLURM 1.2.26 ========================= @@ -25,8 +374,6 @@ documents those changes that are of interest to users and admins. -- In srun and sbatch, do not check the PATH env var if an absolute pathname of the program is specified (previously reported an error if no PATH). -- Correct output of "sinfo -o %C" (CPU counts by node state). - -- Treat attempt to submit a batch job against an existing resource allocation - as an error if sched/wiki2 is configured (Moab). * Changes in SLURM 1.2.25 ========================= @@ -2878,4 +3225,4 @@ documents those changes that are of interest to users and admins. -- Change directory to /tmp in slurmd if daemonizing. -- Logfiles are reopened on reconfigure. -$Id: NEWS 13871 2008-04-15 15:47:33Z jette $ +$Id: NEWS 14153 2008-05-29 16:55:52Z jette $ diff --git a/README b/README index 0828b6ccd..1a782d369 100644 --- a/README +++ b/README @@ -62,71 +62,10 @@ quick description of the subdirectories of the SLURM distribution follows: COMPILING AND INSTALLING THE DISTRIBUTION ----------------------------------------- -Please the the INSTALL file for basic instructions. You will need a -working installation of OpenSSL. - -SLURM does not use reserved ports to authenticate communication -between components. You will need to have at least one "auth" -plugin. 
Currently, only three authentication plugins are available: -"auth/none," "auth/authd," and "auth/munge." The "auth/none" plugin is -built and used by default, but one of either Brent Chun's authd, or Chris -Dunlap's Munge should be installed in order to get properly authenticated -communications. The configure script in the top-level directory of this -distribution will determine which authentication plugins may be built. - - -OpenSSL: -http://www.openssl.org - -AUTHD: -http://www.theether.org/authd/ - -MUNGE: -http://www.llnl.gov/linux/munge/ - - -CONFIGURATION -------------- - -An annotated sample configuration file for SLURM is provided with this -distribution as etc/slurm.conf.example. Edit this config file to suit -your site and cluster, then copy it to `$sysconfdir/slurm.conf,' where -sysconfdir defaults to PREFIX/etc unless explicitly overwritten in the -`configure' or `make' steps. - -Once the config file is installed in the proper location, you'll need -to create the keys for SLURM job credential creation and verification. -The following openssl commands should be used: - - > openssl genrsa -out /path/to/private/key 1024 - > openssl rsa -in /path/to/private/key -pubout -out /path/to/public/key - -The private key and public key locations should be those specified by -JobCredentialPrivateKey and JobCredentialPublicCertificate in the SLURM -config file. - - -RUNNING SLURM -------------- - -Once a valid configuration has been set up and installed, the SLURM -controller, slurmctld, should be started on the primary and backup -control machines, and the SLURM compute node daemon, slurmd, should be -started on each compute server. - -The slurmd daemons need to run as root for production use, but may be -run as a user for testing purposes (obviously no jobs may be run as -any other user in that configuration). The SLURM controller, slurmctld, -need to be run as the configured SlurmUser (see your config file). - -Man pages are the best source of information about SLURM commands and -daemons. Please see: slurmctld(8), slurmd(8), scontrol(1), sinfo(1), -squeue(1), scancel(1), and srun(1). - -Also, take a look at the Quickstart Guide to get acquainted with -running and managing jobs with SLURM: doc/html/quickstart_admin.html -or PREFIX/share/doc/quickstart_admin.html. - +Please see the instructions at + http://www.llnl.gov/linux/slurm/quickstart_admin.html +Extensive documentation is available from our home page at + http://www.llnl.gov/linux/slurm PROBLEMS -------- @@ -134,4 +73,4 @@ PROBLEMS If you experience problems compiling, installing, or running SLURM please send e-mail to either slurm-dev@lists.llnl.gov. -$Id: README 11977 2007-08-09 17:49:08Z da $ +$Id: README 11978 2007-08-09 17:53:54Z da $ diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 8bc14d0fa..858d9ebfd 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,148 +1,236 @@ -RELEASE NOTES FOR SLURM VERSION 1.2 -13 March 2007 +RELEASE NOTES FOR SLURM VERSION 1.3 +14 May 2008 IMPORTANT NOTE: -SLURM state files in version 1.2 are different from those of version 1.1. +SLURM state files in version 1.3 are different from those of version 1.2. After installing SLURM version 1.2, plan to restart without preserving -jobs or other state information. Restart daemons with the "-c" option or -use "/etc/init.d/slurm startclean". - -NEW COMMANDS - -* Several new commands have been added to perform individual srun functions. - The srun command will continue to exist, but these commands may offer - greater clarity and ease of use. 
The srun options --allocate, --attach, - and --batch will eventually cease being supported. The new commands are +jobs or other state information. While SLURM version 1.2 is still running, +cancel all pending and running jobs (e.g. +"scancel --state=pending; scancel --state=running"). Then stop and restart +daemons with the "-c" option or use "/etc/init.d/slurm startclean". + +There are substantial changes in the slurm.conf configuration file. It +is recommended that you rebuild your configuration file using the tool +doc/html/configurator.html that comes with the distribution. The node +information is unchanged and the partition information only changes for +the Shared and Priority parameters, so those portions of your old +slurml.conf file may be copied into the new file. + +Two areas of substantial change are accounting and job scheduling. +Slurm is now able to save accounting information in a database, +either MySQL or PostGreSQL. We have written a new daemon, slurmdbd +(Slurm DataBase Daemon), to serve as a centralized data manager for +multiple Slurm clusters. A new tool sacctmgr is available to manage +user accounting information through SlurmdDBD and a variety of +other tools are still under development to generate assorted +acccounting reports including graphics and a web interface. Slurm +now supports gang scheduling (time-slicing of parallel jobs for +improved responsiveness and system utilization). Many related +scheduling changes have also been made. + +There are changes in SLURM's RPMs. There is a new RPM called +slurmdbd (SLURM DataBase Daemon) used to provide a secure +SLURM database interface for accounting purposes (more information +about that below). The SLURM plugins have been moved to a new +RPM called slurm-plugins. If the slurmdbd is installed it +requires the slurm-plugins, but not the other slurm components. +The base SLURM plugin requires the plugins, which used to be +within that RPM. + +Many enhancements have been made for better Slurm integration with +Moab and Maui schedulers. Moab development work is still underway +to support Slurm v1.3. Moab users should not upgrade to Slurm v1.3 +until this work has been completed. + +Major changes in Slurm version 1.3 are described below. Some changes +made after the initial release of Slurm version 1.2 are also noted. +Many less significant changes are not identified here. A complete list +of changes can be found in the NEWS file. Man pages should be consulted +for more details about command and configuration parameter changes. + + +COMMAND CHANGES +=============== +* The srun options --allocate, --attach and --batch have been removed. + Use the new commands added in SLURM version 1.2 for this functionality: salloc - Create a job allocation (functions like "srun --allocate") sattach - Attach to an existing job step (functions like "srun --attach") sbatch - Submit a batch job script (functions like "srun --batch") + These commands generally have the same options as the srun command. See the individual man pages for more information. -* A new command, strigger, was added to manage event triggers. This was - added in SLURM version 1.2.2. See strigger's man pages for details. - -* A new GUI is available for viewing and modifying state information, sview. - Note that sview will only be built on systems which have libglade-2.0 and - gtk+-2.0 installed. - - -CONFIGURATION FILE CHANGES +* The slaunch command has been removed. Use the srun command instead. 
+ +* The srun option --exclusive has been added for job steps to be + allocated processors not already assigned to other job steps. This + can be used to execute multiple job steps simultaneously within a + job allocation and have SLURM perform resource management for the + job steps much like it does for jobs. If dedicated resources are + not immediately available, the job step will be executed later + unless the --immediate option is also set. + +* Support is now provided for feature counts in job constraints. For + example: srun --nodes=16 --constraint=graphics*4 ... + +* The srun option --pty has been added to start the job with a pseudo + terminal attached to task zero (all other tasks have I/O discarded). + +* Job time limits can be specified using the following formats: min, + min:sec, hour:min:sec, and days-hour:min:sec (formerly only supported + minutes). + +* scontrol now shows job TimeLimit and partition MaxTime in the format of + [days-]hours:minutes:seconds or "UNLIMITED". The scontrol update options + for times now accept minutes, minutes:seconds, hours:minutes:seconds, + days-hours, days-hours:minutes, days-hours:minutes:seconds or "UNLIMITED". + This new format also applies to partition MaxTime in the slurm.conf file. + +* scontrol "notify" command added to send message to stdout of srun for + specified job id. + +* Support has been added for a much richer job dependency specification + including testing of exit codes and multiple dependencies. + +* The srun options --checkpoint=<interval> and --checkpoint-path=<file_path> + have been added. + +* Event trigger support was added in Slurm v1.2.2. The command strigger + was added to manage the triggers. + +* Added a --task-mem option and removed --job-mem option from srun, salloc, + and sbatch commands. Memory limits are applied on a per-task basis. + + +SCHEDULING CHANGES +================== +* The sched/backfill plugin has been largely re-written. It now supports + select/cons_res and all job options (required nodes, excluded nodes, + contiguous, etc.). + +* Added a new partition parameter, Priority. A job's scheduling priority is + based upon two factors. First the priority of its partition and second the + job's priority. Since nodes can be configured in multiple partitions, this + can be used to configure high priority partitions (queues). + +* The partition parameter Shared now has a job count. For example: + Shared=YES:4 (Up to 4 jobs may share each resource, user control) + Shared=FORCE:2 (Up to 2 jobs may share each resource, no user control) + +* Added new parameters DefMemPerTask and MaxMemPerTask to control the default + and maximum memory per task. Any task that exceeds the specified size will + be terminated (enforcement requires job accounting to be enabled with a + non-zero value for JoabAcctGatherFrequency). + +* The select linear plugin (allocating whole nodes to jobs) can treat memory + as a consumable resource with SelectTypeParameter=CR_Memory configured. + +* A new scheduler type, gang, was added for gang scheduling (time-slicing of + parallel jobs). Note: The Slurm gang scheduler is not compatible with the + LSF, Maui or Moab schedulers. + +* The new parameter, SchedulerTimeSlice, controls the length of gang scheduler + time slices. + +* Added a new parameter, Licenses to support cluster-wide consumable + resources. The --licenses option was also added to salloc, sbatch, + and srun. 
+ +* The Shared=exclusive option in conjunction with SelectType=select/cons_res + can be used to dedicate whole nodes to jobs in specific partitions while + allocating sockets, cores, or hyperthreads in other partitions. + +* Changes in the interface with the Moab and Maui scheduler have been + extensive providing far better integration between the systems. + * Many more parameters are shared between the systems. + * A new wiki.conf parameter, ExcludePartitions, can be used to enable + Slurm-based scheduling of jobs in specific partitions to achieve + better responsiveness while losing Moab or Maui policy controls. + * Another new wiki.conf parameter, HidePartitionJobs, can be used to + to hide jobs in specific partitions from Moab or Maui as well. See + the wiki.conf man pages for details. + * Moab relies upon Slurm to get a user's environment variables upon + job submission. If this can not be accomplished within a few seconds + (see the GetEnvTimeout parameter) then cache files can be used. Use + contribs/env_cache_builder.c to build these cache files. + + +ACCOUNTING CHANGES +================== +* The job accounting plugin has been split into two components: gathering + of data and storing the data. The JobAcctType parameter has been replaced by + JobAcctGatherType (AIX or Linux) and AccountingStorageType (MySQL, PostGreSQL, + filetext, and SlurmDBD). Storing the accounting information into a database + will provide you with greater flexibility in managing the data. + +* A new daemon SlurmDBD (Slurm DataBase Daemon) has been added. This can + be used to securely manage the accounting data for several Slurm clusters + in a central location. Several new parameters have been added to support + SlurmDBD, all starting with SlurmDBD. Note that the SlurmDBD daemon is + designed to use a Slurm JobAcctStorageType plugin to use MySQL now. + It also uses existing Slurm authentication plugins. -* The slurm.conf configuration file now supports a "Include" directive to - include other files inline. +* A new command, sacctmgr, is available for managing user accounts in + SlurmDBD has been added. This information is required for use of SlurmDBD + to manage job accounting data. Information is maintained based upon + an "association", which has four components: cluster name, Slurm partition, + user name and bank account. This tool can also be used to maintain + scheduling policy information that can be uploaded to Moab (various + resource limits and fair-share values) See the sacctmgr man page and + accounting web page for more information. Additional tools to generate + accounting reports are currently under development and will be released + soon. + +* Job completion records can now be written to a MySQL or PostGreSQL + database in addition to a test file as controlled using the JobCompType + parameter. -* Added new configuration parameter MessageTimeout (replaces #define in the - code). -* Added new configuration parameter MailProg (in case mail program is not - at "/bin/mail"). +OTHER CONFIGURATION CHANGES +=========================== +* A new parameter, JobRequeue, to control default job behavior after a node + failure (requeue or kill the job). The sbatch--requeue option can be used to + override the system default. + +* Added new parameters HealthCheckInterval and HealthCheckProgram to + automatically test the health of compute nodes. + +* New parameters UnkillableStepProgram and UnkillableStepTimeout offer + better control when user processes can not be killed. 
For example + nodes can be automatically rebooted (added in Slurm v1.2.12) + +* A new parameter, JobFileAppend, controls how to proceed when a job's + output or error file already exist (truncate the file or append to it, + added in slurm v1.2.13). Users can override this using the --open-mode + option when submitting a job. + +* Checkpoint plugins have been added for XLCH and OpenMPI. + +* A new parameter, PrivateData, can be used to prevent users from being + able to view jobs or job steps belonging to other users. + +* A new parameter CryptoType to specify digital signature plugin to be used + Options are crypto/openssl (default) or crypto/munge (for a GPL license). + +* Several Slurm MPI plugins were added to support srun launch of MPI tasks + including mpich1_p4 (Slurm v1.2.10) and mpich-mx (Slurm v1.2.11). + +* Cpuset logic was added to the task/affinity plugin in Slurm v1.2.3. + Set TaskPluginParam=cpusets to enable. -* Added new configuration parameter TaskPluginParam (for future use, to - control which task affinity functions are used, "cpusets" or "sched" for - schedsetaffinity). -* Removed defunct configuration parameter, ShedulerAuth. It has been rendered - obsolete by the new wiki.conf file. - -* Several new configuration parameters can be used to specify architectural - details of the nodes: Sockets, CoresPerSocket, and ThreadsPerCore. - For multi-core systems, these parameters will typically need to be - specified in the configuration file. - -* For select/cons_res, an assortment of consumable resources can be supported - including: CPUs, cores, sockets and/or memory. See new configuration - parameter SelectTypeParameters. - -* For BlueGene systems only: Alternate BlrtsImage, LinuxImage, MloaderImage, - and RamDiskImage file can be specified along with specific groups that - can use them. +OTHER CHANGES +============= +* Perl APIs and Torque wrappers for Torque/PBS to SLURM migration were + added in Slurm v1.2.13 in the contribs directory. SLURM now works + directly with Globus using the PBS GRAM. -* MPI plugin for use with MPICH-GM renamed from "mpich-gm" to "mpichgm". - (There was previously some inconsistent naming.) +* Support was added for several additional PMI functions to be used by + MPICH2 and MVAPICH2. Support for an PMI_TIME environment variable was + also added for user to control how PMI communications are spread out + in time. Scalability up to 16k tasks has been achieved. -OTHER CHANGES +* New node state FAILING has been added along with event trigger for it. + This is similar to DRAINING, but is intended for fault prediction work. + A trigger was also added for nodes becoming DRAINED. -* Several srun options are available to control layout of tasks across the - cores on a node: --extra-node-info, --ntasks-per-node, --sockets-per-node, - --cores-per-socket, --threads-per-core, --minsockets, --mincores, - --minthreads, --ntasks-per-socket, --ntasks-per-core, and --ntasks-per-node. - -* New scontrol, sinfo and squeue options can be used to view socket, core, - and task details by job and/or node. - -* The scontrol and squeue commands will provide a more detailed explanation - of why a job was put into FAILED state. - -* Permit batch jobs to be requeued ("scontrol requeue <jobid>" or - "srun --batch --no-requeue ..." to prevent). - -* View a job's exit code using "scontrol show job". - -* Added "account" field to job and step accounting information and sacct output. - -* Added new job field, "comment". Set by srun, salloc and sbatch. 
View - with "scontrol show job". Used by sched/wiki2. - -* Added support for OS X operating system. - -* A job step's actual run time is maintained with suspend/resume use. - -* Added support for Portable Linux Processor Affinity in the task/affinity - plugin (PLPA, see http://www.open-mpi.org/software/plpa). - -* There is a new version of the Wiki scheduler plugin to interface with - the Moab Scheduler. It can be accessed with the slurm.conf parameter - "SchedulerType=sched/wiki2". Continue using "SchedulerType=sched/wiki" - for the Maui Scheduler at this time and see the section below for details. - If you use sched/wiki2, you will at least need to add a wiki.conf file. - Key differences include: - - Node and job data returned is correct (several errors in old plugin) - - Node data includes partition information (CCLASS field) - - Improved error handling - - Support added for configuration file ("wiki.conf" in same directory - as "slurm.conf" file, see "man wiki.conf" for details) - - Support added for job modify, suspend/resume, and requeue - - Authentication of communications now supported - - Notification of scheduler on events (job submitted or termination) - - There is no longer a "sched-wiki" RPM. All files are in the main RPM. - -* The sched/wiki plugin has been re-written for use with the Maui Scheduler. - Some time in early 2007 the Maui Scheduler should be upgraded to use - the additional capabilities offered by sched/wiki2. Changes in sched/wiki - include: - - Node and job data returned is correct (several errors in old plugin) - - Support added for configuration file ("wiki.conf" in same directory - as "slurm.conf" file, see "man wiki.conf" for details). Currently - wiki.conf is only used to store an authentication key, which is not - used by most versions of the Maui Scheduler. - - There is no longer a "sched-wiki" RPM. All files are in the main RPM. - -* The features associated with a node can now be changed using the - "scontrol update" and "sview" commands. - -* For BlueGene system only: Added "--reboot" option to srun, salloc, and - sbatch commands to force booting of nodes before starting the job. - -* For BlueGene Systems only: Changed way of keeping track of smaller - partitions using ionode range instead of quarter nodecard notation. - (i.e. bgl000[0-3] instead of bgl000.0.0) - -* For BlueGene Systems only: New scontrol options to down specific ionodes - without having to create blocks there (i.e. - "scontrol update subbp=bgl000[0-3] state=error"). This will down the first - four ionodes or on a BGL system the first nodecard in the base partition. - -* For BlueGene Systems only: sinfo will now display correct node counts for - blocks in an error state or blocks and small blocks that are allocated. - This results in some overlap of nodes displayed if you are running with - Small blocks but the node counts will be correct for the states given. - -* For BlueGene Systems only: A state save/recover file has been added to save - the state of the system (primarily for blocks in an error state) to be loaded - when the system has been brought back up. - -* Added "scontrol listpids" command to identify processes associated with - specific jobs and/or steps. - -See the file NEWS for more details. 
diff --git a/aclocal.m4 b/aclocal.m4 index 595115a0c..c9bd7cc5b 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -1,7 +1,7 @@ -# generated automatically by aclocal 1.10 -*- Autoconf -*- +# generated automatically by aclocal 1.10.1 -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005, 2006 Free Software Foundation, Inc. +# 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -11,14 +11,17 @@ # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. -m4_if(m4_PACKAGE_VERSION, [2.61],, -[m4_fatal([this file was generated for autoconf 2.61. -You have another version of autoconf. If you want to use that, -you should regenerate the build system entirely.], [63])]) +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +m4_if(AC_AUTOCONF_VERSION, [2.61],, +[m4_warning([this file was generated for autoconf 2.61. +You have another version of autoconf. It may work, but is not guaranteed to. +If you have problems, you may need to regenerate the build system entirely. +To do so, use the procedure documented by the package, typically `autoreconf'.])]) # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- -# serial 51 Debian 1.5.24-1ubuntu1 AC_PROG_LIBTOOL +# serial 52 Debian 1.5.26-1ubuntu1 AC_PROG_LIBTOOL # AC_PROVIDE_IFELSE(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED) @@ -106,7 +109,6 @@ AC_REQUIRE([AC_DEPLIBS_CHECK_METHOD])dnl AC_REQUIRE([AC_OBJEXT])dnl AC_REQUIRE([AC_EXEEXT])dnl dnl - AC_LIBTOOL_SYS_MAX_CMD_LEN AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE AC_LIBTOOL_OBJDIR @@ -208,6 +210,8 @@ file_magic*) ;; esac +_LT_REQUIRED_DARWIN_CHECKS + AC_PROVIDE_IFELSE([AC_LIBTOOL_DLOPEN], enable_dlopen=yes, enable_dlopen=no) AC_PROVIDE_IFELSE([AC_LIBTOOL_WIN32_DLL], enable_win32_dll=yes, enable_win32_dll=no) @@ -287,9 +291,80 @@ ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +$rm -r conftest* ])# _LT_LINKER_BOILERPLATE +# _LT_REQUIRED_DARWIN_CHECKS +# -------------------------- +# Check for some things on darwin +AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. 
+ echo "int foo(void){return 1;}" > conftest.c + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib ${wl}-single_module conftest.c + if test -f libconftest.dylib; then + lt_cv_apple_cc_single_mod=yes + rm -rf libconftest.dylib* + fi + rm conftest.c + fi]) + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS="$save_LDFLAGS" + ]) + case $host_os in + rhapsody* | darwin1.[[0123]]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[[012]]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms="~$NMEDIT -s \$output_objdir/\${libname}-symbols.expsym \${lib}" + fi + if test "$DSYMUTIL" != ":"; then + _lt_dsymutil="~$DSYMUTIL \$lib || :" + else + _lt_dsymutil= + fi + ;; + esac +]) # _LT_AC_SYS_LIBPATH_AIX # ---------------------- @@ -614,7 +689,11 @@ sparc*-*solaris*) *64-bit*) case $lt_cv_prog_gnu_ld in yes*) LD="${LD-ld} -m elf64_sparc" ;; - *) LD="${LD-ld} -64" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; esac ;; esac @@ -707,7 +786,7 @@ AC_CACHE_CHECK([$1], [$2], $2=yes fi fi - $rm conftest* + $rm -r conftest* LDFLAGS="$save_LDFLAGS" ]) @@ -978,7 +1057,7 @@ else AC_CHECK_FUNC([shl_load], [lt_cv_dlopen="shl_load"], [AC_CHECK_LIB([dld], [shl_load], - [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld"], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], [AC_CHECK_FUNC([dlopen], [lt_cv_dlopen="dlopen"], [AC_CHECK_LIB([dl], [dlopen], @@ -986,7 +1065,7 @@ else [AC_CHECK_LIB([svld], [dlopen], [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], [AC_CHECK_LIB([dld], [dld_link], - [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld"]) + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) ]) ]) ]) @@ -1303,7 +1382,7 @@ aix3*) soname_spec='${libname}${release}${shared_ext}$major' ;; -aix4* | aix5*) +aix[[4-9]]*) version_type=linux need_lib_prefix=no need_version=no @@ -1836,6 +1915,13 @@ esac AC_MSG_RESULT([$dynamic_linker]) test "$dynamic_linker" = no && can_build_shared=no +AC_CACHE_VAL([lt_cv_sys_lib_search_path_spec], +[lt_cv_sys_lib_search_path_spec="$sys_lib_search_path_spec"]) +sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +AC_CACHE_VAL([lt_cv_sys_lib_dlsearch_path_spec], +[lt_cv_sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec"]) +sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" + variables_saved_for_relink="PATH $shlibpath_var 
$runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" @@ -2335,7 +2421,7 @@ lt_cv_deplibs_check_method='unknown' # whether `pass_all' will *always* work, you probably want this one. case $host_os in -aix4* | aix5*) +aix[[4-9]]*) lt_cv_deplibs_check_method=pass_all ;; @@ -2771,7 +2857,7 @@ aix3*) fi ;; -aix4* | aix5*) +aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi @@ -2828,6 +2914,7 @@ _LT_AC_TAGVAR(postdep_objects, $1)= _LT_AC_TAGVAR(predeps, $1)= _LT_AC_TAGVAR(postdeps, $1)= _LT_AC_TAGVAR(compiler_lib_search_path, $1)= +_LT_AC_TAGVAR(compiler_lib_search_dirs, $1)= # Source file extension for C++ test sources. ac_ext=cpp @@ -2937,7 +3024,7 @@ case $host_os in # FIXME: insert proper C++ library support _LT_AC_TAGVAR(ld_shlibs, $1)=no ;; - aix4* | aix5*) + aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. @@ -2950,7 +3037,7 @@ case $host_os in # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) @@ -3096,51 +3183,23 @@ case $host_os in fi ;; darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[[012]]) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - _LT_AC_TAGVAR(allow_undefined_flag, $1)='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no _LT_AC_TAGVAR(hardcode_direct, $1)=no _LT_AC_TAGVAR(hardcode_automatic, $1)=yes _LT_AC_TAGVAR(hardcode_shlibpath_var, $1)=unsupported _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='' _LT_AC_TAGVAR(link_all_deplibs, $1)=yes - - if test "$GXX" = yes ; then - lt_int_apple_cc_single_mod=no + _LT_AC_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" + if test "$GXX" = yes ; then output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes + _LT_AC_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_AC_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_AC_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + if test "$lt_cv_apple_cc_single_mod" 
!= "yes"; then + _LT_AC_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' else case $cc_basename in xlc*) @@ -3391,7 +3450,7 @@ case $host_os in _LT_AC_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' _LT_AC_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; - pgCC*) + pgCC* | pgcpp*) # Portland Group C++ compiler _LT_AC_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' _LT_AC_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' @@ -3826,7 +3885,8 @@ lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld # compiler output when linking a shared library. # Parse the compiler output and extract the necessary # objects, libraries and library flags. -AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP],[ +AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP], +[AC_REQUIRE([LT_AC_PROG_SED])dnl dnl we can't use the lt_simple_compile_test_code here, dnl because it contains code intended for an executable, dnl not a library. 
It's possible we should let each @@ -3951,6 +4011,11 @@ fi $rm -f confest.$objext +_LT_AC_TAGVAR(compiler_lib_search_dirs, $1)= +if test -n "$_LT_AC_TAGVAR(compiler_lib_search_path, $1)"; then + _LT_AC_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_AC_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi + # PORTME: override above test on systems where it is broken ifelse([$1],[CXX], [case $host_os in @@ -4007,7 +4072,6 @@ solaris*) ;; esac ]) - case " $_LT_AC_TAGVAR(postdeps, $1) " in *" -lc "*) _LT_AC_TAGVAR(archive_cmds_need_lc, $1)=no ;; esac @@ -4092,7 +4156,7 @@ aix3*) postinstall_cmds='$RANLIB $lib' fi ;; -aix4* | aix5*) +aix[[4-9]]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi @@ -4269,6 +4333,7 @@ if test -f "$ltmain"; then _LT_AC_TAGVAR(predeps, $1) \ _LT_AC_TAGVAR(postdeps, $1) \ _LT_AC_TAGVAR(compiler_lib_search_path, $1) \ + _LT_AC_TAGVAR(compiler_lib_search_dirs, $1) \ _LT_AC_TAGVAR(archive_cmds, $1) \ _LT_AC_TAGVAR(archive_expsym_cmds, $1) \ _LT_AC_TAGVAR(postinstall_cmds, $1) \ @@ -4331,7 +4396,7 @@ ifelse([$1], [], # Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) # NOTE: Changes made to this file will be lost: look at ltmain.sh. # -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 # Free Software Foundation, Inc. # # This file is part of GNU Libtool: @@ -4568,6 +4633,10 @@ predeps=$lt_[]_LT_AC_TAGVAR(predeps, $1) # shared library. postdeps=$lt_[]_LT_AC_TAGVAR(postdeps, $1) +# The directories searched by this compiler when creating a shared +# library +compiler_lib_search_dirs=$lt_[]_LT_AC_TAGVAR(compiler_lib_search_dirs, $1) + # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_[]_LT_AC_TAGVAR(compiler_lib_search_path, $1) @@ -4917,7 +4986,7 @@ EOF echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD cat conftest.$ac_ext >&5 fi - rm -f conftest* conftst* + rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then @@ -4974,7 +5043,8 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + m4_if([$1], [GCJ], [], + [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) # PIC is the default on this platform @@ -5011,7 +5081,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) esac else case $host_os in - aix4* | aix5*) + aix[[4-9]]*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor @@ -5107,7 +5177,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' _LT_AC_TAGVAR(lt_prog_compiler_static, $1)='-static' ;; - pgCC*) + pgCC* | pgcpp*) # Portland Group C++ compiler. _LT_AC_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' @@ -5258,7 +5328,8 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) # built for inclusion in a dll (and should export symbols for example). 
# Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + m4_if([$1], [GCJ], [], + [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; darwin* | rhapsody*) @@ -5328,7 +5399,8 @@ AC_MSG_CHECKING([for $compiler option to produce PIC]) mingw* | cygwin* | pw32* | os2*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). - _LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT' + m4_if([$1], [GCJ], [], + [_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ;; hpux9* | hpux10* | hpux11*) @@ -5465,7 +5537,7 @@ AC_MSG_RESULT([$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)]) # if test -n "$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)"; then AC_LIBTOOL_COMPILER_OPTION([if $compiler PIC flag $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) works], - _LT_AC_TAGVAR(lt_prog_compiler_pic_works, $1), + _LT_AC_TAGVAR(lt_cv_prog_compiler_pic_works, $1), [$_LT_AC_TAGVAR(lt_prog_compiler_pic, $1)ifelse([$1],[],[ -DPIC],[ifelse([$1],[CXX],[ -DPIC],[])])], [], [case $_LT_AC_TAGVAR(lt_prog_compiler_pic, $1) in "" | " "*) ;; @@ -5489,7 +5561,7 @@ esac # wl=$_LT_AC_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_AC_TAGVAR(lt_prog_compiler_static, $1)\" AC_LIBTOOL_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], - _LT_AC_TAGVAR(lt_prog_compiler_static_works, $1), + _LT_AC_TAGVAR(lt_cv_prog_compiler_static_works, $1), $lt_tmp_static_flag, [], [_LT_AC_TAGVAR(lt_prog_compiler_static, $1)=]) @@ -5505,7 +5577,7 @@ AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) ifelse([$1],[CXX],[ _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' case $host_os in - aix4* | aix5*) + aix[[4-9]]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | grep 'GNU' > /dev/null; then @@ -5527,6 +5599,7 @@ ifelse([$1],[CXX],[ _LT_AC_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac + _LT_AC_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] ],[ runpath_var= _LT_AC_TAGVAR(allow_undefined_flag, $1)= @@ -5557,12 +5630,14 @@ ifelse([$1],[CXX],[ # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. - _LT_AC_TAGVAR(exclude_expsyms, $1)="_GLOBAL_OFFSET_TABLE_" + _LT_AC_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. extract_expsyms_cmds= # Just being paranoid about ensuring that cc_basename is set. _LT_CC_BASENAME([$compiler]) @@ -5612,7 +5687,7 @@ ifelse([$1],[CXX],[ # See if GNU ld supports shared libraries. 
case $host_os in - aix3* | aix4* | aix5*) + aix[[3-9]]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then _LT_AC_TAGVAR(ld_shlibs, $1)=no @@ -5832,7 +5907,7 @@ _LT_EOF fi ;; - aix4* | aix5*) + aix[[4-9]]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. @@ -5852,7 +5927,7 @@ _LT_EOF # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix5*) + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes @@ -6012,11 +6087,10 @@ _LT_EOF _LT_AC_TAGVAR(link_all_deplibs, $1)=yes if test "$GCC" = yes ; then output_verbose_link_cmd='echo' - _LT_AC_TAGVAR(archive_cmds, $1)='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - _LT_AC_TAGVAR(module_cmds, $1)='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - _LT_AC_TAGVAR(archive_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - _LT_AC_TAGVAR(module_expsym_cmds, $1)='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + _LT_AC_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_AC_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_AC_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_AC_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else case $cc_basename in xlc*) @@ -6597,7 +6671,7 @@ AC_SUBST([SED]) AC_MSG_RESULT([$SED]) ]) -# Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc. +# Copyright (C) 2002, 2003, 2005, 2006, 2007 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, @@ -6612,7 +6686,7 @@ AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.10' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. 
-m4_if([$1], [1.10], [], +m4_if([$1], [1.10.1], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) @@ -6628,8 +6702,10 @@ m4_define([_AM_AUTOCONF_VERSION], []) # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AC_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], -[AM_AUTOMAKE_VERSION([1.10])dnl -_AM_AUTOCONF_VERSION(m4_PACKAGE_VERSION)]) +[AM_AUTOMAKE_VERSION([1.10.1])dnl +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +_AM_AUTOCONF_VERSION(AC_AUTOCONF_VERSION)]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- @@ -6901,7 +6977,7 @@ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. - if sed 10q "$mf" | grep '^#.*generated by automake' > /dev/null 2>&1; then + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue @@ -6949,13 +7025,13 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, -# 2005, 2006 Free Software Foundation, Inc. +# 2005, 2006, 2008 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. -# serial 12 +# serial 13 # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. @@ -7060,16 +7136,17 @@ AC_PROVIDE_IFELSE([AC_PROG_OBJC], # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. +_am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in - $1 | $1:* ) + $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done -echo "timestamp for $1" >`AS_DIRNAME([$1])`/stamp-h[]$_am_stamp_count]) +echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001, 2003, 2005 Free Software Foundation, Inc. # @@ -7370,7 +7447,7 @@ AC_SUBST([INSTALL_STRIP_PROGRAM])]) # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- -# Prevent Automake from outputing VARIABLE = @VARIABLE@ in Makefile.in. +# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) @@ -7476,6 +7553,8 @@ m4_include([auxdir/x_ac__system_configuration.m4]) m4_include([auxdir/x_ac_affinity.m4]) m4_include([auxdir/x_ac_aix.m4]) m4_include([auxdir/x_ac_bluegene.m4]) +m4_include([auxdir/x_ac_cflags.m4]) +m4_include([auxdir/x_ac_databases.m4]) m4_include([auxdir/x_ac_debug.m4]) m4_include([auxdir/x_ac_elan.m4]) m4_include([auxdir/x_ac_federation.m4]) diff --git a/auxdir/Makefile.am b/auxdir/Makefile.am index 9cbc77a98..c89c5c037 100644 --- a/auxdir/Makefile.am +++ b/auxdir/Makefile.am @@ -1,5 +1,5 @@ ##**************************************************************************** -## $Id: Makefile.am 7550 2006-03-21 00:16:26Z jette $ +## $Id: Makefile.am 14089 2008-05-20 21:17:09Z da $ ##**************************************************************************** ## Process this file with automake to produce Makefile.in. 
##**************************************************************************** @@ -12,6 +12,7 @@ EXTRA_DIST = \ x_ac_affinity.m4 \ x_ac_aix.m4 \ x_ac_bluegene.m4 \ + x_ac_cflags.m4 \ x_ac_debug.m4 \ x_ac_elan.m4 \ x_ac_federation.m4 \ diff --git a/auxdir/Makefile.in b/auxdir/Makefile.in index a3675c3e4..12a7cce90 100644 --- a/auxdir/Makefile.in +++ b/auxdir/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -42,6 +42,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -88,6 +90,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -101,10 +104,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -124,7 +130,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -135,6 +144,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -150,6 +161,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -165,6 +177,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -229,6 +242,7 @@ EXTRA_DIST = \ x_ac_affinity.m4 \ x_ac_aix.m4 \ x_ac_bluegene.m4 \ + x_ac_cflags.m4 \ x_ac_debug.m4 \ x_ac_elan.m4 \ x_ac_federation.m4 \ diff --git a/auxdir/config.guess b/auxdir/config.guess index 0f0fe712a..278f9e9e0 100755 --- a/auxdir/config.guess +++ b/auxdir/config.guess @@ -4,7 +4,7 @@ # 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, # Inc. 
-timestamp='2007-03-06' +timestamp='2007-07-22' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by @@ -330,7 +330,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; - i86pc:SunOS:5.*:*) + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) @@ -793,7 +793,7 @@ EOF exit ;; *:Interix*:[3456]*) case ${UNAME_MACHINE} in - x86) + x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; EM64T | authenticamd) diff --git a/auxdir/config.sub b/auxdir/config.sub index 5defff65a..1761d8bdf 100755 --- a/auxdir/config.sub +++ b/auxdir/config.sub @@ -4,7 +4,7 @@ # 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, # Inc. -timestamp='2007-01-18' +timestamp='2007-06-28' # This file is (in principle) common to ALL GNU software. # The presence of a machine in this file suggests that SOME GNU software @@ -475,8 +475,8 @@ case $basic_machine in basic_machine=craynv-cray os=-unicosmp ;; - cr16c) - basic_machine=cr16c-unknown + cr16) + basic_machine=cr16-unknown os=-elf ;; crds | unos) @@ -683,6 +683,10 @@ case $basic_machine in basic_machine=i386-pc os=-mingw32 ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; miniframe) basic_machine=m68000-convergent ;; diff --git a/auxdir/depcomp b/auxdir/depcomp index ca5ea4e1e..e5f9736c7 100755 --- a/auxdir/depcomp +++ b/auxdir/depcomp @@ -1,9 +1,9 @@ #! /bin/sh # depcomp - compile a program generating dependencies as side-effects -scriptversion=2006-10-15.18 +scriptversion=2007-03-29.01 -# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006 Free Software +# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007 Free Software # Foundation, Inc. # This program is free software; you can redistribute it and/or modify @@ -215,34 +215,39 @@ aix) # current directory. Also, the AIX compiler puts `$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. - stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'` - tmpdepfile="$stripped.u" + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` if test "$libtool" = yes; then + tmpdepfile1=$dir$base.u + tmpdepfile2=$base.u + tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else + tmpdepfile1=$dir$base.u + tmpdepfile2=$dir$base.u + tmpdepfile3=$dir$base.u "$@" -M fi stat=$? - if test -f "$tmpdepfile"; then : - else - stripped=`echo "$stripped" | sed 's,^.*/,,'` - tmpdepfile="$stripped.u" - fi - if test $stat -eq 0; then : else - rm -f "$tmpdepfile" + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done if test -f "$tmpdepfile"; then - outname="$stripped.o" # Each line is of the form `foo.o: dependent.h'. # Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. - sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile" - sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile" + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. 
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" else # The sourcefile does not contain any dependencies, so just # store a dummy comment line, to avoid errors with the Makefile diff --git a/auxdir/install-sh b/auxdir/install-sh index 4fbbae7b7..a5897de6e 100755 --- a/auxdir/install-sh +++ b/auxdir/install-sh @@ -1,7 +1,7 @@ #!/bin/sh # install - install a program, script, or datafile -scriptversion=2006-10-14.15 +scriptversion=2006-12-25.00 # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the @@ -48,7 +48,7 @@ IFS=" "" $nl" # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. -doit="${DOITPROG-}" +doit=${DOITPROG-} if test -z "$doit"; then doit_exec=exec else @@ -58,34 +58,49 @@ fi # Put in absolute file names if you don't have them in your path; # or use environment vars. -mvprog="${MVPROG-mv}" -cpprog="${CPPROG-cp}" -chmodprog="${CHMODPROG-chmod}" -chownprog="${CHOWNPROG-chown}" -chgrpprog="${CHGRPPROG-chgrp}" -stripprog="${STRIPPROG-strip}" -rmprog="${RMPROG-rm}" -mkdirprog="${MKDIRPROG-mkdir}" +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' -posix_glob= posix_mkdir= # Desired mode of installed file. mode=0755 +chgrpcmd= chmodcmd=$chmodprog chowncmd= -chgrpcmd= -stripcmd= +mvcmd=$mvprog rmcmd="$rmprog -f" -mvcmd="$mvprog" +stripcmd= + src= dst= dir_arg= -dstarg= +dst_arg= + +copy_on_change=false no_target_directory= -usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... @@ -95,65 +110,55 @@ In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --c (ignored) --d create directories instead of installing files. --g GROUP $chgrpprog installed files to GROUP. --m MODE $chmodprog installed files to MODE. --o USER $chownprog installed files to USER. --s $stripprog installed files. --t DIRECTORY install into DIRECTORY. --T report an error if DSTFILE is a directory. ---help display this help and exit. ---version display version info and exit. + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. 
Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG " while test $# -ne 0; do case $1 in - -c) shift - continue;; + -c) ;; + + -C) copy_on_change=true;; - -d) dir_arg=true - shift - continue;; + -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" - shift - shift - continue;; + shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 - shift - shift case $mode in *' '* | *' '* | *' '* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac - continue;; + shift;; -o) chowncmd="$chownprog $2" - shift - shift - continue;; + shift;; - -s) stripcmd=$stripprog - shift - continue;; + -s) stripcmd=$stripprog;; - -t) dstarg=$2 - shift - shift - continue;; + -t) dst_arg=$2 + shift;; - -T) no_target_directory=true - shift - continue;; + -T) no_target_directory=true;; --version) echo "$0 $scriptversion"; exit $?;; @@ -165,21 +170,22 @@ while test $# -ne 0; do *) break;; esac + shift done -if test $# -ne 0 && test -z "$dir_arg$dstarg"; then +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do - if test -n "$dstarg"; then + if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. - set fnord "$@" "$dstarg" + set fnord "$@" "$dst_arg" shift # fnord fi shift # arg - dstarg=$arg + dst_arg=$arg done fi @@ -224,7 +230,7 @@ for src do # Protect names starting with `-'. case $src in - -*) src=./$src ;; + -*) src=./$src;; esac if test -n "$dir_arg"; then @@ -242,22 +248,22 @@ do exit 1 fi - if test -z "$dstarg"; then + if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi - dst=$dstarg + dst=$dst_arg # Protect names starting with `-'. case $dst in - -*) dst=./$dst ;; + -*) dst=./$dst;; esac # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test -n "$no_target_directory"; then - echo "$0: $dstarg: Is a directory" >&2 + echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst @@ -378,26 +384,19 @@ do # directory the slow way, step by step, checking for races as we go. case $dstdir in - /*) prefix=/ ;; - -*) prefix=./ ;; - *) prefix= ;; + /*) prefix='/';; + -*) prefix='./';; + *) prefix='';; esac - case $posix_glob in - '') - if (set -f) 2>/dev/null; then - posix_glob=true - else - posix_glob=false - fi ;; - esac + eval "$initialize_posix_glob" oIFS=$IFS IFS=/ - $posix_glob && set -f + $posix_glob set -f set fnord $dstdir shift - $posix_glob && set +f + $posix_glob set +f IFS=$oIFS prefixes= @@ -459,41 +458,54 @@ do # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ - && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ - && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ - && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && - - # Now rename the file to the real destination. - { $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null \ - || { - # The rename failed, perhaps because mv can't rename something else - # to itself, or perhaps because mv is so ancient that it does not - # support -f. - - # Now remove or move aside any old file at destination location. 
- # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - if test -f "$dst"; then - $doit $rmcmd -f "$dst" 2>/dev/null \ - || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null \ - && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }; }\ - || { - echo "$0: cannot unlink or rename $dst" >&2 - (exit 1); exit 1 - } - else - : - fi - } && - - # Now rename the file to the real destination. - $doit $mvcmd "$dsttmp" "$dst" - } - } || exit 1 + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + $posix_glob set +f && + + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 trap '' 0 fi diff --git a/auxdir/ltmain.sh b/auxdir/ltmain.sh index 2160ef7b9..e420facf5 100644 --- a/auxdir/ltmain.sh +++ b/auxdir/ltmain.sh @@ -2,7 +2,7 @@ # NOTE: Changing this file will not affect anything until you rerun configure. # # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, -# 2007 Free Software Foundation, Inc. +# 2007, 2008 Free Software Foundation, Inc. # Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996 # # This program is free software; you can redistribute it and/or modify @@ -43,8 +43,8 @@ EXIT_FAILURE=1 PROGRAM=ltmain.sh PACKAGE=libtool -VERSION="1.5.24 Debian 1.5.24-1ubuntu1" -TIMESTAMP=" (1.1220.2.456 2007/06/24 02:25:32)" +VERSION="1.5.26 Debian 1.5.26-1ubuntu1" +TIMESTAMP=" (1.1220.2.493 2008/02/01 16:58:18)" # Be Bourne compatible (taken from Autoconf:_AS_BOURNE_COMPATIBLE). if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then @@ -113,15 +113,21 @@ esac # These must not be set unconditionally because not all systems understand # e.g. LANG=C (notably SCO). # We save the old values to restore during execute mode. 
-for lt_var in LANG LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +lt_env= +for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${$lt_var+set}\" = set; then save_$lt_var=\$$lt_var + lt_env=\"$lt_var=\$$lt_var \$lt_env\" $lt_var=C export $lt_var fi" done +if test -n "$lt_env"; then + lt_env="env $lt_env" +fi + # Make sure IFS has a sensible default lt_nl=' ' @@ -485,7 +491,7 @@ do echo "\ $PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP -Copyright (C) 2007 Free Software Foundation, Inc. +Copyright (C) 2008 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." exit $? @@ -788,6 +794,7 @@ if test -z "$show_help"; then *.for) xform=for ;; *.java) xform=java ;; *.obj) xform=obj ;; + *.sx) xform=sx ;; esac libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"` @@ -956,7 +963,7 @@ EOF $run $rm "$lobj" "$output_obj" $show "$command" - if $run eval "$command"; then : + if $run eval $lt_env "$command"; then : else test -n "$output_obj" && $run $rm $removelist exit $EXIT_FAILURE @@ -1028,7 +1035,7 @@ EOF command="$command$suppress_output" $run $rm "$obj" "$output_obj" $show "$command" - if $run eval "$command"; then : + if $run eval $lt_env "$command"; then : else $run $rm $removelist exit $EXIT_FAILURE @@ -1161,6 +1168,7 @@ EOF thread_safe=no vinfo= vinfo_number=no + single_module="${wl}-single_module" func_infer_tag $base_compile @@ -1646,6 +1654,11 @@ EOF continue ;; + -multi_module) + single_module="${wl}-multi_module" + continue + ;; + -module) module=yes continue @@ -2152,7 +2165,12 @@ EOF continue fi name=`$echo "X$deplib" | $Xsed -e 's/^-l//'` - for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do + if test "$linkmode" = lib; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do for search_ext in .la $std_shrext .so .a; do # Search the libtool library lib="$searchdir/lib${name}${search_ext}" @@ -2948,12 +2966,18 @@ EOF # we do not want to link against static libs, # but need to link against shared eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + eval deplibdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names" ; then for tmp in $deplibrary_names ; do depdepl=$tmp done - if test -f "$path/$depdepl" ; then + if test -f "$deplibdir/$depdepl" ; then + depdepl="$deplibdir/$depdepl" + elif test -f "$path/$depdepl" ; then depdepl="$path/$depdepl" + else + # Can't find it, oh well... 
+ depdepl= fi # do not add paths which are already there case " $newlib_search_path " in @@ -3101,9 +3125,10 @@ EOF case $linkmode in oldlib) - if test -n "$deplibs"; then - $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2 - fi + case " $deplibs" in + *\ -l* | *\ -L*) + $echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2 ;; + esac if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then $echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2 @@ -4245,9 +4270,10 @@ EOF ;; obj) - if test -n "$deplibs"; then - $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2 - fi + case " $deplibs" in + *\ -l* | *\ -L*) + $echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2 ;; + esac if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then $echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2 @@ -6486,7 +6512,7 @@ relink_command=\"$relink_command\"" fi # Restore saved environment variables - for lt_var in LANG LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${save_$lt_var+set}\" = set; then $lt_var=\$save_$lt_var; export $lt_var diff --git a/auxdir/slurm.m4 b/auxdir/slurm.m4 index 9f4cfbf62..20060e456 100644 --- a/auxdir/slurm.m4 +++ b/auxdir/slurm.m4 @@ -1,5 +1,5 @@ ##***************************************************************************** -## $Id: slurm.m4 8192 2006-05-25 00:15:05Z morrone $ +## $Id: slurm.m4 13389 2008-02-27 22:03:57Z jette $ ##***************************************************************************** # AUTHOR: # Mark A. Grondona <mgrondona@llnl.gov> @@ -13,7 +13,6 @@ AC_DEFUN([X_AC_SLURM_PORTS], [ AC_MSG_CHECKING(for slurmctld default port) - AC_ARG_WITH(slurmctld-port, AS_HELP_STRING(--with-slurmctld-port=N,set slurmctld default port [[6817]]), [ if test `expr match "$withval" '[[0-9]]*$'` -gt 0; then @@ -22,11 +21,11 @@ AC_DEFUN([X_AC_SLURM_PORTS], ] ) AC_MSG_RESULT(${slurmctldport=$1}) - AC_DEFINE_UNQUOTED(SLURMCTLD_PORT, [$slurmctldport], [Define the default port number for slurmctld]) AC_SUBST(SLURMCTLD_PORT) + AC_MSG_CHECKING(for slurmd default port) AC_ARG_WITH(slurmd-port, AS_HELP_STRING(--with-slurmd-port=N,set slurmd default port [[6818]]), @@ -36,12 +35,23 @@ AC_DEFUN([X_AC_SLURM_PORTS], ] ) AC_MSG_RESULT(${slurmdport=$2}) - AC_DEFINE_UNQUOTED(SLURMD_PORT, [$slurmdport], [Define the default port number for slurmd]) - AC_SUBST(SLURMD_PORT) + + AC_MSG_CHECKING(for slurmdbd default port) + AC_ARG_WITH(slurmdbd-port, + AS_HELP_STRING(--with-slurmdbd-port=N,set slurmdbd default port [[6819]]), + [ if test `expr match "$withval" '[[0-9]]*$'` -gt 0; then + slurmdbdport="$withval" + fi + ] + ) + AC_MSG_RESULT(${slurmdbdport=$3}) + AC_DEFINE_UNQUOTED(SLURMDBD_PORT, [$slurmdbdport], + [Define the default port number for slurmdbd]) + AC_SUBST(SLURMDBD_PORT) ]) dnl dnl Check for program_invocation_name diff --git a/auxdir/x_ac_aix.m4 b/auxdir/x_ac_aix.m4 index 026932d93..9aa4c892f 100644 --- a/auxdir/x_ac_aix.m4 +++ b/auxdir/x_ac_aix.m4 @@ -1,5 +1,5 @@ ##***************************************************************************** -## $Id: x_ac_aix.m4 11723 2007-06-15 22:15:14Z morrone $ +## $Id: x_ac_aix.m4 11741 2007-06-20 18:42:19Z da $ ##***************************************************************************** # AUTHOR: # Morris Jette <jette@llnl.gov> diff --git a/auxdir/x_ac_bluegene.m4 b/auxdir/x_ac_bluegene.m4 index dd995545f..24d743be3 100644 --- 
a/auxdir/x_ac_bluegene.m4 +++ b/auxdir/x_ac_bluegene.m4 @@ -1,5 +1,5 @@ ##***************************************************************************** -## $Id: x_ac_bluegene.m4 8863 2006-08-10 18:47:55Z da $ +## $Id: x_ac_bluegene.m4 14087 2008-05-20 19:35:45Z da $ ##***************************************************************************** # AUTHOR: # Morris Jette <jette1@llnl.gov> @@ -45,14 +45,14 @@ AC_DEFUN([X_AC_BLUEGENE], have_bg_ar=yes bg_bridge_so="$bg_dir/lib64/libbglbridge.so" bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -lbglbridge -lbgldb -ltableapi -lbglmachine -lexpat -lsaymessage" - fi + fi # Search for required DB2 library in the directory if test -z "$have_db2" -a -f "$bg_dir/lib64/libdb2.so" ; then have_db2=yes bg_db2_so="$bg_dir/lib64/libdb2.so" bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -ldb2" - fi + fi # Search for headers in the directory if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then @@ -75,7 +75,7 @@ AC_DEFUN([X_AC_BLUEGENE], if test ! -z "$have_bg_files" ; then BG_INCLUDES="$bg_includes" - AC_DEFINE(HAVE_BG_FILES, 1, [Define to 1 if have Blue Gene files]) + AC_DEFINE(HAVE_BG_FILES, 1, [Define to 1 if have Blue Gene files]) AC_DEFINE_UNQUOTED(BG_BRIDGE_SO, "$bg_bridge_so", [Define the BG_BRIDGE_SO value]) AC_DEFINE_UNQUOTED(BG_DB2_SO, "$bg_db2_so", [Define the BG_DB2_SO value]) diff --git a/auxdir/x_ac_databases.m4 b/auxdir/x_ac_databases.m4 new file mode 100644 index 000000000..056eb840a --- /dev/null +++ b/auxdir/x_ac_databases.m4 @@ -0,0 +1,111 @@ +##***************************************************************************** +## $Id: x_ac_databases.m4 5401 2005-09-22 01:56:49Z da $ +##***************************************************************************** +# AUTHOR: +# Danny Auble <da@llnl.gov> +# +# SYNOPSIS: +# X_AC_DATABASES +# +# DESCRIPTION: +# Test for Different Database apis. If found define appropriate ENVs. +##***************************************************************************** + +AC_DEFUN([X_AC_DATABASES], +[ + #Check for MySQL + ac_have_mysql="no" + ### Check for mysql_config program + AC_PATH_PROG(HAVEMYSQLCONFIG, mysql_config, no) + if test x$HAVEMYSQLCONFIG = xno; then + AC_MSG_WARN([*** mysql_config not found. Evidently no MySQL install on system.]) + else + # check for mysql-5.0.0+ + mysql_config_major_version=`$HAVEMYSQLCONFIG --version | \ + sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[a-zA-Z0-9]]*\)/\1/'` + mysql_config_minor_version=`$HAVEMYSQLCONFIG --version | \ + sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[a-zA-Z0-9]]*\)/\2/'` + mysql_config_micro_version=`$HAVEMYSQLCONFIG --version | \ + sed 's/\([[0-9]]*\).\([[0-9]]*\).\([[a-zA-Z0-9]]*\)/\3/'` + + if test $mysql_config_major_version -lt 5; then + AC_MSG_WARN([*** mysql-$mysql_config_major_version.$mysql_config_minor_version.$mysql_config_micro_version available, we need >= mysql-5.0.0 installed for the mysql interface.]) + ac_have_mysql="no" + else + # mysql_config puts -I on the front of the dir. We don't + # want that so we remove it. 
+ MYSQL_CFLAGS=`$HAVEMYSQLCONFIG --cflags` + MYSQL_LIBS=`$HAVEMYSQLCONFIG --libs_r` + if test -z "$MYSQL_LIBS"; then + MYSQL_LIBS=`$HAVEMYSQLCONFIG --libs` + fi + save_CFLAGS="$CFLAGS" + save_LIBS="$LIBS" + CFLAGS="$MYSQL_CFLAGS $save_CFLAGS" + LIBS="$MYSQL_LIBS $save_LIBS" + AC_TRY_LINK([#include <mysql.h>],[ + int main() + { + MYSQL mysql; + (void) mysql_init(&mysql); + (void) mysql_close(&mysql); + } + ], + [ac_have_mysql="yes"], + [ac_have_mysql="no"]) + CFLAGS="$save_CFLAGS" + LIBS="$save_LIBS" + if test "$ac_have_mysql" == "yes"; then + AC_MSG_RESULT([MySQL test program built properly.]) + AC_SUBST(MYSQL_LIBS) + AC_SUBST(MYSQL_CFLAGS) + AC_DEFINE(HAVE_MYSQL, 1, [Define to 1 if using MySQL libaries]) + else + MYSQL_CFLAGS="" + MYSQL_LIBS="" + AC_MSG_WARN([*** MySQL test program execution failed.]) + fi + fi + fi + + + #Check for PostgreSQL + ac_have_postgres="no" + ### Check for pg_config program + AC_PATH_PROG(HAVEPGCONFIG, pg_config, no) + if test x$HAVEPGCONFIG = xno; then + AC_MSG_WARN([*** pg_config not found. Evidently no PostgreSQL install on system.]) + else + PGSQL_INCLUDEDIR=`$HAVEPGCONFIG --includedir` + PGSQL_LIBDIR=`$HAVEPGCONFIG --libdir` + PGSQL_CFLAGS="-I$PGSQL_INCLUDEDIR -L$PGSQL_LIBDIR" + save_CFLAGS="$CFLAGS" + CFLAGS="$PGSQL_CFLAGS $save_CFLAGS" + + PGSQL_LIBS=" -lpq" + save_LIBS="$LIBS" + LIBS="$PGSQL_LIBS $save_LIBS" + AC_TRY_LINK([#include <libpq-fe.h>],[ + int main() + { + PGconn *conn; + conn = PQconnectdb("dbname = postgres"); + (void) PQfinish(conn); + } + ], + [ac_have_pgsql="yes"], + [ac_have_pgsql="no"]) + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + if test "$ac_have_pgsql" == "yes"; then + AC_MSG_RESULT([PostgreSQL test program built properly.]) + AC_SUBST(PGSQL_LIBS) + AC_SUBST(PGSQL_CFLAGS) + AC_DEFINE(HAVE_PGSQL, 1, [Define to 1 if using PostgreSQL libaries]) + else + PGSQL_CFLAGS="" + PGSQL_LIBS="" + AC_MSG_WARN([*** PostgreSQL test program execution failed.]) + fi + fi +]) diff --git a/auxdir/x_ac_gtk.m4 b/auxdir/x_ac_gtk.m4 index 827d28a13..cdd4d7798 100644 --- a/auxdir/x_ac_gtk.m4 +++ b/auxdir/x_ac_gtk.m4 @@ -18,8 +18,8 @@ AC_DEFUN([X_AC_GTK], ac_have_gtk="yes" ### Check for pkg-config program - AC_PATH_PROG(HAVEPKGCONFIG, pkg-config, $PATH) - if test -z "$HAVEPKGCONFIG"; then + AC_PATH_PROG(HAVEPKGCONFIG, pkg-config, no) + if test x$HAVEPKGCONFIG = xno; then AC_MSG_WARN([*** pkg-config not found. 
Cannot probe for libglade-2.0 or gtk+-2.0.]) ac_have_gtk="no" fi diff --git a/auxdir/x_ac_slurm_ssl.m4 b/auxdir/x_ac_slurm_ssl.m4 index 9a8fe3b12..7f3baaeab 100644 --- a/auxdir/x_ac_slurm_ssl.m4 +++ b/auxdir/x_ac_slurm_ssl.m4 @@ -1,5 +1,5 @@ ##***************************************************************************** -## $Id: x_ac_slurm_ssl.m4 12827 2007-12-14 22:29:30Z da $ +## $Id: x_ac_slurm_ssl.m4 13975 2008-05-02 23:48:39Z da $ ##***************************************************************************** # AUTHOR: # Mark Grondona <mgrondona@llnl.gov> @@ -15,16 +15,15 @@ AC_DEFUN([X_AC_SLURM_WITH_SSL], [ - ac_slurm_with_ssl=no ssl_default_dirs="/usr/local/openssl64 /usr/local/openssl /usr/lib/openssl \ /usr/local/ssl /usr/lib/ssl /usr/local \ - /usr/pkg /opt /opt/openssl" + /usr/pkg /opt /opt/openssl /usr" AC_SUBST(SSL_LDFLAGS) AC_SUBST(SSL_LIBS) AC_SUBST(SSL_CPPFLAGS) - SSL_LIBS="-lcrypto" + SSL_LIB_TEST="-lcrypto" AC_ARG_WITH(ssl, AS_HELP_STRING(--with-ssl=PATH,Specify path to OpenSSL installation), @@ -36,7 +35,7 @@ AC_DEFUN([X_AC_SLURM_WITH_SSL], [ # which is specified with --with-ssl), and libtool is not setting # the correct runtime library path in the binaries. if test "x$ac_have_aix" = "xyes"; then - SSL_LIBS="-lcrypto-static" + SSL_LIB_TEST="-lcrypto-static" fi ]) @@ -46,21 +45,20 @@ AC_DEFUN([X_AC_SLURM_WITH_SSL], [ if test "x$prefix" != "xNONE" ; then tryssldir="$tryssldir $prefix" fi - if test "x$tryssldir" == "xno" ; then - AC_MSG_ERROR([OpenSSL libary is required for SLURM operation, download from www.openssl.org]) - fi - - AC_CACHE_CHECK([for OpenSSL directory], ac_cv_openssldir, [ + + if test "x$tryssldir" != "xno" ; then + AC_CACHE_CHECK([for OpenSSL directory], ac_cv_openssldir, [ for ssldir in $tryssldir "" $ssl_default_dirs; do CPPFLAGS="$saved_CPPFLAGS" LDFLAGS="$saved_LDFLAGS" - LIBS="$saved_LIBS $SSL_LIBS" + LIBS="$saved_LIBS $SSL_LIB_TEST" # Skip directories if they don't exist if test ! -z "$ssldir" -a ! -d "$ssldir" ; then continue; fi - if test ! -z "$ssldir" -a "x$ssldir" != "x/usr"; then + sslincludedir="$ssldir" + if test ! -z "$ssldir"; then # Try to use $ssldir/lib if it exists, otherwise # $ssldir if test -d "$ssldir/lib" ; then @@ -77,12 +75,16 @@ AC_DEFUN([X_AC_SLURM_WITH_SSL], [ # Try to use $ssldir/include if it exists, otherwise # $ssldir if test -d "$ssldir/include" ; then + sslincludedir="$ssldir/include" CPPFLAGS="-I$ssldir/include $saved_CPPFLAGS" else CPPFLAGS="-I$ssldir $saved_CPPFLAGS" fi fi - + test -f "$sslincludedir/openssl/rand.h" || continue + test -f "$sslincludedir/openssl/hmac.h" || continue + test -f "$sslincludedir/openssl/sha.h" || continue + # Basic test to check for compatible version and correct linking AC_RUN_IFELSE([AC_LANG_SOURCE([[ #include <stdlib.h> @@ -99,27 +101,26 @@ AC_DEFUN([X_AC_SLURM_WITH_SSL], [ return(RAND_status() <= 0); } ]])],[ - found_crypto=1 + ac_have_openssl="yes" break; ],[ ],[]) - if test ! -z "$found_crypto" ; then + if test ! -z "$ac_have_openssl" ; then break; fi done - if test -z "$found_crypto" ; then - AC_MSG_ERROR([Could not find working OpenSSL library, download from www.openssl.org]) - fi - if test -z "$ssldir" ; then - ssldir="(system)" - fi - - ac_cv_openssldir=$ssldir - ]) - - if (test ! -z "$ac_cv_openssldir" && test "x$ac_cv_openssldir" != "x(system)") ; then + if test ! -z "$ac_have_openssl" ; then + ac_cv_openssldir=$ssldir + fi + ]) + fi + + if test ! -z "$ac_have_openssl" ; then + SSL_LIBS="$SSL_LIB_TEST" + AC_DEFINE(HAVE_OPENSSL, 1, [define if you have openssl.]) + if (test ! 
-z "$ac_cv_openssldir") ; then dnl Need to recover ssldir - test above runs in subshell ssldir=$ac_cv_openssldir if test ! -z "$ssldir" -a "x$ssldir" != "x/usr"; then @@ -138,10 +139,14 @@ AC_DEFUN([X_AC_SLURM_WITH_SSL], [ SSL_CPPFLAGS="-I$ssldir" fi fi - fi + fi - AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <openssl/evp.h>]], [[EVP_MD_CTX_cleanup(NULL);]])],[AC_DEFINE(HAVE_EVP_MD_CTX_CLEANUP, 1, + AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <openssl/evp.h>]], [[EVP_MD_CTX_cleanup(NULL);]])],[AC_DEFINE(HAVE_EVP_MD_CTX_CLEANUP, 1, [Define to 1 if function EVP_MD_CTX_cleanup exists.])],[]) + else + SSL_LIBS="" + AC_MSG_WARN([Could not find working OpenSSL library]) + fi LIBS="$saved_LIBS" CPPFLAGS="$saved_CPPFLAGS" diff --git a/config.h.in b/config.h.in index a9bb420bc..a13263699 100644 --- a/config.h.in +++ b/config.h.in @@ -98,6 +98,9 @@ /* Define to 1 if you have the `mtrace' function. */ #undef HAVE_MTRACE +/* Define to 1 if using MySQL libaries */ +#undef HAVE_MYSQL + /* Define to 1 if you have the <ncurses.h> header file. */ #undef HAVE_NCURSES_H @@ -107,12 +110,18 @@ /* define if numa library installed */ #undef HAVE_NUMA +/* define if you have openssl. */ +#undef HAVE_OPENSSL + /* define if you have the PAM library */ #undef HAVE_PAM /* Define to 1 if you have the <pam/pam_appl.h> header file. */ #undef HAVE_PAM_PAM_APPL_H +/* Define to 1 if using PostgreSQL libaries */ +#undef HAVE_PGSQL + /* define if plpa library installed */ #undef HAVE_PLPA @@ -134,6 +143,9 @@ /* Define to 1 if you have the `ptrace64' function. */ #undef HAVE_PTRACE64 +/* Define to 1 if you have the <pty.h> header file. */ +#undef HAVE_PTY_H + /* Define if you are compiling with readline. */ #undef HAVE_READLINE @@ -233,6 +245,9 @@ /* Define to 1 if you have the `unsetenv' function. */ #undef HAVE_UNSETENV +/* Define to 1 if you have the <utmp.h> header file. */ +#undef HAVE_UTMP_H + /* Define to 1 if you have the <values.h> header file. */ #undef HAVE_VALUES_H @@ -310,6 +325,9 @@ /* Define the default port number for slurmctld */ #undef SLURMCTLD_PORT +/* Define the default port number for slurmdbd */ +#undef SLURMDBD_PORT + /* Define the default port number for slurmd */ #undef SLURMD_PORT diff --git a/configure b/configure index f2a95f057..e0ef76114 100755 --- a/configure +++ b/configure @@ -891,6 +891,8 @@ LN_S ECHO AR RANLIB +DSYMUTIL +NMEDIT CXXCPP F77 FFLAGS @@ -925,10 +927,17 @@ GTK2_CFLAGS GTK2_LIBS HAVE_GTK_TRUE HAVE_GTK_FALSE +HAVEMYSQLCONFIG +MYSQL_LIBS +MYSQL_CFLAGS +HAVEPGCONFIG +PGSQL_LIBS +PGSQL_CFLAGS DEBUG_MODULES_TRUE DEBUG_MODULES_FALSE SLURMCTLD_PORT SLURMD_PORT +SLURMDBD_PORT ELAN_LIBS HAVE_ELAN_TRUE HAVE_ELAN_FALSE @@ -943,6 +952,9 @@ READLINE_LIBS SSL_LDFLAGS SSL_LIBS SSL_CPPFLAGS +HAVE_OPENSSL_TRUE +HAVE_OPENSSL_FALSE +HAVE_OPENSSL MUNGE_LIBS MUNGE_CPPFLAGS MUNGE_LDFLAGS @@ -952,6 +964,7 @@ AUTHD_LIBS AUTHD_CFLAGS WITH_AUTHD_TRUE WITH_AUTHD_FALSE +UTIL_LIBS LTLIBOBJS' ac_subst_files='' ac_precious_vars='build_alias @@ -1583,6 +1596,7 @@ Optional Packages: --with-xcpu=PATH specify path to XCPU directory --with-slurmctld-port=N set slurmctld default port 6817 --with-slurmd-port=N set slurmd default port 6818 + --with-slurmdbd-port=N set slurmdbd default port 6819 --without-readline compile without readline support --with-ssl=PATH Specify path to OpenSSL installation --with-munge=PATH Specify path to munge installation @@ -6886,7 +6900,7 @@ lt_cv_deplibs_check_method='unknown' # whether `pass_all' will *always* work, you probably want this one. 
case $host_os in -aix4* | aix5*) +aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; @@ -7101,7 +7115,7 @@ ia64-*-hpux*) ;; *-*-irix6*) # Find out which ABI we are using. - echo '#line 7104 "configure"' > conftest.$ac_ext + echo '#line 7118 "configure"' > conftest.$ac_ext if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5 (eval $ac_compile) 2>&5 ac_status=$? @@ -7273,7 +7287,11 @@ sparc*-*solaris*) *64-bit*) case $lt_cv_prog_gnu_ld in yes*) LD="${LD-ld} -m elf64_sparc" ;; - *) LD="${LD-ld} -64" ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; esac ;; esac @@ -7924,7 +7942,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu # Autoconf 2.13's AC_OBJEXT and AC_EXEEXT macros only works for C compilers! - # find the maximum length of command line arguments { echo "$as_me:$LINENO: checking the maximum length of command line arguments" >&5 echo $ECHO_N "checking the maximum length of command line arguments... $ECHO_C" >&6; } @@ -8239,7 +8256,7 @@ EOF echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi - rm -f conftest* conftst* + rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test "$pipe_works" = yes; then @@ -8799,6 +8816,318 @@ fi ;; esac + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_DSYMUTIL+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { echo "$as_me:$LINENO: result: $DSYMUTIL" >&5 +echo "${ECHO_T}$DSYMUTIL" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_DSYMUTIL+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { echo "$as_me:$LINENO: result: $ac_ct_DSYMUTIL" >&5 +echo "${ECHO_T}$ac_ct_DSYMUTIL" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_NMEDIT+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { echo "$as_me:$LINENO: result: $NMEDIT" >&5 +echo "${ECHO_T}$NMEDIT" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_prog_ac_ct_NMEDIT+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { echo "$as_me:$LINENO: result: $ac_ct_NMEDIT" >&5 +echo "${ECHO_T}$ac_ct_NMEDIT" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&5 +echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools +whose name does not start with the host triplet. If you think this +configuration is useful to you, please write to autoconf@gnu.org." >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + + { echo "$as_me:$LINENO: checking for -single_module linker flag" >&5 +echo $ECHO_N "checking for -single_module linker flag... $ECHO_C" >&6; } +if test "${lt_cv_apple_cc_single_mod+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + echo "int foo(void){return 1;}" > conftest.c + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib ${wl}-single_module conftest.c + if test -f libconftest.dylib; then + lt_cv_apple_cc_single_mod=yes + rm -rf libconftest.dylib* + fi + rm conftest.c + fi +fi +{ echo "$as_me:$LINENO: result: $lt_cv_apple_cc_single_mod" >&5 +echo "${ECHO_T}$lt_cv_apple_cc_single_mod" >&6; } + { echo "$as_me:$LINENO: checking for -exported_symbols_list linker flag" >&5 +echo $ECHO_N "checking for -exported_symbols_list linker flag... $ECHO_C" >&6; } +if test "${lt_cv_ld_exported_symbols_list+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + lt_cv_ld_exported_symbols_list=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + lt_cv_ld_exported_symbols_list=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" + +fi +{ echo "$as_me:$LINENO: result: $lt_cv_ld_exported_symbols_list" >&5 +echo "${ECHO_T}$lt_cv_ld_exported_symbols_list" >&6; } + case $host_os in + rhapsody* | darwin1.[0123]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[91]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[012]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms="~$NMEDIT -s \$output_objdir/\${libname}-symbols.expsym \${lib}" + fi + if test "$DSYMUTIL" != ":"; then + _lt_dsymutil="~$DSYMUTIL \$lib || :" + else + _lt_dsymutil= + fi + ;; + esac + + enable_dlopen=no enable_win32_dll=no @@ -8864,7 +9193,7 @@ ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +$rm -r conftest* @@ -8892,11 +9221,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:8895: $lt_compile\"" >&5) + (eval echo "\"\$as_me:9224: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:8899: \$? = $ac_status" >&5 + echo "$as_me:9228: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -9166,10 +9495,10 @@ if test -n "$lt_prog_compiler_pic"; then { echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works+set}" = set; then +if test "${lt_cv_prog_compiler_pic_works+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_pic_works=no + lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" @@ -9182,27 +9511,27 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:9185: $lt_compile\"" >&5) + (eval echo "\"\$as_me:9514: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? 
cat conftest.err >&5 - echo "$as_me:9189: \$? = $ac_status" >&5 + echo "$as_me:9518: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works=yes + lt_cv_prog_compiler_pic_works=yes fi fi $rm conftest* fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_pic_works" >&6; } -if test x"$lt_prog_compiler_pic_works" = xyes; then +if test x"$lt_cv_prog_compiler_pic_works" = xyes; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; @@ -9229,10 +9558,10 @@ esac wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works+set}" = set; then +if test "${lt_cv_prog_compiler_static_works+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_static_works=no + lt_cv_prog_compiler_static_works=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext @@ -9245,20 +9574,20 @@ else $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works=yes + lt_cv_prog_compiler_static_works=yes fi else - lt_prog_compiler_static_works=yes + lt_cv_prog_compiler_static_works=yes fi fi - $rm conftest* + $rm -r conftest* LDFLAGS="$save_LDFLAGS" fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_static_works" >&6; } -if test x"$lt_prog_compiler_static_works" = xyes; then +if test x"$lt_cv_prog_compiler_static_works" = xyes; then : else lt_prog_compiler_static= @@ -9286,11 +9615,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:9289: $lt_compile\"" >&5) + (eval echo "\"\$as_me:9618: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:9293: \$? = $ac_status" >&5 + echo "$as_me:9622: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -9370,12 +9699,13 @@ echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared librar # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. 
- exclude_expsyms="_GLOBAL_OFFSET_TABLE_" + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= # Just being paranoid about ensuring that cc_basename is set. for cc_temp in $compiler""; do @@ -9434,7 +9764,7 @@ cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` # See if GNU ld supports shared libraries. case $host_os in - aix3* | aix4* | aix5*) + aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no @@ -9654,7 +9984,7 @@ _LT_EOF fi ;; - aix4* | aix5*) + aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. @@ -9674,7 +10004,7 @@ _LT_EOF # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix5*) + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes @@ -9946,11 +10276,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi link_all_deplibs=yes if test "$GCC" = yes ; then output_verbose_link_cmd='echo' - archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else case $cc_basename in xlc*) @@ -10470,7 +10799,7 @@ aix3*) soname_spec='${libname}${release}${shared_ext}$major' ;; -aix4* | aix5*) +aix[4-9]*) version_type=linux need_lib_prefix=no 
need_version=no @@ -11004,6 +11333,21 @@ esac echo "${ECHO_T}$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_search_path_spec="$sys_lib_search_path_spec" +fi + +sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec" +fi + +sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" + variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" @@ -11323,7 +11667,7 @@ fi { echo "$as_me:$LINENO: result: $ac_cv_lib_dld_shl_load" >&5 echo "${ECHO_T}$ac_cv_lib_dld_shl_load" >&6; } if test $ac_cv_lib_dld_shl_load = yes; then - lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-dld" + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" else { echo "$as_me:$LINENO: checking for dlopen" >&5 echo $ECHO_N "checking for dlopen... $ECHO_C" >&6; } @@ -11599,7 +11943,7 @@ fi { echo "$as_me:$LINENO: result: $ac_cv_lib_dld_dld_link" >&5 echo "${ECHO_T}$ac_cv_lib_dld_dld_link" >&6; } if test $ac_cv_lib_dld_dld_link = yes; then - lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-dld" + lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld" fi @@ -11648,7 +11992,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<EOF -#line 11651 "configure" +#line 11995 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -11748,7 +12092,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<EOF -#line 11751 "configure" +#line 12095 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -11875,7 +12219,7 @@ aix3*) fi ;; -aix4* | aix5*) +aix[4-9]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi @@ -11931,6 +12275,7 @@ if test -f "$ltmain"; then predeps \ postdeps \ compiler_lib_search_path \ + compiler_lib_search_dirs \ archive_cmds \ archive_expsym_cmds \ postinstall_cmds \ @@ -11991,7 +12336,7 @@ echo "$as_me: creating $ofile" >&6;} # Generated automatically by $PROGRAM (GNU $PACKAGE $VERSION$TIMESTAMP) # NOTE: Changes made to this file will be lost: look at ltmain.sh. # -# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 # Free Software Foundation, Inc. # # This file is part of GNU Libtool: @@ -12227,6 +12572,10 @@ predeps=$lt_predeps # shared library. postdeps=$lt_postdeps +# The directories searched by this compiler when creating a shared +# library +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path @@ -12475,6 +12824,7 @@ postdep_objects_CXX= predeps_CXX= postdeps_CXX= compiler_lib_search_path_CXX= +compiler_lib_search_dirs_CXX= # Source file extension for C++ test sources. 
ac_ext=cpp @@ -12512,7 +12862,7 @@ ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +$rm -r conftest* # Allow CC to be a program name with arguments. @@ -12719,7 +13069,7 @@ case $host_os in # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; - aix4* | aix5*) + aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. @@ -12732,7 +13082,7 @@ case $host_os in # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix5*) + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) @@ -12990,51 +13340,23 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi fi ;; darwin* | rhapsody*) - case $host_os in - rhapsody* | darwin1.[012]) - allow_undefined_flag_CXX='${wl}-undefined ${wl}suppress' - ;; - *) # Darwin 1.3 on - if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then - allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - else - case ${MACOSX_DEPLOYMENT_TARGET} in - 10.[012]) - allow_undefined_flag_CXX='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' - ;; - 10.*) - allow_undefined_flag_CXX='${wl}-undefined ${wl}dynamic_lookup' - ;; - esac - fi - ;; - esac archive_cmds_need_lc_CXX=no hardcode_direct_CXX=no hardcode_automatic_CXX=yes hardcode_shlibpath_var_CXX=unsupported whole_archive_flag_spec_CXX='' link_all_deplibs_CXX=yes - - if test "$GXX" = yes ; then - lt_int_apple_cc_single_mod=no + allow_undefined_flag_CXX="$_lt_dar_allow_undefined" + if test "$GXX" = yes ; then output_verbose_link_cmd='echo' - if $CC -dumpspecs 2>&1 | $EGREP 'single_module' >/dev/null ; then - lt_int_apple_cc_single_mod=yes + archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + if test "$lt_cv_apple_cc_single_mod" != "yes"; then + archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" fi - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_cmds_CXX='$CC -dynamiclib 
-single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - else - archive_cmds_CXX='$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - fi - module_cmds_CXX='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - if test "X$lt_int_apple_cc_single_mod" = Xyes ; then - archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib -single_module $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - else - archive_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -r -keep_private_externs -nostdlib -o ${lib}-master.o $libobjs~$CC -dynamiclib $allow_undefined_flag -o $lib ${lib}-master.o $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - fi - module_expsym_cmds_CXX='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' else case $cc_basename in xlc*) @@ -13285,7 +13607,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi export_dynamic_flag_spec_CXX='${wl}--export-dynamic' whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' ;; - pgCC*) + pgCC* | pgcpp*) # Portland Group C++ compiler archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' @@ -13692,7 +14014,6 @@ test "$ld_shlibs_CXX" = no && can_build_shared=no GCC_CXX="$GXX" LD_CXX="$LD" - cat > conftest.$ac_ext <<EOF class Foo { @@ -13794,6 +14115,11 @@ fi $rm -f confest.$objext +compiler_lib_search_dirs_CXX= +if test -n "$compiler_lib_search_path_CXX"; then + compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi + # PORTME: override above test on systems where it is broken case $host_os in interix[3-9]*) @@ -13849,7 +14175,6 @@ solaris*) ;; esac - case " $postdeps_CXX " in *" -lc "*) archive_cmds_need_lc_CXX=no ;; esac @@ -13925,7 +14250,7 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } esac else case $host_os in - aix4* | aix5*) + aix[4-9]*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor @@ -14021,7 +14346,7 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-static' ;; - pgCC*) + pgCC* | pgcpp*) # Portland Group C++ compiler. 
lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fpic' @@ -14152,10 +14477,10 @@ if test -n "$lt_prog_compiler_pic_CXX"; then { echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works_CXX+set}" = set; then +if test "${lt_cv_prog_compiler_pic_works_CXX+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_pic_works_CXX=no + lt_cv_prog_compiler_pic_works_CXX=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" @@ -14168,27 +14493,27 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:14171: $lt_compile\"" >&5) + (eval echo "\"\$as_me:14496: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:14175: \$? = $ac_status" >&5 + echo "$as_me:14500: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works_CXX=yes + lt_cv_prog_compiler_pic_works_CXX=yes fi fi $rm conftest* fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_CXX" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works_CXX" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_pic_works_CXX" >&6; } -if test x"$lt_prog_compiler_pic_works_CXX" = xyes; then +if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then case $lt_prog_compiler_pic_CXX in "" | " "*) ;; *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; @@ -14215,10 +14540,10 @@ esac wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" { echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... 
$ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works_CXX+set}" = set; then +if test "${lt_cv_prog_compiler_static_works_CXX+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_static_works_CXX=no + lt_cv_prog_compiler_static_works_CXX=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext @@ -14231,20 +14556,20 @@ else $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works_CXX=yes + lt_cv_prog_compiler_static_works_CXX=yes fi else - lt_prog_compiler_static_works_CXX=yes + lt_cv_prog_compiler_static_works_CXX=yes fi fi - $rm conftest* + $rm -r conftest* LDFLAGS="$save_LDFLAGS" fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_CXX" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works_CXX" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_static_works_CXX" >&6; } -if test x"$lt_prog_compiler_static_works_CXX" = xyes; then +if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then : else lt_prog_compiler_static_CXX= @@ -14272,11 +14597,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:14275: $lt_compile\"" >&5) + (eval echo "\"\$as_me:14600: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:14279: \$? = $ac_status" >&5 + echo "$as_me:14604: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -14329,7 +14654,7 @@ echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared librar export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' case $host_os in - aix4* | aix5*) + aix[4-9]*) # If we're using GNU nm, then we don't want the "-C" option. 
# -C means demangle to AIX nm, but means don't demangle with GNU nm if $NM -V 2>&1 | grep 'GNU' > /dev/null; then @@ -14351,6 +14676,7 @@ echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared librar export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac + exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' { echo "$as_me:$LINENO: result: $ld_shlibs_CXX" >&5 echo "${ECHO_T}$ld_shlibs_CXX" >&6; } @@ -14452,7 +14778,7 @@ aix3*) soname_spec='${libname}${release}${shared_ext}$major' ;; -aix4* | aix5*) +aix[4-9]*) version_type=linux need_lib_prefix=no need_version=no @@ -14985,6 +15311,21 @@ esac echo "${ECHO_T}$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_search_path_spec="$sys_lib_search_path_spec" +fi + +sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec" +fi + +sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" + variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" @@ -15068,6 +15409,7 @@ if test -f "$ltmain"; then predeps_CXX \ postdeps_CXX \ compiler_lib_search_path_CXX \ + compiler_lib_search_dirs_CXX \ archive_cmds_CXX \ archive_expsym_cmds_CXX \ postinstall_cmds_CXX \ @@ -15316,6 +15658,10 @@ predeps=$lt_predeps_CXX # shared library. postdeps=$lt_postdeps_CXX +# The directories searched by this compiler when creating a shared +# library +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_CXX @@ -15530,7 +15876,7 @@ ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +$rm -r conftest* # Allow CC to be a program name with arguments. @@ -15568,7 +15914,7 @@ aix3*) postinstall_cmds='$RANLIB $lib' fi ;; -aix4* | aix5*) +aix[4-9]*) if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then test "$enable_shared" = yes && enable_static=no fi @@ -15833,10 +16179,10 @@ if test -n "$lt_prog_compiler_pic_F77"; then { echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_F77 works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works_F77+set}" = set; then +if test "${lt_cv_prog_compiler_pic_works_F77+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_pic_works_F77=no + lt_cv_prog_compiler_pic_works_F77=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_F77" @@ -15849,27 +16195,27 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15852: $lt_compile\"" >&5) + (eval echo "\"\$as_me:16198: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? 
cat conftest.err >&5 - echo "$as_me:15856: \$? = $ac_status" >&5 + echo "$as_me:16202: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works_F77=yes + lt_cv_prog_compiler_pic_works_F77=yes fi fi $rm conftest* fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_F77" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works_F77" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works_F77" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_pic_works_F77" >&6; } -if test x"$lt_prog_compiler_pic_works_F77" = xyes; then +if test x"$lt_cv_prog_compiler_pic_works_F77" = xyes; then case $lt_prog_compiler_pic_F77 in "" | " "*) ;; *) lt_prog_compiler_pic_F77=" $lt_prog_compiler_pic_F77" ;; @@ -15896,10 +16242,10 @@ esac wl=$lt_prog_compiler_wl_F77 eval lt_tmp_static_flag=\"$lt_prog_compiler_static_F77\" { echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... $ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works_F77+set}" = set; then +if test "${lt_cv_prog_compiler_static_works_F77+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_static_works_F77=no + lt_cv_prog_compiler_static_works_F77=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext @@ -15912,20 +16258,20 @@ else $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works_F77=yes + lt_cv_prog_compiler_static_works_F77=yes fi else - lt_prog_compiler_static_works_F77=yes + lt_cv_prog_compiler_static_works_F77=yes fi fi - $rm conftest* + $rm -r conftest* LDFLAGS="$save_LDFLAGS" fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_F77" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works_F77" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works_F77" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_static_works_F77" >&6; } -if test x"$lt_prog_compiler_static_works_F77" = xyes; then +if test x"$lt_cv_prog_compiler_static_works_F77" = xyes; then : else lt_prog_compiler_static_F77= @@ -15953,11 +16299,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:15956: $lt_compile\"" >&5) + (eval echo "\"\$as_me:16302: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:15960: \$? = $ac_status" >&5 + echo "$as_me:16306: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -16037,12 +16383,13 @@ echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared librar # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. 
- exclude_expsyms_F77="_GLOBAL_OFFSET_TABLE_" + exclude_expsyms_F77='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= # Just being paranoid about ensuring that cc_basename is set. for cc_temp in $compiler""; do @@ -16101,7 +16448,7 @@ cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` # See if GNU ld supports shared libraries. case $host_os in - aix3* | aix4* | aix5*) + aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs_F77=no @@ -16321,7 +16668,7 @@ _LT_EOF fi ;; - aix4* | aix5*) + aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. @@ -16341,7 +16688,7 @@ _LT_EOF # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix5*) + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes @@ -16593,11 +16940,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi link_all_deplibs_F77=yes if test "$GCC" = yes ; then output_verbose_link_cmd='echo' - archive_cmds_F77='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - module_cmds_F77='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds_F77='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + archive_cmds_F77="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds_F77="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds_F77="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds_F77="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else case $cc_basename in xlc*) @@ -17066,7 +17412,7 @@ aix3*) soname_spec='${libname}${release}${shared_ext}$major' ;; -aix4* | aix5*) 
+aix[4-9]*) version_type=linux need_lib_prefix=no need_version=no @@ -17599,6 +17945,21 @@ esac echo "${ECHO_T}$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_search_path_spec="$sys_lib_search_path_spec" +fi + +sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec" +fi + +sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" + variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" @@ -17682,6 +18043,7 @@ if test -f "$ltmain"; then predeps_F77 \ postdeps_F77 \ compiler_lib_search_path_F77 \ + compiler_lib_search_dirs_F77 \ archive_cmds_F77 \ archive_expsym_cmds_F77 \ postinstall_cmds_F77 \ @@ -17930,6 +18292,10 @@ predeps=$lt_predeps_F77 # shared library. postdeps=$lt_postdeps_F77 +# The directories searched by this compiler when creating a shared +# library +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_F77 + # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_F77 @@ -18104,7 +18470,7 @@ ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +$rm -r conftest* # Allow CC to be a program name with arguments. @@ -18153,11 +18519,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:18156: $lt_compile\"" >&5) + (eval echo "\"\$as_me:18522: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:18160: \$? = $ac_status" >&5 + echo "$as_me:18526: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. @@ -18217,7 +18583,7 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries - lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; darwin* | rhapsody*) @@ -18287,7 +18653,7 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6; } mingw* | cygwin* | pw32* | os2*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). - lt_prog_compiler_pic_GCJ='-DDLL_EXPORT' + ;; hpux9* | hpux10* | hpux11*) @@ -18427,10 +18793,10 @@ if test -n "$lt_prog_compiler_pic_GCJ"; then { echo "$as_me:$LINENO: checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works" >&5 echo $ECHO_N "checking if $compiler PIC flag $lt_prog_compiler_pic_GCJ works... 
$ECHO_C" >&6; } -if test "${lt_prog_compiler_pic_works_GCJ+set}" = set; then +if test "${lt_cv_prog_compiler_pic_works_GCJ+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_pic_works_GCJ=no + lt_cv_prog_compiler_pic_works_GCJ=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_GCJ" @@ -18443,27 +18809,27 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:18446: $lt_compile\"" >&5) + (eval echo "\"\$as_me:18812: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 - echo "$as_me:18450: \$? = $ac_status" >&5 + echo "$as_me:18816: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $echo "X$_lt_compiler_boilerplate" | $Xsed -e '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_pic_works_GCJ=yes + lt_cv_prog_compiler_pic_works_GCJ=yes fi fi $rm conftest* fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_pic_works_GCJ" >&5 -echo "${ECHO_T}$lt_prog_compiler_pic_works_GCJ" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_pic_works_GCJ" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_pic_works_GCJ" >&6; } -if test x"$lt_prog_compiler_pic_works_GCJ" = xyes; then +if test x"$lt_cv_prog_compiler_pic_works_GCJ" = xyes; then case $lt_prog_compiler_pic_GCJ in "" | " "*) ;; *) lt_prog_compiler_pic_GCJ=" $lt_prog_compiler_pic_GCJ" ;; @@ -18490,10 +18856,10 @@ esac wl=$lt_prog_compiler_wl_GCJ eval lt_tmp_static_flag=\"$lt_prog_compiler_static_GCJ\" { echo "$as_me:$LINENO: checking if $compiler static flag $lt_tmp_static_flag works" >&5 echo $ECHO_N "checking if $compiler static flag $lt_tmp_static_flag works... 
$ECHO_C" >&6; } -if test "${lt_prog_compiler_static_works_GCJ+set}" = set; then +if test "${lt_cv_prog_compiler_static_works_GCJ+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else - lt_prog_compiler_static_works_GCJ=no + lt_cv_prog_compiler_static_works_GCJ=no save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext @@ -18506,20 +18872,20 @@ else $echo "X$_lt_linker_boilerplate" | $Xsed -e '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then - lt_prog_compiler_static_works_GCJ=yes + lt_cv_prog_compiler_static_works_GCJ=yes fi else - lt_prog_compiler_static_works_GCJ=yes + lt_cv_prog_compiler_static_works_GCJ=yes fi fi - $rm conftest* + $rm -r conftest* LDFLAGS="$save_LDFLAGS" fi -{ echo "$as_me:$LINENO: result: $lt_prog_compiler_static_works_GCJ" >&5 -echo "${ECHO_T}$lt_prog_compiler_static_works_GCJ" >&6; } +{ echo "$as_me:$LINENO: result: $lt_cv_prog_compiler_static_works_GCJ" >&5 +echo "${ECHO_T}$lt_cv_prog_compiler_static_works_GCJ" >&6; } -if test x"$lt_prog_compiler_static_works_GCJ" = xyes; then +if test x"$lt_cv_prog_compiler_static_works_GCJ" = xyes; then : else lt_prog_compiler_static_GCJ= @@ -18547,11 +18913,11 @@ else -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:18550: $lt_compile\"" >&5) + (eval echo "\"\$as_me:18916: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 - echo "$as_me:18554: \$? = $ac_status" >&5 + echo "$as_me:18920: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized @@ -18631,12 +18997,13 @@ echo $ECHO_N "checking whether the $compiler linker ($LD) supports shared librar # it will be wrapped by ` (' and `)$', so one must not match beginning or # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', # as well as any symbol that contains `d'. - exclude_expsyms_GCJ="_GLOBAL_OFFSET_TABLE_" + exclude_expsyms_GCJ='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= # Just being paranoid about ensuring that cc_basename is set. for cc_temp in $compiler""; do @@ -18695,7 +19062,7 @@ cc_basename=`$echo "X$cc_temp" | $Xsed -e 's%.*/%%' -e "s%^$host_alias-%%"` # See if GNU ld supports shared libraries. case $host_os in - aix3* | aix4* | aix5*) + aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs_GCJ=no @@ -18915,7 +19282,7 @@ _LT_EOF fi ;; - aix4* | aix5*) + aix[4-9]*) if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. @@ -18935,7 +19302,7 @@ _LT_EOF # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # need to do runtime linking. 
- case $host_os in aix4.[23]|aix4.[23].*|aix5*) + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then aix_use_runtimelinking=yes @@ -19207,11 +19574,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi link_all_deplibs_GCJ=yes if test "$GCC" = yes ; then output_verbose_link_cmd='echo' - archive_cmds_GCJ='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring' - module_cmds_GCJ='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' - # Don't fix this by using the ld -exported_symbols_list flag, it doesn't exist in older darwin lds - archive_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs $compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' - module_expsym_cmds_GCJ='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' + archive_cmds_GCJ="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds_GCJ="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds_GCJ="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds_GCJ="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" else case $cc_basename in xlc*) @@ -19680,7 +20046,7 @@ aix3*) soname_spec='${libname}${release}${shared_ext}$major' ;; -aix4* | aix5*) +aix[4-9]*) version_type=linux need_lib_prefix=no need_version=no @@ -20213,6 +20579,21 @@ esac echo "${ECHO_T}$dynamic_linker" >&6; } test "$dynamic_linker" = no && can_build_shared=no +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_search_path_spec="$sys_lib_search_path_spec" +fi + +sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + lt_cv_sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec" +fi + +sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" + variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test "$GCC" = yes; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" @@ -20296,6 +20677,7 @@ if test -f "$ltmain"; then predeps_GCJ \ postdeps_GCJ \ compiler_lib_search_path_GCJ \ + compiler_lib_search_dirs_GCJ \ archive_cmds_GCJ \ archive_expsym_cmds_GCJ \ postinstall_cmds_GCJ \ @@ -20544,6 +20926,10 @@ predeps=$lt_predeps_GCJ # shared library. 
postdeps=$lt_postdeps_GCJ +# The directories searched by this compiler when creating a shared +# library +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_GCJ + # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_GCJ @@ -20717,7 +21103,7 @@ ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` -$rm conftest* +$rm -r conftest* # Allow CC to be a program name with arguments. @@ -20777,6 +21163,7 @@ if test -f "$ltmain"; then predeps_RC \ postdeps_RC \ compiler_lib_search_path_RC \ + compiler_lib_search_dirs_RC \ archive_cmds_RC \ archive_expsym_cmds_RC \ postinstall_cmds_RC \ @@ -21025,6 +21412,10 @@ predeps=$lt_predeps_RC # shared library. postdeps=$lt_postdeps_RC +# The directories searched by this compiler when creating a shared +# library +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_RC + # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_RC @@ -21248,6 +21639,8 @@ fi + + @@ -21256,7 +21649,8 @@ for ac_header in mcheck.h values.h socket.h sys/socket.h \ stdlib.h dirent.h pthread.h sys/prctl.h \ sysint.h inttypes.h termcap.h netdb.h sys/socket.h \ sys/systemcfg.h ncurses.h curses.h sys/dr.h sys/vfs.h \ - pam/pam_appl.h security/pam_appl.h sys/sysctl.h \ + pam/pam_appl.h security/pam_appl.h sys/sysctl.h \ + pty.h utmp.h \ sys/syslog.h \ do @@ -24298,14 +24692,14 @@ echo "$as_me: Running in bluegene emulation mode" >&6;} have_bg_ar=yes bg_bridge_so="$bg_dir/lib64/libbglbridge.so" bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -L/usr/lib64 -Wl,--unresolved-symbols=ignore-in-shared-libs -lbglbridge -lbgldb -ltableapi -lbglmachine -lexpat -lsaymessage" - fi + fi # Search for required DB2 library in the directory if test -z "$have_db2" -a -f "$bg_dir/lib64/libdb2.so" ; then have_db2=yes bg_db2_so="$bg_dir/lib64/libdb2.so" bg_ldflags="$bg_ldflags -L$bg_dir/lib64 -ldb2" - fi + fi # Search for headers in the directory if test -z "$have_bg_hdr" -a -f "$bg_dir/include/rm_api.h" ; then @@ -24417,6 +24811,15 @@ _ACEOF + # This is here to avoid a bug in the gcc compiler 3.4.6 + # Without this flag there is a bug when pointing to other functions + # and then using them. It is also advised to set the flag if there + # are goto statements you may get better performance. + if test "$GCC" == "yes"; then + CFLAGS="$CFLAGS -fno-gcse" + fi + + { echo "$as_me:$LINENO: checking whether XCPU is enabled" >&5 echo $ECHO_N "checking whether XCPU is enabled... $ECHO_C" >&6; } @@ -24781,7 +25184,7 @@ done done IFS=$as_save_IFS - test -z "$ac_cv_path_HAVEPKGCONFIG" && ac_cv_path_HAVEPKGCONFIG="$PATH" + test -z "$ac_cv_path_HAVEPKGCONFIG" && ac_cv_path_HAVEPKGCONFIG="no" ;; esac fi @@ -24795,7 +25198,7 @@ echo "${ECHO_T}no" >&6; } fi - if test -z "$HAVEPKGCONFIG"; then + if test x$HAVEPKGCONFIG = xno; then { echo "$as_me:$LINENO: WARNING: *** pkg-config not found. Cannot probe for libglade-2.0 or gtk+-2.0." >&5 echo "$as_me: WARNING: *** pkg-config not found. Cannot probe for libglade-2.0 or gtk+-2.0." >&2;} ac_have_gtk="no" @@ -24922,6 +25325,279 @@ fi + #Check for MySQL + ac_have_mysql="no" + ### Check for mysql_config program + # Extract the first word of "mysql_config", so it can be a program name with args. 
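The block that follows, together with the matching PostgreSQL block further down, is the expansion of the new X_AC_DATABASES check: it locates mysql_config, requires MySQL 5.0.0 or newer, link-tests a trivial client program, and warns rather than failing when anything is missing. The same probes can be reproduced by hand before running configure; a sketch, assuming mysql_config and pg_config are on PATH:

    # MySQL: configure wants mysql_config and a 5.0.0+ client library
    mysql_config --version
    MYSQL_CFLAGS=$(mysql_config --cflags)
    MYSQL_LIBS=$(mysql_config --libs_r)   # configure falls back to --libs if this is empty

    # PostgreSQL: configure derives -I/-L from pg_config and links with -lpq
    PGSQL_CFLAGS="-I$(pg_config --includedir) -L$(pg_config --libdir)"
    PGSQL_LIBS="-lpq"
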
+set dummy mysql_config; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_path_HAVEMYSQLCONFIG+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $HAVEMYSQLCONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_HAVEMYSQLCONFIG="$HAVEMYSQLCONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_HAVEMYSQLCONFIG="$as_dir/$ac_word$ac_exec_ext" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + test -z "$ac_cv_path_HAVEMYSQLCONFIG" && ac_cv_path_HAVEMYSQLCONFIG="no" + ;; +esac +fi +HAVEMYSQLCONFIG=$ac_cv_path_HAVEMYSQLCONFIG +if test -n "$HAVEMYSQLCONFIG"; then + { echo "$as_me:$LINENO: result: $HAVEMYSQLCONFIG" >&5 +echo "${ECHO_T}$HAVEMYSQLCONFIG" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + if test x$HAVEMYSQLCONFIG = xno; then + { echo "$as_me:$LINENO: WARNING: *** mysql_config not found. Evidently no MySQL install on system." >&5 +echo "$as_me: WARNING: *** mysql_config not found. Evidently no MySQL install on system." >&2;} + else + # check for mysql-5.0.0+ + mysql_config_major_version=`$HAVEMYSQLCONFIG --version | \ + sed 's/\([0-9]*\).\([0-9]*\).\([a-zA-Z0-9]*\)/\1/'` + mysql_config_minor_version=`$HAVEMYSQLCONFIG --version | \ + sed 's/\([0-9]*\).\([0-9]*\).\([a-zA-Z0-9]*\)/\2/'` + mysql_config_micro_version=`$HAVEMYSQLCONFIG --version | \ + sed 's/\([0-9]*\).\([0-9]*\).\([a-zA-Z0-9]*\)/\3/'` + + if test $mysql_config_major_version -lt 5; then + { echo "$as_me:$LINENO: WARNING: *** mysql-$mysql_config_major_version.$mysql_config_minor_version.$mysql_config_micro_version available, we need >= mysql-5.0.0 installed for the mysql interface." >&5 +echo "$as_me: WARNING: *** mysql-$mysql_config_major_version.$mysql_config_minor_version.$mysql_config_micro_version available, we need >= mysql-5.0.0 installed for the mysql interface." >&2;} + ac_have_mysql="no" + else + # mysql_config puts -I on the front of the dir. We don't + # want that so we remove it. + MYSQL_CFLAGS=`$HAVEMYSQLCONFIG --cflags` + MYSQL_LIBS=`$HAVEMYSQLCONFIG --libs_r` + if test -z "$MYSQL_LIBS"; then + MYSQL_LIBS=`$HAVEMYSQLCONFIG --libs` + fi + save_CFLAGS="$CFLAGS" + save_LIBS="$LIBS" + CFLAGS="$MYSQL_CFLAGS $save_CFLAGS" + LIBS="$MYSQL_LIBS $save_LIBS" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <mysql.h> +int +main () +{ + + int main() + { + MYSQL mysql; + (void) mysql_init(&mysql); + (void) mysql_close(&mysql); + } + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_have_mysql="yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_have_mysql="no" +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + CFLAGS="$save_CFLAGS" + LIBS="$save_LIBS" + if test "$ac_have_mysql" == "yes"; then + { echo "$as_me:$LINENO: result: MySQL test program built properly." >&5 +echo "${ECHO_T}MySQL test program built properly." >&6; } + + + +cat >>confdefs.h <<\_ACEOF +#define HAVE_MYSQL 1 +_ACEOF + + else + MYSQL_CFLAGS="" + MYSQL_LIBS="" + { echo "$as_me:$LINENO: WARNING: *** MySQL test program execution failed." >&5 +echo "$as_me: WARNING: *** MySQL test program execution failed." >&2;} + fi + fi + fi + + + #Check for PostgreSQL + ac_have_postgres="no" + ### Check for pg_config program + # Extract the first word of "pg_config", so it can be a program name with args. +set dummy pg_config; ac_word=$2 +{ echo "$as_me:$LINENO: checking for $ac_word" >&5 +echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } +if test "${ac_cv_path_HAVEPGCONFIG+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + case $HAVEPGCONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_HAVEPGCONFIG="$HAVEPGCONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_HAVEPGCONFIG="$as_dir/$ac_word$ac_exec_ext" + echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done +done +IFS=$as_save_IFS + + test -z "$ac_cv_path_HAVEPGCONFIG" && ac_cv_path_HAVEPGCONFIG="no" + ;; +esac +fi +HAVEPGCONFIG=$ac_cv_path_HAVEPGCONFIG +if test -n "$HAVEPGCONFIG"; then + { echo "$as_me:$LINENO: result: $HAVEPGCONFIG" >&5 +echo "${ECHO_T}$HAVEPGCONFIG" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } +fi + + + if test x$HAVEPGCONFIG = xno; then + { echo "$as_me:$LINENO: WARNING: *** pg_config not found. Evidently no PostgreSQL install on system." >&5 +echo "$as_me: WARNING: *** pg_config not found. Evidently no PostgreSQL install on system." >&2;} + else + PGSQL_INCLUDEDIR=`$HAVEPGCONFIG --includedir` + PGSQL_LIBDIR=`$HAVEPGCONFIG --libdir` + PGSQL_CFLAGS="-I$PGSQL_INCLUDEDIR -L$PGSQL_LIBDIR" + save_CFLAGS="$CFLAGS" + CFLAGS="$PGSQL_CFLAGS $save_CFLAGS" + + PGSQL_LIBS=" -lpq" + save_LIBS="$LIBS" + LIBS="$PGSQL_LIBS $save_LIBS" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <libpq-fe.h> +int +main () +{ + + int main() + { + PGconn *conn; + conn = PQconnectdb("dbname = postgres"); + (void) PQfinish(conn); + } + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_have_pgsql="yes" +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_have_pgsql="no" +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + if test "$ac_have_pgsql" == "yes"; then + { echo "$as_me:$LINENO: result: PostgreSQL test program built properly." >&5 +echo "${ECHO_T}PostgreSQL test program built properly." >&6; } + + + +cat >>confdefs.h <<\_ACEOF +#define HAVE_PGSQL 1 +_ACEOF + + else + PGSQL_CFLAGS="" + PGSQL_LIBS="" + { echo "$as_me:$LINENO: WARNING: *** PostgreSQL test program execution failed." >&5 +echo "$as_me: WARNING: *** PostgreSQL test program execution failed." >&2;} + fi + fi + + + @@ -25098,7 +25774,6 @@ fi { echo "$as_me:$LINENO: checking for slurmctld default port" >&5 echo $ECHO_N "checking for slurmctld default port... $ECHO_C" >&6; } - # Check whether --with-slurmctld-port was given. if test "${with_slurmctld_port+set}" = set; then withval=$with_slurmctld_port; if test `expr match "$withval" '[0-9]*$'` -gt 0; then @@ -25111,13 +25786,13 @@ fi { echo "$as_me:$LINENO: result: ${slurmctldport=6817}" >&5 echo "${ECHO_T}${slurmctldport=6817}" >&6; } - cat >>confdefs.h <<_ACEOF #define SLURMCTLD_PORT $slurmctldport _ACEOF + { echo "$as_me:$LINENO: checking for slurmd default port" >&5 echo $ECHO_N "checking for slurmd default port... $ECHO_C" >&6; } @@ -25133,7 +25808,6 @@ fi { echo "$as_me:$LINENO: result: ${slurmdport=6818}" >&5 echo "${ECHO_T}${slurmdport=6818}" >&6; } - cat >>confdefs.h <<_ACEOF #define SLURMD_PORT $slurmdport _ACEOF @@ -25141,6 +25815,26 @@ _ACEOF + { echo "$as_me:$LINENO: checking for slurmdbd default port" >&5 +echo $ECHO_N "checking for slurmdbd default port... $ECHO_C" >&6; } + +# Check whether --with-slurmdbd-port was given. +if test "${with_slurmdbd_port+set}" = set; then + withval=$with_slurmdbd_port; if test `expr match "$withval" '[0-9]*$'` -gt 0; then + slurmdbdport="$withval" + fi + + +fi + + { echo "$as_me:$LINENO: result: ${slurmdbdport=6819}" >&5 +echo "${ECHO_T}${slurmdbdport=6819}" >&6; } + +cat >>confdefs.h <<_ACEOF +#define SLURMDBD_PORT $slurmdbdport +_ACEOF + + @@ -25681,16 +26375,15 @@ rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - ac_slurm_with_ssl=no ssl_default_dirs="/usr/local/openssl64 /usr/local/openssl /usr/lib/openssl \ /usr/local/ssl /usr/lib/ssl /usr/local \ - /usr/pkg /opt /opt/openssl" + /usr/pkg /opt /opt/openssl /usr" - SSL_LIBS="-lcrypto" + SSL_LIB_TEST="-lcrypto" # Check whether --with-ssl was given. @@ -25703,7 +26396,7 @@ if test "${with_ssl+set}" = set; then # which is specified with --with-ssl), and libtool is not setting # the correct runtime library path in the binaries. 
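The port and OpenSSL changes in this region are driven by ordinary configure arguments. A sketch of exercising them together (the port values merely restate the built-in defaults, and the OpenSSL prefix is only an example; without --with-ssl the ssl_default_dirs list above is searched):

    ./configure --with-slurmctld-port=6817 \
                --with-slurmd-port=6818 \
                --with-slurmdbd-port=6819 \
                --with-ssl=/usr/local/openssl

Note that a missing or unusable OpenSSL is no longer a hard error in this version: configure only warns, SSL_LIBS is left empty, and the new HAVE_OPENSSL conditional is set to false.
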
if test "x$ac_have_aix" = "xyes"; then - SSL_LIBS="-lcrypto-static" + SSL_LIB_TEST="-lcrypto-static" fi fi @@ -25715,13 +26408,9 @@ fi if test "x$prefix" != "xNONE" ; then tryssldir="$tryssldir $prefix" fi - if test "x$tryssldir" == "xno" ; then - { { echo "$as_me:$LINENO: error: OpenSSL libary is required for SLURM operation, download from www.openssl.org" >&5 -echo "$as_me: error: OpenSSL libary is required for SLURM operation, download from www.openssl.org" >&2;} - { (exit 1); exit 1; }; } - fi - { echo "$as_me:$LINENO: checking for OpenSSL directory" >&5 + if test "x$tryssldir" != "xno" ; then + { echo "$as_me:$LINENO: checking for OpenSSL directory" >&5 echo $ECHO_N "checking for OpenSSL directory... $ECHO_C" >&6; } if test "${ac_cv_openssldir+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 @@ -25730,13 +26419,14 @@ else for ssldir in $tryssldir "" $ssl_default_dirs; do CPPFLAGS="$saved_CPPFLAGS" LDFLAGS="$saved_LDFLAGS" - LIBS="$saved_LIBS $SSL_LIBS" + LIBS="$saved_LIBS $SSL_LIB_TEST" # Skip directories if they don't exist if test ! -z "$ssldir" -a ! -d "$ssldir" ; then continue; fi - if test ! -z "$ssldir" -a "x$ssldir" != "x/usr"; then + sslincludedir="$ssldir" + if test ! -z "$ssldir"; then # Try to use $ssldir/lib if it exists, otherwise # $ssldir if test -d "$ssldir/lib" ; then @@ -25753,11 +26443,15 @@ else # Try to use $ssldir/include if it exists, otherwise # $ssldir if test -d "$ssldir/include" ; then + sslincludedir="$ssldir/include" CPPFLAGS="-I$ssldir/include $saved_CPPFLAGS" else CPPFLAGS="-I$ssldir $saved_CPPFLAGS" fi fi + test -f "$sslincludedir/openssl/rand.h" || continue + test -f "$sslincludedir/openssl/hmac.h" || continue + test -f "$sslincludedir/openssl/sha.h" || continue # Basic test to check for compatible version and correct linking if test "$cross_compiling" = yes; then @@ -25810,7 +26504,7 @@ eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 (exit $ac_status); }; }; then - found_crypto=1 + ac_have_openssl="yes" break; else @@ -25827,27 +26521,28 @@ fi - if test ! -z "$found_crypto" ; then + if test ! -z "$ac_have_openssl" ; then break; fi done - if test -z "$found_crypto" ; then - { { echo "$as_me:$LINENO: error: Could not find working OpenSSL library, download from www.openssl.org" >&5 -echo "$as_me: error: Could not find working OpenSSL library, download from www.openssl.org" >&2;} - { (exit 1); exit 1; }; } - fi - if test -z "$ssldir" ; then - ssldir="(system)" - fi - - ac_cv_openssldir=$ssldir + if test ! -z "$ac_have_openssl" ; then + ac_cv_openssldir=$ssldir + fi fi { echo "$as_me:$LINENO: result: $ac_cv_openssldir" >&5 echo "${ECHO_T}$ac_cv_openssldir" >&6; } + fi + + if test ! -z "$ac_have_openssl" ; then + SSL_LIBS="$SSL_LIB_TEST" - if (test ! -z "$ac_cv_openssldir" && test "x$ac_cv_openssldir" != "x(system)") ; then +cat >>confdefs.h <<\_ACEOF +#define HAVE_OPENSSL 1 +_ACEOF + + if (test ! -z "$ac_cv_openssldir") ; then ssldir=$ac_cv_openssldir if test ! -z "$ssldir" -a "x$ssldir" != "x/usr"; then # Try to use $ssldir/lib if it exists, otherwise @@ -25865,9 +26560,9 @@ echo "${ECHO_T}$ac_cv_openssldir" >&6; } SSL_CPPFLAGS="-I$ssldir" fi fi - fi + fi - cat >conftest.$ac_ext <<_ACEOF + cat >conftest.$ac_ext <<_ACEOF /* confdefs.h. 
*/ _ACEOF cat confdefs.h >>conftest.$ac_ext @@ -25914,12 +26609,25 @@ fi rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ conftest$ac_exeext conftest.$ac_ext + else + SSL_LIBS="" + { echo "$as_me:$LINENO: WARNING: Could not find working OpenSSL library" >&5 +echo "$as_me: WARNING: Could not find working OpenSSL library" >&2;} + fi LIBS="$saved_LIBS" CPPFLAGS="$saved_CPPFLAGS" LDFLAGS="$saved_LDFLAGS" + if test "x$ac_have_openssl" = "xyes"; then + HAVE_OPENSSL_TRUE= + HAVE_OPENSSL_FALSE='#' +else + HAVE_OPENSSL_TRUE='#' + HAVE_OPENSSL_FALSE= +fi + @@ -26055,6 +26763,11 @@ cat >>confdefs.h <<\_ACEOF #define LOAD_ENV_NO_LOGIN 1 _ACEOF + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } +else + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } fi { echo "$as_me:$LINENO: checking whether to enable multiple-slurmd support" >&5 @@ -26170,6 +26883,76 @@ fi LIBS="$savedLIBS" CFLAGS="$savedCFLAGS" +savedLIBS="$LIBS" +LIBS="-lutil $LIBS" +{ echo "$as_me:$LINENO: checking for openpty in -lutil" >&5 +echo $ECHO_N "checking for openpty in -lutil... $ECHO_C" >&6; } +if test "${ac_cv_lib_util_openpty+set}" = set; then + echo $ECHO_N "(cached) $ECHO_C" >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lutil $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char openpty (); +int +main () +{ +return openpty (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && + $as_test_x conftest$ac_exeext; then + ac_cv_lib_util_openpty=yes +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_util_openpty=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ echo "$as_me:$LINENO: result: $ac_cv_lib_util_openpty" >&5 +echo "${ECHO_T}$ac_cv_lib_util_openpty" >&6; } +if test $ac_cv_lib_util_openpty = yes; then + UTIL_LIBS="-lutil" +fi + + +LIBS="$savedLIBS" + cat >>confdefs.h <<\_ACEOF #define WITH_LSD_FATAL_ERROR_FUNC 1 @@ -26182,7 +26965,7 @@ _ACEOF -ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile src/Makefile src/api/Makefile src/common/Makefile src/sacct/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile src/slaunch/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/jobacct/Makefile src/plugins/jobacct/linux/Makefile src/plugins/jobacct/aix/Makefile src/plugins/jobacct/none/Makefile src/plugins/jobacct/gold/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/linear/Makefile src/plugins/select/cons_res/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile" +ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile 
contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 src/Makefile src/api/Makefile src/common/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/gold/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/jobcomp/slurmdbd/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/linear/Makefile src/plugins/select/cons_res/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile" cat >confcache <<\_ACEOF @@ -26407,6 +27190,13 @@ echo "$as_me: error: conditional \"HAVE_SGI_JOB\" was never defined. Usually this means the macro was only invoked conditionally." 
>&2;} { (exit 1); exit 1; }; } fi +if test -z "${HAVE_OPENSSL_TRUE}" && test -z "${HAVE_OPENSSL_FALSE}"; then + { { echo "$as_me:$LINENO: error: conditional \"HAVE_OPENSSL\" was never defined. +Usually this means the macro was only invoked conditionally." >&5 +echo "$as_me: error: conditional \"HAVE_OPENSSL\" was never defined. +Usually this means the macro was only invoked conditionally." >&2;} + { (exit 1); exit 1; }; } +fi if test -z "${WITH_MUNGE_TRUE}" && test -z "${WITH_MUNGE_FALSE}"; then { { echo "$as_me:$LINENO: error: conditional \"WITH_MUNGE\" was never defined. Usually this means the macro was only invoked conditionally." >&5 @@ -26898,18 +27688,24 @@ do "contribs/perlapi/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/Makefile" ;; "contribs/perlapi/libslurm-perl/Makefile.PL") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/libslurm-perl/Makefile.PL" ;; "contribs/torque/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/torque/Makefile" ;; + "contribs/phpext/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/phpext/Makefile" ;; + "contribs/phpext/slurm_php/config.m4") CONFIG_FILES="$CONFIG_FILES contribs/phpext/slurm_php/config.m4" ;; "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; "src/api/Makefile") CONFIG_FILES="$CONFIG_FILES src/api/Makefile" ;; "src/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/common/Makefile" ;; + "src/database/Makefile") CONFIG_FILES="$CONFIG_FILES src/database/Makefile" ;; "src/sacct/Makefile") CONFIG_FILES="$CONFIG_FILES src/sacct/Makefile" ;; + "src/sacctmgr/Makefile") CONFIG_FILES="$CONFIG_FILES src/sacctmgr/Makefile" ;; + "src/sreport/Makefile") CONFIG_FILES="$CONFIG_FILES src/sreport/Makefile" ;; + "src/sstat/Makefile") CONFIG_FILES="$CONFIG_FILES src/sstat/Makefile" ;; "src/salloc/Makefile") CONFIG_FILES="$CONFIG_FILES src/salloc/Makefile" ;; "src/sbatch/Makefile") CONFIG_FILES="$CONFIG_FILES src/sbatch/Makefile" ;; "src/sattach/Makefile") CONFIG_FILES="$CONFIG_FILES src/sattach/Makefile" ;; "src/srun/Makefile") CONFIG_FILES="$CONFIG_FILES src/srun/Makefile" ;; - "src/slaunch/Makefile") CONFIG_FILES="$CONFIG_FILES src/slaunch/Makefile" ;; "src/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/Makefile" ;; "src/slurmd/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmd/Makefile" ;; "src/slurmd/slurmstepd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmstepd/Makefile" ;; + "src/slurmdbd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmdbd/Makefile" ;; "src/slurmctld/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmctld/Makefile" ;; "src/sbcast/Makefile") CONFIG_FILES="$CONFIG_FILES src/sbcast/Makefile" ;; "src/scontrol/Makefile") CONFIG_FILES="$CONFIG_FILES src/scontrol/Makefile" ;; @@ -26920,6 +27716,13 @@ do "src/strigger/Makefile") CONFIG_FILES="$CONFIG_FILES src/strigger/Makefile" ;; "src/sview/Makefile") CONFIG_FILES="$CONFIG_FILES src/sview/Makefile" ;; "src/plugins/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/Makefile" ;; + "src/plugins/accounting_storage/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/Makefile" ;; + "src/plugins/accounting_storage/filetxt/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/filetxt/Makefile" ;; + "src/plugins/accounting_storage/gold/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/gold/Makefile" ;; + "src/plugins/accounting_storage/mysql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/mysql/Makefile" ;; + "src/plugins/accounting_storage/pgsql/Makefile") CONFIG_FILES="$CONFIG_FILES 
src/plugins/accounting_storage/pgsql/Makefile" ;; + "src/plugins/accounting_storage/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/none/Makefile" ;; + "src/plugins/accounting_storage/slurmdbd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/slurmdbd/Makefile" ;; "src/plugins/auth/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/auth/Makefile" ;; "src/plugins/auth/authd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/auth/authd/Makefile" ;; "src/plugins/auth/munge/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/auth/munge/Makefile" ;; @@ -26928,15 +27731,21 @@ do "src/plugins/checkpoint/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/aix/Makefile" ;; "src/plugins/checkpoint/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/none/Makefile" ;; "src/plugins/checkpoint/ompi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/ompi/Makefile" ;; - "src/plugins/jobacct/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct/Makefile" ;; - "src/plugins/jobacct/linux/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct/linux/Makefile" ;; - "src/plugins/jobacct/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct/aix/Makefile" ;; - "src/plugins/jobacct/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct/none/Makefile" ;; - "src/plugins/jobacct/gold/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct/gold/Makefile" ;; + "src/plugins/checkpoint/xlch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/xlch/Makefile" ;; + "src/plugins/crypto/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/crypto/Makefile" ;; + "src/plugins/crypto/munge/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/crypto/munge/Makefile" ;; + "src/plugins/crypto/openssl/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/crypto/openssl/Makefile" ;; + "src/plugins/jobacct_gather/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct_gather/Makefile" ;; + "src/plugins/jobacct_gather/linux/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct_gather/linux/Makefile" ;; + "src/plugins/jobacct_gather/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct_gather/aix/Makefile" ;; + "src/plugins/jobacct_gather/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct_gather/none/Makefile" ;; "src/plugins/jobcomp/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/Makefile" ;; "src/plugins/jobcomp/filetxt/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/filetxt/Makefile" ;; "src/plugins/jobcomp/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/none/Makefile" ;; "src/plugins/jobcomp/script/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/script/Makefile" ;; + "src/plugins/jobcomp/mysql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/mysql/Makefile" ;; + "src/plugins/jobcomp/pgsql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/pgsql/Makefile" ;; + "src/plugins/jobcomp/slurmdbd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/slurmdbd/Makefile" ;; "src/plugins/proctrack/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/Makefile" ;; "src/plugins/proctrack/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/aix/Makefile" ;; "src/plugins/proctrack/pgid/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/pgid/Makefile" ;; @@ -27211,6 +28020,8 @@ LN_S!$LN_S$ac_delim ECHO!$ECHO$ac_delim AR!$AR$ac_delim RANLIB!$RANLIB$ac_delim +DSYMUTIL!$DSYMUTIL$ac_delim 
+NMEDIT!$NMEDIT$ac_delim CXXCPP!$CXXCPP$ac_delim F77!$F77$ac_delim FFLAGS!$FFLAGS$ac_delim @@ -27245,10 +28056,17 @@ GTK2_CFLAGS!$GTK2_CFLAGS$ac_delim GTK2_LIBS!$GTK2_LIBS$ac_delim HAVE_GTK_TRUE!$HAVE_GTK_TRUE$ac_delim HAVE_GTK_FALSE!$HAVE_GTK_FALSE$ac_delim +HAVEMYSQLCONFIG!$HAVEMYSQLCONFIG$ac_delim +MYSQL_LIBS!$MYSQL_LIBS$ac_delim +MYSQL_CFLAGS!$MYSQL_CFLAGS$ac_delim +HAVEPGCONFIG!$HAVEPGCONFIG$ac_delim +PGSQL_LIBS!$PGSQL_LIBS$ac_delim +PGSQL_CFLAGS!$PGSQL_CFLAGS$ac_delim DEBUG_MODULES_TRUE!$DEBUG_MODULES_TRUE$ac_delim DEBUG_MODULES_FALSE!$DEBUG_MODULES_FALSE$ac_delim SLURMCTLD_PORT!$SLURMCTLD_PORT$ac_delim SLURMD_PORT!$SLURMD_PORT$ac_delim +SLURMDBD_PORT!$SLURMDBD_PORT$ac_delim ELAN_LIBS!$ELAN_LIBS$ac_delim HAVE_ELAN_TRUE!$HAVE_ELAN_TRUE$ac_delim HAVE_ELAN_FALSE!$HAVE_ELAN_FALSE$ac_delim @@ -27263,19 +28081,64 @@ READLINE_LIBS!$READLINE_LIBS$ac_delim SSL_LDFLAGS!$SSL_LDFLAGS$ac_delim SSL_LIBS!$SSL_LIBS$ac_delim SSL_CPPFLAGS!$SSL_CPPFLAGS$ac_delim +HAVE_OPENSSL_TRUE!$HAVE_OPENSSL_TRUE$ac_delim +HAVE_OPENSSL_FALSE!$HAVE_OPENSSL_FALSE$ac_delim +HAVE_OPENSSL!$HAVE_OPENSSL$ac_delim MUNGE_LIBS!$MUNGE_LIBS$ac_delim MUNGE_CPPFLAGS!$MUNGE_CPPFLAGS$ac_delim MUNGE_LDFLAGS!$MUNGE_LDFLAGS$ac_delim WITH_MUNGE_TRUE!$WITH_MUNGE_TRUE$ac_delim WITH_MUNGE_FALSE!$WITH_MUNGE_FALSE$ac_delim AUTHD_LIBS!$AUTHD_LIBS$ac_delim +_ACEOF + + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then + break + elif $ac_last_try; then + { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` +if test -n "$ac_eof"; then + ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` + ac_eof=`expr $ac_eof + 1` +fi + +cat >>$CONFIG_STATUS <<_ACEOF +cat >"\$tmp/subs-2.sed" <<\CEOF$ac_eof +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +_ACEOF +sed ' +s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g +s/^/s,@/; s/!/@,|#_!!_#|/ +:n +t n +s/'"$ac_delim"'$/,g/; t +s/$/\\/; p +N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n +' >>$CONFIG_STATUS <conf$$subs.sed +rm -f conf$$subs.sed +cat >>$CONFIG_STATUS <<_ACEOF +CEOF$ac_eof +_ACEOF + + +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + cat >conf$$subs.sed <<_ACEOF AUTHD_CFLAGS!$AUTHD_CFLAGS$ac_delim WITH_AUTHD_TRUE!$WITH_AUTHD_TRUE$ac_delim WITH_AUTHD_FALSE!$WITH_AUTHD_FALSE$ac_delim +UTIL_LIBS!$UTIL_LIBS$ac_delim LTLIBOBJS!$LTLIBOBJS$ac_delim _ACEOF - if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 89; then + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 5; then break elif $ac_last_try; then { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 @@ -27293,7 +28156,7 @@ if test -n "$ac_eof"; then fi cat >>$CONFIG_STATUS <<_ACEOF -cat >"\$tmp/subs-2.sed" <<\CEOF$ac_eof +cat >"\$tmp/subs-3.sed" <<\CEOF$ac_eof /@[a-zA-Z_][a-zA-Z_0-9]*@/!b end _ACEOF sed ' @@ -27562,7 +28425,7 @@ s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack -" $ac_file_inputs | sed -f "$tmp/subs-1.sed" | sed -f "$tmp/subs-2.sed" >$tmp/out +" $ac_file_inputs | sed -f "$tmp/subs-1.sed" | sed -f "$tmp/subs-2.sed" | sed -f "$tmp/subs-3.sed" >$tmp/out test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && @@ -27676,21 +28539,22 @@ echo "$as_me: $ac_file is unchanged" >&6;} fi rm -f "$tmp/out12" # 
Compute $ac_file's index in $config_headers. +_am_arg=$ac_file _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in - $ac_file | $ac_file:* ) + $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done -echo "timestamp for $ac_file" >`$as_dirname -- $ac_file || -$as_expr X$ac_file : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X$ac_file : 'X\(//\)[^/]' \| \ - X$ac_file : 'X\(//\)$' \| \ - X$ac_file : 'X\(/\)' \| . 2>/dev/null || -echo X$ac_file | +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q @@ -27727,7 +28591,7 @@ echo "$as_me: executing $ac_file commands" >&6;} # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. - if sed 10q "$mf" | grep '^#.*generated by automake' > /dev/null 2>&1; then + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`$as_dirname -- "$mf" || $as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$mf" : 'X\(//\)[^/]' \| \ diff --git a/configure.ac b/configure.ac index e452a5336..56782ee48 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ -# $Id: configure.ac 12827 2007-12-14 22:29:30Z da $ +# $Id: configure.ac 14147 2008-05-28 22:46:41Z da $ # This file is to be processed with autoconf to generate a configure script dnl Prologue @@ -59,7 +59,8 @@ AC_CHECK_HEADERS(mcheck.h values.h socket.h sys/socket.h \ stdlib.h dirent.h pthread.h sys/prctl.h \ sysint.h inttypes.h termcap.h netdb.h sys/socket.h \ sys/systemcfg.h ncurses.h curses.h sys/dr.h sys/vfs.h \ - pam/pam_appl.h security/pam_appl.h sys/sysctl.h \ + pam/pam_appl.h security/pam_appl.h sys/sysctl.h \ + pty.h utmp.h \ sys/syslog.h \ ) AC_HEADER_SYS_WAIT @@ -133,6 +134,7 @@ CFLAGS="$CFLAGS $PTHREAD_CFLAGS" LIBS="$PTHREAD_LIBS $LIBS" X_AC_BLUEGENE +X_AC_CFLAGS X_AC_XCPU X_AC_SLURM_SEMAPHORE @@ -143,6 +145,8 @@ AC_SUBST(HAVE_SOME_CURSES) X_AC_GTK AM_CONDITIONAL(HAVE_GTK, test "x$ac_have_gtk" = "xyes") +X_AC_DATABASES + dnl checks for system services. 
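The configure.ac hunks here (the extra pty.h/utmp.h header checks, X_AC_CFLAGS, X_AC_DATABASES) are the hand-written source of the large regenerated configure diff above. A sketch of regenerating the build system after such edits, assuming the stock autotools workflow with the project's auxdir macro directory (the regenerated files in this patch identify themselves as automake 1.10.1 output):

    aclocal -I auxdir
    autoconf
    automake --add-missing --copy
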
dnl @@ -160,9 +164,9 @@ X_AC_DEBUG AM_CONDITIONAL(DEBUG_MODULES, test "x$ac_debug" = "xtrue") -dnl check for slurmd and slurmctld default ports +dnl check for slurmctld, slurmd and slurmdbd default ports dnl -X_AC_SLURM_PORTS([6817], [6818]) +X_AC_SLURM_PORTS([6817], [6818], [6819]) dnl check for whether to include Elan support @@ -188,7 +192,8 @@ X_AC_READLINE dnl dnl X_AC_SLURM_WITH_SSL - +AM_CONDITIONAL(HAVE_OPENSSL, test "x$ac_have_openssl" = "xyes") +AC_SUBST(HAVE_OPENSSL) dnl dnl Check for compilation of SLURM auth modules: @@ -214,6 +219,9 @@ AC_ARG_ENABLE( if test "$x_ac_load_env_no_login" = yes; then AC_DEFINE(LOAD_ENV_NO_LOGIN, 1, [Define to 1 for --get-user-env to load user environment without login.]) + AC_MSG_RESULT([yes]) +else + AC_MSG_RESULT([no]) fi dnl @@ -249,6 +257,12 @@ AM_CONDITIONAL(WITH_AUTHD, test "x$have_authd" = "xyes") LIBS="$savedLIBS" CFLAGS="$savedCFLAGS" +savedLIBS="$LIBS" +LIBS="-lutil $LIBS" +AC_CHECK_LIB(util, openpty, [UTIL_LIBS="-lutil"], []) +AC_SUBST(UTIL_LIBS) +LIBS="$savedLIBS" + dnl Add LSD-Tools defines: AC_DEFINE(WITH_LSD_FATAL_ERROR_FUNC, 1, [Have definition of lsd_fatal_error()]) AC_DEFINE(WITH_LSD_NOMEM_ERROR_FUNC, 1, [Have definition of lsd_nomem_error()]) @@ -262,18 +276,24 @@ AC_CONFIG_FILES([Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile + contribs/phpext/Makefile + contribs/phpext/slurm_php/config.m4 src/Makefile src/api/Makefile src/common/Makefile + src/database/Makefile src/sacct/Makefile + src/sacctmgr/Makefile + src/sreport/Makefile + src/sstat/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile - src/slaunch/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile + src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile @@ -284,6 +304,13 @@ AC_CONFIG_FILES([Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile + src/plugins/accounting_storage/Makefile + src/plugins/accounting_storage/filetxt/Makefile + src/plugins/accounting_storage/gold/Makefile + src/plugins/accounting_storage/mysql/Makefile + src/plugins/accounting_storage/pgsql/Makefile + src/plugins/accounting_storage/none/Makefile + src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile @@ -292,15 +319,21 @@ AC_CONFIG_FILES([Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile - src/plugins/jobacct/Makefile - src/plugins/jobacct/linux/Makefile - src/plugins/jobacct/aix/Makefile - src/plugins/jobacct/none/Makefile - src/plugins/jobacct/gold/Makefile + src/plugins/checkpoint/xlch/Makefile + src/plugins/crypto/Makefile + src/plugins/crypto/munge/Makefile + src/plugins/crypto/openssl/Makefile + src/plugins/jobacct_gather/Makefile + src/plugins/jobacct_gather/linux/Makefile + src/plugins/jobacct_gather/aix/Makefile + src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile + src/plugins/jobcomp/mysql/Makefile + src/plugins/jobcomp/pgsql/Makefile + src/plugins/jobcomp/slurmdbd/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile diff --git a/contribs/Makefile.am b/contribs/Makefile.am index bd2f2097f..77afcd59a 100644 --- a/contribs/Makefile.am +++ b/contribs/Makefile.am @@ -1,4 +1,4 @@ 
-SUBDIRS = perlapi torque +SUBDIRS = perlapi phpext torque EXTRA_DIST = \ env_cache_builder.c \ diff --git a/contribs/Makefile.in b/contribs/Makefile.in index 60186dd79..dffadf883 100644 --- a/contribs/Makefile.in +++ b/contribs/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -99,6 +101,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -112,10 +115,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -135,7 +141,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -146,6 +155,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -161,6 +172,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -176,6 +188,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -232,7 +245,7 @@ target_os = @target_os@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -SUBDIRS = perlapi torque +SUBDIRS = perlapi phpext torque EXTRA_DIST = \ env_cache_builder.c \ make.slurm.patch \ @@ -355,8 +368,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -381,8 +394,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do 
\ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -392,13 +405,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/contribs/env_cache_builder.c b/contribs/env_cache_builder.c index f13d7f8e7..d8560d104 100644 --- a/contribs/env_cache_builder.c +++ b/contribs/env_cache_builder.c @@ -1,23 +1,33 @@ /*****************************************************************************\ + * On the cluster's control host as user root, execute: + * make -f /dev/null env_cache_builder + * ./env_cache_builder + ***************************************************************************** * This program is used to build an environment variable cache file for use * with the srun/sbatch --get-user-env option, which is used by Moab to launch * user jobs. srun/sbatch will first attempt to load the user's current * environment by executing "su - <user> -c env". If that fails to complete - * in a relatively short period of time (currently 8 seconds), srun/sbatch + * in a relatively short period of time (currently 3 seconds), srun/sbatch * will attempt to load the user's environment from a cache file located * in the directory StateSaveLocation with a name of the sort "env_<user>". * If that fails, then abort the job request. * * This program can accept a space delimited list of individual users to have * cache files created (e.g. "cache_build alice bob chuck"). If no argument - * is given, cache files will be created for all users. - * - * This program must execute as user root. + * is given, cache files will be created for all users in the "/etc/passwd" + * file. If you see "ERROR" in the output, it means that the user's + * environment could not be loaded automatically, typically because their + * dot files spawn some other shell. You must explicitly login as the user, + * execute "env" and write the output to a file having the same name as the + * user in a subdirectory of the configured StateSaveLocation named "env_cache" + * (e.g. "/tmp/slurm/env_cache/alice"). The file is only needed on the node + * where the Moab daemon executes, typically the control host. ***************************************************************************** - * Copyright (C) 2007-2008 The Regents of the University of California. + * Copyright (C) 2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -64,16 +74,20 @@ static long int _build_cache(char *user_name, char *cache_dir); static int _get_cache_dir(char *buffer, int buf_size); +static void _log_failures(int failures, char *cache_dir); static int _parse_line(char *in_line, char **user_name, int *user_id); +char *env_loc = NULL; + main (int argc, char **argv) { FILE *passwd_fd; char cache_dir[256], in_line[256], *user_name; - int i, user_id; + int i, failures = 0, user_cnt = 0, user_id; long int delta_t; + struct stat buf; - if (geteuid() != (uid_t)0) { + if (geteuid() != (uid_t) 0) { printf("Need to run as user root\n"); exit(1); } @@ -86,16 +100,45 @@ main (int argc, char **argv) strerror(errno)); exit(1); } - printf("cache_dir=%s\n", cache_dir); + + if (stat("/bgl", &buf) == 0) { + printf("BlueGene Note: Execute only a a front-end node, " + "not the service node\n"); + printf(" User logins to the service node are " + "disabled\n\n"); + } + if (stat("/bin/su", &buf)) { + printf("Could not locate command: /bin/su\n"); + exit(1); + } + if (stat("/bin/echo", &buf)) { + printf("Could not locate command: /bin/echo\n"); + exit(1); + } + if (stat("/bin/env", &buf) == 0) + env_loc = "/bin/env"; + else if (stat("/usr/bin/env", &buf) == 0) + env_loc = "/usr/bin/env"; + else { + printf("Could not location command: env\n"); + exit(1); + } + + printf("Building user environment cache files for Moab/Slurm.\n"); + printf("This will take a while.\n\n"); for (i=1; i<argc; i++) { delta_t = _build_cache(argv[i], cache_dir); + if (delta_t == -1) + failures++; if (delta_t < ((SU_WAIT_MSEC * 0.8) * 1000)) continue; printf("WARNING: user %-8s time %ld usec\n", argv[i], delta_t); } - if (i > 1) + if (i > 1) { + _log_failures(failures, cache_dir); exit(0); + } passwd_fd = fopen("/etc/passwd", "r"); if (!passwd_fd) { @@ -109,11 +152,33 @@ main (int argc, char **argv) if (user_id <= 100) continue; delta_t = _build_cache(user_name, cache_dir); + if (delta_t == -1) + failures++; + user_cnt++; + if ((user_cnt % 100) == 0) + printf("Processed %d users...\n", user_cnt); if (delta_t < ((SU_WAIT_MSEC * 0.8) * 1000)) continue; printf("WARNING: user %-8s time %ld usec\n", user_name, delta_t); } fclose(passwd_fd); + _log_failures(failures, cache_dir); +} + +static void _log_failures(int failures, char *cache_dir) +{ + if (failures) { + printf("\n"); + printf("Some user environments could not be loaded.\n"); + printf("Manually run 'env' for those %d users.\n", + failures); + printf("Write the output to a file with the same name as " + "the user in the\n %s directory\n", cache_dir); + } else { + printf("\n"); + printf("All user environments successfully loaded.\n"); + printf("Files written to the %s directory\n", cache_dir); + } } /* Given a line from /etc/passwd, sets the user_name and user_id @@ -192,8 +257,8 @@ static long int _build_cache(char *user_name, char *cache_dir) open("/dev/null", O_WRONLY); snprintf(buffer, sizeof(buffer), "/bin/echo; /bin/echo; /bin/echo; " - "/bin/echo %s; /bin/env; /bin/echo %s", - starttoken, stoptoken); + "/bin/echo %s; %s; /bin/echo %s", + starttoken, env_loc, stoptoken); #ifdef LOAD_ENV_NO_LOGIN execl("/bin/su", "su", user_name, "-c", buffer, NULL); #else @@ -272,7 +337,7 @@ static long int _build_cache(char *user_name, char *cache_dir) } close(fildes[0]); if (!found) { - printf("ERROR: Failed to load current user environment " + printf("***ERROR: Failed to load current user environment " "variables for %s\n", user_name); return -1; } @@ -289,7 +354,7 @@ static long int _build_cache(char *user_name, char *cache_dir) line = 
strtok_r(NULL, "\n", &last); } if (!found) { - printf("ERROR: Failed to get current user environment " + printf("***ERROR: Failed to get current user environment " "variables for %s\n", user_name); return -1; } @@ -327,7 +392,7 @@ static long int _build_cache(char *user_name, char *cache_dir) delta_t = (now.tv_sec - begin.tv_sec) * 1000000; delta_t += now.tv_usec - begin.tv_usec; if (!found) { - printf("ERROR: Failed to write all user environment " + printf("***ERROR: Failed to write all user environment " "variables for %s\n", user_name); if (delta_t < (SU_WAIT_MSEC * 1000)) return (SU_WAIT_MSEC * 1000); diff --git a/contribs/perlapi/Makefile.in b/contribs/perlapi/Makefile.in index c11e027f8..b8783c521 100644 --- a/contribs/perlapi/Makefile.in +++ b/contribs/perlapi/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -87,6 +89,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -100,10 +103,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -123,7 +129,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -134,6 +143,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -149,6 +160,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -164,6 +176,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ diff --git a/contribs/perlapi/libslurm-perl/Slurm.xs b/contribs/perlapi/libslurm-perl/Slurm.xs index 3f7a91f52..b92833d1f 100644 --- a/contribs/perlapi/libslurm-perl/Slurm.xs +++ 
b/contribs/perlapi/libslurm-perl/Slurm.xs @@ -6,6 +6,8 @@ #include <slurm/slurm.h> #include <signal.h> +#include <string.h> +#include <unistd.h> #include "msg.h" #include "const-c.inc" @@ -999,7 +1001,13 @@ slurm_step_launch(slurm_step_ctx ctx = NULL, HV* hv = NULL, SV* start_cb = NULL, if(hv_to_slurm_step_launch_params(hv, ¶ms) < 0) { RETVAL = SLURM_ERROR; } else { - RETVAL = slurm_step_launch(ctx, ¶ms, &callbacks); + char *dot_ptr, launcher_host[1024]; + gethostname(launcher_host, sizeof(launcher_host)); + dot_ptr = strchr(launcher_host, '.'); + if (dot_ptr) + dot_ptr[0] = '\0'; + RETVAL = slurm_step_launch(ctx, launcher_host, + ¶ms, &callbacks); } OUTPUT: RETVAL diff --git a/contribs/perlapi/libslurm-perl/alloc.c b/contribs/perlapi/libslurm-perl/alloc.c index 944e25429..0ca22566a 100644 --- a/contribs/perlapi/libslurm-perl/alloc.c +++ b/contribs/perlapi/libslurm-perl/alloc.c @@ -61,7 +61,6 @@ hv_to_job_desc_msg(HV* hv, job_desc_msg_t* job_desc_msg) FETCH_FIELD(hv, job_desc_msg, job_min_cores, uint16_t, FALSE); FETCH_FIELD(hv, job_desc_msg, job_min_threads, uint16_t, FALSE); FETCH_FIELD(hv, job_desc_msg, job_min_memory, uint16_t, FALSE); - FETCH_FIELD(hv, job_desc_msg, job_max_memory, uint16_t, FALSE); FETCH_FIELD(hv, job_desc_msg, job_min_tmp_disk, uint16_t, FALSE); FETCH_FIELD(hv, job_desc_msg, partition, charp, FALSE); FETCH_FIELD(hv, job_desc_msg, priority, uint32_t, FALSE); @@ -89,7 +88,7 @@ hv_to_job_desc_msg(HV* hv, job_desc_msg_t* job_desc_msg) argv_av = (AV*)SvRV(*svp); job_desc_msg->argc = av_len(argv_av) + 1; if (job_desc_msg->argc > 0) { - Newz(0, job_desc_msg->argv, job_desc_msg->argc + 1, char*); + Newz(0, job_desc_msg->argv, (int32_t)(job_desc_msg->argc + 1), char*); for(i = 0; i < job_desc_msg->argc; i ++) { if((svp = av_fetch(argv_av, i, FALSE))) *(job_desc_msg->argv + i) = (char*) SvPV_nolen(*svp); @@ -112,11 +111,10 @@ hv_to_job_desc_msg(HV* hv, job_desc_msg_t* job_desc_msg) FETCH_FIELD(hv, job_desc_msg, work_dir, charp, FALSE); FETCH_FIELD(hv, job_desc_msg, alloc_node, charp, FALSE); FETCH_FIELD(hv, job_desc_msg, alloc_sid, uint32_t, FALSE); - FETCH_FIELD(hv, job_desc_msg, alloc_resp_hostname, charp, FALSE); + FETCH_FIELD(hv, job_desc_msg, resp_host, charp, FALSE); FETCH_FIELD(hv, job_desc_msg, alloc_resp_port, uint16_t, FALSE); - FETCH_FIELD(hv, job_desc_msg, other_hostname, charp, FALSE); FETCH_FIELD(hv, job_desc_msg, other_port, uint16_t, FALSE); - FETCH_FIELD(hv, job_desc_msg, dependency, uint32_t, FALSE); + FETCH_FIELD(hv, job_desc_msg, dependency, charp, FALSE); FETCH_FIELD(hv, job_desc_msg, overcommit, uint16_t, FALSE); FETCH_FIELD(hv, job_desc_msg, num_tasks, uint32_t, FALSE); FETCH_FIELD(hv, job_desc_msg, nice, uint16_t, FALSE); @@ -128,7 +126,7 @@ hv_to_job_desc_msg(HV* hv, job_desc_msg_t* job_desc_msg) FETCH_FIELD(hv, job_desc_msg, begin_time, time_t, FALSE); FETCH_FIELD(hv, job_desc_msg, mail_type, uint16_t, FALSE); FETCH_FIELD(hv, job_desc_msg, mail_user, charp, FALSE); - FETCH_FIELD(hv, job_desc_msg, no_requeue, uint16_t, FALSE); + FETCH_FIELD(hv, job_desc_msg, requeue, uint16_t, FALSE); /* geometry */ #if SYSTEM_DIMENSIONS if((svp = hv_fetch(hv, "geometry", 8, FALSE))) { diff --git a/contribs/perlapi/libslurm-perl/conf.c b/contribs/perlapi/libslurm-perl/conf.c index 12d4a4e6b..a4a2e91ea 100644 --- a/contribs/perlapi/libslurm-perl/conf.c +++ b/contribs/perlapi/libslurm-perl/conf.c @@ -23,6 +23,7 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv) STORE_FIELD(hv, conf, backup_addr, charp); if(conf->backup_controller) STORE_FIELD(hv, conf, 
backup_controller, charp); + STORE_FIELD(hv, conf, boot_time, time_t); STORE_FIELD(hv, conf, cache_groups, uint16_t); if(conf->checkpoint_type) STORE_FIELD(hv, conf, checkpoint_type, charp); @@ -30,21 +31,40 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv) STORE_FIELD(hv, conf, control_addr, charp); if(conf->control_machine) STORE_FIELD(hv, conf, control_machine, charp); + if(conf->crypto_type) + STORE_FIELD(hv, conf, crypto_type, charp); if(conf->epilog) STORE_FIELD(hv, conf, epilog, charp); STORE_FIELD(hv, conf, first_job_id, uint32_t); STORE_FIELD(hv, conf, next_job_id, uint32_t); STORE_FIELD(hv, conf, fast_schedule, uint16_t); STORE_FIELD(hv, conf, inactive_limit, uint16_t); - if(conf->job_acct_logfile) - STORE_FIELD(hv, conf, job_acct_logfile, charp); - STORE_FIELD(hv, conf, job_acct_freq, uint16_t); - if(conf->job_acct_type) - STORE_FIELD(hv, conf, job_acct_type, charp); - if(conf->job_comp_type) - STORE_FIELD(hv, conf, job_comp_type, charp); + if(conf->job_acct_gather_type) + STORE_FIELD(hv, conf, job_acct_gather_type, charp); + STORE_FIELD(hv, conf, job_acct_gather_freq, uint16_t); + if(conf->accounting_storage_loc) + STORE_FIELD(hv, conf, accounting_storage_loc, charp); + if(conf->accounting_storage_type) + STORE_FIELD(hv, conf, accounting_storage_type, charp); + if(conf->accounting_storage_user) + STORE_FIELD(hv, conf, accounting_storage_user, charp); + if(conf->accounting_storage_host) + STORE_FIELD(hv, conf, accounting_storage_host, charp); + if(conf->accounting_storage_pass) + STORE_FIELD(hv, conf, accounting_storage_pass, charp); + STORE_FIELD(hv, conf, accounting_storage_port, uint32_t); if(conf->job_comp_loc) STORE_FIELD(hv, conf, job_comp_loc, charp); + if(conf->job_comp_type) + STORE_FIELD(hv, conf, job_comp_type, charp); + if(conf->job_comp_user) + STORE_FIELD(hv, conf, job_comp_user, charp); + if(conf->job_comp_host) + STORE_FIELD(hv, conf, job_comp_host, charp); + if(conf->job_comp_pass) + STORE_FIELD(hv, conf, job_comp_pass, charp); + STORE_FIELD(hv, conf, job_comp_port, uint32_t); + STORE_FIELD(hv, conf, job_file_append, uint16_t); STORE_FIELD(hv, conf, kill_wait, uint16_t); if(conf->mail_prog) STORE_FIELD(hv, conf, mail_prog, charp); @@ -57,6 +77,7 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv) STORE_FIELD(hv, conf, plugindir, charp); if(conf->plugstack) STORE_FIELD(hv, conf, plugstack, charp); + STORE_FIELD(hv, conf, private_data, uint16_t); if(conf->proctrack_type) STORE_FIELD(hv, conf, proctrack_type, charp); if(conf->prolog) @@ -67,6 +88,9 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv) if(conf->propagate_rlimits_except) STORE_FIELD(hv, conf, propagate_rlimits_except, charp); STORE_FIELD(hv, conf, ret2service, uint16_t); + STORE_FIELD(hv, conf, resume_rate, uint16_t); + if(conf->resume_program) + STORE_FIELD(hv, conf, resume_program, charp); if(conf->schedtype) STORE_FIELD(hv, conf, schedtype, charp); STORE_FIELD(hv, conf, schedport, uint16_t); @@ -97,6 +121,14 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv) STORE_FIELD(hv, conf, slurm_conf, charp); if(conf->state_save_location) STORE_FIELD(hv, conf, state_save_location, charp); + if(conf->suspend_exc_nodes) + STORE_FIELD(hv, conf, suspend_exc_nodes, charp); + if(conf->suspend_exc_parts) + STORE_FIELD(hv, conf, suspend_exc_parts, charp); + if(conf->suspend_program) + STORE_FIELD(hv, conf, suspend_program, charp); + STORE_FIELD(hv, conf, suspend_rate, uint16_t); + STORE_FIELD(hv, conf, suspend_time, uint16_t); if(conf->switch_type) STORE_FIELD(hv, conf, switch_type, charp); 
if(conf->task_epilog) @@ -121,6 +153,9 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv) STORE_FIELD(hv, conf, node_prefix, charp); STORE_FIELD(hv, conf, tree_width, uint16_t); STORE_FIELD(hv, conf, use_pam, uint16_t); + if(conf->unkillable_program) + STORE_FIELD(hv, conf, unkillable_program, charp); + STORE_FIELD(hv, conf, unkillable_timeout, uint16_t); return 0; } diff --git a/contribs/perlapi/libslurm-perl/job.c b/contribs/perlapi/libslurm-perl/job.c index b84d60e30..f08d76db9 100644 --- a/contribs/perlapi/libslurm-perl/job.c +++ b/contribs/perlapi/libslurm-perl/job.c @@ -78,7 +78,6 @@ job_info_to_hv(job_info_t* job_info, HV* hv) STORE_FIELD(hv, job_info, job_min_cores, uint16_t); STORE_FIELD(hv, job_info, job_min_threads, uint16_t); STORE_FIELD(hv, job_info, job_min_memory, uint32_t); - STORE_FIELD(hv, job_info, job_max_memory, uint32_t); STORE_FIELD(hv, job_info, job_min_tmp_disk, uint32_t); if(job_info->req_nodes) STORE_FIELD(hv, job_info, req_nodes, charp); @@ -88,7 +87,7 @@ job_info_to_hv(job_info_t* job_info, HV* hv) /* TODO: exc_node_inx */ if(job_info->features) STORE_FIELD(hv, job_info, features, charp); - STORE_FIELD(hv, job_info, dependency, uint32_t); + STORE_FIELD(hv, job_info, dependency, charp); STORE_FIELD(hv, job_info, exit_code, uint32_t); if(job_info->account) STORE_FIELD(hv, job_info, account, charp); diff --git a/contribs/perlapi/libslurm-perl/launch.c b/contribs/perlapi/libslurm-perl/launch.c index f17beae54..e5901cb97 100644 --- a/contribs/perlapi/libslurm-perl/launch.c +++ b/contribs/perlapi/libslurm-perl/launch.c @@ -29,6 +29,7 @@ hv_to_slurm_step_ctx_params(HV* hv, slurm_step_ctx_params_t* params) FETCH_FIELD(hv, params, node_list, charp, FALSE); FETCH_FIELD(hv, params, network, charp, FALSE); FETCH_FIELD(hv, params, overcommit, bool, FALSE); + FETCH_FIELD(hv, params, mem_per_task, uint16_t, FALSE); return 0; } @@ -54,7 +55,7 @@ hv_to_slurm_step_launch_params(HV* hv, slurm_step_launch_params_t* params) argv_av = (AV*)SvRV(*svp); params->argc = av_len(argv_av) + 1; if (params->argc > 0) { - Newz(0, params->argv, params->argc + 1, char*); + Newz(0, params->argv, (int32_t)(params->argc + 1), char*); for(i = 0; i < params->argc; i ++) { if((svp = av_fetch(argv_av, i, FALSE))) *(params->argv + i) = (char*) SvPV_nolen(*svp); @@ -95,6 +96,7 @@ hv_to_slurm_step_launch_params(HV* hv, slurm_step_launch_params_t* params) } FETCH_FIELD(hv, params, cwd, charp, FALSE); FETCH_FIELD(hv, params, user_managed_io, bool, FALSE); + FETCH_FIELD(hv, params, msg_timeout, uint32_t, FALSE); FETCH_FIELD(hv, params, buffered_stdio, bool, FALSE); FETCH_FIELD(hv, params, labelio, bool, FALSE); FETCH_FIELD(hv, params, remote_output_filename, charp, FALSE); diff --git a/contribs/perlapi/libslurm-perl/partition.c b/contribs/perlapi/libslurm-perl/partition.c index 9dd8ea102..f0f2b30de 100644 --- a/contribs/perlapi/libslurm-perl/partition.c +++ b/contribs/perlapi/libslurm-perl/partition.c @@ -30,7 +30,7 @@ part_info_to_hv(partition_info_t* part_info, HV* hv) STORE_FIELD(hv, part_info, default_part, uint16_t); STORE_FIELD(hv, part_info, hidden, uint16_t); STORE_FIELD(hv, part_info, root_only, uint16_t); - STORE_FIELD(hv, part_info, shared, uint16_t); + STORE_FIELD(hv, part_info, max_share, uint16_t); STORE_FIELD(hv, part_info, state_up, uint16_t); if (part_info->nodes) STORE_FIELD(hv, part_info, nodes, charp); @@ -81,7 +81,7 @@ hv_to_update_part_msg(HV* hv, update_part_msg_t* part_msg) FETCH_FIELD(hv, part_msg, default_part, uint16_t, FALSE); FETCH_FIELD(hv, part_msg, hidden, uint16_t, 
FALSE); FETCH_FIELD(hv, part_msg, root_only, uint16_t, FALSE); - FETCH_FIELD(hv, part_msg, shared, uint16_t, FALSE); + FETCH_FIELD(hv, part_msg, max_share, uint16_t, FALSE); FETCH_FIELD(hv, part_msg, state_up, uint16_t, FALSE); FETCH_FIELD(hv, part_msg, nodes, charp, FALSE); /* node_inx not used */ diff --git a/contribs/perlapi/libslurm-perl/trigger.c b/contribs/perlapi/libslurm-perl/trigger.c index 29dae4119..56f65162a 100644 --- a/contribs/perlapi/libslurm-perl/trigger.c +++ b/contribs/perlapi/libslurm-perl/trigger.c @@ -16,7 +16,7 @@ int trigger_info_to_hv(trigger_info_t* trigger_info, HV* hv) { STORE_FIELD(hv, trigger_info, trig_id, uint32_t); - STORE_FIELD(hv, trigger_info, res_type, uint8_t); + STORE_FIELD(hv, trigger_info, res_type, uint16_t); if(trigger_info->res_id) STORE_FIELD(hv, trigger_info, res_id, charp); STORE_FIELD(hv, trigger_info, trig_type, uint16_t); diff --git a/contribs/phpext/Makefile.am b/contribs/phpext/Makefile.am new file mode 100644 index 000000000..365ea0c5e --- /dev/null +++ b/contribs/phpext/Makefile.am @@ -0,0 +1,48 @@ +AUTOMAKE_OPTIONS = foreign +php_dir = slurm_php +phpize = /usr/bin/phpize + +if HAVE_AIX + add_flags = "CC=\"$(CC)\" CCFLAGS=\"-g -static $(CFLAGS)\"" +else + add_flags = "CC=\"$(CC)\" LD=\"$(CC) $(CFLAGS)\" CCFLAGS=\"-g -static $(CFLAGS)\"" +endif + +all-local: + @cd $(php_dir) && \ + if [ ! -f Makefile ]; then \ + if [ ! -f configure ]; then \ + $(phpize); \ + fi && \ + ./configure ; \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi \ + fi && \ + $(MAKE) $(add_flags); \ + cd ..; + +install-exec-local: + @cd $(php_dir) && \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi && \ + $(MAKE) INSTALL_ROOT=$(DESTDIR) $(add_flags) install && \ + cd ..; + +clean-generic: + @cd $(php_dir); \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi && \ + $(MAKE) clean; \ + cd ..; + +distclean-generic: + @cd $(php_dir); \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi && \ + $(MAKE) clean; \ + $(phpize) --clean; \ + cd ..; diff --git a/contribs/phpext/Makefile.in b/contribs/phpext/Makefile.in new file mode 100644 index 000000000..6f94c4de3 --- /dev/null +++ b/contribs/phpext/Makefile.in @@ -0,0 +1,445 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = contribs/phpext +DIST_COMMON = README $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +SOURCES = +DIST_SOURCES = +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = 
@MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +php_dir = slurm_php +phpize = /usr/bin/phpize +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign contribs/phpext/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign contribs/phpext/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags: TAGS +TAGS: + +ctags: CTAGS +CTAGS: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile all-local +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-exec-local + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am all-local check check-am clean clean-generic \ + clean-libtool distclean distclean-generic distclean-libtool \ + distdir dvi dvi-am html html-am info info-am install \ + install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-exec-local \ + install-html install-html-am install-info install-info-am \ + install-man install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am uninstall uninstall-am + + +@HAVE_AIX_TRUE@ add_flags = "CC=\"$(CC)\" CCFLAGS=\"-g -static $(CFLAGS)\"" +@HAVE_AIX_FALSE@ add_flags = "CC=\"$(CC)\" LD=\"$(CC) $(CFLAGS)\" CCFLAGS=\"-g -static $(CFLAGS)\"" + +all-local: + @cd $(php_dir) && \ + if [ ! -f Makefile ]; then \ + if [ ! -f configure ]; then \ + $(phpize); \ + fi && \ + ./configure ; \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi \ + fi && \ + $(MAKE) $(add_flags); \ + cd ..; + +install-exec-local: + @cd $(php_dir) && \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi && \ + $(MAKE) INSTALL_ROOT=$(DESTDIR) $(add_flags) install && \ + cd ..; + +clean-generic: + @cd $(php_dir); \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi && \ + $(MAKE) clean; \ + cd ..; + +distclean-generic: + @cd $(php_dir); \ + if [ ! -f Makefile ]; then \ + exit 0;\ + fi && \ + $(MAKE) clean; \ + $(phpize) --clean; \ + cd ..; +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/contribs/phpext/README b/contribs/phpext/README new file mode 100644 index 000000000..30b88ccf7 --- /dev/null +++ b/contribs/phpext/README @@ -0,0 +1,21 @@ +README for the php extension for SLURM. + +This was made primarily for SLURMWEB to connect to slurm. Any extra +interactions are welcome. + +to compile... + +phpize +./configure +make + +this should make modules/slurm_php.so + +make install as root +should install this where your extensions are in your php install + +in your php.ini file add the line + +extension=slurm.so + +and you should be able to use the functions here. 
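For illustration only, a minimal PHP script exercising the two functions exported by slurm_php.c elsewhere in this patch (hello_world() and print_partitions()) might look like the following sketch; the file name test.php is just a placeholder.

<?php
// test.php - quick smoke test for the slurm extension (illustrative sketch)
echo hello_world();        // returns the string "Hello World\n"
$rc = print_partitions();  // loads partition info from slurmctld, returns an error code
var_dump($rc);             // 0 (SLURM_SUCCESS) if the controller could be reached

Running "php test.php" with the extension loaded should confirm that the module builds and can query the controller.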
diff --git a/contribs/phpext/slurm_php/config.m4.in b/contribs/phpext/slurm_php/config.m4.in new file mode 100644 index 000000000..51f804857 --- /dev/null +++ b/contribs/phpext/slurm_php/config.m4.in @@ -0,0 +1,59 @@ +##***************************************************************************** +## $Id: config.m4 8863 2006-08-10 18:47:55Z da $ +##***************************************************************************** +# AUTHOR: +# Danny Auble <da@llnl.gov> +# +# DESCRIPTION: +# Use to make the php slurm extension +##***************************************************************************** +PHP_ARG_WITH(slurm, whether to use slurm, +[ --with-slurm SLURM install dir]) + +if test "$PHP_SLURM" != "no"; then + SLURMLIB_PATH="@prefix@/lib @top_builddir@/src/api/.libs" + SLURMINCLUDE_PATH="@prefix@/include" + SEARCH_FOR="libslurm.so" + + # --with-libslurm -> check with-path + + if test -r $PHP_SLURM/; then # path given as parameter + SLURM_DIR=$PHP_SLURM + SLURMLIB_PATH="$SLURM_DIR/lib" + else # search default path list + AC_MSG_CHECKING([for libslurm.so in default paths]) + for i in $SLURMLIB_PATH ; do + if test -r $i/$SEARCH_FOR; then + SLURM_DIR=$i + PHP_ADD_LIBPATH($i, SLURM_PHP_SHARED_LIBADD) + + AC_MSG_RESULT([found in $i]) + + fi + done + fi + + if test -z "$SLURM_DIR"; then + AC_MSG_RESULT([not found]) + AC_MSG_ERROR([Please reinstall the slurm distribution]) + fi + + PHP_ADD_INCLUDE($SLURMINCLUDE_PATH) + PHP_ADD_INCLUDE(@top_srcdir@) + + LIBNAME=slurm + LIBSYMBOL=slurm_acct_storage_init + + PHP_CHECK_LIBRARY($LIBNAME, $LIBSYMBOL, + [PHP_ADD_LIBRARY($LIBNAME, , SLURM_PHP_SHARED_LIBADD) + AC_DEFINE(HAVE_SLURMLIB,1,[ ])], + [AC_MSG_ERROR([wrong libslurm version or lib not found])], + [-L$SLURM_DIR -lslurm]) + + + PHP_SUBST(SLURM_PHP_SHARED_LIBADD) + + AC_DEFINE(HAVE_SLURM_PHP, 1, [Whether you have SLURM]) + #PHP_EXTENSION(slurm_php, $ext_shared) + PHP_NEW_EXTENSION(slurm_php, @top_srcdir@/contribs/phpext/slurm_php/slurm_php.c, $ext_shared) +fi diff --git a/contribs/phpext/slurm_php/slurm_php.c b/contribs/phpext/slurm_php/slurm_php.c new file mode 100644 index 000000000..14e9690bd --- /dev/null +++ b/contribs/phpext/slurm_php/slurm_php.c @@ -0,0 +1,100 @@ +/*****************************************************************************\ + * slurm_php.c - php interface to slurm. + * + * $Id: account_gold.c 13061 2008-01-22 21:23:56Z da $ + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "php.h" +#include "slurm_php.h" +#include "slurm/slurm.h" +#include "src/common/list.h" + +static function_entry slurm_functions[] = { + PHP_FE(hello_world, NULL) + PHP_FE(print_partitions, NULL) + {NULL, NULL, NULL} +}; + +zend_module_entry slurm_php_module_entry = { +#if ZEND_MODULE_API_NO >= 20010901 + STANDARD_MODULE_HEADER, +#endif + SLURM_PHP_EXTNAME, + slurm_functions, + NULL, + NULL, + NULL, + NULL, + NULL, +#if ZEND_MODULE_API_NO >= 20010901 + SLURM_PHP_VERSION, +#endif + STANDARD_MODULE_PROPERTIES +}; + +#ifdef COMPILE_DL_SLURM_PHP +ZEND_GET_MODULE(slurm_php) +#endif + +PHP_FUNCTION(hello_world) +{ + RETURN_STRING("Hello World\n", 1); +} + +PHP_FUNCTION(print_partitions) +{ + List sinfo_list = NULL; + int error_code = SLURM_SUCCESS; + uint16_t show_flags = 0; + static partition_info_msg_t *new_part_ptr; + printf("hey\n"); + slurm_info("got here!"); + printf("hey\n"); + error_code = slurm_load_partitions((time_t) NULL, &new_part_ptr, + show_flags); + if (error_code) { + error("slurm_load_part"); + RETURN_INT(error_code); + } + +// sinfo_list = list_create(_sinfo_list_delete); + + RETURN_INT(error_code); +} diff --git a/src/srun/sigstr.c b/contribs/phpext/slurm_php/slurm_php.h similarity index 74% rename from src/srun/sigstr.c rename to contribs/phpext/slurm_php/slurm_php.h index f0e2418fc..d857934ae 100644 --- a/src/srun/sigstr.c +++ b/contribs/phpext/slurm_php/slurm_php.h @@ -1,10 +1,12 @@ /*****************************************************************************\ - * src/srun/sigstr.c - Function to convert signal to description + * slurm_php.h - php interface to slurm. + * + * $Id: slurm_php.h 13061 2008-01-22 21:23:56Z da $ ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by AUTHOR <AUTHOR@llnl.gov>. - * UCRL-CODE-226842. + * Written by Danny Auble <da@llnl.gov> * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -15,7 +17,7 @@ * any later version. 
* * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -34,32 +36,16 @@ * with SLURM; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ +#ifndef SLURM_PHP_H +#define SLURM_PHP_H 1 -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <string.h> -#include <sys/wait.h> - -#include "src/common/xassert.h" - -/* - * Get a definition for strsignal : - */ -#if defined (HAVE_DECL_STRSIGNAL) && !HAVE_DECL_STRSIGNAL -# ifndef strsignal - extern char *strsignal(int); -# endif -#endif /* defined HAVE_DECL_STRSIGNAL && !HAVE_DECL_STRSIGNAL */ - -char * -sigstr(int status) -{ - xassert(WIFSIGNALED(status)); - - return strsignal(WTERMSIG(status)); -} +#define SLURM_PHP_VERSION "1.0" +#define SLURM_PHP_EXTNAME "slurm" +PHP_FUNCTION(hello_world); +PHP_FUNCTION(print_partitions); +extern zend_module_entry slurm_php_module_entry; +#define phpext_slurm_php_ptr &slurm_php_module_entry +#endif diff --git a/contribs/time_login.c b/contribs/time_login.c index 27261599b..4c44f8d82 100644 --- a/contribs/time_login.c +++ b/contribs/time_login.c @@ -14,7 +14,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/contribs/torque/Makefile.in b/contribs/torque/Makefile.in index e4e159f44..048d4a630 100644 --- a/contribs/torque/Makefile.in +++ b/contribs/torque/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -94,6 +96,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -107,10 +110,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -130,7 +136,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -141,6 +150,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -156,6 +167,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -171,6 +183,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ diff --git a/contribs/torque/mpiexec.pl b/contribs/torque/mpiexec.pl index 622a1a496..37614a7fa 100755 --- a/contribs/torque/mpiexec.pl +++ b/contribs/torque/mpiexec.pl @@ -8,7 +8,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <auble1@llnl.gov>. -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/contribs/torque/pbsnodes.pl b/contribs/torque/pbsnodes.pl index 3b2a3b307..f02637f06 100755 --- a/contribs/torque/pbsnodes.pl +++ b/contribs/torque/pbsnodes.pl @@ -8,7 +8,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <auble1@llnl.gov>. -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/contribs/torque/qdel.pl b/contribs/torque/qdel.pl index 0d0649e14..79d248706 100755 --- a/contribs/torque/qdel.pl +++ b/contribs/torque/qdel.pl @@ -8,7 +8,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <auble1@llnl.gov>. -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. 
# For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/contribs/torque/qhold.pl b/contribs/torque/qhold.pl index 2b33a714e..731a7c0c5 100755 --- a/contribs/torque/qhold.pl +++ b/contribs/torque/qhold.pl @@ -9,7 +9,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <auble1@llnl.gov>. -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/contribs/torque/qrls.pl b/contribs/torque/qrls.pl index bad8a2e94..fd24b29fb 100755 --- a/contribs/torque/qrls.pl +++ b/contribs/torque/qrls.pl @@ -8,7 +8,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <auble1@llnl.gov>. -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/contribs/torque/qstat.pl b/contribs/torque/qstat.pl index 20f457576..7e302cba4 100755 --- a/contribs/torque/qstat.pl +++ b/contribs/torque/qstat.pl @@ -8,7 +8,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <auble1@llnl.gov>. -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/contribs/torque/qsub.pl b/contribs/torque/qsub.pl index 52d332a43..cee2d06d9 100755 --- a/contribs/torque/qsub.pl +++ b/contribs/torque/qsub.pl @@ -8,7 +8,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <auble1@llnl.gov>. -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/doc/Makefile.in b/doc/Makefile.in index cb6d6cca8..73ab4da8b 100644 --- a/doc/Makefile.in +++ b/doc/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -99,6 +101,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -112,10 +115,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -135,7 +141,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -146,6 +155,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -161,6 +172,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -176,6 +188,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -354,8 +367,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -380,8 +393,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -391,13 +404,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/doc/html/Makefile.am b/doc/html/Makefile.am index c7b713fd7..6b9fae583 100644 --- a/doc/html/Makefile.am +++ b/doc/html/Makefile.am @@ -2,19 +2,24 @@ htmldir = ${prefix}/share/doc/@PACKAGE@-@VERSION@/html generated_html = \ + accounting.html \ api.html \ authplugins.html \ big_sys.html \ bluegene.html \ checkpoint_plugins.html \ cons_res.html \ + cons_res_share.html \ + crypto_plugins.html \ dist_plane.html \ documentation.html \ download.html \ faq.html \ + gang_scheduling.html \ help.html \ ibm.html \ - jobacctplugins.html \ + jobacct_gatherplugins.html \ + jobacct_storageplugins.html \ jobcompplugins.html \ mail.html \ maui.html \ @@ -26,6 +31,7 @@ generated_html = \ platforms.html \ plugins.html \ power_save.html \ + preempt.html \ proctrack_plugins.html \ programmer_guide.html \ publications.html \ diff --git a/doc/html/Makefile.in b/doc/html/Makefile.in index 14562fb45..54b78c500 100644 --- a/doc/html/Makefile.in +++ b/doc/html/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -98,6 +100,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -111,10 +114,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -134,7 +140,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -145,6 +154,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -160,6 +171,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -175,6 +187,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -232,19 +245,24 @@ 
target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ generated_html = \ + accounting.html \ api.html \ authplugins.html \ big_sys.html \ bluegene.html \ checkpoint_plugins.html \ cons_res.html \ + cons_res_share.html \ + crypto_plugins.html \ dist_plane.html \ documentation.html \ download.html \ faq.html \ + gang_scheduling.html \ help.html \ ibm.html \ - jobacctplugins.html \ + jobacct_gatherplugins.html \ + jobacct_storageplugins.html \ jobcompplugins.html \ mail.html \ maui.html \ @@ -256,6 +274,7 @@ generated_html = \ platforms.html \ plugins.html \ power_save.html \ + preempt.html \ proctrack_plugins.html \ programmer_guide.html \ publications.html \ diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml new file mode 100644 index 000000000..af84b48dd --- /dev/null +++ b/doc/html/accounting.shtml @@ -0,0 +1,478 @@ +<!--#include virtual="header.txt"--> + +<h1>Accounting</h1> +<p>SLURM collects accounting information for every job and job step +executed. +Information is available about both currently executing jobs and +jobs which have already terminated and can be viewed using the +<b>sacct</b> command. +Resource usage is reported for each task and this can be useful to +detect load imbalance between the tasks. +SLURM version 1.2 and earlier supported the storage of accounting +records to a text file. +Beginning in SLURM version 1.3 accounting records can be written to +a database. </p> + +<p>There are three distinct plugin types associated with resource accounting. +The configuration parameters associated with these plugins include: +</p><ul> +<li><b>JobCompType</b> controls how job completion information is +recorded. This can be used to record basic job information such +as job name, user name, allocated nodes, start time, completion +time, exit status, etc. If the preservation of only basic job +information is required, this plugin should satisfy your needs +with minimal overhead. You can store this information in a +text file, <a href="http://www.mysql.com/">MySQL</a> or +<a href="http://www.postgresql.org/">PostgreSQL</a> +database optionally using either +<a href="http://www.clusterresources.com/pages/products/gold-allocation-manager.php">Gold</a> +or SlurmDBD for added database security.</li> +<li><b>JobAcctGatherType</b> is operating system dependent and +controls what mechanisms are used to collect accounting information. +Supported values are <i>jobacct_gather/aix</i>, <i>jobacct_gather/linux</i> +and <i>jobacct_gather/none</i> (no information collected).</li> +<li><b>AccountingStorageType</b> controls how detailed job and job +step information is recorded. You can store this information in a +text file, <a href="http://www.mysql.com/">MySQL</a> or +<a href="http://www.postgresql.org/">PostgreSQL</a> +database optionally using either +<a href="http://www.clusterresources.com/pages/products/gold-allocation-manager.php">Gold</a> +or SlurmDBD for added security.</li> +</ul> + +<p>Storing the information into text files is very simple. +Just configure the appropriate plugin (e.g. +<i>AccountingStorageType=accounting_storage/filetxt</i> and/or +<i>JobCompType=jobcomp/filetxt</i>) and then specify the +pathname of the file (e.g. +<i>AccountingStorageLoc=/var/log/slurm/accounting</i> and/or +<i>JobCompLoc=/var/log/slurm/job_completions</i>). +Use the <i>logrotate</i> or similar tool to prevent the +log files from getting too large. 
+Send a SIGHUP signal to the <i>slurmctld</i> daemon +after moving the files, but before compressing them so +that new log files will be created.</p> + +<p>Storing the data directly into a database from SLURM may seem +attractive, but that requires the availability of user name and +password data not only for the SLURM control daemon (slurmctld), +but also user commands which need to access the data (sacct and +sacctmgr). +Making information available to all users makes database security +more difficult to provide; sending the data through an intermediate +daemon can provide better security. +Gold and SlurmDBD are two such services. +Our initial implementation relied upon Gold, but we found its +performance to be inadequate for our needs and developed SlurmDBD. +SlurmDBD (SLURM Database Daemon) is written in C, multi-threaded, +secure, and considerably faster than Gold. +The configuration required to use SlurmDBD will be described below. +Direct database or Gold use would be similar.</p> + +<p>Note that SlurmDBD relies upon existing SLURM plugins +for authentication and database use, but the other SLURM +commands and daemons are not required on the host where +SlurmDBD is installed. Install the <i>slurmdbd</i> and +<i>slurm-plugins</i> RPMs on the computer where SlurmDBD +is to execute.</p> + +<h2>Infrastructure</h2> + +<p>If the SlurmDBD is executed on a different cluster than the +one managed by SLURM, possibly to collect data from multiple +clusters in a single location, there are some constraints on +the user space. +The user ID associated with <i>SlurmUser</i> must be uniform +across all clusters. +Accounting is maintained by user name (not user ID), but a +given user name should refer to the same person across all +of the computers.</p> + +<p>The best way to ensure security of the data is by authenticating +communications to the SlurmDBD, and we recommend +<a href="http://home.gna.org/munge/">Munge</a> for that purpose. +Munge was designed to support authentication within a cluster. +If you have one cluster managed by SLURM and execute the SlurmDBD +on that one cluster, the normal Munge configuration will suffice. +Otherwise Munge should be installed on all nodes of all +SLURM managed clusters plus the machine where SlurmDBD executes. +You then have a choice of either having a single Munge key for +all of these computers or maintaining a unique key for each of the +clusters plus a second key for communications between the clusters +for better security. +Munge enhancements are planned to support two keys within a single +configuration file, but presently two different daemons must be +started with different configuration files to support two different +keys. +If a separate Munge daemon is configured to provide enterprise-wide +authentication, it will have a unique named pipe configured for +communications. +The pathname of this pipe will be needed in the SLURM and SlurmDBD +configuration files (slurm.conf and slurmdbd.conf respectively; +more details are provided below).</p> + +<h2>SLURM Configuration</h2> + +<p>Several SLURM configuration parameters must be set to support +archiving information in SlurmDBD. SlurmDBD has a separate configuration +file which is documented in a separate section. +Note that you can write accounting information to SlurmDBD +while job completion records are written to a text file or +not maintained at all. +If you don't set the configuration parameters that begin +with "JobComp" then job completion records will not be recorded.</p>
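As a concrete illustration of the parameters described in the list that follows, a slurm.conf excerpt for a cluster sending both accounting and job completion records to SlurmDBD might look roughly like this sketch; the host name "dbd-host" is a placeholder, "snowflake" is the example cluster name used later on this page, and 6819 is simply the default SlurmDBD port mentioned under DbdPort below.

<pre>
ClusterName=snowflake
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageHost=dbd-host
AccountingStoragePort=6819
AccountingStorageEnforce=1
JobCompType=jobcomp/slurmdbd
JobCompHost=dbd-host
JobCompPort=6819
</pre>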
+ +<ul> +<li><b>AccountingStorageEnforce</b>: +If you want to prevent users from running jobs if their <i>association</i> +(a combination of cluster, account, and user names; for more +flexibility in accounting the association can also include a partition +name, but it is not necessary) is not in the database, then set this +to "1". Otherwise jobs will be executed based upon policies configured +in SLURM on each cluster.</li> + +<li><b>AccountingStorageHost</b>: The name or address of the host where SlurmDBD executes.</li> + +<li><b>AccountingStoragePass</b>: If using SlurmDBD with a second Munge +daemon, store the pathname of the named socket used by Munge to provide +enterprise-wide authentication. Otherwise the default Munge daemon will be used.</li> + +<li><b>AccountingStoragePort</b>: +The network port that SlurmDBD accepts communication on.</li> + +<li><b>AccountingStorageType</b>: +Set to "accounting_storage/slurmdbd".</li> + +<li><b>ClusterName</b>: +Set to a unique name for each Slurm-managed cluster so that +accounting records from each can be identified.</li> + +<li><b>JobCompHost</b>: +The name or address of the host where SlurmDBD executes.</li> + +<li><b>JobCompPass</b>: +If using SlurmDBD with a second Munge daemon, store the pathname of +the named socket used by Munge to provide enterprise-wide authentication. +Otherwise the default Munge daemon will be used.</li> + +<li><b>JobCompPort</b>: +The network port that SlurmDBD accepts communication on.</li> + +<li><b>JobCompType</b>: +Set to "jobcomp/slurmdbd".</li> +</ul> + +<h2>SlurmDBD Configuration</h2> + +<p>SlurmDBD requires its own configuration file called "slurmdbd.conf". +This file should exist only on the computer where SlurmDBD executes and +should only be readable by the user that executes SlurmDBD (e.g. "slurm"). +This file should be protected from unauthorized access since it +contains a database login name and password. +See "man slurmdbd.conf" for a more complete description of the +configuration parameters. +Some of the more important parameters are described below.</p>
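Again purely as a sketch, a matching slurmdbd.conf using MySQL storage might look like the excerpt below. Only the values taken from the parameter descriptions that follow are meaningful: "lx0001" as an example DbdHost, 6819 as the default DbdPort, "auth/munge" as the recommended AuthType, "slurm" as the SlurmUser, and "slurm_acct_db" as the default StorageLoc. The database is assumed to run on the same host as slurmdbd (StorageHost=localhost), and the log file path and database credentials are placeholders.

<pre>
AuthType=auth/munge
DbdHost=lx0001
DbdPort=6819
SlurmUser=slurm
LogFile=/var/log/slurm/slurmdbd.log
StorageType=accounting_storage/mysql
StorageHost=localhost
StorageLoc=slurm_acct_db
StorageUser=slurm
StoragePass=changeme
</pre>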
+ +<ul> +<li><b>AuthInfo</b>: +If using SlurmDBD with a second Munge daemon, store the pathname of +the named socket used by Munge to provide enterprise-wide authentication. +Otherwise the default Munge daemon will be used.</li> + +<li><b>AuthType</b>: +Define the authentication method for communications between SLURM +components. A value of "auth/munge" is recommended.</li> + +<li><b>DbdHost</b>: +The name of the machine where the Slurm Database Daemon is executed. +This should be a node name without the full domain name (e.g. "lx0001"). +This value must be specified.</li> + +<li><b>DbdPort</b>: +The port number that the Slurm Database Daemon (slurmdbd) listens +to for work. The default value is SLURMDBD_PORT as established at system +build time. If none is explicitly specified, it will be set to 6819. +This value must be equal to the <i>SlurmDbdPort</i> parameter in the +slurm.conf file.</li> + +<li><b>LogFile</b>: +Fully qualified pathname of a file into which the Slurm Database Daemon's +logs are written. +The default value is none (performs logging via syslog).</li> + +<li><b>PluginDir</b>: +Identifies the places in which to look for SLURM plugins. +This is a colon-separated list of directories, like the PATH +environment variable. +The default value is "/usr/local/lib/slurm".</li> + +<li><b>SlurmUser</b>: +The name of the user that the <i>slurmctld</i> daemon executes as. +This user must exist on the machine executing the Slurm Database Daemon +and have the same user ID as on the hosts on which <i>slurmctld</i> executes. +For security purposes, a user other than "root" is recommended. +The default value is "root".</li> + +<li><b>StorageHost</b>: +Define the name of the host where the database that will store the data +is running. +Ideally this should be the host on which slurmdbd executes.</li> + +<li><b>StorageLoc</b>: +Specifies the name of the database where accounting +records are written; the default database name is +slurm_acct_db. Note that the name cannot have a '/' in it or the +default will be used.</li> + +<li><b>StoragePass</b>: +Define the password used to gain access to the database to store +the job accounting data.</li> + +<li><b>StoragePort</b>: +Define the port on which the database is listening.</li> + +<li><b>StorageType</b>: +Define the accounting storage mechanism type. +Acceptable values at present include +"accounting_storage/gold", "accounting_storage/mysql", and +"accounting_storage/pgsql". +The value "accounting_storage/gold" indicates that account records +will be written to Gold, which maintains its own database. +Use of Gold is not recommended due to reduced performance without +providing any additional security. +The value "accounting_storage/mysql" indicates that accounting records +should be written to a MySQL database specified by the +<i>StorageLoc</i> parameter. +The value "accounting_storage/pgsql" indicates that accounting records +should be written to a PostgreSQL database specified by the +<i>StorageLoc</i> parameter. +This value must be specified.</li> + +<li><b>StorageUser</b>: +Define the name of the user used to connect to the database +to store the job accounting data.</li> +</ul> + +<h2>Tools</h2> + +<p>There are two tools available to work with accounting data, +<b>sacct</b> and <b>sacctmgr</b>. +Both of these tools will get or set data through the SlurmDBD daemon. +Sacct is used to generate accounting reports for both running and +completed jobs. +Sacctmgr is used to manage associations in the database: +add or remove clusters, add or remove users, etc. +See the man pages for each command for more information.</p> + +<p>Web interfaces with graphical output are currently under +development and should be available in the summer of 2008. +A tool to report node state information is also under development.</p> + +<h2>Database Configuration</h2> + +<p>Accounting records are maintained based upon what we refer +to as an <i>Association</i>, +which consists of cluster, account, and user names. For +more flexibility in accounting the association can also include a +partition name, but it is not necessary. Use the <i>sacctmgr</i> +command to create and manage these records. Accounting associations +must be set up in a specific order: you must define clusters before you add +accounts, and you must add accounts before you can add users.
+<h2>Tools</h2>
+
+<p>There are two tools available to work with accounting data,
+<b>sacct</b> and <b>sacctmgr</b>.
+Both of these tools get or set data through the SlurmDBD daemon.
+Sacct is used to generate accounting reports for both running and
+completed jobs.
+Sacctmgr is used to manage associations in the database:
+add or remove clusters, add or remove users, etc.
+See the man pages for each command for more information.</p>
+
+<p>Web interfaces with graphical output are currently under
+development and should be available in the summer of 2008.
+A tool to report node state information is also under development.</p>
+
+<h2>Database Configuration</h2>
+
+<p>Accounting records are maintained based upon what we refer
+to as an <i>Association</i>,
+which consists of the cluster, account, and user names. For
+more flexibility in accounting, the association can also include a
+partition name, but that is not necessary. Use the <i>sacctmgr</i>
+command to create and manage these records. There is a required order
+when setting up accounting associations: you must define clusters before
+you add accounts, and you must add accounts before you can add users.</p>
+
+<p>For example, to add a cluster named "snowflake" to the database,
+execute this line:</p>
+<pre>
+sacctmgr add cluster snowflake
+</pre>
+
+<p>Add accounts "none" and "test" to cluster "snowflake" with an execute
+line of this sort:</p>
+<pre>
+sacctmgr add account none,test Cluster=snowflake \
+   Description="none" Organization="none"
+</pre>
+
+<p>If there are more clusters to which you want to add these accounts,
+you can either omit the cluster option, which will add the accounts to
+all clusters in the system, or list the desired cluster names,
+comma-separated, in the cluster option.
+Note that multiple accounts can be added at the same time
+by comma-separating the names.
+A Description of the account and the Organization to which it belongs
+must be specified.
+These terms can be used later to generate accounting reports.
+Accounts may be arranged in a hierarchical fashion; for example, accounts
+<i>chemistry</i> and <i>physics</i> may be children of the account <i>science</i>.
+The hierarchy may have an arbitrary depth.
+To do this, one only needs to specify the <i>parent=</i> option in the add
+account line.
+For the example above, execute:</p>
+<pre>
+sacctmgr add account science \
+   Description="science accounts" Organization=science
+sacctmgr add account chemistry,physics parent=science \
+   Description="physical sciences" Organization=science
+</pre>
+
+<p>Add users to accounts using similar syntax.
+For example, to permit user <i>da</i> to execute jobs on all clusters
+with a default account of <i>test</i>, execute:</p>
+<pre>
+sacctmgr add user da default=test
+</pre>
+<p>If <b>AccountingStorageEnforce=1</b> is configured in the slurm.conf of
+the cluster <i>snowflake</i>, then user <i>da</i> would be
+allowed to run in account <i>test</i> and any other accounts added
+in the future.
+Any attempt to use other accounts will result in the job being
+aborted.
+Account <i>test</i> will be the default if the user does not specify one
+on the srun line.</p>
+
+<p>Partition names can also be added to an "add user" command with the
+Partition='partitionname' option to specify an association specific to
+a Slurm partition, as shown in the sketch below.</p>
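+
+<p>For instance, a partition-specific association could be created with
+a line such as the following. The partition name "debug" is only an
+illustration; use a partition defined in your slurm.conf.</p>
+<pre>
+# "debug" is an example partition name
+sacctmgr add user da account=test partition=debug
+</pre>
+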
+<!-- For future use
+<h2>Cluster Options</h2>
+
+<p>When either adding or modifying a cluster, these are the options
+available with sacctmgr:
+<ul>
+<li><b>Name=</b> Cluster name</li>
+
+<li><b>Fairshare=</b> Used for determining priority</li>
+
+<li><b>MaxJobs=</b> Limit number of jobs a user can run in this account</li>
+
+<li><b>MaxNodes=</b> Limit number of nodes a user can allocate in this
+account</li>
+
+<li><b>MaxWall=</b> Limit wall clock time a job can run</li>
+
+<li><b>MaxCPUSecs=</b> Limit cpu seconds a job can run</li>
+</ul>
+!-->
+
+<h2>Account Options</h2>
+
+<p>When either adding or modifying an account, the following sacctmgr
+options are available:
+<ul>
+<li><b>Description=</b> Description of the account. (Required on creation)</li>
+
+<li><b>Organization=</b> Organization of the account. (Required on creation)</li>
+
+<li><b>Name=</b> Name of the account</li>
+
+<li><b>Cluster=</b> Only add this account to these clusters.
+The account is added to all defined clusters by default.</li>
+
+<li><b>Parent=</b> Make this account a child of this other account.</li>
+
+<!-- For future use
+<li><b>QOS=</b> Quality of Service</li>
+
+<li><b>Fairshare=</b> Used for determining priority</li>
+
+<li><b>MaxJobs=</b> Limit number of jobs a user can run in this account</li>
+
+<li><b>MaxNodes=</b> Limit number of nodes a user can allocate in this account</li>
+
+<li><b>MaxWall=</b> Limit wall time a job can run</li>
+
+<li><b>MaxCPUSecs=</b> Limit cpu seconds a job can run</li>
+!-->
+</ul>
+
+<h2>User Options</h2>
+
+<p>When either adding or modifying a user, the following sacctmgr
+options are available (see the sketch after this list):
+
+<ul>
+<li><b>Name=</b> User name</li>
+
+<li><b>DefaultAccount=</b> Default account for the user, used when no
+account is specified at job submission. (Required on creation)</li>
+
+<li><b>AdminLevel=</b> Grants accounting administration privileges to
+this user. Valid options are:
+<ul>
+<li>None</li>
+<li>Operator: can add, modify, and remove users, and add other operators</li>
+<li>Admin: In addition to operator privileges, these users can add, modify,
+and remove accounts and clusters</li>
+</ul></li>
+
+<li><b>Account=</b> Account(s) to add the user to</li>
+
+<li><b>Cluster=</b> Only add to accounts on these clusters (default is all clusters)</li>
+
+<li><b>Partition=</b> Name of the Slurm partition this association applies to</li>
+
+<!-- For future use
+<li><b>QOS=</b> Quality of Service</li>
+
+<li><b>Fairshare=</b> Used for determining priority</li>
+
+<li><b>MaxJobs=</b> Limit number of jobs a user can run in this account</li>
+
+<li><b>MaxNodes=</b> Limit number of nodes a user can allocate in this account</li>
+
+<li><b>MaxWall=</b> Limit wall time a job can run</li>
+
+<li><b>MaxCPUSecs=</b> Limit cpu seconds a job can run</li>
+!-->
+</ul>
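+
+<p>As a sketch combining several of these options, the line below would
+add user "brian" to cluster "snowflake" with a default account of
+"chemistry" and operator privileges. The user and account names are
+illustrative only.</p>
+<pre>
+# example names only; substitute your own user, account, and cluster
+sacctmgr add user brian DefaultAccount=chemistry Cluster=snowflake \
+   AdminLevel=Operator
+</pre>
+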
+<!-- For future use
+<h2>Limit enforcement</h2>
+
+<p>When limits are developed they will work in this order...
+If a user has a limit set SLURM will read in those,
+if not we will refer to the account associated with the job.
+If the account doesn't have the limit set we will refer to
+the cluster's limits.
+If the cluster doesn't have the limit set no limit will be enforced.
+!-->
+
+<h2>Modifying Entities</h2>
+
+<p>When modifying entities, you can specify many different options in
+SQL-like fashion, using key words like <i>where</i> and <i>set</i>.
+A typical execute line has the following form:</p>
+<pre>
+sacctmgr modify &lt;entity&gt; set &lt;options&gt; where &lt;options&gt;
+</pre>
+
+<p>For example:</p>
+<pre>
+sacctmgr modify user set default=none where default=test
+</pre>
+<p>will change all users with a default account of "test" to account "none".
+Once an entity has been added, modified, or removed, the change is
+sent to the appropriate SLURM daemons and is available for use
+immediately.</p>
+
+<h2>Removing Entities</h2>
+
+<p>Entities are removed with an execute line similar to the modify
+example above, but without the set options.
+For example, remove all users with a default account of "test" using the
+following execute line:</p>
+<pre>
+sacctmgr remove user where default=test
+</pre>
+
+<h2>Node State Information</h2>
+
+<p>Node state information is also recorded in the database.
+Whenever a node goes DOWN or becomes DRAINED, that event is
+logged along with the node's <i>Reason</i> field.
+This can be used to generate various reports.</p>
+
+<p style="text-align: center;">Last modified 25 March 2008</p>
+
+</body></html>
diff --git a/doc/html/arch.gif b/doc/html/arch.gif
index f605a1c5cc6ddbd4390fdeff5b500e2217abb0cc..1cae914e0d1a81f6f990484f1ca172b71e6fc669 100644
GIT binary patch
delta 38849
[base85-encoded binary delta omitted]
delta 27906
[base85-encoded binary delta omitted]
z>`$H}$$mLaEw4Q5DZ*Zw(vG~yZtWEtm+h^cfvl~Vec_%BrQWOWg>I*-?FVQQww6uW zR!ia;o9ROX@ROdt1(64Sz_vKx5M_I|#+tTkE59&p2CM7p`Et{;{@Y>!0VnVWG(Nx8 z45n-Omd$_ev<}SV*R0=~|BCD+O7jCC<(f+6zH08p+2^k5<vg$CX-${)z7Xx%^4&bQ zZT|AZj^{)zw?U8dcAg`t3Oz&(mp(7_cuwwF|CU9+6?96n6yO6|AO}{^16HsDj@}2T zT?QlV=q4}(1!D)H-J}&D1U&!+LU03d@3K|kq|JXbQc$o0ETH#2&;upV2Z3M)LBOmg z-~)DI27iy~fU@aR`k5=fI<OquB2v0Dp6a&U@gm~ByDjUzy$~Xg$$Q`jvI|02;0GfR z2)z>row%KJc>q6f)%<<U=N$90gWhu<oy1-PDQQJ1$=&?=<<V}?@=oo2yWrREqu=}y z$ccaZQ&I-}4eiDq?y~>W)YR_eCK>0(@6|5vw4?d%Nl*1Z&-`+U&`Wynmt6uvR0lmE zyEo9JsQm=4Oa@KL0$b7p0I>zm9UCP~V1biRAsjq%=#(f?WMRT5ekLv~Xc0~s9s=*| zcwkT<oF^qHln7xGWeJ4<_*}UdQ|3&Xa0Gt^x&jE04<CN|oH!u@f*w5!031b{ROwQt zO`Sf48dd64s#O8x;o}3QOhJJBh(MYLPr^WYkSY`~Ann==Wzr6Ckx)$o3<>WzNLv?< z8oUGABs7y(VGXqd3hRYKBJGQWV$w=bm}3CqxrN{Ug%hQ)--I~?AZB8CfSb;1EuVjG zY1Qg!%WdDr9Z1)1|6jd)(_#)hS>Xu7rV;ZNz!;9>0gyZEmfP1fX3dvNgDy-Oj%3qs z)2havUHf*`3L%DsSfWM3BoN0J1d?#fgYj_e*x;kE;0d_}O)MmHLpq#-C4MBVqd)Ov zaw5Kj=BuxS3f`b7Jcv>{NufY$qA-6ULCnDEk3V|)iH{NXfZ%}uxFfMd6Hhb|f)oA_ zWF|qPI01sA^6<$IKLD|-MxPWqEiXJkc#E{agu5dGwK7mhF$_j3<H)h#0M4@C)QSPH zv<^^+iL~}3?tm>q!_5IU!We)-ILI7eBs&@yfCGidtggq@P)y1X9e#*&vLAng+-LwI z+59ZY&zMvTF~$F?Gc>I#`xF3yEVq19Ed;*g3=T8IB-2bZ)nwDngwU*REjmXf6-9-> z3y*^T;7DVoH+DGTNd)(bC?PT+GQrF*D9F`TlUC)=yoBsX)dz*XD1iv~YK7xggUl?+ zgpo>G>7?*l@}tFza029yD13kFgAYDH)FY2l&qX)gQS*SH4;*K@!UsOgT?ztipOizU zI0IFKgyI;K%!n-2nm{2u5L!qI31G7n(844tZBn}wE0`fE*sPc#IAF?6y0Pv;3=bm_ zlLFX;{!p&R0p=uC;~x3-R|0?s5ZGeD5;}OIFeDIeBL_}{c)9_+#Myt}jHCED<AJci zqhp*0av<c9{}n2EB0wCrZUA++=9($*z6-CtgsKRS;r@KE;)!0Hg=oQqT9ww<5_)zb zq(1<(0X(sJNC{Unopqa4ks7oXLX=X=1BZ*E=(=&oABPGnKemdgD?j#_>TU{Et_z_j zC0leiwHUr5g@mNg0fm3i;gI7FhgVMwGrN?wQ~&`c*e|og6uN^1EEmcxWF1cb0foL) z7J%hZ(PFK0jfGxa5XA;C-L4eILS6NR<fu&1iAa#$J>wK|tGL{oLvA47Cko!+;o&G5 z1>}{7ynYhB>w6{oCVC<SW!)2yN!CV&zDegjabsHvjFpfZ2n&A>b#TxEgB6|uqQnH! zfgl1)VmSW?%_$Fw(^f)cWIqy?kWP9yfe%7>5kdT+4<ocnA7rvWIPB{HeUX3!u*RCV z<WMas+gERp_c9`?C5IB&mjfiAmLS@$I7<`&5{Vcm`q8P0H++l)l+(l~{*a5N8R8dD zbHpS<v58qUq8NXxXhJqd<!jaoj|}-2v>oIx1Q(Q>2_TlikvL0iJ3vW$-eZywFw6-Q z=-VFp7#@fnP&(AAR^RAHBW+!wjhDnERN4iI6;31&@-m8c_F#sX00IC!5@Q(GvdS=8 zMvP$0lM}y~wOFz;i1SNGx>V`P&7857`+DUsds)O=$})eL0{B56{E$gzI^}B*Qi(y{ zm5@1T|BfWqI!Of|u(kjp5Dg)qfCS4!0nI@$JQ8f6N-D@JGr?^nyK+zmbIBAC!hsLX z#AgZfu!qUH=n8(gCy7olA(@zqd9!4q%7z(CVcN2xJoM#4d8o2Ao`{PRm8C+DDbbAr zQ=<{3<u8BpInuKvqy*qONeE!zEEBvd4H8U4-YAF}mP+SVcuXL}>Np?(>P;a##T890 zXj6kc5^U9C$vm4vqd<_<sp--~RtQ=-K;Qs|+@aCptSJJ6CPbJVO=w0t3f7R0^_afg zS4G2O7JAaFDRy$fFUvZ`ug3DB#k^}?!T8jCJ~MxUYLIEK$gs8qO6v*jY1Ok5Qa1!T zOM?FeA?Ht9aEB6DQ$Q&}tlth)0t3Ne4Dl?ST9Lw5zLs`7Dq_)0K6wJtmPij0w2&Xn zwUs}Vw1yg`D=dX**07RQuX4p`E7b}CA2ih|@oH^S4m!l%;`Nmt9qVs-8!FQ>^K0N- zNnC&Gz=IN;fI%e@+dyFAEQpn4RmduVMmA!D3j7ZaP=E--@})NSIxCvx^^bZWV$A@h zG@hcZZh)(@&l}O?pL_@<iuN!~vfv;o6B2}Sy$apnPM5i%l5Spe8^$%BU@OU03ItAe z)uW)Yl^J%fZxL!?-U3*_TT=*jQ*}3lH7I{2eK1o(;CoK~nM95XAp>kl0+AZuDrj<X z|4>SV+`sN_Zw~o8DMVcSVk##ED_U_8Ao_s0QoUp%R|u$txm@Ce;1$IWz3`b+X;(EW zbw+&%;WDA3xP$;=%ttw^ntRD&73;Z)Q?7EUo|(T2^ydaENY#T@a03&B2g)YkK{|iq zIcZ88qy#pAV?6toX-GFP(<?!0k^voP)1tPM3lW45VuY?dz<DB3xbtY?-~*+Yn8FU_ zGp^B`>x+JIP7tsrV5E`<%55ynyY};3E$ruqCR5I&ejNTF;$JxS4a!gs52`kORBQVe z)Tn|cs5fnGM}HdJnMT#9p`Fh?xXXXQ@^Wjb@~{B)D#QmMM#>(HE8*LwSlMTmZm$1# zyJ0Hk(#sV>xJ?|)>^1k<tO94K0M=c<H%?l{(L`0XKke;ALm8WshU3RUOX-UfvfSBD zDjX&r;42sVB2^8ux_~_qLG&TPP2s~;_i1b3GCac$|G93fBcX&BT_Hcv0mOfIscSy_ z9HA?Zwn-gr@{T)5<M9MHx2rC0ZhM>D97k!m?f7XayBX^EM4<{@^aratZVxUG#0jF3 zho~wO5K~?5nw8F5UhciQLB!UnjUE$Yjm7D>{r5%>AMvv{PTH9+T&nD7amhz>+Ly<? 
z$SJM)aZf$-lt*OQRo?hawwr&Gs)aY?25L31IE6-65d1|n>~H4ou%j7W{J?QJ|N1=i zAYTiqtyF>yhd&r4@408Cnwy^ZA*!47*zr!aJ#TqXM^nb1CO6l$-#75cxZ9YgKK50@ z!E2fQ`Hv&8%dO^v)<dq<7x@FenL^y(Z|nGRI)EGCgw|FHhdRF%qQig70Fe<UmZ>L& zAhG#d2tc^1hC%>05Q8iDfj<C(|BEiMQVRl*z#sSl3q(BW%7A_`E9X1Es)(^fi?Z>1 zkdNCou?smAv^>bOI%vwY$<w+Nq`@2v0TL|1?MOSkqq!(}p%Qv6nv*T;aJ7l(tX6^y z;^?(j`W;5mD-7Ga1CW1+DETw<qojneB&gVmD~L4!c%=!z12KRLg~$!Qv!VZ<(UQ6X zK#7>1jdHdg6f~*hxNxJs%lkgd%b(B_H*eFnaFagp^FtT}Kk`dCI!u>#NiCUJE%|et znOlfJu(|9wffn(BC{PGXy1&A}5;L$ppnHpz0gO;=xPKeCQ4D`3fx$wGGC!oq7JGV% z_y~lUONw7|l)!)#pP4SZB1NH5MdVS#0bqmPi8!@T#5#lnL2SY5>pt+q!^ks4k`uW; zbiu9Tz8suB9!$oi;wJ{Xh=1a$OX33{Gy_|#3J54R3Bnwn10y$ZouvaV;UEkaQ?{~V zzO`_a?%BPkV5)zoh&!i%D=vaZHZcs`ST-sgjf7}JCrY#1lN9#=B4I3y|888KglNXA z!@(L{K|TCD8tgod+`2pjHO&)0u;E6F1c0dW10X~-Eo;Acv!6V;M5{@_s;IX>5CJ(d zrBf<1<Y5drdB`*itK}=ET56~&X#y^CG5Lc;GJ}I53QB(=`U^Q|5;eRf%#e)Svy3VU zjZ;)fQ<1vrBTG#q#3ZY{Kb%3&gQKkTL+m2I><dZoOG!F3wV5!3KVYq9%BqRDx9!-2 z6~X~?(FnG3$B3Mc*T{hsNC6a(fFcNlK+pj|!2lg-%pYig2_OPN&;bcRffPs``5A!X z0Zo5#0W5zAgdf0*3HSjlxPmw=l=LtIEXV;V8coJT0r_zOL3m9JKmq^DOaVVIP2Aia z+vvx0>BQgh4Ck<v#|#A8JdFcL0f9(Oe_<uf<U;GL6fLRD&uoFD!+^yM&f;)^#%xT( zC{OcrPTB;bx`@X$tIM#2%E6<F`YSs7oC*A-&kBE$PpB}xcbk`dA}%AmIn~I6V7mg+ zlA+-9$$mQxy|B&$NRNeR2)ket49KzX37Xu1m?v2XJE)#VF$~fG1QK8v#W(;L@KA-g zkPx*L(;N%WAeiF-yjMgK+|mo)D2%^2gM_HRFrbajz@1Vo4F9X1CaDz3xPVnL1NqU5 zv1oq)1OS%6kkBU?i~6$zDN2iqkx_(-Px;)>Fcs4=HM;&(ik8bV{~QQIW?C>$k~{98 zA)By8i&y}9IZOellJ`^#+3|w(34@!th#DZ8+8I%W5QE2Dh#Z)Jk+2QD2#o<)luJnq zna~VRfrGCkfJ-%v-}%$yJQwK7)0DvsJWzj(7x)~*@Cdwcl!TZb*wBnsjYrIA!wvw( z#Ym1_yvj;B6F;?#RV5B#N~S(A)9oPBVl~!d9X<I>yV$b{y}Ze0Do0OhKkd+iV*3M` z1OOXxE40WHfiTY77?#4QnY##-gouLRh>c3gRorNRMu`k6u~6i29RaAGcuW(V`G9{_ zZA0F<P(5|mrfEJKJ&}n*)pTXf+_=}hU=!(((UAy^C`8fONK*elNlYn;*GfTH%&-G- zjn^wl2qb;3VHMet<qp$h)<6KL0v)OMn~3-;k(`{?r09cq;}Mnl3q8fywFn*FK#Y3a z49xgd%s|p81dcx$fK^2kPa#*Dv66qZIG&#Ij2I9~k5yPtB^5itlcMB|r3F_hT$H;Y z#)SBQp^*+PL6U?30mDcEKtO@6Wdg>qgE-NPd94(o1X`UH6vUhhh9cR%^;_5DC-qxE z_Or65(t~<yR+Yt$z+8xDqo=FjgGQ{OJducRRo5aFlteiVq3sKVyq&yV6g7W9BBx!L zg(#AP{Mg>Gj#b@=Os#>eAw@fw$VnC05;;D@kciWD4y!DT|Ij&<;=q6!6B^jiT`-cJ zCwX0q-HRo{Sc4Q=%uR?WiQS)Bv%j_8zWu?q8$tn-L^O2V?Ksc{d`bxDC)vxBn{}8< zal#n*J%5=J<Fyjh-PNHP3|oIS*b9J|9FPusMM%|Mi<*IhI8XyyTGcd^liIzX3F9U7 z{n_|M*MZPc*f4_-)soOj-p#!TJV=?jb>7tN+B9U}gh*a@CDg=A9t6JL6h1pfJXuwH zN~e0eFMHDxiBmzC;WLd~W_3{05E#*2j*sn&o>7v#{b4C&R}zL2cx8Wx3mpuK-HXm; zoia$7-5HpyF%BuN(PS+Z=IUWl1!C`cVs&+ZD3lDupbr1|y;O$5;zGIAoR!}-?wKgL z;w{3hFIC|?=F7k}u&=<@0P6!Fw6haYN8nT3JOIa;K%OeWgY>;yC9aH~h28#j<MEju z;oysfB-F3%;NiHXkw|~y>d@CzR0}(H3NBJ)BVo>8t(mz<6xQWVg;<k;(@I_aN_pMn zb!`m1g&KWiNTKVyImF{&4z#E$t}NSLQqu!E1HKowikw{2q&T>j>%@wIP0a9NS7zhr z?JeoB+fPAEIG_Q;&}61{4-m*dyO5&i1t#Gz1C`-Y45VExCY66MqUIl#gE}aTktpMV z3DK?eWHS_i{9}RxFcU3d-NXP?f4Kk#k%NrARcu}h|1JfqVG`zw4zRm)M7?C@QWG*k z@B_saCBdYMA6tkev>}10WnI04A7IoVHdHN5A2XO5z%UYqA&w69m)yAsMO}yy?j_~i z7dN?Np(ACc7)XDWA)1uN3_0+ZMS&B<h>5viT|Br0glOjwmDJE_)P=YL`y>w8S!qUX z*TN8LKoPn)vFNnUr+soayacE|4m1b=M@poV@ZH3XJ_?dvhz1>^x^Tiu!Gj64l)iwS zp+0C!A;X0b0~fti*ckz(;ptCF?1gB^go4yXR+%bU&c1*9MRakQAgYT_-I7Bs0uTMa z0oYIp5Nv-L8c&9Ur5vA6!2tVg!w@YU;xQh;7Ht1e6b<h=ZHMX+wH9tRLQ}5LvS)_2 z)#K`zrD0S<N7tJsGoT^H=|pgy9(5G}B^q6d#5H(Evm9Cr?e^}8l3JC441@)yqF##X z=8QPh(gT0cBHPO$<pA&MAftR_jryiz>=tj!{zQbb4D}xF0e=b=Q9G`1))GuYg<!y| zK#^(%KDcHIJ(wy$Fc)oUM+MH4GBHk|Rozr{F8n?tj}qmzs0&hE8Q2UG%gqx3R|=wh z0~SBA6sxd@^yRJv=!x0_fBmS!aB%_$a+Af&)GL3%B1APG0BP=k*{X=sXS<sSYfxYE z649-cHL(shQ>f;eJ^qUm;doowt&aS&*E9NY|D@o*DW6^L&Mk#pu@lqSP!w}hgebKD zW+1n7@a2Q@l?ksv?iC)snv-zAT%qrc3iT3%n2Qfu<LkNVT_%v3cNIlm3c3>SzlBio z$#8!{EHCq;Aj-u+9{@i^O2_hBT4<a}x*oxRzXo+=ByePPb^fff>_zal>&9pXjOQ*9 
zyRI|5o{_04i!+6T5jrRe!;<HG&NCVHT?)8JUkeEhgeIMFHs5rt1*sZ`CBIAdW`_gR zL`_b%_J;!UR~Pr0;9hrW;k{Hz<!10chf{x4yJ4xY7lmLPruc&#Hn9&}B`%Nlq11N~ z?>S%I^gI1<Hb*{rd@h2!t8gFph2QI^O27X--h)6tzrI|EDByL~7-WSgz8G2vVGS`o zUx;L%^K9pLNe^|VJG_AZNyWqV*_(9Q8#)wDVZW-hnz#9z$9bFIbP^$Emt;{bd)9ws zG;$U_!gFzE)vzW(h)H`#WP%@Wd%UoNC;7eGc{gnNV}!Ui&*A}>w(S$UulM?}mrJnE zzMaPo7FI17x%lU6(@AtNbYWbDc>Ai*gAlp`dNE{#JNbbxcvOUWpWAt<?|XySvl$Pg zI#+AI61m0yNXCyQ$A|onH2bOe#yx)yP?EKEV@nrZ-<Y)rfIV0UMTcg=g##-Pc&77r zUc-0JzWS=4`oaJE3pD&MQut*|!Lb*6|FYNp+}FOtk$HKODr08uVQsZef;DxiVO)ob zdQ<+mUx*Q4?|T$=pHuVK7r1HH0T*z3*thy+*ZLu<eUC%I$bbCCH~;cSe>i`d{G>4O zlXW4_r$dUjH~6;-2-kRtr>E$K#5{0GJjerv2M9O<2NEo3@F2p33Kud|2;d>ah!Q7K zoJdf>00BP)V$>)w<A9BDK#C;!5o1Y|Cqu4G$#SFsk8nDQq{%U(%bGY_-jvx<2OK{Y zhY~Gn^eED#N{0?C;WUKP5>S7oKCMdi>eQ@OwPM}sRVsxN1DBG0xTg;wg=T#4$y4?% z+_-Wf{3$4?&jPtf{SXW!qAUbK0|EcJt*A$^K)wS10BGp=G33aSCp*;3bnL@E0wHRK z!+CRN&;UAr_8i*uXwjrkmu}7ab!ybBW4i{>0<-1Zx_4I`NC9eC;7)(TmLP6?IP&7j zk1KD^oOyGnxOwN{<L8P(Kz{b{?(UuJp0s^We4n@napJdp2nrn<cpx7Dh`Cd)Z~s1i zhUFXMRt(YqLjVFGAc3M4h?;=29r&Mt4I=0ugQ(#!-F_8XNKsQ!H8<5&Rz<}jhaHBf zVTd3i2OLxsMpT$mdYpgYLm&y60mumuTzF%JC&Wk)Ab(w$M_q&I@m7#<2*TY^dkl2s zLv;y)Ba~4}IVF`<T6v{K0y*`-|A)z)$bvv+2$$TL&G{gZ9BS~;C2<>SD4l2$`C#Kw z5cKf}dIkl;){t2Gc~l;K<O#<gbY6G@L4uH1mLAv%WJaGAttWqwAcn5j-k+Ijx+$lf zdiqv|Tau_8heZtJ1Xt082!udS6d?y3Pqf)#bX>xTT10?6IaD5dT$kN;EPlGF9%m-% zM_xBJN>CKSDs|pKj1h(CqmcrT2ONeDTamNddiyQ7;T|>6h-5~^rKtkh&}MKRa!D;5 zHi-1ZRJM{Ts)m0QE|8YA5$VAPj0p(>NFUmYizQul_3==*-zg?gAV`+gnLvvUMR6e3 zHhHi_fi%o9$RUedWmCVM8`b};PEkw7$;-V7ra(N1<ixD@z6q9CaptjOd;r*qXF_|P zpz+94^0DJUSM-b{v;*Pru~9%SY4AkG!f^$+A|r}v(OrLg{WWi)mSA&-WZ&H7Q^Zkh zLkT8~ingsG5ElY&d7$TqcnDqLM_XW5dBUK8NPMHzNfVuv9!OsSb(4en4H0&NF#b5@ zl|SCph+O)h1sqoNaD@#l^e}o4Y>VE54{Nq?MIctJ8Nv$b!U4saPqE;r>(Ske83eE@ zG;kk2S`L4u9uE)X!{Iq5x^!8S3gkmkc{oaF<q!<S_3+tiUv!4V+3E%yb@U+K4pzf) z0|n*_gvUTnl+cDjNwl5r{}X4*QN>lZ+B()A>>p&tABMI)C_zAdf<Ts(mKCkZ6~%bW zqlom5XSpItZh#iNAh^U9mD|8UekHgK+hm8T61abWAWz^R99UKYk-)(T!%3G4i2w&V zd?95|fzLqz(KifUC_DuTgvFv{Jio*xf)Gm{L=rcUg;fq)1Ysf%qbNlzkqciG5J?)S zvV}FVkZtKZRomVm5EY{E22SaN?{)wc4yu7|Ez6lgs5V929PNj~i&E3l6PNc01Q13! 
z%s_vF_&DPUgn@HBBqA-N!8H;De^f!?4xEBQwwVBYDU`|(2BJm^6e$T|VWF%-#HA%6 zq=JdmOP~z$2P9UgaDD$|<x$`T2OJnhNdvhf<pwp&Q}VKxiWEq@)MgO(eKBTGvDy<J zV1g}-QIZ33V+mvCl!v*`OK=3hzkWrK#rc1dEX6a350W({St?2|6dYVd@+e2JIj<mw z>?S<nDK_CytbHFGh$l(tKT29sh0$!GQ>-RG3Q(+?^}5wWAb^8E#He5KBnkxb_9!O4 zlQ#+!NVDXkM1gR#DDX1FI>9!*f?)5VCVeR8bk{bCvaCpMv|1!hAjV6U;9)m_%0GYI zh(aoq38B2&11#aWhmpb|5IdyO5Ybr{TP`M+1Id{7tOo}(M2k`tJt<as7%CJIL4_s| zPZlBROo4c^lFhuQ|3D&HQ%yqU2mAcc+*FiRqVxf2eZ-}d@;J+@R-~Q}tjNR6z*msf zWnp810Cu1_*To{Tdm3s%8@VLYOR|6Prp1g`+n5Q5ITXigUR-9Y#;QWI$(6B5oKQhl zc_mj~EvQk;8%U2*B*JV@dVRvp(FSYU-FmHx_$px?)^q|fKEbS^HG%Bh2QNF=zyvZp zYYC`Thv*9A4sIL{hf2#>Jrs#j2)q(G{|XqS(bgz=b7=OgMIqPnw!L4=5Ql#_2?s2g zumrCnh!bQdkRE7OAZnm*LEH%1fs|ogXdBx#-5b##Dz-`&C6sO_($11XaC*>_qITV@ zVat`QOKx+@4OahP;G3DPTyGjt2u#2O6I@rNKHaU33UUR!BDF6xE{pkYERmgEZ;lz> zFp$BvRU6`~D<?J=&QNSali`01!3k!ppi*n22=7L(6fTPqi<$=p0<vO<{Be-OJTeVB zxyenQmE1-J9q+x#Wb8UH-Qsl8Vj08_?6nYU0deP|<Pp8@L63Mb2xc)8`d}$#vYBs| z6*QyyD<^tehuSn~L?$dzc*ztw1sNAvHaJNHQEC(?UFcD7OM@vxk;{MJsv%pw`Ch43 z)yYIH#i16&41uQ7ga05-xwx`YNO7JZ_B=dUle*ZO%23?y#V!+>8PO;+a=X65>U_z$ zmoerMsI8P#|J`i#YVU+suz5*rW0RYTT6#9EGKcO+8(DY4NwbT`G;ZzX!4HFqg0L;5 z*Qe2uAQwIg#RS{Sd6$2?;8G&j+}<?p=(MR!Q+e~E&xSU5Q-$D_qccgV{2kU_A>TxC zU{b8sQE^w>;3m&hsOENR51F{pv#J?(i__CGTikCxB#0S0?WgeWCd*JoO5Xg~&6=OQ z=-%0Co3GMpCnr16T0yjlV@_>cNs$jkRa&QVt?`7S2N1g^N*;gC8&9Gez3jQc9Gf+i zaAzmIR_jjnyVZ^}vWF~C_s%w!hN$DW$dZ;w;qmiOT5_`!{zeS(onsJ>ru-x%iuE28 z@;qhb?pXhgNl59o&Ph)7!gJn2jQ@OOFj(m&UAj|w_Bc_xeT4<QlaFw?^(pDz`Pmx< z^tNw^1#kXTO>2KD)7|OwwUFZay~lp=v_C%b%X96)5dTUK&M0r{$@`%I8v<M3p6rva z{p~}<?otlEr?%aly!kpQ;0~!Tv(NqY&mQCK`8uceeSNME2L5z?>ih?&e)jX<=%c3; zMSTj#p<Yq6)4%P-KnUDo?O*>jAh}tcjKLnJEzfsUUh{uR#BA*r13DlFiW%S(gy4LO z)_I4|>A_V69dB)52d*G*<$?OZLF9dl-hBt~DMgkYAY-W@3+^CRK|mDPg&*XfzU9f4 zSq7&q{{`R$g?s5B4>qAM=^n)Jf&H0Ycj;MqkRSR{M3=$A^vx9$Iw2U&Q5|j51!hV+ zmD6{q9$$aCo&;jm7lt7m4v_Z|M4^ZaVlj&lN|?Wa$CBXP0vcZ&2BJxQQ$d8&!HC~? 
z_!U`HUy!|_ATHtUMWXFl$p0CM-{oP2$X|JsAXx+)0j3`yJ|gEyA}KQ7e$d)Bb)f)u zSI6y#>y?*MV9Y0i;^v7W?Uf=f+D9VV6&!>h$MApLe)JuVB~76@qAi*hnMs^7E+aEG zqcc9^i{*wMpcWm@BCd_pA7EnD^kG?`U@?Z`F_v9Q85viU<5!p?Ii90BrXxFMA9n$v zHs1fp4DLs_K?+%9Srm|(F@~cTo?EIt$3F(7a|k3r4x~XAByrFk6c$zHeaqx!B6|24 zpjdxcvF#&2y5YGs9Xf90I&vgCdZb561<{RR(v%?;@`yDWMLqf5Qfwhhj+#YYq)GXf zmmnlT-sC~zq)i&+lTF0rL>V1US;(BBD&|J7WuajJ8o9-!OpcvDG8;&Cq*Q+7RBmKg zl-3?hpZift42H}d4g~bkW7p{xQ(2y|F=c;K4jnMX-A=ZpPQGPM^5m?@6}C`KCEi6m zLJ1=7hXKBY2ykP$p=DYQoIfI2x*eE^Kw60)CbO}eVm_M5$W@G33o0gH8+wZ-TFAo) z|H5Tb#77}zBZA{#E@eit3A&ZZ+MOoDRYj|zCTpr@Ywk|984E-#Umy7)xD4QrOwWJM zb>dT{WL630XeN@_*i48toZNW`tgYK|A|`S&XL8b<oK1>g*;_$G0cd&)Fj69Vgym9* zTwcnPXbz`73PicYSGz?UdZvncJ{x<cCwv~%)cFf3Wt`WH;GwVwToeh`6rFIAr;nB9 zay;ClxeR)`+CUy?v?VCIE$FFTWt@Ldl|kej*v#XFP>@MZ#PmpHV2&q%5}MdNC#1=o zax&R*rV5GDiixTndzNEdiAHyERA)LQ5#op%zQqli-G6>)sA&mTgvI8_f#(dtCPn|3 z6~u@Ys8I{xj|m9D=m>!s`9Tjrfs>w$AH+x*)Xp9_DV3tn5RgvkAjg%$L6(0mBWB1F z#Dr&*oF3CK3tS{&5q4-T^5|J=$+DFI_=t~qn9&LeL~sGvB~8{0@l+-~lT(0}9q<77 zWRal8N(q<`&14oFBr5pOLE7NUVI@RSehX1TiS)t67H(>t&MBRiCmdYrSPc|k(2QcV zQ3+JQK&TMBRMQKbN?5>83x$6G%zQx!Sb#m1K&`5aQ=|*3?5YjL864P+LU8D}eC1+H zAqm1(c2a~?2}Fj%q=%O3hSB73Y=j(SQD!xlW_gunp#f9~s;u;>u%dwPu#I$u|50ek zS6Y<-3K;6UzR_SXgjt5mURuZ&!Xh6Ig!PSLwrVRRo@$Dsl|Vp2RKS1B7)aK*Iz<J{ zR5hUx4}?IW!oiC5fPaZ=Xeq1`DD1{|Y}vpO4){SIpe)LsY#*>}AM}Bm@<wLfh%Cja zQ2-loYS@k%EcW?mm(){e!HlCyDoqU-F?Eu{8pIA5Z9qMfydH!b&1J!Qi*7=R`QWBU zC1B3#?9}osGWz4J0BwJ)%&NrB6w9QTCAm)sh!qRD(3==n#exwExvio#$IkA?b}GyC z*dts#jK>_`-FhvD+39Pp6hV+dK}p4cu?z(;?bEW2#fpgIS`GS8Zi#kD;OhSce&UGh zF)LDlC-P;k;9ic;eh3VB6<BfXSs53(ek=;n3ks=_RImWis_lP2p~^p*YgF_>;r67+ zSSXIHq+EDo=yvVs1}E4KhYv&+yv{)gSW>(+Zmb@v*|yE}P*Qz47aS~Y^?sG1nn3o- zP>W54=q^=_N(lu`Yeh^?K5Ae29&c}*n^pjqaEXx%)vG|*0Te*53RM8~n!!v}S0tfO z_TuVP1uz`k!2o|7gd9}Uvb}FouxXS)fZ}KzoN^xo!|#}xC&SeRpvFp?sV=2V@3{tW z70_25L;;jKh0wx*noz}X4a5nDj|%f?3%_u?W$?`=il|b@UrJD1Y+*xo|DFzOFvtwq zp+-fBjTnN37}3h^q4KG6PK60badK2~-ChM2H)vH9v0;CVly~9?$nB9*uAdhhvA8sa zak8kymGD_noYTeIf*#r1!I|9}Z<X{b;q2`!<`LEsEUBWg27)bt#+Q5&2kVkZ$}k5U zvvF`Ra!tMw`QmY2<q5w@$t!LhBl6!F3-Y(XSAn*jhL}K+q=6Z|96P>|*&JE4#SM}9 zuBTM3lpKGdT!7owRwN~RvgS!#dnz4N6oDE9f*A;cAY>7<t(l6d+Pf|C*(`H3dP?3# z$vNo-#>8xxb+RrGOBDqZRjgTa78!J)<8r!Zn;kQOVs0*1$mXso(ggnkw1!;LSo1Zf zb2fijVa_p!kd3`Or-Ig`e3ltGqs%dPGs@^N@Zx`wn{HhOb|5y_bEkCJiCR;UJza8| z;~cvif+89u9~cBbB$a%sUWnZaHnc+@o6pWMH{Z<4nJ9Ev1=M-TIZK_=wM=BvbI(=b zE!&{9q7u^JpaYV0Nuyk0s-{)2nu)x!h<LMM_Hyq6bX16Lr#cvUS>pxoAW#RjpCFr> z)$xCUcC%|5rrZ^Ba<W|`+Y7S($@9tcP}HntxLyx$-&JGvl?;U9DMVc_9zxXhUC&T1 z5`>@Vu@EiEj6zfy^I%-dHOcg~Vn4=}%<PmTVQ<DoSzfFA+B9P8|KDO~_Ci#NEK`Z= zwXa@mvJ)QmWnW}8Pf0xuaV$R7J98jum$rW`t}~USZ*KVQmcij`!?qwE^pxDFWh`_Y z=C*E!p+{Q@Q^BBa5N~i>_HdW91YgM*qY_0vqHs4i3r6*7ksokonavtDb6d9{r?sEF zBDbN(OA2CjgLnS{w$bQkZm?lErnh=?pJQ9eH0Q=~|8Wz__k1H?XIn`U@~vf*rTTwb z^>_ETww`vC9Jl9^#b?SSe<OI|$+ne%bZ*FEQ%1Oi!-Zyd_-6O5jb%#MC18OUg~$!2 zhHrRTcsPq6#9I_MmUNpH>IQIU_<^f9$F#VPkK1$m$%OI7h7SK{isQIaBqKB)Ig%$i zG!h$j|4DRPMve;kjT3pejAK+%C6<3*xt33*sEN0T^Cs(Y8`7M2S`ImtkIOy7rCY}3 znzy+?P8xex33&@)sD7xKpLtPeb(U8ppYOS!r^;D(A4SwRme8_pR4W`%b|Bt4o?iwW zd^4M?xui!rr9%g96wi*aVosYAVCu$k53HR}d82FdQ%+o;Z~32}`c^<FY6O3RVkx-u zRWNTnIGr;3sAmZrxEVoL`lP!#ugm3p4u}$B6E>>#v(RC8*s`|T`mINVsX3+{M?17n zdtyp^wWDg9owZag3~&E6K@2tmJ_^q=d$T`;o+opWx!t<i|4f^(`x~jdm)Hv*SOL8Q zLhaDXH93k8obRTLIHlzDY#Dz$CyRQyn>50LbIT?C*|42Vv$0CIT-!Vs9Mn)iUr!Eg zx?WIl{f(dFcrCe?JGZ8LlAYK^XFJK`yU8nZwKv_Y#@$mJL|ULBr~G(#Ot;4qe8CI# zsWQB-p9nKI@*^i5&?g#CdI{5pH5~DX4+y+cSYUT_TX&P^$2WV*g))C)dNnJfSi-WV z)~_hO)XUMEJgXbT6`Vk8uPJYuphb;2{7(I?TRF-EsplkVn)ShRm1*5;fgG4Y7W6<= zH0hK=sR;~0AOwOVi3$sdN0};z_uj$bw@unABSX|7mN5Ugcffbs<9yC%^-!A(GwG>7 
z@ac0sL6EQvXweQ4_>O-WRlo-i1pCl|3=qfYm;Ny=tg+ON;}Ud=9E2bgJ|O(TAHZyd zKaqF*ut4ZHfV%ytpF3K45D1k3tH8m-a*!RU>%-cYKsayP9PSOcK^pxsthf*UR)1$$ zD)zTg<7!qlvB?eGlJzlt)D(#sAiq+GcIF52=EpA_kj)hdgfV}Y06@IiaRLt<I5tY0 zFsQ}`3xjoRgdk$j4nh-e43a59;fKM42@8I72!~;tj3G)CG592ei7hKjlt4)#W=xL& zaOTvxlV?w#KY;?!6UgHgJwcKBRA^Ap(WFnIMwL31|7umMSFz%}nU!l-uV2B26+4z} zS*-?ZKB(EkBb<LKHC=c_Si*;uCneaz857e*!8$QbxXBpvg-0hGJ?=pH#|jUZ8CTp; zskcN0jBxJKl)2HSL9<o)wDmK#kH<iHKBrc_nl;j`tzpNOJ)3rI*Jx43M7gvr%ZYF* z*5q9C1faHd0KbHzk*-G`A%;+#tNF1(Ic+sozZ+BY=GT9=bNXq^XRME+0pTnj(+j|- zSHB*>clPh$$Cn@5BX{8?O1H&48IEGzx>>-&ASf<_h=S^v%j+N$_?s&?K9rl|KA8f8 zDYxNDaIdyJt}xB1u>SA}koia~@hc5YOi{%ZS!@rjnq~t3%s7wc2oWX>72F{uiYQbL zjSwg>&!8kIuo1I=nrNzy4j8$s?hDOw%MC@@AP_`E^u+4Wpg&sd(#xHu{8G#@$sDUb zwKxj_2IfqdF%2Q%NaMc)FJo>ph$^d-uf*{3NX`qZgCmKWOyHpe?d-EJgfLs<4GtW} z%7Z9_d??e>5*fWz(@i<$slEw)U`VZtmLnAoIoNyy3gsAoG8MT)m17Q0i!NI(xhFc} zF`+lI^Jp(5FU(XmBL&%Gtf4&Wqtj)<6PDR$p<VG)AUBJv4nt4C=q?4}xPvG%0@ZJ& z{NO0Vh6;Xz1B#ehxXvJNZ4K8TIZKG^AsZ;T&^ym)L;u#GD;7X4EIq6==!9qyX3g1z z8E)7%kElg|GDHDAx(lT~YL(-y62$WeiXJ>70|))cfb+&|+gevf>|%1%*86B$c&t4N zhNDt}!GfSI5ghJWESY@{TIio&+$^RF=DJ}8Bneg8Xuu2w?1l-cW>V^<nT|<me5t+= zO)2?}tz(193XLfda2{IiseoSF?YE(-FE^TE6z;-*yM=>tPNt)z`@ck=j&eA`pGGs$ zw!NAW4l^Jq+ap05hg<U7;F#R<%cJ_VyS?k~t~<kr+k9@A6TQ&$tW5`8IKvkWd$lS> zOdBk6Ps7}Hoe;lWcb})#t-HF7W_@+5H~W1f!XwWA*XpmcHacR^JFA>Evke-Jtg+3I zefP<KbKl<kXg|d;;=J!0-)_C3+&uKE)#uyy^3RW+H8=$!k1Qhr(f<3lyWijcO(CYY z2~kUMzw_JX1c<u75zucX_*w%o<iOJDt$$MCkcY~mxDox&auwv@2fq})G-*bC#*1L! z6j(gfO{jf}YToFW=E0}X%p+VP%LOR~LhiYLPKP~Ymge+DxCg2ZcqDvZ1Pj<V=g|#< z0&E}Hq*oS4DeH!lf`j#Z=r$Q<k&Ez=TUweoz`e~0Z+7EO>cUtx7n)ImZIsCtfucCb zm4#p)Ian9h21h*Zu{Pm5o!5?7x)c5}O)p#^6aQs`NPVHHg__YLPy7XkE9lQFurr8% z{rD)fM`n_ftLYsJ(MUk}Deq~3GocaH$U+ph5pSE!3H5AdmOVVqSe*=-D{YxeqMb@V z?h@B8fhjIuwq=;XG^TZq8BDQ^4Gx$b3-#ux%bwv<n$^roGO?LWZElmBzXVGj6c>mO zjfFR67^gK6Moo0CQ!3qb=R4uK&9Bsd(IELV3+>Ly&S$AppZ(0GqabA#I9<V?3FBu$ z5!ywQ)v_%3ODOnw_)v)^G?Ljf3l8C6!-+2Ipd0n5?r6BB&tMdb9|d4UO`6h9k_!%Z zq@xw7s8UUJRHiiznj9lX$C|m+rd~?hck)30$(|1Nb`Y?lJ!QEwqJD`(k$Vk)A11j} zsp<}vmHG@HYL?TfvPe9mxd{Y1#M7;g)ot{1CRxg<RkEf?4+b$q4Gj{Awa#^D;Kbl( z@JUw|8A(59)$3oMWl1<hL8xCDTT}td)?X4dv5j5U6o*w5$Erwi9%1NYF*_z=T{eR? 
z%&duK*_I7{mb5I2WbH<(rC64KmPD$BgIZY|Tlj=FhpCa2M@lN&_fSuxy7jGka?lpB zl7*fv{jGc^mCxfYmo;{ED{RLiSidqCJR`Fzb+N0O;v)4kw1op_v+JpNj+eaT4KGK_ zo8IzT#Zf(24K$T|-khkHzV$Vxd^u%b{r}37DrQ{`Z<>PNpX`^w^%d}cObeV~Z(2~b z=3EW-3X5Nlh+qU1ws3_pd|?e|n8O?PaEC$sVG)NI#56syYDAby6|b1ZEv8!ALYmR} z6(zzFHVBDFoMRmCSjRo)v58OonJ+qTfkh_rk&$QQBqRB68A9n#SQ=2(XfVhFHi(b0 z%wsKQnad*pa+S%FVpNWQIZ<R@-<i#v<}#xMMfxKabjhOH-hvr-H}3MCx147@i{Q(2 z4oiJVxnw~rnb1r&bQ$Hx6UOGGxQMl_6?5z7v%q1`dbV_>b=+r3dj(=yQ!|>&9BNRH z`qNI1VopkfGd_Fdx~b8_TMZJ^P0!c=&X=||rf=P0P0#w8MJwZfpbuSWU;{gmrsfGz z?C0Ot)oi+(o^Y;l9c@eRn%7s6n`Rg(>Q8T5+ug>{@7!ayKVa0suTyHZ>kJ2IuUpz& zrZ&2%l49Zok$9zS@+zB5??bB4*gBb%AZo_l)IbvgH3svuef$GJNC6OKVE4l*Jc27+ zK?+uo!V*aQZg{tU^F=X&VIjY5G@9hcTn!{-$Uok~MB~J}$!gT&;>&A~=sCFWngPTg zj)PpOOT-%Ac-&rez<UplberLie-bE2@}cm6P}E6f;ZUsB(9Ym~M);P2P!KiHpxqBA zK}evG@hAU$J)MRhu<jk`nlz~1YX3)j;LvxU*R)k!e}MCUGCg8ZveR1KKCW=C)X;Tl zUnUAYAN$^$g%(m85sjn_bqH8+9p)s;a3s_hDh(;Y*GM5G;<`e!zv58D^Bv0+PQgNM zj&od(Ksj9y`>97?_M(X{g)w1X@$^OXvxAYyj{o>p^w9ZV3S!)3p>pge4dM}uzz0?` zLk<9e1LiM(gzG4Xf)B1w1MBDD4^fal_pcuX?FS+bQ5b<Aq5%F8<RB33$M^_-U<N^m z|HAMO#Qbrf94Htb_Q+2>9C|M%d_W82pbUCo2zH<kdSC&ozz=r71csmogn$PmZ~=P& zA#$SyI;0Gu#sn--0&jo?Di8$!CC~@vqRF<xWSAm<3jAOXs;(+-tG4{_**J{#NFoPv z1qnpJUIGvPtRwJpfIrGW1b*NRim4!$;PiMY4)h}ml!N*r?g{lH>^{ur{7<|9(0xpz z4tl^wZh)`S;08><MxelCZXihn!wz^rFyzEk7H0=crV1d?Afkqm*vvEXpkP?g1yv&k zujmSYldr>SZV5zy2p?q+bbumg;P0St1TMqjL?97=2?s;~2-*S%Y7P$YzzlTGBQQ<` z_(S@nz#|GT#H<hy`R?BGVo>@-M|N&IOyEdLz!D9@2@Yj4oB;9+;z`J53{Ya{ECLR4 zv1)EY5BT5(y`s2=EA}RA!~g813>HxYwm=DgMt}!rU=MtdB%;tDhHwNF1Ppvo4Q_7d z2yp}i12}FD1w2LsNWde&Q3TY{AU?4MSFQh0Q8qfwb8_V`V&V-Df_nI|1%jhq<RT#o zaw%Tr9MvEuM&d|Z;8jA$>8OO+?$Jfmt-~bIAj~h~YOV((u@N7!!ZMN)mrn%v;3Exx z;s%y)G9a-X2P7NupcEZ(KsF4+PVpYYCt~vAI-tM=go8hvzy#Xh3pfK#bi`iZz%f9E zBoIPD?t(uw!VQQr?OMkkQ-ihyfoCf6>M$%G+oJGxuo3e@9GCALyOA87F(k^7B`M4+ z332DBk;0VY9W%_rZW0y$&xA=>>P8HIG9whiP*6farbY$Z!cbx{BUp+HKO%X8<?tMb zDeg|v^neeotnW(F!ZOi1!cG!BQWEW81o&Vk%ca7U5DrvQ!$z{gra%)RF%t`~G`Vj1 zl%o{ukHYQ}5dBCv_EHurBxHm`L7d<LqN5hijz92V4o1c|sYZD;ja<r&>_Q}e5dPo< zU<=_QF5*U@vW5`wV$&MSk{c-uPIQpN6yyk25<ExX>kI-Mf52U+kOY)t9xco+9kU`+ z<CEH`P1@o|*rhijW>)$nC5O&Yo*+;%1NVfZ&&EcaV$ZcC$p`r0C}wWMexMsiU=&GW z29(evbLBHBEF=xlApfLK!**_eL|L-JymJ3iG~zB}|LD=n>eD{WLUWA91=z$*bi-8m z(e`%E6_s-%dZ0-Z5??x$eI!yf;AFH$FAssKW<1OV?jSa8(BS}&By~V7N5Ct)u{7f{ z!%9;nM}RHU(-W7^BkX{}ejp^mK*KKVHm}g^B!(f91S6P(CVpco*TN%zrsgnV!UWpW zKbqrZMuIu!Cij3$gOCx-@?a180P23p!xB*qC@~(F-~^tK3cV5ztcV+FKye(7!zcz0 zn1CIv;|?$mPOM-YJwgnit{bnj!rXKQJFYv}A~;ZnC5Xcy0QCV@;0d>bTPh|k4if_( z;{+G4F|G2OR4WSq_Rbi8{QwYZCYb6G#5y8W)g?@ci6nPU4WhuUd>{z*!wzna!uUfI z^RF%HAP0u28^`7jx^;2BH8wRYR*?_Tn50^J!-d>n4(g@|h6w{3k|Y4LJBol_VxkC^ z#EoF4DpL!$&@B%j-~@cYLB$2uEXyK2j1OXy2Kr!JKMccgfa~^uAR6(kE_ZAZM>HBM ztO{dPM#Bs@R;UEF27wNRjZ(=3re>6UQdr9;=?<#6WKB`|pb!3FQ-|*gd;kabz={IQ zB9-rBJ?z4;u{JM_YDMfMO?JfSl^KJM-|}Y3GOti~Pko@LXAh@ktD?9-FTC{N1fDgS z1OX8IAVR@}DgUs4#A$VG$8fA<=hM=D3~gQZ&x*!sAPD7-N83`)_UesEl!x}-2ymqW zE5Qr97PSw2z-WId5Tc+DD6=P)Vv=y?>O_oh@62kqEYl!2a5>E&a-;IR7C}$w$QG7; z(5{7mhnl=AGG)sc8#P*o4-kBSI-$Zm7LD(;?ABftYM<7B*8VnGRktistZdE4YC6Y- z3U`QPw~ZF>eQ>FqVve*RR$?i(VgaEKj<G6a3=0LXBu_WQR*z+!_j%1k@p2D>JTByn zjCh`?c}%F@Tq&=1mr;v0nUMAbCU+_zpw-e%^;R~|NQ~Y1Pkvj&j|k`FU^jUbca4hZ zZyawpT=x!t|KoQm1}agvDq{WM3Vc^w;&yJo7X~YHef^ei9~gcmSYco`jXHRcY*t|b zhj}_UVsZ~zLr=0Mt8y<lnWU3*NqBWFQ`)MQc?<E<ws!P@h>Y$-coOJy;s?>bjovWN zky7_3<SenA6<UueZXr~6ZJ5$jugj2^eY0$e5f4s(ZP$SGcjyckmFidNfVkhV0;3do zs={|-k@yeh_HNl@LUGQ4*SE*yc<(-mfp*w}?U!(ktdcWt<*Xu-q-?43U}y)}m;|AK zwK6A`_&yaZmEWm`J*m8WshCvRgCc1eQ>ty>_KfA0Zd)Xld0CrS8O3@z!LCAQJYsDV zs&Yer*_i({mvhS`n4MXfep$$#dB3c}yGnVY$~cAfW0d_sc%%87TLiuyYLLfQKco`} 
z&Ua`apoJl+HT2+=!Fi?fj-5N{lZh5{Jui5#S!h1_53Gcqa{{bdLuc$Ro*!yK;rW9i z)(0F_g`@8T02*N+;Dp<vc#$Fxq7F8S!k7(z+Li`Nnv58q_2UZuK#3)%pS78mePTv9 z`YZC_U75lU61t=R=lC)yn{hdfF~_-r=>^9oknic23Yw;KX<6}zoZ)tJ(|LQK3Ye5M zD7;IqScC2stEGwhfBZ1g{%11%Aen=yfYn%c1gn^zf=SzArl&%iW9O>7$gJTYXuk)4 z0{DQT|KT>5BYJlhV0;ldj>JW!%R`0#V5!;Khlu%-z6Y5_x-HCLI`?3&xyTRUWX^tq znMm>F+M;O(n~P%XfZ+zGMHzVcAgp`Htq-PPI9n%L?r~ZJqs=+9xd^@-TWuiLa?80b zl2`=p+K1E@CqhIJlESUUCAYI;Qf%6PwLb{JgnL<d7Z4J=D1ukCZ~EK-AT+d^C!jU3 zZ3L#_+AKOot{v96cWIj4Nh+vLoh7!Rx0wZX8KERsCoo!#^&qD_BEGGJSy_Y67;9{- z8oV<{yjOX6Y0O~|8;O0uq&EsHVN0@uV%2;b04`{?n<*z0C%O5%GX0yIphEP2qQ#cY z__P1#7P|F`dKjlC618JW>JLs^EP87J{w=mEJl+P^#bkWO)o5W9N5L&+QN_2A{Qzx- ziZ>XPD<<Wv(At7_f}Sk;#nJbMhfUa=JlKv*SRi{-mb<y@>AAP6yt#t4LpU~Q3#gU6 zzq8QWxQ*1v+}qqkcKmR=TV$bs(Oa?Ydae%ZE0*G^)q`C8I?RLOQ*N)xq1?~GmSJ_G zc$Lwo1lY<Khp~%XqBOiKW~7tZ!$?3}&-w21JnhWQJk!n0j{#t01g5a@U<6h~uAfz{ zA4hH>8nYU`E9QGXv;?y${V8&idY>H7Yn^*t$4bbfz2`&6|Dd)_qof;uyR=jb%&#J} z|D;+z7J!IjUDgevarv#xr5)2F=&81Z;NXL+{rQZ+dM6e=rASIFgrvIF17ba^*^@%d z^37)Tt(3U<-P7lBqX=^Y!Np@^p@H44;{3SQd@Lp<#nXc~*c{!{ErUx*i<<{@5Wa_( z&W#L}UsC%tzI`eN{Fq38+JGm1tY-OG9$6>g1mK%pgeouh&S*JF{%}EQIp1z8pf1T% zgVZ}bm{xeb&uh0yL!>CH)d}7w;FR}Zq;QKZh=E>|wz%YLq8NpG(kUJSXd8*6uX7Jf zu=W`&W+czqBM)E=wRXO40WgOT_m4`b*vMyk5ozqd_9kH3^Y%M`DV%lNj|q6a+0XxY z{bk`G<3FRY?R@KV6XbRnb{RK_QU2%cjg;p5jEk=-+&rmS7~K%uEX>9XS|ktZk=#R` z$&q9L<-iJhzzP!Z1Mwpew4gwJU;%|-Y9i1A84?PFU;#M>5VU{;1L^1~g?#-W5J2AB zK^f7nU~>zO>_l9D=|#ADyhOjZzTm&M1YUv*XXFMbyDfUaS2?CFbi_PhIqD9BgF#pi zTM#v>q`{E_=1rX`P@Te049LkpDhdYp(*vf}f3W%dH;1G@bp%*FA_=HxI|^e+XaXrQ zV{9f51pxvMoIQB}7&PcdppSzL0p!`!$44N66f05;^rsJh!G;_=di)47q{xvZ|4W)Y zi87_il?MAzG*gfzNeBW3$-F5u&qO$aV&eP>G^o&_M2i|dNby05rAsON5SXS!h!Sp+ zQ20afO9>)y?36I^M&OgBPX*ps>Vu*a3Sb42kxHQ{Q;H4qT%pM4pdLPc{s^)NSkM(8 zCm<X>j5x7>;>8~m3cTX;uuC|97!%F&7tR&SoI88|47wpjrb?H#s91$*iPHjkOl4}q zjabtZcT8ms$HrILaNaI`T|z0{gL^UyjCjR3V1Z@;`Pri<wDjrJ6=M|$i1A%Gd8@13 zL$R{=@#M=>9}Rr!3BXX$Qq--wEu67e-<POw_UjgZx2ogL6?l+i%PnVy4}JeY5Se)j zF36xk*HOk;9uy_mphxQ+S7C-4Zb;H-_o-IGWEG8t+gek_729m9sYRPWPCQ`3e!VSK zS_uBpW!Hcd3E~_d(Qyc5keqD?$3l$p@tBYuMM04WN<InYcpm=t;a7lE^j2B|!S`E? 
zDryOThieposhgKGf+$r9O)6K>6&g_(Wt?)Rl%qg?aCTiEa{@r*k9+<Js8FMo7FP%` z_-4YDaMZD4YbP#ML0I$MhZ}q@)V3RckE%uyk<0lRXsDtZq!UGYa+VpWQDP=gAfvtt z>qt|o=7SnGDAiV$_NDoiq;Dm<E1CKUG{=X3ua!339)6e+n5wZp8>AjP|G{xWXEgBy zs2-VR2W_{pdT3*sm)5~U2`7a0gb7^nM_P)^S#$>vCKN%@SQmE1rD`DzcTgTD_+Z>- z-VU6hws?8wn2~?pdndsT?@3X(Ox1_Qh$YZlk*)sqN>LRR+X2N8HegIq4>Ag*#>9_* z^2?DPeMG_I!!nNtfpJcHw$mP=3fSGuJ_9)=a23FJ!wP7E2;5R8oQ3q!Ni&u7R4ji4 zfwMnfofvpE>Fk+=0xck_qHtJ^b=lsbC)cLD21g~Y{=LoX(`^?@CEQC*ZFb(CY0Z;i zpIykUs5|wfcj4y=Bp1>f17~;K{UuI+f!Y%{4VRh#8jks5NHXX5XNvz;JF3BO1fF^6 zi_LnX+*Q7|DWUq3_WFIS)0p_(mz$2eQ1=ExHe;#6%D2L~4?h&z8yibE<ZVM;<?*r` zt330C;!S+@CW#1>bXvO_tB9am4?atyExvr>vzt$T+fO^sy7SbB4?jl&LN=X$mJP@H zVCV7gFH*#lH=iE6F%EXkGau9n<vh084i5ZlAVVHRy3pjTTZ4m>10Cq0$f=Kj%xg+| zDAzvP9qoBkdfx@B_aO01M-L_p6|3ODy%WlCRs{sn@ibUF1lI0o8cf~}TPVZQ<w0!1 zlg?zKFtBSuD2Nv{-uWuWtI{ZcZf)wLn-3WmK+7?ZiPj_1{ivg;|5JJEUSi~8{Ajqk zjj^wBX{(|1Aom&^N(G2!G+U`KBqf1Z5LRH~S{=iuqvgm!AcF)<AqyGEL@IKNf6Uwq zsYRa63Cv+)>s=!=x5!Lta+5l`B<5InuoW`Ielp7gvtXE@J$mwLoNT3kD;K%SpCzbf zO{&fw0+$*65lo2@RADTC3Ct5t5&(P?ga}k=841DUS~l?+-|okUW(ISb)C8YPVp2?9 zPG=&5(2_3^<S?EY<Sv%1ra8}fwTa|qb8s+2IX&|rf$+g!ekr22@^`JCp>v=7oZ067 zu)BA9=3YEG<q~B!I3^W;sh<pOsJEu|5@do#i57{V!$j99hid<Hqk-aErz9Gh<<ReE z9=#n$Pm0nYi48jy@)Rn4Mo%`uL7OS9sZD9NoIlXiI|AZ~Ku48_cmk%3H!Z4BK_k>Q zeJ_$b`YG^|Di9<db*fZVOdb}HNJ0Doq?`feK+Lv)6Sy>nRV}N3S%Z>PK}6v>T>Z>n zZl*Uq0Bk~Mtt;_fiPya9bwD23t6%@>O7j4Au!IeyT@Nd$!YX#LeO;_$2P@deN_MY^ ztt?I^i&?xjHnW`7WMn%F+C*A*v@2=S2t;t&)S`B^s%<T6U+db~!gjW@tu1X~`##g& z*0#Let#5k^+}+ZD(KEUYu5o{h+~XQ|xTI|^Lnxx$<VrWX|I~eLZUs`^>`r&P!DTLI zpv&FxYInTWHK}vY`;p>`cfI7L?seh7-uSkcyWX{odgqH@`Qp~R=<V-wv8&(w3b?$C z6b*m}j9~rNmoeZ?u!GmTVE-QWz5%YVfZuE33?n$gilOj-gFpOR4omo~4c4%U$@^Xq zt5~`sMofuU4CC3hc*Irp@QH7{T>cuw#xaKRjE}-&91FR&Kc?|fUp(X_+cw2YZt{|K zJRKzW*vTR&vXP4#<ktQG5K;g{8Kzv@5nQ3mQn<nrhz#W{vv$i}{xX%re1$PvK?-N4 zt(A$Q=G3x(*~?(2t(fIZXEYc1%IB5xZF4}8JItWZvBm#ES>OPL+cG)0h!%*V84X)U zQzX)|y>synjND4Y;nHuWt)>OSX&FPA(3Z{fYJq?UIH-XJrLHXsHZADQZWq?FrZuKp zOM+NKI@jfK^=WxM>sr$`*r+Bps|yWk-k!EBYEX86Z6&n?NvAp5yPY=5tW8@<MM2wx zu8yH&>up(sJGSB`_p{OKYSg9xWsCkbYeyg>FgV-5@C~lK={*8_rxxD=>B7FxCJ^cV zTh{enH);tE2ZP&r)$g{elmk9jGvojeI2eJJl8u5W_(0?|2zd_v5QUDPT;x$Gc_88t zg%S9FAqp!WK@I|e^D~d&2WC*KuOI${K&TZ5G8%WM|F;bXbbI_S8t1sjQAX>KkK6|( zr#Z@1j`ESa9Og2&ddzR0^MLc*=l2f!&}Ecr*Lpg!Nrx@QnZEIke?00er@G0nZu6|S z90f0r`OIq$_CV-->@6?5&{K|fcUQc*rfn^M!PSt1vLHbOms0o630dtNkgN<M_`w?x zDUet%Vcu{UWE?CKg_HIA;i|7lonwpPfZuyRke58=zkT`AXMXdY|GY&+PkPg%UiHZi zc<XUJ@p2!o-;Sq!<OMSM%2zx0a@ajZIPZDT!vXk&gv048lKPDK8sh9Z_~H=@UDsBB zI@_Ix0}l3}1A*9q2I(%oq}&|;<={XBK&S`~M2-IkW^ih7P-i65aZ<2pipLS@_gd>G z0`2#H`8R*{=WF+OTKNYU``3RI@qZK%fCE^795@097*z_GM@grC0>OSy5rOhYf9<z= zBVd6dfPogFf&K@80{CblC=dro5k{wfYRY$hb=4%WRe5Fbf?J>jBOrY<um`y}eGtca zBY+$F_7rNcXgi2nykQ56c5A360wh2Y`X&NjIBdzMYepy#NO)RG$b|Ivgw)1*YlvD^ zczIZudjz<J^5=yKD24(-hGzJB_f};T2O34#T1S|KOW1^Qh<j34hhun$Sx6Co*){@P z*oR>Fhj0*xm56SID1<|3Rt%_GH5hXbS83Bn|ASD6S~G}&`zL#Ru!?38a2Vren+OGY z$Z0ZIf=4HWq_|q9IB=;LjI79uaDaat_;4a1ix_wVwYY|-g^Rg|2S#^{X}63cuy#QK zj28%g!-#vcXn)Z-i|)6K%}5b{&o~0MD2=#Q7S;H0l7fx0r;Uavic?iwsbz*g_<+*q zYpj8Xmq>?sXo;-l0>WmAwuoAS2!jNeS~4PWu!nSnI0E}vg0~lg09kLmF@;)~hzN;V z3R#hr*pN$zTKVQ$tF?_lk&g=r7~_VK#ix-0$C0Szkp>Bp>o|}M`DiA832|hoS}JLe z_h?iNc3P_?YJ`-F`KO8!I0AfdjBZAHaPa?(ul0(jH3i$}W}-H3Q#pQ#H*m{1X-9cl zNm+VJ8IDc~l`}Y%RauH6C<0lD2V2RHUO9(w0EA&#kD(Y+2S%16V3u-#mX<e*cJP!@ zd0K7hT2~pDT3MELNfBRv*^3mpl0<1vx|NLqw^G<=j7><B0|_RR*IKx71ki|#sKsoY zxO0V+iznG^t95U&_ki@MnH9mAG3c2Bd74iLeXvEEra6VFX@#sQjWRNbe_5HAm2hM^ za3ippFj#!KS(Cg82cpSZzp0bEDV(eMnj~PHDS2ASX_?a`Wj`=~g(FCBq^F8yVVJE* zj*r%HuLYl{#g1i|fMytJVTf%M`GAwQ|7Pb&f>@}YtHqwFHG}ghpKw{9_-UVD7;y6F 
zcBmzuGEtrYnx5fUpzeu`>E?c}g`f+1p9%P&4oaCrIi58|VJ>h7lKExS28%j?kRv&e zQ|XhdrI^_noSwLUm+kpzZsvHPsB5mJqAUu9FB+p}IE6IoS~n_^o2YXXVFxxTY+>+P ztTmzHvz)Irq;uG!C}X55D5JZnq_4%K1L>pz2c=Rf5L6nJsdc3%x=Iule`>IQU$_J( za5AmAj}V27G$1x#)>;)K5KIt;gmedI1}0WOiEBuL6cGl0;3=DLNv92Zr+Io>dukC# z_^0%UT7n7(g!+eaaH!O&sCg&`E4BZd*$AY?mZg$frz(i2$cL%6>8IDnsUzU2plYay zN)e^{Xs4Q~{Q0JEdP(I82c9}?BvX2@_-JaNPz9oS7SV;NrL1^BbQMttIRJW72!PM( z13mC_^yr^|dUmW8k&DVI2h2(l&dMA<5Umw4tsz+v)~b5hx~-@sAa~%cIS{TwseE}r zAppw}0$UdWYp@+*tjKz<6_Ktsxvu-FtX;8a^6G)tN)h&IoA{b&``WMJX`;l+Q541p zzD5ImFtL7yS~x&!d+>td)u!Dx0`b|h;JK;`*s804HL|ExvM2kAF}t!e%Uv&9Tl6Qh zLTjb`^#XthHU^QjoVc_A(6nIi|Fls%wMu&sRLgI(m9rzTvw!)cJ}Uw&>$THmw6R6B zr}ctk>!u$IvKn=9C`+>=@Pzz%Ty#5Iuqn5ys+Z%cw|48bbE}uz<+raTi)FiCgC+nK z5dsE(k+@00xQhF@5fixr@c@$x2aEe(ZTGbi8n=MkxBeEmvQ@ZvD_m^bwhh&iJqx#Y zD{ZT*wiMZ2NC#Z4D_bs0y2jO8ERzQYP!S$r5E0P34#B$=0XDlEyZ`{a6ydwZOT1Yd zx`X??hC92rn_RhTx~JPuC3ai3>s#Bqxip)9T#@R%K|7m*8@=7jTT>GN7()gO5d&^< z5bNs}ODn&<;lA)|5cNw2PrLtPpf<kB^}4+Uz5k1b*n4EH8fM}3xtZy@-L}ALC%eB@ zT@U=g*=4=Trw7iG!7Zb~9n8T9i@*&Gy0pu=<ojfNX}}w$z3m0M-rK^l`-?Zr!U|V^ zyW?ek%O|}ij9dgvT{oP<CzZPKRlN>8#5oqjnp<IHE5ha*gr7CQb&I=848=|S!#B0U z@)ctg48cdNVE}Bq2`rH){25nl!%E!44jjf+Y{em5#xNGZ1#GiOJi}A$#o;xqDG|gL z%*1sZU3aX;4TPDus=z(=y;K~>8K%R3Q0!jU+hJL%#XIc9(e=n_{KqL}l;x|(S)9Xb zd&B}3#Z%_TCvnMGT*Oos|H_pNKcS_{odm+G?8>KH60a=FVvNe?G|R8NS+^WopQX#7 zRm(u!%e&ml!OU5_Ow5?|%gL<F%goHp?99&$&Cx8)(@f3PY|Yn<&DpHY+sw^>-R#ZZ z49?*!&f`qZ<!sL9jLzw-&g;z1?d;C)4A1c_&m(qK^K8$A76I{827Isp7f=WMi#q{8 z1S01He{cgbA<zVE(D%&HKM?>MSO+GM2N6Irb^y={5&$f4(Vn3Pl?Mk(a1$L(5g^^r zC9M($z#DYHzZW17|C}K&pavU%?HK`(jyOTnW?|AfeG+USxeSp9EN#&h!qP5H9Ra`_ zB#qEVt<y^_5-cqcD$V~9>r2!N;?z?u9Rbh=0)f;lfz?{g)L-2ZT`d+KaR5PW5EgJ9 zRsdHP&<7{b2RHx#761r=U<D>%5GW95P_VQXfCFuf1A2hhSD@DU>j!Rs-3N<{0)2e~ zJpeXNEf76`0+<T`C~zfZU;-N*0EOKLhaCVda0O<-*bRZ!0x{S=eF1@R1yBG1SxuD; zaRHTm*&y-LsIA(peb5fk*%bo_e*jkpLD{1d+i|_wZoL2zKm>Gc*OQ&s06p4pP};)n z*Lfh<blux{jf^Mo1CkAY)<Y=PA1x9A5CemB178RR2I1J=cS!EX+Va=fys;c0Z2;@7 z1OcD{k^%=Z0JVT||Il3>r92JQ4PgKgTM_>a-r`N(@w?s@LDKv!0{$)20bm3JLefrs z0~R17`<uTJ9;I0g)#v?vgOsij4yE)P-c)(u0C3)bf!_hZ({;dqgt4aG+r4<nD-bc> z5m9Rpl}FMYP!T3T<H#t|I1K<iZfPN4)RNn{-VFd5P)jJ@(oc&qGl0Bf9n}>91`t3p zn2X~8a4B-|1WX$-0pQWM9pzI#A0E-Pmn-Hq9RRckHc5UnWuClWKI9c~13z8{LEhT= z&51eg)lN<kGvL^N?d#N-%hLEQ<3pI_mm3l^9s&j7)G-a{_YD9L4i#@c(&kMOh^_#c ze&l<O0X-e+i?aW;2Y}>H9@V@-1_d$djGp9=4(KtR<X<qkAZ_Y34gig7)ssu>x$fz< z4FCopBes6(qz(YBzTzUD2AJ;EuMX)b)7YC{>8gJ0YgoB|0MG_Dp6INW02R^O9pTuR z+v*4H*f;aOa?rlayXWEV5RHrOmCo7|-qTKP*5$4T7JxF>&JbeVzIBf493blH=-3O+ z8$2-SB5?2d&JhEjzXf0EBJR5apw#w`@1s_^?9S)uKD@2H0RfNG?9T6#4rvUq?V`Bo zNe~jquIXNXjqy#}?=()*t&X`HU*2LZ?*f77RlT$+UJ;1C@2p+klaAkxo%17&@is5= z7UA;;A@lhg|L^^B@5oB-Ix_T4{=R)~=H9LHlMd1#5A?on5jW6QCGT8qUWp-5?vp$9 zxDEhU-|tC&+i9Qm`zzLQ-{kau?L_YraKi7=o^aKF{`X){m2zMALcjMNQStEQ^g`a* zYiNdDALu^4@{FGG1p)U5ulAT9@&)kYe@^!AmgaNt5v<<R9FQ51kMou8@&?iEe@^t1 zTk}sp>Ea%1L_O}`&D#CW?ig?STJ6|2u=w8X*tZQ5tq%O~?$wrVzc4@P$KMdI58n9O z`dVLq`lm1S$Zx*|VE{eV@1Or{i@x*+!O|gs@w;#JfM^g6P}8$-5Vzjih0p$g9@IZg z5#1lx7ajld>|g#7!|>Do@%0Z700aWCuwdZ83vd)F5g5RY!W#e*DhvTYfI>JCC&D3f zu%JYYA_Trr2u5H7j1ethtO$~#OE?t+v?&RH07J(Bd0Z$=WANZlph1NWC0f+zQKU(g zE@j%(=~JjtrB0<<)#_EONjKW@iIAf~BMSviB$EebOOhM~UK9(HA%GYP)fhDJP;G#n zFJa11a0kL0g>r5zz#9|j%$hb?0tgTm&RepKx9~K$0i($PfW_)G{4>C0ICLsshPecP zfMu})G(XN$xFUwda20m6h>>CAwq(u1eu)^)9Eo>4G<--q>v81Cl`m)B-1&3p(V1_& zb3>1T{}YqJm<13)VVwt`;oNz^xJb%80zj*o0TYVBjc}~|NZHUa95buOi{xz^CdQ0{ zT1lZY%F7E5)4KaEyvgopX`?9U0}zUTJmRY_C<rCQ2nq`Ugrlbp0}S9ih|&WMkBT;W zPeC}q6OTOe>We_LIz&jQ#_j-XPdXlZ^zla^gA{T|A{+9-Lpba(;U@z~B+#e002F~H zjOg$Vz>YQw!>#}a5`ar6fPkY6A8?S#Nhk)205mw%5D6%@F0=5hCIKv?E0^$pP$LJ0 
zqVVGcgVx08jyHwcXwN=-IDr8t%Mc+-INlUgt~Vi7Xi5O<xDq4U>cr?yfh>*mz7|t} z0}KBEBPw!LR$F!TRaj%a3Iaa%5TQJw^nd^YJfGUAA_(;8RUkd|Fe?x4ekyiYUCT@M zpl6TLmDiy12$tAfA;7gLYjM?o11Md&%VSw(wY6&6VT&@rpmC{{^<8-5m3Llx>m91U zmh9DcUw-@b_g{bmZgry_1vdC#gcDYHVTNPnY&HfOmUv={E4KLJieI?nia^XjHD8QF z7I|cnOE&prlv7rDWtLlZ`DK`6mU(8HYqt4joO9NBXP$fZ`DdVm7J6uEqKh{AXrz-? zdTFMccKT_kqn3JVs;jp8YOJ%?dTXw`_WEnE!xnpNvdcF6Y_!u>du_JccKdC(<Cc4F xE4u5p`)<7R)_ZTh`}X^9zylY2aKZ~W{BXn*SA22C8+ZJ1$Rn40a>4@w06U(-x)%Td diff --git a/doc/html/big_sys.shtml b/doc/html/big_sys.shtml index 57292a517..b33a9e915 100644 --- a/doc/html/big_sys.shtml +++ b/doc/html/big_sys.shtml @@ -5,21 +5,51 @@ <p>This document contains SLURM administrator information specifically for clusters containing 1,024 nodes or more. Virtually all SLURM components have been validated (through emulation) -for clusters containing up to 16,384 compute nodes. -Getting good performance at that scale does require some tuning and +for clusters containing up to 65,536 compute nodes. +Getting optimal performance at that scale does require some tuning and this document should help you off to a good start. A working knowledge of SLURM should be considered a prerequisite for this material.</p> +<h2>Performance Results</h2> + +<p>SLURM has acutally been used on clusters containing up to 4,184 nodes. +At that scale, the total time to execute a simple program (resource +allocation, task launch, I/O processing, and cleanup, e.g. +"time srun -N4184 -n8368 uname") at 8,368 tasks +across the 4,184 nodes was under 57 seconds. The table below shows +total execution times for several large clusters with different architectures.</p> +<table border> +<caption>SLURM Total Job Execution Time</caption> +<tr> +<th>Nodes</th><th>Tasks</th><th>Seconds</th> +</tr> +<tr> +<th>256</th><th>512</th><th>1.0</th> +</tr> +<tr> +<th>512</th><th>1024</th><th>2.2</th> +</tr> +<tr> +<th>1024</th><th>2048</th><th>3.7</th> +</tr> +<tr> +<th>2123</th><th>4246</th><th>19.5</th> +</tr> +<tr> +<th>4184</th><th>8368</th><th>56.6</th> +</tr> +</table> + <h2>Node Selection Plugin (SelectType)</h2> <p>While allocating individual processors within a node is great for smaller clusters, the overhead of keeping track of the individual processors and memory within each node adds significant overhead. -For best scalability, the consumable resource plugin (<i>select/cons_res</i>) -is best avoided.</p> +For best scalability, allocate whole nodes using <i>select/linear</i> +or <i>select/bluegene</i> and avoid <i>select/cons_res</i>.</p> -<h2>Job Accounting Plugin (JobAcctType)</h2> +<h2>Job Accounting Gather Plugin (JobAcctGatherType)</h2> <p>Job accounting relies upon the <i>slurmstepd</i> daemon on each compute node periodically sampling data. @@ -28,11 +58,11 @@ inducing what is known as <i>system noise</i>. For large parallel applications, this system noise can detract for application scalability. For optimal application performance, disabling job accounting -is best (<i>jobacct/none</i>). +is best (<i>jobacct_gather/none</i>). Consider use of job completion records (<i>JobCompType</i>) for accounting purposes as this entails far less overhead. If job accounting is required, configure the sampling interval -to a relatively large size (e.g. <i>JobAcctFrequency=300</i>). +to a relatively large size (e.g. <i>JobAcctGatherFrequency=300</i>). Some experimentation may also be required to deal with collisions on data transmission.</p> @@ -61,9 +91,48 @@ and thus should not be allocated work. 
@@ -61,9 +91,48 @@ and thus should not be allocated work.
Longer intervals decrease system noise on compute nodes (we do
synchronize these requests across the cluster, but there will
be some impact upon applications).
-For really large clusters, <i>SlurmdTimeoutl</i> values of
+For really large clusters, <i>SlurmdTimeout</i> values of
120 seconds or more are reasonable.</p>
-<p style="text-align:center;">Last modified 28 January 2006</p>
+<p>If MPICH-2 is used, the srun command will manage the key-pairs
+used to bootstrap the application.
+Depending upon the processor speed and architecture, the communication
+of key-pair information may require extra time.
+This extra time can be provided by setting the environment variable PMI_TIME before
+executing srun to launch the tasks.
+The default value of PMI_TIME is 500 and this is the number of
+microseconds allotted to transmit each key-pair.
+We have executed up to 16,000 tasks with a value of PMI_TIME=4000.</p>
+
+<p>The individual slurmd daemons on compute nodes will initiate messages
+to the slurmctld daemon only when they start up or when the epilog
+completes for a job. When a job allocated a large number of nodes
+completes, it can cause a very large number of messages to be sent
+by the slurmd daemons on these nodes to the slurmctld daemon all at
+the same time. In order to spread this message traffic out over time
+and avoid message loss, the <i>EpilogMsgTime</i> parameter may be
+used. Note that even if messages are lost, they will be retransmitted,
+but this will result in a delay for reallocating resources to new jobs.</p>
+
+<h2>Other</h2>
+
+<p>SLURM uses hierarchical communications between the slurmd daemons
+in order to increase parallelism and improve performance. The
+<i>TreeWidth</i> configuration parameter controls the fanout of messages.
+The default value is 50, meaning each slurmd daemon can communicate
+with up to 50 other slurmd daemons and over 2500 nodes can be contacted
+with two message hops.
+The default value will work well for most clusters.
+Optimal system performance can typically be achieved if <i>TreeWidth</i>
+is set to the square root of the number of nodes in the cluster for
+systems having no more than 2500 nodes or the cube root for larger
+systems.</p>
+
+<p>The srun command automatically increases its open file limit to
+the hard limit in order to process all of the standard input and output
+connections to the launched tasks. It is recommended that you set the
+open file hard limit to 8192 across the cluster.</p>
+
+<p style="text-align:center;">Last modified 11 March 2008</p>
<!--#include virtual="footer.txt"-->
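Taken together, the communication-related tuning described in the big_sys.shtml additions above might translate into something like the following sketch for a hypothetical 4,096-node cluster. The node count, task count, and my_app program are assumptions made purely for illustration; TreeWidth=16 is simply the cube root of 4,096, following the guidance above for systems larger than 2,500 nodes, and PMI_TIME=4000 is the value the text reports using for runs of up to 16,000 tasks.

    # slurm.conf fragment (illustrative values only)
    SlurmdTimeout=120     # longer intervals reduce ping traffic and system noise
    TreeWidth=16          # cube root of the assumed 4,096 nodes
    EpilogMsgTime=2000    # spread epilog-complete messages out to avoid message loss

    # For an MPICH-2 job, allow extra time per PMI key-pair before launching:
    export PMI_TIME=4000
    srun -N4096 -n8192 ./my_app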
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index 8a4d01db2..8a5cc3166 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -5,7 +5,7 @@ <h2>Overview</h2>
<p>This document describes the unique features of SLURM on the
-<a href="http://www.research.ibm.com/bluegene">IBM BlueGene</a> systems.
+<a href="http://www.research.ibm.com/bluegene/">IBM BlueGene</a> systems.
You should be familiar with SLURM's mode of operation on Linux clusters
before studying the relatively few differences in BlueGene operation
described in this document.</p>
@@ -36,12 +36,11 @@ to represent multiples of 1024 (e.g. "2k" is equivalent to "2048").</p>
<h2>User Tools</h2>
-<p>The normal set of SLURM user tools: srun, sbatch, scancel, sinfo, squeue,
-and scontrol
+<p>The normal set of SLURM user tools: sbatch, scancel, sinfo, squeue, and scontrol
provide all of the expected services except support for job steps.
SLURM performs resource allocation for the job, but initiation of
tasks is performed using the <i>mpirun</i> command.
SLURM has no concept of a job step on BlueGene.
-Seven new srun, sbatch options are available:
+Seven new sbatch options are available:
<i>--geometry</i> (specify job size in each dimension),
<i>--no-rotate</i> (disable rotation of geometry),
<i>--conn-type</i> (specify interconnect type between base partitions, mesh or torus).
@@ -54,15 +53,15 @@ to be available.
Note that this is a c-node count.</p>
-<p>To reiterate: srun is used to submit a job script, but mpirun is used to launch the parallel tasks.
-<b>It is highly recommended that the srun <i>--batch</i> option be used to submit a script.</b>
+<p>To reiterate: sbatch is used to submit a job script,
+but mpirun is used to launch the parallel tasks.
Note that a SLURM batch job's default stdout and stderr file names are
generated using the SLURM job ID.
When the SLURM control daemon is restarted, SLURM job ID values can be repeated,
therefore it is recommended that batch jobs explicitly specify unique names
for stdout and stderr files using the srun options <i>--output</i> and
<i>--error</i> respectively.
-While the srun <i>--allocate</i> option may be used to create an interactive SLURM job,
+While the salloc command may be used to create an interactive SLURM job,
it will be the responsibility of the user to ensure that the <i>bgblock</i>
is ready for use before initiating any mpirun commands.
SLURM will assume this responsibility for batch jobs.
@@ -407,7 +406,10 @@ and <i>NodeCardNodeCnt=NODE_COUNT</i> respectively in the <i>bluegene.conf</i>
file (i.e. <i>BasePartitionNodeCnt=512</i> and <i>NodeCardNodeCnt=32</i>).</p>
<p>Note that the <i>Numpsets</i> values defined in
-<i>bluegene.conf</i> are used only when SLURM creates bgblocks.
+<i>bluegene.conf</i> are used only when SLURM creates bgblocks; this
+determines whether the system is IO rich or not. For most BlueGene/L
+systems this value is either 8 (for IO poor systems) or 64 (for IO rich
+systems).
<p>The <i>Images</i> can change during job start based on input from the user.
If you change the bgblock layout, then slurmctld and slurmd should
@@ -491,7 +493,7 @@ AltRamDiskImage=*
Groups=da,adamb
LayoutMode=STATIC
BasePartitionNodeCnt=512
NodeCardNodeCnt=32
-Numpsets=8
+NumPsets=64 # An I/O rich environment
BridgeAPILogFile=/var/log/slurm/bridgeapi.log
BridgeAPIVerbose=0
@@ -629,6 +631,6 @@ scheduling logic, etc. </p>
<p class="footer"><a href="#top">top</a></p>
-<p style="text-align:center;">Last modified 23 January 2007</p>
+<p style="text-align:center;">Last modified 23 April 2008</p>
<!--#include virtual="footer.txt"-->
diff --git a/doc/html/checkpoint_plugins.shtml b/doc/html/checkpoint_plugins.shtml
index 7b1baf6a6..286b631b6 100644
--- a/doc/html/checkpoint_plugins.shtml
+++ b/doc/html/checkpoint_plugins.shtml
@@ -1,6 +1,6 @@
<!--#include virtual="header.txt"-->
-<h1><a name="top">SLURM Job Checkpoint Plugin API</a></h1>
+<h1><a name="top">SLURM Job Checkpoint Plugin Programmer Guide</a></h1>
<h2> Overview</h2>
<p> This document describes SLURM job checkpoint plugins and the API that defines
@@ -13,7 +13,8 @@ The plugins must conform to the SLURM Plugin API with the following specificatio
<p><span class="commandline">const char plugin_type[]</span><br>
The major type must be "checkpoint." The minor type can be any recognizable
-abbreviation for the type of scheduler. We recommend, for example:</p>
+abbreviation for the type of checkpoint mechanism.
+We recommend, for example:</p> <ul> <li><b>aix</b>—AIX system checkpoint.</li> <li><b>none</b>—No job checkpoint.</li> @@ -136,9 +137,10 @@ appropriate value to indicate the reason for failure.</p> <h2>Versioning</h2> -<p> This document describes version 0 of the SLURM checkpoint API. Future -releases of SLURM may revise this API. A scheduler plugin conveys its ability -to implement a particular API version using the mechanism outlined for SLURM plugins.</p> +<p> This document describes version 0 of the SLURM checkpoint API. +Future releases of SLURM may revise this API. +A checkpoint plugin conveys its ability to implement a particular API +version using the mechanism outlined for SLURM plugins.</p> <p class="footer"><a href="#top">top</a></p> <p style="text-align:center;">Last modified 21 August 2007</p> diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in index 9e04a16f4..7239c2fa3 100644 --- a/doc/html/configurator.html.in +++ b/doc/html/configurator.html.in @@ -1,10 +1,11 @@ <!-- Copyright (C) 2005-2007 The Regents of the University of California. +Copyright (C) 2008 Lawrence Livermore National Security. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). Written by Morris Jette <jette1@llnl.gov> and Danny Auble <da@llnl.gov> This file is part of SLURM, a resource management program. -For details, see <http://www.llnl.gov/linux/slurm/>. +For details, see <https://computing.llnl.gov/linux/slurm/>. SLURM is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -19,8 +20,6 @@ details. You should have received a copy of the GNU General Public License along with SLURM; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
- -$Id$ --> <HTML> <HEAD><TITLE>SLURM System Configuration Tool</TITLE> @@ -40,6 +39,27 @@ function get_field2(name,form) return "" } +function get_accounting_storage_type_field(gather, form_storage) +{ + for (var i=0; i < form_storage.length; i++) + { + if (form_storage[i].checked) + { + if(form_storage[i].value == "none" && !(gather == "none")) + { + return "filetxt" + } + else if(!(form_storage[i].value == "none") && gather == "none") + { + return "none" + } + else { + return form_storage[i].value + } + } + } +} + function get_radio_field_skipfirst(name,form) { for (var i=1; i < form.length; i++) @@ -105,10 +125,17 @@ function get_task_plugin_param() return "#TaskPluginParam=" } +function hide_box() +{ + var popup = document.getElementById('out_box'); + popup.style.visibility = 'hidden'; + +} function displayfile() { var printme = "# slurm.conf file generated by configurator.html.<br>" + + "# Put this file on all nodes of your cluster.<br>" + "# See the slurm.conf man page for more information.<br>" + "#<br>" + "ControlMachine=" + document.config.control_machine.value + "<br>" + @@ -116,73 +143,111 @@ function displayfile() get_field("BackupController",document.config.backup_controller) + "<br>" + get_field("BackupAddr",document.config.backup_addr) + "<br>" + "# <br>" + - "#DisableRootJobs=0 <br>" + - "#JobFileAppend=0 <br>" + - "SlurmUser=" + document.config.slurm_user.value + "<br>" + - "SlurmctldPort=" + document.config.slurmctld_port.value + "<br>" + - "SlurmdPort=" + document.config.slurmd_port.value + "<br>" + "AuthType=auth/" + get_radio_value(document.config.auth_type) + "<br>" + + "CacheGroups=" + get_radio_value(document.config.cache_groups) + "<br>" + + "#CheckpointType=checkpoint/none <br>" + + "CryptoType=crypto/" + get_radio_value(document.config.crypto_type) + "<br>" + + "#DisableRootJobs=NO <br>" + + get_field("Epilog",document.config.epilog) + "<br>" + + "#FirstJobId=1 <br>" + get_field("JobCredentialPrivateKey", document.config.private_key) + "<br>" + get_field("JobCredentialPublicCertificate", document.config.public_key) + "<br>" + - "StateSaveLocation=" + document.config.state_save_location.value + "<br>" + - "SlurmdSpoolDir=" + document.config.slurmd_spool_dir.value + "<br>" + - "SwitchType=switch/" + get_radio_value(document.config.switch_type) + "<br>" + + "#JobFileAppend=0 <br>" + + "#JobRequeue=1 <br>" + + "#Licenses=foo*4,bar <br>" + "#MailProg=/bin/mail <br>" + + "#MaxJobCount=5000 <br>" + "MpiDefault=" + get_radio_value(document.config.mpi_default) + "<br>" + - "#MessageTimeout=10 <br>" + - "SlurmctldPidFile=" + document.config.slurmctld_pid_file.value + "<br>" + - "SlurmdPidFile=" + document.config.slurmd_pid_file.value + "<br>" + - "ProctrackType=proctrack/" + get_radio_value(document.config.proctrack_type) + "<br>" + "#PluginDir= <br>" + - "CacheGroups=" + get_radio_value(document.config.cache_groups) + "<br>" + - "#CheckpointType=checkpoint/none <br>" + - "#FirstJobId=1 <br>" + - "ReturnToService=" + get_radio_value(document.config.return_to_service) + "<br>" + - "#MaxJobCount=2000 <br>" + "#PlugStackConfig= <br>" + - "#PropagatePrioProcess= <br>" + + "#PrivateData=0 <br>" + + "ProctrackType=proctrack/" + get_radio_value(document.config.proctrack_type) + "<br>" + + get_field("Prolog",document.config.prolog) + "<br>" + + "#PropagatePrioProcess=0 <br>" + "#PropagateResourceLimits= <br>" + "#PropagateResourceLimitsExcept= <br>" + - get_field("Prolog",document.config.prolog) + "<br>" + - get_field("Epilog",document.config.epilog) + "<br>" + - 
get_field("SrunProlog",document.config.srun_prolog) + "<br>" + + "ReturnToService=" + get_radio_value(document.config.return_to_service) + "<br>" + + "SlurmctldPidFile=" + document.config.slurmctld_pid_file.value + "<br>" + + "SlurmctldPort=" + document.config.slurmctld_port.value + "<br>" + + "SlurmdPidFile=" + document.config.slurmd_pid_file.value + "<br>" + + "SlurmdPort=" + document.config.slurmd_port.value + "<br>" + + "SlurmdSpoolDir=" + document.config.slurmd_spool_dir.value + "<br>" + + "SlurmUser=" + document.config.slurm_user.value + "<br>" + get_field("SrunEpilog",document.config.srun_epilog) + "<br>" + - get_field("TaskProlog",document.config.task_prolog) + "<br>" + + get_field("SrunProlog",document.config.srun_prolog) + "<br>" + + "StateSaveLocation=" + document.config.state_save_location.value + "<br>" + + "SwitchType=switch/" + get_radio_value(document.config.switch_type) + "<br>" + get_field("TaskEpilog",document.config.task_epilog) + "<br>" + "TaskPlugin=task/" + get_radio_value(document.config.task_plugin) + "<br>" + get_task_plugin_param() + "<br>" + + get_field("TaskProlog",document.config.task_prolog) + "<br>" + "#TmpFs=/tmp <br>" + - "#UsePAM= <br>" + + "#TreeWidth= <br>" + + "#UnkillableStepProgram= <br>" + + "#UnkillableStepTimeout= <br>" + + "#UsePAM=0 <br>" + + "# <br>" + "# <br>" + "# TIMERS <br>" + - "SlurmctldTimeout=" + document.config.slurmctld_timeout.value + "<br>" + - "SlurmdTimeout=" + document.config.slurmd_timeout.value + "<br>" + + "#EpilogMsgTime=2000 <br>" + + "#GetEnvTimeout=2 <br>" + + "#HealthCheckInterval=0 <br>" + + "#HealthCheckProgram= <br>" + "InactiveLimit=" + document.config.inactive_limit.value + "<br>" + "MinJobAge=" + document.config.min_job_age.value + "<br>" + "KillWait=" + document.config.kill_wait.value + "<br>" + - "#GetEnvTimeout=2 <br>" + + "#MessageTimeout=10 <br>" + + "SlurmctldTimeout=" + document.config.slurmctld_timeout.value + "<br>" + + "SlurmdTimeout=" + document.config.slurmd_timeout.value + "<br>" + "#UnkillableStepProgram= <br>" + "#UnkillableStepTimeout=60 <br>" + "Waittime=" + document.config.wait_time.value + "<br>" + "# <br>" + - "# SCHEDULING<br>" + + "# <br>" + + "# SCHEDULING <br>" + + "#DefMemPerTask=0 <br>" + + "FastSchedule=" + get_radio_value(document.config.fast_schedule) + "<br>" + + "#MaxMemPerTask=0 <br>" + + "#SchedulerRootFilter=1 <br>" + + "#SchedulerTimeSlice=30 <br>" + "SchedulerType=sched/" + get_radio_value(document.config.sched_type) + "<br>" + get_field("SchedulerPort",document.config.scheduler_port) + "<br>" + - "#SchedulerRootFilter= <br>" + "SelectType=select/" + get_radio_value(document.config.select_type) + "<br>" + get_select_type_params() + "<br>" + - "FastSchedule=" + get_radio_value(document.config.fast_schedule) + "<br>" + "# <br>" + - "# LOGGING <br>" + + "# <br>" + + "# LOGGING AND ACCOUNTING <br>" + + "#AccountingStorageEnforce=0 <br>" + + get_field("AccountingStorageHost",document.config.accounting_storage_host) + "<br>" + + get_field("AccountingStorageLoc",document.config.accounting_storage_loc) + "<br>" + + get_field("AccountingStoragePass",document.config.accounting_storage_pass) + "<br>" + + get_field("AccountingStoragePort",document.config.accounting_storage_port) + "<br>" + + "AccountingStorageType=accounting_storage/" + get_accounting_storage_type_field(get_radio_value(document.config.job_acct_gather_type), document.config.accounting_storage_type) + "<br>" + + get_field("AccountingStorageUser",document.config.accounting_storage_user) + "<br>" + + 
get_field("ClusterName",document.config.cluster_name) + "<br>" + + get_field("JobCompHost",document.config.job_comp_host) + "<br>" + + get_field("JobCompLoc",document.config.job_comp_loc) + "<br>" + + get_field("JobCompPass",document.config.job_comp_pass) + "<br>" + + get_field("JobCompPort",document.config.job_comp_port) + "<br>" + + "JobCompType=jobcomp/" + get_radio_value(document.config.job_comp_type) + "<br>" + + get_field("JobCompUser",document.config.job_comp_user) + "<br>" + + get_field("JobAcctGatherFrequency",document.config.job_acct_gather_frequency) + "<br>" + + "JobAcctGatherType=jobacct_gather/" + get_radio_value(document.config.job_acct_gather_type) + "<br>" + "SlurmctldDebug=" + document.config.slurmctld_debug.value + "<br>" + get_field("SlurmctldLogFile",document.config.slurmctld_logfile) + "<br>" + "SlurmdDebug=" + document.config.slurmd_debug.value + "<br>" + get_field("SlurmdLogFile",document.config.slurmd_logfile) + "<br>" + - "JobCompType=jobcomp/" + get_radio_value(document.config.job_comp_type) + "<br>" + - get_field("JobCompLoc",document.config.job_comp_loc) + "<br>" + - "JobAcctType=jobacct/" + get_radio_value(document.config.job_acct_type) + "<br>" + - get_field("JobAcctLogfile",document.config.job_acct_logfile) + "<br>" + - get_field("JobAcctFrequency",document.config.job_acct_frequency) + "<br>" + + "# <br>" + + "# <br>" + + "# POWER SAVE SUPPORT FOR IDLE NODES (optional) <br>" + + "#SuspendProgram= <br>" + + "#ResumeProgram= <br>" + + "#ResumeRate= <br>" + + "#SuspendExcNodes= <br>" + + "#SuspendExcParts= <br>" + + "#SuspendRate= <br>" + + "#SuspendTime= <br>" + + "# <br>" + "# <br>" + "# COMPUTE NODES <br>" + "NodeName=" + document.config.node_name.value + @@ -198,25 +263,50 @@ function displayfile() " Default=YES"+ " MaxTime=" + document.config.max_time.value + " State=UP" - - document.write(printme); + + //scroll(0,0); + //var popup = document.getElementById('out_box'); + + //popup.innerHTML = "<a href='javascript:hide_box();'>close</a><br>"; + //popup.innerHTML += "#BEGIN SLURM.CONF FILE<br><br>"; + //popup.innerHTML += printme; + //popup.innerHTML += "<br><br>#END SLURM.CONF FILE<br>"; + //popup.innerHTML += "<a href='javascript:hide_box();'>close</a>"; + + //popup.style.visibility = 'visible'; + + // OLD CODE + document.open(); + document.write(printme); + document.close(); } --> </SCRIPT> +<!-- <div style='visibility:hidden;text-align:left;background:#ccc;border:1px solid black;position: absolute;left:100;z-index:1;padding:5;' id='out_box'></div> --> </HEAD> <BODY> <FORM name=config> -<H1>SLURM Configration Tool, Version @SLURM_MAJOR@.@SLURM_MINOR@</H1> +<H1>SLURM Version @SLURM_MAJOR@.@SLURM_MINOR@ Configration Tool</H1> <P>This form can be used to create a SLURM configuration file with -you controlling many of the important configuration parameters. -This tool supports SLURM version @SLURM_MAJOR@.@SLURM_MINOR@. +you controlling many of the important configuration parameters.</P> + +<P><B>This tool supports SLURM version @SLURM_MAJOR@.@SLURM_MINOR@ only.</B> Configuration files for other versions of SLURM should be built using the tool distributed with it in <i>doc/html/configurator.html</i>. Some parameters will be set to default values, but you can manually edit the resulting <I>slurm.conf</I> as desired for greater flexibiilty. See <I>man slurm.conf</I> for more -details about the configuration parameters. 
+details about the configuration parameters.</P> + +<P>Note that while SLURM daemons create log files and other files as needed, +they treat the lack of parent directories as a fatal error. +This prevents the daemons from running if critical file systems are +not mounted and will minimize the risk of cold-starting (starting +without preserving jobs).</P> + +<P>Note that this configuration file must be installed on all nodes +in your cluster.</P> <P>After you have filled in the fields of interest, use the "Submit" button on the bottom of the page to build the <I>slurm.conf</I> @@ -224,9 +314,9 @@ file. It will appear on your web browser. Save the file in text format as <I>slurm.conf</I> for use by SLURM. <P>For more information about SLURM, see -<A HREF="http://www.llnl.gov/linux/slurm">http://www.llnl.gov/linux/slurm</A> +<A HREF="https://computing.llnl.gov/linux/slurm/">https://computing.llnl.gov/linux/slurm/</A> <P> -<A HREF="http://www.llnl.gov/disclaimer.html"><B>Privacy and legal notice</B></A> +<A HREF="https://www.llnl.gov/disclaimer.html"><B>Privacy and legal notice</B></A> <H2>Control Machines</H2> Define the hostname of the computer on which the SLURM controller and @@ -343,17 +433,28 @@ fails by using an interal default set at SLURM build time. <input type="text" name="slurmd_port" value="6818"> <B>SlurmdPort</B> <P> -<H2>Authentication</H2> +<H2>Authentication and Security</H2> Define the method used for authenticating communication between SLURM components.<BR> Select one value for <B>AuthType</B>:<BR> <input type="radio" name="auth_type" value="none"> <B>None</B>: No authentication, not recommended for production use<br> -<input type="radio" name="auth_type" value="authd"> <B>Authd</B>: Brent Chun's authd<BR> -<input type="radio" name="auth_type" value="munge" checked> <B>Munge</B>: LLNL's Munge<BR> -<P> -Define the location of public and private SSL keys used by SLURM. -These need to be generated by the SLURM administrator. -Specify fully qualified pathnames. Both values are required. +<input type="radio" name="auth_type" value="authd"> <B>Authd</B>: Brent Chun's +<A href="http://www.theether.org/authd/">authd</A><BR> +<input type="radio" name="auth_type" value="munge" checked> <B>Munge</B>: LLNL's +<A href="http://home.gna.org/munge/">Munge</A><BR> +<P> +Library used for job step cryptographic signature generation.<BR> +Select one value for <B>CryptoType</B>:<BR> +<input type="radio" name="crypto_type" value="munge"><B>Munge</B>: LLNL's +<A href="http://home.gna.org/munge/">Munge</A> (has GNU General Public License)<BR> +<input type="radio" name="crypto_type" value="openssl" checked> <B>OpenSSL</B>: +<A href="http://www.openssl.org/">OpenSSL</A> (has better performance for signature generation) +<P> +Define the location of public and private keys used by SLURM's +cryptographic signature generation plugin (CryptoType). +These values are only used if CryptoType=OpenSSL. +These files need to be generated by the SLURM administrator. +Specify fully qualified pathnames. <P> <input type="text" name="private_key"> <B>JobCredentialPrivateKey</B> <P> @@ -366,6 +467,7 @@ This should be a fully qualified pathname which can be read and written to by the SLURM user on both the control machine and backup controller (if configured). The location of a directory where slurmd saves state should also be defined. This must be a unique directory on each compute server (local disk). +The use of a highly reliable file system (e.g. RAID) is recommended.
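For example, the key pair referenced by JobCredentialPrivateKey and JobCredentialPublicCertificate can be generated with standard OpenSSL commands along these lines (the pathnames are illustrative only and should be chosen by the site administrator):

    openssl genrsa -out /usr/local/etc/slurm.key 1024
    openssl rsa -in /usr/local/etc/slurm.key -pubout -out /usr/local/etc/slurm.cert

The private key typically needs to be readable only by SlurmUser on the machines running slurmctld, while the public certificate must be readable by slurmd on every compute node.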
<P> <input type="text" name="state_save_location" value="/tmp"> <B>StateSaveLocation</B>: Slurmctld state save directory <B>Must be writable by both ControlMachine and BackupController</B> @@ -388,8 +490,12 @@ Select one value for <B>SchedulerType</B>:<BR> First-Out (FIFO)<BR> <input type="radio" name="sched_type" value="backfill" checked> <B>Backfill</B>: FIFO with backfill<BR> -<input type="radio" name="sched_type" value="wiki"> <B>Wiki</B>: Wiki interface to Maui -(configuration parameters SchedulerAuth and SchedulerPort must specified)<BR> +<input type="radio" name="sched_type" value="gang"> <B>Gang</B>: Gang scheduling +(time-slicing for parallel jobs)<BR> +<input type="radio" name="sched_type" value="wiki"> <B>Wiki</B>: Wiki interface +to Maui (configuration parameter <B>SchedulerPort</B> must specified)<BR> +<input type="radio" name="sched_type" value="wiki2"> <B>Wiki2</B>: Wiki interface +to Moab (configuration parameter <B>SchedulerPort</B> must specified)<BR> <P> <input type="text" name="scheduler_port" value="7321"> <B>SchedulerPort</B>: scheduler communcations port (used by Wiki only) @@ -417,10 +523,14 @@ handling required (InfiniBand, Myrinet, Ethernet, etc.)<BR> Specify the type of MPI to be used by default. SLURM will configure environment variables accordingly. Users can over-ride this specification with an srun option.<BR> Select one value for <B>MpiDefault</B>:<BR> -<input type="radio" name="mpi_default" value="mpichgm"> <B>Mpich-Gm</B><BR> +<input type="radio" name="mpi_default" value="mpichgm"> <B>MPICH-GM</B><BR> +<input type="radio" name="mpi_default" value="mpichmx"> <B>MPICH-MX</B><BR> +<input type="radio" name="mpi_default" value="mpich1_p4"> <B>MPICH1-P4</B><BR> +<input type="radio" name="mpi_default" value="mpich1_shmem"> <B>MPICH1-SHMEM</B>: +This also works for MVAPICH-SHMEM.<BR> <input type="radio" name="mpi_default" value="mvapich"> <B>MVAPICH</B><BR> <input type="radio" name="mpi_default" value="none" checked> <B>None</B>: -This works for most other MPI types including LAM MPI and Open MPI.<BR> ++This works for most other MPI types including MPICH2, LAM MPI and Open MPI.<BR> <P> <H2>Process Tracking</H2> @@ -447,8 +557,7 @@ Aggregates (PAGG) kernel module</A>, recommended where available<BR> Define resource (node) selection algorithm to be used.<BR> Select one value for <B>SelectType</B>:<BR> <input type="radio" name="select_type" value="cons_res"> -<B>Cons_res</B>: Allocate -individual processors and memory<BR> +<B>Cons_res</B>: Allocate individual processors and memory<BR> <DL> <DL> <DT><B>SelectTypeParameters</B> (As used by <I>SelectType=Cons_res</I> only): @@ -457,7 +566,7 @@ individual processors and memory<BR> when CR_CPU or CR_CPU_MEMORY is selected. They are considered to compute the total number of tasks when -n is not specified - <DD> Note: CR_MEMORY assumes Shared=Yes + <DD> Note: CR_MEMORY assumes MaxShare value of one of higher <DT> <input type="radio" name="cons_res_params" value="CR_CPU" checked onClick="javascript:set_select_type(this, 'cons_res')"> <B>CR_CPU</B>: (default) @@ -475,7 +584,7 @@ individual processors and memory<BR> <DT> <input type="radio" name="cons_res_params" value="CR_Memory" onClick="javascript:set_select_type(this)"> <B>CR_Memory</B>: Memory as a consumable resource. 
- <DD> Note: CR_Memory assumes Shared=Yes + <DD> Note: CR_Memory assumes MaxShare value of one or higher <DT> <input type="radio" name="cons_res_params" value="CR_CPU_Memory" onClick="javascript:set_select_type(this)"> <B>CR_CPU_Memory</B>: @@ -566,34 +675,87 @@ log goes to syslog, string "%h" in name gets replaced with hostname) <H2>Job Completion Logging</H2> Define the job completion logging mechanism to be used.<BR> Select one value for <B>JobCompType</B>:<BR> -<input type="radio" name="job_comp_type" value="none" checked> <B>None</B>: No job -completion logging<BR> -<input type="radio" name="job_comp_type" value="filetxt"> <B>FileTxt</B>: Write job -completion status to a text file<BR> -<input type="radio" name="job_comp_type" value="script"> <B>Script</B>: Use an -arbitrary script to log job completion<BR> -<P> -<input type="text" name="job_comp_loc" value=""> <B>JobCompLoc</B>: Location specification. -This is the location of the text file to be written to or the script to be run (depends -upon logging mode). Use a fully qualified pathname. -<P> - -<H2>Job Accounting</H2> +<input type="radio" name="job_comp_type" value="none" checked> <B>None</B>: +No job completion logging<BR> +<input type="radio" name="job_comp_type" value="filetxt"> <B>FileTxt</B>: +Write job completion status to a text file<BR> +<input type="radio" name="job_comp_type" value="script"> <B>Script</B>: +Use an arbitrary script to log job completion<BR> +<input type="radio" name="job_comp_type" value="mysql"> <B>MySQL</B>: +Write completion status to a MySQL database<BR> +<input type="radio" name="job_comp_type" value="pgsql"> <B>PGSQL</B>: +Write completion status to a PostgreSQL database<BR> +<input type="radio" name="job_comp_type" value="slurmdbd"> <B>SlurmDBD</B>: +Write completion status to a Slurm database daemon (serving multiple Slurm clusters) +which will write to some database<BR> +<P> +<input type="text" name="job_comp_loc" value=""> <B>JobCompLoc</B>: +This is the location of the text file to be written to (if JobCompType=filetxt) +or the script to be run (if JobCompType=script) or database name (for other values +of JobCompType). +<p><b>Options below are for use with a database to specify where the database is running and how to connect to it</b><br> +<input type="text" name="job_comp_host" value=""> <B>JobCompHost</B>: +Host on which the database server is running for job completion<br> +<input type="text" name="job_comp_port" value=""> <B>JobCompPort</B>: +Port on which the database server is listening for job completion<br> +<input type="text" name="job_comp_user" value=""> <B>JobCompUser</B>: +User name used to connect to the database for job completion<br> +<input type="text" name="job_comp_pass" value=""> <B>JobCompPass</B>: +Password used to connect to the database for job completion<br> +<P> + +<H2>Job Accounting Gather</H2> SLURM accounts for resource use per job.
System specifics can be polled as determined by system type<BR> -Select one value for <B>JobAcctType</B>:<BR> -<input type="radio" name="job_acct_type" value="none" checked> <B>None</B>: No +Select one value for <B>JobAcctGatherType</B>:<BR> +<input type="radio" name="job_acct_gather_type" value="none" checked> <B>None</B>: No job accounting<BR> -<input type="radio" name="job_acct_type" value="aix"> <B>AIX</B>: Specifc +<input type="radio" name="job_acct_gather_type" value="aix"> <B>AIX</B>: Specific AIX process table information gathered, use with AIX systems only<BR> -<input type="radio" name="job_acct_type" value="linux"> <B>Linux</B>: Specifc +<input type="radio" name="job_acct_gather_type" value="linux"> <B>Linux</B>: Specific Linux process table information gathered, use with Linux systems only<BR> -<input type="text" name="job_acct_frequency" value=""> <B>JobAcctFrequency</B>: -polling interval in seconds.<BR> -<input type="text" name="job_acct_logfile" value=""> <B>JobAcctLogFile</B>: -Location specification. +<input type="text" name="job_acct_gather_frequency" value="30"> <B>JobAcctGatherFrequency</B>: +polling interval in seconds. Zero disables periodic sampling.<BR> +<P> + +<H2>Job Accounting Storage</H2> +Used with Job Accounting Gather, SLURM can store the accounting information in many different fashions. Fill in your system's choice here<BR> +Select one value for <B>AccountingStorageType</B>:<BR> +<input type="radio" name="accounting_storage_type" value="none" checked> <B>None</B>: +No job accounting storage<BR> +<input type="radio" name="accounting_storage_type" value="filetxt"> <B>FileTxt</B>: +Write job accounting to a text file<BR> +<input type="radio" name="accounting_storage_type" value="gold"> <B>Gold</B>: +Write completion status to the Gold database daemon which can securely +save the data from many Slurm managed clusters into a common database<BR> +<input type="radio" name="accounting_storage_type" value="mysql"> <B>MySQL</B>: +Write job accounting to a MySQL database<BR> +<input type="radio" name="accounting_storage_type" value="pgsql"> <B>PGSQL</B>: +Write job accounting to a PostgreSQL database<BR> +<input type="radio" name="accounting_storage_type" value="slurmdbd"> <B>SlurmDBD</B>: +Write job accounting to Slurm DBD (database daemon) which can securely +save the data from many Slurm managed clusters into a common database<BR> +<input type="text" name="accounting_storage_loc" value=""> <B>AccountingStorageLoc</B>: +Location specification or database name. This is the location of the text file to be written to (used by Log only). -Use a fully qualified pathname. +Use a fully qualified pathname. If using a database, it is the name of the database you will use or create for the stored data.<br> +<p><b>Options below are for use with a database to specify where the database is running and how to connect to it</b><br> +<input type="text" name="accounting_storage_host" value=""> <B>AccountingStorageHost</B>: +Host on which the database server is running for job accounting<br> +<input type="text" name="accounting_storage_port" value=""> <B>AccountingStoragePort</B>: +Port on which the database server is listening for job accounting<br> +<input type="text" name="accounting_storage_user" value=""> <B>AccountingStorageUser</B>: +User name used to connect to the database for job accounting<br> +<input type="text" name="accounting_storage_pass" value=""> <B>AccountingStoragePass</B>: +Password used to connect to the database for job accounting.
+In the case of SlurmDBD, this will be an alternate socket name for use with a Munge +daemon providing enterprise-wide authentication (while the default Munge socket +would provide cluster-wide authentication only).<br> +<input type="text" name="cluster_name" value="cluster"> <B>ClusterName</B>: +Name to be recorded in database for jobs from this cluster. +This is important if a single database is used to record information +from multiple Slurm-managed clusters.<br> + <P> <H2>Process ID Logging</H2> @@ -643,12 +805,12 @@ before terminating all remaining tasks. A value of zero indicates unlimited wait <BR> <BR> -<input type=submit value="Submit" onClick="javascript:displayfile()"> +<input type=button value="Submit" onClick="javascript:displayfile()"> <input type=reset value="Reset Form"> <P> </FORM> <HR> -<p class="footer">UCRL-WEB-225274<br> -Last modified 2 July 2007</p> +<P class="footer">LLNL-WEB-402631<BR> +Last modified 1 April 2008</P> </BODY> diff --git a/doc/html/cons_res.shtml b/doc/html/cons_res.shtml index 878f9fe66..ae4f02293 100644 --- a/doc/html/cons_res.shtml +++ b/doc/html/cons_res.shtml @@ -203,6 +203,7 @@ Please send comments and requests about the consumable resources to hold until hydra12 becomes available or if backfill is enabled until hydra12's remaining CPU gets allocated to another job which will allow the 4th job to get two dedicated nodes</li> + <li><b>Note!</b> This problem is fixed in SLURM version 1.3.</li> <li><b>Note!</b> If you want to specify <i>--max_????</i> this problem can be solved in the current implementation by asking for the nodes in dedicated mode using <i>--exclusive</i></li>. diff --git a/doc/html/cons_res_share.shtml b/doc/html/cons_res_share.shtml new file mode 100644 index 000000000..66715e41d --- /dev/null +++ b/doc/html/cons_res_share.shtml @@ -0,0 +1,222 @@ +<!--#include virtual="header.txt"--> + +<h1><a name="top">Sharing Consumable Resources</a></h1> + +<H3>CPU Management</H3> +<P> +(Disclaimer: In this "CPU Management" section, the term "consumable resource" +does not include memory. The management of memory as a consumable resource is +discussed in it's own section below.) +</P><P> +As of SLURM version 1.3, the <CODE>select/cons_res</CODE> plugin +supports sharing consumable resources via the per-partition <CODE>Shared</CODE> +setting. Previously the <CODE>select/cons_res</CODE> plugin ignored this +setting, since it was technically already "sharing" the nodes when it scheduled +the resources of each node to different jobs. +</P> +<P> +Now the per-partition <CODE>Shared</CODE> setting applies to the <U>entity +being selected for scheduling</U>: +<UL><LI><P> +When the default <CODE>select/linear</CODE> plugin is enabled, the +per-partition <CODE>Shared</CODE> setting controls whether or not the +<B>nodes</B> are shared among jobs. +</P></LI><LI><P> +When the <CODE>select/cons_res</CODE> plugin is enabled, the per-partition +<CODE>Shared</CODE> setting controls whether or not the <B>configured consumable +resources</B> are shared among jobs. When a consumable resource such as a core, +socket, or CPU is shared, it means that more than one job can be assigned to it. 
+</P></LI></UL> +</P> +<P> +The following table describes this new functionality in more detail: +</P> +<TABLE CELLPADDING=3 CELLSPACING=1 BORDER=1> +<TR><TH>Selection Setting</TH> +<TH>Per-partition <CODE>Shared</CODE> Setting</TH> +<TH>Resulting Behavior</TH> +</TR><TR> +<TD ROWSPAN=3>SelectType=<B>select/linear</B></TD> +<TD>Shared=NO</TD> +<TD>Whole nodes are allocated to jobs. No node will run more than one job.</TD> +</TR><TR> +<TD>Shared=YES</TD> +<TD>Same as Shared=FORCE if job request specifies --shared option. +Otherwise same as Shared=NO.</TD> +</TR><TR> +<TD>Shared=FORCE</TD> +<TD>Whole nodes are allocated to jobs. A node may run more than one job.</TD> +</TR><TR> +<TD ROWSPAN=3>SelectType=<B>select/cons_res</B><BR> +Plus one of the following:<BR> +SelectTypeParameters=<B>CR_Core</B><BR> +SelectTypeParameters=<B>CR_Core_Memory</B></TD> +<TD>Shared=NO</TD> +<TD>Cores are allocated to jobs. No core will run more than one job.</TD> +</TR><TR> +<TD>Shared=YES</TD> +<TD>Allocate whole nodes if job request specifies --exclusive option. +Otherwise same as Shared=FORCE.</TD> +</TR><TR> +<TD>Shared=FORCE</TD> +<TD>Cores are allocated to jobs. A core may run more than one job.</TD> +</TR><TR> +<TD ROWSPAN=3>SelectType=<B>select/cons_res</B><BR> +Plus one of the following:<BR> +SelectTypeParameters=<B>CR_CPU</B><BR> +SelectTypeParameters=<B>CR_CPU_Memory</B></TD> +<TD>Shared=NO</TD> +<TD>CPUs are allocated to jobs. No CPU will run more than one job.</TD> +</TR><TR> +<TD>Shared=YES</TD> +<TD>Allocate whole nodes if job request specifies --exclusive option. +Otherwise same as Shared=FORCE.</TD> +</TR><TR> +<TD>Shared=FORCE</TD> +<TD>CPUs are allocated to jobs. A CPU may run more than one job.</TD> +</TR><TR> +<TD ROWSPAN=3>SelectType=<B>select/cons_res</B><BR> +Plus one of the following:<BR> +SelectTypeParameters=<B>CR_Socket</B><BR> +SelectTypeParameters=<B>CR_Socket_Memory</B></TD> +<TD>Shared=NO</TD> +<TD>Sockets are allocated to jobs. No socket will run more than one job.</TD> +</TR><TR> +<TD>Shared=YES</TD> +<TD>Allocate whole nodes if job request specifies --exclusive option. +Otherwise same as Shared=FORCE.</TD> +</TR><TR> +<TD>Shared=FORCE</TD> +<TD>Sockets are allocated to jobs. A socket may run more than one job.</TD> +</TR> +</TABLE> +<P>When <CODE>Shared=FORCE</CODE> is configured, the consumable resources are +scheduled for jobs using a <B>least-loaded</B> algorithm. Thus, idle +CPUs|cores|sockets will be allocated to a job before busy ones, and +CPUs|cores|sockets running one job will be allocated to a job before ones +running two or more jobs. This is the same approach that the +<CODE>select/linear</CODE> plugin uses when allocating "shared" nodes. +</P> +<P> +Note that the <B>granularity</B> of the "least-loaded" algorithm is what +distinguishes the two selection plugins (<CODE>cons_res</CODE> and +<CODE>linear</CODE>) when <CODE>Shared=FORCE</CODE> is configured. With the +<CODE>select/cons_res</CODE> plugin enabled, the CPUs of a node are not +overcommitted until <B>all</B> of the rest of the CPUs are overcommitted on the +other nodes. Thus if one job allocates half of the CPUs on a node and then a +second job is submitted that requires more than half of the CPUs, the +<CODE>select/cons_res</CODE> plugin will attempt to place this new job on other +busy nodes that have more than half of the CPUs available for use. The +<CODE>select/linear</CODE> plugin simply counts jobs on nodes, and does not +track the CPU usage on each node. 
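As a concrete sketch of the behavior described above (the partition and node names are illustrative, not defaults), core-level sharing with the consumable resource plugin could be configured with slurm.conf entries such as:

    SelectType=select/cons_res
    SelectTypeParameters=CR_Core_Memory
    PartitionName=batch Nodes=tux[0-31] Shared=FORCE Default=YES State=UP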
+</P><P> +This new functionality also supports the new +<CODE>Shared=FORCE:<num></CODE> syntax. If <CODE>Shared=FORCE:3</CODE> is +configured with <CODE>select/cons_res</CODE> and <CODE>CR_Core</CODE> or +<CODE>CR_Core_Memory</CODE>, then the <CODE>select/cons_res</CODE> plugin will +run up to 3 jobs on each <U>core</U> of each node in the partition. If +<CODE>CR_Socket</CODE> or <CODE>CR_Socket_Memory</CODE> is configured, then the +<CODE>select/cons_res</CODE> plugin will run up to 3 jobs on each <U>socket</U> +of each node in the partition. +</P> +<H3>Nodes in Multiple Partitions</H3> +<P> +SLURM has supported configuring nodes in more than one partition since version +0.7.0. The <CODE>Shared=FORCE</CODE> support in the <CODE>select/cons_res</CODE> +plugin accounts for this "multiple partition" support. Here are several +scenarios with the <CODE>select/cons_res</CODE> plugin enabled to help +understand how all of this works together: +</P> +<TABLE CELLPADDING=3 CELLSPACING=1 BORDER=1> +<TR><TH>SLURM configuration</TH> +<TH>Resulting Behavior</TH> +</TR><TR> +<TD>Two <CODE>Shared=NO</CODE> partitions assigned the same set of nodes</TD> +<TD>Jobs from either partition will be assigned to all available consumable +resources. No consumable resource will be shared. One node could have 2 jobs +running on it, and each job could be from a different partition.</TD> +</TR><TR> +<TD>Two partitions assigned the same set of nodes: one partition is +<CODE>Shared=FORCE</CODE>, and the other is <CODE>Shared=NO</CODE></TD> +<TD>A node will only run jobs from one partition at a time. If a node is +running jobs from the <CODE>Shared=NO</CODE> partition, then none of it's +consumable resources will be shared. If a node is running jobs from the +<CODE>Shared=FORCE</CODE> partition, then it's consumable resources can be +shared.</TD> +</TR><TR> +<TD>Two <CODE>Shared=FORCE</CODE> partitions assigned the same set of nodes</TD> +<TD>Jobs from either partition will be assigned consumable resources. All +consumable resources can be shared. One node could have 2 jobs running on it, +and each job could be from a different partition.</TD> +</TR><TR> +<TD>Two partitions assigned the same set of nodes: one partition is +<CODE>Shared=FORCE:3</CODE>, and the other is <CODE>Shared=FORCE:5</CODE></TD> +<TD>Generally the same behavior as above. However no consumable resource will +ever run more than 3 jobs from the first partition, and no consumable resource +will ever run more than 5 jobs from the second partition. A consumable resource +could have up to 8 jobs running on it at one time.</TD> +</TR> +</TABLE> +<P> +Note that the "mixed shared setting" configuration (row #2 above) introduces the +possibility of <B>starvation</B> between jobs in each partition. If a set of +nodes are running jobs from the <CODE>Shared=NO</CODE> partition, then these +nodes will continue to only be available to jobs from that partition, even if +jobs submitted to the <CODE>Shared=FORCE</CODE> partition have a higher +priority. This works in reverse also, and in fact it's easier for jobs from the +<CODE>Shared=FORCE</CODE> partition to hold onto the nodes longer because the +consumable resource "sharing" provides more resource availability for new jobs +to begin running "on top of" the existing jobs. This happens with the +<CODE>select/linear</CODE> plugin also, so it's not specific to the +<CODE>select/cons_res</CODE> plugin. 
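The overlapping-partition case in the last row of the table above might look like the following in slurm.conf, assuming CR_Core or CR_Core_Memory is configured (names and counts are examples only):

    PartitionName=low  Nodes=tux[0-15] Shared=FORCE:3 State=UP
    PartitionName=high Nodes=tux[0-15] Shared=FORCE:5 State=UP

Each core could then run up to three jobs from the "low" partition and up to five jobs from the "high" partition concurrently.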
+</P> + +<H3>Memory Management</H3> +<P> +The management of memory as a consumable resource remains unchanged: +</P> +<TABLE CELLPADDING=3 CELLSPACING=1 BORDER=1> +<TR><TH>Selection Setting</TH> +<TH>Resulting Behavior</TH> +</TR><TR> +<TD>SelectType=<B>select/linear</B></TD> +<TD>Memory allocation is not tracked. Jobs are allocated to nodes without +considering if there is enough free memory. Swapping could occur!</TD> +</TR><TR> +<TD>SelectType=<B>select/cons_res</B><BR> +Plus one of the following:<BR> +SelectTypeParameters=<B>CR_Core</B><BR> +SelectTypeParameters=<B>CR_CPU</B><BR> +SelectTypeParameters=<B>CR_Socket</B></TD> +<TD>Memory allocation is not tracked. Jobs are allocated to consumable resources +without considering if there is enough free memory. Swapping could occur!</TD> +</TR><TR> +<TD>SelectType=<B>select/cons_res</B><BR> +Plus one of the following:<BR> +SelectTypeParameters=<B>CR_Memory</B><BR> +SelectTypeParameters=<B>CR_Core_Memory</B><BR> +SelectTypeParameters=<B>CR_CPU_Memory</B><BR> +SelectTypeParameters=<B>CR_Socket_Memory</B></TD> +<TD>Memory allocation for all jobs is tracked. Nodes that do not have enough +available memory to meet the job's memory requirement will not be allocated to +the job.</TD> +</TR> +</TABLE> +<P>Note that the <CODE>srun --mem=<num></CODE> option is only used to +request nodes that have <num> amount of real memory. This option does not +compute memory that is currently available. +</P><P> +The <CODE>srun --job-mem=<num></CODE> option is used with the +<CODE>select/cons_res</CODE> plugin to request available memory from each node. +</P><P> +The <CODE>select/cons_res</CODE> plugin tracks memory usage by each job on each +node regardless of the number of partitions a node may be assigned to. The primary +purpose of tracking memory as a consumable resource is to protect jobs from +having their memory pages swapped out because the memory has been overcommitted. +</P> + +<p class="footer"><a href="#top">top</a></p> + +<p style="text-align:center;">Last modified 27 May 2008</p> + +<!--#include virtual="footer.txt"--> diff --git a/doc/html/crypto_plugins.shtml b/doc/html/crypto_plugins.shtml new file mode 100644 index 000000000..6de9f1512 --- /dev/null +++ b/doc/html/crypto_plugins.shtml @@ -0,0 +1,151 @@ +<!--#include virtual="header.txt"--> + +<h1><a name="top">SLURM Cryptographic Plugin Programmer Guide</a></h1> + +<h2> Overview</h2> +<p> This document describes SLURM cryptographic plugins and the API that +defines them. +It is intended as a resource to programmers wishing to write their own +SLURM cryptographic plugins. +This is version 0 of the API.</p> + +<p>SLURM cryptographic plugins are SLURM plugins that implement +a digital signature mechanism. +The slurmctld daemon generates a job step credential, signs it, +and transmits it to an srun program. +The srun program then transmits it to the slurmd daemons directly. +The slurmctld daemon does not communicate directly with the slurmd +daemons at this time for performance reasons, but the job step +credential must be validated by the slurmd daemon as being +generated by the slurmctld daemon. +Digital signatures provide this validation mechanism. +The plugins must conform to the SLURM Plugin API with the following +specifications:</p> + +<p><span class="commandline">const char plugin_type[]</span><br> +The major type must be "crypto." +The minor type can be any recognizable abbreviation for the type of +cryptographic mechanism.
+We recommend, for example:</p> +<ul> +<li><b>munge</b>—LLNL's Munge system.</li> +<li><b>openssl</b>—Open SSL.</li> +</ul></p> + +<p>The <span class="commandline">plugin_name</span> and +<span class="commandline">plugin_version</span> +symbols required by the SLURM Plugin API require no specialization for +cryptographic support. +Note carefully, however, the versioning discussion below.</p> + +<h2>Data Objects</h2> +<p>The implementation must maintain (though not necessarily directly export) an +enumerated <span class="commandline">errno</span> to allow SLURM to discover +as practically as possible the reason for any failed API call. +Plugin-specific enumerated integer values may be used when appropriate. + +<p>These values must not be used as return values in integer-valued +functions in the API. +The proper error return value from integer-valued functions is SLURM_ERROR. +The implementation should endeavor to provide useful and pertinent +information by whatever means is practical. +Successful API calls are not required to reset any errno to a known value. +However, the initial value of any errno, prior to any error condition +arising, should be SLURM_SUCCESS. </p> +<p class="footer"><a href="#top">top</a></p> + +<h2>API Functions</h2> +<p>The following functions must appear. +Functions which are not implemented should be stubbed.</p> + + +<p class="commandline">void * crypto_read_private_key (const char *path);</p> +<p style="margin-left:.2in"><b>Description</b>: Generate a private key +based upon the contents of the supplied file.</p> +<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline">path</span> + (input) fully-qualified pathname to the private key +as specified by the <b>JobCredentialPrivateKey</b> configuration parameter.</p> +<p style="margin-left:.2in"><b>Returns</b>: The pointer to a key on +success or NULL on failure. +Call crypto_destroy_key() to release memory associated with this key.</p> + + +<p class="commandline">void * crypto_read_public_key (const char *path);</p> +<p style="margin-left:.2in"><b>Description</b>: Generate a public key +based upon the contents of the supplied file.</p> +<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline">path</span> + (input) fully-qualified pathname to the public key +as specified by the <b>JobCredentialPublicCertificate</b> configuration +parameter.</p> +<p style="margin-left:.2in"><b>Returns</b>: The pointer to a key on +success or NULL on failure. 
+Call crypto_destroy_key() to release memory associated with this key.</p> + + +<p class="commandline">void crypto_destroy_key (void *key);</p> +<p style="margin-left:.2in"><b>Description</b>: Release storage for +a public or private key.</p> +<p style="margin-left:.2in"><b>Argument</b>:<span class="commandline"> key</span> + (input/output) pointer to the key previously allocated +by crypto_read_private_key() or crypto_read_public_key().</p> + + +<p class="commandline">char *crypto_str_error(void);</p> +<p style="margin-left:.2in"><b>Description</b>: Return a string +describing the last error generated by the cryptographic software.</p> +<p style="margin-left:.2in"><b>Returns</b>: A pointer to a string.</p> + +<p class="commandline">int crypto_sign (void *key, char *buffer, int buf_size, +char **sig_pp, unsigned int *sig_size_p);</p> +<p style="margin-left:.2in"><b>Description</b>: Generate a signature for +the supplied buffer.</p> +<p style="margin-left:.2in"><b>Arguments</b>:<br> +<span class="commandline"> key</span> + (input) pointer to the key previously generated by +crypto_read_private_key() or crypto_read_public_key().<br> +<span class="commandline"> buffer</span> (input) data to +be signed.<br> +<span class="commandline"> buf_size</span> (input) +size of buffer, in bytes.<br> +<span class="commandline"> sig_pp</span> (input/output) +Location in which to store the signature. NOTE: The storage for +sig_pp should be allocated using xmalloc() and will be freed by +the caller using xfree().<br> +<span class="commandline"> sig_size_p</span> (input/output) +Location in which to store the size of the signature (sig_pp).</p> +<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. +On failure, the plugin should return SLURM_ERROR and set the errno to an +appropriate value to indicate the reason for failure.</p> + +<p class="commandline">int crypto_verify_sign (void *key, char *buffer, +int buf_size, char *signature, unsigned int sig_size);</p> +<p style="margin-left:.2in"><b>Description</b>: Verify that a signature is +valid for the supplied buffer.</p> +<p style="margin-left:.2in"><b>Arguments</b>:<br> +<span class="commandline"> key</span> + (input) pointer to the key previously generated by +crypto_read_private_key() or crypto_read_public_key().<br> +<span class="commandline"> buffer</span> (input) data +previously signed by crypto_sign().<br> +<span class="commandline"> buf_size</span> (input) +size of buffer, in bytes.<br> +<span class="commandline"> signature</span> (input) +Signature as returned in sig_pp by the crypto_sign() function and +to be confirmed.<br> +<span class="commandline"> sig_size</span> (input) +Size of the signature as returned in sig_size_p by crypto_sign().</p> +<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. +On failure, the plugin should return SLURM_ERROR and set the errno to an +appropriate value to indicate the reason for failure.</p> + + +<h2>Versioning</h2> +<p> This document describes version 0 of the SLURM cryptographic API. +Future releases of SLURM may revise this API.
+A cryptographic plugin conveys its ability to implement a particular +API version using the mechanism outlined for SLURM plugins.</p> +<p class="footer"><a href="#top">top</a></p> + +<p style="text-align:center;">Last modified 24 July 2007</p> + +<!--#include virtual="footer.txt"--> diff --git a/doc/html/documentation.shtml b/doc/html/documentation.shtml index f386b731a..de8b51111 100644 --- a/doc/html/documentation.shtml +++ b/doc/html/documentation.shtml @@ -2,14 +2,12 @@ <h1> Documentation</h1> +Also see <a href="publications.html">Publications and Presentations</a>. + <h2>SLURM Users</h2> <ul> <li><a href="quickstart.shtml">Quick Start User Guide</a></li> -<li><a href="lci.7.tutorial.pdf">Resource Management Using SLURM, -(Tutorial, The 7th International Conference on Linux Clusters, May 2006)</li> -<li><a href="slurm_v1.2.pdf">Resource Management at LLNL, SLURM Version 1.2 -(April 2007)</li> -<li><a href="http://www.llnl.gov/LCdocs/slurm">SLURM Reference Manual</a></li> +<li><a href="https://computing.llnl.gov/LCdocs/slurm/">SLURM Reference Manual</a></li> <li><a href="mc_support.shtml">Support for Multi-core/Multi-threaded Architectures</a></li> <li><a href="quickstart.shtml#mpi">Guide to MPI Use</a></li> <li><a href="bluegene.shtml">Blue Gene User and Administrator Guide</a></li> @@ -19,19 +17,20 @@ <h2>SLURM Administrators</h2> <ul> <li><a href="quickstart_admin.shtml">Quick Start Administrator Guide</a></li> +<li><a href="configurator.html">Configuration Tool</a></li> <li><a href="troubleshoot.shtml">Troubleshooting Guide</a></li> -<li><a href="lci.7.tutorial.pdf">Resource Management Using SLURM, -(Tutorial, The 7th International Conference on Linux Clusters, May 2006)</li> <li><a href="big_sys.shtml">Large Cluster Administration Guide</a></li> -<li><a href="power_save.shtml">Power Saving Guide</a></li> <li><a href="cons_res.shtml">Consumable Resources Guide</a></li> -<li><a href="bluegene.shtml">Blue Gene User and Administrator Guide</a></li> -<li><a href="ibm.shtml">IBM AIX User and Administrator Guide</a></li> -<li><a href="configurator.html">Configuration Tool</a></li> +<li><a href="cons_res_share.shtml">Sharing Consumable Resources</a></li> +<li><a href="accounting.shtml">Accounting</a></li> +<li><a href="gang_scheduling.shtml">Gang Scheduling</a></li> +<li><a href="preempt.shtml">Preemption</a></li> <li><a href="maui.shtml">Maui Scheduler Integration Guide</a></li> <li><a href="moab.shtml">Moab Cluster Suite Integration Guide</a></li> -<li><a href="http://docs.hp.com/en/5991-4847/ch09s02.html">Submitting -Jobs throuh LSF</a></li> +<li><a href="http://docs.hp.com/en/5991-4847/ch09s02.html">Submitting Jobs through LSF</a></li> +<li><a href="bluegene.shtml">Blue Gene User and Administrator Guide</a></li> +<li><a href="ibm.shtml">IBM AIX User and Administrator Guide</a></li> +<li><a href="power_save.shtml">Power Saving Guide</a></li> </ul> <h2>SLURM Developers</h2> @@ -40,17 +39,19 @@ Jobs throuh LSF</a></li> <li><a href="api.shtml">Application Programmer Interface (API) Guide</a></li> <li><a href="plugins.shtml">Plugin Programmer Guide</a></li> <li><a href="authplugins.shtml">Authentication Plugin Programmer Guide</a></li> -<li><a href="jobacctplugins.shtml">Job Accounting Plugin Programmer Guide</a></li> +<li><a href="crypto_plugins.shtml">Cryptographic Plugin Programmer Guide</a></li> +<li><a href="jobacct_gatherplugins.shtml">Job Accounting Gather Plugin Programmer Guide</a></li> +<li><a href="jobacct_storageplugins.shtml">Job Accounting Storage Plugin Programmer
Guide</a></li> <li><a href="checkpoint_plugins.shtml">Job Checkpoint Plugin Programmer Guide</a></li> <li><a href="jobcompplugins.shtml">Job Completion Logging Plugin Programmer Guide</a></li> +<li><a href="mpiplugins.shtml">MPI Plugin Programmer Guide</a></li> <li><a href="proctrack_plugins.shtml">Process Tracking Plugin Programmer Guide</a></li> -<li><a href="selectplugins.shtml">Node Selection Plugin Programmer Guide</a></li> <li><a href="schedplugins.shtml">Scheduler Plugin Programmer Guide</a></li> +<li><a href="selectplugins.shtml">Node Selection Plugin Programmer Guide</a></li> <li><a href="switchplugins.shtml">Switch (Interconnect) Plugin Programmer Guide</a></li> <li><a href="taskplugins.shtml">Task Plugin Programmer Guide</a></li> -<li><a href="mpiplugins.shtml">MPI Plugin Programmer Guide</a></li> </ul> -<p style="text-align:center;">Last modified 12 June 2007</p> +<p style="text-align:center;">Last modified 28 April 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/download.shtml b/doc/html/download.shtml index 2b5684af8..8c7208c18 100644 --- a/doc/html/download.shtml +++ b/doc/html/download.shtml @@ -3,25 +3,11 @@ <h1>Download</h1> <p> SLURM source can be downloaded from <br> -<a href="ftp://ftp.llnl.gov/pub/linux/slurm/"> -ftp://ftp.llnl.gov/pub/linux/slurm</a> and <br> <a href="https://sourceforge.net/projects/slurm/"> https://sourceforge.net/projects/slurm/</a><br> -There is also a Debian package repository at <br> -<a href="http://www.na.icar.cnr.it/~oliva/debs/slurm-llnl/"> -http://www.na.icar.cnr.it/~oliva/debs/slurm-llnl/</a><br> -The latest stable release is version 1.2.</p> - -<p><b>NOTE:</b> The LLNL FTP server has strict firewall restrictions. -Depending upon your firewall configuration, the use -of ftp passive mode may be required (use the "-p" option for the -ftp command, e.g. "ftp -p ftp.llnl.gov"; web browsers may also -require setting the ftp preference to passive mode).<br> -<b>NOTE:</b> This server will only download files to computers -on which it can perform a reverse DNS lookup for the IP address -issuing the request.<br> -These requirement are part of a LLNL site-wide policy for security -purposes that we have no control over.</p> +There is also a Debian package named <i>slurm-llnl</i> available at <br> +<a href="http://www.debian.org/">http://www.debian.org/</a><br> +The latest stable release of SLURM is version 1.3.</p> <p> Other software available for download includes <ul> @@ -30,22 +16,17 @@ Pluggable Authentication Module (PAM) for restricting access to compute nodes where SLURM performs resource management. Access to the node is restricted to user root and users who have been allocated resources on that node. <br> pam_slurm is available for download from <br> -<a href="ftp://ftp.llnl.gov/pub/linux/pam_slurm/"> -ftp://ftp.llnl.gov/pub/linux/pam_slurm</a> and <br> <a href="https://sourceforge.net/projects/slurm/"> -https://sourceforge.net/projects/slurm/</a><br> +https://sourceforge.net/projects/slurm/</a><br> or use the +<a href="http://www.debian.org/">Debian</a> package +named <i>libpam-slurm</i>.<br> The latest stable release is version 1.4.</p> </ul> <h1>Related Software</h1> <ul> -<li><b>OpenSSL</b> is required for secure communications between SLURM -components. Download it from -<a href="http://www.openssl.org/">http://www.openssl.org/</a>. 
-</li> - -<li>Authentication plugins</li> +<li>Authentication plugins identify the user originating a message.</li> <ul> <li><b>Munge</b><br> In order to compile the "auth/munge" authentication plugin for SLURM, you will need @@ -54,7 +35,27 @@ to build and install Munge, available from </li> </ul> -<li>Interconnect plugins</li> +<li>Databases can be used to store accounting information. +See our <a href="accounting.html">Accounting</a> web page for more information.</li> +<ul> +<li><a href="http://www.clusterresources.com/pages/products/gold-allocation-manager.php">Gold</a></li> +<li><a href="http://www.mysql.com/">MySQL</a></li> +<li><a href="http://www.postgresql.org/">PostgreSQL</a></li> +</ul> + +<li>Digital signatures (Crypto plugin) are used to ensure messages are not altered.</li> +<ul> +<li><b>OpenSSL</b><br> +OpenSSL is recommended for generation of digital signatures. +Download it from <a href="http://www.openssl.org/">http://www.openssl.org/</a>.</li> +<li><b>Munge</b><br> +Munge can be used as an alternative to OpenSSL. +Munge is available under the GNU General Public License, but is slower than OpenSSL +for the generation of digital signatures. Munge is available from +<a href="http://home.gna.org/munge/">http://home.gna.org/munge/</a>.</li> +</ul> + +<li>Interconnect plugins (Switch plugin)</li> <ul> <li><b>QsNet</b><br> In order to build the "switch/elan" plugin for SLURM, you will need @@ -63,7 +64,8 @@ the <b>qsnetlibs</b> development libraries from plugin also requires the <b>libelanhosts</b> library and a corresponding /etc/elanhosts configuration file, used to map hostnames to Elan IDs.
The libelanhosts source is available from <li><a href="http://www.hp.com/go/mpi">HP-MPI</a></li> <li><a href="http://www.lam-mpi.org/">LAM/MPI</a></li> <li><a href="http://www-unix.mcs.anl.gov/mpi/mpich1/">MPICH1</a></li> -<li><a href="http://www-unix.mcs.anl.gov/mpi/mpich2/">MPICH2</a></li> +<li><a href="http://www.mcs.anl.gov/research/projects/mpich2/">MPICH2</a></li> <li><a href="http://www.myri.com/scs/download-mpichgm.html">MPICH-GM</a></li> <li><a href="http://www.myri.com/scs/download-mpichmx.html">MPICH-MX</a></li> -<li><a href="http://nowlab.cse.ohio-state.edu/projects/mpi-iba">MVAPICH</a></li> +<li><a href="http://mvapich.cse.ohio-state.edu/">MVAPICH</a></li> <li><a href="http://nowlab.cse.ohio-state.edu/projects/mpi-iba">MVAPICH2</a></li> <li><a href="http://www.open-mpi.org">Open MPI</a></li> <li><a href="http://www.quadrics.com/">Quadrics MPI</a></li> </ul> -<li>Schedulers</li> +<li>Schedulers offering greater control over the workload</li> <ul> <li><a href="http://www.platform.com/">Load Sharing Facility (LSF)</a></li> <li><a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php"> @@ -92,12 +94,12 @@ Moab Cluster Suite</a></li> <li>Task Affinity plugins</li> <ul> -<li><a href="http://www.open-mpi.org/software/plpa"> +<li><a href="http://www.open-mpi.org/software/plpa/"> Portable Linux Processor Affinity (PLPA)</a></li> </ul> </ul> -<p style="text-align:center;">Last modified 15 June 2007</p> +<p style="text-align:center;">Last modified 28 March 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml index 0b931757b..ac6d62e6f 100644 --- a/doc/html/faq.shtml +++ b/doc/html/faq.shtml @@ -10,7 +10,6 @@ to run on nodes?</a></li> <li><a href="#purge">Why is my job killed prematurely?</a></li> <li><a href="#opts">Why are my srun options ignored?</a></li> -<li><a href="#cred">Why are "Invalid job credential" errors generated?</a></li> <li><a href="#backfill">Why is the SLURM backfill scheduler not starting my job?</a></li> <li><a href="#steps">How can I run multiple jobs from within a single script?</a></li> @@ -23,6 +22,9 @@ name for a batch job?</a></li> <li><a href="#parallel_make">Can the <i>make</i> command utilize the resources allocated to a SLURM job?</a></li> <li><a href="#terminal">Can tasks be launched with a remote terminal?</a></li> +<li><a href="#force">What does "srun: Force Terminated job" indicate?</a></li> +<li><a href="#early_exit">What does this mean: "srun: First task exited 30s ago" +followed by "srun Job Failed"?</a></li> </ol> <h2>For Administrators</h2> <ol> @@ -53,14 +55,33 @@ parallel for testing purposes?</a></li> <li><a href="#multi_slurmd">Can slurm emulate a larger cluster?</a></li> <li><a href="#extra_procs">Can SLURM emulate nodes with more resources than physically exist on the node?</a></li> -<li><a href="#credential_replayed">What does a "credential -replayed" error in the <i>SlurmdLogFile</i> indicate?</a></li> -<li><a href="#large_time">What does a "Warning: Note very large -processing time" in the <i>SlurmctldLogFile</i> indicate?</a></li> +<li><a href="#credential_replayed">What does a +"credential replayed" error in the <i>SlurmdLogFile</i> +indicate?</a></li> +<li><a href="#large_time">What does +"Warning: Note very large processing time" +in the <i>SlurmctldLogFile</i> indicate?</a></li> <li><a href="#lightweight_core">How can I add support for lightweight core files?</a></li> <li><a href="#limit_propagation">Is resource limit propagation useful on a homogeneous 
cluster?</a></li> +<li<a href="#clock">Do I need to maintain synchronized clocks +on the cluster?</a></li> +<li><a href="#cred_invalid">Why are "Invalid job credential" errors +generated?</a></li> +<li><a href="#cred_replay">Why are +"Task launch failed on node ... Job credential replayed" +errors generated?</a></li> +<li><a href="#globus">Can SLURM be used with Globus?</li> +<li><a href="#time_format">Can SLURM time output format include the year?</li> +<li><a href="#file_limit">What causes the error +"Unable to accept new connection: Too many open files"?</li> +<li><a href="#slurmd_log">Why does the setting of <i>SlurmdDebug</i> fail +to log job step information at the appropriate level?</li> +<li><a href="#rpm">Why isn't the auth_none.so (or other file) in a +SLURM RPM?</li> +<li><a href="#slurmdbd">Why should I use the slurmdbd instead of the +regular database plugins?</li> </ol> <h2>For Users</h2> @@ -198,14 +219,7 @@ hostname command. Which will change the name of the computer on which SLURM executes the command - Very bad, <b>Don't run this command as user root!</b></p> -<p><a name="cred"><b>7. Why are "Invalid job credential" errors generated? -</b></a><br> -This error is indicative of SLURM's job credential files being inconsistent across -the cluster. All nodes in the cluster must have the matching public and private -keys as defined by <b>JobCredPrivateKey</b> and <b>JobCredPublicKey</b> in the -slurm configuration file <b>slurm.conf</b>. - -<p><a name="backfill"><b>8. Why is the SLURM backfill scheduler not starting my job? +<p><a name="backfill"><b>7. Why is the SLURM backfill scheduler not starting my job? </b></a><br> There are significant limitations in the current backfill scheduler plugin. It was designed to perform backfill node scheduling for a homogeneous cluster. @@ -214,23 +228,27 @@ resources). It also does not update the required or excluded node list of individual jobs. These are the current limiations. You can use the scontrol show command to check if these conditions apply.</p> <ul> -<li>partition: State=UP</li> -<li>partition: RootOnly=NO</li> -<li>partition: Shared=NO</li> -<li>job: ReqNodeList=NULL</li> -<li>job: ExcNodeList=NULL</li> -<li>job: Contiguous=0</li> -<li>job: Features=NULL</li> -<li>job: MinProcs, MinMemory, and MinTmpDisk satisfied by all nodes in +<li>Partition: State=UP</li> +<li>Partition: RootOnly=NO</li> +<li>Partition: Shared=NO</li> +<li>Job: ReqNodeList=NULL</li> +<li>Job: ExcNodeList=NULL</li> +<li>Job: Contiguous=0</li> +<li>Job: Features=NULL</li> +<li>Job: MinProcs, MinMemory, and MinTmpDisk satisfied by all nodes in the partition</li> -<li>job: MinProcs or MinNodes not to exceed partition's MaxNodes</li> +<li>Job: MinProcs or MinNodes not to exceed partition's MaxNodes</li> </ul> -<p>As soon as any priority-ordered job in the partition's queue fail to -satisfy the request, no lower priority job in that partition's queue -will be considered as a backfill candidate. Any programmer wishing -to augment the existing code is welcome to do so. - -<p><a name="steps"><b>9. How can I run multiple jobs from within a +<p>If the partitions specifications differ from those listed above, +no jobs in that partition will be scheduled by the backfills scheduler. +Their jobs will only be scheduled on a First-In-First-Out (FIFO) basis.</p> +<p>Jobs failing to satisfy the requirements above (i.e. with specific +node requirements) will not be considered candidates for backfill +scheduling and other jobs may be scheduled ahead of these jobs. 
+These jobs are subject to starvation, but will not block other +jobs from running when sufficient resources are available for them.</p> + +<p><a name="steps"><b>8. How can I run multiple jobs from within a single script?</b></a><br> A SLURM job is just a resource allocation. You can execute many job steps within that allocation, either in parallel or sequentially. @@ -239,7 +257,7 @@ steps will be allocated nodes that are not already allocated to other job steps. This essential provides a second level of resource management within the job for the job steps.</p> -<p><a name="orphan"><b>10. Why do I have job steps when my job has +<p><a name="orphan"><b>9. Why do I have job steps when my job has already COMPLETED?</b></a><br> NOTE: This only applies to systems configured with <i>SwitchType=switch/elan</i> or <i>SwitchType=switch/federation</i>. @@ -256,7 +274,7 @@ This enables SLURM to purge job information in a timely fashion even when there are many failing nodes. Unfortunately the job step information may persist longer.</p> -<p><a name="multi_batch"><b>11. How can I run a job within an existing +<p><a name="multi_batch"><b>10. How can I run a job within an existing job allocation?</b></a><br> There is a srun option <i>--jobid</i> that can be used to specify a job's ID. @@ -272,7 +290,7 @@ If you specify that a batch job should use an existing allocation, that job allocation will be released upon the termination of that batch job.</p> -<p><a name="user_env"><b>12. How does SLURM establish the environment +<p><a name="user_env"><b>11. How does SLURM establish the environment for my job?</b></a><br> SLURM processes are not run under a shell, but directly exec'ed by the <i>slurmd</i> daemon (assuming <i>srun</i> is used to launch @@ -282,26 +300,16 @@ is executed are propagated to the spawned processes. The <i>~/.profile</i> and <i>~/.bashrc</i> scripts are not executed as part of the process launch.</p> -<p><a name="prompt"><b>13. How can I get shell prompts in interactive +<p><a name="prompt"><b>12. How can I get shell prompts in interactive mode?</b></a><br> <i>srun -u bash -i</i><br> Srun's <i>-u</i> option turns off buffering of stdout. Bash's <i>-i</i> option tells it to run in interactive mode (with prompts). -<p><a name="batch_out"><b>14. How can I get the task ID in the output +<p><a name="batch_out"><b>13. How can I get the task ID in the output or error file name for a batch job?</b></a><br> -The <i>srun -b</i> or <i>sbatch</i> commands are meant to accept a -script rather than a command line. If you specify a command line -rather than a script, it gets translated to a simple script of this -sort:</p> -<pre> -#!/bin/sh -srun hostname -</pre> -<p>You will note that the srun command lacks the output file specification. -It's output (for all tasks) becomes the output of the job. If you -want separate output by task, you will need to build a script containing -this specification. For example:</p> +<p>If you want separate output by task, you will need to build a script +containing this specification. For example:</p> <pre> $ cat test #!/bin/sh @@ -328,7 +336,7 @@ $ cat out_65541_2 tdev2 </pre> -<p><a name="parallel_make"><b>15. Can the <i>make</i> command +<p><a name="parallel_make"><b>14. Can the <i>make</i> command utilize the resources allocated to a SLURM job?</b></a><br> Yes. There is a patch available for GNU make version 3.81 available as part of the SLURM distribution in the file @@ -341,15 +349,13 @@ overhead of SLURM's task launch. 
Use with make's <i>-j</i> option within an existing SLURM allocation. Outside of a SLURM allocation, make's behavior will be unchanged.</p> -<p><a name="terminal"><b>16. Can tasks be launched with a remote +<p><a name="terminal"><b>15. Can tasks be launched with a remote terminal?</b></a><br> -SLURM does not directly support a remote pseudo terminal for spawned -tasks. -We intend to remedy this in Slurm version 1.3. +In SLURM version 1.3 or higher, use srun's <i>--pty</i> option. Until then, you can accomplish this by starting an appropriate program or script. In the simplest case (X11 over TCP with the DISPLAY -environment already set), <i>srun xterm</i> may suffice. In the more -general case, the following scripts should work. +environment already set), executing <i>srun xterm</i> may suffice. +In the more general case, the following scripts should work. <b>NOTE: The pathname to the additional scripts are included in the variables BS and IS of the first script. You must change this in the first script.</b> @@ -409,7 +415,7 @@ ssh -X -t $NODE $IS slurm$JOB </pre> <p>NOTE: The above script executes the script below, -named <i>_interactive<i>.</p> +named <i>_interactive</i>.</p> <pre> #!/bin/sh # -*- coding: utf-8 -*- @@ -443,6 +449,33 @@ fi exec screen -S $SCREENSESSION -rd </pre> +<p><a name="force"><b>16. What does "srun: Force Terminated job" +indicate?</b></a><br> +The srun command normally terminates when the standard output and +error I/O from the spawned tasks end. This does not necessarily +happen at the same time that a job step is terminated. For example, +a file system problem could render a spawned tasks non-killable +at the same time that I/O to srun is pending. Alternately a network +problem could prevent the I/O from being transmitted to srun. +In any event, the srun command is notified when a job step is +terminated, either upon reaching its time limit or being explicitly +killed. If the srun has not already terminated, the message +"srun: Force Terminated job" is printed. +If the job step's I/O does not terminate in a timely fashion +thereafter, pending I/O is abandoned and the srun command +exits.</p> + +<p><a name="early_exit"><b>17. What does this mean: +"srun: First task exited 30s ago" +followed by "srun Job Failed"?</b></a><br> +The srun command monitors when tasks exit. By default, 30 seconds +after the first task exists, the job is killed. +This typically indicates some type of job failure and continuing +to execute a parallel job when one of the tasks has exited is +not normally productive. This behavior can be changed using srun's +<i>--wait=<time></i> option to either change the timeout +period or disable the timeout altogether. See srun's man page +for details. <p class="footer"><a href="#top">top</a></p> @@ -571,7 +604,10 @@ limit from being propagated:<i>PropagateResourceLimitsExcept=MEMLOCK</i>.</p> <p>We also have a PAM module for SLURM that prevents users from logging into nodes that they have not been allocated (except for user root, which can always login. pam_slurm is available for download from -<a href="ftp://ftp.llnl.gov/pub/linux/pam_slurm/">ftp://ftp.llnl.gov/pub/linux/pam_slurm</a> +<a href="https://sourceforge.net/projects/slurm/"> +https://sourceforge.net/projects/slurm/</a> or use the +<a href="http://www.debian.org/">Debian</a> package +named <i>libpam-slurm</i>. The use of pam_slurm does not require <i>UsePAM</i> being set. The two uses of PAM are independent. 
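pam_slurm is normally enabled by adding a line such as the following to the PAM configuration of whatever service is used to reach the compute nodes (for example /etc/pam.d/sshd; the exact file is site-specific):

    account    required     pam_slurm.so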
@@ -665,6 +701,24 @@ for this due to it's improved support for multiple slurmd daemons. See the <a href="programmer_guide.shtml#multiple_slurmd_support">Programmers Guide</a> for more details about configuring multiple slurmd support. +<p>In order to emulate a really large cluster, it can be more +convenient to use a single <i>slurmd</i> daemon. +That daemon will not be able to launch many tasks, but can +suffice for developing or testing scheduling software. +<ol> +<li>Execute the <i>configure</i> program with your normal options.</li> +<li>Add the line "<i>#define HAVE_FRONT_END 1</i>" to the resulting +<i>config.h</i> file.</li> +<li>Build and install SLURM in the usual manner.</li> +<li>In <i>slurm.conf</i> define the desired node names (arbitrary +names used only by SLURM) as <i>NodeName</i> along with the actual +address of the <b>one</b> physical node in <i>NodeHostname</i>. +Up to 64k nodes can be configured in this virtual cluster.</li> +<li>Start your <i>slurmctld</i> and one <i>slurmd</i> daemon.</li> +<li>Create job allocations as desired, but <b>do not run job steps +with more than a couple of tasks.</b> Doing so may result in the +<i>slurmd</i> daemon exhausting its memory and failing.</li> +</ol> <p>In order to emulate a really large cluster, it can be more convenient to use a single <i>slurmd</i> daemon. @@ -729,8 +783,9 @@ SLURM will use the resource specification for each node that is given in <i>slurm.conf</i> and will not check these specifications against those actaully found on the node. -<p><a name="credential_replayed"><b>16. What does a "credential -replayed" error in the <i>SlurmdLogFile</i> indicate?</b></a><br> +<p><a name="credential_replayed"><b>16. What does a +"credential replayed" +error in the <i>SlurmdLogFile</i> indicate?</b></a><br> This error is indicative of the <i>slurmd</i> daemon not being able to respond to job initiation requests from the <i>srun</i> command in a timely fashion (a few seconds). @@ -754,8 +809,9 @@ value higher than the default 5 seconds. In earlier versions of Slurm, the <i>--msg-timeout</i> option of <i>srun</i> serves a similar purpose. -<p><a name="large_time"><b>17. What does a "Warning: Note very large -processing time" in the <i>SlurmctldLogFile</i> indicate?</b></a><br> +<p><a name="large_time"><b>17. What does +"Warning: Note very large processing time" +in the <i>SlurmctldLogFile</i> indicate?</b></a><br> This error is indicative of some operation taking an unexpectedly long time to complete, over one second to be specific. Setting the value of <i>SlurmctldDebug</i> configuration parameter @@ -795,8 +851,102 @@ option. See <i>"man slurm.conf"</i> and <i>"man srun"</i> for more information about these options. +<p><a name="clock"><b>20. Do I need to maintain synchronized +clocks on the cluster?</b></a><br> +In general, yes. Having inconsistent clocks may cause nodes to +be unusable. SLURM log files should contain references to +expired credentials. + +<p><a name="cred_invalid"><b>21. Why are "Invalid job credential" +errors generated?</b></a><br> +This error is indicative of SLURM's job credential files being inconsistent across +the cluster. All nodes in the cluster must have the matching public and private +keys as defined by <b>JobCredPrivateKey</b> and <b>JobCredPublicKey</b> in the +slurm configuration file <b>slurm.conf</b>. + +<p><a name="cred_replay"><b>22. Why are +"Task launch failed on node ... 
Job credential replayed"
+errors generated?</b></a><br>
+This error indicates that a job credential generated by the slurmctld daemon
+corresponds to a job that the slurmd daemon has already revoked.
+The slurmctld daemon selects job ID values based upon the configured
+value of <b>FirstJobId</b> (the default value is 1) and each job gets
+a value one larger than the previous job.
+On job termination, the slurmctld daemon notifies the slurmd on each
+allocated node that all processes associated with that job should be
+terminated.
+The slurmd daemon maintains a list of the jobs which have already been
+terminated to avoid replay of task launch requests.
+If the slurmctld daemon is cold-started (with the "-c" option
+or "/etc/init.d/slurm startclean"), it starts job ID values
+over based upon <b>FirstJobId</b>.
+If the slurmd is not also cold-started, it will reject job launch requests
+for jobs that it considers terminated.
+The solution to this problem is to cold-start all slurmd daemons whenever
+the slurmctld daemon is cold-started.
+
+<p><a name="globus"><b>23. Can SLURM be used with Globus?</b><br>
+Yes. Build and install SLURM's Torque/PBS command wrappers along with
+the Perl APIs from SLURM's <i>contribs</i> directory and configure
+<a href="http://www-unix.globus.org/">Globus</a> to use those PBS commands.
+Note there are RPMs available for both of these packages, named
+<i>torque</i> and <i>perlapi</i> respectively.
+
+<p><a name="time_format"><b>24. Can SLURM time output format include the
+year?</b><br>
+The default SLURM time format output is <i>MM/DD-HH:MM:SS</i>.
+Define "ISO8601" at SLURM build time to get the time format
+<i>YYYY-MM-DDTHH:MM:SS</i>.
+Note that this change in format will break anything that parses
+SLURM output expecting the old format (e.g. LSF, Maui or Moab).
+
+<p><a name="file_limit"><b>25. What causes the error
+"Unable to accept new connection: Too many open files"?</b><br>
+The srun command automatically increases its open file limit to
+the hard limit in order to process all of the standard input and output
+connections to the launched tasks. It is recommended that you set the
+open file hard limit to 8192 across the cluster.
+
+<p><a name="slurmd_log"><b>26. Why does the setting of <i>SlurmdDebug</i>
+fail to log job step information at the appropriate level?</b><br>
+There are two programs involved here. One is <b>slurmd</b>, which is
+a persistent daemon running at the desired debug level. The second
+program is <b>slurmstepd</b>, which executes the user job; its
+debug level is controlled by the user. Submitting the job with
+an option of <i>--debug=#</i> will result in the desired level of
+detail being logged in the <i>SlurmdLogFile</i> plus the output
+of the program.
+
+<p><a name="rpm"><b>27. Why isn't the auth_none.so (or other file) in a
+SLURM RPM?</b><br>
+The auth_none plugin is in a separate RPM and not built by default.
+Using the auth_none plugin means that SLURM communications are not
+authenticated, so you probably do not want to run in this mode of operation
+except for testing purposes. If you want to build the auth_none RPM then
+add <i>--with auth_none</i> on the rpmbuild command line or add
+<i>%_with_auth_none</i> to your ~/.rpmmacros file. See the file slurm.spec
+in the SLURM distribution for a list of other options.
+
+<p><a name="slurmdbd"><b>28.
Why should I use the slurmdbd instead of the
+regular database plugins?</b><br>
+While the normal storage plugins will work fine without the added
+layer of the slurmdbd, there are some great benefits to using the
+slurmdbd.
+
+1. Added security. Using the slurmdbd you can have an authenticated
+   connection to the database.
+2. Offloading processing from the controller. With the slurmdbd there is no
+   slowdown of the controller due to a slow or overloaded database.
+3. Keeping enterprise-wide accounting from all SLURM clusters in one database.
+   The slurmdbd is multi-threaded and designed to handle all the
+   accounting for the entire enterprise.
+4. With the new database plugins (version 1.3+) you can query accounting
+   statistics with sacct from any node on which SLURM is installed. With the
+   slurmdbd you can also query any cluster using the slurmdbd from any
+   other cluster's nodes.
+
<p class="footer"><a href="#top">top</a></p>

-<p style="text-align:center;">Last modified 30 July 2007</p>
+<p style="text-align:center;">Last modified 13 May 2008</p>

<!--#include virtual="footer.txt"-->
diff --git a/doc/html/footer.txt b/doc/html/footer.txt
index af8ad069c..49b3c490e 100644
--- a/doc/html/footer.txt
+++ b/doc/html/footer.txt
@@ -1,13 +1,13 @@
</div> <!-- closes "content" -->
<div id="footer">
-<div id="left"> <span class="ucrlnum">UCRL-WEB-225274 |</span> <a href="http://www.llnl.gov/disclaimer.html" target="_blank" class="privacy">Privacy & Legal Notice</a></div>
-<div id="right"><span class="ucrlnum">November 28, 2006 </span></div>
+<div id="left"> <span class="ucrlnum">LLNL-WEB-402631 |</span> <a href="https://www.llnl.gov/disclaimer.html" target="_blank" class="privacy">Privacy & Legal Notice</a></div>
+<div id="right"><span class="ucrlnum">12 March 2008 </span></div>
</div>
<div id="footer2">
<div id="left2"><img src="sponsors.gif" width="129" height="30" border="0" usemap="#Map2"></div>
-<div id="center2"><a href="http://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
+<div id="center2"><a href="https://www.llnl.gov/" target="_blank" class="footer">Lawrence Livermore National Laboratory</a><br />
<span class="smalltextblue">7000 East Avenue &#8226; Livermore, CA 94550</span></div>
<div id="right2"><span class="smalltextblue">Operated by Lawrence Livermore National Security, LLC, for the</span>
@@ -21,7 +21,7 @@ National Nuclear Security Administration</a></div>
</div> <!-- closes "container" -->
<map name="Map">
-<area shape="rect" coords="571,1,799,15" href="http://www.llnl.gov/">
+<area shape="rect" coords="571,1,799,15" href="https://www.llnl.gov/">
</map>
<map name="Map2">
<area shape="rect" coords="1,1,92,30" href="http://www.nnsa.doe.gov/" target="_blank" alt="NNSA logo links to the NNSA Web site">
diff --git a/doc/html/gang_scheduling.shtml b/doc/html/gang_scheduling.shtml
new file mode 100644
index 000000000..66c0b7cf6
--- /dev/null
+++ b/doc/html/gang_scheduling.shtml
@@ -0,0 +1,472 @@
+<!--#include virtual="header.txt"-->
+
+<H1>Gang Scheduling</H1>
+
+<P>
+SLURM version 1.2 and earlier supported dedication of resources
+to jobs.
+Beginning in SLURM version 1.3, gang scheduling is supported.
+Gang scheduling is when two or more jobs are allocated to the same resources
+and these jobs are alternately suspended to let all of the tasks of each
+job have full access to the shared resources for a period of time.
+</P>
+<P>
+A resource manager that supports timeslicing can improve its responsiveness
+and utilization by allowing more jobs to begin running sooner. Shorter-running
+jobs no longer have to wait in a queue behind longer-running jobs. Instead they
+can be run "in parallel" with the longer-running jobs, which will allow them
+to finish sooner. Throughput is also improved because overcommitting the
+resources provides opportunities for "local backfilling" to occur (see example
+below).
+</P>
+<P>
+In SLURM 1.3.0, the <I>sched/gang</I> plugin provides timeslicing. When enabled,
+it monitors each of the partitions in SLURM. If a new job has been allocated to
+resources in a partition that have already been allocated to an existing job,
+then the plugin will suspend the new job until the configured
+<I>SchedulerTimeSlice</I> interval has elapsed. Then it will suspend the
+running job and let the new job make use of the resources for a
+<I>SchedulerTimeSlice</I> interval. This will continue until one of the
+jobs terminates.
+</P>
+
+<H2>Configuration</H2>
+<P>
+There are several important configuration parameters relating to
+gang scheduling:
+</P>
+<UL>
+<LI>
+<B>SelectType</B>: The SLURM <I>sched/gang</I> plugin supports nodes
+allocated by the <I>select/linear</I> plugin and socket/core/CPU resources
+allocated by the <I>select/cons_res</I> plugin.
+</LI>
+<LI>
+<B>SelectTypeParameter</B>: Since resources will be getting overallocated
+with jobs, the resource selection plugin should be configured to track the
+amount of memory used by each job to ensure that memory page swapping does
+not occur. When <I>select/linear</I> is chosen, we recommend setting
+<I>SelectTypeParameter=CR_Memory</I>. When <I>select/cons_res</I> is
+chosen, we recommend including Memory as a resource (e.g.
+<I>SelectTypeParameter=CR_Core_Memory</I>).
+</LI>
+<LI>
+<B>DefMemPerTask</B>: Since job requests may not explicitly specify
+a memory requirement, we also recommend configuring <I>DefMemPerTask</I>
+(default memory per task). It may also be desirable to configure
+<I>MaxMemPerTask</I> (maximum memory per task) in <I>slurm.conf</I>.
+</LI>
+<LI>
+<B>JobAcctGatherType and JobAcctGatherFrequency</B>:
+If you wish to enforce memory limits, accounting must be enabled
+using the <I>JobAcctGatherType</I> and <I>JobAcctGatherFrequency</I>
+parameters. If accounting is enabled and a job exceeds its configured
+memory limits, it will be canceled in order to prevent it from
+adversely affecting other jobs sharing the same resources.
+</LI>
+<LI>
+<B>SchedulerType</B>: Configure the <I>sched/gang</I> plugin by setting
+<I>SchedulerType=sched/gang</I> in <I>slurm.conf</I>.
+</LI>
+<LI>
+<B>SchedulerTimeSlice</B>: The default timeslice interval is 30 seconds.
+To change this duration, set <I>SchedulerTimeSlice</I> to the desired interval
+(in seconds) in <I>slurm.conf</I>. For example, to set the timeslice interval
+to one minute, set <I>SchedulerTimeSlice=60</I>. Short values can increase
+the overhead of gang scheduling.
+</LI>
+<LI>
+<B>Shared</B>: Configure the <I>Shared</I> setting to
+<I>FORCE</I> for all partitions in which timeslicing is to take place.
+The <I>FORCE</I> option now supports an additional parameter that controls
+how many jobs can share a resource (FORCE[:max_share]). By default the
+max_share value is 4. To allow up to 6 jobs from this partition to be
+allocated to a common resource, set <I>Shared=FORCE:6</I>.
+</LI>
+</UL>
+<P>
+In order to enable gang scheduling after making the configuration changes
+described above, restart SLURM if it is already running. Any change to the
+plugin settings in SLURM requires a full restart of the daemons. If you
+just change the partition <I>Shared</I> setting, this can be updated with
+<I>scontrol reconfig</I>.
+</P>
+<P>
+For an advanced topic discussion on the potential use of swap space,
+see "Making use of swap space" in the "Future Work" section below.
+</P>
+
+<H2>Timeslicer Design and Operation</H2>
+
+<P>
+When enabled, the <I>sched/gang</I> plugin keeps track of the resources
+allocated to all jobs. For each partition an "active bitmap" is maintained that
+tracks all concurrently running jobs in the SLURM cluster. Each time a new
+job is allocated to resources in a partition, the <I>sched/gang</I> plugin
+compares these newly allocated resources with the resources already maintained
+in the "active bitmap". If these two sets of resources are disjoint then the new
+job is added to the "active bitmap". If these two sets of resources overlap then
+the new job is suspended. All jobs are tracked in a per-partition job queue
+within the <I>sched/gang</I> plugin.
+</P>
+<P>
+A separate <I>timeslicer thread</I> is spawned by the <I>sched/gang</I> plugin
+on startup. This thread sleeps for the configured <I>SchedulerTimeSlice</I>
+interval. When it wakes up, it checks each partition for suspended jobs. If
+suspended jobs are found then the <I>timeslicer thread</I> moves all running
+jobs to the end of the job queue. It then reconstructs the "active bitmap" for
+this partition beginning with the suspended job that has waited the longest to
+run (this will be the first suspended job in the run queue). Each following job
+is then compared with the new "active bitmap", and if the job can be run
+concurrently with the other "active" jobs then the job is added. Once this is
+complete then the <I>timeslicer thread</I> suspends any currently running jobs
+that are no longer part of the "active bitmap", and resumes jobs that are new to
+the "active bitmap".
+</P>
+<P>
+This <I>timeslicer thread</I> algorithm for rotating jobs is designed to prevent
+jobs from starving (remaining in the suspended state indefinitely) and to be as
+fair as possible in the distribution of runtime while still keeping all of the
+resources as busy as possible.
+</P>
+<P>
+The <I>sched/gang</I> plugin suspends jobs via the same internal functions that
+support <I>scontrol suspend</I> and <I>scontrol resume</I>. A good way to
+observe the operation of the timeslicer is by running <I>watch squeue</I> in a
+terminal window.
+</P>
+
+<H2>A Simple Example</H2>
+
+<P>
+The following example is configured with <I>select/linear</I>,
+<I>sched/gang</I>, and <I>Shared=FORCE</I>. This example takes place on a small
+cluster of 5 nodes:
+</P>
+<PRE>
+[user@n16 load]$ <B>sinfo</B>
+PARTITION AVAIL TIMELIMIT NODES STATE NODELIST
+active* up infinite 5 idle n[12-16]
+</PRE>
+<P>
+Here are the Scheduler settings (the last two settings are the relevant ones):
+</P>
+<PRE>
+[user@n16 load]$ <B>scontrol show config | grep Sched</B>
+FastSchedule = 1
+SchedulerPort = 7321
+SchedulerRootFilter = 1
+SchedulerTimeSlice = 30
+SchedulerType = sched/gang
+[user@n16 load]$
+</PRE>
+<P>
+The <I>myload</I> script launches a simple load-generating app that runs
+for the given number of seconds.
Submit <I>myload</I> to run on all nodes: +</P> +<PRE> +[user@n16 load]$ <B>sbatch -N5 ./myload 300</B> +sbatch: Submitted batch job 3 +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 3 active myload user 0:05 5 n[12-16] +</PRE> +<P> +Submit it again and watch the <I>sched/gang</I> plugin suspend it: +</P> +<PRE> +[user@n16 load]$ <B>sbatch -N5 ./myload 300</B> +sbatch: Submitted batch job 4 +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 3 active myload user R 0:13 5 n[12-16] + 4 active myload user S 0:00 5 n[12-16] +</PRE> +<P> +After 30 seconds the <I>sched/gang</I> plugin swaps jobs, and now job 4 is the +active one: +</P> +<PRE> +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 4 active myload user R 0:08 5 n[12-16] + 3 active myload user S 0:41 5 n[12-16] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 4 active myload user R 0:21 5 n[12-16] + 3 active myload user S 0:41 5 n[12-16] +</PRE> +<P> After another 30 seconds the <I>sched/gang</I> plugin sets job 3 running again: +</P> +<PRE> +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 3 active myload user R 0:50 5 n[12-16] + 4 active myload user S 0:30 5 n[12-16] +</PRE> +<P> +<B>A possible side effect of timeslicing</B>: Note that jobs that are +immediately suspended may cause their srun commands to produce the following +output: +</P> +<PRE> +[user@n16 load]$ <B>cat slurm-4.out</B> +srun: Job step creation temporarily disabled, retrying +srun: Job step creation still disabled, retrying +srun: Job step creation still disabled, retrying +srun: Job step creation still disabled, retrying +srun: Job step created +</PRE> +<P> +This occurs because <I>srun</I> is attempting to launch a jobstep in an +allocation that has been suspended. The <I>srun</I> process will continue in a +retry loop to launch the jobstep until the allocation has been resumed and the +jobstep can be launched. +</P> +<P> +When the <I>sched/gang</I> plugin is enabled, this type of output in the user +jobs should be considered benign. +</P> + +<H2>More examples</H2> +<P> +The following example shows how the timeslicer algorithm keeps the resources +busy. 
Job 10 runs continually, while jobs 9 and 11 are timesliced: +</P> +<PRE> +[user@n16 load]$ <B>sbatch -N3 ./myload 300</B> +sbatch: Submitted batch job 9 +[user@n16 load]$ <B>sbatch -N2 ./myload 300</B> +sbatch: Submitted batch job 10 +[user@n16 load]$ <B>sbatch -N3 ./myload 300</B> +sbatch: Submitted batch job 11 +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 9 active myload user R 0:11 3 n[12-14] + 10 active myload user R 0:08 2 n[15-16] + 11 active myload user S 0:00 3 n[12-14] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 10 active myload user R 0:50 2 n[15-16] + 11 active myload user R 0:12 3 n[12-14] + 9 active myload user S 0:41 3 n[12-14] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 10 active myload user R 1:04 2 n[15-16] + 11 active myload user R 0:26 3 n[12-14] + 9 active myload user S 0:41 3 n[12-14] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 9 active myload user R 0:46 3 n[12-14] + 10 active myload user R 1:13 2 n[15-16] + 11 active myload user S 0:30 3 n[12-14] +[user@n16 load]$ +</PRE> +</P> +<P> +The next example displays "local backfilling": +</P> +<PRE> +[user@n16 load]$ <B>sbatch -N3 ./myload 300</B> +sbatch: Submitted batch job 12 +[user@n16 load]$ <B>sbatch -N5 ./myload 300</B> +sbatch: Submitted batch job 13 +[user@n16 load]$ <B>sbatch -N2 ./myload 300</B> +sbatch: Submitted batch job 14 +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 12 active myload user R 0:14 3 n[12-14] + 14 active myload user R 0:06 2 n[15-16] + 13 active myload user S 0:00 5 n[12-16] +[user@n16 load]$ +</PRE> +<P> +Without timeslicing and without the backfill scheduler enabled, job 14 has to +wait for job 13 to finish. +</P><P> +This is called "local" backfilling because the backfilling only occurs with jobs +close enough in the queue to get allocated by the scheduler as part of +oversubscribing the resources. Recall that the number of jobs that can +overcommit a resource is controlled by the <I>Shared=FORCE:max_share</I> value, +so this value effectively controls the scope of "local backfilling". +</P><P> +Normal backfill algorithms check <U>all</U> jobs in the wait queue. +</P> + +<H2>Consumable Resource Examples</H2> +<P> +The following two examples illustrate the primary difference between +<I>CR_CPU</I> and <I>CR_Core</I> when consumable resource selection is enabled +(<I>select/cons_res</I>). +</P> +<P> +When <I>CR_CPU</I> (or <I>CR_CPU_Memory</I>) is configured then the selector +treats the CPUs as simple, <I>interchangeable</I> computing resources. However +when <I>CR_Core</I> (or <I>CR_Core_Memory</I>) is enabled the selector treats +the CPUs as individual resources that are <U>specifically</U> allocated to jobs. +This subtle difference is highlighted when timeslicing is enabled. +</P> +<P> +In both examples 6 jobs are submitted. Each job requests 2 CPUs per node, and +all of the nodes contain two quad-core processors. The timeslicer will initially +let the first 4 jobs run and suspend the last 2 jobs. The manner in which these +jobs are timesliced depends upon the configured <I>SelectTypeParameter</I>. +</P> +<P> +In the first example <I>CR_Core_Memory</I> is configured. Note that jobs 46 and +47 don't <U>ever</U> get suspended. This is because they are not sharing their +cores with any other job. Jobs 48 and 49 were allocated to the same cores as +jobs 45 and 46. 
The timeslicer recognizes this and timeslices only those jobs: +</P> +<PRE> +[user@n16 load]$ <B>sinfo</B> +PARTITION AVAIL TIMELIMIT NODES STATE NODELIST +active* up infinite 5 idle n[12-16] +[user@n16 load]$ <B>scontrol show config | grep Select</B> +SelectType = select/cons_res +SelectTypeParameters = CR_CORE_MEMORY +[user@n16 load]$ <B>sinfo -o "%20N %5D %5c %5z"</B> +NODELIST NODES CPUS S:C:T +n[12-16] 5 8 2:4:1 +[user@n16 load]$ +[user@n16 load]$ +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 44 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 45 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 46 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 47 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 48 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 49 +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 44 active myload user R 0:09 5 n[12-16] + 45 active myload user R 0:08 5 n[12-16] + 46 active myload user R 0:08 5 n[12-16] + 47 active myload user R 0:07 5 n[12-16] + 48 active myload user S 0:00 5 n[12-16] + 49 active myload user S 0:00 5 n[12-16] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 46 active myload user R 0:49 5 n[12-16] + 47 active myload user R 0:48 5 n[12-16] + 48 active myload user R 0:06 5 n[12-16] + 49 active myload user R 0:06 5 n[12-16] + 44 active myload user S 0:44 5 n[12-16] + 45 active myload user S 0:43 5 n[12-16] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 44 active myload user R 1:23 5 n[12-16] + 45 active myload user R 1:22 5 n[12-16] + 46 active myload user R 2:22 5 n[12-16] + 47 active myload user R 2:21 5 n[12-16] + 48 active myload user S 1:00 5 n[12-16] + 49 active myload user S 1:00 5 n[12-16] +[user@n16 load]$ +</PRE> +<P> +Note the runtime of all 6 jobs in the output of the last <I>squeue</I> command. +Jobs 46 and 47 have been running continuously, while jobs 45 and 46 are +splitting their runtime with jobs 48 and 49. +</P><P> +The next example has <I>CR_CPU_Memory</I> configured and the same 6 jobs are +submitted. 
Here the selector and the timeslicer treat the CPUs as countable +resources which results in all 6 jobs sharing time on the CPUs: +</P> +<PRE> +[user@n16 load]$ <B>sinfo</B> +PARTITION AVAIL TIMELIMIT NODES STATE NODELIST +active* up infinite 5 idle n[12-16] +[user@n16 load]$ <B>scontrol show config | grep Select</B> +SelectType = select/cons_res +SelectTypeParameters = CR_CPU_MEMORY +[user@n16 load]$ <B>sinfo -o "%20N %5D %5c %5z"</B> +NODELIST NODES CPUS S:C:T +n[12-16] 5 8 2:4:1 +[user@n16 load]$ +[user@n16 load]$ +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 51 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 52 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 53 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 54 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 55 +[user@n16 load]$ <B>sbatch -n10 -N5 ./myload 300</B> +sbatch: Submitted batch job 56 +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 51 active myload user R 0:11 5 n[12-16] + 52 active myload user R 0:11 5 n[12-16] + 53 active myload user R 0:10 5 n[12-16] + 54 active myload user R 0:09 5 n[12-16] + 55 active myload user S 0:00 5 n[12-16] + 56 active myload user S 0:00 5 n[12-16] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 51 active myload user R 1:09 5 n[12-16] + 52 active myload user R 1:09 5 n[12-16] + 55 active myload user R 0:23 5 n[12-16] + 56 active myload user R 0:23 5 n[12-16] + 53 active myload user S 0:45 5 n[12-16] + 54 active myload user S 0:44 5 n[12-16] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 53 active myload user R 0:55 5 n[12-16] + 54 active myload user R 0:54 5 n[12-16] + 55 active myload user R 0:40 5 n[12-16] + 56 active myload user R 0:40 5 n[12-16] + 51 active myload user S 1:16 5 n[12-16] + 52 active myload user S 1:16 5 n[12-16] +[user@n16 load]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 51 active myload user R 3:18 5 n[12-16] + 52 active myload user R 3:18 5 n[12-16] + 53 active myload user R 3:17 5 n[12-16] + 54 active myload user R 3:16 5 n[12-16] + 55 active myload user S 3:00 5 n[12-16] + 56 active myload user S 3:00 5 n[12-16] +[user@n16 load]$ +</PRE> +<P> +Note that the runtime of all 6 jobs is roughly equal. Jobs 51-54 ran first so +they're slightly ahead, but so far all jobs have run for at least 3 minutes. +</P><P> +At the core level this means that SLURM relies on the linux kernel to move jobs +around on the cores to maximize performance. This is different than when +<I>CR_Core_Memory</I> was configured and the jobs would effectively remain +"pinned" to their specific cores for the duration of the job. Note that +<I>CR_Core_Memory</I> supports CPU binding, while <I>CR_CPU_Memory</I> does not. +</P> + +<H2>Future Work</H2> + +<P> +Priority scheduling and preemptive scheduling are other forms of gang +scheduling that are currently under development for SLURM. +</P> +<P> +<B>Making use of swap space</B>: (note that this topic is not currently +scheduled for development, unless someone would like to pursue this) It should +be noted that timeslicing does provide an interesting mechanism for high +performance jobs to make use of swap space. The optimal scenario is one in which +suspended jobs are "swapped out" and active jobs are "swapped in". 
The swapping +activity would only occur once every <I>SchedulerTimeslice</I> interval. +</P> +<P> +However, SLURM should first be modified to include support for scheduling jobs +into swap space and to provide controls to prevent overcommitting swap space. +For now this idea could be experimented with by disabling memory support in the +selector and submitting appropriately sized jobs. +</P> + +<p style="text-align:center;">Last modified 17 March 2008</p> + +<!--#include virtual="footer.txt"--> diff --git a/doc/html/header.txt b/doc/html/header.txt index 097af4411..2f84c37a8 100644 --- a/doc/html/header.txt +++ b/doc/html/header.txt @@ -8,7 +8,7 @@ <meta http-equiv="Pragma" content="no-cache"> <meta http-equiv="keywords" content="Simple Linux Utility for Resource Management, SLURM, resource management, Linux clusters, high-performance computing, Livermore Computing"> -<meta name="LLNLRandR" content="UCRL-WEB-225274"> +<meta name="LLNLRandR" content="LLNL-WEB-402631"> <meta name="LLNLRandRdate" content="18 December 2006"> <meta name="distribution" content="global"> <meta name="description" content="Simple Linux Utility for Resource Management"> diff --git a/doc/html/jobacct_gatherplugins.shtml b/doc/html/jobacct_gatherplugins.shtml new file mode 100644 index 000000000..1dc934d31 --- /dev/null +++ b/doc/html/jobacct_gatherplugins.shtml @@ -0,0 +1,262 @@ +<!--#include virtual="header.txt"--> + +<h1><a name="top">SLURM Job Accounting Gather Plugin API</a></h1> + +<h2> Overview</h2> +<p> This document describes SLURM job accounting gather plugins and the API that +defines them. It is intended as a resource to programmers wishing to write +their own SLURM job accounting gather plugins. This is version 1 of the API. + + +<p>SLURM job accounting gather plugins must conform to the +SLURM Plugin API with the following specifications: + +<p><span class="commandline">const char +plugin_name[]="<i>full text name</i>" +<p style="margin-left:.2in"> +A free-formatted ASCII text string that identifies the plugin. + +<p><span class="commandline">const char +plugin_type[]="<i>major/minor</i>"</span><br> +<p style="margin-left:.2in"> +The major type must be "jobacct_gather." +The minor type can be any suitable name +for the type of accounting package. We currently use +<ul> +<li><b>aix</b>— Gathers information from AIX /proc table and adds this +information to the standard rusage information also gathered for each job. +<li><b>linux</b>—Gathers information from Linux /proc table and adds this +information to the standard rusage information also gathered for each job. +<li><b>none</b>—No information gathered. +</ul> +The <b>sacct</b> program can be used to display gathered data from regular +accounting and from these plugins. +<p>The programmer is urged to study +<span class="commandline">src/plugins/jobacct_gather/linux</span> and +<span class="commandline">src/common/jobacct_common.c/.h</span> +for a sample implementation of a SLURM job accounting gather plugin. +<p class="footer"><a href="#top">top</a> + + +<h2>API Functions</h2> +<p>All of the following functions are required. Functions which are not +implemented must be stubbed. + +<p class="commandline">jobacctinfo_t *jobacct_gather_p_create(jobacct_id_t *jobacct_id) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_alloc() used to alloc a pointer to and initialize a +new jobacctinfo structure.<br><br> +You will need to free the information returned by this function! 
+<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">tid</span> +(input) id of the task send in (uint16_t)NO_VAL if no specfic task. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">jobacctinfo structure pointer</span> on success, or<br> +<span class="commandline">NULL</span> on failure. + +<p class="commandline">void jobacct_gather_p_destroy(jobacctinfo_t *jobacct) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_free() used to free the allocation made by jobacct_gather_p_alloc(). +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">jobacct</span> +(input) structure to be freed.<br> +<span class="commandline">none</span> +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="commandline"> +int jobacct_gather_p_setinfo(jobacctinfo_t *jobacct, + enum jobacct_data_type type, void *data) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_setinfo() is called to set the values of a jobacctinfo_t to +specific values based on inputs. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">jobacct</span> +(input/output) structure to be altered.<br> +<span class="commandline">type</span> +(input) enum of specific part of jobacct to alter.<br> +<span class="commandline">data</span> +(input) corresponding data to set jobacct part to. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +int jobacct_gather_p_getinfo(jobacctinfo_t *jobacct, + enum jobacct_data_type type, void *data) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_getinfo() is called to get the values of a jobacctinfo_t +specific values based on inputs. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">jobacct</span> +(input) structure to be queried.<br> +<span class="commandline">type</span> +(input) enum of specific part of jobacct to get.<br> +<span class="commandline">data</span> +(output) corresponding data to from jobacct part. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +void jobacct_gather_p_pack(jobacctinfo_t *jobacct, Buf buffer) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_pack() pack jobacctinfo_t in a buffer to send across the network. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">jobacct</span> +(input) structure to pack.<br> +<span class="commandline">buffer</span> +(input/output) buffer to pack structure into. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="commandline"> +void jobacct_gather_p_unpack(jobacctinfo_t *jobacct, Buf buffer) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_unpack() unpack jobacctinfo_t from a buffer received from +the network. +You will need to free the jobacctinfo_t returned by this function! 
+<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">jobacct</span> +(input/output) structure to fill.<br> +<span class="commandline">buffer</span> +(input) buffer to unpack structure from.<br> +<p style="margin-left:.2in"><b>Returns</b>: +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +void jobacct_gather_p_aggregate(jobacctinfo_t *dest, jobacctinfo_t *from) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_aggregate() is called to aggregate and get max values from two +different jobacctinfo structures. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">dest</span> +(input/output) initial structure to be applied to.<br> +<span class="commandline">from</span> +(input) new info to apply to dest. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="footer"><a href="#top">top</a> + +<p class="commandline">int jobacct_gather_p_startpoll(int frequency) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_startpoll() is called at the start of the slurmstepd, +this starts a thread that should poll information to be queried at any time +during throughout the end of the process. +Put global initialization here. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">frequency</span> (input) poll frequency for polling +thread. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline">int jobacct_gather_p_endpoll() +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_endpoll() is called when the process is finished to stop the +polling thread. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">none</span> +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline">void jobacct_gather_p_suspend_poll() +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_suspend_poll() is called when the process is suspended. +This causes the polling thread to halt until the process is resumed. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">none</span> +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="commandline">void jobacct_gather_p_resume_poll() +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_resume_poll() is called when the process is resumed. +This causes the polling thread to resume operation. +<p style="margin-left:.2in"><b>Arguments</b>:<br> +<span class="commandline">none</span> +<p style="margin-left:.2in"><b>Returns</b>:<br> +<span class="commandline">none</span> + +<p class="commandline">int jobacct_gather_p_set_proctrack_container_id(uint32_t id) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_set_proctrack_container_id() is called after the +proctrack container id is known at the start of the slurmstepd, +if using a proctrack plugin to track processes this will set the head +of the process tree in the plugin. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">id</span> (input) procktrack container id. 
+<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline">int jobacct_gather_p_add_task(pid_t pid, uint16_t tid) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_add_task() used to add a task to the poller. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline"> pid</span> (input) Process id <br> +<span class="commandline"> tid</span> (input) slurm global task id +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline">jobacctinfo_t *jobacct_gather_p_stat_task(pid_t pid) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_stat_task() used to get most recent information about task. +You need to FREE the information returned by this function! +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline"> pid</span> (input) Process id +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">jobacctinfo structure pointer</span> on success, or<br> +<span class="commandline">NULL</span> on failure. + +<p class="commandline">jobacctinfo_t *jobacct_gather_p_remove_task(pid_t pid) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_remove_task() used to remove a task from the poller. +You need to FREE the information returned by this function! +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline"> pid</span> (input) Process id +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">Pointer to removed jobacctinfo_t structure</span> +on success, or <br> +<span class="commandline">NULL</span> on failure. + +<p class="commandline"> +void jobacct_gather_p_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_gather_p_2_sacct() is called to transfer information from data structure +jobacct to structure sacct. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">sacct</span> +(input/output) initial structure to be applied to.<br> +<span class="commandline">jobacct</span> +(input) jobacctinfo_t structure containing information to apply to sacct. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="footer"><a href="#top">top</a> + + +<h2>Parameters</h2> +<p>These parameters can be used in the slurm.conf to set up type of +plugin and the frequency at which to gather information about running jobs. +<dl> +<dt><span class="commandline">JobAcctGatherType</span> +<dd>Specifies which plugin should be used. +<dt><span class="commandline">JobAcctGatherFrequency</span> +<dd>Let the plugin know how long between pollings. +</dl> + +<h2>Versioning</h2> +<p> This document describes version 1 of the SLURM Job Accounting Gather API. Future +releases of SLURM may revise this API. A job accounting gather plugin conveys its +ability to implement a particular API version using the mechanism outlined +for SLURM plugins. 
+<p class="footer"><a href="#top">top</a> + +<p style="text-align:center;">Last modified 11 Sep 2007</p> + +<!--#include virtual="footer.txt"--> diff --git a/doc/html/jobacct_storageplugins.shtml b/doc/html/jobacct_storageplugins.shtml new file mode 100644 index 000000000..107c16221 --- /dev/null +++ b/doc/html/jobacct_storageplugins.shtml @@ -0,0 +1,204 @@ +<!--#include virtual="header.txt"--> + +<h1><a name="top">SLURM Job Accounting Storage Plugin API</a></h1> + +<h2> Overview</h2> +<p> This document describes SLURM Job Accounting Storage plugins and the API that +defines them. It is intended as a resource to programmers wishing to write +their own SLURM Job Accounting Storage plugins. This is version 1 of the API. + +<p>SLURM Job Accounting Storage plugins must conform to the +SLURM Plugin API with the following specifications: + +<p><span class="commandline">const char +plugin_name[]="<i>full text name</i>" +<p style="margin-left:.2in"> +A free-formatted ASCII text string that identifies the plugin. + +<p><span class="commandline">const char +plugin_type[]="<i>major/minor</i>"</span><br> +<p style="margin-left:.2in"> +The major type must be "jobacct_storage." +The minor type can be any suitable name +for the type of accounting package. We currently use +<ul> +<li><b>filetxt</b>—Information written to a text file. +<li><b>mysql</b>— Store information in a mysql database. +<li><b>pgsql</b>— Store information in a postgresql database. +<li><b>none</b>— Information is not stored anywhere. +</ul> +<p>The programmer is urged to study +<span class="commandline">src/plugins/jobacct_storage/mysql</span> +for a sample implementation of a SLURM Job Accounting Storage plugin. +<p> The Job Accounting Storage plugin was written to be a interface +to storage data collected by the Job Accounting Gather plugin. When +adding a new database you may want to add common functions in a common +file in the src/common dir. Refer to src/common/mysql_common.c/.h for an +example so other plugins can also use that database type to write out +information. +<p class="footer"><a href="#top">top</a> + + +<h2>API Functions</h2> + +The Job Accounting Storage API uses hooks in the slurmctld. + +<p>All of the following functions are required. Functions which are not +implemented must be stubbed. + +<h4>Functions called by the jobacct_storage plugin</h4> + +<p class="commandline">int jobacct_storage_p_init(char *location) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_init() is called to initiate a connection to the +database server and check the state of the database table to make sure +they are in sync with the table definitions in the plugin. +Put global initialization here. Or open file or anything to initialize +the plugin. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">location</span> (input) database name or log +file location. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline">int jobacct_storage_p_fini() +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_fini() is called at the end of the program that has +called jobacct_storage_p_init this function closes the connection to +the database or logfile. 
+<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">none</span> +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +int jobacct_storage_p_job_start(struct job_record *job_ptr) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_job_start() is called in the jobacct plugin when a +job starts, inserting information into the database about the new job. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">job_ptr</span> (input) information about the job in +slurmctld. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +int jobacct_storage_p_job_complete(struct job_record *job_ptr) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_job_complete() is called in the jobacct plugin when +a job completes, this updates info about end of a job. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">job_ptr</span> (input) information about the job in +slurmctld. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +int jobacct_storage_p_step_start(struct step_record *step_ptr) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_step_start() is called in the jobacct plugin at the +allocation of a new step in the slurmctld, this inserts info about the +beginning of a step. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">step_ptr</span> (input) information about the step in +slurmctld. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +int jobacct_storage_p_step_complete(struct step_record *step_ptr) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_step_complete() is called in the jobacct plugin at +the end of a step in the slurmctld, this updates the ending +information about a step. +<p style="margin-left:.2in"><b>Arguments</b>:<br> +<span class="commandline">step_ptr</span> (input) information about the step in +slurmctld. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">SLURM_SUCCESS</span> on success, or<br> +<span class="commandline">SLURM_ERROR</span> on failure. + +<p class="commandline"> +int jobacct_storage_p_suspend(struct job_record *job_ptr) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_suspend() is called in the jobacct plugin when a +job is suspended or resumed in the slurmctld, this updates the +database about the suspended time of the job. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">job_ptr</span> (input) information about the job in +slurmctld. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="commandline"> +void jobacct_storage_p_get_jobs(List job_list, List selected_steps, +List selected_parts, void *params) +<p style="margin-left:.2in"><b>Description</b>:<br> +jobacct_storage_p_get_jobs() is called to get a list of jobs from the +database given the specific inputs. 
+<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">List job_list </span> (input/output) list to +be filled with jobacct_job_rec_t.<br> +<span class="commandline">List selected_steps </span> +(input) list containing type jobacct_select_step_t to query against.<br> +<span class="commandline">List selected_parts </span> +(input) list containing char *'s of names of partitions to query against.<br> +<span class="commandline">void *params </span> +(input) to be cast as sacct_parameters_t in the plugin. + +<p style="margin-left:.2in">jobacct_job_rec_t, jobacct_select_step_t, +and sacct_parameters_t are +all defined in common/slurm_jobacct.h +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="commandline"> +void jobacct_storage_p_archive(List selected_parts, void *params) +<p style="margin-left:.2in"><b>Description</b>:<br> +database_p_jobcomp_archive() used to archive old data. +<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">List selected_parts </span> +(input) list containing char *'s of names of partitions to query against.<br> +<span class="commandline">void *params </span> +(input) to be cast as sacct_parameters_t in the plugin. +<p style="margin-left:.2in"><b>Returns</b>: <br> +<span class="commandline">none</span> + +<p class="footer"><a href="#top">top</a> + + +<h2>Parameters</h2> +<p>These parameters can be used in the slurm.conf to set up +connections to the database all have defaults based on the plugin type +used. +<dl> +<dt><span class="commandline">JobAcctStorageType</span> +<dd>Specifies which plugin should be used. +<dt><span class="commandline">JobAcctStorageLoc</span> +<dd>Let the plugin the name of the logfile/database name to use. +<dt><span class="commandline">JobAcctStorageHost</span> +<dd>Let the plugin know the host where the database is. +<dt><span class="commandline">JobAcctStoragePort</span> +<dd>Let the plugin know the port to connect to. +<dt><span class="commandline">JobAcctStorageUser</span> +<dd>Let the plugin know the name of the user to connect to the +database with. +<dt><span class="commandline">JobAcctStoragePass</span> +<dd>Let the plugin know the password of the user connecting to the database. +</dl> + +<h2>Versioning</h2> +<p> This document describes version 1 of the SLURM Job Accounting Storage API. Future +releases of SLURM may revise this API. A Job Accounting Storage plugin conveys its +ability to implement a particular API version using the mechanism outlined +for SLURM plugins. +<p class="footer"><a href="#top">top</a> + +<p style="text-align:center;">Last modified 23 May 2007</p> + +<!--#include virtual="footer.txt"--> diff --git a/doc/html/jobacctplugins.shtml b/doc/html/jobacctplugins.shtml deleted file mode 100644 index fc1cdff64..000000000 --- a/doc/html/jobacctplugins.shtml +++ /dev/null @@ -1,359 +0,0 @@ -<!--#include virtual="header.txt"--> - -<h1><a name="top">SLURM Job Accounting Plugin API</a></h1> - -<h2> Overview</h2> -<p> This document describes SLURM job accounting plugins and the API that -defines them. It is intended as a resource to programmers wishing to write -their own SLURM job accounting plugins. This is version 1 of the API. - - -<p>SLURM job accounting plugins must conform to the -SLURM Plugin API with the following specifications: - -<p><span class="commandline">const char -plugin_name[]="<i>full text name</i>" -<p style="margin-left:.2in"> -A free-formatted ASCII text string that identifies the plugin. 
- -<p><span class="commandline">const char -plugin_type[]="<i>major/minor</i>"</span><br> -<p style="margin-left:.2in"> -The major type must be "jobacct." -The minor type can be any suitable name -for the type of accounting package. We currently use -<ul> -<li><b>aix</b>— Gathers information from AIX /proc table and adds this -information to the standard rusage information also gathered for each job. -<li><b>linux</b>—Gathers information from Linux /proc table and adds this -information to the standard rusage information also gathered for each job. -<li><b>none</b>—No information gathered. -</ul> -The <b>sacct</b> program can be used to display gathered data from regular -accounting and from these plugins. -<p>The programmer is urged to study -<span class="commandline">src/plugins/jobacct/linux</span> and -<span class="commandline">src/plugins/jobacct/common</span> -for a sample implementation of a SLURM job accounting plugin. -<p class="footer"><a href="#top">top</a> - - -<h2>API Functions</h2> - -The job accounting API uses hooks in the slurmctld, slurmd, and slurmstepd. - -<p>All of the following functions are required. Functions which are not -implemented must be stubbed. - -<h4>Functions called by all slurmstepd processes</h4> - -<p class="commandline">int jobacct_p_startpoll(int frequency) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_startpoll() is called at the start of the slurmstepd, -this starts a thread that should poll information to be queried at any time -during throughout the end of the process. -Put global initialization here. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">frequency</span> (input) poll frequency for polling -thread. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline">int jobacct_p_endpoll() -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_endpoll() is called when the process is finished to stop the -polling thread. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">none</span> -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline">void jobacct_p_suspend_poll() -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_suspend_poll() is called when the process is suspended. -This causes the polling thread to halt until the process is resumed. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">none</span> -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">none</span> - -<p class="commandline">void jobacct_p_resume_poll() -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_resume_poll() is called when the process is resumed. -This causes the polling thread to resume operation. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">none</span> -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">none</span> - - -<p class="commandline">int jobacct_p_add_task(pid_t pid, uint16_t tid) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_add_task() used to add a task to the poller. 
-<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline"> pid</span> (input) Process id -<span class="commandline"> tid</span> (input) slurm global task id -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline">jobacctinfo_t *jobacct_p_stat_task(pid_t pid) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_stat_task() used to get most recent information about task. -You need to FREE the information returned by this function! -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline"> pid</span> (input) Process id -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">jobacctinfo structure pointer</span> on success, or -<span class="commandline">NULL</span> on failure. - -<p class="commandline">jobacctinfo_t *jobacct_p_remove_task(pid_t pid) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_remove_task() used to remove a task from the poller. -You need to FREE the information returned by this function! -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline"> pid</span> (input) Process id -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">Pointer to removed jobacctinfo_t structure</span> -on success, or -<span class="commandline">NULL</span> on failure. - -<p class="footer"><a href="#top">top</a> - -<h4>Functions called by the slurmctld process</h4> - -<p class="commandline">int jobacct_p_init_slurmctld(char *job_acct_log) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_init_slurmctld() is called at the start of the slurmctld, -this opens the logfile to be written to. -Put global initialization here. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">job_acct_log</span> (input) logfile name. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline">int jobacct_p_fini_slurmctld() -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_fini_slurmctld() is called at the end of the slurmctld, -this closes the logfile. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">none</span> -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline"> -int jobacct_p_job_start_slurmctld(struct job_record *job_ptr) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_job_start_slurmctld() is called at the allocation of a new job in -the slurmctld, this prints out beginning information about a job. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">job_ptr</span> (input) information about the job in -slurmctld. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline"> -int jobacct_p_job_complete_slurmctld(struct job_record *job_ptr) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_job_complete_slurmctld() is called at the end of a job in -the slurmctld, this prints out ending information about a job. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">job_ptr</span> (input) information about the job in -slurmctld. 
-<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline"> -int jobacct_p_step_start_slurmctld(struct step_record *step_ptr) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_step_start_slurmctld() is called at the allocation of a new step in -the slurmctld, this prints out beginning information about a step. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">step_ptr</span> (input) information about the step in -slurmctld. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline"> -int jobacct_p_step_complete_slurmctld(struct step_record *step_ptr) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_step_complete_slurmctld() is called at the end of a step in -the slurmctld, this prints out ending information about a step. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">step_ptr</span> (input) information about the step in -slurmctld. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline"> -int jobacct_p_suspend_slurmctld(struct job_record *job_ptr) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_suspend_slurmctld() is called when a job is suspended or resumed in -the slurmctld, this prints out information about the suspension of the job -to the logfile. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">job_ptr</span> (input) information about the job in -slurmctld. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="footer"><a href="#top">top</a> - -<h4>Functions common to all processes</h4> - -<p class="commandline"> -int jobacct_p_init_struct(jobacctinfo_t *jobacct, uint16_t tid) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_init_struct() is called to set the values of a jobacctinfo_t to -initial values. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">jobacct</span> -(input/output) structure to be altered. -<span class="commandline">tid</span> -(input) id of the task send in (uint16_t)NO_VAL if no specfic task. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline">jobacctinfo_t *jobacct_p_alloc(uint16_t tid) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_alloc() used to alloc a pointer to and initialize a -new jobacctinfo structure.<br> -You will need to free the information returned by this function! -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">tid</span> -(input) id of the task send in (uint16_t)NO_VAL if no specfic task. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">jobacctinfo structure pointer</span> on success, or -<span class="commandline">NULL</span> on failure. - -<p class="commandline">void jobacct_p_free(jobacctinfo_t *jobacct) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_free() used to free the allocation made by jobacct_p_alloc(). 
-<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">jobacct</span> -(input) structure to be freed. -<span class="commandline">none</span> -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">none</span> - -<p class="commandline"> -int jobacct_p_setinfo(jobacctinfo_t *jobacct, - enum jobacct_data_type type, void *data) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_setinfo() is called to set the values of a jobacctinfo_t to -specific values based on inputs. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">jobacct</span> -(input/output) structure to be altered. -<span class="commandline">type</span> -(input) enum of specific part of jobacct to alter. -<span class="commandline">data</span> -(input) corresponding data to set jobacct part to. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline"> -int jobacct_p_getinfo(jobacctinfo_t *jobacct, - enum jobacct_data_type type, void *data) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_getinfo() is called to get the values of a jobacctinfo_t -specific values based on inputs. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">jobacct</span> -(input) structure to be queried. -<span class="commandline">type</span> -(input) enum of specific part of jobacct to get. -<span class="commandline">data</span> -(output) corresponding data to from jobacct part. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="commandline"> -void jobacct_p_aggregate(jobacctinfo_t *dest, jobacctinfo_t *from) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_aggregate() is called to aggregate and get max values from two -different jobacctinfo structures. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">dest</span> -(input/output) initial structure to be applied to. -<span class="commandline">from</span> -(input) new info to apply to dest. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">none</span> - -<p class="commandline"> -void jobacct_p_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_2_sacct() is called to transfer information from data structure -jobacct to structure sacct. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">sacct</span> -(input/output) initial structure to be applied to. -<span class="commandline">jobacct</span> -(input) jobacctinfo_t structure containing information to apply to sacct. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">none</span> - -<p class="commandline"> -void jobacct_p_pack(jobacctinfo_t *jobacct, Buf buffer) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_pack() pack jobacctinfo_t in a buffer to send across the network. -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">jobacct</span> -(input) structure to pack. -<span class="commandline">buffer</span> -(input/output) buffer to pack structure into. 
-<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">none</span> - -<p class="commandline"> -void jobacct_p_unpack(jobacctinfo_t *jobacct, Buf buffer) -<p style="margin-left:.2in"><b>Description</b>: -jobacct_p_unpack() unpack jobacctinfo_t from a buffer received from -the network. -You will need to free the jobacctinfo_t returned by this function! -<p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline">jobacct</span> -(input/output) structure to fill. -<span class="commandline">buffer</span> -(input) buffer to unpack structure from. -<p style="margin-left:.2in"><b>Returns</b>: -<span class="commandline">SLURM_SUCCESS</span> on success, or -<span class="commandline">SLURM_FAILURE</span> on failure. - -<p class="footer"><a href="#top">top</a> - -<h2>Parameters</h2> -<p>Rather than proliferate slurm.conf parameters for new or evolved -plugins, the job accounting API counts on three parameters: -<dl> -<dt><span class="commandline">JobAcctType</span> -<dd>Specifies which plugin should be used. -<dt><span class="commandline">JobAcctFrequency</span> -<dd>Let the plugin know how long between pollings. -<dt><span class="commandline">JobAcctLogFile</span> -<dd>Let the plugin the name of the logfile to use. -</dl> - -<h2>Versioning</h2> -<p> This document describes version 1 of the SLURM Job Accounting API. Future -releases of SLURM may revise this API. A job accounting plugin conveys its -ability to implement a particular API version using the mechanism outlined -for SLURM plugins. -<p class="footer"><a href="#top">top</a> - -<p style="text-align:center;">Last modified 31 January 2007</p> - -<!--#include virtual="footer.txt"--> diff --git a/doc/html/jobcompplugins.shtml b/doc/html/jobcompplugins.shtml index 93f21b5bc..e208eb511 100644 --- a/doc/html/jobcompplugins.shtml +++ b/doc/html/jobcompplugins.shtml @@ -16,16 +16,20 @@ abbreviation for the type of scheduler. We recommend, for example:</p> <ul> <li><b>none</b>—No job logging.</li> <li><b>filetxt</b>—Log job information to a text file.</li> +<li><b>mysql</b>—Job completion is written to a mysql database.</li> +<li><b>pgsql</b>—Job completion is written to a pgsql database.</li> <li><b>script</b>—Execute a script passing in job information in environment variables.</li> </ul> +The <b>sacct</b> program with option <b>-c</b> can be used to display +gathered data from database and filetxt plugins. <p>The <span class="commandline">plugin_name</span> and <span class="commandline">plugin_version</span> symbols required by the SLURM Plugin API require no specialization for job completion logging support. Note carefully, however, the versioning discussion below.</p> <p>The programmer is urged to study -<span class="commandline">src/plugins/jobcomp/jobcomp_filetxt.c</span> and -<span class="commandline">src/plugins/jobcomp/jobcomp_none.c</span> +<span class="commandline">src/plugins/jobcomp/filetxt/jobcomp_filetxt.c</span> and +<span class="commandline">src/plugins/jobcomp/none/jobcomp_none.c</span> for sample implementations of a SLURM job completion logging plugin.</p> <p class="footer"><a href="#top">top</a></p> @@ -52,8 +56,7 @@ SLURM_SUCCESS. </p> <p class="footer"><a href="#top">top</a></p> <h2>API Functions</h2> -<p>The following functions must appear. Functions which are not implemented should -be stubbed.</p> +<p>The following functions must appear. 
Functions which are not implemented should be stubbed.</p> <p class="commandline">int slurm_jobcomp_set_location (char * location);</p> <p style="margin-left:.2in"><b>Description</b>: Specify the location to be used for job logging.</p> @@ -63,8 +66,9 @@ this string is at the discretion of the plugin implementation.</p> <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, the plugin should return SLURM_ERROR and set the errno to an appropriate value to indicate the reason for failure.</p> +<p class="footer"><a href="#top">top</a></p> -<p class="commandline">int slurm_jobcomp_log_record ( struct job_record *job_ptr);</p> +<p class="commandline">int slurm_jobcomp_log_record (struct job_record *job_ptr);</p> <p style="margin-left:.2in"><b>Description</b>: Note termination of a job with the specified characteristics.</p> <p style="margin-left:.2in"><b>Argument</b>: <br> @@ -76,15 +80,14 @@ to indicate the reason for failure.</p> <p class="footer"><a href="#top">top</a></p> <a name="get_errno"><p class="commandline">int slurm_jobcomp_get_errno (void);</p></a> -<p style="margin-left:.2in"><b>Description</b>: Return the number of a job completion -logger specific error.</p> +<p style="margin-left:.2in"><b>Description</b>: Return the number of a +job completion logger specific error.</p> <p style="margin-left:.2in"><b>Arguments</b>: None</p> -<p style="margin-left:.2in"><b>Returns</b>: Error number for the last failure encountered by -the job completion logging plugin.</p> +<p style="margin-left:.2in"><b>Returns</b>: Error number for the last failure encountered by the job completion logging plugin.</p> +<p class="footer"><a href="#top">top</a></p> <p class="commandline"><a name="strerror">const char *slurm_jobcomp_strerror(int errnum);</a></p> -<p style="margin-left:.2in"><b>Description</b>: Return a string description of a job completion -logger specific error code.</p> +<p style="margin-left:.2in"><b>Description</b>: Return a string description of a job completion logger specific error code.</p> <p style="margin-left:.2in"><b>Arguments</b>: <span class="commandline"> errnum</span> (input) a job completion logger specific error code.</p> @@ -92,12 +95,40 @@ specific error code.</p> or NULL if no description found in this plugin.</p> <p class="footer"><a href="#top">top</a></p> +<p class="commandline"> +void slurm_jobcomp_get_jobs(List job_list, List selected_steps, List selected_parts, void *params);</p> +<p style="margin-left:.2in"><b>Description</b>: Get completed job info from the storage.</p> +<p style="margin-left:.2in"><b>Arguments</b>:<br> +<span class="commandline"> job_list</span> +(input/output) List of job_rec_t pointers of requested jobs.<br> +<span class="commandline">selected_steps </span> +(input) list containing type jobacct_select_step_t to query against.<br> +<span class="commandline">selected_parts </span> +(input) list containing char *'s of names of partitions to query against.<br> +<span class="commandline">params </span> +(input) to be cast as sacct_parameters_t in the plugin. +<p style="margin-left:.2in">jobcomp_job_rec_t is defined in common/slurm_jobcomp.h +<p style="margin-left:.2in"><b>Returns</b>: None</p> +<p class="footer"><a href="#top">top</a></p> + +<p class="commandline"> +void slurm_jobcomp_archive(List selected_parts, void *params) +<p style="margin-left:.2in"><b>Description</b>: Used to archive old data.
+<p style="margin-left:.2in"><b>Arguments</b>: <br> +<span class="commandline">List selected_parts </span> +(input) list containing char *'s of names of partitions to query against.<br> +<span class="commandline">void *params </span> +(input) to be cast as sacct_parameters_t in the plugin. +<p style="margin-left:.2in"><b>Returns</b>: None</p> +<p class="footer"><a href="#top">top</a></p> + + <h2>Versioning</h2> -<p> This document describes version 0 of the SLURM job completion API. Future +<p> This document describes version 1 of the SLURM job completion API. Future releases of SLURM may revise this API. A job completion plugin conveys its ability to implement a particular API version using the mechanism outlined for SLURM plugins.</p> <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 10 July 2005</p> +<p style="text-align:center;">Last modified 10 Sep 2007</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/maui.shtml b/doc/html/maui.shtml index f630980dc..7debca57e 100644 --- a/doc/html/maui.shtml +++ b/doc/html/maui.shtml @@ -30,7 +30,9 @@ Then build Maui from its source distribution. This is a two step process:</p> <li>gmake </ol> <p>The key of 42 is arbitrary. You can use any value, but will need to -specify the same value as a SLURM configuration parameter.</p> +specify the same value as a SLURM configuration parameter. +Maui developers have assured us the authentication key will eventually be +set in a configuration file rather than at build time.</p> <p>Update the Maui configuration file <i>maui.conf</i> (Copy the file maui-3.2.6p9/maui.cfg.dist to maui.conf). Add the following configuration @@ -90,10 +92,9 @@ SchedulerType=sched/wiki SchedulerPort=7321 SchedulerAuth=42 (for Slurm version 1.1 and earlier only) </pre> -<p>In this case, "SchedulerAuth" has been set to 42, which was the key -specified when Maui was configured above. Just make sure the numbers match. -The Maui folks have assured us that the key won't always be a compiled-in -feature.</p> +<p>In this case, "SchedulerAuth" has been set to 42, which was the +authentication key specified when Maui was configured above. +Just make sure the numbers match.</p> <p>For Slurm version 1.2 or higher, the authentication key is stored in a file specific to the wiki-plugin named @@ -160,6 +161,6 @@ HidePartitionJobs=debug <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 17 September 2007</p> +<p style="text-align:center;">Last modified 19 September 2007</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/moab.shtml b/doc/html/moab.shtml index 30f40b30a..247b3f734 100644 --- a/doc/html/moab.shtml +++ b/doc/html/moab.shtml @@ -175,8 +175,78 @@ CLIENTCFG[RM:slurm] KEY=123456789 </pre> <p>Insure that this file is protected from viewing by users. </p> +<h3>Job Submission</h3> + +<p>Jobs can either be submitted to Moab or directly to SLURM. +Moab's <i>msub</i> command has a <i>--slurm</i> option that can +be placed at the <b>end</b> of the command line and those options +will be passed to SLURM. This can be used to invoke SLURM +options which are not directly supported by Moab (e.g. +system images to boot, task distribution specification across +sockets, cores, and hyperthreads, etc.).
+For example: +<pre> +msub my.script -l walltime=600,nodes=2 \ + --slurm --linux-image=/bgl/linux_image2 +</pre> + +<h3>User Environment</h3> + +<p>When a user submits a job to Moab, that job could potentially +execute on a variety of computers, so it is typically necessary +that the user's environment on the execution host be loaded. +Moab relies upon SLURM to perform this action, using the +<i>--get-user-env</i> option for the salloc, sbatch and srun commands. +The SLURM command then executes, as user root, a command of this sort: +<pre> +/bin/su - <user> -c \ + "/bin/echo BEGIN; /bin/env; /bin/echo FINI" +</pre> +While this command is executing within salloc, sbatch or srun, +the Moab daemon is completely non-responsive. +To insure that Moab remains operational, SLURM will abort the above +command within a few seconds and look for a cache file with the +user's environment and use that if found. +Otherwise an error is reported to Moab. +We have provided a simple program that can be used to build +cache files for users. The program can be found in the SLURM +distribution at <i>contribs/env_cache_builder.c</i>. +This program can support a longer timeout than Moab, but +will report errors for users for whom the environment file +cannot be automatically built (typically due to the user's +"dot" files spawning another shell so the desired command +never executes). +For such users, you can manually build a cache file. +You may want to execute this program periodically to capture +information for new users or changes in existing users' +environment. +A sample execution is shown below. +Run this on the same host as the Moab daemon and execute it as user root.</p> + +<pre> +bash-3.00# make -f /dev/null env_cache_builder +cc env_cache_builder.c -o env_cache_builder +bash-3.00# ./env_cache_builder +Building user environment cache files for Moab/Slurm. +This will take a while. + +Processed 100 users... +***ERROR: Failed to get current user environment variables for alice +***ERROR: Failed to get current user environment variables for brian +Processed 200 users... +Processed 300 users... +***ERROR: Failed to get current user environment variables for christine +***ERROR: Failed to get current user environment variables for david + +Some user environments could not be loaded. +Manually run 'env' for those 4 users.
+Write the output to a file with the same name as the user in the + /usr/local/tmp/slurm/atlas/env_cache directory +</pre> + <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 17 August 2007</p> +<p style="text-align:center;">Last modified 23 April 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/news.shtml b/doc/html/news.shtml index 56b9cd073..432b09786 100644 --- a/doc/html/news.shtml +++ b/doc/html/news.shtml @@ -6,7 +6,7 @@ <ul> <li><a href="#11">SLURM Version 1.1, May 2006</a></li> <li><a href="#12">SLURM Version 1.2, February 2007</a></li> -<li><a href="#13">SLURM Version 1.3, Winter 2007</a></li> +<li><a href="#13">SLURM Version 1.3, March 2008</a></li> <li><a href="#14">SLURM Version 1.4 and beyond</a></li> </ul> @@ -57,20 +57,33 @@ mechanism.</li> <a href="power_save.html">power savings</a> on idle nodes.</li> <li>Support for MPICH-MX, MPICH1/shmem and MPICH1/p4 added with task launch directly from the <i>srun</i> command.</li> -<li>Perl APIs and wrappers for mpiexec and the Torque/PBS commands.</li> +<li>Wrappers available for common Torque/PBS commands +(<i>psub</i>, <i>pstat</i>, and <i>pbsnodes</i>).</li> +<li>Support for <a href="http://www-unix.globus.org/">Globus</a> +(using Torque/PBS command wrappers).</li> +<li>Wrapper available for <i>mpiexec</i> command.</li> </ul> <h2><a name="13">Major Updates in SLURM Version 1.3</a></h2> -<p>SLURM Version 1.3 is scheduled for release in the Winter of 2007. +<p>SLURM Version 1.3 was released in March 2008. Major enhancements include: <ul> -<li>Job accounting and completion data stored in a database +<li>Job accounting and completion data can be stored in a database (MySQL, PGSQL or simple text file).</li> -<li>Gang scheduling of jobs (time-slicing of parallel jobs +<li>SlurmDBD (Slurm Database Daemon) introduced to provide secure +database support across multiple clusters.</li> +<li>Gang scheduler plugin added (time-slicing of parallel jobs without an external scheduler).</li> -<li>Support for BlueGene/P systems.</li> -<li><i>Srun</i>'s --alloc, --attach, and --batch options removed (use +<li>Cryptography logic moved to a separate plugin with the +option of using OpenSSL (default) or Munge (GPL).</li> +<li>Improved scheduling of multiple job steps within a job's allocation.</li> +<li>Support for job specification of node features with node counts.</li> +<li><i>srun</i>'s --alloc, --attach, and --batch options removed (use <i>salloc</i>, <i>sattach</i> or <i>sbatch</i> commands instead).</li> +<li><i>srun --pty</i> option added to support remote pseudo terminal for +spawned tasks.</li> +<li>Support added for a much richer job dependency specification +including testing of exit codes and multiple dependencies.</li> </ul> <h2><a name="14">Major Updates in SLURM Version 1.4 and beyond</a></h2> @@ -84,6 +97,6 @@ to coordinate activities. Future development plans include: and refresh.</li> </ul> -<p style="text-align:center;">Last modified 15 August 2007</p> +<p style="text-align:center;">Last modified 11 March 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/overview.shtml b/doc/html/overview.shtml index 126fb45b1..5184d8464 100644 --- a/doc/html/overview.shtml +++ b/doc/html/overview.shtml @@ -9,14 +9,13 @@ SLURM has three key functions. First, it allocates exclusive and/or non-exclusiv access to resources (compute nodes) to users for some duration of time so they can perform work.
Second, it provides a framework for starting, executing, and monitoring work (normally a parallel job) on the set of allocated nodes. Finally, -it arbitrates conflicting requests for resources by managing a queue of pending -work.</p> +it arbitrates contention for resources by managing a queue of pending work.</p> <p>SLURM has been developed through the collaborative efforts of -<a href="http://www.llnl.gov/">Lawrence Livermore National Laboratory (LLNL)</a>, +<a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory (LLNL)</a>, <a href="http://www.hp.com/">Hewlett-Packard</a>, -<a href="http://www.bull.com/">Bull</a>, and -<a href="http://www.lnxi.com/">Linux NetworX</a>. +<a href="http://www.bull.com/">Bull</a>, +<a href="http://www.lnxi.com/">Linux NetworX</a> and many other contributors. HP distributes and supports SLURM as a component in their XC System Software.</p> <h2>Architecture</h2> @@ -26,16 +25,20 @@ event of failure. Each compute server (node) has a <b>slurmd</b> daemon, which can be compared to a remote shell: it waits for work, executes that work, returns status, and waits for more work. The <b>slurmd</b> daemons provide fault-tolerant hierarchical communications. +There is an optional <b>slurmdbd</b> (Slurm DataBase Daemon) which can be used +to record accounting information for multiple Slurm-managed clusters in a +single database. User tools include <b>srun</b> to initiate jobs, <b>scancel</b> to terminate queued or running jobs, <b>sinfo</b> to report system -status, and <b>squeue</b> to report the status of jobs. +status, <b>squeue</b> to report the status of jobs, and <b>sacct</b> to get information +about jobs and job steps that are running or have completed. The <b>smap</b> and <b>sview</b> commands graphically report system and job status including network topology. There is also an administrative tool <b>scontrol</b> available to monitor and/or modify configuration and state information. APIs are available for all functions.</p> <div class="figure"> - <img src="arch.gif" width="600"><br /> + <img src="arch.gif" width="550"><br> Figure 1. SLURM components </div> @@ -47,15 +50,21 @@ building block approach.
These plugins presently include: <a href="http://www.theether.org/authd/">authd</a>, <a href="http://home.gna.org/munge/">munge</a>, or none (default).</li> -<li><a href="checkpoint_plugins.html">Checkpoint</a>: AIX or none.</li> +<li><a href="checkpoint_plugins.html">Checkpoint</a>: AIX, OpenMPI, XLCH, or none.</li> +<li><a href="crypto_plugins.html">Cryptography</a>: Munge or OpenSSL</li> +<li><a href="jobacct_gatherplugins.html">Job Accounting Gather</a>: AIX, Linux, or none (default)</li> -<li><a href="jobacctplugins.html">Job accounting</a>: AIX, Linux or none</li> +<li><a href="jobacct_storageplugins.html">Job Accounting Storage</a>: +text file (default if jobacct_gather != none), +<a href="http://www.clusterresources.com/pages/products/gold-allocation-manager.php">Gold</a>, +MySQL, PGSQL, SlurmDBD (Slurm Database Daemon) or none</li> -<li><a href="jobcompplugins.html">Job completion logging</a>: text file, -arbitrary script, or none (default).</li> +<li><a href="jobcompplugins.html">Job completion logging</a>: +text file, arbitrary script, MySQL, PGSQL, SlurmDBD, or none (default).</li> -<li><a href="mpiplugins.html">MPI</a>: LAM, MPICH-GM, MVAPICH, -and none (default, for most other versions of MPI.</li> +<li><a href="mpiplugins.html">MPI</a>: LAM, MPICH1-P4, MPICH1-shmem, +MPICH-GM, MPICH-MX, MVAPICH, OpenMPI and none (default, for most +other versions of MPI including MPICH2 and MVAPICH2).</li> <li><a href="selectplugins.html">Node selection</a>: Blue Gene (a 3-D torus interconnect), @@ -68,10 +77,11 @@ RMS (Quadrics Linux kernel patch), and <a href="http://oss.sgi.com/projects/pagg/">SGI's Process Aggregates (PAGG)</a>.</li> <li><a href="schedplugins.html">Scheduler</a>: +FIFO (First In First Out, default), backfill, gang (time-slicing for parallel jobs), <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php"> -The Maui Scheduler</a>, +The Maui Scheduler</a>, and <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php"> -Moab Cluster Suite</a>, backfill, or FIFO (default).</li> +Moab Cluster Suite</a>. <li><a href="switchplugins.html">Switch or interconnect</a>: <a href="http://www.quadrics.com/">Quadrics</a> @@ -94,10 +104,13 @@ Priority-ordered jobs are allocated nodes within a partition until the resources a job is assigned a set of nodes, the user is able to initiate parallel work in the form of job steps in any configuration within the allocation. For instance, a single job step may be started that utilizes all nodes allocated to the job, -or several job steps may independently use a portion of the allocation.</p> +or several job steps may independently use a portion of the allocation. +SLURM provides resource management for the processors allocated to a job, +so that multiple job steps can be simultaneously submitted and queued until +there are available resources within the job's allocation.</p> <div class="figure"> - <img src="entities.gif" width="291" height="218"><br /> + <img src="entities.gif" width="550"><br> Figure 2. SLURM entities </div> @@ -107,10 +120,14 @@ or several job steps may independently use a portion of the allocation.</p> <p>Node state monitored include: count of processors, size of real memory, size of temporary disk space, and state (UP, DOWN, etc.). Additional node information includes weight (preference in being allocated work) and features (arbitrary information -such as processor speed or type). Nodes are grouped into disjoint partitions. +such as processor speed or type).
+Nodes are grouped into partitions, which may contain overlapping nodes so they are +best thought of as job queues. Partition information includes: name, list of associated nodes, state (UP or DOWN), -maximum job time limit, maximum node count per job, group access list, and shared -node access (YES, NO or FORCE). Bit maps are used to represent nodes and scheduling +maximum job time limit, maximum node count per job, group access list, +priority (important if nodes are in multiple partitions) and shared node access policy +with optional over-subscription level for gang scheduling (e.g. YES, NO or FORCE:2). +Bit maps are used to represent nodes and scheduling decisions can be made by performing a small number of comparisons and a series of fast bit map manipulations. A sample (partial) SLURM configuration file follows.</p> <pre> @@ -135,20 +152,21 @@ TmpFS=/tmp # # Node Configurations # -NodeName=DEFAULT TmpDisk=16384 State=IDLE +NodeName=DEFAULT Procs=4 TmpDisk=16384 State=IDLE NodeName=lx[0001-0002] State=DRAINED -NodeName=lx[0003-8000] Procs=16 RealMemory=2048 Weight=16 -NodeName=lx[8001-9999] Procs=32 RealMemory=4096 Weight=40 Feature=1200MHz +NodeName=lx[0003-8000] RealMemory=2048 Weight=2 +NodeName=lx[8001-9999] RealMemory=4096 Weight=6 Feature=video # # Partition Configurations # PartitionName=DEFAULT MaxTime=30 MaxNodes=2 PartitionName=login Nodes=lx[0001-0002] State=DOWN -PartitionName=debug Nodes=lx[0003-0030] State=UP Default=YES +PartitionName=debug Nodes=lx[0003-0030] State=UP Default=YES PartitionName=class Nodes=lx[0031-0040] AllowGroups=students -PartitionName=batch Nodes=lx[0041-9999] MaxTime=UNLIMITED MaxNodes=4096 +PartitionName=DEFAULT MaxTime=UNLIMITED MaxNodes=4096 +PartitionName=batch Nodes=lx[0041-9999] </pre> -<p style="text-align:center;">Last modified 8 November 2006</p> +<p style="text-align:center;">Last modified 11 March 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/power_save.shtml b/doc/html/power_save.shtml index 9cc5b367c..0f4a8de48 100644 --- a/doc/html/power_save.shtml +++ b/doc/html/power_save.shtml @@ -22,51 +22,53 @@ SLURM's support to increase power demands in a gradual fashion.</p> <h2>Configuration</h2> -<p>Rather than changing SLURM's configuration file (and data -structures) after SLURM version 1.2 was released, we decided to -temporarily put the configuration parameters directly in the -<i>src/slurmctld/power_save.c</i> file. -These paramters will all be moved into the <i>slurm.conf</i> -configuration file when SLURM version 1.3 is released. -Until that time, pleased directly edit the code to use this feature. +<p>A great deal of flexibility is offered in terms of when and +how idle nodes are put into or removed from power save mode. The following configuration paramters are available: <ul> -<li><b>IdleTime</b>: + +<li><b>SuspendTime</b>: Nodes becomes elligible for power saving mode after being idle for this number of seconds. A negative number disables power saving mode. The default value is -1 (disabled).</li> + <li><b>SuspendRate</b>: Maximum number of nodes to be placed into power saving mode per minute. A value of zero results in no limits being imposed. The default value is 60. Use this to prevent rapid drops in power requirements.</li> + <li><b>ResumeRate</b>: Maximum number of nodes to be placed into power saving mode per minute. A value of zero results in no limits being imposed. The default value is 60. 
Use this to prevent rapid increasses in power requirements.</li> + <li><b>SuspendProgram</b>: Program to be executed to place nodes into power saving mode. The program executes as <i>SlurmUser</i> (as configured in -<i>slurm.conf</i>. +<i>slurm.conf</i>). The argument to the program will be the names of nodes to be placed into power savings mode (using SLURM's hostlist expression format).</li> + <li><b>ResumeProgram</b>: Program to be executed to remove nodes from power saving mode. The program executes as <i>SlurmUser</i> (as configured in -<i>slurm.conf</i>. +<i>slurm.conf</i>). The argument to the program will be the names of nodes to be removed from power savings mode (using SLURM's hostlist expression format).</li> -<li><b>ExcludeSuspendNodes</b>: + +<li><b>SuspendExcNodes</b>: List of nodes to never place in power saving mode. Use SLURM's hostlist expression format. By default, no nodes are excluded.</li> -<li><b>ExcludeSuspendPartitions</b>: + +<li><b>SuspendExcParts</b>: List of partitions with nodes to never place in power saving mode. Multiple partitions may be specified using a comma separator. By default, no nodes are excluded.</li> @@ -115,6 +117,6 @@ nodes are in power save mode using messages of this sort: You can also configure SLURM without SuspendProgram or ResumeProgram values to assess the potential impact of power saving mode before enabling it.</p> -<p style="text-align:center;">Last modified 9 May 2007</p> +<p style="text-align:center;">Last modified 14 May 2007</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/preempt.shtml b/doc/html/preempt.shtml new file mode 100644 index 000000000..d58acf003 --- /dev/null +++ b/doc/html/preempt.shtml @@ -0,0 +1,247 @@ +<!--#include virtual="header.txt"--> + +<H1>Preemption</H1> + +<P> +SLURM version 1.2 and earlier supported dedication of resources +to jobs based on a simple "first come, first served" policy with backfill. +Beginning in SLURM version 1.3, priority-based <I>preemption</I> is supported. +Preemption is the act of suspending one or more "low-priority" jobs to let a +"high-priority" job run uninterrupted until it completes. Preemption provides +the ability to prioritize the workload on a cluster. +</P> +<P> +The SLURM version 1.3.1 <I>sched/gang</I> plugin supports preemption. +When configured, +the plugin monitors each of the partitions in SLURM. If a new job in a +high-priority partition has been allocated to resources that have already been +allocated to one or more existing jobs from lower priority partitions, the +plugin respects the partition priority and suspends the low-priority job(s). The +low-priority job(s) remain suspended until the job from the high-priority +partition completes. Once the high-priority job completes then the low-priority +job(s) are resumed. +</P> + +<H2>Configuration</H2> +<P> +There are several important configuration parameters relating to preemption: +</P> +<UL> +<LI> +<B>SelectType</B>: The SLURM <I>sched/gang</I> plugin supports nodes +allocated by the <I>select/linear</I> plugin and socket/core/CPU resources +allocated by the <I>select/cons_res</I> plugin. +See <A HREF="#future_work">Future Work</A> below for more +information on "preemption with consumable resources". +</LI> +<LI> +<B>SelectTypeParameter</B>: Since resources will be getting overallocated +with jobs (the preempted job will remain in memory), the resource selection +plugin should be configured to track the amount of memory used by each job to +ensure that memory page swapping does not occur. 
When <I>select/linear</I> is +chosen, we recommend setting <I>SelectTypeParameter=CR_Memory</I>. When +<I>select/cons_res</I> is chosen, we recommend including Memory as a resource +(ex. <I>SelectTypeParameter=CR_Core_Memory</I>). +</LI> +<LI> +<B>DefMemPerTask</B>: Since job requests may not explicitly specify +a memory requirement, we also recommend configuring <I>DefMemPerTask</I> +(default memory per task). It may also be desirable to configure +<I>MaxMemPerTask</I> (maximum memory per task) in <I>slurm.conf</I>. +</LI> +<LI> +<B>JobAcctGatherType and JobAcctGatherFrequency</B>: +If you wish to enforce memory limits, accounting must be enabled +using the <I>JobAcctGatherType</I> and <I>JobAcctGatherFrequency</I> +parameters. If accounting is enabled and a job exceeds its configured +memory limits, it will be canceled in order to prevent it from +adversely affecting other jobs sharing the same resources. +</LI> +<LI> +<B>SchedulerType</B>: Configure the <I>sched/gang</I> plugin by setting +<I>SchedulerType=sched/gang</I> in <I>slurm.conf</I>. +</LI> +<LI> +<B>Priority</B>: Configure the partition's <I>Priority</I> setting relative to +other partitions to control the preemptive behavior. If two jobs from two +different partitions are allocated to the same resources, the job in the +partition with the greater <I>Priority</I> value will preempt the job in the +partition with the lesser <I>Priority</I> value. If the <I>Priority</I> values +of the two partitions are equal then no preemption will occur, and the two jobs +will run simultaneously on the same resources. The default <I>Priority</I> value +is 1. +</LI> +<LI> +<B>Shared</B>: Configure the partition's <I>Shared</I> setting to +<I>FORCE</I> for all partitions that will preempt or that will be preempted. The +<I>FORCE</I> setting is required to enable the select plugins to overallocate +resources. Jobs submitted to a partition that does not share its resources will +not preempt other jobs, nor will those jobs be preempted. Instead those jobs +will wait until the resources are free for non-shared use by each job. +<BR> +The <I>FORCE</I> option now supports an additional parameter that controls +how many jobs can share a resource within the partition (FORCE[:max_share]). By +default the max_share value is 4. To disable timeslicing within a partition but +enable preemption with other partitions, set <I>Shared=FORCE:1</I>. +</LI> +<LI> +<B>SchedulerTimeSlice</B>: The default timeslice interval is 30 seconds. +To change this duration, set <I>SchedulerTimeSlice</I> to the desired interval +(in seconds) in <I>slurm.conf</I>. For example, to set the timeslice interval +to one minute, set <I>SchedulerTimeSlice=60</I>. Short values can increase +the overhead of gang scheduling. This parameter is only relevant if timeslicing +within a partition will be configured. Preemption and timeslicing can occur at +the same time. +</LI> +</UL> +<P> +To enable preemption after making the configuration changes described above, +restart SLURM if it is already running. Any change to the plugin settings in +SLURM requires a full restart of the daemons. If you just change the partition +<I>Priority</I> or <I>Shared</I> setting, this can be updated with +<I>scontrol reconfig</I>. +</P> + +<H2>Preemption Design and Operation</H2> + +<P> +When enabled, the <I>sched/gang</I> plugin keeps track of the resources +allocated to all jobs. For each partition an "active bitmap" is maintained that +tracks all concurrently running jobs in the SLURM cluster.
Each partition also +maintains a job list for that partition, and a list of "shadow" jobs. These +"shadow" jobs are running jobs from higher priority partitions that "cast +shadows" on the active bitmaps of the lower priority partitions. +</P> +<P> +Each time a new job is allocated to resources in a partition and begins running, +the <I>sched/gang</I> plugin adds a "shadow" of this job to all lower priority +partitions. The active bitmaps of these lower priority partitions are then +rebuilt, with the shadow jobs added first. Any existing jobs that were replaced +by one or more "shadow" jobs are suspended (preempted). Conversely, when a +high-priority running job completes, its "shadow" goes away and the active +bitmaps of the lower priority partitions are rebuilt to see if any suspended +jobs can be resumed. +</P> +<P> +The gang scheduler plugin is primarily designed to be <I>reactive</I> to the +resource allocation decisions made by the Selector plugins. This is why +<I>Shared=FORCE</I> is required in each partition. The <I>Shared=FORCE</I> +setting enables the <I>select/linear</I> and <I>select/cons_res</I> plugins to +overallocate the resources between partitions. This keeps all of the node +placement logic in the <I>select</I> plugins, and leaves the gang scheduler in +charge of controlling which jobs should run on the overallocated resources. +</P> +<P> +The <I>sched/gang</I> plugin suspends jobs via the same internal functions that +support <I>scontrol suspend</I> and <I>scontrol resume</I>. A good way to +observe the act of preemption is by running <I>watch squeue</I> in a terminal +window. +</P> + +<H2>A Simple Example</H2> + +<P> +The following example is configured with <I>select/linear</I>, +<I>sched/gang</I>, and <I>Shared=FORCE:1</I>. This example takes place on a +cluster of 5 nodes: +</P> +<PRE> +[user@n16 ~]$ <B>sinfo</B> +PARTITION AVAIL TIMELIMIT NODES STATE NODELIST +active* up infinite 5 idle n[12-16] +hipri up infinite 5 idle n[12-16] +</PRE> +<P> +Here are the Partition settings: +</P> +<PRE> +[user@n16 ~]$ <B>grep PartitionName /shared/slurm/slurm.conf</B> +PartitionName=active Priority=1 Default=YES Shared=FORCE:1 Nodes=n[12-16] +PartitionName=hipri Priority=2 Shared=FORCE:1 Nodes=n[12-16] +[user@n16 ~]$ +</PRE> +<P> +The <I>runit.pl</I> script launches a simple load-generating app that runs +for the given number of seconds.
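+(The contents of <I>runit.pl</I> are not part of this document; a minimal
+sketch of such a load generator, in Perl, might look like the following.
+The name and behavior are illustrative assumptions only.)
+</P>
+<PRE>
+#!/usr/bin/perl -w
+# Illustrative sketch: busy-loop for the number of seconds given as the
+# first argument (e.g. "./runit.pl 300") so the job consumes CPU time.
+use strict;
+my $seconds = shift || 300;
+my $end = time() + $seconds;
+my $junk = 0;
+$junk += rand() while (time() < $end);
+</PRE>
+<P>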
Submit 5 single-node <I>runit.pl</I> jobs to +run on all nodes: +</P> +<PRE> +[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B> +sbatch: Submitted batch job 485 +[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B> +sbatch: Submitted batch job 486 +[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B> +sbatch: Submitted batch job 487 +[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B> +sbatch: Submitted batch job 488 +[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B> +sbatch: Submitted batch job 489 +[user@n16 ~]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 485 active runit.pl user R 0:06 1 n12 + 486 active runit.pl user R 0:06 1 n13 + 487 active runit.pl user R 0:05 1 n14 + 488 active runit.pl user R 0:05 1 n15 + 489 active runit.pl user R 0:04 1 n16 +[user@n16 ~]$ +</PRE> +<P> +Now submit a short-running 3-node job to the <I>hipri</I> partition: +</P> +<PRE> +[user@n16 ~]$ <B>sbatch -N3 -p hipri ./runit.pl 30</B> +sbatch: Submitted batch job 490 +[user@n16 ~]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 488 active runit.pl user R 0:29 1 n15 + 489 active runit.pl user R 0:28 1 n16 + 485 active runit.pl user S 0:27 1 n12 + 486 active runit.pl user S 0:27 1 n13 + 487 active runit.pl user S 0:26 1 n14 + 490 hipri runit.pl user R 0:03 3 n[12-14] +[user@n16 ~]$ +</PRE> +<P> +Job 490 in the <I>hipri</I> partition preempted jobs 485, 486, and 487 from +the <I>active</I> partition. Jobs 488 and 489 in the <I>active</I> partition +remained running. +</P> +<P> +This state persisted until job 490 completed, at which point the preempted jobs +were resumed: +</P> +<PRE> +[user@n16 ~]$ <B>squeue</B> +JOBID PARTITION NAME USER ST TIME NODES NODELIST + 485 active runit.pl user R 0:30 1 n12 + 486 active runit.pl user R 0:30 1 n13 + 487 active runit.pl user R 0:29 1 n14 + 488 active runit.pl user R 0:59 1 n15 + 489 active runit.pl user R 0:58 1 n16 +[user@n16 ~]$ +</PRE> + + +<H2><A NAME="future_work">Future Work</A></H2> + +<P> +<B>Preemption with consumable resources</B>: This implementation of preemption +relies on intelligent job placement by the <I>select</I> plugins. As of SLURM +1.3.1 the consumable resource <I>select/cons_res</I> plugin still needs +additional enhancements to the job placement algorithm before its preemption +support can be considered "competent". The mechanics of preemption work, but the +placement of preemptive jobs relative to any low-priority jobs may not be +optimal. The work to improve the placement of preemptive jobs relative to +existing jobs is currently in-progress. +</P> +<P> +<B>Requeue a preempted job</B>: In some situations it may be desirable to +requeue a low-priority job rather than suspend it. Suspending a job leaves the +job in memory. Requeuing a job involves terminating the job and resubmitting it +again. This will be investigated at some point in the future. Requeuing a +preempted job may make the most sense with <I>Shared=NO</I> partitions. +</P> + +<p style="text-align:center;">Last modified 11 April 2008</p> + +<!--#include virtual="footer.txt"--> diff --git a/doc/html/programmer_guide.shtml b/doc/html/programmer_guide.shtml index b1a765a2d..2ee1d221f 100644 --- a/doc/html/programmer_guide.shtml +++ b/doc/html/programmer_guide.shtml @@ -17,9 +17,8 @@ Resource Management</a> [PDF]. engine. While initially written for Linux, other UNIX-like operating systems should be easy porting targets. Code should adhere to the <a href="coding_style.pdf"> Linux kernel coding style</a>.
<i>(Some components of SLURM have been taken from -various sources. Some of these components are written in C++ or do not conform -to the Linux kernel coding style. However, new code written for SLURM should -follow these standards.)</i> +various sources. Some of these components do not conform to the Linux kernel +coding style. However, new code written for SLURM should follow these standards.)</i> <p>Many of these modules have been built and tested on a variety of Unix computers including Red Hat Linux, IBM's AIX, Sun's Solaris, and Compaq's Tru-64. The only @@ -60,14 +59,16 @@ The build process is described in the README file. <p>Copyright and disclaimer information are in the files COPYING and DISCLAIMER. All of the top-level subdirectories are described below.</p> + <p style="margin-left:.2in"><b>auxdir</b>—Used for building SLURM.<br> +<b>contribs</b>—Various contributed tools.<br> <b>doc</b>—Documentation including man pages. <br> <b>etc</b>—Sample configuration files.<br> <b>slurm</b>—Header files for API use. These files must be installed. Placing these header files in this location makes for better code portability.<br> <b>src</b>—Contains all source code and header files not in the "slurm" subdirectory described above.<br> -<b>testsuite</b>—DejaGnu is used as a testing framework and all of its files +<b>testsuite</b>—DejaGnu and Expect are used for testing; all of its files are here.</p> <p class="footer"><a href="#top">top</a></p> @@ -95,16 +96,16 @@ SLURM.<br> subdirectory is used for each plugin class:<br> <ul> <li><b>auth</b> for user authentication,<br> -<li><b>checkpoint</b> for system-initiated checkpoint -and restart of user jobs,<br> -<li><b>jobacct</b> for job accounting,<br> +<li><b>checkpoint</b> for system-initiated checkpoint and restart of user jobs,<br> +<li><b>crypto</b> for cryptographic functions,<br> +<li><b>jobacct_gather</b> for job accounting,<br> +<li><b>jobacct_storage</b> for specifying the type of storage for job accounting,<br> <li><b>jobcomp</b> for job completion logging,<br> <li><b>mpi</b> for MPI support,<br> <li><b>proctrack</b> for process tracking,<br> <li><b>sched</b> for job scheduler,<br> <li><b>select</b> for a job's node selection,<br> -<li><b>switch</b> for switch (interconnect) specific -functions,<br> +<li><b>switch</b> for switch (interconnect) specific functions,<br> <li><b>task</b> for task affinity to processors.<br> </ul> <p style="margin-left:.2in"> @@ -126,6 +127,7 @@ dimension torus topography.<br> <b>squeue</b>—User command to get information on SLURM jobs and job steps.<br> <b>srun</b>—User command to submit a job, get an allocation, and/or initiate a parallel job step.<br> +<b>strigger</b>—User and administrator tool to manage event triggers.<br> <b>sview</b>—User command to view and update node, partition, and job state information.<br> @@ -227,6 +229,6 @@ host1> slurmd -N foo21 <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 29 September 2006</p> +<p style="text-align:center;">Last modified 20 May 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/publications.shtml b/doc/html/publications.shtml index 8d918489e..75f6da572 100644 --- a/doc/html/publications.shtml +++ b/doc/html/publications.shtml @@ -1,6 +1,19 @@ <!--#include virtual="header.txt"--> -<h1>Publications</h1> +<h1>Publications and Presentations</h1> + +<h2>Presentations</h2> + +<ul> +<li><a href="slurm_v1.3.pdf">SLURM Version 1.3</a> (May 2008)</li> +<li><a href="slurm_moab.pdf">Managing Clusters with
Moab and SLURM</a> (May 2008)</li> +<li><a href="slurm_v1.2.pdf">Resource Management at LLNL, SLURM Version 1.2</a> +(April 2007)</li> +<li><a href="lci.7.tutorial.pdf">Resource Management Using SLURM</a>, +(Tutorial, The 7th International Conference on Linux Clusters, May 2006)</li> +</ul> + +<h2>Publications</h2> <b>Enhancing an Open Source Resource Manager with Multi-Core/Multi-threaded Support</b>, S. M. Balle and D. Palermo, <i>Job Scheduling Strategies for Parallel Processing</i>, @@ -19,6 +32,6 @@ volume 2862 of <i>Lecture Notes in Computer Science</i>, pages 44-60, Springer-Verlag, 2003.</p> -<p style="text-align:center;">Last modified 31 August 2007</p> +<p style="text-align:center;">Last modified 28 April 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/quickstart.shtml b/doc/html/quickstart.shtml index 7dae93bd5..b9357f81d 100644 --- a/doc/html/quickstart.shtml +++ b/doc/html/quickstart.shtml @@ -11,17 +11,17 @@ SLURM has three key functions. First, it allocates exclusive and/or non-exclusiv access to resources (compute nodes) to users for some duration of time so they can perform work. Second, it provides a framework for starting, executing, and monitoring work (normally a parallel job) on the set of allocated nodes. Finally, -it arbitrates conflicting requests for resources by managing a queue of pending -work.</p> +it arbitrates contention for resources by managing a queue of pending work.</p> <h2>Architecture</h2> <p>As depicted in Figure 1, SLURM consists of a <b>slurmd</b> daemon running on each compute node and a central <b>slurmctld</b> daemon running on a management node (with optional fail-over twin). The <b>slurmd</b> daemons provide fault-tolerant hierarchical communciations. -The user commands include: <b>salloc</b>, <b>sattach</b>, <b>sbatch</b>, -<b>sbcast</b>, <b>scancel</b>, <b>sinfo</b>, <b>srun</b>, -<b>smap</b>, <b>squeue</b>, and <b>scontrol</b>. +The user commands include: <b>sacct</b>, <b>salloc</b>, <b>sattach</b>, +<b>sbatch</b>, <b>sbcast</b>, <b>scancel</b>, <b>scontrol</b>, +<b>sinfo</b>, <b>smap</b>, <b>squeue</b>, <b>srun</b>, <b>strigger</b> +and <b>sview</b>. All of the commands can run anywhere in the cluster.</p> <div class="figure"> @@ -29,11 +29,12 @@ All of the commands can run anywhere in the cluster.</p> Figure 1. SLURM components </div> -<p>The entities managed by these SLURM daemons, shown in Figure 2, include <b>nodes</b>, -the compute resource in SLURM, <b>partitions</b>, which group nodes into logical -sets, <b>jobs</b>, or allocations of resources assigned to a user for -a specified amount of time, and <b>job steps</b>, which are sets of (possibly -parallel) tasks within a job. +<p>The entities managed by these SLURM daemons, shown in Figure 2, include +<b>nodes</b>, the compute resource in SLURM, +<b>partitions</b>, which group nodes into logical (possibly overlapping) sets, +<b>jobs</b>, or allocations of resources assigned to a user for +a specified amount of time, and +<b>job steps</b>, which are sets of (possibly parallel) tasks within a job. The partitions can be considered job queues, each of which has an assortment of constraints such as job size limit, job time limit, users permitted to use it, etc. Priority-ordered jobs are allocated nodes within a partition until the resources @@ -55,6 +56,9 @@ or several job steps may independently use a portion of the allocation.</p> option <span class="commandline">--help</span> also provides a brief summary of options. 
Note that the command options are all case insensitive.</p> +<p><span class="commandline"><b>sacct</b></span> is used to report job or job +step accounting information about active or completed jobs.</p> + <p><span class="commandline"><b>salloc</b></span> is used to allocate resources for a job in real time. Typically this is used to allocate resources and spawn a shell. The shell is then used to execute srun commands to launch parallel tasks.</p> @@ -68,25 +72,29 @@ for later execution. The script will typically contain one or more srun commands to launch parallel tasks.</p> <p><span class="commandline"><b>sbcast</b></span> is used to transfer a file -from local disk to local disk on the nodes allocated to a job. This can be -used to effectively use diskless compute nodes or provide improved performance +from local disk to local disk on the nodes allocated to a job. This can be +used to effectively use diskless compute nodes or provide improved performance relative to a shared file system.</p> -<p><span class="commandline"><b>scancel</b></span> is used to cancel a pending -or running job or job step. It can also be used to send an arbitrary signal to +<p><span class="commandline"><b>scancel</b></span> is used to cancel a pending +or running job or job step. It can also be used to send an arbitrary signal to all processes associated with a running job or job step.</p> -<p><span class="commandline"><b>scontrol</b></span> is the administrative tool -used to view and/or modify SLURM state. Note that many <span class="commandline">scontrol</span> +<p><span class="commandline"><b>scontrol</b></span> is the administrative tool +used to view and/or modify SLURM state. Note that many <span class="commandline">scontrol</span> commands can only be executed as user root.</p> -<p><span class="commandline"><b>sinfo</b></span> reports the state of partitions -and nodes managed by SLURM. It has a wide variety of filtering, sorting, and formatting +<p><span class="commandline"><b>sinfo</b></span> reports the state of partitions +and nodes managed by SLURM. It has a wide variety of filtering, sorting, and formatting options.</p> -<p><span class="commandline"><b>squeue</b></span> reports the state of jobs or -job steps. It has a wide variety of filtering, sorting, and formatting options. -By default, it reports the running jobs in priority order and then the pending +<p><span class="commandline"><b>smap</b></span> reports state information for +jobs, partitions, and nodes managed by SLURM, but graphically displays the +information to reflect network topology.</p> + +<p><span class="commandline"><b>squeue</b></span> reports the state of jobs or +job steps. It has a wide variety of filtering, sorting, and formatting options. +By default, it reports the running jobs in priority order and then the pending jobs in priority order.</p> <p><span class="commandline"><b>srun</b></span> is used to submit a job for @@ -103,6 +111,10 @@ independent or shared nodes within the job's node allocation.</p> jobs, partitions, and nodes managed by SLURM, but graphically displays the information to reflect network topology.</p> +<p><span class="commandline"><b>strigger</b></span> is used to set, get or +view event triggers. 
Event triggers include things such as nodes going down +or jobs approaching their time limit.</p> + <p><span class="commandline"><b>sview</b></span> is a graphical user interface to get and update state information for jobs, partitions, and nodes managed by SLURM.</p> @@ -278,7 +290,8 @@ option to launch jobs. For example: $MPI_ROOT/bin/mpirun -TCP -srun -N8 ./a.out </pre></p> -<p><a href="http://www-unix.mcs.anl.gov/mpi/mpich2/"><b>MPICH2</b></a> jobs +<p><a href="http://www.mcs.anl.gov/research/projects/mpich2/"><b> +MPICH2</b></a> jobs are launched using the <b>srun</b> command. Just link your program with SLURM's implementation of the PMI library so that tasks can communicate host and port information at startup. (The system administrator can add @@ -318,7 +331,7 @@ $ mpicc ... $ srun -n16 --mpi=mpichmx a.out </pre> -<p><a href="http://nowlab.cse.ohio-state.edu/projects/mpi-iba"><b>MVAPICH</b></a> +<p><a href="http://mvapich.cse.ohio-state.edu/"><b>MVAPICH</b></a> jobs can be launched directly by <b>srun</b> command. SLURM's <i>mvapich</i> MPI plugin must be used to establish communications between the laucnhed tasks. This can be accomplished either using the SLURM @@ -419,6 +432,6 @@ sbatch: Submitted batch job 1234 tasks. These tasks are not managed by SLURM since they are launched outside of its control.</p> -<p style="text-align:center;">Last modified 14 August 2007</p> +<p style="text-align:center;">Last modified 19 September 2007</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/quickstart_admin.shtml b/doc/html/quickstart_admin.shtml index b37d1806b..1549621fd 100644 --- a/doc/html/quickstart_admin.shtml +++ b/doc/html/quickstart_admin.shtml @@ -7,6 +7,8 @@ overview. <h2>Super Quick Start</h2> <ol> +<li>Make sure that you have synchronized clocks plus consistent users and groups +across the cluster.</li> <li>bunzip2 the distributed tar-ball and untar the files:<br> <i>tar --bzip -x -f slurm*tar.bz2</i></li> <li><i>cd</i> to the directory containing the SLURM source and type @@ -22,15 +24,18 @@ NOTE: The parent directories for SLURM's log files, process ID files, state save directories, etc. are not created by SLURM. They must be created and made writable by <i>SlurmUser</i> as needed prior to starting SLURM daemons.</li> -<li>Install the configuration file in <i><sysconfdir>/slurm.conf</i>.</li> +<li>Install the configuration file in <i><sysconfdir>/slurm.conf</i>.<br> +NOTE: You will need to install this configuration file on all nodes of the cluster.</li> <li>Create OpenSSL keys:<br> <i>openssl genrsa -out <sysconfdir>/slurm.key 1024</i><br> <i>openssl rsa -in <sysconfdir>/slurm.key -pubout -out <sysconfdir>/slurm.cert</i><br> -NOTE: You will build the OpenSSL key files on one node and distribute them -to all of the nodes in the cluster.</li> +NOTE: You will build the OpenSSL key files on one node and distribute <i>slurm.cert</i> +to all of the nodes in the cluster. 
<i>slurm.key</i> must be readable only by +<i>SlurmUser</i> and is only needed where the <i>slurmctld</i> (SLURM controller +daemon) executes, typically just a couple of nodes.</li> <li>Start the <i>slurmctld</i> and <i>slurmd</i> daemons.</li> </ol> -<p>NOTE: Items 1 through 4 can be replaced with</p> +<p>NOTE: Items 2 through 5 can be replaced with</p> <ol> <li><i>rpmbuild -ta slurm*.tar.bz2</i></li> <li><i>rpm --install <the rpm files></i></li> @@ -118,7 +123,7 @@ Some macro definitions that may be used in building SLURM include: <dd>Specify if debugging logic within SLURM is to be enabled <dt>_prefix <dd>Pathname of directory to contain the SLURM files -<dt>_sysconfdir +<dt>slurm_sysconfdir <dd>Pathname of directory containing the slurm.conf configuration file <dt>with_munge <dd>Specifies munge (authentication library) installation location @@ -132,12 +137,13 @@ Some macro definitions that may be used in building SLURM include: # .rpmmacros # For AIX at LLNL # Override some RPM macros from /usr/lib/rpm/macros -# Set other SLURM-specific macros for unconventional file locations +# Set SLURM-specific macros for unconventional file locations # %_enable_debug "--with-debug" %_prefix /admin/llnl -%_sysconfdir %{_prefix}/etc/slurm -%with_munge "--with-munge=/admin/llnl" +%slurm_sysconfdir %{_prefix}/etc/slurm +%_defaultdocdir %{_prefix}/doc +%with_munge "--with-munge=/opt/freeware" %with_proctrack "--with-proctrack=/admin/llnl/include" %with_ssl "--with-ssl=/opt/freeware" </pre></p> @@ -187,7 +193,7 @@ authentication infrastructure is provided by a dynamically loaded plugin chosen at runtime via the <b>AuthType</b> keyword in the SLURM configuration file. Currently available authentication types include <a href="http://www.theether.org/authd/">authd</a>, -<a href="ftp://ftp.llnl.gov/pub/linux/munge/">munge</a>, and none. +<a href="http://home.gna.org/munge/">munge</a>, and none. The default authentication infrastructure is "none". This permits any user to execute any job as another user. This may be fine for testing purposes, but certainly not for production use. <b>Configure some AuthType value other than "none" if you want any security.</b> @@ -205,17 +211,26 @@ For more information, see <a href="quickstart.html#mpi">MPI</a>. <h3>Scheduler support</h3> <p>The scheduler used by SLURM is controlled by the <b>SchedulerType</b> configuration -parameter. This is meant to control the relative importance of pending jobs. -SLURM's default scheduler is FIFO (First-In First-Out). A backfill scheduler -plugin is also available. Backfill scheduling will initiate a lower-priority job +parameter. This is meant to control the relative importance of pending jobs and +several options are available. +SLURM's default scheduler is <u>FIFO (First-In First-Out)</u>. +SLURM offers a backfill scheduling plugin. +<u>Backfill scheduling</u> will initiate a lower-priority job if doing so does not delay the expected initiation time of higher priority jobs; essentially using smaller jobs to fill holes in the resource allocation plan. +Effective backfill scheduling does require users to specify job time limits. +SLURM offers a <u>gang scheduler</u>, which time-slices jobs in the same partition/queue +and can be used to preempt jobs from lower-priority queues in order to execute +jobs in higher priority queues.
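+As a simple illustration (the value shown is an assumption, not taken from
+this document), selecting one of these schedulers is a single line in
+<i>slurm.conf</i>:</p>
+<pre>
+# sched/builtin (FIFO) is the default; sched/backfill and sched/gang are alternatives
+SchedulerType=sched/backfill
+</pre>
+<p>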
SLURM also supports a plugin for use of <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php"> The Maui Scheduler</a> or <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php"> -Moab Cluster Suite</a> which offer sophisticated scheduling algorithms. -Motivated users can even develop their own scheduler plugin if so desired. </p> +Moab Cluster Suite</a> which offer sophisticated scheduling algorithms. +For more information about these options see +<a href="gang_scheduling.html">Gang Scheduling</a>, +<a href="preempt.html">Preemption</a> and +<a href="cons_res_share.html">Sharing Consumable Resources</a>.</p> <h3>Node selection</h3> <p>The node selection mechanism used by SLURM is controlled by the @@ -235,6 +250,12 @@ aware and interacts with the BlueGene bridge API).</p> levels for these messages. Be certain that your system's syslog functionality is operational. </p> +<h3>Accounting</h3> +<p>SLURM supports accounting records being written to a simple text file, +directly to a database (MySQL or PostgreSQL), or to a daemon securely +managing accounting data for multiple clusters. For more information +see <a href="accounting.html">Accounting</a>. </p> + <h3>Corefile format</h3> <p>SLURM is designed to support generating a variety of core file formats for application codes that fail (see the <i>--core</i> option of the <i>srun</i> @@ -279,7 +300,8 @@ even those allocated to other users.</p> <h2>Configuration</h2> <p>The SLURM configuration file includes a wide variety of parameters. -This configuration file must be available on each node of the cluster. A full +This configuration file must be available on each node of the cluster and +must have consistent contents. A full description of the parameters is included in the <i>slurm.conf</i> man page. Rather than duplicate that information, a minimal sample configuration file is shown below. Your slurm.conf file should define at least the configuration parameters defined @@ -338,7 +360,8 @@ minimum configuration values will be considered DOWN and not scheduled. Note that a more extensive sample configuration file is provided in <b>etc/slurm.conf.example</b>. We also have a web-based <a href="configurator.html">configuration tool</a> which can -be used to build a simple configuration file.</p> +be used to build a simple configuration file, which can then be +manually edited for more complex configurations.</p> <pre> # # Sample /etc/slurm.conf for mcr.llnl.gov @@ -379,10 +402,26 @@ NodeName=mcr[0-1151] NodeAddr=emcr[0-1151] PartitionName=DEFAULT State=UP PartitionName=pdebug Nodes=mcr[0-191] MaxTime=30 MaxNodes=32 Default=YES PartitionName=pbatch Nodes=mcr[192-1151] -</pre> +</pre> + <h2>Security</h2> -<p>You will should create unique job credential keys for your site -using the program <a href="http://www.openssl.org/">openssl</a>. +<p>The use of <a href="http://www.openssl.org/">OpenSSL</a> is +recommended to provide a digital signature on job step credentials. +<a href="http://home.gna.org/munge/">Munge</a> can alternately +be used with somewhat slower performance. +This signature is used by <i>slurmctld</i> to construct a job step +credential, which is sent to <i>srun</i> and then forwarded to +<i>slurmd</i> to initiate job steps. +This design offers improved performance by removing much of the +job step initiation overhead from the <i> slurmctld </i> daemon. 
+The mechanism to be used is controlled through the <b>CryptoType</b> +configuration parameter (newly added in SLURM version 1.3, +earlier versions always used OpenSSL).</p> + +<h3>OpenSSL</h3> +<p>If using OpenSSL digital signatures, unique job credential keys +must be created for your site using the program +<a href="http://www.openssl.org/">openssl</a>. <b>You must use openssl and not ssh-genkey to construct these keys.</b> An example of how to do this is shown below. Specify file names that match the values of <b>JobCredentialPrivateKey</b> and @@ -391,32 +430,41 @@ The <b>JobCredentialPrivateKey</b> file must be readable only by <b>SlurmUser</b The <b>JobCredentialPublicCertificate</b> file must be readable by all users. Note that you should build the key files on one node and then distribute them to all nodes in the cluster. -This insures that all nodes have a consistent set of encryption keys. -These keys are used by <i>slurmctld</i> to construct a job credential, -which is sent to <i>srun</i> and then forwarded to <i>slurmd</i> to -initiate job steps.</p> +This ensures that all nodes have a consistent set of digital signature +keys. +These keys are used by <i>slurmctld</i> to construct a job step +credential, which is sent to <i>srun</i> and then forwarded to +<i>slurmd</i> to initiate job steps.</p> <p class="commandline" style="margin-left:.2in"> <i>openssl genrsa -out <sysconfdir>/slurm.key 1024</i><br> <i>openssl rsa -in <sysconfdir>/slurm.key -pubout -out <sysconfdir>/slurm.cert</i> </p> -<p>Authentication of communications from SLURM commands to the daemons -or between the daemons uses a different security mechanism that is configurable. -You must specify one "auth" plugin for this purpose. -Currently, only three -authentication plugins are supported: <b>auth/none</b>, <b>auth/authd</b>, and -<b>auth/munge</b>. The auth/none plugin is built and used by default, but either -Brent Chun's <a href="http://www.theether.org/authd/">authd</a>, or Chris Dunlap's -<a href="http://home.gna.org/munge/">munge</a> should be installed in order to -get properly authenticated communications. + +<h3>Munge</h3> +<p>If using Munge digital signatures, no SLURM keys are required. +This will be addressed in the installation and configuration of Munge.</p> + +<h3>Authentication</h3> +<p>Authentication of communications (identifying who generated a particular +message) between SLURM components can use a different security mechanism +that is configurable. +You must specify one "auth" plugin for this purpose (<b>AuthType</b>). +Currently, only three authentication plugins are supported: +<b>auth/none</b>, <b>auth/authd</b>, and <b>auth/munge</b>. +The auth/none plugin is built and used by default, but either +Brent Chun's <a href="http://www.theether.org/authd/">authd</a>, +or LLNL's <a href="http://home.gna.org/munge/">munge</a> +should be installed in order to get properly authenticated communications. Unless you are experienced with authd, we recommend the use of munge. -The configure script in the top-level directory of this distribution will determine -which authentication plugins may be built. The configuration file specifies which -of the available plugins will be utilized. </p> +The configure script in the top-level directory of this distribution will +determine which authentication plugins may be built. +The configuration file specifies which of the available plugins will be utilized (see the sample configuration lines below).
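For example, a site using OpenSSL signatures together with Munge authentication
might carry <i>slurm.conf</i> lines similar to the sketch below; the key file paths
are placeholders for your site, and the parameter names should be verified against
the <i>slurm.conf</i> man page:
<pre>
# Digital signature of job step credentials (CryptoType is new in SLURM 1.3)
CryptoType=crypto/openssl
JobCredentialPrivateKey=/etc/slurm/slurm.key
JobCredentialPublicCertificate=/etc/slurm/slurm.cert
# Authentication of communications between SLURM components
AuthType=auth/munge
</pre>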
</p> +<h3>Pluggable Authentication Module (PAM) support</h3> <p>A PAM module (Pluggable Authentication Module) is available for SLURM that -can prevent a user from accessing a node which he has not been allocated, if that -mode of operation is desired.</p> +can prevent a user from accessing a node which he has not been allocated, +if that mode of operation is desired.</p> <p class="footer"><a href="#top">top</a></p> <h2>Starting the Daemons</h2> @@ -517,45 +565,12 @@ Configuration data as of 03/19-13:04:12 AuthType = auth/munge BackupAddr = eadevj BackupController = adevj +BOOT_TIME = 01/10-09:19:21 +CacheGroups = 0 +CheckpointType = checkpoint/none ControlAddr = eadevi ControlMachine = adevi -Epilog = (null) -FastSchedule = 1 -FirstJobId = 1 -InactiveLimit = 0 -JobCompLoc = /var/tmp/jette/slurm.job.log -JobCompType = jobcomp/filetxt -JobCredPrivateKey = /etc/slurm/slurm.key -JobCredPublicKey = /etc/slurm/slurm.cert -KillWait = 30 -MaxJobCnt = 2000 -MinJobAge = 300 -PluginDir = /usr/lib/slurm -Prolog = (null) -ReturnToService = 1 -SchedulerAuth = (null) -SchedulerPort = 65534 -SchedulerType = sched/backfill -SlurmUser = slurm(97) -SlurmctldDebug = 4 -SlurmctldLogFile = /tmp/slurmctld.log -SlurmctldPidFile = /tmp/slurmctld.pid -SlurmctldPort = 7002 -SlurmctldTimeout = 300 -SlurmdDebug = 65534 -SlurmdLogFile = /tmp/slurmd.log -SlurmdPidFile = /tmp/slurmd.pid -SlurmdPort = 7003 -SlurmdSpoolDir = /tmp/slurmd -SlurmdTimeout = 300 -TreeWidth = 50 -JobAcctLogFile = /tmp/jobacct.log -JobAcctFrequncy = 5 -JobAcctType = jobacct/linux -SLURM_CONFIG_FILE = /etc/slurm/slurm.conf -StateSaveLocation = /usr/local/tmp/slurm/adev -SwitchType = switch/elan -TmpFS = /tmp +... WaitTime = 0 Slurmctld(primary/backup) at adevi/adevj are UP/UP @@ -568,9 +583,9 @@ adev0: scontrol shutdown <p>An extensive test suite is available within the SLURM distribution in <i>testsuite/expect</i>. There are about 250 tests which will execute on the order of 2000 jobs -and 4000 job steps. +and 5000 job steps. Depending upon your system configuration and performance, this test -suite will take roughly 40 minutes to complete. +suite will take roughly 80 minutes to complete. The file <i>testsuite/expect/globals</i> contains default paths and procedures for all of the individual tests. You will need to edit this file to specify where SLURM and other tools are installed. @@ -595,6 +610,6 @@ in the NEWS file. 
</pre> <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 26 March 2007</p> +<p style="text-align:center;">Last modified 28 April 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/review_release.html b/doc/html/review_release.html index 84bd79337..3755b3835 100644 --- a/doc/html/review_release.html +++ b/doc/html/review_release.html @@ -8,42 +8,48 @@ <h1>SLURM Web pages for Review and Release</h1> <b>NOTE: Do not follow links.</b> <ul> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/api.html">api.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/authplugins.html">authplugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/big_sys.html">big_sys.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/bluegene.html">bluegene.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/checkpoint_plugins.html">checkpoint_plugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/configurator.html">configurator.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/cons_res.html">cons_res.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/dist_plane.html">dist_plane.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/documentation.html">documentation.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/download.html">download.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/faq.html">faq.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/help.html">help.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/ibm.html">ibm.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/jobacctplugins.html">jobacctplugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/jobcompplugins.html">jobcompplugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/mail.html">mail.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/maui.html">maui.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/mc_support.html">mc_support.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/moab.html">moab.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/mpiplugins.html">mpiplugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/news.html">news.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/overview.html">overview.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/platforms.html">platforms.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/plugins.html">plugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/proctrack_plugins.html">proctrack_plugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/programmer_guide.html">programmer_guide.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/publications.html">publications.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/quickstart_admin.html">quickstart_admin.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/quickstart.html">quickstart.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/schedplugins.html">schedplugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/selectplugins.html">selectplugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/slurm.html">slurm.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/switchplugins.html">switchplugins.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/team.html">team.html</a></li> -<li><a 
href="http://cmg-rr.llnl.gov/linux/slurm/testimonials.html">testimonials.html</a></li> -<li><a href="http://cmg-rr.llnl.gov/linux/slurm/troubleshoot.html">troubleshoot.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/accounting.html">accounting.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/api.html">api.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/authplugins.html">authplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/big_sys.html">big_sys.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/bluegene.html">bluegene.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/checkpoint_plugins.html">checkpoint_plugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/configurator.html">configurator.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/cons_res.html">cons_res.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/cons_res_share.html">cons_res_share.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/dist_plane.html">dist_plane.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/documentation.html">documentation.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/download.html">download.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/faq.html">faq.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/gang_scheduling.html">gang_scheduling.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/help.html">help.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/ibm.html">ibm.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/jobacct_gatherplugins.html">jobacct_gatherplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/jobacct_storageplugins.html">jobacct_storageplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/jobcompplugins.html">jobcompplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/mail.html">mail.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/maui.html">maui.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/mc_support.html">mc_support.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/moab.html">moab.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/mpiplugins.html">mpiplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/news.html">news.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/overview.html">overview.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/platforms.html">platforms.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/plugins.html">plugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/power_save.html">power_save.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/proctrack_plugins.html">proctrack_plugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/programmer_guide.html">programmer_guide.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/publications.html">publications.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/quickstart_admin.html">quickstart_admin.html</a></li> +<li><a 
href="https://computing-pre.llnl.gov/linux/slurm/quickstart.html">quickstart.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/schedplugins.html">schedplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/selectplugins.html">selectplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/slurm.html">slurm.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/switchplugins.html">switchplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/taskplugins.html">taskplugins.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/team.html">team.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/testimonials.html">testimonials.html</a></li> +<li><a href="https://computing-pre.llnl.gov/linux/slurm/troubleshoot.html">troubleshoot.html</a></li> </ul> </body> </html> diff --git a/doc/html/schedplugins.shtml b/doc/html/schedplugins.shtml index fbbe9aaa7..112727338 100644 --- a/doc/html/schedplugins.shtml +++ b/doc/html/schedplugins.shtml @@ -92,6 +92,22 @@ to indicate the reason for failure.</p> the plugin should return SLURM_ERROR and set the errno to an appropriate value to indicate the reason for failure.</p> +<p class="commandline">int slurm_sched_plugin_newalloc (void);</p> +<p style="margin-left:.2in"><b>Description</b>: Note the successful allocation of resources to a job.</p> +<p style="margin-left:.2in"><b>Arguments</b>: Pointer to the slurmctld job structure. This can be used to +get partition, allocated resources, time limit, etc.</p> +<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, +the plugin should return SLURM_ERROR and set the errno to an appropriate value +to indicate the reason for failure.</p> + +<p class="commandline">int slurm_sched_plugin_freealloc (void);</p> +<p style="margin-left:.2in"><b>Description</b>: Note the successful release of resources for a job.</p> +<p style="margin-left:.2in"><b>Arguments</b>: Pointer to the slurmctld job structure. This can be used to +get partition, allocated resources, time limit, etc.</p> +<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, +the plugin should return SLURM_ERROR and set the errno to an appropriate value +to indicate the reason for failure.</p> + <p class="commandline">uint32_t slurm_sched_plugin_initial_priority ( uint32_t last_prio, struct job_record *job_ptr);</p> <p style="margin-left:.2in"><b>Description</b>: Establish the initial priority of a new job.</p> @@ -120,12 +136,19 @@ happened such as time or size limits.</p> <p style="margin-left:.2in"><b>Returns</b>: Nothing.</p> <p class="footer"><a href="#top">top</a></p> +<p class="commandline">char *slurm_sched_get_conf (void);</p></a> +<p style="margin-left:.2in"><b>Description</b>: Return scheduler specific +configuration information to be reported for the <i>scontrol show configuration</i> +command.</p> +<p style="margin-left:.2in"><b>Arguments</b>: None</p> +<p style="margin-left:.2in"><b>Returns</b>: A string containing configuration +information. 
The return value is released using the <i>xfree()</i> function.</p> + <a name="get_errno"><p class="commandline">int slurm_sched_get_errno (void);</p></a> <p style="margin-left:.2in"><b>Description</b>: Return the number of a scheduler specific error.</p> <p style="margin-left:.2in"><b>Arguments</b>: None</p> -<p style="margin-left:.2in"><b>Returns</b>: Error number for the last failure encountered by -the scheduler plugin.</p> +<p style="margin-left:.2in"><b>Returns</b>: Error number for the last failure encountered by the scheduler plugin.</p> <p class="commandline"><a name="strerror">const char *slurm_sched_strerror(int errnum);</a></p> <p style="margin-left:.2in"><b>Description</b>: Return a string description of a scheduler @@ -143,6 +166,6 @@ releases of SLURM may revise this API. A scheduler plugin conveys its ability to implement a particular API version using the mechanism outlined for SLURM plugins.</p> <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 16 August 2007</p> +<p style="text-align:center;">Last modified 8 November 2007</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/selectplugins.shtml b/doc/html/selectplugins.shtml index 074620eb3..7ef6a4088 100644 --- a/doc/html/selectplugins.shtml +++ b/doc/html/selectplugins.shtml @@ -25,7 +25,7 @@ This plugin is recommended for systems without shared nodes.</li> memory, etc. within nodes. This plugin is recommended for systems with many non-parallel programs sharing nodes. For more information see <a href=cons_res.html>Consumable Resources in SLURM</a>.</li> -<li><b>bluegene</b>—<a href="http://www.research.ibm.com/bluegene">IBM Blue Gene</a> +<li><b>bluegene</b>—<a href="http://www.research.ibm.com/bluegene/">IBM Blue Gene</a> node selector. Note that this plugin not only selects the nodes for a job, but performs some initialization and termination functions for the job.</li> </ul> @@ -154,9 +154,10 @@ the plugin should return SLURM_ERROR.</p> <p class="commandline">int select_p_pack_node_info (time_t last_query_time, Buf *buffer_ptr);</p> <p style="margin-left:.2in"><b>Description</b>: pack node specific information into a buffer.</p> <p style="margin-left:.2in"><b>Arguments</b>: -<span class="commandline"> last_query_time</span> (input) time that the data -was last saved. If it has not changed since this time, return SLURM_NO_CHANGE_IN_DATA. <br> -<span class="commandline"> buffer_ptre</span> (input/output) buffer into +<span class="commandline"> +last_query_time</span> (input) time that the data was +last saved.<br> +<span class="commandline"> buffer_ptr</span> (input/output) buffer into which the node data is appended.</p> <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, SLURM_NO_CHANGE_IN_DATA if data has not changed since last packed, otherwise SLURM_ERROR</p> @@ -265,6 +266,21 @@ identify the nodes which were selected for this job to use.</p> <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
On failure, the plugin should return a SLURM error code.</p> +<p class="commandline">int select_p_get_job_cores (uint32_t job_id, int +alloc_index, int s);</p> +<p style="margin-left:.2in"><b>Description</b>: Get socket-specific core +information from a job.</p> +<p style="margin-left:.2in"><b>Arguments</b>: +<span class="commandline"> job_id</span> (input) ID of the job +from which to obtain the data.<br> +<span class="commandline"> alloc_index</span> (input) index of +the allocated node for the job from which to obtain the data.<br> +<span class="commandline"> s</span> (input) socket index from +which to obtain the data.</p> +<p style="margin-left:.2in"><b>Returns</b>: the number of cores allocated to the +given socket on the given node for the given job. On failure, the plugin should +return zero.</p> + <p class="footer"><a href="#top">top</a></p> <h3>Get/set plugin information</h3> @@ -338,7 +354,7 @@ state of the node.</p> <p class="commandline">int select_p_alter_node_cnt (enum select_node_cnt type, void *data);</p> <p style="margin-left:.2in"><b>Description</b>: Used for systems like -a Bluegene system where slurm sees 1 node where many nodes really +a Bluegene system where SLURM sees 1 node where many nodes really exist, in Bluegene's case 1 node reflects 512 nodes in real life, but since usually 512 is the smallest allocatable block slurm only handles it as 1 node. This is a function so the user can issue a 'real' @@ -351,6 +367,13 @@ telling the plugin what the user really wants.<br> Is a void * so depending on the type sent in argument 1 this should adjust the variable returning what the user is asking for.</p> <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p> + +<p class="commandline">int select_p_reconfigure (void);</p> +<p style="margin-left:.2in"><b>Description</b>: Used to notify the plugin +of a change in partition configuration or other general configuration change. +The plugin will test global variables for changes as appropriate.</p> +<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p> + <p class="footer"><a href="#top">top</a></p> <h2>Versioning</h2> @@ -363,6 +386,6 @@ to maintain data format compatibility across different versions of the plugin.</ <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 12 October 2006</p> +<p style="text-align:center;">Last modified 8 October 2007</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/slurm.shtml b/doc/html/slurm.shtml index f77d304c3..acb1b46fa 100644 --- a/doc/html/slurm.shtml +++ b/doc/html/slurm.shtml @@ -6,8 +6,7 @@ sizes. It provides three key functions. First it allocates exclusive and/or non- access to resources (computer nodes) to users for some duration of time so they can perform work. Second, it provides a framework for starting, executing, and monitoring work (typically a parallel job) on a set of allocated nodes. Finally, -it arbitrates conflicting requests for resources by managing a queue of pending -work. </p> +it arbitrates contention for resources by managing a queue of pending work.
</p> <p>SLURM is not a sophisticated batch system, but it does provide an Applications Programming Interface (API) for integration with external schedulers such as @@ -33,27 +32,23 @@ add functionality.</li> <p>SLURM provides resource management on about 1000 computers worldwide, including many of the most powerful computers in the world: <ul> -<li><a href="http://www.llnl.gov/asc/computing_resources/bluegenel/bluegene_home.html">BlueGene/L</a> -at LLNL with 65,536 dual-processor compute nodes</li> -<li><a href="http://www.llnl.gov/asc/computing_resources/purple/purple_index.html">ASC Purple</a> +<li><a href="https://asc.llnl.gov/computing_resources/bluegenel/">BlueGene/L</a> +at LLNL with 106,496 dual-core processors</li> +<li><a href="http://c-r-labs.com/">EKA</a> at Computational Research Laboratories, +India with 14,240 Xeon processors and Infiniband interconnect</li> +<li><a href="https://asc.llnl.gov/computing_resources/purple/">ASC Purple</a> an IBM SP/AIX cluster at LLNL with 12,208 Power5 processors and a Federation switch</li> <li><a href="http://www.bsc.es/plantillaA.php?cat_id=5">MareNostrum</a> a Linux cluster at Barcelona Supercomputer Center with 10,240 PowerPC processors and a Myrinet switch</li> -<li>Peloton with 1,152 nodes each having four sockets with dual-core Opteron processors and an InfiniBand switch</li> -<li>An <a href="http://hpc.uky.edu/">IBM HPC Server</a> at the University of Kentucky. -This is a heterogeneous cluster with 128 Power5+ processors and -340 HS21 Blades each with dual-socket and dual-core Intel Woodcrest processors -for a total of 1,488 cores connected with Infiniband switch</li> </ul> -<p>There are about 200 downloads of SLURM per month from LLNL's FTP server -and <a href="https:sourceforge.net">SourceForge.net</a>. -As of March 2007, SLURM has been downloaded over 5000 times to over 500 -distinct sites in 41 countries.
-SLURM is also actively being developed, distributed and supported by -<a href="http://www.hp.com">Hewlett-Packard</a> and -<a href="http://www.bull.com">Bull</a>.</p> +<p>SLURM is actively being developed, distributed and supported by +<a href="https://www.llnl.gov">Lawrence Livermore National Laboratory</a>, +<a href="http://www.hp.com">Hewlett-Packard</a>, +<a href="http://www.bull.com">Bull</a>, +<a href="http://www.clusterresources.com">Cluster Resources</a> and +<a href="http://www.sicortex.com">SiCortex</a>.</p> -<p style="text-align:center;">Last modified 4 June 2007</p> +<p style="text-align:center;">Last modified 29 November 2007</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/slurm_moab.pdf b/doc/html/slurm_moab.pdf new file mode 100644 index 0000000000000000000000000000000000000000..576a8ae0adb60548e3144b41256a8e32a5f5e80f GIT binary patch literal 759031 zcmZ^~V{~QR5;mH2j1D?RhaKBaI<{@wwr$(CZ5umg$Jjx~_LuiP=iGb0A9svB)?91W zTC--=+<QJxRjo}TCnQ3{K+6P6GIV%zcvgCwH#;-}%Sb>^U~6Cj%gs$dCuMA7>SRX1 z{MDsEKqq2u<z(#e)mrI084DR3+8P-X@bbbsIyo5YTf@4oPpe5MqOc)#pQ#=vF@`Ql zR9|XRdKD!Epw?wh1Wy{kGaNRn*ZNQN0v1-85{m2V{bBo+rfr_BU3-_$5T}3o_FuNS zJkkC7;J6TXu>7S9c+&M6r^DZ(;_U`jy#A3zS$;vN;`}>|KDCu&Ar5;|{M{ekr-)qH z36@VC1J@l6170Mmq#EvgM?gKb$}1R-^&;TRtp?GBHrgnti(2arVfArbk*I%!$Ln3Z zE+c^9$ns}hB6-$<{}NsfV0mFJbb2%sLKpCSSZnumf#+Me!(<m8i$DZk(}W&RpuVd& zdV50C`F5^yElSn)L>pn8XP??bKOXki3%ITK5l|IIDFOz3OSQBq8{Ek^O3Eq#uC%`0 zg#SpO@Oe*>&Eefyxs=ROP{FM9_<K+S>Q^o3#j|q@rUJ@6bq24-h8`d~s}C{-39|7G zx|}7+P;D7qOcS{a4`cySuP|<;VObav`j^-trW;t8##5R)ceS|%TK~XzKX+?>ONwuE zAWupQi|7I2DfsXTo^(mKp-kn-MF`BZUDcqF3~5=V%D3IZL||xmlqn;Hj>5j|A}G}a z7@|Mg4b6*D`i)Y27tyIBh%8*y?3cy!jWE@0u>C}d0y(@;S=AQh;zJ^e72~QniP%Fi zqZf!7F4Q=oXHKRZ=G1;G#}y0)ILZ|vrW-4!>p=2z;>{$kNTdx6pJkM^vcsApO!gB) zR{ss#abfm`A2=N(1XmcCgg1+!%yXy)P-h|q49d%KS3U12K92qRI8o#6`Oud}-zU+( zs*9mF5ZE-V_31bQtlS<teSk6@%z5!-+NhjmPDklpg;s7L;<K@3K3k~!EX2=b!Dl4v zkMU+;m!u|$@$Vp%3*lb(6?L51Q*;CsXA^GW7!dd&3j?FeE)>bay$QvS4YV<HHh#%9 zPa}K4(E;TI>JiY8@?Et(m4!Ksm?E;|T2LY=UnD@UAVbgA2*H9fR(diOHT#6W_i>Aw zUW4tpmo|Od0fW?D>1*{%O@_!efae6mQ>A86p)}o#iyv1u?$^Vnx?2t8$39map8vhD z2vWo^nyfwss}~QoFA&48%3RGqow-M@WDUBxB)`IYhS|dWGv02_6fS@M5hnFpODAfU zaz<MJPja5<$MQLpZ+fY+cX?f=+SDXJqCxf|^FtrKYlhe*xN;k<`lvPGh5i<l7`k)~ z`^m6m(NpVhv82T%QD#rN9MO>8FNMu)DB^~b2|-ajS(P}W)psW>LWH<ltPTb>?V`tp zMq>q=&1J$KZE_pErjgg{qgC;?xFjiG6*C?6jMjk)s?n31jFqqm#$m`18JduUSToGQ zWg?#~M~DbQ+f%3Efk}c6|CqRAF^Pg*Re@#76{UwJ)x%<U*dzD&F|i=OxBSB+ZTj@3 zbC`XJCSKIg1#-jAPF>r2N$;J0l{NykMudJ9J$t({OhboKPm-aBeRie#Co{7YtLAW5 zZLL|}0#d9pzW86IRXumjVQNDyp=UIPUh{MEY?Vd!48di?x{-^eA!SRlX<WfzpBz>J zk_b$zj<9FIG6H0#9I2oVs{{#{xtL<ocH?cP!%Lm5`9wsQR4dZ@1(?Tby2xGef(a(% z-IzV47Pg}51q1w~e&jW6^u3j-(8bBjkA<#dhQ*0R&e#NcEMn0KNX3B>*oNA9=3gBp ze+*|bqyHAIu$5*bZe#-xl5#+|xM^+QMK%CaOCCJEz+M2Wnt`tLR%z*rd%YU%5Ut0< z-fMRem#nt%-KK+9NH5BY%>|kmYCGy2!twa&0!Hla+lmuZcpV)Q;h(9Incby8PKk%} z%a;i9HT9DR(8`=5&N0!-!|A>6pK~isJo@C84+5K>I4z;Stagskxvs!f*|n-;ECRhP zmjMeR9-&Mr52}e7@QRQd;!)|-1%-{e?Vpi7g7c>g3dG5gXUG@!`k^MJN*BIm%u-G) zB~9HE?-+kBIci~zZH)fAr+=OO0|o!=|NpcM%<K&RNdG<m|G<QjyPYusot(buzefjS z8z%w=rvG4ug0Z8mvxA|rBf)<=1Z{1czHV?N_y=dcKt<Zv$Xs8*)(uvZ{;PwHgOPxh zfl>P(T=}n}f1djv9?3b_8Y&t)5omsC6A~eyQ!;jQBA^qu{?aV)-&XLytvG=;0iB?& zm92xKoxY(l!9Q>&=*R%e@~>K6UIIE{Hz!d=r!Oe`S0?JnK)~=Hp!-KC@-@wW`$d>& z=?NJAJ6REiuM75nMu4RgVf;tH@c#^_CTn0}Z1|st#H|?#IR2G<O^$(p^?x`0zgGPt z{hw9;MOlV_9Ps6l|9JC%!_g@^8#w*z-+#>{pi?zB`hsLe7DfU(F=KO6GbdOE7UuuU z)c=@BC;sJV2Rqw;z?#6x!TD=#1k9Zr<%}H!ZLRHWZT?v$_WyzUmpT7Gp#HZTLP=|_ zqs8;{I@W%s?J>){gDZdw+!fje=HlW4AsGGZc+pSe<`)FgAILQX5oSr2eetTSERZOt 
zJlp+xcd-@7aa5*MVN$Kpyv(%y?6aNWcs)r+c36~^l<1%!Ga=4F@qb<PcKO`;7@vI{ z-#Gx9(;ui%Xu9_gr3E>ZRSj;qenfaANO~$#y)aqU94_ii+G0{zW7!^OpW|qDw!8S= zKg^tu0`U2~-wzHqS}*y0UhZzk`?)E6*Ba_PsG9SJ|Ij=K)Ziz63lNx(mzs|kng|zK z50*mbFFzJ8J)NvT8?!~V?`PCm<`rHe5nL&qrEZ(&t(c*yCn0F0DzGD{-}7*JcQanm zvfftMOdg&bv<zI>xYdk(%<26&St!-@dB)O~oe+OcO@57ih?v-*#w~a<Iv0Dshv8TZ zXJ3SAbC$Wr%Kp(@lHiOs3nNz_&BlL3SU?wi=v1PFaL6XD<P2ESQ4d&mvTnYA;ap5P zv%JDp#DPp<Xs+=+QP?(c1_&rWax>fD>)_<{HhP#|u2s5{iGCEljT`&it2jv%H{c~? z=*EW0r=(pFF%(jRwvrYWRR^`ic34LlnwfiDC*Mpm(F~qd6XQe+S}OCv1zR(%ecvBf zUfG|h8BT&9N<kP&(Fq1#Nr;?B|0x+Scu0?^dG7c)EXi+f*}Z>=|LzvF&fNs54N@0M zE4R=I4R|eS!LU8&_IY?&sNM4|_#``wrHjq&5}UG2VYwnhVI?$BCN+s8F;Xr;V@_JW ztGk|&pf~|bw(057nQKf_W7*4IH$hh``I|<mk2h$GG*gT+Rf08Eh1XYwJXVI@S9Us@ zcRE^h1gOFrtH2s6!0RjjIaP^6X@p5=j#YY;q5oGmBFq+c@=~%Ai{>n?8ehwQ7vw2a z^a*{Zx5PES$G6<m%FyO^Zj$X%CXJ86*GhfOX9r2`H`$p(h-z${;w^kELSj*Lj<u83 zj4?>9u@h6N`#M>-;0BZI6m^I;)m2snU3CLpp$0*!Oi-SDL7Q+%lD2<<WiU~J!crrG zwq@@kr|~1U@QQ?bX+mVn^By|ZTgWx8hW^wkKTzrISovLz1x#4FkmXOqIgzE{?Mnj= zX@w_-zZWJ%A2N%Mb~`xU$?3$^KzBEWt?U>d#i!Al=SM}2<J7lwLE_LSow2O90<hka z*l<x1Hxo^Fpq0#RlBh!^B$49cuLwmh@Wo7NDy2Sh{y&V8{l!|t-6B}><vU8$hN*Ps z*);m;v?eJ;770V8I5FjKoV#9}`U4`@1}5V_j><8r8H>+v4zeSZk&-ofqB%dNv3sH; z!YU${Ic6={or&~>c4iTf>7(LWV%Y9zV5d%}s&HzLGr%grCJ@0uxleL?njK6phtD3# zfEPt?COEf$oxS8+)8sMBsRQKZ#g+Kj35rh?=q9X6mt)q17Q-_~=gA2v4!c*G14yVF zlQt`sx=xd{Cg?Owu1<oOs5}?O+7u+Y=EmC_%G?|*U!ad6!>}N=nPC!H5mTGPk{A#} zfqNnS0v(`#uz6=gWI*{z&V^j&VwC<w>;=4YsDr0=^tOo?1UQDb=qBpORhHNidqqYG zGB|;XEP+r8+q#_VafB*W)@57;_zx_b&%TB9qpb@1`Pv;V@0Zrf%<PMEn3x}D$G1ur zDf2g!y;gAi8bN<>iLy?1o(Ze~Go;P{MRmY~r}Rl+ZMWFE-;p)?d*jw5c!I`lZltA_ z#3S_mTeDVzHWAf;<3{6IO>)y#*=bhQWp>SFrg>)vi0s71D4nyca};%}wx$_gTHs10 zCn+p;Em_I~coC}xBr5sp5bW{G!gyzbP95Qo#-cX8(UJoilZ3njjhe#+=r_w(wn+~3 z+?B`@`MZ1-0f)2T3ZjNCg?0Yteqf=IuO8Ov)6F~EC2v;NtncN`!~1LhTzl2c%;Mf8 zmT~qP+U8m^kNcAMxhkn?N=r_+S2VDZ@AGZ4?;u3bs5#HRsLnn}Tg_N}19diMff+_) z!W2n{@RGmsfY>UbDyU51whE0&+(7QCDA9FaPLxtjxLrsYl{L0WN3-lKM}q3?I--W& zUQ(GJE_D|1$MSd7S(u%r8;n`FAI#8b^w4N*(Fu*xa!z8)zl|_o(bhv$-G4C)9XKB@ z#xiTApJlGLF}8zl5;mI561Qlq(dI}mCB7B>3~yt(0_5)z8)ky777lHf@{i<)$?~PB zAdA<YQkiF)A#421@hxt9CpF^@>LB=7F=^55<*?`a3^|-WYxPHN|CS*}$pK#8L=~Kc zQ{2TBov^}z_Ny?1F4F`gj?@s%@^3O)^1`}<@#%8lW+1-r)t$4!nqPooMw)w-u$;Q` zqt(igi^zGJu~~GUJ!*RvS9d~fk#tUqZeo`qVW|j6a}`x-M{A9yhLyeS>zD-aK6t1L zou|`GheVapzVb3?st1GlRc7@jD(Gi;5i!IQE(E}FqKrp>=?b{~{csMB%NCQ+=!bVd zqs%g|`W%P$Adl#F>Flq%Dbl?JRR>8m9b27+r^DUm@#av+4KD4%NqX3{rvvB{C<QSN zq_DIFF;3VsQGqd#8GK53wtx_ro#rys?jUKyD6ok@M-PHykZGvH+KVoZMlYPDO4*5C z$#!p`e_7V_$?Z!J#?SF$ZSn&o5bopvsOAJRdO*<y)Rf~fxcv#F<5}?4aVRXN=-4W$ z8U$Kt(tXs7^_`4W2N`Q6VS^Ny#DZl=Ct1sFMyVC?+3&0IL!Pq3?*5T7&!zCp;@R=` zK;cxzNN6!j3w#~(ZpK4=^<TB;;v_~wedRKJ<@O<>mry>$dGAd@Jn==u?NM;WxB(iV z{doryReuNakEZfZhbni6N_U}?br;e#H`7&7`ipmGzBq3!27v)Cr72G7S-K&z)(Q4D zZqnvYRaT`xtYG~od)eL7q5#n&Ti~)T)BUuu>Map!Gr^B~J^8zYfK5SG3#gVdkEMQk zp~5imL9heOVC@Ksc9k_AsDsp|=|vZrZjbw;pcLQ0o4><U`sR{5=YNGm6)}GLw3+!z z3>+n;9a6|gDiR&>g{5VqH~vbhBTRP4i*qbizBX8aIbeZCVGRGl8i~#j8Q<~+;L~z0 z<G+X4l$>S)SUZMTzn%MiZ`2^z`E{xF1zMuDJG2ED$GGY`^Q<a!jJT+ZL`1ftBh~yv zzeI<s#QMtJBLK%$WhBqSIFuY@D$1&a)~4oyWN412G&ri>o<ZR#1AmsLC#E;o2N%{i z2S*0M@bU+!j%cA3$uPjU$gC|zS(rQ_47tp`U*TS63|@2+O88G>>d`+Sw#Kc;m5HMR zsZ@a%-`=Pma}cDgM^K$CILYcMI#D9k6(l*svB=0fRnNei1(FE$)5U9}B%CGGMlvB( zh0(t&IZdlRQlm0QMM>56db=7pe{2np)1-vyyx<$CeA3w5cKhwBBd`eU#PNs3$*07r z=fp{;$7!d>34`A2-cf<$T9iKBh5xEDPvT~G3KX6)EIv;-_?eKbJ>Sgj=+q7O1V50r z^z9%^0xe5|eG$yeNE@}P(DAf3mUc55V3Ra&M|QXra*o6>@uRNtz0cN5WgtdnPF-z9 zd1+v6c5ZrLdU9rCbeOM?m>C`m>l_~iA2Ss*EA2-{7J5noay%>xiiMj6=^KU)yo?w^ znB+}#%wtU019Y_AcX<1|`^Nh_Mo0eR4GwdSRSK*PtwqN!z2j;td{Yz+WL-|Rm0sp9 
zJKOXAMolid(Ui*&V3RgI4DpXvQuKQETCB1?mExbX1u<4_ddbp-JJN)g6gF6hy!g!c zxIXr0N*-&;=bf>#W6!VX_8gYqSd%Y<rf2NRQRXD&?JFnCI5#>+ewC`KtT;b@kGj9# zZd;s9RFu2KPcXN^Ag#R^AU6^ls({uG^wSRhJ@%AUfK%%eR%Bg};Fv*4Bsj>Uxm~L@ zT^*-AEIGnW)KZ2+G~w3<iw9wfDGjZj48y)Co6Q!aGgx&>q#rihAgtEW&h_-v78>*- zH2iHa-aP%9<C!9uMEx-Nc_}r*;!F}JgA!Y_LPMicW1~WIqe5eyGE<W(Lt_FvV@KO8 zQClZjMJpvq7d0~(?wyF8aF~p8hM0MtgaFecuAOS2Z&_z}NJ9ajvc4+0!2+88M`4w7 zlc}Di{sCz$cgqbkVU5TFNr=EioYI7*>?dqS?a4;b!!U56@|mLD=*8Gcz)f?G1t)uB z$;J2-Jg3(jIUeFD8PHIP#3)vI;$dU8IlS%T`DpBX>Gj*8uwHQh!q}8jhvplp`s%m* zyg3eGSL8-=;}v-8S}pBME$?jsBMV8JO{CfgF3?nT(9|Py9q+4CqlOtG4`?3Gpy<$2 z%inpfLZ}4T+3ATYTCjI1Y^*Cf7iQ;YXFV>~J0sDRDElj=r6MLJwtuqP?Qf5w*Vb?v z+2mzyW}>#TuyS&+Gtw}-><b@X?qmG@k8*ULtkew^`K7H*%!L(v2(4Jz8PKq>BB2pn zudZ7iZs{9b<(TR_%1r4@u?$cYa#VQcS-W^UobbnTwYu@+H)zW++$6@51pa7@utL?X z+?gvmR2wNH5+ypP)=Bu4;Ti<J_#c*a<wVV2p!j^`Ol|m?6B@*+EdJiH4$BB@uIpCZ z>{MH7l;dk-Y_se2wwk0Yicp%0h?30>QvBWmc9kfu28$hBNx*pT6?yj_;p`1-Y`u44 zlqDO!HcQ?vhTVzHMTi^6o1-7Uqh`=}%P2W1d5V3;$;ky(ix3kHx@G9txPR-I2zUD$ zyLw7}1jF2?wCD>w%A@;DcZ`E)b9M&ykq>pKviu7JRn+vL4>}+!%pL~5`TmB*(PfUg zHD#_fg+W*Y0^N8eT06J5^Zxj`5NKO7W3?8XGCx5)1B-oY0bb^@=m+qpG)WPm+#jhy zsXEujOxtuknNUzz)AKL~i1Pw3N4ug})UY*p&xezxxu<!(-q?G*zIZ+;OZeld#p{@e z$z~gELmdBftR$JjVCi+23VnGTOkx@qNh&sIB~f`CLn^1j452!pc9pSfgz_zcmvMBS zYLSJWoPd;-l$ffp>6;(8F=TiTFAGeTuZXJ+g0|^sjOwVo^F`&S&k4n>cTG&JplZeC z@i*5bMR|xLGDez7K2~JpANrwpP=G>HYbsN{V!arlA*B{AO&#B>zgt_Cow7Q~Ein<; zAzpMw0)#e>{oKkw*UBz7?fN-G(UX_1Q=Fw7V{B>7(qpF1V8qS6>Td7Ai+bhdS8j<) z;_a@ZMi-9mU-6+#Y?L2qf8VWqMPJ08b~d$j=FG(bsd4fJ1&%P88Th4~^IYXNW*;%t zwygDjgoLG>go$tJ2+kRiEe0a_9lXNJ@y)F9_cC)!gz3s8(W#+iqa#N)cssb&8>{^} zuLG<vy|>!}F~!7rEWW<feyV-M{^}NgPXl5gkN9Z05V>Prg`yID>zX?;+PvMrLdQ&y z*tCWte-Z+Ga9^X37EdXb$Oxc1p(3ksQI{cdS$Bl!_gWj&;x4jSpEUd5?lucb5{J%4 zUYpTt(I_b&v?SamAW_nSl6*Cgvbx>YI69q95G=P{SUky_ycId(#C{r}F#A1K-gv=D zfmO;%XU)%NjT@Y_6ipF$s0}vT;)H1j)*Qc_!q!m3Fpa_>;GgaTe&1FTpIH%|YVT(> zSVGlE&rH=!$<la5->n?S{M5X<j)u_HLb^-M9}k}d14Duc1cI(1%1#HFt#A-5PHZXy z)@As&wZ;d?m^;0_4*NH|Q)6|y&at)S8)ErL3}h$`Kvyf>mnuD$#n+DH$J}!i@0T8_ z!fCo8ckAL!ZAO#FQL;YJjqQyCb97r<KOvW0U30o^eriBmb|7+R8C#J23N;D)icRYH z;iIb}6@{J`Uj~N?X!+muu!RwSV6nkM1Qo?sCU=Ia3I&OpRi$7hgs7r;pwGE$(>mMI z0JJ*OAdlI3k#?66?_y;E)BxG3PxKI9XO`Z5#WJ6-?ebm1SJ@IrXd3A>grw9nM3n0Z zT!ZM3Gqv@>cWWtb6i(6Q7X03}?#KvhWclR9hMA!OlL8TY$}*U#R8%c0^+mab9OX@i zuK+b-!*uFBq9UU<7wNv0Jmq0m6;cGT_v7I`G!Pc?Kr=`sanc<cbLb4R#jfI6ew@91 zRE2KaJs_;gXa&|t?wXhCe55Rb-pS7X;TA4TQfi8-%E|^SJ^L8Hd-oVwa<o=ZhzzC$ zs;Woau3geAQBf<=R>RZ&b~ZY*aY1smdzzh>?DNdd@0!W54JtCw8+-wCtoP5h?%E$% z6+TibHUiplnqLFU8-SrX_2E^mN#43Lcdx^X#OY7~KDQ@|;;cB~zE=idr%NO+!n8WU zvze&iCMy9EliN5|D$HWC^Jw~Ag7k<@Htgjr^<*tMG}fheeBSHj?&S1uws&ytiDnag zb-v;?TC3O!jG2jw2qQ(i=z%t#ZhP%z@^m$ZRPCOl2sUBfxxrDu6?|lr)Fe1eFcR9H zB8ZiTyZ!#h!~Hl#Y#%B*vLKO%xdE>?%z|BJnB>K{^@Vu#?lVs{n4w~%<3gn4{%2J5 zGiTBJOLco~u2;(oSYctb_GWPEX#!ltpdw>je0_`$e~L{`hDi!#iHtr{Wi7W_?<Xqw zHdp-@?|Fyv5yW%mi}Mg}r4$xIi_`!~E=VQP%U#M=;W0SQX?6zA!6^B520(tWLOv~@ zogsB(fG6b>sq71}#9CNjsp`;dM@;SyI<`i6%VX$_qji;|%3&y+cXfFe?khw}(lu-2 z;o@u{5;8bj*!$dmJPf^M=?bA2#_>LR*>MfL@VCr4k2N9m-o-3d_)o5o{=Ulpa9Ib& z;9}RQTs;@EnQ}rxF$5s)TFZGcLiZ$xw)RA4N5ltPib;%)z3iRy^zl;mCQ1(iQF{ps zsRhQXCAONsjR6;73*#Q+S|mXe6V+U#G2-i;fd>NT+hutT(`8LY_^?mtO`&WLbR4C7 zRQk!q7uu4<XC8CEntE(YT1+)oxK#Vd6Qs5R1*g}>V0O`QznCB7%@&8-{m67?TF~yU zoRfpS&-n$Z-pNtwFLFcSv4Y=3dUpNfLxd$;e3f6km#fyN%+u|Mfu@w3Cq06&HEj0Q z8o>191#KI=>lTXpZAJU?_ACwYBZDy`PzDxEQC!<lL*87-qD0iKiHEUoaZPz}@e9eA zdq38ri#^W}bB57e9nD{J(V0!+92I{njdvss635!026vT4X*(Dk+6npO7TfNgLv@nT zziX%Es(F8<)BNh0lQ(bIb)J=Aw-_HFu(!T^z{8yV<BN4BH~o?0H-g0xO_*pG$KqV` z@OTm$4)CnCFb8uW#lWbW|M>oV_SlkFa-ae$rjEoo9uJF6xlTfmpz*EIQ_#WV7AG{M 
z#KwT(`SvsExj&s)0FKvbDJMTaqh+!^nPo1W!-oNe);~YRMn~gF1@ZyUTzMq}3M16S zR`|df8XG(pjnz>84!k_onKD?Ef8Qvn)pa_8suwTKULC;le!UfmlJoJ>M`v}ZXA?H( z=3IREl96wVoJ^!JQ!X^!7_wKe53wJ25*<pB9Js#a9`Hg~TBKbJ|1<l|s~_GdS-Iuz zVXKWJPyz;z7<#jnNOI$BvURyJ!eVVWst=e5ms!D4`UJcSI?xWM`B`=OGc?7lKX)*r zgM+dDdBcJ0_<+wz)9%S#IrWDFYygTvA8^&}<#=Nihn;B9fipsL<jm6+vWP8r1&(vL z6hM7RdZ|Eot_B|fFvUdN(wU|qYd2ZsDj%L=dIc}<bz5_PC;{!JuCcc}yor{!E4`!a z`AYgAHRU8a0+((;B@?yTtZ>yxZLpn1*44LG=eBTQd`MdxIt=wdys@Ai8O$!&`42xx z$js^-EDas(XU3UgGN4%c)A+ZBiHe8{%;>-J$ounhfH73)j|0LRdy-qkBMYC#ox<qZ z*#jeQQ&;`(;4Q-a;S_lJ@vJ&M9V)CI4>~;*(O4{k?$N|9`Nl}|IlQ%_#TRry4Z=gY zUx!KqMd;A+J@UWZUG{_0ZS93T_F+_g*KQU{b3I8}ANQB1Iv5BCq2(7)-}6*h(ePE! z0249S7dVzJ$f-lq1%NRE)afkTxUs&rq5(-~o|O;;&!g>%jy`Uow)aBAj-!uXRubS@ zr#GziKI%O3!>aodwaPA0-(|PCkFP#cY4y%+&rgTk6kTQQtpz3c?Zp%*^$_K&bDFDT zvQss+yruMI-E2Y&yuTGkF@g5_=kYf+DV7&`gU0#Fti=kFtx|TeQupdkaAS)!pmcl` zV3gXNFW=D^_|aWvbn4+vmS7cNtbQXBp*hP^bhUQdt&x2(X4oHFP?(7@nUU%7A@D=0 zM~K?&o5E+0iNJHGG52uS-~VvY1o&m-KusLLWm<F?hhTgkN!FIF#Ja%C$<fHlesef+ z`I|}qz=rKs_3CA}HFLFS^qNdn{5`r{m_&d4<dsx(0o@!;;UaZ;4_DiHjws?T;5%Wj zf0=qjk>EAN26x$FgqY4Cbm*!`W1a+BWm;nA@CU?X_a=tk&e@#3<$klWb*z?2G=cQC z2xzyMh56`JR?F@)SG+J-Mi@0LhbbEBRZrh<erXyZFR1U_DR<DUII$jY6Q5Jwh83nO zDQuV9urcHQR}a*kl!aA=;lB1t<8lu7X2PRS$@S^w!GMGPT-&Sf8{1PlTzdXTBlXTm zQ$o!TAN~y^M@@SQ$NLv%iG{Y7wfg7g2Z`sX+J<|nqoo*5DnnUPt6EcxiUX|pX&9`s zr7CyGERptM)<5VYgo03W=^s)lYqvjExtk)?j}VD$@swt&@XB@<$j{<rNzPGI7Canp z?Io7#2pjygW|S8u=DrF|L@bR}`7p@^9H{j^I2wD1^g!Sh`RBxWbaSU>dD;*{BvE7l z6=OGXxdn5xMNx;Pf!on!@y0XVJtl(18)mKVJ1z@f``PKz#1XojC$tXtK!H`)d(>ZC zB^o4p$aF~5iuR&9%cmS<=8|?j#HmOP<8XCTxQr6Nb;<Q#8NthS7LZ5ESyC;ZaEBH( z`LujAtnf8Wu~y)RPE6%cV_c~g&#dzk1XEp1D|p;gHo5lE)x;^zC=1R`4~Wo`<Kz|9 zP&=h^DUjP<X@-m;#fnpfjG-vEj#p@wuB@sywnT#S<?bwD;P5O+Cv(LFPizj@BOM|p zrmE;A=s>cE(pdmtHzUYyD9_JU74$XpXP~P5Ru57zkH)z-{5kG{bbLe%MTU*JLJC6@ z?1ky`_vbsJ;VQ=<8YZD?M)x~Ivt(~2By~mv^S>3mI2(^LX0KF9$y}Xj2S$^#8ty=9 zN*fm06*wC(96(tZW}22QzK(2F*GvI&U$Kep`4M_nT!N}IjG_66uP5UbfuZ4Q2p9;w z98^f@th0x`zK6NJiTm@`R$xy58Nri~)J@b)O-{Krmh*mk^M19eX<q-|iM_iYeZ3&~ zYrs!Lcys+gJqPSgc}3KO8@Rx|MCATml?$D9^yP$*Ek;(Dn+KRTkNzQpl!F5Ff|T?D z^pgM+^MNCCcni?Koc$9u>QUJI9SGNCUGI~VjVqTp@@!R_0i!MP-vx68bh^p2{m{iH znG@hx{mr9MED`w+NTv-jlX0iAw0Q;#8z!l^AfbFK<M2$6`VLyN=<?5Qrjrlbmy$E7 zsLot%`M<|Z=%cf;4;Ipx<6)_=$0_s4uk&)Sx3{&`b9Q!o!yj6NVSXDOCyn;~*?}jv z=-U}e-?ZfFgr=k$F*ySlChdd(&Em*!mZ2Vb>JgqH5}qkSPwx58CQ1&3<fPNffWPmO z=Wfq}YV}ELQzj~}@>H>Zg>n8|XJqe=Za;vnA?-FvQ{)mFzD+2R1xrq9%n%$HIPb2x zrcvxQV8kN(i6bf`CuXH$W2w7cyYdb^8i`@6#wdlz$jwgslzKJQnu`P5b*!9pw4{PN zvy4pZsORnN>1k?izxBF2x`~J>#|Mr4b_juU*)+)^z2GjbXsrs|QJl51u|{j5Z)^hB z%+%1zdTO9Pd=g&-sABa*hsno&F~-z&+QK7!lcO&putxYUou#D|MeWYa{d_Oj-uxC$ zVg)^Omc0@Sx$_)_0xp%9#u6Y_gxnPp51-L5pz*=y{pR*<tGB|6$1UQUk6rP}DJ6FW zhpCv_q2E|z`IB9_XJ93krgxZ>O^Je&DziNZ4t{zHj6E*6)2lyFbwST(pZkH~u9Wn_ zTvrED9BRf?RTa3R0;(+qF~;ER?rCZ4i9^Y~-R9|fxwJ6xA_GCg1|p@zN6y~LR%_nz z_r~e-Y42tkGQ=y+dzCcgt$f?S&2G(eYSgn<_lwd@Qhp^L0Rvi<j18_zi)=t8dt>Ft z;&@js8a*NJ{+Mm%lP-DOSbgeRed^{ejBxk>VXh!K>|N|Q&w;JpBCn?>MM1t(x<$V+ z*BEOK5uH7ePHg*yUj{H|Me{j_p0bj306Q}Xv17FjPfsYaV)Vj&m8$TJ!uz*bD)Q&e zNBYXVQ>t4_%e+%KGjXUo)s_}HH1^)+ZVyY(^9Sp)vsPR08dn=WkCxWQ8*?!ygzvcs z`FZg%sxlVmZaTM0hM#|?CQc`oA+2Dgc|{)T<9zFG@qnBwPpPV$P_-(|%|#fg`X;1m z?rw!D7^fMtlN|FDZHYQfT!WQ^1$$skj%UIS{7caU?Z-mohXO>G&NArNJG(P5{=Po# z?^kOdLX6Pb!O?|>+7m2s_Xik%mCp@8pDyf=p_{cV5YTICwvZXHNDl1^dc}wFQ>L%? z%a6e|Huya6o09<pJ?;JiKWUES<b$@qRB*W>EKhg3?%JQxc5HRN-fT{;B`cHAfD=|S zc4nxG`RMAOP2a?%Bd;D;Pao190M0FWJ&gEu3E0Elk;^}HbEkZJRF>8^G?g?FO#pPH z{d0V)Q%l-_bq%qlD53dqt%++WP+~?x6ufscnN4VfY&(Q(dX?xrCWDqz673H{6CjSf zlfX`HPR`pRZ&N_~DJ+rgPYn{Qi~^k10F5)U{l!@}p85~+R(q*g0=1<nJS#6J6NI!? 
z95&mJ#Bn6l^V{R!&i2UXMzc-6$S&|#m927&J+e>B-JVZpBi){l|3u*K$Um~-0%S=| zc7JTAZF@A9S(g|ZnfrjXv<F8(fOtj$L=*Y^SAe){*}Km>-;?U0D1HMtV;b%sm|UFX znOX=En@Ervd1(wl<yjLGzNiU%rV4bW2|f2b1z|5r{YPjROJdW)MIw+4^CR8({y|24 zl==u($z?FjPrs;Pl!PS~n-PX9G_K$)b(|Tha^H3H`@ou~I{-xy&hiv;4h7z4yK3_q z?1uMVc(>nSD*w<s*QKe>H}d5n5NfoF;3?Df!VKpX$*wFsGl)zjHeg5984yELQ@g#T zv%c!*n?F$JfcrNV$?MJ#xI^&NeGss>6J#VLmzLbwnjf9rma^npz{&@mp2b@Wr2we; zIZ=Q$^i^6f!UKsFa>&_+%gFWjV{Lv28suwiLZnfBq-8~@@f$ny1V*Nno`4r*ad&r3 zE~QV$7r&M(3tDCHVjsEW?8=u1lN@V(M~IC4o_SrHYVO+uoZ9;~_6k&Uws!V;S7mCH z<zijLx8d`0dYwbQM`!lD+SyGc18j9U*+mlW$rUF!*vZ;ZoMhFW=JfJ*_V@soPajA% zu2$#+Vq>f^>>yf1P1)rg>BwlCT@O@L2HC@+A{_6&Gdl~IWn?grbNSNgd1R=j#;Jyi z52vd0#K|w^@-L<GLx`83e(E}GnY!OSIR(nHzcC5-K);t7t&6f2z`lY<?=N+oEp?xj z6=`J&MB;P%ZgBPFhl^n>v^3ueh!mTuwzkAhNqsI)O|7IF03Qy2X8XTUU*=}Ruh$n& zKdV;6_&?7Sj>=u0|MgP5#CP)nUK=WT-(fC&T<^EyZ?`%jRGhkhg<P;^ufZ4(s!lXt zwb3lKEw%1uh;2`T-0+4+D3c>x4J&6Qi1JbJ#xIUp#sO1?W#)1NiC%ihBYTa5Tt}&( z+hi(tTdK-GPvO2m=`lainX3Z#BD<9LS&05adDdNEKUa~~8K81cofkEI)zKhWk>s84 z0x~h4n`n=#-PQPf<?f_k>z9&QADF1>=A>gh-FJMfnt#@6$#xXO$eQcl+up_8+tK0n zdyAn-P|8DY<56BTu-nu3($PDtTsFt6#<Mdv=WlOkXiev%i_0bNmOtR4+w1yzAr`_| z8eZV#*FedI`1EipaDoe1BUO|C?SLj~r%-{DNm4QmmpA1-x7)pFQILM)&w{2~IW1u; z0X^;b8sE^=n)J#*fb8T6Uy`5<Tt-5qX0GCOzWh~Difd_#V_}LtVd~=(V&K!KB@)0m z`|a=AxIhITq4q6vt9`-Ay2ZwohsJbWntKCh1OH%a{7pe&gY#mbsV$^w3yanQ$-Nb> z*W2&TQ%=s&qobZ41@}M2?)$o>W^y+@Um)B54DqBpJ;lF%dCm>_eH-{Tw?HNb>fq+} zb}&A(K-LM!Ip_B52=w@Q`()JnJ>&~Jw446+eq6bj8!Bon!3>DAtTYWgtli8!x8xTO zOJnM`^c-BN>TlDp#u^&>;9go(*(-JUlvNa?1+0xVNyT|sA{OOW2PXKRnBVt(Rw6%l zYtVnsaG_LJmB7M~pn!6q7-b5nH&QtnI-(dRdmMb;rlSeGB}tW^vwS@+sn-oJ^7b#W zO{~(!D-V+PI81(()g@Dfk2v+jS>*^AD@99v_>Sm{*6~3a8VIAoK!Lr8wxkUG3z71T z_o{WS&v)6S$%Zc|V%aedTbR9s*B25kjZcl?t#HS;0jhhaA1~yMCkg0&=k_?<xCxY% zwibkTZ)#hNCw%ty^Y;E6__YqyTf{p&PfevSf8GCRq7wwsI4#ZS@cey>;t2O<h$3IJ zH7q?km8H9}`#Bf&ftXDW>+VWvP6nH&J)iSU`=m+G4N2H5OrSEYL)Yiy&UR#VGL_E6 zv?SxSGV`=zcYJ}&^K&BG{g$@$0}^DLyGdo9mBu6`Z%@7Qe0!?mfTrlY^I>Q0>Bd{o zs3$2Wy1NxI%*)-7iIR`Q)r}W3MUrEG^~gBiU-yMO8037HLESYfcP-VG`CONKDiKhD zKo2%AI~zB+pNK(cx6};;xaRU~Uw$KB6HU+MNn-Kn0t*}l@cEKgnC0&-@KWiS;mRq@ zGaz2rn7RBkzMQk*3(YAD)ac-43$=uSq1^ymeT4t6%-U*_q7qbU)D|qH=1FvKeV+VX z3ff8VV}U(gj4DZKJy{5npYlwG7AQ`7DO`x1hqS1dv}l^j3$nY^3w8Q<vDH$~%m<j1 zmX;P45s{GL7vblZk&zJ*7w;b)uPLw5&{ez!UM{V^p^vO;_0(gfEI9Xb!f8cUc(UY| z6?-Z@Uz|NEXR{ekeRHb*`$v%ID?_OH`r;$%<M4_1O)(M<>DNq6&6GF9o2g{u`_2dd z4ui|jChS2P$elnvKJWX}@kv!#-L9aBXSxc5E$`>kl@tJ9Vz2G%9Ir2MDc05$UDZe_ z8=8Hwh`KP67NC8q`4?MTef5vV+B!({D0^0Ut?3c2DaZK}_o<BE_v7J4`!)BmF*T@M z5EM~W8Wbi%upx>{T$Ms3Yy<{IbWjHsLRb}jSXO-rR{f7iN`(o|h6)e+YjI!Kb)c!K zQ4wuJJtGyxSVud1$*&QXm6g88$;eyl>HG9@aWPpoL7tyFHng-dF#~_9_-+N1?hnH) z!gOk@zddl`LBNJdAi49NBT<UK*2I&{x*I8UXXR%{Ls63HYHYhX8{n!^-iWC(1?Q;x zbxd{iwD=)tsM>C>$EwN0e%kTLZMJ_JVN{>%7JmHr{#>aF;9FUt)A7~Z6`txW!@J%a z&P2rD`nWrp4vm%5eSg|rdd1i6dA)w_bo~M@@4LITi%1|~yDR3a5YVe9Lw3HCvZ%Dy zEw|mv&fXaE@IaLYPFj+QZ@PDUH!&eyCt_K-P)YU9HbSd?F-2JmD=q#A9ousA`&4~{ z0xJWq9RtpCO<o~xO)X6=4Lc(fQ?DXDKBqaD0ucj>3j^`*?KXy69=1|;czAww`9;_X zExr#k<1CN&i<4O1c5b6Bm#j`vlJ2ljErqh+KuLHRYO(6i_O|FM^{4D^)j+1!d#!z^ zs=rA6yD;q#(>&d2n*IB(jcT1>q&D5{4UD}Vur-i}yYfs%4`?5tq=zx~mwE!T{KpG{ z12OT}w$Q%MFXxr5y((@Hc)P-Powi@^3$4w~sc5TzeKeHiHPm$!)Ipn@x~kf$s>(i3 z`v%DXedk@DC+|~xjJ-u1;4Ro;QE$sw1igk%#y;NOy65)O8LuUcsVT`t#zn?PC4$o8 z(lC)RH#d*1q6AXsf2XpwrL#5{qom-y5|pKov;J18$wp*0@(_}E^KT}9y;G@a@Oio< z$;qkFbIaX#%fPqiZ^!HSAZ6q7l3V8~m`K%rZ`kLx@|2=zRNbb`DpWhatX>){<^99L zMYo&EI#Z$HirJC>Nn{kLq;!ujCLi;Oo;@1IZDP^C_wvHVf7TNUwS#Zq2gMgSGx7%e z7MU8V{7!HldUob|>H6@HhKUsx7Tzb~qf9|Dlf^+yM)iI=C_6iR!R^s>ySLW8`EwE5 zQH8`tI`?TJ{&Z`&CI2v!%JSBd3JG8k#gmf@lkH*l21!f|1_N`wTg&D75EWNe_I+M= 
zVQPM8X=QOM5)A4VuLhwMdVEPLj!s8l_WDtijMiN~ATjW~)ki{7J31@C%ijFM*Yi;# zQ-rb;Naz<_ag<TGqfxxOGE{sjNoc+@%hmmS>~5ygn2}E7CV!W;tFoB2yX^x8x;T$$ z$1*utR-Ry4CNw><j6SmTmQnm%aHVICmv6oe^>G4<=$K|PMO<>x8&GcR`}XAKni>+a zzCC)jme}2#T>Sp>Fl~u-5;-ss@c8)9a=G34h{-ba$G73}W-~MGz~HL_&&U8*y82R) zGu%NHxnimcT;>Hx^|e}A*yp?2`DI2nwpPABJRH^PwsyQdoa4BwQ_$+*S<><GP-$9Q zl3~$EvZ|DHXNzuVp2@YcF4Lp=_*W;dVG%K!5H6KxqVRdYWQu$1J{o@uXjFTDF3t6s z0LVN#xNdHZIHVo{0XYHeYH8_jPrc8HxxF5l&gMn$BPJ^c)pzUi43VhHMu8Qw#Lp0- zgxz`7-KuQ1m+eBR#~K@+K_aAwg$1g0<&v$ofSTGs%k1rSjnys41jJ2uhuxzW?2!cn z$|l{_b&Vcp&%>F`;py*kPa9mFvAW&up9d6+WVXJ4Jl`*PDYKu}=JN)W?bh&l14_L= z7fSK@-ruf&zn=hex4oaBtp-u#_^Z&cyF8$1c+Yt}VW-H3h4DY0D<&e|yt>{61KCoK zY&U&DwqttGudX&nUA<RCIEJGDSu1Ra<z>e@er~0<2F5PN=AZ!=8?8V#w$hYTgjZZQ z<TT*nE~FEpZ+1upBOFv`e0O6_a&<@gy}P<J5fPCRESR?3X8{#AAra9p!cqeyx#NrT z;Xf32wo35#&S~kG^*zRx#-N5&poE}f-<G#Y=%}V=W?I@Bt}aBWeXU&0t|D4HD>EAQ zhT-<?_VNZ$q@9-td~?UI=MeFE^-hd)eekf4E2(Ly>1JxlDat6Rq-3RCufuQPB%`Fp z7$3!uS}EijBo9#&NJ{mMj0})b(BxMZ7w8?E?QUViq^D|Yx>;GYm9)xgYDNIAcr#UD zWp(-a{N9Q@a@~j^AluHxWIV()ti0$JQd&}0T2fvDT3!ZT1_lEB{1*cwkmTH51!V+? zo!>PR;^S{_Z`C*dczt|~iXInnf_$2}w$^_r@awR49HAavuKYcXn7^Nv*6V3*&o3ju zu(0RHJA$b=0w$%R+E`Wfem<c%rDT(Nwxy}6$_Ef<rDu8{o+d|gwc_*g8(+!~PG8_T zrOb_w-x67XSHV)sjB`vYroN<7Y1UF!l2?|IySsJt*XC$xWNrT8W*^B<zx-O3feG%c zpBf$>BtLni72#-@B$1WDe&C&4LQ{NhUjp8MpII-tOhen=HNQUDrG<X;`^M-pbk@D# zus*oBO29czOGg<MlMD+5KfAgA8WJ`xqRqg<NkKj0^|?c$vbrk&`f{?LY%ewxbak9M z&}Yt)(s1bT%ZR8U&CtXi2-{<bc7`JQ0QQ_*TvYn-&NT1toV5OA!A9K7mTzd*{TZhr zWc@+3y_Yc1`PloymE5jwJ~b6pXUu|DyaTcwwfO6)-VRqU_q~e;?a)|VJ<Z&_TpRaq z&)QC>t1TAhv3q-a9^1H><iJ==O~RFAdirrWfPumOW?_C_BNHbT!&tY1d_qQ+b$%tk zxOjJOH+*^qP+M_ybW~KzXKG<2mzMbj9&tGdc{LSM!on1^gshz{AsqL4Zz?J&MMgyq zj0(Cv-;{T8#l<D$6cs5b#|8Zc2d7~SwJ1CQVDE7SL;`g`@r!_!i1@C#e4f`2n+JEl z(9n|G(!#RfO;kx3&|Gp75(WmR(ey&9`N7s|amdHS8ygsjwQw0aK)0Pw7ZFd!VbloU zK0@)!%*!~~S8KO1zev%|$IMJ3+hqyRgoK1{c2+?*4$0o`u9&@6lGf$~Nn3TZ6*XJO z+!P2x3n2rFI~6y(GpyN4ubLuSLK+kVu9uo;P%JDdc;0#0OGZdytY-vXj?qIdEG|qz zOhe*){S8ykZAeKS%lztj$Vllg#yyV!BO*4K_fH!3#sPtEDJ7)g_VOH96n!+@BcnpA zY8jExaPxC>AoZG=XWL%RCKf*!80wo2Anxh~l+~Sr4Iyw)clZ$5`=P#83HG*OBH}h- zzfd_DmAstXtLu+1)h!5X*6d&{pXUpBOjKD}n14LpPJD8TSDUn{CldvQ`0u>SJ5a^Y zJ3B7tQ&hQ^w>2g$-IwDzK~XN8r>gMy{{EHiE_bkqR7@P_C*t*t)6e%<UwVdl+2~lW z+vUY4ax{|Qx-e)ICKM{Fg>21@ZDx>5*wN{=VVFg{;kRl{$PmaXYN^oP!~S>AI@N98 z&+d~It`G`@mgOL!pv`V=z;=)4^TqpEH1e^HBwS;t=1nfJo}R6n!xu#r3<`%z5rufx z0^Nkv4s}-0){rS~a0N0?SI}@Lzp(IrH?fqC&#S7263X@KcT-dG_ZBv$=R;nF@V)Vm z#<~s%i8kE1q2{A*m-oW~13ZcyZB!Ke5i6^nQztJkWVXDfV0B9cyk(Dp8}z;N%meF= zX%kHq#E?CtM-H^;w;vL`B$+!&6|sEp;I)2!f}$WwAP(GJ-ya@9`gS5RacF;f{p5ts z@<8ey#E(b9c&`2Yutf?TwDm8@;q~U^Abfc0GsKh(3+H}Yep!b#Ys>0_AJ~K-@~Fzx z*47CMG`!SX;G4a@c;{L?PDDji`pxc=WqWC1OG-q_$h!FH?5ooYc^;7K+r|);d~i?% z=Dm7J2R^us$oJMa_8W25z#63V2KT*vxh7^VonqEqkQj_r<!Er)#TBo<vA&ISZGYYL zc=E>&10*qC-`9tcXymx#;o$bc>76DEi?XF9-2h0*RqWrXs;Y`uSoom*Uv(#gEn!2N z-#ThqZ621Ljh!etfgn9z&r8&`o86`r>lZ>*5(XlKJC0w$zDUZ&;MoUnPM^NK`Ssnz zO>@0GCS$^0;kB=}rQ**ldlGGLUU>#u+^Nn^_AEkDU`k`yjJ3xj=rDIwAaG`GS}Gdf zeM_I+@12a05UXq6vyVy0pBE%9bR$p`36(w8(vmXb#tCTVnv46G^BjxE>vzqGuQ`D{ z%*~OW(IH{w#f-+H>>vQIBB00jJ^D^_BeQ>gyVn(?1BSLVsJZCZP!_K@B^%Yd*wHR3 z-+(>@A60Cor6|>m=827URiGbNC#NgogJ7G5URi9i`yvE=*Mb<qY+i98L^+fyA2)YU zq&xq|?G8_Y0g?HMSqesdQA^feYr+l+a$0bx0A<IXfVz5ggeu=W-R&JTJ1xBwyJjIs zlOGM_SLUxI%CSjWIo9|(f|J4db#-4Q+=IR=q^QkNeLNx+c`OcEA5dBu7u<G<QG94I zOrd5kl!@G)XNEBV&V!wy_2E;gj#wPN)L&WMn=H4qQQew{M<f)K;!;xnJEyD%D#=^= z3BRbFS$9Tj>*%aM)J^Pq`XA>TZ1C{FJZbB9l2unW4AIr~Thr~2^i8P8i-9X}Fnn-| zP&NmCEmWmzG!Hqjquo%$=%YL}&9nx3rZg;^ofnRsq_i|lYN_b@9=IbF9sZiG78fg@ znLZG*TZr~_`n+7=q0O+1I|3y3Y+q2()9`(=f2FD&BV8X<+VhM1AV6JRgZ)q6T0cUS 
zvw_1mFyij!;I$?Ho=t6P-$fH^<MPr*8tz|6GL_kh-cnYmcYkUotGAhPe)d3)BE?)4 zN4$0Iq4(veEOQW)c6K@T7`t^>R77M2aF^EQHTEKQMOPOOFAhb^@_7%vIPZWF4+eyb zhq(s}ue!V2GhBw-y<9L8lEmT?71%i(@@8R#Wg@Ue#)b#&SLK)7z?}Ocqzpf+=aI0I z{s`n53fbYSXGF{oJ%F&s^sStXtQUaBXJ%s3*i?JJ#>7Y?zzw;yuBaUu?y9L#(YFVs zJ<{#=fB-EF3DFQ%e%^SzbZ=u%Mm<1a1A_+qK$@MMi-3j0^Jy)w$p{E2sA+*s?(unt zRfQyGq=qhNUkEy&SFp9MHpF2xkj>?AjPd`NT4R!SR^!B=L&U)Ud=9#6`iMV!f%Irg zGu77*z>G}`0*AD;Vqz}Y4QnUJda2bpxj5+?<R*x|yB-?m9=iM%@MaHFk95&b@A2?= zEo*9Va2npp`6~ti8xo*4H?%c0v<!D{RZd2Ofrg<dFYnWSd%FdQpCqIH!43)#K_@&E zP6bM6jC@tes?Vz`a8BLZ+VcPd>Vp}442ugK&M&AiFbudxHlT#8cz9Y<p6wi;fbzy) zxCu48!7wMnSMe=JA-1AZvV8orpq>b(zaTQ%(4h#xpIR@ieCOOPka((EQsTnGLQ)_L zepPXSeaMN3RJ7JwUY=~>_jTpC*oza|g)MFT#t1AdEh$mZkmqFKLCBn#956^M`bCnK zhGlG2Y^S{lHbp%ZovMQIn8WRlALD$rq@c<n{aZ?CR8&G{ZBE6v%|EAww2qL94yw(g zz0-hafZB@sLqDHpQ`^I%Lv0rn4jczL6$!YbH`>u^Ni})JBs~>nfpfOLk!iQ)jCwl| z@byQtuT-d99KM&Kx1hC$2<)m@m8hDcHkwCJRzOxl1ku2Pwz{ymj0)`83IjcZgoNaP z-R!16HiXT#dtS*t|5yce9tNf=hr&E3a-w`zN}{5M_6f@I=*WnJr6-(Z!EeFnps1+e zz`*eAY+0eGz(;PpqtEL>bs`lQU>%XQx1xxMhZbReQ`9fLOD}R!D3Y;lsFCcBmkc(( zp3!4&B<HsDv@&`oxc>tiLFB#@YzGGMkJ#=j1x}Km?|gYx+4k+tV=~`Zhl_bq4D-}9 z*i9hD+o-82msOTFpyZl-{Kha==2=)-R8*CinHax`yfB52mq%7cs;95>_MO`_IGESd z);L2*LdqmUK}mhNr4}n56qJ<8YAWOu6fTbTSPvZ&3$v%cUu|ovuC6v#<^gGl$^jl8 zULLF|3pgzBGYt&&N-N8oo9gf|FMK@aTiaVItI8-*Spw|LgT86$Xc?OshlGbhjG(Wh zeYhEPPd0WoB^4!bJ_ntuxos#By$rLWTWZ~SRnKB!!$3(*bwq3w2M60NY-u8GO?7fg z3T}RWsN%A6JOYBanD=%E=Fbo?i`oSjT#%FjZ;tg?LAWh1FAJK9H7!t4S_nHF5fuhf z9~T{g%iD;F3bC@XC@U*4GBRN02O4m3bLQsfEUnD{F*;vb1aU5OWEh1Z?l@S%LlP1a zXlSSc^SRmSu+LbtzET$@*xA{PjPzNUu`>^w5)lz9uPR?$nnUS4HgBV&rGA!(l$uA8 zoRYS*FdJ)bnUadi(99H^m@5wE8JU?7NUy5K1|1yCqfQeAxr(Z?sHg~5oyRoL)z>L3 zDQax2#m~IYQOw81$Mjz5x^kt9iVC}`0P}R`=&bFm1A+spYHKcDlyI=I!p1xs7dMxo zsS%vA=K8O4fH4V0<XmdZN{GOmp0bYlvi9+`+yZuX*6pngT*pvUQc^}qS<~1|NCJ1R z(#`q1(*$RkL@a%?&!6X4l$XJk2GP`11IL9`g<6~Hfq8cimm3o!Q4wLdVpu?$lan2E z26X=QtCxoxfD7f~;!Mv-y*WE|<m8{Dm?zcI)haD70jmIF4p?$>aj>z0q_AQhS=m{6 zdAZHZO~BhEC&od)!0N|<dH5iBQSdNWnTH4k_BlH*dt&kiF6MQ$)Cq{m&k1T1lT)WA zVrd{Fb@3eiIa?QJS~^-6M>`zMGrD+sRMgkQ?}G6i7t-3g+EP*%@iDKbT~J)u&~WJp zo%cu5(&1rVAU-j6^v2N2+A<A}6e2`_^bGXgzTU-U#q(<`pqX~omRNP3kDt%R!H$HK zq`l=^UtL|-P0Xf3cEQFWroQCzTv6+klCtXh>LOO=sc2{v)YJ_ut*tze60)**nE(C^ z;TZ;DQ}1+c9$qySMO@4q80f-J!0K&aNP-=9aJ22~?~Vuy!Noiv&CShaZDWClc>pXq zHEC*k?3m2s@iy8znxNtH%X5v*^<F++Mka<jdfK3JpR1m>zM-C@i(_R?WlB;!BLh9w zT>|D|=y7ncVa3HfbP7(6)bx~*k*m1QzK)g}AqfSYpbjA^RYpo27NkXlz|%T;xPvfa zJ;ERtz`V1EdqrI>Fwc&Qd2*0(U5JTrF%L%!4nlrWK|>>o`Q_iwJU=i$IW_k1@gqFU zGceKzg$A~?H9vj*T0vg+JP)^*n=4oagp&hQHy1}AFLydR8ZcuZ^{|~6&#_zzl$BFI zOCv-qWNHysRnRtD(l&AX!49^dNsXG0R$W)m$`#=p7$&KpfSY+Iu=8gagp52=xp;Wh zaWD_w2F7;=9L%?(nYXgGXzy$b4-3Y_JP!}IoxL?a<~cbMlM}`#M~|HFb_Da}WI8&U z<&~wg3)4Nlok=MPzWzSmNDnVe_3?y9q_0<8d@S_SjI<;M9L!5z6z2x!9c@`~Gta@v zo{$uO^=cp3S3Jzq@Ts3ArpQc<#~Pvn^DbVVxS2<J8)wk@x;hl|xV#NHnVx~34Epjq zt_B#LZ)`kD=l#5oWFDkxcXtPM^%>h$2@LbWVZrU4?YqyP2L<}U$z^0>fcDhXQ~(JW zxw8ZW-+libm`tF)>v9X$6D=+(NYBVb$FF&YTU*{fF}Y@}^zzO5t;dI$SEZq&)if}$ zarZ!kL`W$r;=T^+beez!bY9;rg^L>}^We3PWZu%stfj3fEF=&g^Ss=knGg@+VIKGy z8y`L<^LYF`Fkewse*e*f$9p@oi#LZyt_}?i3|$`>8tO+qULUwVJP2R(Xm>X+I|Jfb zTsn_o9+%ET0}c*$2<Z9;dT8-5Pf9__t$c=vJTol;EAt@Z2&DHp6!W&Y8hmD62e-FD zGcPMEeYCeZ2J_GWzqgs18Q<C2!Ihy!GtYow{?3DY_jexGHZ^#{fNN^3peWDE#zKl- z>0@tW+1u6r5%;~aQK=7BCRcCIi^?{HO!Ay+-U(GBISmt6CN>V50nm9GIyy}QLmPKb zL};Xpk`gC78?I|oX9!78(+lajCgEWorSrx|F>h(s+}0HOM=(D=G5pW!JcjwovhJ?S zotInNTN)wgK-W5`E$GLtjy8}fXkT2A4a~p7<8AQjJUa(lbZpd>{%-utlXEJZCM3hn zJd>Lb@>tA+pJ!$H9XgLr?2sLw`6K=O<n$OYk1LA#n)&;8AKvfk>-G!q1qO&oh~apG z$$7H(>t*-Hj~*VrCb%EVAdsGy8ZQ}H`4g}z^6L197xrh=j<;P~e)r>t1I*KE85r8S 
zd%B^RSH{mgcpD7!c>KKKQ95sNfO)LBN(}S%4%W<gy-h+=!tt4Js>63ps;s;O`WM94 zFql$NQ68+6loTW+#F$m#80PWoJPTf(KZ<#EV4jp+?lcj0=EaU;UhHe;@%Z^8yv-M# zZ~TLp2hqUwtG{CY;r*BI-e+Z|!7*oLWiBoJ_HOX~y<P0=tc|r*c=~%uRpr1?4=Xz_ z5xo?RtTQtGa$4<pMd!@Zmv6s#8#*mRLt76o_pm4w^SI{QxH_ICB0I|{s^^-Fhk5ej zF~9F^@GuWLf25xun;1Eo`DGOISf>Ph$<L$mHb8rTulL^W&dTz_e%;(yo0_-*Cp|wW z<4ESYj$-~eI!{DOLBb||6!SjkaQJ!gV=_;HW?uG>Vg5*OgJymgub=<S{Da-c+qbqs zYE+f60M^#r021`WsUNUTI@<5=#g!FJ?QQ4Jb6ME<5i!Y8N!fd(wx`tGsOq|jg?Z&8 znHSMV;MIAu<LZ1^NYFp8^T%NxGv5X`^Qb|*q^uZMRsnVZIFAeue){4$#?Qa{Czy|k ziw5Rt@Om3y9<QHga{sLJc$oj3w_*GJG3*!SkH}RD9jWsPiE&eCZ-dv*{~E*QW~ak9 z9-gHA;Ql>$os66uhGVR&`1kX)#g&zf?X3{)K7M#7F1ehTMS)b@(k8M7u<xF{rJ|%r zMMJG+Xk>Sk&U@@L@0N@|Z}WwD+vDgwp1chn=EZ)C&TpVIZ?#or_?Ry(!o@u1I;@qI zS^r>fMk>y{%^#xkx@d26B=c^*$aD1Pj-&IxJBFqBL%q#0m`CY+TSwdTw{M*t?ci7) zZf0p_LPSg?E+GyGLBNG;5d6%1TPu2<6l%^=aYY*yx5nA?x_Zd`^tvg}kW>L7VO4D{ zyW{G-sF7zHp8WiQT&3+%I)9uP_7CtjFJ3&SrlC&8Hh=8Eby%G9ZSLIKf}s}LRMu3Y zu1Wn~=7oj$&!6W(Gv5U|k9B`A%oG0sI)BtPDe~icn_9f{Dh2V+t31d&uGypqyiFUJ zd2lsmMuu2^nwSXmbUArhQXI^S@9TWkKJ%Z~25#SZAS7u{z@et?otIiS6<geEX6J0> z;)0*~&pOW}YUZ7B1oQavHg%twZ*2?@#p!Ltj>_Ahb)Nm0%)fvC9=<3v$RAsHyF`ka zZ-dr(tZSF%=Vl-<*VEH3t1L@Qh&|HJ<H}V6A^iO3qcPqF?=>l6Rw*>|@&6R_;B9p9 zc$;Izu=&LW7@hwonCFjAI4<*T9j(B;lA^qwmBry^JlveDY^)ku8W4iDVw)NMtDmo` zY-nxa;r?np?9Sd(X(ju!T$=I@N%0k<@uh>_5lOB=p-0BB1k7Sqepy`HJZir?S4l#A z?7YnpG3+t@JQU#29NUER$B!ODOr)-^npcnuPx7#>D`{z{B_<{|F*U5HDlg2>rlO|Y z#I~{+lecknvSVRpdhz@@)>8(A#Ky-A4));lHfZK?#jszz&9P!w-J|{dQP*LM3yz$7 z;OCEopR>GpL=5|Q_Xuwj6#7}`l@;Y}EX@x$gLs^qhug&56n4G^Kl7Cp9annK^YYBk zP9JP=@4;gZ5d%UtWm$)$n6m2$6(fF$xl*{V!=jicCL?5#un*4X7Z8$>y6`)g&(268 zCLy-5vu<iVIzNwQ{tvh&^$U7_myERJ;nzR@^b_oZf}&hzb_S@&;aMC|QW9f<c^g}+ z%9@IXx@r)>k)c7X(`k5kxL|AoJMrUtEQ?u~nHi;|B>{kI*ZOej{IQuw9y4!)hk1P? z135Vv+{sKb-J_Tn;6E=S0x_%?_<0yH@i0Fz1v>v2&wURVnLy`5+Pm5@I&Xtb=LH4M zvvaW9IXS?vi*@b5ehgbwQQkLvT|j_ua_rk>#W&|xSW$nnSx3$xDXREdT={i5RdrSt zMm#!C#CpLcqWFT8EFZR+?W@a+Kn_SEFz;k%bGU&Y618s8(b=K-$Xq2VhW%sw{4c0` zedvPxVtwm^gcuw{7zgR;>0ZCY@?Cn`n&9VM-Ce3LRo$GOqM)QmOUB|ll;maU>Cfrt zXu_rrzdJcT3Y|quOCu>cVR(3e7Vq2xQg&JNHL3XD!8~TZjSF6H1IHZjlarSd5a8$L z;yB!3W_kkEDGYS;iwf%NYtT!)u`n+z$j`;a38(uU9W7QncVNEFjYp3j;>uMT>giHZ zQz3o58=D)RzJ8;qAZKG~cDR|0^hH`aT0>(4h-3$^bmP%^6!X`I1o*Kq55>#F;}jXo zS#BLsv&hJj>mCsW%F3#Eu1OKGN_fVU=^0tj(a{`U2?bUG_>q;Brlg|uKsX=X2n<^g zz9Bpit?kW6GJo`Z8_@aVF<)L%NK8z0rKjWYTg!{{(vlazcwDM1J^b#Q*RO$OZf-6w zAJ2xyx>s-CLQ6RrEGrs{^0MHZLho?0$I{uYEzBq>DXna*($Z64Y{KJh2uR4uITcWT zJ{3Rn=kUz8alt#^X1^m-P*7k!O#^-1z`U`kQC?AgTU!(SuJ&duBcPX?D{O%px(~J` z9vJ4)>!f1fC%^mgJrg5?AJXG+vzm%xfSHkz0oaI(iLx*^#kvG9KPLlo#ZX9q?=XA& z2M5Faz{oJ@Jk}Apy}1SygPck9ESL5L^Qh2*tBK`Tx9;y@n{VTInvj%)UD7w9JTxvF zhWmy~SZ-LYrm76sGcwX2URVM92@FFV=ZW;_>TVAY4aV<n?2gp=V>AEm!w2we!1gb{ zvB1G6BqY?*Quhz=>FR3#`03}Uh)@Cof`PuS!;K$3yuY%%aQJi#^mc(X2?+^0Iy>eU z=FZGc;qf+SiOH#WRS8Hau+OVR$vD0kR_qwL%7yvah|r*?PoH32Qh`eD<!8fiW@T%Y zSC~IGIR>Z6$I}gK%k8ZVtaH>4>O3%iWBmG0KmW|l$zh3O8Sjhd&u(pRG0>m0v9=5k z3+BVIJ`O~6ZDkqPEyctS==}Am2~knu?DS-;EuTN#xxKy0E2K!stSD?6>6h6RUT|e% z>CR#1e>h7-!6hA>Qc>DI&BDqqE{bI-C7f~CtG(SFY(tn1DH$pO;^XIiZKxmMInvJ` zbxkUMd~)Q|F_{1P=U>W83jsW5d)wm!@8V#0_Uu`4aWNA!lc<=8(a~!kK7M@i{3*n$ z5c*-2n#1M6{d+L<gCz!g?C#}OTVJ!ju||vIz6U7giO6X9wTQ`S(vxHH<!$Kk==>4h zW@%xTjEoG=oyIX2i5y*>3W^I?w${P^;kngkPiMQKz7Ezk1sI)=jgKC^F$AZ;+|(Fr z_^@A~hr&K*W~GBep1`&k?HBaj6yYFbrY7O?b<Updg%#zaH)o8E4Ml_m@O&LN7dsgP zAE>>sNw|MzS5S8MH`t@*&z~lwIWHHHSzXdL6%ZCrN=kD4TUGmbdz?OfT2xHL+|n#E zDtvZfCN?Sz*E*@=Gk?q&7TVl@{LtLg=!a83D9Xz|d-~+akC+-8o;rOBLNHKE2)gqM za#z=vQD*qvyT-;^3JP*APWEve=j8xhbNqZfU{+OCl}yZxBO=2FuJ%5B^Z+mOq_l!M zl(dXk*NljZiF}@WKz|PFN;GtXFS$w_F)S!Ebbk^O;*vwNSx^=tJDre-P*U=ui<@g% 
zW!dWHT2*Bkoa-!Xnz^;P4(Et~;OxD-w-3LIaB_f<0mD4l!sXRv)XHKNrT6c#->?FD zgtW9|W?l{>6C(hOb!CK(md5}0U;pLq;)LtBV`A8$iLrv>B0@p}B?URG{SgWmCmSm# zCl!y<86Mq>Ht{~`9U-~BOWVIL8~65b`JYo~nT1uNa_aJ%#ySQkOpWzVojP?K<qYuk z2H*i>I1BcUcDeaE_a8loN3DmY!Fo?8_`RR!9xyY$di4scntf(|V)U3djD7n2Y44RT zQ!^tNYrq%;`XT#!yT18jI<H>6yneMmIKUSUKAd;xp_*Fi#wLb=!2w{#_Rx(%>Q^^b zqoTt>x=Bb$v^3NJlJ1VS#rfH-_0{#Y6$s?}x;rvblVoL3xAXzvl~okshdU#jic5>v zHddcKe@013A$3s#ytIp>y_<_8R8NF+NPrJNA2%ThIgOwW^*JUbdC34DPcJuTZ+90@ z1XO1~q$ij|CKi;=g8>Fxi}XOCmQ*-9V%An5J>8iY>6uuVU5?7zzz#sng<A8ds@&7r zwzaXovAzli#N5OP0v~1;CM|7Eu%Hdi4Qt?D?%g$k$dQQ9&e{^%z{jCIbmu@{Bp7T0 zLPAggCwp6jGmH!l`xSNuwa`*XASNz)Xyhv9?~lYpM}YN(F0UXbV`*jrKOEB-cHYa~ zm6C#7SVSl~J~lWs2z}!MI{3+>!`Gl^!R|HJ*GOIv2djDJ%oz!B5g#vi%&*2A0i>rJ z`nRKEnZk;)!O>v|WB>p5-U2MHtXmU)?#w@P@9ppFjv@&Vh(Qt}M9|>w6h+|_P(T5N z7F7j>6z*=p-Q7L7y9EiFgt%KrI_Y%JtW$?1bbo2P-QWCk=dxK(J?9+u*~i}X?sx4v zwU;H)0-h1HqoSl>jyDERVcu;As<Q*^4yVC)lK-rT7~3JCJ%WZZR^gPG`rs^3Ac0PH z*4`ejveM$9ZO+N!!qUn!szwGU=KK3QF&K?~`}ds{I74%BDlN<%A02|bf!>pusfntx zA~*!_A+QSQI;;)`!Xm!_-{zM3cR&0P6&?)y0JjnN>f?<<7T`I8t`od)-rm6m`Z#M# z!q)#XE4V-ffwO;L+y)(%yt=$lSW*blsJO&Mxbtx45D)<>&Ye920utm3_#m(!^!umJ zoKjO$K>%-N0!pl?q;PG0<@=9f=JC@f9bIjpcC@rKjvPG#90-Qw=s~Bng(cX+$pKUh z2hzf50QPK1)(MHRgTsB_zIyfk{W}m3Adrq9KX&BEA<hE_I5-dNVP)U3doK&yehwbK zLn2t-GZ!Ixf*ZbLH_MJ)yZ7wf!@|nS#?B57D<~uck5x%o@$~6aN0_rr06ab!tcr+= z*i*>A{=N;MR7~Qc*hMj@51|=46EEBlPA)D%!3%2Ys>XPuknoU>?hfc5Z{54w+TLnl zqz|_Xsl&Wd^co?$my?&}<Ku-$mT5ej2Epb*9wfX~Q3?(V?i=iV_7cH-??AT)!$Vp| ziVulcp$5-ZgBTXBD=;J|zogJV$R8pgxL!CSdSk&E;cUgkFP=Jm5-J}+{Ps)N?m|W| zKQaigPs`0AIXeoA3LiRh82-z3-~e18JS}JwZsxxI`;VVGeO5^5Fz>PbCuH}Y*ORmi zbGy`LWaGiUpKCV@J1aW}3&%lm6{E29s`RSi)f;zj+*kz&0acwnCjdv>&%6ofB*vTf zgkwK9H#eN2va%xJ$H>IM&7D?UTC}o;%mmfcPy;-0=FDk6zM~M+eKNRF$h9lUdw95t ziHe*)eG+czKUR>JmyL`L|NWRpX#CZ;6H}vk1vy^co@VBzYHG^BLT3a{!x<hwaU38d zD0D$qPDWEp9nKa211#a|=UrS{I6F7}W=nqrN4$Pxy}G6%C?pVMkFK7MimEa^M@cCO z)Z`W9fZL%FoG#YTSRWWZHZHoe>++*V57F-W`L{1?8)|57R1>_Rq@2=OQQ3Wm&wRsv zkcImc`%$68!Wcdo%UuVBc5w2;#vN>2oIE^-czI8pK7CeDP)<q7j_eehoD>orWkN7h zP*#+Ymy=c4G&u!%O>K<5i*r%cuf2N!m=~9n(9zYw>FFvcDu`Yb6%iAWl#&F90^ot< zU1%<OMFrFIv&eMZPoGZAOeCcylN{{S)m0@VCBT;@B_-6<RWRC`SS-fCP+#9rPY3!u zZLO`Qjny)@FbB*tdH%(#x8E%;FXZIsxO=&S#MK2Qhs9`NG&Qs|5YtrG&{D^n;X}hi zY8&fwiwZn^7>0ObX<2DNvapD-1l(9f1qEd#RSh+DO${X#Ww_W+7cPt3gw1QuE-LID z85)_Mtf;RGi;gBb*lFwHl+{$F<zz*~FP;|`I(_aO+)O@xetv<o!qT!L(lUq7h_dr5 zvkK}f*~NOMb>oPB9DKrT2hZ-|KB=r@5s^`qRXzOt<*PSuUxTy&p$qp1#421qxV)6K zB=X!PB>)3(17RaPcR+=O6~V)co?n<ZJ3sU4^(!>4T)V!Scq!i6)*33n9fO*m4Dc2a z8D3RW31ki=xwQhiU};5&iiwz*82;gp%nT3EUAw;4*i;uA8|}$(cW|;N+E~GJF*C;l zrGvqOg{3(NFfVV9w2Tzs^P9Ie{&D6gP&l{m-s<XZPf1Pk3-F=PT<jc3&=j0D(+HN< zmLxmmB_`+&$|_1GCPzVBeG;f>PXXue>FG+awpG<JJ1(NUn@50yUyA2~+Ho1P(`w|s zyb?Qk&h9*LYA4r;qx`2%oxdO=At|e@qHApI?&q74n-ias4Dp(&B>_(`HzAngKN``F z<Qp7V)7-58tMeWxC@GMf9NawU&QuqIl_j9dz}V2p#0bvZ!^<PLFmH5n?B@OZXsQLE z`tIJY<g{c@Z%>i~iC{@EBbb6&ldP$36dylthL0C4IFcPvbNaXd^pcsAIXN@&{M&Ee zeE0sz^JkN@Q&rU!Q87`_6q!bLfS%jY9=yrcfn@7o=jP#-o}D=`GSojZR8U$BoKJFc zG$okp85-d9b@hx4%&iD^&Y)4qmLwZf3&Pe5#}mxZYcHv;>K_{!yD|&NuWqc5OHQKu zFl?P1jLb|muvkTPbtwf!Q7P#QViyI)CB+q$g=OSVh^uKDSn~>Dk*}*9;^;|j=59$x z1l2iDOItYm#pbu<*AAn3Ghe=XJ->LRw7fVnD%``%9j?@dWMgG*2@f4W4hA&n=xh)8 zNli-zNxXi2?b9soz?L8wKsNXX_yRU4G%{kYF7QdEx%m0}<QL|)ceLac<OT%#eX2l3 ztI^0H`3j2*mRA=4Xwnln?xQCU0fdds^*MRj35jvx5up%W!cl^P{9!pUF%J5I*0!d_ zrMVX`U;I+@aNHN)zJxkm-IuGX%X4zG0RNYg;-NusWLSnqYU`>7hI;SZyYsWT>tPjc zMR#v!OKW{rUV*)5tO}mKUl7YDV{t~Ed=}#_Y#7KTh}&~qcK0cz-6s{!N+`&wX<&`< z1bYVx!#g28qqe1~yskDSD>FJFAu1t0G9Iz$g!riVc<_bXlH#7x5ko_LQ0%%`3{D#Z z9E>brG5R<xgmtWJtU#?kf&Sn-IR*JK@o_<6!2!X6p%G!1Qj@A`tH-A&H}2jAlHbe; 
z_~OOQySGOsMr!J7(z7yR<71+tBjVy?VY#xpqV;l1OIuTQZB<EmX=z0%e10<M&Vb}^ z+}?QcdNcFLqi4_7ZmbWD3^cVil~-35mKGt&r3&-G%W?|xiYrP%P%W&kEZ?{>J~P$O z+MJP-6%iBV8|csQ_YDXQj!BG9&&|m!$WKg5jk%P7Tz))q)o|@#=(V?YwGU5EqH~fD zjE^;UwwF{@retRYL`J%JF>G8YrnYu^W&}-rLlvy9G8QMMtSO@H92lDs5|hSrUT2Re z9z#i{UurcZCrIPS!O3N*m4o%2(_0zu)}33RF<M$1K(}OMr6naNCSHnzM}H|fF(WG- zHeSBme06Ez>9Z$4(;?4ZJfE4Jg#XsnSJyJFj%l#a*48vRJ+^*r71YS(_LjPj|N3NY zmz$<$#vlKV5tuKJ*uVSk(UV8ubW<}EAhWxBI{>=yIXKh{&BAe^<@dk*)61=w|Ki1q zyZ7!uQ{euff@WqXX0A-mU!4O~-+%bv)$7+^^cpw;xcsNj9<N`&5uVjTU}W+rI`C`I z#Z1CQOd>84;*Tjh?iJA5En>J^$nb)yu@2tU!kI$z4+^`KTu@chJv0a;*4ES2)N%P| z*4)|A(cd>VHyaZdBP}g;>h!6@M~@&1n<)#?PtbCD2718};dL!dgX1F|y<IKst&MFh zZJn2aVCR+=0q|&MOf<0$3j4bc9{^l|sE0-e`iJ_4M+ax-W>&AS-D3Wr@A}OfYd5Yj zZT;itI&8f2;64PfTNCksw!i=A(T&?Tm)2JoR<6!1EzB&=&n#S-nV-FKbzy0J4bAei zcJt=k@)De0TW5PsQ$uw_U0rh{;JIs{A1?nga_#NNCAYVIy!LkHwf7EAPHas(GBP~{ z$ZzcE$SEt2N=fsLjG_7k+PZp}IXLTET5B1bh-w&V*hSa2kB!W&N?<AMyz-oKwz~98 zr>I&=WMzKCcy`^yz~su$81Kz@ZviAA1Autv=Ha<df`N=#Ub(t)>*mvEPrv`tY?&ZS zUcGq@zVP7D12B-gs6BZ6@bS~f5bgmm;1gPARzUyz;L+m;4<0{)jqtBOndRq)AASVT zg5UwU1!^A*mO;t<2bzVZK79B9t-XU7_5FKj;rI7H1x=IG*oNcUp1hi#5>`o~=J6K^ z3FmeFS<dV36vgipF*~kquS>8X(%jucBcs!^ifZe6Mn?hFqq8%kv(w0v{l^8MxXJPS z@-kZ|M-7aYypn=~lA^qlBIsd7rX&VY6dn@=5L#GUUAlgqnOh3Um<f0TN%JZF@n-Jh zPw4_*ynOup`GY4&rhC{9t1n)^{z-1Bw{L$Xnc9{#nn>osN6d7w%)ErmEXj`_-FkrJ z2S-!6!I`Yxx&@~+zq&HDI6pZ*_sKT%$HA3;vMV#pYoGaYKEc`Pw*LO&x`w2@!qB8t zkI+abZ+|N)l2ILFj2FgO*#?zeS%1(yvBs}KJs^r_Ii<m^<fQMOX&+V<k>6L`I<tQB z{x5y?1J9cI+~2`-hlM}%=>N->&hb^R<jdy`0(ewhFIrv_F^>~5ixn}A-Y;UjN6=uG zurZq|Rm+56L8ejzLP9U4WR+F4^z<UR^O%`*wkA|UGp3;#H~PoN3d+h{y*$l{md0kL z#^##_k*}GRg^L?4IU~Jupm*`w`r5~|up2*3{QX7PBWcurhV!2$Q+xG>nM3W(+b;qh zc1L@SpJYjXiW~^dj|?Y(0v^Hq&6_J58;DC@TL<J{Sy`T5TtqS>Gt*^mW<3A6{N<#v z%>NEdPBe6M<X6`u<m3g#C((n$?LB=g$TR~YNnPJaSl1;ir|Z^}Z?kI#x#URl)+whH z9oSAN^Qcjby>e|sD$**4-oAae4b;D|tloMQkkKV=8Of>O0%#XDjTXX3!NOq~63baE z!2aKq=`wm&csmCNhM#YA9FSFIb1U@Vn~!_$%GRv+pKNGqqOtvQYIassd|X&`R9H+j zSU6%aum!f)HZ_b)O)wK8Utj%c>cGu}$V}Kj{Au>UkI3JW2l@4zFQ&`<m?HM|Cwgb5 z3*T(y(evjh+L<H|F1DFQ3hdgQJ1F2+Z+-^+?DEp|;sOfzUrK=cv#g+>l1z0B4#FJ+ zu82y@gd0fp54NRynK_bmEr@bh0}&Jd;^xUm-@f&UuiPhXtwt`w(5nuKnX;ZzKdH+w z^vtD3Ro{K|d>g2LVOhF<j}%g=?UKr&=yJ|5R0tn=!8jaDK-Z7$oDRVL|5Nrjp-nch zv;ukK78D+llAc>x-PVVQ_Fu}9iLQcQAm7gP_qO+SxA*rTpMBl1+C4bXH#RypJ2QQC zae<i!oS7wdYg*)cfIUze07Wzp_%EfrfBMULz(3)fDT+St1(F6@1G~?pcLeYcAApzL zxO?xjs@cNu%sk-Cgvj&rplW_I(fLpB1C!)CJ3$a8=NE+}CWEkXWcXWA+>C7<bns^9 zGzgl`se@N;+<o?1n-sw*ZG&;G)ON2HGzjLEFk?Nfc>?EU=$YdZS#|5d(`~H&RprAE zKN?X}b!Zs}RH!GgK0+oD=Z!+o83dm<3Ogu8WId(vuWAf-jE6R!Kyaow`ve5UCZ-gY zK%5Wq^@{+Qyt+8MvNC_|8W=2~2_-)Y00aiJ%gd|x?yugtcV%T|Ze<z4{IzR<v9*85 z27dFwgFA@Mc?{r0vuAFVI{%3Of3%-e+Z6Cm?|-Iu06e1M5lM6J{wGNT4u}AL9RWO& z0eop`3P^roesu22@4)#_mb-?BD_dG~$}8iubNyozTmwVxJQ(Iq6n&ztrlIjsO)@#6 zdST=7^xFNiTC@{tG@MthvSXfro_}Cw2cNM1-t)Q=RuKjsIbMnN@7|+f>9$_~3yN!4 zsVp&?597(L;woqye%2sJKtBjRPh-6~rEULB%kOU(UnK)`6OuiN&hQM2ip|U^tf}iA zANyj0`-bL*q@+au06(zgl%%$<j@k9Ksih^f0xXo4muPCLTM;cM7Z<Lqu7Yy-m>>DG z$l0oMn6Tee_y}B|J^M`i&%cEK7caM>eN)CT{pAzAqYXmffa;m843A10IG=@egyF$M zr%+XM<+oGbZ`}x=L(@}DJv|V;rWKV$re-k0qscyj)--p#y^}7%N>0c4n66K9S>MK^ zmu0QvTr$>DrqMV?oftkw)*`%l{JLjEG5blieX=%cPKmno%%sxZZLI!PrLAw~oSyGl zg8)`JXF$7ve&A`{fKxa>Fn(SCf5!&=3pPX$=U`xAY3V|v_y<EDonBPh)ZP6vRWq?L zKR7YY&wpaio;|yng@u)ci;GiQTC%yV6-^*Jb#+ld;LO+Ge7$4G4jWr*Ap9#UOF*Y6 z+cQPZ#?LZ?f7Cfl*gt-B|H)(K(9I_2-)9=?0?%K){_evM-+lk#0g(B}e?9y-KEqTp zkMDg%J4)|A3tBfdJd@#>G9FPiQw#IxSPeKVnhg4PYMbui;i^_3`KrX+e7I>y^y=kn z;X*UCvB#R4i)fiDk>Xp1mL9%*>ll!`U&>m;rC6I@#gFy14JvIMTI;-W%hW!M?Sv|~ zI!)av(SV*=*FC)r*T1B^|L}vndB|CVz}+$qyjb5edI2W^?K(auw7vN;UVC){{zWfh 
zzrGK~#LU#*$==h)J0d1NE4QqnsegQYE7hZzN0YI#u<QlQ@7%c)mf@3+kGG+@5zsO+ zGkxgrAprc>Uw^HQ#jM_Y0JsAEu&}xcNJ9wr`t_fP9Hya!k02k7Z9k6cpti~U&z~=@ zE=Na2h+h=7v9@~s{SRoTu!;81-2U(%>3#jyt<RDMH2nG+Q^qgNENv#cN5`out1HaR zO=V=I+k3j;Vt<A0q4MzbWOH{nO7hX^S-#Qn6u%%edeyVC(K0mV*K}|Qt(;iCyK?v0 zMLo|WN@Sd8jjB_@Sv~)R(&2`owX)8|f$23tDLh0~$9286$jNxGoVnFo+lc*33N1MQ zs5XO5-g%#z`zf3+Ki2!WHshF<C!eM}x3>3J_-GdMSa~CBePq^jH(EeQNMcHMSw&-K z*T~Gwr>-2u{88ScfOcSdDJe;Hb=Cd*_XF(%>_tU|fhPg>G#W+V%qdAp@w&!306f_I z`ugIvYas7I)o(m}1fO7lKmg=D!1K#jcb+^&Q3?v530lN}z@NW;`||z!C$C;Tdhz1n zv*%|8PVd~c^KXCuJ62ok#~=R}03KEB4_~}|{@wd0uirpu@CoWjYmXk?eDDw@`MXb_ zUAuD^QCZN}fEV1oyK?(BNG~SfXW_JqON%(UIKSEP4F?BD&(Ofuyyvh0t8>?`ZQa;4 zG+5Qr21-65Col9;3Pi6CUVfGoS0jjC%`9ZG#z%F0(yIn<J^41bc927wC}9$*$EXuE ziPm&TsP0>??p-QxpL_TILqkg+*BQ-SqWE)00UFMioWsgK&3?IE{!Hl}nLDIT=Tf44 zqu|1)?R`wkgICkzs0RIry6a&TDxYbDfODP<DOA(Q#Kg|g-oqPIJp^TiHTCTS{Rrmg zP|VK_jE^Ch-?Mwyu3h%_c8}h^x3eb!)$iSl(0*uG2!LmBVyt_xzi(t1Dg%GEb+k3N zwX}D4EZw}--qRTo5e8ZUfCu2ee)nEhRyyp}+|mTd-?)zst=t2kK6&%*%GIkm*_m;% zQLSw)w;tT@9~yuj1nFFM?bOszyLS8L!pbtx`?ELSb@p^avuUZx>$mURfBq81{OrPf zPk(Puf8W}zThogRaFnvLl8uLtC$7w9W@iKh_`#N@)+UI=kp$mU6JcSYXy?Vl!(CQh z+SYk_aB32b*{7G6Qc{z>eHiH(X>fs4OIHUcC+a&o3To<73X3CB(|x1jKoi=yc_Jil zX^oJ)I#Gd?II?i_?(;X=qzEo)8!Wv_lUjNV!|+SH+&H{m+CD!pa}AAHgTgb}`4m{B ztVB$MRU8r$i@LTE`<E0Ivp^Pk=dTo8II+IG8g56_-43bJ4yjUj6rE1syj6WG3_@x! z<|G`!%8KF!-E?SDYI<>5RZHstbF3cKHUMK@CeH)rt!;?QckZ^dw{acd0;-2Crl$Dy zhmVDXF0iw+v9hx2>+69$K6B<28yoB4BZq^61K|^vFL$=ze)giMxbWQBGkf+TJMP=Z zZe>M4;R^f=3ub0`z#h2LuHCy19XgnvmV6LN4!CzO*xtSD``Dp<6B8ql_j<ZI`}XgH zYS8tZI>ld9TyXE%b7;ZC-HnZn4gO_sPdax_0JN5lj&?{$5VXd^!U9*oOz^i~-#~Be z{sRRCITmJt_E}k3_A}ox2G^Lse!YKuR8CH2FY`R%|Kj3eoxMFhV`HHGvdStz1O~+? zp+l6GF0RHT2dtTeBF>Oi-HjewbK~L5_Oa!I3ig6HUp?<SIh*v0_~3%pE4BSAWgQDQ z?tOl_MNQk8jbDvZohoG+q3x2~Kee)r*gszqvKn?t*?p}*-enMSSe1H6g~Fpu=23D! 
zpx|&w+1Wm}DZYBfJu*|%5UAeC&V%6{85@_GlUq>@!7Ms%2{K?S&jaQcH*Nq2o<4mN zAP4lTs-^;%hgFa|An7$V)vrH#dWN|QP(Q$Z02TmF{li0b^|k0KQzq>}orvP%3|Ixi zx3;!|*3gY#fAbAQ5T&I>0ClK{pb`+fXE!JuU0oehQ<EJ#H*3O1XpozSyQ8P;@$0uC zp~0{b8b5G=3o5|*7#bOvo0)#~x4$8k(Xs~-X(?&RQc@D%eDlp-X5(-m^yum7sle*O z!h*2x=6UWx+L4eHZyOlOuc}GTFA7gi^NEb52857MC2xr+`IG9_e0o7;trJgPzw?YK zW0$a0cghDbFK8HQ?wwyhxLVP**gU+t^(~Yu3#&&2al3^K`E<M#Y-7wB*$<w+*aq#N zEU#X_KBY$aO4^RgJcfr!?>q{Q2juO!<VgFZt&M0Y5oHsEsAhFDdtCxDfCJAF7%VBT zu(ZChYj_x_{<EI{h<TvFx%G7sVWC~j0vdkh-hBaq)2ItzFq(jQbQR!)LK|pybY}YE zMKKic%1R2}-d-%MEU-HS!l1xQN{Rq@s3QoRIgQcMAX-^K5E>p90#py1p*3kKiLA_Y z2E&tC6C8oX#>PriL>R&L&Yk*ty3gNz2n`K}RpxQ`poqoeO+d6OD9HPRun7%5D{uyJ zlU+OEv}+q`EiDPa@UR6K-ra*991;ZAOCs5z=P55Q3t9;N1r@+4{6j;sE2?5Mvjbui z>A~TS3_s``jcn|J<P~tnUn{%f-7{vdJ%s301m}KIgAPhw#W9axn~_jD)G)YO*12$X z<L9~NJ-idyc@<b?tcCHx^48H_an;+P{qx1aJ!!YF**W_Zetkbq8Jh!gqyutx0DCY# zO*d(J0l#yeAi)=ds(Mc!Q1u{IGfT^B+Aa@Fj!z=mX7niUk<XaFaRV?fCMNO`^Af*; zdDu%Ei|HR5>Fn!4xfdi&dvDLv@4g4LLmlV~TH9N-v@~E}SUrF4ENJ^@@4kmP7o`5i z{fAHu9W2tt>A>aa>Ed7+Y65!SeE1<aI1nX#P&jv=y+AR~l<2!;Wu)sH>j%b0pjW#2 z?B)8S$4j^FxVq7pT)*q&$rG#h?&lTcv9htkI(XK=#2BbDFcA?Uq;KNb-_qF*ex$6d zh@P7f-XyK09ONG(Jle%K(ALe%%*h3mJjMu=ybTw|FQvTq;mfxPg&lC8mF+V1z3T)G zLgg$YAbPFpS*q(_dGp=RUyOYA;@dM9jaUS<4`SS<EyLw)qMLhXwn6(R%ayCwj!N6e zIwl(jmLFC^NFHH(CFg@GE+BOd=m+l6p#KfyEv{#yYi<El@9Z1s6BQSmnUhyl-PqZM z#CntCTlG9(es+CbNbmxRc^Mfg(8ixI59nQg^aNRDY6%eb;2_Jl@5Cp>p+W^{6J(5w zi!(3ZQB-C`MMa{!11*C%x%cAR<r_CvZ*E+>eS3ao89g+#jKyNUeg8pNNDx|vRdqEL zaE+|2j88;8y)X|^EvjEY8jZ}%EL=xq*wEwziDV1C%)vtkkzc<ugVy86kIk$sXTa_s zG2hYKJ+-t1Lhi`XBXBYt9Qz3bbFz!GxcEiXO>_<PVzcuhbcNo~&chq-6+-fOGdUfT z-6}LQkBqUUJ6CT$ku>x^rb^TEs#SH$KaTNok8ebWC}-C0fARa)l#B}2lWHt7HYao$ z5`-`l_l$dwo^9jyPnPJ^T8D_5!mg{Dq-ZXA2Y@<6vWL}Okue+%I{P`?-gCMvDpVCi zGh<sjBF&xR9}L}hVorWRbuB2I!3kyz`$^B=yft}s@x+PaC=@XmEuhe?n7{V$$){By z$*VVS0qlMKd{71k;O{_$|BhYEh!Wa#r@NxSW@Be7Eh}EXb9eFDI@rp_#_ZA}dT12$ z0CG@1yO}M+>f<->b91wxr+^9o(c!6ya3<!VO>jwIY;agOw1$Qna=e{8pc*�mMfT z%<}=}Ge2UU=O9mKZ};rVN<&ivFg!%rpU)Es5Ri^WO9va0ltvE<bMW%DqPpSjoNxr9 zx}E`-s?&L+;OfqqN8i4^RNT2=(n{Gr8-&eite*lgvZCv1b?*{rn|JSjaV{5aJu2%N z%zjnMX+0k?vk=$FvTe}*$@1>Q56Lx?_KBC3+;R`8xgJ)d0or*r-TAaU__5vx#0e~C zFuMhfPO1_?)k9Eb=gx2siGWT#rLefF^>TY(-&V}8+`U&-Ta6CUBU`Mk)*d|kjQK6v z29-LCH*bEzJOGKEogG?3?dZ`XM~)tjhz#dDh6oryV^((N>h0SLYpcjh_!~DS=dOIl zyf)_XcL?UumSOe5tM9UNK4Tty6XZ9Fc`-53xog*^mX~vKvsqaYR0G;$Vxv>jlA%7d zfbx81b_UxPo`)?BEsY$U9B^sSBQQ@7vBS(?eCwMSdq>5SeFKs4NhcQrB1zi>FM_f7 zpUQMfSTQjC^#?DM%>((>-9XhS*k&A5bE79THlifI{@_<0P)T_UE5GU<X(EpnUD!BK zk`Uh7Kf8_FKUq?0$Bx=0?<Xd6>IL#?&;jkoFkUCL87Fame5y2-b2__(jrNL}D(d15 zh_(c0s-u^$S9o+}YDR8lb$MeGJbe`Nwzk&y-@G?4)Q9E0$m9tu8JX$8<N)&Bn>@b- z^E(mR2h7i`uEZzCBjfpdc7v*K?P!CZVq|&>eamZhdHK{y{#{7(yDTj&?!A1yc<tKN z>(}R3SKz<Lj&16BELQvF`w!ycVu-ZawM$l3`oZh(5-%m7UlTx>3^E3SP!#i`qQc;4 z5N<n@olq?VVa46o-(}|_bJW0=O>L8zw&@W(+}(o%v#YB^Q<LCjPyxhJNp*EqYg=YX z*`?fq2xP1#26{js1Pd}1RE>_gg|e>E4rQvSabWY{;)54&l1e&3I;uM6>3CEh)$mlZ zjV)-I1(FAQ`R2P{c{<;I`-=af(cW`9`&C?yV?6=(7G9a#p#76&dhO2NjU&HOblIsy z<;5^gV7*UZeNO85p2GQ^(F^3fXtqn(c*jLcK3NlO6H_A_J0g|t;uqu_9haC}kX2fK zgo$}T6G&W!w--A*8vq_~2<jT31yI?dZGOV@DCPm-NDRw;0BK~;ZjdFQoB?umjddNp zUC?oW{D9VgT{$^91A_wEyE|&@s;{gpU%jz$=JY9KT%9?dGdeSEVPOu<g4p2U;TfKs zu(TwgkOsL9#|;SZ`x)jPo$S%L1AOGp%Qx;Gbo9L;z&xBoVM!tIIUE;67oG0fH#$6j z{Tl3x9zsJ?y`ZKpqqHI@HlF6=M-PdB7~hHls>V^*f(W7OSxq8~3OyjX9vFW9#v^HC zA5iiJKJ^l2alBfde(CMVa81X;$kjW)`R1gFg*W>#rM)sX2Q*yI==sQ7hF`n&U>mo8 zvS_=cu$(j8Eo8D!%H|Z#_q47**crWmbA};&$}W4)<5;DL?9#U9R7`N@1XFt_TQ@IS zU??b@`0QNh+>!A*mc9G-v7^I)08zkjenB2+U_kFy%x}i9TQEO%1<}=Vax&lS_y&dj zQQjjbPo6-(V}cO4q`U-LM!#bMZvrY75fK8wFWtHWAtZ=u7Uu9H_!`ZX3fdbTgg<=b 
z5GN-GvxC^_<LkZt@R47D?-tCjfir?|+RcoPp-m9^Aj!}m7cf7$u+TR;g62>|!s|VI zPMzY1%K^VY18_JQ2zyWPpMbqio;pbl47GOk0PUwwv;}5R&@uW(iK6WgJ2<!T=+$?g zu@xNBHmVLe+VrZ!YVKHST1or-N0R^TpG4&p)UzE^+9_hPPnB{+i!Pw=8=6|b4cb3h z#;>e$OIiP2!e)nr4Tr1)pdEd?7%+d{D3nW_uv^50Tgi!A(Md?d0%wj0dy+fDBP=Q? zE|G_a8-2BG7xKjoGNvIZDPCAyh`x>pp8#OM^VeU0t){Me{qa*|^_$IAP(JAQxqTx; z$|_3x_U(g4b}$Rnk&%&_zjh5gA|TKYsGCX2JHP(sYY-TK$t!DX8Ce<NaXXoM2I_}| z246}-a-t!Pz~9cD92^|_`g&7W7eT-S^3i4m1<ym96Z3OZSFb85DWH7;Y=`gxP97GJ z!O8J4=m;Pl0bhpy!}-Hy_K%MaOwZs=@u+9*L<$^b2Z$dc)eTf1BIEJq%DRU8Rh;>B zd@I@}?>~Q?Q#*7-*%A5Tl~H>N>nm&+kWe~YKd@Tbu{gAF`}aJZ4<A09l`>^Lt;wq3 zz@<h#uI(vj5q9nNHr4;xV(u9AwYc>GHToVIdu}C)pmF$lqp%Cc;X<a-XZ8G91r0dm z>`rL89o29X#Srx@tRPZz@bZIr)`(!Pt*c{TYy@EN@N}<jsF}ZUV`_Qx{doW<!`sus zk^ssF1U9$;=pSU&0$BwKG4}Bl&G{P}W#y#~4t7>VO9+P{u&r;ZADcto=>>?kb#+i_ z6!@2wl|^uH&?xieb|BuC_BIDcJ1Z+oh$b4E8=&K8@96@eZfRvfBH32gRxMn=F*bK) za&e)urV{oA{Sz1*0D=lV=;Y!eoU?@m!O_vav%lBR-v=3lBs&dGj57yz*3vW50Qn%3 zfZ^K*2W#3cSF~P^NlVu?G|<r2R@Kx{$6`&aZ3twlp^crknT493!69|$e^a2jMw9^H zr&jNZ>3bbgbcWDX&N}s=I^89@u3>1cs{3kf|MIKve(%d;W<)=BK7~C}Rvao69(C7a zSg-ipmTl1f$+CX^_Aylomm2+;He;W>lYm~J5I$1KBvJ?;C2AgjRMBa#D4tiHc3zKh z8tWm0BN|v)gM4*l_%R}5qcXD7ODjs7Tib_6hG%ET=6>?FE#MqD6CjQJkYsjdc5Q8O z<Hq8~2E>)4v(sB^LItP^C`G@|-TL}2pcH9@X<J`tN4KN9p!E^=L2H8RF|j@R*{$HX zH)dB?w!Q-l2l}}C0<;V#23tP;U<9-o`U-vD<WzNATS0YQdQoXYPF`3_hEGg_TX2M< zPau)zZbEX@F}Kt-Fg&Ma^Dpu=HDXxf;PS1fuj~WzIAv_r9CNYs%ERjJ>W+yyjWf0V zE2Zu8Q>*uW|I>*{C}iVP+%1aVr%XPe;=-%tZt9x64cb3nvWr^xDUe0*p(k{_xfESQ zOruc~GmjHC4rdcK;*hmHhx3xa2MX%@$QzK1ZAnbn`+I~&h9oAVgZ8zT+mX3J{^3XB zzxdPXxhuc<gS%h({pslMtNF7EzqcmywW7hPsg|DJissh*>e{rTlDN#A(8OfVuqb4p zhWYgq-p)za!dlb7NL<_ezbnv04SX_dN9Q*l$L6<lE7*&hL>l@woYM{9$1=k5`aso` zbuKoKtiAv62j+NbZ0%>`SKBRQz^drDU)h;Y%l(wLd;i${HfaBRF?NhOj`LL@#UD_i z9m6ohEfPh|<3!BkC5S0}O3r%(4GyWgNSX#JT1QHo`fK8eCM0_+D&5&90EK;Weo<~^ zHSl5g@bItylJ0l9!`LrQIB@0X?aci~Yro=RqhD$Y{ZZJ!<V0IvUqw@MUR6zMQAuoO zPDo;kcLXvrxj9f{@1$pC197Vi*5qH6T@P!}Bl9{(uHNmRy>U*5&Zj~FRU>VAi9?ZW z!pN`cLE_f(&c%&~f9!>^t1H*J&T8+upvR)%z^>?YNS(%`O7)8=+D7c3FE?)86Vmlo zvQLz@irO#lbWT4=+$tH6FJYMkpPXW5EEmlLaGt89C{4#Wc{3j^ycP5NS-O)q!uFv_ zsfoD-ITclv%`KPv`k9m3UHJmYe>xk|*zf*@_2^$`V^@CLM|#J{n!39ynp<)!s#6Mz zVluLW6OtJbG1Pz%dk=2_yeR^BTP)rT;?_MX)LjZL)bO&N*_$^Xe{0Ri;F7h|rj@EX z<#8yGB?#ehXz1F$FpPw*f9#jtODi|H1TcX4-SYOV@(v(xxK$`xgn(_t{w1ZVaX`!{ zz}&y|qInpw(|LnXDeF{m%VcSrw2Nl3`$1AlS&A6?>bN8t(9^Z-!nDlnjBH5I#RKm_ zNE;Y;DK;}Zy|}EfrmnuD17L$F1cYFJ%A%j=$@%%@A!ENArJuCTADfZm*Jr9>e)dm{ zU+(XRh_$$`A*-wc962H-Js>s_{F>qy1d_(Wg=S*sh`dE+j911Pa;rM-Q*i~rcTR6C z-F`}oC^@9$r0S5PNh#)5riqvY`=+(m4Xgmew@+OA{`X~#`HAezuVXu|yi3@4m%PJX zd3#PJrvpmlGa4?hU%lBz>|ats5-Kpx$+ltD7mNZq6r9BgiPF~TQp8kQyKFJzFcu+0 zPB}X{ixBI8e4>95jvS|DW@A9Ku^>|&ynNjP!~A06BU3Yy@(O_-t6Ezx_w^x!Klc-y z|I$=Be~6`jF&y|k0_IEg(Pq0xMjAW2K+k}HM<Z6~zkH+O=phkgp8%3O1039#<Nz8T zB#jczV80sWD@9j&RC&kL#>(Aieo1u)<n1mRh3R?K9#nBXiSu!ZuCMIATGO}OKE3hc z&41*FetLKhc3$~i;)LCDc6(&)I29dPWJre;?VoJFRR0&17caimbByu2)M6D_b_(YM z@vM|pimYvhv`xCaL#_bEV~?QzAr%*WT9R8#B_*;FPfx-S?DQ>(7G#>8JA>*U42n1O zQc6O0E|6qNePcssCn)@(>8W2q<JcdWOlIuQk@+u$!vm8Dy+b?#^qyT_nOvN`Wpo_P zvL)J*#mr2WEVP)J!D41+<`zp9Gc(I#MvIvlEM{hAvT*u4=bl;jy}5s8>Q7f@*Q!-J zvLY%oBVvaFgb$Vuj1CeVBgTDZbS9<04BJ<(Z5UBHGIX)5ukG9qz9X&2iF*w4I?y4p zBX)d+=Rl(I>4(ZgRX2gsOS2t)rD<mPa*7f=QmQ7Cy~AvQ7tVxPn&rQ$PjD7jaZM}a z#_iH60~F?;Zm^O4QjdRfy4OR_^i@4W%w(uWQ)z|}`0kJ_Tkc9*##T(n6POQvq({X3 zrA*R&gu%^Tx@)=}N1U>_k+%GOGv2Gkd5ms5kww)6WcY1QG6GOJ5<G?u(p*_zUEhfQ zk-dd3J@r0Z^!&&A<FsYE#DlY!m?ty@59G`3JW^i(bW$GfOB9*)kb}H>LOmkbW*4W1 zm!eF+AwwvwaPh{CqaVxKKPm_`hnbq5Z{HTy<XNntV#}1}tlIEj*u!u_9Lp43EKW|? 
z_5jaA@h;uVaMP5pa%JPC@jC0q->rEr1>P>wtLyL1S|J-y44r4*@OyaCA9)JHwscqp z$}OJmM;|K&eNDs1E9>vaq8acPNJ|J5@?ejlsJIWl+#XywB@x`kj;z#6YJ_Dz&*hIL zj=vV%wq=RjHp+ziL9<9O_w>i_$Ri^?#mWM*ovE7vFx{n1L>NxPJpgXshd%}akL`gg z`UDkO?YCa8e_3m2x#=M+Liu6dP>Y_jZfRiD_kKI|B`66M`X4E!i?E8)Y^=`=jtn8; z5E+}02C}VG9X{6lHsL#Zo<9xGIf-6>*{SEgH@2#N*>qt}NqrpWp<bg5Iec7vtRd+u zb~`y4=ZL2B@hr~C0@r#)8m}czKI-yIoU0{UhJpw(+s9yM)h8lIPt(@!URmMOQk)Zd zl1ox*gbXp~$@{H5@cdn}qNb>_3#>s}%ZJ<$EDc+w?$yytZMVDaSsZ%}Iz<#4xs3DM zWuhBa&)@Gnm{iM0RNVVxE5*(_tmcUgNz8}{GWOK%yoq{jGO&}q8EY1qs3cwWg)OUI z?#f?8a8Q|gVMm#8HAyg?#=e6pFIq7)2L>Gz6BDPWZw75iP{n-AozG#A|MJ9zxF92& z1bKXcN%I_n_x{s7K^0V;5(6zz!Q8J9%S@2Go13FO89{9I)2ysXGfb~;<V&4!^e^<` zH{|_+cg@!A=6{S%h8NZaXMbpZ`#s7Pu`BY%!d>B+Qr=dR;zD1!>G5J&d3bp7>X;hg zazn&><N|nmMUv{JTw@YzQa&XBPy2FIcV5D_@~pc27h^ewx-M&Rl{(^6YWCJ14Xf0L z9Be#=_Q)ZpOL7h6-8`wM#mP$4ROY4@fyuKw;LcH2o;;K{j)O)n*lDqhcecrEaB@%< z{7o63hjNjI=diC{E9ydrE$aG3RGY*brAP4DPRMW*<~A{ZuHH|qK0Ql+Wp;6SVS|m8 z6VTAj)#Pk?k3BzfD6`bfm2e0D`ZI{znSP~gbhA#;fjf*@&=GTSF%T$EN9(VFIe^eo z;$~8*t<Vywk0N}<>K4FqjO%4KU{zdf?XH)3&q2OQP1`>PB4Po$yYINVPUg1cJ!ujW z^NDANDL(T<nxoDsEVV%0(=n6$1u0t>C#T2jWnfT}5|2|$L-~dO9k-tYi7&g~3oPa$ zdO0oS!lTag>r4LM?ym!O;dll_ItEu<gx^EskcodT4;=}V#rf19Gd1#;UBH|lWXD)~ zmc>Q-UB}B0zXdWEZWt9G-Q(=6*!Gxp&T%-M4a1laWAc#^I!kwSvc!E0p91tjko*-L z68?dc7@wV_o~Eb-HP#v>`D*LqBiM=}Y-jqhMI`pAv>5nDUu$wQKTynqtbZ<X8bs`l z;8&p9+|=s`Uv4erScHK!Y<?5~fR~hcjE-<djxKzL4)lb!ZxlS<_)FG_PrVa`WEJ<B z$$Qbz6zZcPrgPJCzZy7_)r6j*!q1lB@3!Hg18DC^g!tOWRbIV+>n*$GWI5c|7aiJd z&ib+Xpk0QW?4G~nf$Ka~zuNORcc&MAIi{jEN5!14DtH-pLJw@BE|Q`Q7jv!ltt7?m zYV%bzH3U?xC1DN2X0IiQDpW-WlkiR@3tn|}uv{+pg3uKf|JR@g7F+a&&!eZ=yU^K4 zhPh}+TV8rfIIaN{%vQuH6si9?`Y{n2*fh8wSQ&-Mkx)>kvZAVrw)*D0j&o~gSxJep zL#>+)!m_)&*$bJN@7R^!&+HejjsB8(<O1!kW-*b#`1JJNGU^_#5f_7p;!C?Qift;v za|hAu7L#JT*dLPc5%K}b!i8RfF!xd@fq{&H&YOj99j$HET~sM8BFSAM{-nlOCOhg$ z$^!hIuD^(s9W_R8D%rmrfg%hHYe#t`wZ#;T@gvr%0oy{eGPDH@3vXZx?_iP*o>h%L zj3v3744fO3jF5GHCL#JtCN_GuB2WDiYjeQQUsN3S0wkV=CF-$WB|1>c&NQvpr7h+0 zkqaYY*JX0lJL-AoXzZxW*K%}M?RWQ>C?5l-?_p6=s$ZN{`9xrBqJJQn$<dQw`U(60 zIQfd&^B2$*mITbz^dt5|ng3}hOUTF=l!!=3NXOR+@)G+kZ{9FX3jhqR!3A%zyB4yu z{gJ@a2=a{qTtBemWT>V<yh=jVBAL0xPT#FTHA4ND(VcINS)BWnXLlgBS$hyI!A2Ac zvqM4I7p;kru%loN=`ra=De-j?)jN)KN1xT(Lq#&GWMY{w7=@}xh;-}U+>7q0mLu^X z@c%CUT0#U!e(`e)FJrLFH0}~>Vsw3&Vt4vbPW7MRl5U<ayhSzeB7cRjV^i02aZ?@k zMwEi>(Th}bBpP3)Gjh_IW236Nc?rZu|Ni7@ZcmX4#9N(Ba)8Rny{5Chc8$R@p&5Y^ z<X_=H0rHdaa+9z#u~M_q(4qGrXF!8XqAZhm0g6zr)mYgA;pdy&{NC(NF9YJxy}=r$ z54wH|y5MW*>s#B~JG;3Vo2!dRP*RbX>d&kzjSX-$e5o=`N-Dv^(jTJ>8=nv|jq<et z55n<tE%&{qY(O7TVO1MU*EW=qS$IG!0#0x(l=-R`!W!_+6YSEyM_RSl0OhMIG(#Q7 z*Q7JV&)ik+tUde-<C~*h!mAl<(#62Gm^|J=fu3&#-@0`_?7!csFI{x9pi6~<*L$+2 ze@S~i$TFc#WU5X+r60^_a5B^tz(O^a<4Q=Ec-bbnrSl%1k6!IE)v^f5Kvk5KAohcJ zvotz_@&6hx)$IJ_dW!@Q16vLpL3Mz!fH;huoCpa+yG9m-{D59SwE9+44u#=v=t4-+ z+ESg@(p48^IpU=I1&H!@q0#SpJ;DmWb?u?02n!?*_q#jS3Cr$Rp5+(eW1VHA!pK)~ z;6vLOhx<DSJvhI)GChP8t;%+fi0R<2Cwu#;0$i(+!5oGz=Nft?=LqooQ-+^1^h&Jg zIs>GhCBb$W<GE4>#*>S)B&<p)B5v_-vAbRnA62Z-e@c!AO2~`QtBM^x3?v~KQ(J;o zvQK2jX<&FV6V|P&G$_h3RGdO8-%HOh)92cLesZDw96$E7ItWYLB6sQNWS#=V)tFl% zTCL23g*({l@SXFq_g-NUT;k%Lpkp3^h%>U{Al{Ja0lI)6sK=0ki2!x|vF{L|z%*ZG zjo@g2{WV71F$!5feJ$C*_%zHm0p2}AB)AY{sNct6?>KS*ytO(qC-<?daY7*243-3m zRc4wM9?s}COh<-Xds68+4e+NevZ6N%b!}<2d9|_kLPUIT#i<j~QDl)i9DYz=<DF3l zYnC$LI(b9A0u`~F)AQl6W@4jG_rp1);c6CMo9y=*x|GDkyA*v!=!LSwu44X}W(oL? z^(>9x5n!iYpY<rNvF>ryj#wAGjD^v+-O`<|&SW|ZFVD!Npwu|m&?1VhdA|8K<VZSq z0@zPcJVFiZmL<?)m{ov|F$swt1q*TlfvH$uhPO1gr$w5i4{TI$^%pHC7%l7vCQ2q? 
zA(#0cPfsQ;&>K7pl*Qs#_jK?7j1gl5rr_oTmS<`ajl#1yFFZa1q00QG6$5go)=|y4 zvw=2ZJu8;QB}^Kl={8^`?a>yVxC=#7>e~u0w0>$VM9WU?1USZpyBV4Y2>`mv&BM7I zp}BeHnsWa7*3LH|3}W8zvri@zoC$3bFWwuZy2P<Y&be01=B8NARm^AIdSGU|M(qAM zZh<alrM2PJ>U>2|v2*#O1pFDBZk?4-SHQcdAp1vQZJr=NM_~qq3;Zsyv?;&E|B$kk z<($u(0QcuMI6w<!(@$@yN5PNjxlaJk2Aad48x$S&TOI%;=sB7W1WftQ=E=g4`z_pj zg9dG|^L@t`ZP=UiU;ZkUJYC0KDfu@uVU5V+7PRuwlYQ~<(RXr~*&o|D+##n^`c~=& zW(!s72ARn!=Rry2!oiiPu@*OWzbbj3xp=x+^w7RWM)Mh)I=i}gob}k(#C5gLo@HI^ z*Ch}c^)89<rGbxxcuxzgfg$M56rII5m7*M_f0pJ|@KmO1!yaFk;Y=fJ<f86cN}0WA z)F?XH@g+m>mAKbl)X7f+?HcyE8R!UkfSlWC>Invs2>@**GWxLs;f^VP5B9H31q$V} z^yugEpF_TaPfccz<Sz8)309wJu5QDqZJXj4qoZ6%wG9bt@F2M?)nHh(n`F0uVgxX< zW36ZCKxIBcv4&HxJcm?aPdx)X)tkm=@5(WIUM^Ee31@ESog|AWSr6F0qFQ?#kpez8 z=hW8_KK1jWWax{Nf!M5nhLVxAA(O(>LmX*Z2^0#$_KX2{J&zi(c&fuXKUHbY#GL85 zwE3L8EzckZnvBPvtUObHpOyqL^HllVHQ2lzAfB7l7E5u8xcHDwFM#a#XU`CcK$Pvp zjjNoW_BHtQw?jF42y+_0l>ixIyx%f{wX*)cLWIY(xm~b{e-OjIg%JXdG+5v*)W)YZ zbpI#_EvaBg?=JyO6pP>52|SvU=qi%fA{!7Hc~dY{7kSy+fIO`AVpvzc8tL9(M<pUP z2~9tEa|O-R9l8t%D&_yc{W<&6Ty>&Lf1zPRpws!_@cKUVNea2sFf5;gJ%5kQDu2W} zrC^Nj2arE``8~!(q~e-fFF6c;zEJ`8m*#PWa>0Vu$P7A}z)`M_%cXzL`e``jGDR;Z z@;ed2#4#|sjOA`jtsW_bC4b~pi^c#E(=o*AWhl1+tb)7vkkc97ubPN6ps2AcIhub4 zi!Cq~ew*Z);Tof~*w_rLy(eaMh+;ZWkVndsVzw;^e)SQWt&}IZ)Hl`fO)INKdEO>9 z>_ktJv<7Y^h(LdXpR37xn4?Z>cHk*DA3Y40PAa@unyK`ulljqm6#+9;^!56*FrtpT zI0p=D%k5xs&23GD_48mX6qy)H-Ck8Mho*}c+1(+cRMA<AJN#}(A^*C(b$qSRx?TOu zeGzb*Oz1Ra$-?=qinB1Bw2esgsxn|`CNEe!`pfvu;CoKEY|~fF!>+;NC|(gO(rx|K zps#uI@dGQ8XDIu?YdG!^ctE-5&?(dq9@G%POjg9NAsp*A+(wauQE`Doagj}JesFvj z2HVBvk1?S~W2p|!6aidJ<!n>?0Ie%L?iF9nuoG6Z#B`kW&?=}JZGB~XoqboWN7Tp& zN}tDd`OLQoBd}8%q*m|K%h$MGTzN624pjq9l^sD%O5v-KtCrB_llN{<kJrcc>iEIi z$JT_$;O7Lx;Snv>P-zLKoL9`O*z)#qjc@+*VEbq8^QDQA-(*OUzH`=r*4ZQyN=*o0 zD>|#}GyGj@GyLdDj_KR#rAHZgBC>O?l0%%7WjuOHzoX+D5!x+w849ml)Rx{GVln|K zuHwTNN$eNjLL$=GFG?v6!k`Y~xIrRw5)1|(HchNEMsGI4WHsWd==ao$%+`<2*3W*R zeQcps9A;+Z?(A}L$Sb!_LDM6(KJ^tH{M*u&$@&%SNUzEyv$x`MisA^St{gO}@9Tyb zKyYV`a8+PqIeP-JHydhq+GF?h3~db4_5bMBck&=|Fgn!!W{&QqG9D<qxRV=E(J)9J z#|`i*R{fsH4B4>$6s}zxhK^K`fJrB^kw12O7Mcl#b%!_Poq#vd={$dKTtwS9cr9c^ z;g6k8VRX!3vh4<SM3;N!1HR{>(tlBl7p=V#EHY*Is{yfs(_eKSSQGbkJL|oW`_<9Z zm0zq0okr)#(P->jzBEwFQ2Xlsp*Zg#_nsn3ciz!u?5kKwy@tK%EA6-UP?0G`1?E}T zbt^j~GeggNo7jQFw?j=qFViI>$2D)C&)de=KTR`Tx=no_?@wF$n(b?>9Z)0C@x*Z} zrkC2^?D5?YA}6q1G?M8Jv7pDm7;DZ{;fuwnQy7s1s)cvrYE2Rw_7#C~tdEWNQpi%) z6V-$?HbnB&SI)6jr0!-$=SJUd`~y5UDVZ)qRj9MzVqOo<R|;*PexSMOH7rwGwMj3V z#MUykhcT6u-k`#=Fk$i_ercm)qo64&&v)s_^ZnY|@rR#8J{|?AH2#pAzvsxNjyo<& z_(a2rt7&wq$mXVo>$UCbC>&I+onozHEVbY+wqyhu@W_?>R9E4)NlJav4A!mNrLfug z`7!nY&E_-i_tuxPa@+@YRLV`~G=|G3V26oUl0VKQD^D#PfYm7TrveKGtEWbiI-ZgO z{_KTV(P2N1^zzHw_!_K%4p_d~J%*D;xVpEhpp>zLr?Z7PyoQ67vBhd}aN#^O#lkbC zN^tRFs5z_nQNzGN^DqVws*s%%3j36MSQ7X3_Dj)sE<)u3*c0e6E%N@Idz{7yS4~PL z1SAjU{9eYKS+yAyg=f%$eNh%{z9Nkf%i?(_c<qm#PxHG*^SFKV>D%-3;F*7GCdk#H z$~U4TE|_^4J}IvEcUCQaxUrKHV-y-g7sBD=mfe!%wsC_7><#21_JsMHN`;ar_7Esn zI_tiF-V6CCjfy<7La5}*ohQT>Srm!OXmb4%E=Uk_+(6ZcDlI}a4DG81)D?r-u@Bf0 ztPB#&QGByN;jy>Q<OTSY{NgQIh*rd1<&0~ElFJx28v=CeApKcS)gdTppz<Wk23rd= zVGiC$+47Y)iw?$p{~&#)F%*OWuVj^e?cga1yJmAr*LRuS74dxoh0$E<!Cg?iMAm_8 zpyUYg?YV&$xxD?Zv@Lu75sM`}r5WV)4SX&Gd&7TbKW1tND6B@XTmDu{Erhv2Gpoav zC53c(555Nvx(5%w57<Um8;t7T)>jZ3x=jd>&B!pW#wx-%Qmoq3tJ+bbHBbDh#{n~d z31sE9vh&7~cdS?V@<g!I5;79-u|GCbcpJZ*@4uYi+i)7b+EsK@)Gn{r_4nCx-*tuF zH|g$kDOTX4<>p#!c;Cg(M+}>J3tmHUO~+;wg)eV(zC>a2tKu?enEYC#cJfe>C|~L6 zP^=-$Y3Pr%;XrUjH|j;Fsvf2S!1TA5%^$I~mYXiMa6UmqEP{R?!huTVCaIvZp1wj1 zY4ZJderUlW{u5QFo&@74EQo_~iQ)bSC>DNaMXIr(CT^{R3tVC<iDcF|lD43<ws8*P zcW5)ce<Yha7HD1rRbvB)XD+%WCF!;U;GT|JXT;FqK1z1l5UThW6n)o_fX9W->0w4H 
zn?o+0Zb>d%r>UOCCYZSf;M`fiX3gFR1V4z$K=b>!v+;Vc@%1XVBwIo!mFbLO1Whz{ zLPQ5**cv}<)Dx0g1^o6(pcw)jF$Mb&-8?}2@+P!`3rR5Aj+cci`$2V?+0s@gd%kw? z>)XTE5#7%BLSI+(H}VU36qfv5Xcg9Lf#cKp)0^XbAqAm^WvI1nCuMPedOF?llLR<w zwHzF3Yt*l@Fp2EQ$zOxUHN|LFhAcSs0^r=Qz}L2=nxBjPDlbR+{L^-OM>>eQ3n{cj zA!m4y&u|#bQ7J-@(R!h!wZsjmM=u@OT3z}hm#%ETKVIDW7I%Dp^bZ9d6+||#i7$=w zU@27$i8y<L9er4)ewkehtZ&bUe?c1$nJph?wxYbvG|%M$d?}%_NT0M9r>%D-EplS5 zvSMW}`Oa80NLIZ1H5VW+y;uvE%zV!knRi#N<eoILK1RQ8)zGhXX@2i*LJJ~ttVe_6 z!LG6W46tv1`yBmb*y_;k`rG2-&eiD&!FWZWXnLq!dX|y58%JV~PG^Zq528#rL!+`e z_MI8g*VsDBiaElyb@O9Q?cfWg4d+sjUnVzd)5byQmY#phj7(`SvgN`ch%KL^KW1hu zq_X;9sQBJ9+sFgjY|Hs>-2S$uy4mIRxqUKz5dFFbzm(}|erNS!?6k&fB>2nZglG6W zny|?YSb}($(F+2quKZWRC-~L-JDjCE91hDbA?4`;IRw&(6G(EUBlNcR$2i0p9c_+J zJ56d^^q-oqfy6?nKCyTMkeQE{d`4HzTTbirYo_fssTaR>o5n#(^REU&>L+6V%d#t+ zshx>4(8<)$_MbC*BP%#2MiNGne@<99I2f7#)64!}oxHpx3^Jy6=0FP)4p5g834@A< zgDDAvhP;uLsWFg*LD|I!_^%#GTSIfuAL^DSplex~Nf>^ZTAEt`NtoD~;TS~hZS0+t z9Sn_4NkEmF|GAB_tf8|t3DbYIge`&23Z_mX_O=f8cBXbf5>65ZaZ4MZsT1he#t>*K zYHDn6VhYF45BFbB@4kA`>kj}O{Ui^0TiXz-VE;7!f5#udDaW?d|B3k@DE~laW%&=@ ze?7=QUH=U*(?3G^zefxQLQL}C5i^LIf@=7gN;nyMkTCqeA!YgpU0?2lye*tS?!Q59 zTjM1G!2VBc|9a|wkT7%oZ*Z*th4&wD{~kXvcc6qa5F}U<24h1|aYQ!8e;p(s0srT{ z_>YkOyF@tuEfF~q77_-be}>2IU$^)-KqU#`|8dm*x#|BNHJ=3e9~g=R!`=1d!j+Jw z6iU}}P0;F47Y2k;8xQtlO_kQgFu$j|RtU4Rvo|N~$cco*=!F)U+B9Y)xkI3l3Bi$3 z1B`S@kEw7W0#f5HI9S>*!&U=1u2{N1ySKaqb*XGKJpI5t`mshosM>k>drvtOLZKjp zzE^3OeT&C&O}BA0x$p_z&&u4{d-&8v3_t@%Q}r9F99|;Gd35jDd2O;pm*=kBpT#vw zY>_QYVA@q5Ft6v&d2|fXhPz?DksZXqpo}9WqN3KMJVdIqF<}kdPYrKpPD!Ao*y>R> z2ZhN!qD=chaz3<DWY+o}xb+k*X3)+f=n#BS<F@|2QOHAFX1Srn!=n?A5e4odFm|w^ zmiqx&``MxM@!F=d=_|t3^@Z4_h)OCMS8^7+=d52h`h9&{SUJsEA68L;E&Em6vt#va zDN^EryaI^k#p0-rT`T&;=n947iO}%|tK);L^9=Zg=Qr;HL6p~K`_VIr{yhyjIAwCB zQY3{(W0{LvGe{4{yiRzNc2%cVNW{|K*izTLx@-`lxrORRhUeC^ZuN?5Rmd(XW?J$N zu5|shs+KiT@@aY_H`|@8-^(-GO{k;P$|rCtc>1F-qDwKh=q`gV?Ur*IM4+|<*H1NI z@5NMSygFmT)}@MfQZ;5i+l&TmihCQfF&wG0B;A_Up=Ud+OUf7@+{M%z<ODicFdkd` zeGUI9iW&mz53Vw6VUic0)XpBT+;?7=!i~mnR=7p+%XaCAC}RJ@eM5*fCb%gOM&^U! 
zGo^W-{BPBpbzKooxh2*>QgNlgtB7Ov#6k<EZ4m`Sm#_y};X*vQ<wq{%xO!O)_kxB^ znV6H)HRTPTe(HKEb(AH?Y-MKI{weK?xc7=&2GL6OWDWb0lWn$M`Z_}OByoo7&JXps z+=~8BHlA*QPw2^7FpK{|;r>yIe-w^`k^Mi)^N(5kr%RiJK~h9S*wES3goNqe22}Sy z*D(Lb@cuUiV`l!hg8dJ+@BgDkgFd>6dK`9#e7=KTjRz{D=FjhSZsB|@V<~Rz?YHxr z`{H(zDa=aEvsRij-THu|gRgo4oGC0*14OCkmaNF(4Ezva*PzeG(XD-I+>UQ%aQAJI zyO-pQ9+AH8$JW8eZRiz$+xW(CWI?yU=2u%9(etp!we01N6tdPPw&yc6<y9$u@wX%O zShpH}ULVh!%iCwHpOoCqjVN;mYgx1?#TH#f@ipD4+n<fSsnRZ{^_xrM8?WQUA16L7 z-k$(sK7GFvct55H*~S-`&(lm7I-cj|B<n|VU3Dz&4RtJL?=264LWYSqyvVLycw6tj zpm_(EIdv7G=W%nCp3flPj|U}&PwUT9w?7P60x35&M%%{p7{6u@{<<;Zy^-u{V1?6f z#eD8FL+G_xj1Tf`U%y5|<8F6T-d*0lskibh%K7%A9n}zhLY}vKpUo}mSfh5oAPeXT zljA*qS;nOA9U6}%S@lK840ic=ez}eQY#)#P^!&RQ>qKFeGg_tfzT?|7@bd$vdry%5 zokzQ>HUM!Tju&RAukC6Rf4b<It?fQJf!*Aug?2ga@uZiysOQ`G6hyOTJ<ARklc53^ zbho`wQFX)CTXQ;ze+;C~Nb>e@H%l`FoeftBQ=chb?^@Hs?z_<SA87v-SfdRo%VF<s zk1Po)yU2wY`HpN@s}(1@USPm$ILUJ@<kAqqr5X4w@>HGtIz3AdffoG($una#Afmfy z5vn)RGMy=)vH*YFn<F-kIjl)MIT7_9kszHX<?p*_oX)zEi=L?p+%~4QWz<6RxC}L* z;w){y!XpR{4&6r2!AalD)kS335rtQlY+r#r?$KbrUq-<IsSC5<M8wX+*&B(0B4GC{ zB_h@asd=2XuIH!Hp=mWJPtajfz4f?;MTZQbc@=-Wv#n;lW@`HFP<EfgLxa6vO^xNv zH39shZqT&g#2L}>t|&wVZfg>P+;(PYMCCv&2Jz7g{}yNGomF^zJKy#PeEr4(CatTq ze`}3={g;NXcJ$b)zp^1SC2cu>VF%;}6b9I#B84m)srke@Z~Dj5l@I!1Gmz{UE<VlC z5I*xWYL~QOh3et4zeOHu8Q{ACkcMcs-~x1-N_iZi@mgAU(?(@05H{^sAQvQffV1pX z&+fXWRHz-PQ^hd&EykCM6HHLu{-5(=xX!EI5p7hxzWr*fbhp;;W4;6*e%)et&;8oV zgWjEezuYyBK0mJSA9p@Lbi<D~3mbF&dyysTzRw>Eq1dtVSNa<Qx*s10<IT^`g1n!= z&zGH>clfuOwNdk0aX@x$SC|E*=ki_CwhQB>r5q3H`){tF9-$kSf<*)X@&V}p-3IZV zgy$<e=hI^-n6M*J)=v;?<A>F^$6wpi8_6)A_s_A&uBnpTCb#cvv2Is-9X_9@H)nMx z>J_61$cQp<-Y~O0#9`;UvL3$@1d8v;CWB_!0OE{KfzO#7g_wb)ev?U1FFzGJvWH-5 z*UZ*%28X`~pI|K5(RqqHrVm>n>X?{dFxXSeK%^AGj5#Rq${33RuvReHhc-mxZ8q^N z;<>2!CoyvS7O@|bpI9I#Fs8oLDW`pJ!NI^agZpe?8hV!jW1*uT5E6<RVuW)z<}WOD zuJo(_UdA};V}`-00%n8kIW^?Y6bm*Bou*Q>&n@<V7}r=Oo-F}+vb4Q|Lu>|joj%Df z!1>6j*n?{7+=Q5n$!kl3t$ufpI#jFX_<1X-Us(^M`vrY`Z-CLfH=SJWMk-2Ca7kBf z0i($Xa6-bDZ8-Ino*O;V8B#Y1A0cxP{FUptO+XR($gn;5-R31?2cyhZu&9Fi{KUb; z(&Zg<SHo{X&G&Z_fT*JhJrf8PL%@%dO%kxKgw(dfgYdckm4j?zRjjt8?|@=4LvT*| zu1&HH0(xb8%00&kN_*ri9j%*rq{@-<(UXWQaNUUW70#w^d+@g=-iMqb*hThrg*&Gq zq69VKET+zPw<2Fu3<HNj*}47ma;zjy_l;&wUXsGB#hk!p)m{`Q-kZNjx)P2LqP54| zJu|pIL=IYzNs13OfMamN1O87%g`PT&)zWq(d)UslQ5VJ-k8470Da!W3P-CVZoWtCo z8JveZRQ3KuND&NoIDTpdL>K$tHyaTRiWRF?Um>1@jRU-dX21z5aE1re!7JI9DqoR^ z8J&{S?|pN)Lb?h<Jo_GC4(D~W{v>f?p+o)3zRdFuYJ|A7fs{wbQIQjKdvPfJVc>Kj zI;<2-=lf#{W@(${$E8^_mi{P7D`P8TNIx%>IgQ(2`vI8TqDI&l*K@0%sbo`dpygB} zhQID7ud(=b5&cl$W9BPvTR;JX3vq$uafHN_->Yy*nkE*Yya;0%r#@n*4Bf5d;|>>9 z6!2rv%t0{Q2unN2w*Pgk>&6=@`#Cx!#-W69m=_stxvS@S1IL)1ay`rY&Qj|;O9_Nv zRi9aT4V%~c<cb8p++vb{-X=@Bj@mQ_bI4~$Ph)PrW36=)M%1N$L=)2x0}Et8@Aj&} zQb$~b#<UvyjD){67<@u3{;A7&HJk?H!axhtFHAfehbh(IJWiEBL?qWzcAo<%r#_^k z$V3`$s-7e%_(0>#Tt7a>mcq!vmGj=jl-@c4Ndx%W0&ZO{z~9A5qon<zbxsP0=j3&{ z#a%(5zx8q^F1kC4?!<u!(YD}rDGqRj8#(_w(<w!($X1rTS;TbnJU8c~S-6igZ@Oiv zgJ`5(0{juqCXeW)-bV*ScT*+X2=F@78Z=+)ACa=TF==e+*G!G>!ty)Lw55!h-uXCS zu9t$f6>3VC{%p<lt7%aIQbKKGJBV4fkpca%caF0bE68J#{n*`#jUgW;xqsF0?qGh- zRxO0bsBV`vt3Cxg>;@O?UK5)I`(9wRtbK|p_@R&4E($vb`l710pnF;3=pSN$i)o4$ z*PV8uU}aoCN*$HC;e3^gG6q-W?tn$6^gP|s`CR~t7zL~!5{i}S-}KatnZ7da$srT; zySI<k5q?57Kein9bce5uBTK*G1I0FXYD~rdY6_}rNJ&%P4QNYpJTle`PS>Jcjl*_d zucF%TBS*BS>mTDgp*-Ga((nIt76tm#uw;FqRH1G`Dz}r~3}4Ef>7(w(0symju~AVk zdg_%Ybb{;I{OV?5_x%&V^AU@-Lemgw&|D!y2r=q2^0(ZuCYzLb;dEg25Ri(`!gm#5 zA8Qqunr5l#!PWczj#7v-i9p|<vNy5guv;_ti660OU(A7X9BTA$FVm(Vcu{8^><%C! 
z!3%K##+HPbM7TL^?N&V4k#U)Aw->(S?`;e)2~Lx=s=34%uJ?NmQytnO<(YOUGNGaj z>iA6@I8h20!z4Q66J!<`A%3afmt%V;xjt$63G)LQ9!w(*IC`O|stq-Wjp_!uqxKkY zZUi&E?-vh<E((&fjt(>w_yN>n%svXr%@;c8t+|4r66MvGKMZd!9h|!BfNe9fSQ?gu zv}gQU%d63>w#RoIVbPU@UI)o}R^*f)fZ@_;PI1EMv@6A*PAx9yeH&<~uV%P3kSr1+ zV~7pZ;UZEqR3C2Kd!RYjY9Yfgw34yz1q$j)GBIi|i*8lsR|A%TkwDxiti-wb=zh}( zVTp~iFRYM>z?&dLNFZYfuZNBE7g?tEKW9A9VDk7YILy}(jBBr;!0zFmg^*k??InWJ zG#2UlZgI066r#W`X>JEvZQw-kXemv|c&nx_21aeO$`(Zho3aV}w-T0(g3q#5vyNe1 zL%R60T!8hlo{=dio*}Azv8oNd<m$#mJ5xZjs>Z!65o*|E9h2;^E^7kgUHTw%yECdi zV6M9~;Q@8;K!>YX69>GQoGCDIy3wezEPH|ltQ&9Z8?os;W(PcnwIo;s5uEc2gPP(} zTE`_cb$Lch$fUW_FDAVyHW>$m8k>OiFK$6LKTGD(CS*U@g3f0N+<b~iOy<J7|32Y% zc*>kx8FM;PB4l#mh+IOzze$I)2f=Kt;*bK6d#qg)yt8f?NcU6B;P|18#Px9pJ6!IQ z{Dp3EixGek!>aoW*h7r|d4`STP9dGTC4_~&bx-gJw)kI#gHKxCTa0k!w<e#l)wL}@ z6%Q5T&L)Q&bv##vCX*-k4rne&DA<QsP;;Vj_TagPt*#m8uz;=G8+nsu$J}a6qol^y zqOVf@-{7l>?#Q|Q&j|{ujndM#wG^pbpCdJLszlfI0?(F3yt9SRkC7ZbS<CAHFVUH9 zV!uk9ai+{($iKRNfgX5x8$=_P7^;3{do%U?5bM+LaNw%@A@!}nMQJzlYmf3QUGNxU z$AV=oA|Zyrc;T03O?vh4Sk%;)UIwP<w5LKD)pi0qN2cs$GJpkw$ggEGFbnNj&-_Ec zbol56CDRaCY-7zfg@p(U_Z*8^Kw<q?-mI+9x^ze=%0gnfq%X$pqX*$d9NVcBS}x6k zb-G8htGZV+Tl0@=XsQlHN$*Xf&VQ5M4Me!=Mx^dIZwQj!QOKMPh}Bk*M}Kh&js5&l zm7Qi%;O?<_B(#FO*wkqAbj&c)lw*<BoQ6C1yP9azh#!_B!x9gQgaE0rIy=fA7USzN z-XY;sVrD>(Ardk_+X<*8n>70IK-1s%+LRKpLN?((B;WSoE;A+dC<+axpfW%hXeiGN z6-}LJL$uo@u)X1D&M0Hh>F4%k@z(^|bU4|mS7EHf`Z^gNJvA;??JB`;hS8td9{ez! zd)b8iY4#BGoMMPqACp|+94eIH*TitMwtNE`3V+_7TD7|<+n;RnwjUY$TlggQFeAoO zeGvPDRx}i%A%SY{u`zp-LN|Inz_Ifx<0xcOKJfYZ#=Y-O1%;Jw1}nS}^Cc+GX8P&! z8ec}NjlUs$Xf?dcF>M(ZMfK7Q{5ZwIc`M?nl)F*f|L~Nda1t*8$m4jM81th0+bl~O zy9OGQK4CK|ZJ)<cqqR5><UzGFO~hZ~tl*@Ip#_hp*X<>S0o5BQYw8QY9b}XGis~8y zww;7I7OC}W5(24BP1s^{2+qNvPiPi<x|h#4w4OzU#LU^`Svtdk%A0nx>SqqA(pe>F zr-4wa)q;_N>`)1EQE0XB8m5-q!+^2=`1zclkYDO^aEk_RVU~bDxi~p5YV(x6)0s;E zqiclhrS&jrEBMR2C4?KC>$$@Bly^Od5yC+<9MzIo;zt&R`ytGl;|P!Mxp5`-sZ@Rw zi=><xQ~^4mPhvpu1tohj@pWLpf|d2bGI|tYF@G|8h^dq@Qro9v`fOVIQmO}@g8@ug z7HyLP9*Z)gzb<G>`co1-h+&;n42nmu@PI&WUyaFNjC$Y%CJ^9m=}wlk#>h@E+XDaE z5Mf-R;POizr#%Jz7joQ;o&Qw==;fG^yZp&tNxG3VTF&KT&otNSvC-;YiSy)b?Lket z;g=7-rNuw`!!2+!ZBhh)Oe;r4jhB1|pc0C0_3~&E9w}NtkNGN$WF8qTvKf+%1q^{i zxM%|LjuJEa2fY45CY|}BETlEtnACT`apxWfz?~>_FQ{i#qW>}LruED3r17P<HWrQK zXaVVoW4M|L_WVUjcCH02va_JX5+NeiDnq=a1>F+Q>#>H5Y7-WKz@37c4TMun2_eC| z*_0z$Awp;W5oQbl=8GT3zT!cC$o`7m@wLF6B7Z(DIBQv5w9qS7Ny3NRT>_4?6EQ*} zo;Zqx$Fu$oPEnzOw>^=`f`|(#q*JR`|7?YqpZ40GicU^H{o1}y$}RaU=_LMH#+=nH zT8PId${HcVa`+u?BcPsuAXC@yOU@xdcJ(kK{5ut2jkC-^9aGa8PJa&8_0p|<NBT@w zk&f9AgQkA-5pxAG(^Np60%l4vA;$0{PM2~eUgQBE1^UUG6VjdR7;LuzF4$C};t}@F z-tmfzK-INlUr(4>qF>yNPA2fefk-8)yq7ieTae6xLH2Ha#%Z1=UF{bIG`pr#ty(up z*eK&GHOV3Bm)^S63YFON({LN=1v0r}MsrX1Y9r)vLALzD<m*`Q_S4x>R>mN7VJ=0c zjz2DScncPUYNHAh70MlY#hy{NLedXm=66&Y32CRW!*E|+*pbtPWFSK$B3Ho}Q_vTk z`s$?WM$p~h{bF<56tlT=Eg0W3<kccQ#w<Ba>PJjM7Mymv<4ExSbb2TuvlK;s(|5^) z-~`zHx@x=<t;L7`+k%}aWFl2(!7bg{KY!KN@zGC%LfGQzopM6zz-T#fHZE$$o3>xS z;K@kU)^(1NPlal~7U4!|4GXhitZ{YB10K>SDtDt6lK!1-QvuUsc*NiFd=iapE#KaW z-)Qx*2k|=u0^UA|6LrlO5gbX3Wc#xsxm(xLH0B0#Gb3=fk;?Z=K@>m8icX{Z_sfxN zEW=z`iA2#x$vJk9DGy;8K@Vo*b?EvWu)D8O^G9scIq@>X`O7k$0UqaiYdJ`V!LNuN zOXk%gHAlkj+E`;0_n@R~W`}aYIU=lq5zQ6HblJFe?%a7x3BtNHk71iw^r@8b#_@H@ z*n4Rcb$%O71~#&L)FJBNO<G)3dNAH=*B0ZP6pYNls`&VOk$yEeY7cf=Rq1U43jR&S z#7U-#;M!Fq5u$He8{ySoxCbHlbmOxNis5_LBh(=5zhoYF2gR9whJp`I`Ba<z{TiDl zxAZXczGGI-@r-FaFo7${iNz>BQMJ?AQ&ZdBlC_8d{WwvTbIod0r)Ov<BW{i=kqYd4 z401Hl4viTdp`jpdDL$k2myg&3!o^n!X`-?Jav+rc3gcVKol=$+>vQ0kY_4;Wu7jc# zz=aUgnbfP^yz#s*|GQhHpxO+2_|gQcZp%K8^;2j%LA##?wFi<sYAT!Y{0EB~Zc_>F zuj>j!re8dU>LRSM$(|W6?zDj;dP|D2kWJ}cKIx)DOAoS70$z{cpAkP)itmH*TwJ4B 
z_ZN&NW$}IwF<g~7FlBiUD({fikHn7O2Rj%oBxS|v&Ob>KQxRrVJ!0c}4H9vPZBg>& z#Ms~K49PNS7G#sa`!yY?{b4}!*z|n=WrCQ!bvN077{&b~tBzMuu8H{EQ_GBb*nw07 zXb?^&9$zMOf*6I(VCwqC{Y}lx?tOAD$tRM9ze{~Csb-+BVDIsOF}(o2Lp?Fh4bC|s zXnok7VpX80&5AZ~mvlDiyP6|&1m(;M0_(K8I1_Q519Tw<(6$OTGeL8R`|DZ|rmNor zbT>p(QHt?HkRe99tvmZAf>)iKuESXofvP8Nr%uW9c+}6>Av3%=CIJQ|us*~&kCfY- zK=0pJ6S`p-zo*?JTH#^wABixYDwgSr&|Kl-+W|kimbq~rJ>vz=1Np-{tI|#9I?(g9 zWr&evmumQcnc3X3Ea3GS%LI0E@yb6OqBu910=<h<wyOqQS^1^IZ_W%My`6thvhaZ} z66225r9!K34plt`hcr{?W#fZyC2zu7Ar6S^64=owJmoY?Kzl)}HU0fwNrCGY)0uqz zH^XFo__@U@M!c>yGd-gWiXk;ObKmbzhU6uid-XAWM8x&6mP9hV|6cxUEvl<3c4X%t z-Z{qf_ee$c@qO3sh_*$g5|0d6JdVqlU!cjY2FMJ;;5WN6BliuFXIp59zzpgxZ~TUe zZQnCR)Wh5Cr($!z5bf_MqMc>_%f%8bLoKaP4+-8q`n#qhd1I!}buVVC>P3R{Pvzq{ z^La*^Ed93Q8qa|RFXS7+qgp@)_MQK&P!_$mlx&!XnhDFnrJ8O(l{1HX_u-OWk@?=? zGy6-!X-&%9D3nBNC%L;bvpDicIbI6Yb46yM{QU3!vzT7Wi1#V{W!XfyiMeeYs~J5+ zZPylb$no65B-+9}xL9(8=ysv_p}?BF<vtvWBn)f6@56@ras~5;ZkCK41rE-Eec6IL zq`!adj6_hreWy$8YLykfgdmj;Y%4eta?l)T)vY=t(O_R2ag@=(Q6AvOiEmQGPJPNm zogr}%J~rm9w1O@Bs8*i9V{dD*v?go}s%TWm!moF73){y)kh=&S7c-T(;<r(8jY4Tm z(t<wGAl(CfkE#Ne{p&@(;TTig#~fgUV1h5NoLQGa)bbe*>u@RD)CyALWNgzM@Vl4P zHz?vc8xeSQ+awug@++VAiYJq28~>iee_%Abb8nSGLh^T%DHK(5s*P;hf82+z7OY1P zjGgZk*4azD!#R9Z<e=(Xw!M~i9TQYJ?W`vBaspKY)<+A3_C6SueQ{p%M^p^X>@u=r z<kVYWej=L-g(WNrKjL^_pps1YIl$J03p;)u@VLJ>EU1CJ>d|ttZY6{rGSwIt?nhgw zVxoKZP#u!cET$P|X3I5Ly-6I{zVDdNMM2bOgj3iHmoIrUgljAF)S+tO#d$PSEnK<{ zCj(;c+@O4bpAyqWyyE1)>lHcGvZn`V7V(z`@F)?hMY2K5xS3xT8U8s+0Um)d5;`Mw zD5lFPp1-@h6uEZ=orJf(mzZ9PxL7;4_5mv~vc;?q*F(!0T(1l#d|o6p4Y3{I=hzT* z(uV0|njue%P>&hQG^&sxp7QPf=4{&G=j*C2`}>xybI{HPqN0fF4p!@~-y<Sa7&q}1 z7jMJ;DqZ<~IYaHK^SA5f8;_|0ak6K=LAp|$3&)R}gA|;0B46bKPQsOptwky%Vsiz& zLELzMZ}P>x@$Bupto-%%9vARVc%2dyFbi+20)rc&k`{fw$aq?O<G-}xh|s|*N_|DJ z_Li?WsofrW2z+1;q^a>mHZ@|NKGk=1>^$DFWlRzJqAAQF`<-*-iIIT^Mwv#k%!6vv z$Uvd&-A+KyvjJ>C?vxvttRvmCUM6>}6xIR-<c@B{Og{76Ne$-4)v{N-u%Y{In&=ZH zD*c}&hgLHsL=81stZo;$_|Yia`I|ag*|*fiv7EC?M;~AG38rf4QoZ$@GjEm|d=fjR zPBWB_#GBh%&FW&(t|QW>cY}~uouP3dr`D58um<@0f!~VJTZRmu8p}A$3$4L?sD&&t zyEe6@eJ<x!1E<f6N?q$?7f|66%Lg>hOuyNFJb39Umbg>IwG2A#*B+j*<;Nf+)}?=t zC}%URjbx(3d85zOrT?UiZ&{0XKU${nTAXRz9+gqfv+fnTDG=PPz8L+MEzDje?IBO0 zc`{7oY!5thV~*YJ$!)><AdTmITrlCLU`aP@OqwV^r8f8$XA5F4qD_>X&6!vo*0>+f z_3E{x<4|;ak(z#o7j!(`Y-N8ZjKD6pB*H+m&@Q)J^ng>yvdVxS-j5{1wayquA^tYy zs*Qs*RM}>zAX$Go$m^H*PM(k7yFuzWp=Ah~DJ$^WPI+b<Zc4~p@uCtu4y~5{)HY4C zcUQ%d!@&Y~H(?*JPA`rcwVI$XC3PMW=TclkT0Wm0S~yAs?A9(CjEimLcu%<6OSCYr zT35iIlX(o?K}fBZFDZf<XeiW<Wi1bHK}uw9tvcX-pGtCdLX*%BmtP%qi9lwFyS&)v z_TQANR<=kdILv)6rC_eWkid?}?#lw^9UF(SMv=w0D(eie5b{B+Q*YAZ(YHtBI5e*A zWh1S|vc>F((bxpb9%<w?nOY=e?_SV=ia3QVQD0&+ZU5lA<{37@k8t<lQP{%~Hjq{Y zF)kVU`_mDAw|D1e)0vvTaAi|=wr$BesS3%gkx|?g_Oajz?kT>n&Xfo3$QK~&r2%6= z)wWcCr6)L@2Q$pt7un|}GAm~FCWyZIG295&8FH-zgJlpyhG8K_*4aigbk*lfbh41t z5?lu23B?9e#sUzJ@ovmhQIo`P+bN>Sx%|)Ir8MZ+RA&Z<A?eIq&#e2#Uo{9**E8^W zfP+R1`tn1$L>Q9I5~1YVVA>cJ-z72Js@jm05n7{bXlsAwFZP(f2}6t7du+li%50>3 zFL7@KF}|?;xncO4q$yj5z>tloTuP>)AlnOoi+p{^AA1s}>pdL}cxt|m;vcxd$~mkn z!zVbn0d|45&+wZS=E7x(>~}V5`$w9u<x*y;Sn?{_2G)V)#~3vWa+s^?k4v|;JUK&k zovC70+`}EgiSTQX*zu~~bjVwQi%0&rWb<y7%5R#0?GLT6!`NZ#uiq(6J#jb7B0Rix zUd0DZlG5Pj@)g~LX!`f0TPuZU*jUt`b^6v?f?0?~Pl$A&O=~z1IBqsP(=Sks7wTY5 zy}mK9zH<bq-z{}v1;}dixU%uMzH6QdI~M5z71l(0-jBK`<mYw$tv~-(hygfvL~61H zzkNJH?8`ZHAJZ!uzH-5A)@%Z(Yi%u@dng5sY)-M!HfLEv{6{7+;s^b-aU)wQSH<_S zuxwbDI*iWMQce>PIr!!p<aO?1V#PjkU1ZK67_g`ZVY8-$K<PH*MWl?X?mB!)QHK7Q zzhr$Wx!UrX+fb*gj3I9JCHk6VrYiDn<`;95!ez}<On;UP<k3roA}>47DfK;18Szdd z&!JetVKvd%^l{rr=jufLE(Lc)(jTX#sn6;kv7jwK;FN*1Nw%y?SB^J?Ju!jm;sj@O zf-8PfSgSAS($nih&*-Liy<!|`@ZHQByd-p;S&AAK!=6?wr|%owZW?f>PE0Z$BJn#V 
zHTaIaEtP1+hg6B}Q^qfekg?C`J-~yT@W7FyK)PjilHDM8?X72q6rMn-l0AM6JCPkn zVdojRCt8M{Er@j~#`*p?_dDUaI$%3cQBxQ0eCI2(VYq>9(b<#SwW*#Nq~-2{I;A^B zVkVt#m$Y2Q$1S0qwlSu_kJ?B)uUlsm5yJLWw_91BW+rh4?%$2k?Puc+KkR*qn&Vl0 zZrP3FOv~cD*ndB))mB3m1{#w+1kB%028!=mUVs=sscnu8LNq($TgQ$Ko}0|gEHX+( zm093$dX&D_ePiTvjT9rKtbnoW)}N)tj{BYhjIx!5^0m&R!IL%aojaI>)=in|?+t_1 zR=YNBR)wlJlSi!XQkqxOHFa`-Qc&rKguvwB%suOqX;gp;IppO9h}DMoU|4CHO|=-F zAWa-uQKN0a>irp?sM>(J6FQ_Y7U&p#tz@UYd*vlY#7{w;CL|6?yB>r`Y}2<CJ4!^_ zc>Uw6n8SD?;LA2#$OF8Bh}F6<pmbM~f|jLpuMjKb@yClJB1@xwAoJ605F+W}b5#no zJtKMcn1zv{Mu@K3&41W9hCtT(V-l@+&rQ-5QRI_v&PohHTx_a`o>4&)DePQp$0X?- z{JA_zccksoD!I^8tY{c@ExGOM4T`KZPIrsM0rol96eA~<>KGY$Gu|rpq!bh<V&p>w z&0y`aOT4viOZTiYKV8@VDQA5;u74<JCFF-Vvr$NPwf84zF~wa0!`0_JgOE0`!!3ZQ ze!O=NRbSZjC|8@aP28lWHcn18)!_0hPKaDL@h<Q_j*Jf<&LdYfCw$zLjM@(L8(5ak zch^;mZ%aD)F!p_HMViy;dO4q6fvJT%-<XAE*E-byH9gD$aaU27g*Fg!U9gOT27}>n zldNV?1mMp%C~1%*Aqojb{~023By{Z<^LV~-3*Bp)d=;8^CwJ|EW?3ljCvpYx8xE3z z8*CI9Ff8_4zlxf8{B68%&#Pza^c%WM^zU$4r<kX0%3T~G-Y2}9zl4IYfr`o<p=e|U z%13c!3=7YFpeh43VNpE(Egb`tkk`pH(ejX)N`^A!=p8wGd@7TB9W-zV&looG^a6D? zzb~CRe}4Q_<jbkVS9WitA_{$PCaUmmJ&sJOvo9EuC?|5WiRpvaY@;S}Zqe_WJcTCH zpVAA1>$g=?7jg?<P7p<h#4OxDjc$Xp(cHD=6?fOdtTc!%Gh8!MM7QhYm!y>_E287H z)DUZNDW47OmsgqQ(dcaX79`>qujY7PCciwZvGDRfT1Jo-%3Pr*6{zU8>Q$M5eE7(v z=8_P;X))`wev^`9#^LoTo0@hI-q0jUZnuXn{&*CWEwKb`^C#TWes(EyZ8Ni+|9&y^ zlo|eEg!t#<pYhd%y(bP_Qvc2|<Y<h#Z$W|awvcNtV2cL`lZvirL)cyVja5fT=L@IB z$T5@R$|HL>s*_a5rVB%l3B!L!A`njq$>Ja>??V(d-4$6@XN;ZUT`=dT!pi2}+jP80 zzIgbCzQ2noB^xpv$pv3vnki0B&Px{8%miDgy~hp{1ZjkDzbU4~tI(7xr-rB3ELLb2 z4|k}qf%1w@cBfe0?(RCIy#1+@c7AnLM#x0)X_7CdZT~OE-T_FG=j;2O*)evoW9%3^ zwzXs1wr$(CZQHidWA4~9JGO8C{<zP5;>H{CMs##_Wmi^KWp!ts%>115Rdmk&4Q|1x z6UgZOa;y8puaB+3=P+O&`nQ((u(k!_@B3U|on)3)hxd1TWWT(<c$vJV7+i3T)38%; zpyT0Vc%fYp%2ds(nCm$>pP4PKb3iMTN>Qu820`2~t})%wESY#}=uS=az_?^ToK%#l z>o@iB-6x3{8Tt)2mc-F&%{QTk9e&EUp{^U9^EBf9B6xfuYLatsUkx_mh$E3*A-V#_ zqwA(K;e&K$Q?L3~pzr7nf0D{y>x1Hn^bOkEb&@9;AwxgcNyr;Jqorq|kKOtzfdzIC z7#3C5KrAXwc=Y4I9rE<nBUT%<y_$%IdCW|72(-m#(PIGu%#>&{$}_lFLVM*OtLQyI z$9p}Jb*reI$f_i>Bq|aPYsmE>hkcoA@d9ahHPa<fjwv3qY5YkqiaC=)Sj_2`pDqV| zK@!gC_jn3Z71|d7!o&KhD{vK(tV3rNj35W)r^ZDq6tV)zMg`fCDk`bs<v3y0^@wbw zyIJwI<4|nEqu`rN`B5$HC9L3R*1_4-t7I90QOd7hlSFl|cbGex;bisdb2oRr7;FxU zii~t5!xFKR&+ZA+3_myPoW9Xzekpb6%a?{-p>wdgNOgK7jeYl~sbmZHnwg3Oh#_Z| zbB%!8F9rXsTee(1hx3Dp-i;v2{c*ck(S!mXeur&vYZ!0;5QjuoaRLrPS|8Jea$UCb z1%-Vedj9_>(*8w}{ez!zurdF?L@xu!f0(rYfk5T{MeNGyJN!f3GBEucJyS4tv~_kc zG<GClWcYWApskG)@CabSKg1dkhy})RH`f=ib%WLXCxMNF5$M9qsQv$8NB@=cp9}b> zEI9{TLq%gJ0w4<~CnN#{P>tPy-0;6}VIVL1udU#Jw&DcZ|BIacC%d2{1MI&kVP0MW zy8q!;|M^bTk%55WAGlQkSU8Y91zsq@Kj|V&wDbfF|7Gczfjf-<g70AIL>PcO|Djk# zSb(3I{(BVTKUw~L$N!w`zku^UrxUkkC1Co`y^C8j!7~1*s>H3C2^jxFFpFEW5HSAx z#K7$TgVFvg-+!y(KO{BNe=7F>3Z-Ub0n)_(W~pHrSeXA;{rtCV;PY{?v;BvYCUA0a z1~R?>XUZ5@1^>fS|1YNSZ#RUJ)>;QJn$=CL{Y=|amUjnN02jC`v<=MV<t0KeI`I9n zpT^BE2n48{F$582NuGWArmQTGD5pHz{dRx3707W^rc_~4t<k*9wEg0<o#A*hNk@KI zl$Dg|pdd3L&O!0Nc6z&f?tF~TKTqx*0L|%-R46pv`-jqk9LlN&w_HCWJQ5^56{%jC zENc!IbtY{wDXg(<PqHs?G&|c}d><ZXE=B?PeBK`iha0U|d_J%DcjNus6uxT>bskjB zdBcBbUIJ?H6Tbxr%*RX3#|uq_3#|uBq4SrY2$!BsR-ld9qT2T}>MZjLuaOF_l+IGO z&GS~w(A1L>G*T7V5!CN_xO})7uV`6sD{LkMHLooL7dGxRW1n(*KTj7*b$wp2v}Gs6 zUs99bVjm+WHmGq6o{cWVULIgL7Q@*WVcMK!Zm_a{G?yeeqs_v|)km}O9}yPN1s^(< zC?OoONh>)6mUPqu)}5@IA6_{ZQ_d}~aTReOQy7|Sd`}g&4V(c2%1_+PHuyR?IlYY@ zrdMl~uH>ShMepOrJ`XBR62%R82^qSvVe%<ymqZMO)S#_og+<jtEwLTeQHEybUN_0N zlT0*&=hehG(SnxBJaEC*Ol#lw$CX$1Cu)Y1;D=HWMpATwkFO;}E~5XGj2AqnN7TG@ zd>)qMH@ECQJjQ=_3tH!Hg47173#FA?=!6Emm9${kUU2(7zAn`6`4)VUAI8$f=5~oq zS*EaDlcTT_8Yq*Q#E}{)m!L5xE#KGO%t%n2f+gGZ^ythrrm3;)Wv`o{tCjpsqtwS6 
zv_+aJMwu$X8mq$Vt3n<tL+>j)8_hc#Ejj{J;f+;b4He+^mH(Wo#Gy38Bs9k=J<8Dk zs~Zt!3p;rwS&2n+o>q;o<-ZH^94h*ZzSCRcn&0DF?rCLcb2m51b|sU>N8xLwzUH%o zr1qQq+#y6YHcs&lJ{BRdC_2a5$!f+Jq}JGpsnmU)yjyUCNp^}lM4Re5tAeh&fv!-4 zAXO$PPrjf{xFkv2Kfp4Ws6b(<5kcFscahWh30rtYLcKI0vgKtD9qT>h23JFW>Wu$b z>HS3cLyZMYSh|qqPs0U~rQqFb0}fe*Cx*WlCPW`Hi;i|XINs^m#PvXTH-@e37$3!# z(VFLHMUCUsw{$_`&}W^otoH)2-jdjGQ4u#2P4{CfnY$!Whe}8y#YK_`MK18gOlc~m zzF+)*7$y6QwT8Pzu;j~kl&B3;>CCff^wVigQiv=PhDvc_%HKJ6y*Tv;L~aaB#(x}@ zV^lL1U)&yKM<^pDYxG2OeokZeL`Q^GL@sm8TCzJ6=?U%3A|TU8#kIt+-P6EMolRBY z)ShI3Rf0_*f`M|M=Jqr@m|hK^Kan3_7QLI`-2HX-l5b6u$1JA~kee4*;$tT$K2@Nb zuqs`SSrb|e&m5iqMM!bjy~-RwO5K>WS+Uf0mZUX7r(tq^8pK59xiHqIAjvg1-ri8= z=3w~>eF7PV1*y#p1N1UVZ4OIfKny+J3+Wf=0R4l_I~yVc%1?SB<T4kd^e193;DbXQ zJhh{@O}rq$F}y`LQAe(_#Fp4AGD?ua2~=bWgi_en<wB1mRH?Eq<2t~9VA*{3J)|FP zRnX7Z?r3?xv{q(jU!23l{3JWRRkBE#zoG1{f`eoP{m~`LI@x(9uma4GIs+8d0S}(i zCxNxyV(Vc?*68o8Ta(}^8n?NTmRb^z(D!f6S_#@jR0ED1jpsGVO<QGWSyflrHCLJD zogE;u6C0y+&a%!?)UDc@W_W3VE0vsNu-LWaDUaYqtQwH0zgCA}PhJ<sI}>#32!8;B zFzJn!9MG5~<Q-_#94<h=TfVVPa-ip~M3%_k=c@=foCjABHFPPg^S|^T7aIBMVVynS zez0BfW@XL#Ufn)^y!FquSKZDm?oDDDXRo1ct|jxhFL__6l9{Ho<aB#QA2;%Sy-)TX zga{fn=h+w4*#~K>8H;bA&gLvI!)Q#HBFPY5@mC%YTP0Kll_}g+p)rXY$XypDy6($~ zQmP5J3n`<r#y05y-J^0OsLpR9YUu4HmFeM9XAyrae@C5#*;%^9n1%bn42?z)jm8$8 z&?qhEB)0t92=fhXJw(-=gjwjo`EW6oSu6cKbG41J9dwhh(PWmmMPrRNM|vsoz2Ikf z8_P8ye~;KO6Ku6`XuFhuBtJ}+FGU4ey!MRBJlhOe<7bX<aoY!(8E;Sr!RLxei*7H6 zJ=a&r;q-Z{KXUuG3^7U$@bV_A;4GZtF0SZ=6%I6#!VJ1h6OcGELp003$!N(7>kh_e z%YmDL_`cWo&IW6K0g4%E?p4Ba>dH@6D?=_K7iqwV5qb8g?O9yi3AIJiIVrk{U513E zA|TCGRHYrQHJTb$_Ofqd62}k0LtW@Ron|_us*LuPS3y%f7|d@ntG7`>Kf{ZNA)av| z0FD!7Jo-!5$IIUj=is<(F$s-+c=t2PEc2?*acB?nh;En8lGII+?Iox>NUG`B>MT4T z?lzA%hdOR>X%|k?!=^nSK%YV>h;bl=r7ehY!k&u?jDgJHQ^K<aguv`Hm!WnCNgGCi zO$0i65S)NaLmk#$c5yU%;Vf0kPV`E)dmsCkWlf*ny#`_Y952@Xb$|rIog4twoIp+w zD7u82ay$XIKZSHW557JLg~b#dTP0J2Kr2mph?=p!m$B*~XRRb`kOGrfung%WZ@J4T zwIV+MeLa52Q+C+hKT_to6rNc;JKi2BoXQvpEoNzfuVdcLc!;k~QhOmzY9!QGF4I?T zA0m1M<wKnJ(G<iJUqsv<1y_t4paI&ScQ8@)cOd_0D*tS#a(Ae77dlyYAzgDbT@|Ij zcz34qaJm+QzyO!h6sPn&-4I#p6nh&tY4fKltI{7<u>RA%?Cxn%fasC!@v<({!?dyL z9T93X!H;@9`TK-`O+i)*sFpI1rG9#$!Z7ebumjCt?Ffo?l{FrygVd(!MHiWFkB6h6 z6yLzxzr$4e=8`)Xe}zL8F@E~AnfXc#93`b4QpiUt5*_h{rDdZxk|fm;COhQCITkD5 z7_7h?ut1|QhW}uVL}!SMZvl4T({e83zlYeAoMr-8JBC=lUHE-()F9X)xzYl5TB5Z( zv;`O^xavCdtSWPixTuOmM7E+M)%-&wqC-_;edX>EfRm~+(idSIN)B=rWmQ6JQ*%Lb zG)Ge!993`6pm3CdKTFdS)0^vq3+tPMBZFXg`2$o(w9tyc_~=~Z)|R3yOr8*iT;|?y zaIZ54uQ~}O{AV%s=${Z<<JRNK#L<CNs>he#-l?8)5TvX}P@OC|$?GXPQ6kk9Bss&e z$bSK&AAmOtBoXSTi`Pa;I7_IFWJ0J4qkmU&npS<HMrDqQlB(_Xb~SMR+!`LINeR<= z#Wzs-qOrN__S;oQU=i4f;}40GPl;2{iIYx`(@u{Q27S=Irvk^dD1E*UC#f<|;%0XW z6rM6HzDPLunUJhK-^}gk)D8CxKajTc?I246ElYxZ5zNd;8?~y?@vJtMb~74alQeHf zez+8Jfy6NJqptF!&(=$2AVy_QU2R2qX<%)3ZhByPa%N+6n6HnR86FGk0v`n*GZix{ z?MFrydP)LvJS+=}g_{N0JBALtj2J?g<ZX1!Q%u++bhO=fc>DW@#)o@GNB)xy4s(rF z3akySMaM3^lWHq`QxpwkT~4)?Ugj=4+l&52O)k38l&g^ACT)5c;vcPK==JQiSY>-E z#Xn~YVyxQqlBElGqzSJmY_Je{@tN^)eeBPbJl2vgI%8$WUf$5{IV``iCSL_j&)Ait z%t^}IS5B63Zgh^2l&Y$%I6r-ldU)7wTbxZ)l)J)DFt@=VtGyifWh6FK0j(YArycxz z>^Z9dr`9K|$hsiGF@uswaF9oHyH;zuI!=37a)g_xr3{B?!mkY$55g2v8d^OWhJ8^s zn=MFZu<DFRKWw%^SgoU->-o7YH0V`m_}gH-dHM~<3q>%g`eE|RQYuhY6By)=5?ixE zL!(k-qe63|LSvmWQ<Ew~V*)#4N82n>TPImXD<w%6H8UCRgNU7Qn4EHkn0cO*0MjF` zoob+OS!Z}iLjj<&zACxF0-F9uVU=@}sh*|&5os)U%MCMOjmQE?h`>ag(uAh$Cu~OT z=|<7x@bN<B3q`xptFe=So8}w~PWHx<i}4wFPOmv~Jj78lprI0pQLOUR!^UcJc-zPG z$=LbI>$gK;z2X3bu_>hv%{Maj)o=NEa~#61$c?{@SKzH{wY0Caytf66EF^6<k!mBj zKvU5{Q;*DbysyuU8fJ(*pm{ulqC-n9f9JUhp%P$crzfgt!QQ8^v99P`nq8cq_qbT^ zj6_$W?5~uTikOtx{>f^$zdMRvTf=E&lb5xbiQ3A-%E`gbNW<*1FMN7^i1GJ7%F%hY 
zQa4oOm$o)B7gq2gv|?pvK*Pd{ghp_^zG-#1qi=APW2)~cGo>@dGC)ztQQ?_q?c(il z!XL}k>c)@Xpe@62lNd`9_@gnx3RSmqZ?5D}ZKRAyl<1sVC*fCyYY_D6e^}O)6E%N{ z;`5m^wc%$@Xb`8e_<P4XEF-MBu3K@lQ*EhHj<1ce&92wmYLc=jLTM@@N;Wr0@p}u{ zb)vW$EOu}u0po*L<o!p4vp1}<_1>vbmTdUi?5}n)>`rVhLfkms9R2tmHK4`^Fm#&a z8TL6RCl^#LLQFL1mZ4+g{+(kY+}&I3>KXMB40E5-qVMrh9^G%c6C6C7^K-Dze5gZ} zWfBZjQPYDy=zyp&dl>lUhg%j$mpSIvl)2Uv24M{dbmNt1?cCnZhm)7WW80b;tF_>i z`3WHGk9}tWUgojr2k@sfNfDvkAE`m9y3oc<+jKmgP*7OY^Dqa9^B!M~c15wMVQcVS z3@1x-PxE@cv-fxb?R-#{@TW72w=ofu%{JPGIR5EaNpgk3(wi<7`tmrK#563@RBX^n zqVhO~R8EB%LUlmxDr46O<$D4z<LErqA`AU5APh@JOjX$Q%@5odGQ5YE1t!Z^#MK5t z+jKNWbyVK@vhvI4lw#JqCMH%;wc_gJn`@GyJj4+>Bh4fqD>CvA{m^?TK%uEMm8o8_ zUX0L?Qj3<Rj_>u~t*uI6pn~L<m<a3;FFGRuLL0|^ZsnhAWtW?F{hXob$xAmW&Qgvs zwzOvHF;nL-;^tm;clY2$z4G!acSI%eb~jO@3r7!c_)sP`%1^Yv?^l4MFJezSo7y^a z=3=0$t9(I$BTQxnektcXSGkSZXH2y%YrP*KVJRnJ;=4M6b4Fx~fk=J_ukdnwGi&^V z%-j-Tx-w~WYAE^W$dL`+4sP|vYJbk#0PAb--L^nXF>xM?uP?QqY9Fz`y2anKfEdUl zK3XnB?pRl$s6^ko=1z<@Z+GC(F%u*<t>MU@lmH*x*XXmwQ;H=r;#i$fk=3}U%MiJ& zJ3{n(tqp2%7kR8tn*DEgn*}9_LuVte&FHmgl$1|eQtlFvC}}}Sz8Xkb-EM0folYkR zmb)%2p5#s5iX3rbKMhcr{hlgsyx^q3DrKef=9lxv4Nh8$rU*RL2Ageh!n6Zx4icxZ zHPkRnqc8~g=ZApbch$t_Rzzpo`xy<EP&LvsQ#DhvG+xp7D<?5OHLq`?A#}Bn?o;!} z!zaPOkRSq&LDvvvr-RH^I0zOeHWdNuGW^?G;{)W(o!(xD{hQsXu{vEB*xK?9v3#Tk zGL#0OtCj9cl^)CDYe({9?m3DNOHWkcG~JN9b@8S)qsikaS)b^}_QruZx~;8Wkjt*F zIo&otH6X4!5V^CAEl5d1O~Qa<lRAF*=&DFXp_j$i!Jz_L{trEDVZ<L;Y_JePMe&u% zouR5iL84|=DOd?1swf`lbMD%-&bBlFt<E&a6Lwyt-DSl4SXlryKz8a2J;c|UrFUPk z%m>(9en|K#TjB^!Bb|kimU@PWay^4<5dCqcwm$f7Eyaz(DZ1Q(-`mz58DWhqpS;*G zGc;gQAYxBh1~Zk4szs%~D7TQKyy*ZOphj$%PJKjFWYp#&J+%HxdE8Zn6hZ9$eEbLv zghf2i3{pv)bce<qI)`krt9X$gXKx=>q1$#32&*z$fi;r5;ibA5DT|<Yva^4@gA0?C znxd+*vcXEvKEdzaJwcWntrZj^hiQST>JhhVm-I?h)Jn9~@U*|1jm~UbkX-GaX6GgU zI=Az?VKQukiVXAyU%(ve{j;sR_6JslkBo|qfOedQWMFv%Ff^w=ys9<HTUX}pb$FRL z9SXqb_C!&f6(`*H$^h(iiR49?RwsBi6BXQMB_LvQ8>dQzSxj~wO@By`9kI!Vy`HC@ zt|f=Yy7Z3Ed%fPDp8d`C4z4}bY=W=OSG+-M6<dKZGf@#?q-YmC(8kkkuf0m1uEvn6 z-E$PdCd|7qI0^u&tVT&qg2MzOq3tPxSb4bH?|(i%j8nw+p`s%T5_y;#@Or~6*ky)E zUXEK|idXNx@Kl2tDn>dkL^|$&MMb}G7Ja-{x7X%+wY-8A7Dj7t2B)4Sz(ouyGRDQ% z$N2E4*yLoGq)?W~=p$9ua;x=zp@MI7)dTIG4=5i&JZHW*58+lyVIj0g4Upu5R3g3H zrEC=*gOi+Q=i@mTCEv~f$WK<t=jHQrq>c>mq<kWkeIb@u3kxh&9h&Wk$^Ajc)+ldz z44rYbu5wg4426rXF7Lv9g-A)dW^FuNoDD=m24@R<pbYin(0i7y5PD%8@3WU3*T5@( z%bfFA6GHEO%wmQA<O<pE>-<lb_2U>^>^ha}mqIpEPDm(*0K{EuIWI=&p5)Nhp6Kj| z_+U#hiP5pwy$c>-K&#$F>A_>vUcy3Zf$?gIt>$lI$BVFqaZhnAlAwu+YA(_k@%7Hf z2Lcz{WqA$LWlcu-u+QjCp=^(I9Ho0y`pLzY+LFZQ9&;p3J+>t+rWz|;s(rr_q_zSD zr`N_{cF}QxmLKHp7Khuz$aH2}(C)6BlY_m_#U+{E=}{`lFGJz6g5N}XcKyGG2urs3 zDuH${SFKN(r`rz$O({1|dIVu>*zBz}fa&Ke+BSIC9TfNbiuTprc^cwp24hB`3@n(U zxVE8&yt$A?iKtr>4`birn)2WxuqR{g{alYO_B=<-8Afw;G=IxQXEup*RQ#<p-jOs& z9BYFb+*KN-?O<?dC*+e`Y`c2_)k#kOp`Dhi<_(-q^Q&i0-n?7ac~OGhVtj<a-U8ME z4|Dbppw*e&^hb{02o^^)VWM3ei*wDx<5_4pz_ZrE9L#|X1EX&K^ZU#BQ%hdSfeNgc zIuhe}JS;ZlIw?Vd#<xaKK?jdJoY0UG8v}-yyRW2|{&ZpiI9{ivoc#QZmdW;Hmbr8e z9|jm&|NImi9gQOu$VWVL<&_L5j8GF>;R9!AZ17w(RzvxF@bXk=%3x9ceWRpS*XanV zUc5AWbpXr9%~m8z&gW|%oz<0|P1qce&N_U}$hSpKCQ_Iw7n*Ji*(=zG*pEAn4y8yA zT;FmJcqJ??(k_Ponf>n74-eERZ@GWmYU2o$fWae%-fSh3+&G_XT?PuhuMJ1_9VfzN zR&bO)AKwQZXou7Mth)Lcnqt<UJDAbI!C3#Y;lOorz~`iC_w25m`ojS>07aqic-8Io zWMdVFooLX3GeUFZ+|w4ah%I*oj&ryaKz&7ar9gP01|I-0#YEiFnWiCcH(BH=AD&`* z126A&TXTOb0qv%)v9~+Cjh416y{GH}PWm7<<s>>DFWrJlCTg=;;i{3@U^|Pft8cB& zZQ;Q9khL~+80vv|V?jGIm|e2-AAXXOo7Fj38amj|j5Eb#K(X|v@ox<i6%iGf(F5m^ z_vhsRW2n%d2ZXowq<4r%7CwzTh0(LK2S(neuKM4>TZH?=De&^+S#^3kR9HP8b$TeG zu~-D%qlsPejgjVacxy+CFX@go2oL2*4wVLq(4pgd<bS)n><6XW+6#H?!>IbM-7b{o zdXlj|?JrMtFc1zx%P*jQ<f*Wt0Tt!}6EQcJIF>EQsYBBRfH4Bp=`7s1vA(vV0ZC_` 
zl@J8aqwR{0K5n75k3z$aqfZhm3Gl45Th@9XbsqU))qROtWtXV$vfJDz*I%i$dKb1A zXG3m^uCn&lf)f1pVhWUci1O7r&DAm4shV2eQu?xPHlYRH--@Gvu2cSb{7p@Y<wf40 zalSHZv4Z5QlwGXUy}A?J*dh%m9iIglr8XDK_cR86bXOUjdU%s1SOplX--twL&a)I< zt=)EOWM7RL_Qw_!W+F^xWO{rE{E+GqqBi@c@Y!P`@Z4$4J>2#8KV38dei=DX69;ga z79GYR7~e;dwPh=@F7a}5G_tba9Zp^TX3{^hVY^kmdD(5vTrV2EB~ulDk1iJ`)gM27 zBNJUfH%C*rOkLi?)pnjEintH>PT1>TrXEowcmuJ)UA7n@rZWf~x+>C`CxKR(me@J` z2{GBdiJ`Z1K4)*a->hsMt7Q^RAiXUD+AU^bK6;(ivirgnFASCuMh(khiiUdK)AyTS znnuVA>N|JJJv1v$tjGJr*Oa$mh3QHP+toH~%((ycBXuWbVO3$cuf5W^oWp~e@aS`L zeR_E?;9x)3_WJwA_LL5np8wHEy))93Q1j!bf5XU8)1Jb~{-s%Bp{-@D{)PEL;svU< z;a=)!DTb5EP?prH))b@S04shP2CHnT$~`hmq<xt65Bdn9Ak<v?$5hJN?ax*2rU>;T zL?T-}rI{+cvfYJW=W(*67pN%<9*%eR5=(W24SrfP$_o>7z=b9vmd2`lnB)Qu)OsHr zjXgyA<KydJUlSM6&7GR%X+sE+M3DhhjNQcL7R=2SMIDv~Zby^F8!vPZm<Sf{n6<ti zxGa3_=Vwb3N9b~%&^p`$1y)@jQGaojXprb3(;-nS+KcKepL39zOWO4iry@0s!_`gU zGD`f`CD%zZf|u(oAdi%@q*}h<4lQc(Y58ba;cJ>=t-yf+yUU@*xKb@%Sm!4Qrn;C` z@VKdLa_yt5iBp_W7Mz_P5uqi=e^pdN?Uc%;KyG`b88U_xD^3+Mh5{v-uF))ASygRp zi3I1%-C4rG;aQMQ=ZXoQ*&MJ(Iz&uNRnbe(kI5fPX90lSj3B$AJU?4i(6`K=fvWOb zJxIYk8W-O17q|z~@ewf;88+q$DGW`pm!>b@U+#&9s~m%9n1res-R}*}lD(CX)EN=X z|5otgY&^-By-_74b9JU27){P<xF1tf+OWv3z}bM|0LsEJ)3j{yb!4l$W(tt|icM@U zj?lB>5>%aG49!P;JsGbF3=LO9z(C;Tph8M#ojvUJJ<RP*++Vh~0(1I-B#n^NZPaZ| zPPsLf^L~2sezmG;UjN^Tz55@1y&(8&$6tu>=K6tp4%nUYil_;<aDjV?$o;!2mpbd{ z%LyS{jI1!Xk1+2Z{X+&R2L<Q_Dd_{~rvWDB14riY7NCDQ`zLDDqp<lq5N^o3J|-s{ zSFUj6*{U=HMqA>)3+4*wbpOisLl>W9PJm<eH;+cKMC3mpoi@Zw#+}O2<{2z(n55!@ zgz~M7!!teVJ7~?K%Rj%JPCjg3O3tLBI(N0@{~j};kIu?ISO|>6hNZ$Dr_3k6&db5x z-qu#n+1c?8e`pbg`F(VpEZX;H2cFoXZ)Yfd(~_$bnv!nB<P2Pxv=ag}izB~ThI-_g zM|g%vc%}&bFVBA((O<`qoOF5_@DE+`-0fLVtv+dO%0%T=o+|clFwS4=jO^Xf?FX<m zWZfodid;g&cL^o3V98018G-`?7u_{CG>W|jj9BD9aYTjW#H>_oEOobQS3Zu9Mq=2i zF-jpaa<kLEq~1)m=HkG19V;gtEvewnEhE!9>Un#6dYYQs@4T*#ZX;sK@j)ZM9YWw- zHBE9zFStu9TB{!KD9&2hSfjPjH#UK5W@>0<JvUGvK8r5`RIz%Z!{lRu7Gvr<ZQ+r= z$q~>HSR?$9&eBqfqIPHIet8gVZ+;IawSt~G&t8dz+<A#Y0hdZlV+jx|LhcHQhtKF2 z(D>x@es}w})mvf3;}-GF$FBJFjFP*8!&FS|&~L1<{MoMDGq4g%(>qMcrbNL>mDwHy z2R}Up#vT{k>COLGbwST(pZk&FzLf0ITvrED9BRf?RrPp91yoxKVvNDr-P6+86Ni#} zyUo+}YH4BORR)5F4Ma+b?-zS3TdjG=-&?1z=e^ry$PlkM?^Uvp_wsE6H@h{@sZq~b zU7$%bNeP^M1Po|ZGB&s>EwUXe*&8cA700`B(dY?z_s48ApLWUP#_Cho>Qgs&VT8j6 z2y+F=Veewcc@Av#7I{5CD+=<R(JlIoxyD#?i0JHzbYj~tkQl(67tQA!ddf=D0qo2m z#7@*UJUyYvi_r`BRjR@>3LoBQseZj|KG9d^ol)IcTIQX>nTbQyskXGpp|STicY9cR zUOZZtowwS0*SOm7d9<`X-I|LzA$-q8$j^(9QI)Z{aMQU{GW_~8HE}kv3~2=`%`5U) zALm<lhj+}m@|>#53014Y++2i_s&7K3=I&Ogf^n8HJIOIm(Uz#w#5GtsSg;4y<ajRZ zz`qnt(0(HH>rjB`%2@{eW@mQ>#^2Yc{o{J=Q-~2-J2<-VP<w(U?%@FAukwWf=<}uh z33RiT1p<0a%@#5P7U`i~L9h5Qe#-RCe)$Qw#s;6~Lvu1<pr_qm;3v(IoP5wWum~<! 
zgyrc@*IoN_+K#Qxx7*FhwPa;d8gRl&#?A~?F&|z1^Xc1|bmY~O>ghwe1HgqPuZI!8 zE&+Sk2XgtRZtj$CkIK^ehNhAxq6vVGtbdMgb!tf)u&yDt6eTntt~GH31xn0Fh=TWF zCbJ2RkZp&MO|KH2$7Ik_N~--yXmX4r?<BC3o0Idt$lDaqeg;cq`%{C|Dx&~rH9+H> ze1CD4ji>(eSF63$EP>k66rPorlL<oFDh`|NXW}>#>c!p3Z)bbt3!~X4Ut|~f>&jL+ z#va+{<!;aC^O0`Pr+-G^?#Mr~;R0mIOm=^4r)_&QmRXk=8kzfmwX_FEK!A8g0Ynq| z{8x@~*RpqCcD^UoLs9$&aK<z|JTkdB$uqSOBsP&EHS*FJfXcHbCIC$n_DmJ%N)vkS zc?!Z_l={!mFqXuoh0DZaa?H<k<A+BX^-=00SS6RiG(Y{KhEY<MSZqcZuF$vw;M8$u zsLDgv?e7C?p6&n?ML5ed$T<{vpY5v68?am62jSg*hpGHS?_8IrI^W3G$3UpjDuU-s z*Gn^;Hzd2V@XR1`mDqqCRcAm9O-=3gmd^UBpKty^odfRQSR}7|!{Z%-=k9}my`3N< zA-S~V&Q_puU0ceMYXK`Cbb1zVEtCSF=I2BK)(~)My$BB^R>&b|8!jW)-;cHVF=&tv zsKP;``b5i$P~$gt?s*)UR(c9vkj35IHMx{N9bf!bt}JMk!Ha$5lCvvc9!z?o^&KHH z@_Xh@ZK}C%&+*jWx3RZlHD_yQpAS{0Mp-V_MSL4RFQ>OT<Og(S&+DDtL~_7Zmy=y2 z;htP^f`gr`4aI3z?O9GQZ)cCs@$%Utna1@BeL!rCHHIBTi>N8PydxbsZL{lvipn5+ zSX6}L{dZ<(0kez@hF@I1bb1~cs;P0Rq2j}->O67sOS$|@sr(S)<!4{I4qK+~_s>p& zvh43n!adL*rAF(btOc-d;L-a_UFS>P=Ve7&Spt#x+`b!JJ^A5c7z-`UcLE~CrmC$i zu~SlC%TrS;sRqZ7hd;CZ->I*1v*Fk43#VUHD`Na#W(r5;t}gz1DPG~b`5fOEDtX^y zE`8qYx8iTNIw4e?xdVq>ux4+;7>}w>G{CjdEVV7Q?q-N>PlDX=h9@YKBU}wDXC;X8 zQSe5fjakM4Q-)>catn!GddMStgM(a0si50rDtA|^%0ExxzCh_QKhc@10{1Grl=oGL z{zG}zU0^>~k=7ZY@<5#zHGSRDAXt&)o$mrNF`k=fkE`9)_;T&;q+jcol35>^2o$Z= zF`n)_IZ@3&Z?$AQieY5U_3v%(V(#teaQnT*&?G43A-C})FB;hG>3ikq9ab)z<5lC? z8JqLBw==Y+^U1~Kig(K&02Hacxmk#XFqVcFcqJJqxfGusPCcIBI<Aqb$^Ujh6SY&Q zz{w;jnTE@o@{!x^UbHAkzX6Q9?p97q*b0mwIKIX=G_@wZG7um;dCHe0C<B*~5UH7~ zc#|)GU6kTln&McPVo#X*^o$tz{AGy*FwTDeyEZOR!AGcl$J}aPaJp`>aqXcoU6<zG zz}dh**cyLZP}tzS7-(t>Y1+b~wLtn{h3obHyYq~bbM)w_r$@p4PqF*HZmF5vZ4a=M zZGVAy)}5Z>U%$HGhWx&L{64opE(hx1=JkFsKC?jH3COwN_Us7s_<8qi)cZXI*mr0* z{q6m{28L!-)L4QU5NBCw8hBj0oq1`=FCLc0)NScGxK`EQreBRUH1xr}vZ%6G>hLM6 zC`Jod8*7q^^RPrL%C8Pg@IN(w==-Wfe(Bbr|DNGOsje!4g&{!!<v=mY6jE=bayoQG zF--n6__9q$6L?3ODnDoWc2ZKW8(!q?Ut*hBrHxk}B<*pS{3feQt_mM<=83b)5inMY zmiqV|(HE`blPok4MuUL@dl79(8JYx<@}2j(b*|5M*`>*bFDPQ!F%Mgqy@b~n5-p8S zjp4m;$F~8hd#4{S<eew!*!|w^X}EFoSXSCv5Zb+|Z84tk#oN!@`)hz?{a9}i@9-iu zmA?FK|Fels5JcmwG^4}w_Zf;K+`A!)e9hLd^ypNU?#AxdT+}CG_Agj>S4wko*gWm| zoNwBvO@eMn!d_tlm1!NiKA-otBde3CbS9=H8E2K5XBE5S3*??(6WQ)}w56YrAluwc zD)X!~CMkJ)>XjGUQxyj^MHiiqJ8RFk-hxIwNjcHot%zY>?uJa1d>pQByqGDH9Q&(B z#`*rbuiU{P7rPAVu2H#bsjkcyy4+KVfC>b9uzA_pxWWBI3_81|?qh&!F3<MWw_j_b z>A5^fEFN88fx`ek-(MAG`MV3eRC;E(atiYdh?h2IE<cU0<}CO^bIJlWI(XSaEn#42 zH^5e(;J+)gwwk1<1eF@K1q-Qp5<OU7B!8EJb`tztU{4pLN>W-+7J}rbJeQ$87ALzB zF2v45TGUHgG)?6N*<I>|I(xd@YN==D158RwOACvLNXYPu@bk;a$cTuG_YaTPl-FqJ zD&8DlEv>$zkF09-)MKSAIQMhHX+>9fvgDT)dn&zLo<AvPvl&l)bE^LPN010OL#X-o z>Lcpo@P+qHF%k}mWTvKO${XU{RI>47=aYYj!R2QY_8<-9PM{v2_ruxvq^hiLS5U+Y zU4_Ax_siKz3V<)M*Y<6W*Y|iS*47kV)krBDntic|x-gO!pnazK7h79>^^eBdI!N;< zdsca^=@G6e$Hg=EnT+4}li^1DHTSVGHK<$=6j4<g6edEjA&N>|l|m$J1O`TQPzM!4 zSQUL(R(%Op{m)2Bg$d4v3J?1mabMT<V^dS3BHD&}Mk<Q2j&}AEk`b1bmA=Ty$UE!l zhxBrBF<CZ2o}W54w6roY1AnUc?gW$`4#O?NbZV==J#ykfz=la6x$|BiQHsCS#FNgt z8!2>W<!47jQIhLwY`Zxd;HpyIim5UM=cxL1Om+0M_#tSh+HS7Ls>#EC+40G3wtpF6 zRA1;8e*XCWQmG2yTUnvg@zvZFp6V>ayV)DgM8x0vyg!`|jg`~=c-~!l!`JP3yLst! 
z1$JEC_xEd;k;g!p`553rK(C$*`NdAkqS9Ko+;%TJdt=DsBUKtWSxF|o>E6lx#DsL6 zh-KwMCDjMp2(9+z6lE=}wD=QrY|HKMGxZG$tPHqz3^>a*d4;$&wKTOf?2JrIy^8es zoaSH(L<}e{48*^8+Zb+n*h<;q;rZ3&mtm*0_&&^xvpn9fPGWi6xsA45vN}acy2C=X z6v~1FCE;bL#i~Es+oG$~pR>DF1DRSMwDz5<{v!47!n8w7^K_?a_V2qks&#^q*>txz zF!px9)<7Qa$}<@~qJ4sr9md#S=?Tp8pDY9p#Khm&Li@hFUR1XBs<=Vm?F!#_+5&l@ zwYfPJZFS&FLs?!!T}MG3w7IFPs;#Q3?CY#=kQ~r=(e-uuF}26oTf_n0f*lt1zKliC zYv^R`<L#|`VLzSmR??W7l3Zk5WNcI-C@n4x6B%=R`_w8*Aa(I~DqCARYjZJ53f?P0 zSqeGpZ<U&CL}nuoA*nb2W-^ee)HL`yTax7D)abe6?z>~)+w-^Mb$pbvae2+H^At>^ zYJV{7^ICaMQ8cP<Q)U&aonKZj4VLo$Vd0|NO=X>_&~VM{$p0)ddaR`MfG;K=^M#&0 z8pds6(ZBcl%Eo`*6AHD1Z{P>T7dSKW4*MRN8mjz3a1nZb?t10=_?U)?6&4oWC*q?_ zK{1oXK}=5daW*JBJABFQ(R8=B*1h?25!+FP)J8h@c_RL7Yq%x<Fq6vi&XNiVU=YQV zlM9pWVfGG5ObiACbF*8^<@p#DS623YUUy+?erRcBaVruG>JG04p%i+2Nh*#`M_~5m zNt2w`T|OW&@S@d6LQ*?AE5OU%{L|O-Ng`8(vh$eGFSz0;qi{!~cz0!}_)L<}d}Wra z`{l&lOr<d+oyJZ6K5JKHF>80*2Mly^9?_0va<Z&E!Lm$fdSV%UWa&Ml_@&@l&m1q` zd>iWX6co`h&0>nU<gz!Q+}8K~+08XIBxHSi^n5L`yE(b|<Mnac66-W_U?AY>>9OT% zyYmSX=<Md(@C4MmPdhLGuD~-gz?H7PR^$wKP(`kosva-%0;Kv{tt{;GUG4lbBO6;Q zKOP^CYIR#X-XAY;+|?;)b?_|d_;{!^tu4v1Xe3!xO1iT}H#E=XT3MIr(R}=?lh?3_ z7)=P5$}>^;yk9fLy>*|AzXdd^eY}+B`b+?1o*Y~^w?-UNkB$L30qkmN>F>|IFNwLm z9+}SOMIR$3D+ks0>+%edsLDox6|%%H5Tb<LdDY#jY_?bJLZ~Mi8$i)uvctjx)w*)Y zR$D+#ZJ=fL_PWOE7GwhArn|%L(JS`If&pcd?&`WmkF)3D%;xa)ce&>cuFhE9?)I+( zibZl;-#?xom%NnO&ujB}1Il)5_`Ct7-d_u)_<SGlH@`nlk8`)ZpP{V=QRMim(6GBa zplEn6csyaJ$cKgTKVK>)BHq2aJ_G~VQjcsmeL=QkdeE=0H%DE)S4235qX1beY>4G$ zCpvy^rM3pfF2?4d0hb%C$82n+DX9o=xNgX4$A`O+PKdtQAr*{pP@VDJjWx;D9qAA5 zKp}b}A|+TbZMm-kDsDm|A`-$<10=bV%ZuSZ6!*4D@DI*u>6rCB#+JsQhE$-0pkv>b zw@K-!re|ha+8VAeMX7zQT+OZ{T01K<8uo_a_U!iZ22iA(mk4}w$8Y9<`r&$~#<@Ot z*e8|LG}Lr6wZACJD5<1mrCo2rZ{Z}Pq{bMZ#E@Dk<QgOoQ4>f@^^J@SkWtX&R~8rO z9h>d$V8f)RYHPY#S+te3%4%vx0Iqm5Rbgdy`T6|biac`Nh#(-_&c)<B#5AnD=oV61 zQdU}0UIJQP23`gR0{r}!10#^XxVs9<2oO8JYbM0U-`?G+Z~pQ6{2UcMDdGhAGIMRM z|5V`DVeL3VJ-S-?dloVOFfFau)7+k4Mu1^q&yRNmQ*m^hl!|I&Rn`0TjN+7%P43y2 zrm89*K%AAH>3w*X9L?2=&&zLoB|kWQiRY9uH$HwxWC30UODQwXF|C;Tno6ZvOIi7= zvh0_;TStFwj+RE&<{xhMk^J;4;A0t>;LiG~;o(90lSf(+j)qARSsCmH-su%I#n<)~ z;Qjb3>ou2YXxqDn<cnQe=r_M_j4nfG-7603ql>Eqoa3}~lu<G1uu$-en>)#nuyGM> z1{O{V>JhKcJrb4Gbvf|I$$q-M*ig{bappjuIZH;vp~EjDqJ}g>6MG<Rk0II_is%E_ zb8>M}>BBqMyuWwS`jZ74aXVYSp;`B5oQ9C~2hsLk!a(O!?<-ewySn+*R8*ZY3tI6G z$ad5s$#cCOu3qj(7Z2K@vATMixp}!Z?%$rZolaL<EX))4_VzrsaWTn(v6z~KYsvKV zlX3t9gZ=Ho{JcgcPAZ16ZpE(&8Cll(mHgu3-M!uL=@~$6#nI7GQ7NCPg^^rZCb08} z%Sp(qsgM#Drl2Kc?Q99*c*uKKQAsH>Dso^{(CzuAyo)O?E+MC=NI^L+=r=ex4P&T9 z;Q;{qh$|oxsQZaubX<vu@0!c!dGok=a8H7UmfV&WmIZI3O3Hxdl9P}yFgT5-7gEg+ zwpNStYfQYcfst4Xm!Si6+xct}@pK$Ujqv?56u->8jDvl(b{i9D6y1K#%p|g1l^mOp zlG4r2D(J=`+1uS0v)4+}+MFV3t8TWUX6u-n9)r+A$bjNb#m(*vYqrv>rihl11qFfY zrREtF3rh-KbYAt66OtP18G)B$^!ySQ7p5SlA$7j_hN<TUlvl<wzkV4qQu>SWz$3tj zhz;iblZL%<K;T<S30b(kJO>s<9}V}&sL-lfMkF-c{M;N!y=Lb5wwJSs#SaFC`lbVj z`+5Oob*ErM2prTMK1B9@DBx3qy=$0=xJ%eCR8B_yRsPGX>yIzh9SCdI>|iaQ=PP(j zR9RV=e>~n!d~%9co3yDX69t9%@4TyfP{q)DJ1*xlRJqspH6|_H*ONIxQ7)Y4s_^*! 
z{*~=6cd&?5OdRKD;`NNPua8(?dWLz~=vc42<;CY;Xr#e)VbCZ{C{$Dn*_s>M%pg~= zqtk1{FpGG@@70=+A&^znQlY(v{U4rnsz8CB?$Z^n5DJ8r<shM;&2DYLc8}-F<;Pew z@`;TkTw|!_ZSHYBJzF=2FN!J{6b_Xl3h}H3x(S&b>a3uxAyeGo3S^$Hpy5t_Vd2AW zVksS;S5*xqlq>LmQ&aKx4mPIeQ(lGeqw$Z%x()}aHr$1w=96xh_u~NrJc=D{R22LX zE32MUCoeB#w!EfbbxQ@jWsiXy^n>%vBkPW76HOJwkUgYF4z%dE9}>KznL9}pv3wuk zwSInrq995j4%}VeAD=+_b|NxyXn%VB<b=-hK<XaEk4M6Isr~x2MG76X^)JZb_2%Rt ze0=UR#FPvR=YC&)U57Pm%j$w3*n}YRsLIsV)(HwUywY3Xo4vdI;95LML`78k&F+$A zdu3ruMnuNQy7=YntJ4d45s>TK#t@Z!a8Ly1y?R9lKDdp@_ue=58*$da8l?0V_oIEe zCT1?3V%A-d7>rfrXmHxa6|cUrzKwHjf8F$C^2ZMYBr#s!x5tra<hYaJ;P%1kohA#5 zvZW>607%JI?BA)Xs)|@x_@Mp3bti)@VMChVI%-;N9+sVrohUhhAU$3$OVqWS-KG`m zmqJw%1|ozzj=;gbNXq5l`6q8qpT4~L&Hcn}bG<w!W5QnHjjy();?FF5Qf+Tuc?MeC zsm@OJEJCv5l*X_bYmX<;VeY8F<C(c>sc3xnEq!*s4{}06tgd;_J|-c5UXZxZjX+H# zRQ6a)OUj5_r(-kMT->LemsmVrzZ*_`%?adTZjSVf4hbtSW;7OM2LX5$0X@F&(f67g znf>$Iy{;G?FtnvX%|#?bS-jqqY*Zg&N4uze1NsnrRI!<sqEs`Qr#9AAfqq<_oUVwE zf^8OhWwFWbixBi(3t|MbdBueg<xr}8+}uHt?);y3J3IvjMCPYvDH!!dEm?o92|Fl$ z(Skz-C_DB9)YYRSRQcxVZttMkY3ZfdH48zS{Al=fZT?289Gj$-V~wvPI2oK@R|j0e zJ?OheirO3nYK5rCV{y>>fYQpi;I>PQ;zNsJ3N?G7Oyu^wFpL3k9_<XR51&hQ#NzO! z{>tj!X1S$}>ef6yA)%lYmy+?{J7qmmN#4;<_(ko^x-(i^M`!(^ZerKd|Gd~>gNHxP zleYdKU3F!{5M5orGu{47--LR)9JmGt!w07bWpm)yLRGp!^N<5O+6^^~KFU+mOlzQL zO2fk0dF9wiN=w6}mWr<Lfjd&s;jigxak285=>s9ZgJ@5u&&xeNv>A4BM}Wkh?F%Y; z9)3vnuT-^Tr0au9dwF#q1gNWPu>a{>>qn?^HgNa`M%>*Tytd@uv#Cw(yJ%u<TwdBp z!%c!DQ<<IUEoFsz|EFfMdYc*NXAk5kQp|O6#Cz8sdS8ypG6z9vXP0A-v0HaVMMPEr zcWGT-V=rP?banCY;!w0KpZCzK^9~sCU_iKdn0v7Bs=K>A!&SK5>m@TGX)G>Lft|x4 zZx%*aCIVYzY<SRqRes4W%!MyP%J7SN9w{r?k3gQGkR85yM#TKk0|<Le-^$6zdI4yB zW+o<$O|_33OpG)F+>k5lirSIku9_MZeS1*aBi(Kf2++ci5Dj7FmyM?@_cr!q)B^-I zFlfLJq}kcI2v|5gpVso4jDUcGnikmP9-j|bRY+n+YUqOYg`fj^1zX!{LmWl}*<23C z82`_yH703iHBJmVL=60|m!SKm&-n9KNRPHOQ+@pa%-FOba7arlCgzgeuy%s1*IJ#^ z%hSF=Zi49ho1tOup{s8J@AfeDNSFQe9*=+5vZfXXr{SHPfnyM`ApvT0Lt8^b%W&sb z<zzG%Xc&s}@;>eN_d9_2X)@{`?4SS<biza7)ME*a5#TCW^+i<$&Y62#dmdmweK3QM zVR3=O`4trgh5^^e29%H$4^L~#vz_A$P~J#_n^3bG409@c9p7>kVk<f&%f~+p>WN@V z0+Gpv4n+X|+<Il@JLhhJ#8cIh5*HR0k^)&kQpE-KDJLRQ(OPSHb-IP$*OlX9FHUF| zwzTmZBe1Zvq(nhOo|A<KA#-AKz#y@RgfuM;%h;&cPJ0n-ih3$KRR!ZIhua@N#`$_l zL6t-Lx0KMRsD#YgoQiLoe@+Wo9U&DRRGUY8rvc9ZwH5WJem>2nwueWD+Ab&@I1X|u z((#VoXh*9h)#Npk^i-4u4p0U-({9Zf^==>l=x7F<3YCk)_cHVrwDu5zT@|YmRa4YP z^9afc$V!MH8d%U)7Z#UMfjwVgpl6VfkQ}g^-So$Xu-SIcE7|8CtDw%qz*OZ>nCC=J zl+Q{@RMgNuML8KA8F8@mgp(}zEf^gX6%`y97@nOi3k<0A#Ep0Kbu*|=qylqXM`Z1- zC?evaMVQ|dMWT1*^-C0rbZi@HB)j7^1E}gfdcuw5+?JkJM$ZI?Vg&_x6T5mz1tF7= zd*$V>y!Lo8ZOFK-78>cU0Drg0yedsFvQbt}k*b`+ns<=l^*%zca-^=NuB4O<wCH=o zyXb>sQ$<xoDoShb9<w)?+0#}Qme~cx1SUuX_{F{3i_xj@P?M4-EGQ39QE6iy)ewK8 zz%`ck>RMY?*3<y|5>WH72~a#d9lR<EICko5rtk0%5>%6!nP=DZbY9#?nj4ywl@q_@ z)xZ6n3hV(klvtTqp+TVnQF*n6-fdV;vaBwwq-dmeyinN{?bg4LyWtxPG%YsWlmQ0% zgTKY4orSDWp`bQstjvk5rJWrlU~PPS%Jv!@xW5Ior9HVo9RZN&BiA7?wBnLLZDStP z_}ggc=-@KKS!m?sHK4h0k<lO(@G*Y+Pi};T!`3%6{Gy_wXJphngu=G7cRV^ermmj) zV&K%(0Jo=gGQtcdykuAF7yR+#hnZQ~x4ZVW(>j1!V~+v`>FU~ARMfqCT0IkD<_~0K z%34ZYtqj3aZdTjE!eXD4u%ryp_^gDMCVCu8oAiW;iP<UCtTZZwBdqjvB(d6x5y+po zQ-v}p5bY5a4P|A}D%qpVvr8)!)D#w0Rl|TTfc(_x@aVnHX@^5`WCUzy1<0M#@k!N1 z6<7dRg@q-pPLf?+{lEw}4hCClemQbh!_jN4gC7Ho@PU@altfrxR!&3AOT*($)1PlH zF3T6y-?c{NloS+1#MKv8rNq9u*33`8bS{9KrVCMfHv)2xRg+TtlOg98mB2=Wm4h`h z&-~q85mRwch~<&>+YHoxXCx;6bb@sHe5gzf#qonX+}o1^h;?&tIUgM47N(Ie%`H*V z(1BKh7$B%QIM~<K`xn(2;Hj^vySukfPf7P}kdTk?_x7EQ`nf+7xXEABtEGoRLp*YD z(Rmu&Led^NH!s43jRRmTP!bhO$qdX1h@Ee6KR7foOf@()Fhsd<r<gk2Jd_q!2Co8c zeqzk8cdRQYVB9d&mTJpst<Kt=LT5jT&d=}a>k#lVxZID-POLM2k3l68^1X+@-|E>K zHXa7frR1f%ET^bB=fvg$M*3rdl8U+yHFdSMSEQ#(1YG(@2upenZnfmklM))vUeAbB 
zj>^r<P!|`!Q&(4W$Ngw%a7+98gx|%2)K|Q6vcc0~K@--R!ofTlknKCO))Nc(>0;M( zaefs84c9-+O)xz@ZES7mAL<T5&B0=B;qf<ZYX63HoiihE_ioZLdip_!S4V^_gwta^ z&mp%mr==%9r!dmcSlU=ZftwhNXIcB(k^YIjU7h~$Ft-z|Ow8haQha=6+e}YP3N|4O zM0OqmA%(Jt1TK1!a8Ta@r+Zr<J+N6L!07904D}S&rlAMKM7le~*=vV|+c6-D!XpUy z$ny&lN4y+v1GxF<`8Ze}uWyLCr<myHp@Jb>npl9|&KmqWkKBl@shL!25J8DxN<gas zpU#z&6Pw<U;Gr%X15mF8o0P1`CrK<sZDfJ0qCCs0InQeU){oGVCmx*<qoS<}m_Xab z#Uv}w^UHK6ue&qF42t4bXm@j4vZqJff*f&$#GPs_NN;UuF}t#YLxdaoe*iH-&c4Gu z4-dDUy){1OIXM!O6UHY;kDTy!1oPx%Iy#!=m8G)_(>=YNNht}w{yyGF4=+sh@q|aD zuUA}rEcDZiv?K-`%u8Mr=LY5-ZCP+L&%w!_kQ9IQY9H8FJj~PZsh=gL$V`pL8lnR8 zE?%CvnMZjWXVCe&Iu!G`ybU>-o`IeW`tmxi1{j@hY&=Tm{k)H49;9h^cL#O#8QWC} z4D-QZ!R?*xyU(8o1^U6sWn^N2_SDo=00|hmvjha+eg7SpOrXB&atqcIEiNiZ&&Wi_ zuX%=BTi!k~xn`{N^3D0J$A_3#rJ<wMG%&Dn_dtY1NGU4fz7Fejnt%j!Uf(T+iyJ5N z;I)oq-qOmfrL8F}BoH6-yxgFf5D(&E9{3p>A3i4Yc>FvtUr|+l|IvfTdpom>H-|^A z4h;<qT^|@4>PJ0ZAGkg|2w(JQcQ-FP1L9d+I*(x<m(D{24i0t*==ukGXz?&lN<qo3 ze1?cTGc5rt^C060r1v=#^R~Ded}dw;x3@tvFDom3w6{41^Uwglx0#w5-`Uy0m7zs5 z&wye6&VzgRcOKU^HF&~+Yig{ZD9_5qLW*ALV{c>G+tvON_r0=FsSj2rS8vaY$~J^d z@|<ek2~{IG4HH);HV&Er(0Lj<I!yyZ8+T7cXrzpi5+^$wu4__f2uV-V3+cHg;b9)7 z^TtOpZ)w)t))e|jFh4#q{Lkt<hWW~}?yk$7ms{Ig8X@RF*E*;z=*O;(HjpW3UtEw4 z%)i3pZSd+mI|o~IY}A$hZv4!Xb1IxBB*V=-lba9nSj>Z;XJz>vI*(55kR6}-BmMm3 z^cXOYD~kD=`TKVt-tX$`_6zU@28c<B;dp|{d9wHGW%tLA9v;3XxF5?Pke-(sFBw_+ z6R;}s>iCBj_Gi?Nw_RI)_v42H%+qQa7}~mfx}lg?#?L%>8w~S!{Jh~&I&X1+d91lg z4D<F5*35XlO+r$_@tJR`!*@-pth@yJ7sS^vm{L(u9;}p<6eJ|Xm{s8z=JD%13tpW+ zig|Tlo|IkgG!b^@#g1ZL>}%%n`1vEe%@>_-{DYVW(ZKbqzheI3{g?0FXJw|rF=u6E zE-n1_Zt(rRUF__vjkQ&H`g=)L<-kx6D?2X{y%deCGcx^hTJ3m6=giZWZ@+jOIxRy( zTMsYyuqYJsxaQlqI-VsWJIg4l=bDU%dGg~izwd4EFb_I^q@N#~7&)5xWfb#Rrv!Y- z&!h4-Kzo3%_ulT#%JRZ~-P~B4nz#WcJwGSoNaneYV*WTfPee*V!X|wb^FHTr_<8YT zGEaeKUiOb+{zz|wW_}m1pa0DKgWbp5x3)oQRF$y+*4Eqr67<8VAFxh3+VAhhl@(3x zZRgK(S=jgyG09O$*?Xk6r_|i2>bi-AdF3OS7tu%H)p@ex>U>y8&_A#9$6+2b-v&4H zs6o7>tQc2T0d@d5j|>lf`r<jp&%gR7n2(8z2Igt-dK+LKub*df|E%+PnE#x&Vf+0t z>=)*b$W;m*sq+bmaZ_k-gV)dh8pGyhr^7cMo}~TY{ylh|jGP>XW2~$A_w%&Hm6eU{ ztq|=#et0J?xty3qfmGbmCb9;w@1DG+qNGSgL#<_KWOtO#d+amsmW)4d^M!fa<LEq| zybT`a#eR#<Z=f@8wN+*Km@h5D#XROZtd*5n|6p%MD$cykAENWRXm4{Q^KQP#bM)tq zqw~KzhNbvJz0EP0N9lZ9N89taZ=D_O;8-1QW@%<ZL`)<uAr1&Zz=dlN{LFk?D|($2 zYR*z|MH>~j#@X|_ddU3rx+%|)Q~@DjRc$T1<LbPqk!Kp7{QQAjrR`BVf1DWh5AZfG zUOcC!p-#p&f9$|@Se)~1?%djfp%&Uy)>NXdN&Q~tg@yRfpXWg{-vv64b$>9-6aN7^ zf7CT8^5c7(TD<cr1@X_TJjgt**`x=&O&geba5ZK|hFE@@m<aTAIeA%99L$UF>wMKd z^PkrSZr^zzBxz2-p{DJfms&R!Tik1A=WONTf}i=%I?p6(=ACf_^Z4>Mb)T7UZ43{^ z>21W0%G;oIp8c52zkmN8z9=-vA6t04M2eYjgVuShYnSHdW*{)v)6*`iEK5v?J<`wP z%2fg({QT#mG2RC6H7R0NDKzu({}l7!ZFKN>n`6bW`Nahoo&P78=Z{Y~F7s_2t-!pJ zqP(4z#o=Z=+?=dztQuMx5Q4R0n;HJApRcNHXl>!){%Sq!&fZgLCHu2nn(_`w@fD-- zrGwrPNv=VmN5-%O%wkr4SzO#aYQH;INkV+=yv-3Y>@od36yVSt+l2GSj~+ryq^_=- zSC9)&^02KdX=$h>CMGs9HLR#AFU-%TqNd!$wz3$Lw{di`V_{}`@%%Z~QwD^@#>Wf} z_Tck2Xy$RnuwT5*v0_-=qy79**I|nbj+}eo=Z}P+v%Gjj4EuQZ2yYV<`dR0d73FO# z%?~$&c$}Mu+r-=ycD@Ba^OY4HS9;I$^32apA8c^%!D9{)141@sS%;*Uvg-*IBYugw zQn;_fqL?QpBV>`V56<To5R#F)@H?2#&PX99A-1uzZfZR`KaXbq54a}v3wnN+jI`w8 z*FXOB6YPV6qFiQn2B^p3SsYMO5@UgR8(XW&nu><HY7oGYp+T(EX?S?JU~B?A@#A|e zi&>bN8KtBp0f1}Q`f%y|v6)96GjD^3d3_@TIXM~J$xJfcqnH=qKQAHzF{~H(c^EPA zFh4N`I{z5YeGeF!K<7i+yV@~2Z-Y(e1qIHtbFkYvIl!=sb?v}@3|mxD-Zy+*K!9&@ z?Av9<H|JJZQGc>oN6sNBs`y%5`E@x}bygNeJUUOrdch^4_=1!yAGVq8tILZ(4oD&} z?__6lxPc!MwQkYT*`fK!TqP=o{bT(6FQ|Kc=z{!Wed~gR7#u?w2kGhQUcbchU3%J@ z;OAZ4U8*lt-JG4GprlAk#^O4Z<YnpU&*|uB!ln+tJ2^fIokdGaBPlsyczA#o@7x1Y zc3Jc_srcW)JZ8R)3tn#n#~kpJla~_^;OFMzINV@ndIHrc40Q903hL`?&`Z3rFfS~~ z&&9<Fr~4coEmk{sV7|?bM~@!j%2gWb=~7WsA$`3Yn;V|Kexs-$XJcu0xS5RfMOr#q 
zLt_JoWCyQw<I#B(^Vf$2_^~h##mmFv6dB7|ZXHpx$jFlG9uWn~%Bpy-NfEJ1c*d0J z8ClTL(Hve01y%w0k(HIEq@wgdI3L~!3|kPsAv_SR?afCrfAoAC(D~yrUtUs3OiXm8 zr{nNj%Zu~Uk{7^uT&gTR{O+6AuYqK4ZZ0n$&xXdjS8v}!OF0=VD;kROvf!LT?{Kom z(%G#o%qS@-t!%8)(o<k;!sBfSNXW@K6;OUY6+iRm@XWVy!8_k(zavvnP+&bx1AX1V zys@cKUQvEqTNC`Q_GT<2pqHB~Y=IiO54I&980OLIq+;MFzx(k$6C;Bk(&KQmnu=n8 znURqJ*ocdXvM@Krx&$vjCj)cEP)LC9Fnjw42gCfp$S~+U))BeAxds%2oJsU7m-Yqo zsL+C|iRD+f?(bonZ{v8Hkd%a7(l?<zG%g#4`-V$cZdk3RstnjOGSVMjSONP93_~2} ziS+2|ZVwL)#_w(Hj@0>MGym?x2k>mb_AkG&z`-XZB-GMU_Yd&t>T3V^>F213Pyzyi zfxfQ8jUPR{zp}h=_;d{Pc7Zer2?;tnJLVVW&dg5X@iu3P$*Fi%2}mfg&#OeqIKCKG z>=?Pqh56Zt(4ePJpI}{5flBV>XTxx2WowmJm_If-2B*o#(+z9O?X3;0bJP#&JTQM_ z{Q6Hn|IE$FVTofI?~CWpZf$Qd(4VuhwhRvo=EJc*4n%crWf|8k#l#Qj{Pn2`QBmRS z^kl3ppFiEXy}imSq)5oDC~O+(m)R9waAjiY&SB<%I7>vqB^{hnQQAJu!pbf#ie)J! zoN?Hzz1<ybLzoUJ87cwd<L7;Cs2|`t($61tO)7qTa^%x7nE(0bU&>1h0X%1W+v5Z8 z;$V07>{)SfF%vVBsF;Y+(Q6+*eth!$Da5J}`eBut!{x#Kdoc8aB?f!!?&VfnU$efk zMvLRV2Po!=$Y}Vrh{<WvlVkDaZRqjn{1M(}X<?R(j1146#xWO(99^9XiVIh^*1`Va zxz%S+XS<=k4%RgV7@d!ej~=}-1gF5<)EH~{uwS5u!aiqarGrDBz_u9e7xdi};UHwD zCgJjR&Ytdt73HHhXN-*vMT7+Kd>uCzI~fBXsJ*aBxPN9>P<Hn>*rVpppC+U^FBg$n zUD7rc5Ef5LN^<;LRr`2*oIZV8R7}L&(kwD6e0E_bHYyC)I;rC`f6N#b+T4Hq(A?DM zhf_Z&%F8}``sB!um>L_NI(-U4Fi=Yfy7LNhSJ#(OX87H^#>QF-3UV$^_Hi8N<p5oC z{CqrMR#jD%Ow5cUBEtr*_C9>{059{Tw1PU6w2WBSjEIYge4cwie-7(PG<1V6xk?-{ zEGRQ{e-aYnl0&muP!=LPosfu7Qu3mUn`>ER+3Mz6Rb?5R>nv=VxwW|t=ZJvd?7h3U z55J3Wa)6Kl!#vo+<<(`>%3>9z_wTXaumXC7w6tVqUJfG@BLIwbWrU8F#{c(U|K;xD zgzL9sV%VXHv4Y|vLP7#11v#w!5egS48!IO#6_3&x9^H#J@jmGtA-TOv+rKUw_x5o4 zpHpX<g;k<*>hhb$ItC|9jrC8RI&~c74Dj^^-~nSe3-*q7x%oNwA3cait%s$-dQT_# zy`SeEFf+b-^$M$+eP(`Q^q4n{efs=q@0Bi7Gb0#lz!(JjA^Ur~zWHN1uU@^peziY1 zz!wfaoOkG<np*0{CWe8*0bs`V(2YUrS2tFpqQgPDNk~bwG}Hi+?vA#_`Pr@Y)%CR% z2;}>^J2Fy}WMxpd^a0?NRTSZeJ0qNmON-YwR-Zk8MoCE_bx{Jmw2Py?n~Nh<PlR(w zfDb<(Hz5f*ji3(oIVL4}$p9ZuFE?jzcNb3tRA)b=CzwMf7L?9|0R~%(^gy7NR5&|g z)>a@r-I*BanOK-zj>_A>4nWL>TJxx?+|${%wXwdjz6uA#+{6e1A7&ONEp1J(pbgCp zYv5k)-8F&8k%-XF+7jBp$DuuR=RjX17;FMULQnuFds~Dvj0_I@6?O)-&{9YsCN6qt z<SORxkHkbrfc1qguOKI5X=VaH9Mc$f-pk#Ul7d`VL?}8wHaIj0ed7W;_{pQg*Pv&? 
z?lsrfNL~;Jt9j<k83}O_A1`;zuf`k!q^BGDx1(a2!iuuN(P0Q<|Nr*h0xYhqTN8fn z%s+GQ?eFW3A_)+PK@uWF(BSSAMd1`sKmmmoRRx6<?ry=|-95Ow1qqsjxLZd$>2%Mm zQ->sUe`&kj-~4muvRO|(=N$Ig$KLhsckMd0mnG2xo)NU8qNHGsHwI5(-fah}vjgl7 zr@?oU|E!1@+aaMnf`&3y;gp#A;4DxeflhYT-X5;9(&C_P&dK7!(#kWcMg}M5`};dF z7>#}V_nj3uLvwK|EzBJs9fG@o-jkWBiK?<9I0Wz^unOoptPTdkBEJCN=9c<*Kl~6C z9t`{dw-Na2<BdWV;5mY>6TEQV-oXa?IBQG7*8egqxIhJgvwvXR1|62Xy1Y<WQV7wg zxWq-c^Kj-65CJOAojn5r666Z_Ag~|w`=`&GQd3hw0B>djO01})aBY3%`;TJg@zW<A zU2ULtw6rvi96bUY2!`b7L8r8ZCD_8r0aOeJ(!yu}_H0Pj35l_T!+qbrdiDPOI}i^b zkd7ZecI3z*&I1QHI1lV$W#6%TFALj#4j#TkB3Rxt7a@9r8@^*V%Z^>U_w3!n!ph3V z&JGSMC?o`rRY_U#^yyPan6pd(JU$t$iinEXQ^>#mz73#MOyZ*0MKP!kp&2?8FWeAL zE-pdA3u@}B#(1NU@Q{w~4(K0m-Mib`-fCc^54Q`c!@N@T8X>xulb7Y=<Aq3;X*`<- z!RA38B)nBo3JweI8|;1d62W}$K(`0OLs~|P4~bZz2G3T57#6N8FeE6yq|iUeA0i;Q zUN|CpW5F5WY{kSco;rOJDjz`n_Dk39LPjt@G6=9w%grG<I|_>mA3Aav{>ydX09+tE zEoc*N=Dz*=kDoeyR!HbD@3H+SWcQxele7$TyVPc6<H5e4Yc~r!D?0}Z$3bxwqp<X< z^s3?28+UKqSOo|HRh>O207u-<yb0(e#+&zqV?Q@HH=LoevLfKe$i%?SomO00w6ccG z1l80~13Yl%%xONpqY%@5GPqI5wJXVcc({s*ikv=u5^m`~R*;vMjf@We{g_8+{MEM; zQ=@qWIbPnLX6B}9YRbSuX9P~e86H1z93UhpbU{{5MpH{2&K3XzEaB_tU0hl?J2(Ai zOMe7MynbW7x~3v1BoJheuAYvHsxmxBNht}`<Q3$A+o2JhF4oXk9~eG1F1oYp@}ox& z(eC>Bw=ZiOYG`g$6TG3MoYGlQ*?ou4e8YZ_h5HoyQK7@a7(N-xT?d7BaPq^(9c)~j zJUoYZc~6`^eO6FVPD#m*>=c`v6cQa}LNHTMR+N#KlU3L>IR$x5ZH&E(b5Yf=y?X$d z7nhXK(bd7}=_)8Hh+Y&G5fhP=k_3nX;DO{_XfAn01=I7h$aLIKpH9q7B&8;k9PHH9 zRV5@Pz?UT@CDhbaFxr|}EXKf4U*Awq2l_m1t*xew)iSp*2h1~h{>7`e-z_dL<mBhL zd%1(e)deMo#b{wPHMBGk(^S{cQpcO&L&HOA8|!n63OsxmhInIXS!qDBu!yh(+*m~g z1!W~w4K;O54J8$2xY$n@E{oiR&1=stD(oE@8kwG~sILo)jwU+TY3t&Y)l{YBWJSa; zo);E6eeN9GOg?^oeu1;X(y}7bGKbHIvhypm3hFD_#d@Z7<A{D7e8Ox8&+g$qsjOoW zkx`XZJ^cLTt2b|7gR}sl3-<@aDqKFeyp*&g^4ujQ00VFXVIw?uK!t@B!NZH5Uzj&L zKlAGKD>SZLyS|!uDc;)F8Y;jYgPNZV@D>pnUR6^GWDX>`wF0_eX+?;NiI|ue{^5_z z3=hy<yS~=gR2Lf??a6R=aIz=bSiy5KGsgp^gTaD@r8x*NFK>^uj1=JWo3}Rpapovc zIJfWK>gsM!Nlo$#@S)IL>>NnY6r48G2$t5CBs=6KCg={zDoQ3MM?qYD5~yfT0q5`O z=}NG+Rn;*&E~31fM}UK0isypbaT&AIYUI7V5<7X$?mTd6C)bIi{HIQxzaSzZDXXla zYi#W9=bMq66Q7a{@tUb60Z%YDA(-Po8qtpA8yr~E+^qkr^ByQDDUh5T+&t*cR2PDk zC7{c|*wDzt2+rKY%OkfiZ*+3(=KcF<ss*6>?%uBCv}8|jPm%+PU`a3|n1Wf8tf_7k zA3tw~j~6UBk{wZV`nUk}l9`h^IWzJ6+i%}|_x{QAXOpv2Rn-+yF;UPInMQVip4-tL zyvf#qWb0t(=HZr}ojEWv)ITy*P+AO}PjYfJC7A0O8sPMG^^6S6tq69`pi#({BpXu; z!qyAN6U@+SFR8BT9~&9FG7HGBZmf?>PNMrTY@Hm8%uF?~SVeVpDFsDQDd`Jh7X`&7 z#TAu>W#mtYt7#fo^9o^+ud5v5=t*tnZb?T3)j3Z~TR8j0=C|b64x@Q9U%q-hzj&pz zyf`u{+{4QquGEHPV`Xg#4;?@b1~lmCY!CQJO-lwzyncP{(=6`5mLM2FHuwkl0yZc# zGGeYS@JXe)`1$+f7v{EiwB!}!1_b(jsz61n(a0eA3X2PtR~G+h(i1rDqbCmmgpJMh zIeFO$iE-f(p%7idQG$Z}VL34|4*G)Dwx-3Uxfd^A{8IC9+!x=zggRZ_m#eDFb8@o) z|Cf^Dp+RtDScXPw>#7EZdhgu3^Rv0@VHIvgcW-A)YkgK;fxTy}3ZA}S5X&cHaYmhd z7UM2#7|11v+jCrY_bH{_Cl$_0D9EX4V2$wvdj|@`J0U%zwxy}Ot~MnrGddw5Dj_~H z9<k_z_^9}J@P*ux;-1kFLqmN~?7CPCP8$Opj4WU=`Zz3vb*yZxK&?K3{@^<~1^F@Y zaY13h0l|Tx5n-27ld5W~$EPPZ?%oBG-^>a4;>FFow?`&MYU*p!vod1iW1^xX;^Jdr zxw5*V^>Rx~TT^vyRY`eiX+<e~elqCJfaGu7-gxnPGxNx!XV2DdtPhP0G_^LBS63F6 z79q){3iH9satiW_D@s97Ev&9A-?%Y8Gu6=AoRO0i5fkMb=+E%?4G0a6NsLd=&B-jt zPfSaVxs-rhemrv3aP45|wYPS)4^K~`bCM5?k2QC;msC}zWM>6LM!I-0Y+NX&wsv}E z1WkQI6|Al@7AK~xDWdHh7@H9ilg4viXOAczLrJDzYBeM$NaM)C$z`dPgY})$TN&@x zom-$WT3Q=Gw`63cB_$^&UW$W9e<?XJBP$&?UcTIXb!p-0vnM~(A<tespP8M6|JK!4 z*D|e+X|T}N)-*Xiwtj6D)X3%bmb#Ds`ebdFo2F*QAODUKm@kjmzx(ddlSklmQ!^7F zv%7mc0J`uwIMfTx!f~PH_rLtp%dMCH;>C-*_wGPb;QpY3W@aa5u1wBfodZ<gfB4|l z>(^iO8aM&C{HM<zuV23rp4CENWb!CF@N3Y;Ou|J>A}$i*k10Ct70}u(Vz^t#@Pewb z4&KzlnL_gq3cHkCP*u}CGzcWt*3;G0artM~+}Y95-#0cl8xt2JEiHBG^r^!~k01)0 zDGSk0&~kbPdchIlbuCSU<0BotT`ldcjcqM$otJ@N=av=$@MvaCG_ei}`@0Vx09=8n 
zheikbhx&#`2WRGHR<Ez!V*a4-`pp|_H?A>l{p064Y`pW}J_N8^6Y+qyzyIjbjoUYu z)>ju+uFfqj%q-5&EL@qHpS^N*VQGC0&GNH$^XA<05}aOJXM0UkLv=%4U2`Mgxoe;w zF8?xe?d`}Vx3_(~_IBpA_YO`@Y)v~dGCc*zZ|vyEDJzdkN%M`2q51{dx_X#7IO|(l zYZ;q}Y8YwQMc1~Ejm)h|U@7dp@|<$Ey7Ww^s9H*7Wq!kWcHP9l<jT(&@6C5_0VE&; zfOzNT;ki$Ofs9#Rxw>)d=F?|SzyH!~nIKDEy?G73@Ziw{Fp#^bJ$U@^@zci;?g231 z6Iy0gK>z#T(c=dX9zTMO@UK6a<>!YVegx2h-~qV>Y99=iLCO3FnuVr5eE0yZy@MF_ z{d;KP_xC>qO_S8vhU40vyqca8R!O4f@fQgR=XL#A&g<?J#qSg`JFaf8ORyl)+}%SX zqtmmBYU_GNM*-ENvooW!)5w$k#|5Cc$?^R1GFvA{4UCq&l7fPgqP&tK=wU^sBnD9w z9uox+T3B0Mx_+IRTMEgT33vlZ^C|uDX71xp=>lK8eEj_RgC|I)d)N-EFJ8a?Np7jP zZ+|73+Lkn$Nan#u%yhBLyoAgw$&Vl1dVu5yM^m}MnXKKq1*bH>x-zvmKRG}5$u{%H z!IgfpD>KY%pZRh=!P)7y{{G^+hNQg0(4<t4&`2k5e=91IQ5|E97sgoG29;e|f6zU# z#;-v=Ac|)>rNOP_r0<?-A66BS-&fo^vwrjbFMag`&zkw%-@$W-g+KJ@|I3!n@l~(n z%jXROcvM_3T3!+{j}tMA6)}z8FJin$&|sIaF`Ft?%Y<M-rcwh!LNBFcl~uI#^dh<Q zn3;68CR9Q*rlA=(`p3r#%F0~5Jk5!g#%89*=9>nQubGvFiyJLDBfWE=ck$Z#+Q+o8 z8$V6_{YBU#Y1Dp(^PeVDd-aByL+#DmF9IHRM|+K*WJ!LC90<*i3@3mB9>M(0n=2a| zh)Z5u2jpK_S)N{8L^2~Y(`9aEJpZ`-<)pF9{|-z}G<0<2SJx!u<ORhi(SyV7J$)?5 zGy@_@UEfGp*Ci~c>(-NRvug&q<Vf<?DW?=2*iI<(s8Ni)a&1E@(kh4EzJ0e1)W5K- z-g*>}(Isse$*JH1Xcsn(7Q#ot!eJQ_%ULYI{@;}8GI~~cI|m1bpKo*=kX2=KEA-%- zk9+RQ)~xrRY-nntvHfytc2-n;Tv&8eSWGlnIASrd1-92VHH=J6FcTtQU;Sz7z|Dln zOxQpCY4*U6$lsC&`SqJGrpx@8BKGwsdS|8!-)!X3^XDkqnIsP`wwXo>?Ao0>DBxFb zeg^#P^3wF;0t)zFN`U*bte~HgOmz$n!W{#yh)T<Z8%XsJwxxTSIg)iPh;mp15flI7 z=E+CjzV(T(+$U|VMlQn8s}6~ovYt{usmn0*%%w(E-+lCa8>oL_S-O6Y6jG_}lFFgz za?UVR2p@UDI2=qs*N^R-4#58ZQ}#HaO*XK!0(s*W6dsY1o?BVn)`y7pU&@k+u7Y16 z-_G>+w)b_n_xB*5eciCyJvh)eHaa#tGktY&ftd%KnI(2>TI73xJy03|MKlliFQvSH z`pbF1KjEAyiazfJk_K7>yU(O|1n>_ZfS294d+)QV*~0M5JmAcP$n*1{YJM}(`A_cy zljJ))K@cYA7lkAygRpUA_*+rjjBOos@Mh;U2%65RgI8|cefC<L6u~KNgK@3YcCQvR z2<DYAV?C{T0_SDund1^!b?d>?ZLI!P<--p@8c|bqXc-4os3)*KLM9RCjY7{E1fMqw zJ19kDJ*DxlY7BOahc=!-aHcr>1O&t;rWBSyoDcH#ivXCsx;VSCGJowF7%ZR(B|i!P z1O~Iq%d7Y9uim+LWo2b<Wf{TzwQGQ}wSUM4e)GYDJBZGC4B$kwXKs}`|A_y8w4YSl z6!1^)f2MZ;Jfh(dNptW1CrJYihyZ>a0X&icd}(P4NPb~{bneRU!1+&>yM~7=TUv9< zE90|s{bLhc14HdR80JnCeWI<Vq47~oGC87pVdL@i+WoUyv=eGHoL8-~W1fJXe_&<@ zpRoSk^STmN5e6PPUWxVZ-lJjZwqE}WifdS@EHRo7<H@b!Drg*j)*whgKL|cgW4$@0 zZU0Tn?{64iB?EI4l0Ave@C=KJ&CDsRsp}md`(lFohUSK(q(uJ!Kd|JKq_(b(+4Z%l zr6sfiER>d)Xlklk5iKVd7p|<Xf^ztnANjM$*{XAxu-{bp2wa~%`%L@Kzl8r6FSnw7 zQ^qj;<rBT54MO06>Y1$!k4hRipM`aV;lV?vP*ro~w^QD4-3XsU(^E}7JrKR76_rG$ zW-!8|$v%PBG<UqclP<wZPRIC|u1|7V-^QbtWv$~}GS*V2(Ktq(7(PbUBD{J0x@SZ& z`$@HZvNmc?iMsU6q|)APto~J{t#9U>p6^+M09H9?K)Zl`;A!1}Q#d~`eqH~6#|Hch zHbfBTU|?Zs=|ZFU2SXp7UR2uD-TgCFGqEr~I5E!8e`3#`J-eBOg_VVii&I)!vbn7l zO&~jUbx}az%-7$1y<^7?8(V82{3|O<K&L3%GeyqE&oYC5)HzJpKYn!o$z$fw%_isH zXBz4P&tJX%?!yn?egEMBkom`dJ^VO6!&EYl?|no&O7A}lS~oR3li`^%9#J(@3-jn$ z4LB^C4ElF!o9^M^s#YNRs>IxUxM@iA>g8+ULNm0n$C{dpXqhUJ;#-E69=?3*7?8VP z%38yvSestOkM*?;Ds3EE>%4Ny)IN;ugetc>P2DNcfSy^`J-rRrzofkX@PoX0$XSEH z-7*fmSl=^x0Ve_NIzA_~z4<X-dvyZ-MK5B%z7NL4%+%h=-qXiBA|^g7x2&P5e|&r^ z)uWh4ld-a}>;=s4+_@8$;ggS#x1qTa&@wVJedzEZ0Q}cqf31zhtloP7xB~sKu(}FJ zLkRZz^`D3wrlEw7ARmowKaT34w#od@pD(U1M@L17Ulg^mwtD^j4``>biT2Oj{_r2^ zef`$0&yof-{Q4SG#xKn*Z6>=%$Ehl-E6mJIWn`t>d%ECae}(O#^6>Ozb9XmN^3my8 zzR~d%zaTVv)w8nEGBoDbbZ`i*oLIiQa`)LqJ<lUbWSnP>s#C#PJ^zH#;fA5Lvd+bU z=`}$qJVaH;b-lI7$#}1vxz$_Si2X|nEja(EHiJ#xd7qm5DV#4q*88|N<CvBwpQbyv zw)a=~XcqHWc_V9mWY%;yT0lrhVoG*dMPp~z$jr>At{lbuQQo6~c3^rbDM@v8)&2YT z1MLIsMMZ^yCjs^}8b#pDDM?B3y2d&HJlOpD`r@@~An!rdZ#;YipJ0GM0OUQu^UGIv zo;*cS3JRYITEu|BpTB+k^8NcKuU<WR@#5jL=Vt{@@7%TXZ-4(gR$J@GAO9Bs9#!lQ zU%Y(&-TNo6-#}>a3F=5|j~?B8@DL^WyHB58yK@&&S<u&j7u>zOa{D$&FDBq;;k1iO zi#WMBzuEB(2M0&b(7@Kb=db{)bJwnI-PkoWSk=-7N<JYcFZ5ChM6V8BewGwhBZywj 
zEM&39M|FJCs|Ig9`8Kz9kVBd%VG^mws1r4b)^tdy?pv<zT`F&%d-wiBLrWjm8O>dy z_;W@98qSxT!^%F*ez{%#Oz9q(JETtMQlfmL;KHZveN4-PSJUID2K|V->tPitpJ{}E zbDj(-RMW`B#Lm&)!y8mR1Z9Oa_3Z=w2<GQd%+C#sk0F@fvwPRBUH0~NkKVqwvnK)7 z@7;^gerQ+-fM;-Gtb4G(Z)6xM1An%4v^BT2w0Czb-MrP_(-{#F23i7u2jIVc_g+?3 zI_%Zl(geugxQ`C4+ykIKdGqed)vGz#nQ^gEt!*v09^CIA8h{=I>0Ea0)YMSBcKhbS z$}-UVvp3&$_H;wDX{pKUx9{A4{u0If?81Cce{WBJ-`cHP(~Apml(MptjfamXuFPg; zX9NWJ!Iq}hCWyq51m9B=VPT<Y=f%UrT~=P&)_HkwY7&jvr<a#fQj@)X80i^laDh`x zR|h92>N`3LYU)x7iz8CgeWT()6WX|WA|!8VjgY)LQGt{=vT*b6^EcY02rg+GEWJvT zT6zq_@JqYgIJ{okK0h#X4UJfX!ZX?V6j-FJL`;KK91;?Xy0#JfmlPGVKo)uDuM}K3 zvA(<-Zb#JJ4yn=(sZw|polfAqRedWALTWJPBpkuYisA;{bZAm)dU07*OX~o0tRB@i z0ApSz&jaSIZHUWv?zXhIaUI|Ss)sG6rug-TkA;LTu(PwVva;&y>w!EzbLJEq8|&dC zhl7Fx;S-iGcedYt_M)h`@Z8xmd-ftb?%T(1Wko>Y3j7QUW@dQ69=Ou3-MbDQI+&i8 zd=N<vxOXqu-o5Pm*r9zB6C;rKdb&FM_V0sg(Dj@;#a~ohaPQf3Xu-qXjg5^B{$+1Z zI(JS0w3d#Jc1TDNw8p~10$0FH@V8&zKyU5-0|f;+7G{C=Sy@^3Gv6@=*O<S4y?=aE zPEKYo^E~1I;^JbRy*)i+W1#)A$|^tv2E`|#LzI>-uErz>teJ%(&X85zjUHQb<KfHp zvE_pb_JTNHJ?}a>oAitL;DXjGwf!q)9Sb+^eSWz`P1~7`UyW0pDrFg=?ULL-wX%)a zKVK5E8g@z9eXT&=We{>$m3l~p!lO*)QF1<@;BZLU**>-@zIw(zGE>tKsNTuWgW(++ z8<&}rTTu<cEIMupGGHsu1LhYuZU6_KK7A4(2lT6|rUICURggL$=`}UguRnTvhPet* zKfrwe764BD!$WoTwdg8SChb9;h~nZ5SOvnjwzh)S(2ZYz^9@80rKLpxb*PA-5)iv* zHz*ukT^&<XlN~!ZYr;lokei3Qqo?cf>$f4H!LSh;KX8BxD!};|8X1_InSS-Rzaf>; zvIh}qDQU@4QWD>M^UYpn<8UDK=;`UH!0N)ng0S!AdG0~lk&qN`8yL#3s!7f-3Qtb+ ziHxNNgpg1rZ;2@Rlj_!ddO>Ba6Hi{h^NcBDm#|cK$_FtoXc%hlonJq=TG6%GJiNN~ zEtD$@t49QJyM+w-bi5R7W6T-Z51zi*2JN3LuU@}CrAGNm+K$UShKEV-JPM8n<n6iS zNc*I%jc6$mWfO#`W_2@rT>>(I1J4l{EGe(Bw7#)xco?Yuv!4Hmd7#0$^>q<pp<T=Z z8h+*8eF1^fs0&~)nt*w972t$I8)$ZPX8PhqF%<C1N($cIUM#FEusa08pukH?iU4@1 zBM6*1jnUE|T3J948Xgt`R1ce>HEAh{tju%<!;@JP9D&8g#!6H~7{T_=o%(vZ&)<Cr z4Go4>=5hC+h{fYgK(s3;$oqq^2@O6ga0YRcT|42lYa41UEeXKzumu?2-Gd$+5(L*v zBH5tlDK9SzS_u9H6~HO{LqoDFs$w&<17Z{D!QqY!Kj<8dZ0v#L6>!F1E4$*|GiI+n zgy>ZS=YCRy4oY6dF^^xHkx)9+Ft}RQxo~yk=eg!Ryc5}Z6<B4gh4I1i*3n*Z)!U%` z^TokEX}7T1Ir|iTeLqean*(yB19El%doVstH)(nSzjK}-!54$7dQTrv^&nO=OUrB8 zE)PtOPa@i8^eFF<&zQe)128WpCh`&U62F3Z*h?FW=^q>E?CU|f7bH!4Z_m^3z6Z2J z9q0>M+gr7?G+<v?J%8>jX!~dHzK1v$r2fYJhfob2EYij4z~$)a;$RtS0(#$k_#rqr z5G8z2ICr1DKrzph=(}WPr0W~&2gXLASGxJ^<@%$?OSkU0y3v?izw6}56RY>`=N06! 
zva!NCc-Fwg7^pHZ5fLGzZ{pbB(%BAvq^zuno|_TgB(0<z<R2qE+Qm1}*3HYz$pw@= z#t4+W4Hw2QrM&my%eM)I9dMtO?K1Vf>jVu#<t!s0dadeNs_S2Q^WD#1jC}Uu+cOu9 zSOl~WV%()I!{u$Fn|o)rLHj4mm8;i|O54afCL0HqA67z09$|YW=YuLPAaxGt2kz0J z{|)0Uu4kibZUI#9>>KD46&IVClUG&U*x7}|dXwW@^*msHc70t)@B)f?85t?i#-A_` z=v{yG1X*Qj2@v++Aj`M!#3#g|LIr3OWQ>c8GcVs!RAxj)MWVX{ErU3@_u|{-8#h*O zZd|*4dwyjZJv6k8#bUmF|3O$t5L$*+bu|@mjjXJUPeeVvFb`2Js$W1Fjm*p}Tt{Ts z(BuS(WDC8_!9xd;U%xVg*5k*I&8#eE!0sP0-_hGWwX_66?#R(2a55Ym`w0YdvWv60 z_(jxBbPe=kv-2Tzh2GH4!yE1uLh^VsIUSSTDl{{XjIpIVS8qO%H1s~EO4IYIRdvcg zj`4DjZ$yVEXV&h2@%z@4j0)D1YAiA~Cv+JSgfJ8LjC+rsZR7S&mgv-4hlrZOuB)1) zXfAmNfI397ht*w?F&qs#`#Id+bGj@lR24%rV_Q2S&7I;O4BdBPPJTgkEhwD931$rY zNzdQBHF<UM#EIi56fqbrpwO+DzxMFSr&S=yt2b`}?0x-wPzDCz??8nAj$O=%654d9 zyQ08mV`nQZD_*~Ick$Xf*viJn?9w87XcY4Ra!@|InJvTW<2UbfbF-kQfC>Q7;i-vm zCg!0{a7kcna9B9BhK3q)yq!Cs8aMX=#77X!^8w~FKVqKeAWvs+_w33_LsJ7VJVe=_ z&l3p{kd8)62OE-<Mh^;e@ba~yy5a4da0H^do&lGt(|M!d>du)*-@d(6+__)UO4&Xe zgw1KJp8_$mqU&mP?-FR6ckh33E*EV*D(e}{epSk8Js&Z%5ZB1EZP5P7^6tYA$u*Pq ziI<h!au2Dw9#*3P+IcnI`LsOvvEB#72`pzYy9JF-suDrfLr`Yt&TtQjfKEK6u(+!A za(iFjR?M&5y;oIRjSkTxTdb|t9z6Vv`7PQ8l{$+zZ+^l&0EwNQ9a=-}=+PrbjvkJP z4CgzB2pB+PR(9s<?b{1$tH?|E8#gBBu6)M4Hs<kn2<Fk2VfDeQ@3M0~V;+1H<Tr|W zF)`7(YuBchmveHnSy>TO1KML^qf^t8p+2;L@_c4?2HO^%hb;{)jU1dDaB0vZFi#J$ z!^~fN>zf#RN5zwU1CjAbCl>=EN!tW3g0c9Y%5+LtF);k~2QQS(1Nqh6K-DPNW*k&= zqbD>rq9nim;8z|{NqGw^zv>=oB99hb*f>y<5Z>B9yN%mFSyF1pj@l&eCnj_11@dXo z0qw^yUMI8}Cvkp!sx+2!I=h68_KKM*>f#NEwghLYqnEE&cywfHMs8(wd1DhieH8Py zw$}IGyf-k^hvmJ<<OwVpnd!jf0P@|NJii6=I}zFk%+IW@#3#lh<N14bgQ{=sXoH?& zWO@pH%WHOd`P51NT}bo0EG;eWy?niR?b_Ar*XLJP;J?R?ZR&X}R{Q1q58~oth_u<Y zOIB9;!Rzl5FD0N~6F`^@G6sTB6!W5@!r*8SZab5mP%Q*u#ogE6W#=Mu)WDWaZIhX{ z=@C5K-Gc+OtE)p(li+1g0mM>Cb#+y1TV_ewrQCuDWUM9zdO#oq3o;c{jgGm6vaZn% zWvZxgVDsSOgBNd-N;*M0sygQBcvK(N@KmymEohkqk_UVF=DS~cI^TZ#ivOa~-g7$p zRa}l^JpuL>UYXmV{gY*S?atqgBfnB~*{MY3#V}4_y-#3$PU`rc!ug%i3*@|LwoBM} z$3;s%Srcs&QzIKYB9-pq7vviqmzZ0SRa$<8iFrU1NL+@u7dtx}03L7%>KdR0P}!nw ze!}x8<^kbI49k50X=KlCkR_m;0djSXbsfE3&~bqLfYyLrIXO84g96&SJ8J8yudFOz zy|HoT^eJRqojIN}Ix}ryVGhlL*x=#e8J?W5v?QRA2DuK$4G8f28Ri|G?9sRbeB{o{ zH|`#E^t~a#Je)&eNg?n#92Z0vo$lH<Iy`^<8tjW6LPJx%pr$UPv?3@rp62664~c*n z---gN#!=UT2%+m)O(Kg5Js`Or7=Hf7BWYtFQ1S*o^%7=ryjq@q>FvmHO~=B>)jPlW z=A?;*H~TTAy)rfjG+fW<`N&&_U%T~S8@GS5XuG7aoHN`lWU^1n<`mBNw5~tc8NGmW zh9P{)E_=`8Sfz;U(zfSROmOA|Q+p>{H!oUXC@7rx>|E&Fk?}f~z5DjDqr-pzQNVD1 zK^|yeK<`$}Z^p1&Fh6z$(baNtGT-d@28I1m-XkYZo<P52f)Kc*yaZZCzheS#0xA{} z5dy$3-MRxIB#3Gj=I|r<8qJjo+8Z5&KYZj6CnpE9gV^cg>%IQ)kzauC7R;}KGlFp1 z&5VwrO%VAY$<QDdFh9Al&^J1Q=1@bz>pgo;o#Kbf0lz>4a5xzVdr$D6fW1zhI!O%- zwRZIY?Wa$)1!hptG5SV{qU{hnIJfcW)pwq;6&%twst!5Y^s2*Z?pSJCN&EaqlK<_W zMCBCJvmH~~DPpoum2yOjE}-ulnp(dN+CN#wudH%QS^r(aW`~3ghpYpj9eukPFn``C zluMkjTf~H0$%$LhNl3#2XO0Mak~_mAEGj52k%xyHeYI>C^2H4@rXeXQURYd+zK#f= z0ARrL*I$3FrmlMZ@l#~=o6S{FKIr$keIrB4DoXqI?Sn>kFbmX?k&&9eb`3ltAkYt} zn@Pz#zy9WH5Ey{TD{E^RSsCDQJDGY0>W72|UrIu9q9KjI-_D&J931-kdQ(>yLBIp@ z(Pjk&&qJFN^K(;IuPP}ipnU;shwuST9u|<n$?-Ah2p}E-Uxxp~`NL)QkB<&a&)`k* zsAug&3LIqzh#w-=4OAZ@<MHOox`z8zocVNoE7~USKYyK5J9I?Z5&7bkQF{vOD{L5$ zP&!;cuv*%&IJ9v4_dJ~sA3mIwGG#rj$*SPMrA9rj?I~vwcJ1~y)&JRI?ils8xb*=w z`W_j3ZY7GKark+ounWfFLZ;DY_54`{4LIfOPH4Fu)o>HV5cMpqAX0Pi@`HHRh+wX* zt7Bkn1Yq#+bgylwnZI#kYI*bhc>pKF+tb350LljhHn;%jA7s@6Sp^9(_VE?X`5PN$ z<)sb|c2-172!|oCt#7Iyn?v5|1&FqFbx>&(_?MNHMR0J?DD&lZAl{btHU~#LD=SNg zCK{R>pyO!o=>nl{X=Ono*;dz9EnL4bHg{!maiOxN67~iC6Brx-f(kw8<l-WnvxNo0 z(b2xMzt_*-2N{GUI}J{ZGY59o(lgQk`5=>k;oAoXYuYYXv|f%$OV>3t(9qUa)znbO zVoj`V2xO|Ejh(icg_@qhA$8|}Q=qv<lmOtTR_}`GdmU1AhR{{cI`yDB-6guNVQ8(Y z`)Y0f@~iKD@5^ImL_c;ug*{SM94ZtZb=PB9ulU@SZP5P7vVQ&cF;xne8vU3yW1qZ} 
zfL@>wK2pdeQV1U<Y94=7(P^(Jo>!fAUXO7a>mh?98dzC_e05~_F(PB5GP2W4D@vPN z+lNPnXJ^Oee)6_0;2by;AdUQxWOinDZEbPm#^S~X#FeA7(_3po1*i!qMZeG8`uZ-Q z6lsKMTVH5Lx1+nD^%3_$Yl7=Bu|4|Pt>CyfW>;6Xz5@&g`ndZ7v<xQ(TR#0@1hg6Y z3Vq+?RCQZhL3LevQE5U>URX+oPfUVaaD<~zAd%*7LUPnGx70K+Jf~&zFY+`sVp!wg z@~x+@>;v*RWo*?PbFuWw!|Lwpj)^&qGqwFIrS0=mtM`8Y(}_taWaCrZEsEc#Og^CE z!mH(O>YBU_+CN{ii(2<7kVWvJCv?2I6kSA2qfrwxj}tZyXA?H!khMLB^OC>^3hMjF z8<31`Nle)LdxS=YBqpPS_O+MWk-0(s;YZ@X_|xgRE5G@JyI=YJ>FDpP`LhbYw<hzo zqQR-DmY&{<=GOe`+O(pQxXhf;#AMH~C}g09`Slat&Pmt8TGPNtT-*G=E6_v@d@^fC z=Qke5=C^Yz*o&J)8u~Vz(+%LqGQ#ruK-H9WE;f&>z5nnB=6Gpr?Pudx+bv|ks_3|1 z*_ltv{gk$Q|JeLCX#adMc8ocW^Hm_lA5ftk!!X1x5=G79M9kwQh$(zZ&U*w64yn3G zng%LbM@pOeYvPF}Bzr3=-PtDqg?(~<QEp{5@L~7x@UQ=p?svMw*e^~vaOLOi%>71d zzv5z}Uup{dQP{xbL|b29MN@NLRZVJ9No;0LNMee21Tr$YIZ$Koq-SLVajOj0<X@Ft z4{OjP^EyYa-tC{gaZZQMr$PZ$BW-z!Ly>I4$gk=_;@0xc#f^u5?1izbE7!TsYVWzA z$D-iCuIO|~oyMa|^@}OmM(m$2H*Vb%()CudPn5Qb+Ar^PPCrQ8DjAS3VVMM<oML7y z7tI84o~ooMO~*KSGaoIy74!R9x|27;_Mu6siMa(i6;+kZEtmWHnUmUG`2xs)Ivdj1 z@BW4L=wD}JSAN?^ddJ6_y1OfyTXHL^QwoY=GO~jck{J;()PN9s4{rdxDFS#~EZz*_ z);%iJT?#JL@UouSn>QbSYt6{ulC{&Om8v@BaVU@_2;p&P=-R$8jD)U#?3dk3D>t|V zFo5~p^7gFq4j^y1RVZ47fNjM7C8eryK+Gt>+`sgqc^I(Md4o_X>r`>eWNDkUi)OL= zK~hRtiWvIpxFj0T)3xlvw9M>`Y)H_>1Mfjd8yI&fHZwcDxU8_IuD+uKV1p<GgkXQl zqMzr<`T67_W4{}vpR~;%n~~$!XR2X-_D_so?(c_)wYaV!tE>VXIU*%JAT|;Fn&KA( zlE%V?W@6`vyhUb=SH>A~t2*veaRtD4PH!yTeoBidIi%#I>X4&JDdtwDiI@cYrnT1% ztN_EePh9){_hpUwiR{d;V>_<AOW1gqyu)63drl>%14`sG8ZNJ2z1c?WUs6I6DlpE; zwqey5i~>0noW%)=($?ux#8g?kY%$|779m4UIXgLv5bJ<^qJI&N9H(VwV?eaAAX6Q@ zeBA=W{9@uGQ!|qC3V|N0T3avo^&x~m_Y<7|(o{Krh^2oq9QZu~=1cX_X1hm58aul{ z&wzkOBUb3Ye52y%ArWMs0FpZc9Nd`X02&@7jS|jazZ&H$MOS)MdB@bo%H3yvNp%P0 z?JgRH>3P*2RB=6t^Kpr;uk5~B)3@9{z47ABf8>XLdUy|ZUin?(gxzv>dt~i66&+b* zNQV^dpKQNW{}+`PFTT}tjPbhEVij0+3g-jytdv!XtZjz0O}e~8t^mekkD&e`6&HP4 zl3PqAC9)DvPr?xF^eu@NWSX5jgX$j)iZ}F9N<wxnkYq`HV?$>rDEy)6sb4|k*dLip zX6(<A`7edT1Ct27Lp%cXo?Tv<Tu=mk7F-Bi4lOVg^qY;FCxJ{gAvxd(L@gr|MVukK zI`x0Zx#*MQFHc+thL6Z@J*4b(9?Q`6tmajBKdA0z;a^hFG7G9Er*0xBwq)bxoj>|G zlF};J`BnD{Xz!4=*(GDUOP0hYXa9Ezi^EFx&z`^BhU;HddWL39D9M*9#tc344r#da zsJP16Wy_K><w%)|j`=5)UG@kWo!9fW^2-g*Xz@#FvhmBoS~%zvZOoh~wsbGDZy?A3 zROdkG2ZUMK+}hmTjYyr}6HCth$EEnsYyRrE(5HDg!5=$^h8jCNLA~c!)d0OG=Hy4E zX22yd!lOaJLl<W4>S69gF|q}QCu$iQ%VUl9sk{EG99hRPw)pb=)!R=qYX*<0Q%-8S z8~Qeg7>04FP<3eO={4g3_{NcSzt{@UC;LvR%czlD++!<iJMKUH?U}B#^Xs_<v=Cu) z(fk|0JQMaiq=?^KG(W0n_w>p3tHpn1iOa0B^vaDd8`UDG98jSN=msb{<pc8N?Q)bH z^N+~cvq@NpoA}eB%9G3cqw>0(L-MsuiGT+)M>4>}k>O7d4)>2u0G)Fww*UygsJ0#y za>u{`I)C-gU_bV|gV|pktDL*?dv^IWuPiz)1Zamo2>cPH_nh)72v_5>a>9~QL8O7_ zQkg?EL@M3f(Z$Hd4j3L|WFmt#*{$aGud-x4=lIm>iJs|=-1_0OI`m^|uKM0}GM1P2 z%R4FCCx+(s*7mO;!lrlW>B~2bEq!{Xo}2<&`wq*nAC>36fRod(BiIH6gl6OvHZ)%z z9GRF~Tff=aJz7!ILHACSQndw!XFH*?Q^fcyNvp4=tiA!XOIm#;VF8#IRI&SUdsgDV zy7)#H+Xt3J7Z0g8B<_=U7Qp!{I~6F{=O{Yl%GsuKTr_8sw3IRp3`}n+yF8O#HB1T3 z!J6A>n-L(sv!uA%dw5d=Ll_Y;!3oJR8Cf6|a>^@98=7h_w}XV}A0Ho?ncjroM<|Z| z)9lj}C7*g31nnau%{@I3bd=OLK=gsoJA|t!y+_2j28BBN1lTbJJl@{Pz?y_Lv(Pj& zl+iK!cO}=o3NHFCiSZ@FgY&m*yJyd0>BrUFK+;HAB(ljlisD23Gdin!ma4k1mUS+y z-g)|ItmfwJ`*D|wb@3iN=XC)5`wmLMC)+Vawo~dH0+<8mvFxWc+4xo1`4rfWDY2Z^ z+9`_vO49Og5*Da^B>~HpEaz~ZzDe5<{b!WGxN>SlWlZs~3^97Yg7a}rPbG&ukT)t$ zg>u%ZJkqv%#m&V{0{l}OJ15pEJLbZ(T8!;nHH`53L|Y(y5WqnAZb4z*2=bGn)3bm@ zvzeS<+0xe7)!j8TH~_T2Ikt0U^cU2~=1kYWIg;}W`+kHvb1vDb$=<P1a5{)GKyek; z)`N6UEi8dp6~qj<r*~wmTTr-*Ul2m?G<SkC=ox!v#A>FdYj|Fp^fx8iDJ;XvH$T2~ zczEG<cFmx;q4!xGA3a8$h*9`H1!ob{P}jHy^er+V`PJJ`zBo8}`|g8;lu|o#xU{P6 zf%7_ma3Fs+fcgm)))T5MC)M^`(EIwL`M)8=ZiyOT9$aAWIUOs9kPjbz*v99ay<?aq zLH8$k3thHt+qP|+UAC*swr$(CZL`Z&UADdT{&#j}o|)MXyZI#}&b?1&o?k>p#xZ|+ 
zoOk01V>4g*CB(&1c8rB*d0eF5O}zZ*M<7$tmQl&^1IFIEP2cbCMRv#YQ3w-!R9<3S zC+V(k<~Wk@8NdK2!K=uKkOW3ze0Gw0nxYceM0=FvyN$QEKs$nvt?Abep6IvIO5m)% z*7S5ipr|?V;9}z3Phxi%ze3g4mVO853M)Z}VidGdv*Q2&w4}^abc7RpbkPfVpeMLp zv%tyLD{(hA<z5t=W!!fr&t+3fsJDiw&TZerM&MXh3vz}EA8Urc+m?q8ptCCxlyHEv zqUP|<OLoW6Vsv02I<(h}<!j?nyBsswy<o=!(`lx0qwh6uuOE6Xrm`+a#f-N)cnxz( z4``}BlB^sPb+hiHG{x<D`&}e8<fmFo!X}FKL0b|<sEQ6M?!8JDwCeb9g<Rext}7&V z?yv_MYxI`)ljrYG!Sk^UGm(&v{PdJ?Oam~8orp6qBL55I6FelKIbc7aax#-+!JtfK zMO77T_3b4cr}pmh(o$plIyY;WHFx*lZ^WX$6W4w|*>9X%gQZLGh1xx>qQZgk>FNFD zlzp6IE(VV!SGHkfyA%Q!_98cJCMCA95|Ypn@&U?1MSlb!9;6Tg0~rFHwu{`l+B<4` zC{o&llY4~yiHy-q_SBP<`T4qCbMcfNG{$eM*g%edJPZu$#<?Z6MHP*)BQ|RQyMhZc zG==obA3)2WK$1<K)y>`vrFq-*99!fJzv}%=LiCkPto3Y!p9jS^7oERzDcJ4!2|SBR z)nosZ>VT~|QMKQcwN=DNE{}=cl*>`>spnrHu^}?u$kASR-anusd<~s_hDAxKf;g%2 z3PV^&OThh>qa#4|6Y`fhB}D9d1+;`EIp=Bm;rk&h`54OLGB5-s!s6o6@^=4NiT&2L zZyD$K0R}g~0(a;=%h}ofaKLFe1;zkQ3G_G_ikY9hN`f`QnRz9S)K)(=LjBi}o$id8 zod%Q__CR-8`d}@9#uSROLw}+#+7lxo$AOyCW73OL;_D-7_8e%BziW0!ie*%ZMKj+} zid5lXY5$VA7vEE?MPh+szpmu2!U7~g{M^FJ>1{KOdqi6pTpwrH9KV!P{pUHQTbGLN z5Dos2zJuDbs_VJ9sg8QVN<sGNMXEXAO|H=zIqEF3QdHl*1)?KUKYN<lk)=9gZ7d|& zgJtC1(AwO%#-N!{je!a9t+OLI^AYoK5wJ0`P_k0dBKN^(fCEb+tP%VH6eHZIv9Jb0 zFSWS&eb}B|1;inH0X5AX_4o+5U~A~>TiMw;xw#pesSAsfQ;?SF&;M1L7~*UKsWweY zDn&!npP&t!oDwvR^0fvI!tisg@Vy~#LLO6LQ5#OzHk6TBeuOP{p5k0C_f;=~G~ivr z*{Au8wCt>PmanPQ40V{?l+F-4cUQT$^6)Q;Z;f^duc5b27X{j3^!WU7^!z9!>DB$R zqrO*Pz3gU2mI?)K^khi~NqaxaGNDOitVuqj8_sBQG}PrsLo}A-Oh^}h+r_z~^%`A@ z-smyaG7rf>RFo9Q_XGW~Fgk|tCybYBb$WKag9C^Htp$!D+C!Lw{)?TS3JF8HffxAo z1-=YxNm5$@hT?AMf=kfeR+Hb>Qy*k8=BNwejPSbL?054w!V<uF<DsPp>5L!lcYmQ1 zmOZGvz$eVhvcO7#QlMhbi?lTf^*RhbytKVOHv$)}%K89{YVWQmd-tv4yjd%QItpIF zIr2`*9^f}yj-4{{j<4uC?@YNsfNnp*eXZ;qPb$Woupy-gyTiA`=6Z>LT)9s7Ejbw| zE-y@{Dti1llmuf;Y4Ib;J`)?K0b$8ZS+%QDAt=XCa0seUmz|@gFLwBRb0YXmo_Jaw zg(dEgx^#6j%>d$R%`9Lo*O!38?QL{;FL>GduhDR>FtJXNQICHTXJo~Je!$ZKbO92G zC%*&|0qXh_)Sy3(X}(Mv!O;M_8<e;c1mZ!uI^v<pIfz{xtOuA#V8LIZeqY1=lkfr1 zR_gE^Tqmx^34uWKXyOExnQ4|-7~{JTT^VwnNo5yQ&OSEqie3nmb!9bXHO5}c5%K+% zXO4Ksk;Upz*g*r$_eNbTS;~N$<Sq3|MA%*q&&Q|QsjYh5FQ<^E>jh|S;)YFRDe<XK zDY}f%OJ)0g#exaVQs6zS1uB7Kz+R(1%W+(D{nNNDzAkV%GlOrZg*$D%$y^pzzL7~` znQ@+>c@%5wQftGnv2?BkARiGdTn+TL)gNJ)g`buo362f{?bj3xV~M^DPg!1Hn>4`y z(73=xE)53|4dfRpLMC82kLdwRPbMzV3pnd1ip8(~`N97MCB_Iy!OamU-_$%BfqP|1 zXmSixmFYt(=Es~`PciS#`lE>TtXvZlH))Qh-GY>~Lt1|3DiTR)>?pd_^3hn1mYvxP zaEJ?cGc*z82lP~!g>yQ9b8#=W<h=UUEj7Umqdpw6O(ztd3vT1DJQ$?9#IZy!x>hda zrC2OhE@j<$pk}*9?9ZOGftRq*So3Igzr(25y38s$`^2VOWhK-X@~kMx&MIs!;RNU? 
z%p-6D-v^ep6twyOBX4KE;Pt}6^w|XlXd!I->8<uD_%Xf=@MBqnv-@-Xct<tJ1DtXC zj^_daGyV(tvJj+x%eN%pKMHnUYHX3FgK7UYLaF4rdaf$T*UW@XJde8{$wyE2&BI&Y z(SH7LV(Z@?DXkJonH!J|See_eOcps0ay%Dy&P<KXxQT}i$%o99v+d%?&P`&f@7UCZ zjqQ`Hr-2sE>qE9Is}jE+{>Z3LNf?kOUIP3BEubd)pnFqfX5&<{3WUK0s&|1ind&V& zY+d?ujj*xH`Wq=_w&HQ4=wyet41st2emfCIKMkZC$d^`UhhInVdCjJtK%kj`A4MXA zAB!{833<ct;O0!AU;%TVejeY&uXo^?>Flw*<^Ftunsd#KT?n;ZQw(EdgiEQ85g`q3 zIG5F06!T7#?6w~t0hH`mt9ja=G9RI6qp8=PBdU;Ro&lcfEt3oP6{vl0S1Cle^LI;* zlEvgKN34X1RvyPhfUoUE^-Y*>{ro5yx{_pPbe7qXWH{|#N#W@s4pi+p3PoWD#(?|2 zCyiJv)lnTERjPAQCpu1TUPmvBbI_p{<H=`B&(wyq(f}szYVZ3d>yIPY3zNDMDGp&5 zZ{oS-pY{8@Z-jt9%I5OcRnAZQ26&F-pBx>i8I|99fQ&I#gN#6(tiP`y?g>p^4`kvj zY#2!xF5p;$8QNTJa!x~cRzYx81x5OB)!9U`<f9YEqcw@PGKn>^36_B;1x0m*hpoez zo25|{?b=r(-3#crRJb;wMS>?!;J3Pcj{#1V{4CV$IY?{usV?26hBb~(_oMy$=g2qF zuf3*G`5g462Xq$sW0n~OV{AWw{OMc61Sg(~YjUIHDDcHrCD1F?(>nRGIgQb8@MIhZ zxq42Q!9}a*(Uhwcy_`sDJea8y=jd|g`w6v1xD@7su`?|y16Wjt5X-lbye6PZu9ANo zPSAcecpL%6%{|G{eDi3mfw9oL1UK|IC}kzazkxdYVm3y|<^lz{r98<NI)b1#p1@hl zxRc9#Qyo6Evf7lFtW(2I^(2XEq1J<N^tbppTf9cu>$Mh!p7RQj!!T*3!b_wX%bvTL zp1d|-P(wxDZ_bJ$>bXjCfPi+~j#f6^HicQfk0wIl@zK=nRP}PGdU)X7?IX$*ous(J z@Ani6ZYtU*H;b$~)z95m0C&l_jx!d_93<5oMd3spcp}$T0VDJI!P?Ovlefd4IpMM` zgsA^|hD)M&ge{47^*4eD^X21*)+Nso4xMkHxW=FX6`mt!U?W&yBY@ws!hTKRXm{b( zitG%E%j}9PtZGZcllu_pF4nWgxE{@AI#e?_P%%{tEuBL&uF#m*ytSi_XszONand6j zKh<a(t2*oLdg?r)#>Nl^JZ>uHNv4c|&Zywpz0R)Q<N7h>MU}c#4K!8u1T@KouE(z1 zLR(Kidp$kgpE_&ehwq-+6P|)!5)4Pjv{XZ-#Tj$nQL|zzIwv(q{Fi_Z&)t{G5+g}u zh~TI>>VM>H0&%4l7@&2X4Yql{9<_Ns<RpjmUG=i#jC^6)MOVoY4)SttJ*9@|_-2?+ z^L_fFYZtZE&!(76K#Hr_=w%YyB}qs`8rx+V*-;qSQ5@F~WR3#Cz@w&#^~T7pMyM=C zoRx!~T9MiM(b@Xhk2FthG>W543|!qkF828qRw+n&L{?|MBEzq39hoeIK*xI3CYk+} zS2JYC5cL&5oBDxX&><LimIzk`R_62PpX|+s+TC{OeSITaqqKt(z50$Gc=ksBv`Ng6 z|ENp`%C79?MN~Eolg4oY{*<UvCo=tN`uiNNT^EK7SDAoHE4)=Oad#e?35Ir$HR6?k zHP!93bYWafGcbH3Xhi0Zo=#?TLT|F`26jxFckT^*;Gr^jS%(#^y&fz)W0>0nTgl<C zy5w9NN4T5yS;Y13VCu>z+Ja1_bL?O=K~f;?tYxTu{qR_lf0Xw?7Nxu7;4(odTH2^# zXZlV<@);^Tqo}~N;QH6n*66pP=Yw_Z(7%s=ngV}JSB)Gty}ZBgn%`$z=6iHo2EIO@ zcl0$oH(9#C#=zt8<JL{Dv`Or+-C!c8&|EZ<=?u}pCx94g&sCvIL@84k;P`8V_TuVH z5}OVco#R-ZnjfU#rL3lEaH*{E<SDP6VylVVe;ZvGk=*(Rcy5z3UWKYqW<kZgA6={$ z**r@ix#=~nQCfCLubITwF?NPAmX_TjLNYU=a>Igj(6W+I6;~9vbmjXJws+0)5y;0Q zI4ey)<`o<`u&QHDir_v|abRj1ohh=qsbT)vb#)L5s?kod(lM4=b{AbW`W5iRnfF{@ z>9$QodD;rptJ|Zn-TnPF@d(c9J?Zx`kg|R<0CZf&Me8_$$;)qxid9-L$tWvNDHMR# zEHhh)27%UBD@hqoP6mDchOcOU7)Nvk@-evyX`lmC@cRM9Q6pU4OI1M1*xu90Tnt*n z-qP57qa?U!37l;Cg<K`LWF^#$MeMj~=%{rR#Tl%KjROq*jB8X9^ZgE_n3@w;xe)Rc zd_s$KaPI-5Il@(woDl}jgQ=jOA!k8t9zo&dM_^x(1zM_1!$mWH*$dvB)$?xs)M%Zw zi$42!c^N+UZ_oTOb*S==>4*trUWHGK>Afzf#g8_3bD)faqv(R#f8DWJ5ZpCyQ2~AY zn20?gUQ;O$5=9>a<;oV^4=?(EJxL?NkFDb>xpL(T@<tX%VltTA_&@~-piUa78d0Q0 zsD>dCYJgqS|32{sI);>igE)?F<u5w%(wV*lo{?X<LkiJ~xUZUbtyFTEKxc&kZy%;R z@2mdz6EskDnq`fyg_^Jk?X7I_&XYw8;eL3OKHnVr!vj|72Hob-a}s*(_KdFY8k;NZ z=N1Bknbf1ZfLN)lJ?BvAG3>`n6Ayev=Y3g6_R<p?b9hSYkJUHur5xx3`-Sa<u@j)M z5y58Bpq5$$af|f39$l8`m&<4H18~p-aPULGF0$Hi)ZnhZg5bzqLV#>WhH(vAG0L%G z^?_dXo)V2&qK_Us#1N`83y-C(7lyn;qXNh?&T3o8Sisle#C*|R{91wkT0wu)S@=dz z@ojOZyk5`ii|3*1I-PIQ{r74t|5w}Xjp*p1i=VeBI@1ochT^7<^*91={`gX<!Ze}c z8b_G?W~6rVNU;cS+1W^}A;ejzMA~Q|u%a8~ieq&jV<BLU<ZW9bw$5VPr4GtF2#;C7 z?@K69slp`nr>v*1Abpzr;JqJGu(1DB^_eHmBmy(+a9m=z{}F<@-+8fWY^aG_`|vWS zs7fNS6^5kEPgvU|JN_s5Z@vE@n>rC_Rtr{Z4Txtdz9S;&wFO|Fjay~J&|*GGc3b1B z_!kyaYluT*g6H%x!Idw<m(8^$m;a@znM22!zX4#}TfJv3-0}xMipqfVdAqanc(C&J zE4L+EfG3sfOrQi!HFtwX2cp;*KW;VR5?KZ`{7Il10UR?1d*j_cg64YRT0(^+812T( zLX}HUTxGU()XQFM9_5le5{~J1e-`<=B7cxxLL)F2?1QVY-0+{AEuGz-6bLE^Hm!ke z?m8-q`P0$rPM#(}QL5!&P+B1p%0eWv!6y?2O=^l#t&f;<=mkKzUjuLMO0~X}_*Gqv 
z4fv<+_m6eq^%jw7iTs-9hCj!kH$x-~fk)~Gm(~(9pd7z)U~P99j9k68p?<o&^DXK6 z{u&$!JT8oE-4t7$<VI7f91(W%1Umk*NCjEg3~cNyfPO=o3|XicWwIo{%QVa527r`O zn5R$MiP1E=5*0hLR9mtzl~OYl4-=Pc5at2orB~{pl9?VjBlGVol-!fX{!Y;SwQL&H zx-xt4GNJiN<k*M=#f@HT^BrK<`SCseO~2En-SgV!;?CLa3CeJdqiFh1yX-t8e?N}E z4w=>hk?tpD`fnr(>l5GkF@24l<E)rtOdB^pmeelZP?~T~1^G2nqYiBhm|p3nkIcxF z&SD!*6r9+KMY<Cvh9U|}2}8xtzJ+G)A7xt(wQ=Xij_P*LpYPq%$)o7^1L)OEPqTZ= zH)F?59wUKVlT+?dY9t|(Tc8B79-}uHL|u78+-K;GhkK0Gdkl69kdTUW{u~@>*eN)< zvN1XvyAurjjIIuc=e-uS9lCGL_dt9>MDJLvp<kI#SG-2otvil?={8L}ty3=>bXz8W zUYh^v(5HUG4!*6qLYdl{I5|6-8ruA)WoKjw#mq>+K=5A;c6J7)|A)%N{NJcNJOuPI zrnY9z<^)VEKQtu*dKC|QQv!Moc_T|xV`l<-Wfvpo|Ds6R7@Gb3pl)IEpFgv*5ztGR zT9}zTLopK23)@-SIV#&58k-XQlxqIZRg`57ova9$|8t+8o{)vJlY*(Eu$_&)ovo>@ zGXVzyy_kiyv#I0H+1k+ARK(QS&cqaoj}Pj<J-++K@PIg+9{~cO@B4dE%I*x2ff2*h z4U(J;x#naRNcZ!^ydTIm?5jLeSH&b<yNP0&zxJGcu;N{A+vGqC>LK8$ubuimOH79C zaHHpchvmPR{SPec?Ef8;|2+ABNdGe?Z2uoHVPX0okg)x<|6f7*ZxH?e0p+saAMkSw z5<KSb?1HKS{ipN49J7Ek_Fbv}1Iz#6<v;wfF#mU!IsOkUbN+u|nUS6Kf8zOPQ2%$H zC2fC3+1~CygG}J;=<+jz{@c`G|G)3^zoG#3f1Vly|MS$i^p>r~Rd>bh(>`8$C(&H? zCG*YSPSm1RBod8}WrdI&1TjM*Sd5C2@rI);*Cx^aNEqvITYzF>FmU1a-*hx^BNoPl zgki^_EY!#-D(=H{K0D89eBOBPf;0LH4DmNIGW7YXbEo+{t9w<?>T>7P)@@zaHFeAM zf1FZ!yq<f%XTM)^zt6wl-yYd_U{~$lb9cU-a?IL%?l*7Od#-(7kNKAQzn;$RdUJQy zT6bjSxpjI!bN+sPe*Ei&bbCqpYxt$JQ<}RIcbs0kQ%^2a`;#%S!tV8ReKQaK@EX)N zJ;%Q7XmjUl@hM>MXtuXX-phM`B=;fpb+*FcW~ZW;@uU6ws&QY<%(nX7s!jV_|0#~@ zFJA3|ljqMD^z7hEGD7=&u<~c&`cmcbAn<;Cefp9Nq5hNeYJ=bXsk{j%&&q4~4fmeg z^4++#W^erM#M*T~Q^ajO^|5n0vPZbcdmudK1n1Uxo0_skt9QVxaTj!0&h>SO>&@vo z`_f{^|B-nKHfZsZgH4Jt_RRBLNfi1ACggQ61|C0FF(QQ%qXGpW2i?wCQ^S|TcZUNL zU6@}GBm$zCx_z^^8LM_d0H^P`ZhHbm6=HzlL%-?18=_FZhk^P*fpVd*+g7rh5AFaZ zillpg-*<icIk$Q0Yat-V--->B>x(JJj^Dfa!aJIKt5dTOJt+LN?Ty8+h#Jcc9YQI@ zqY3NfvVTyH3%|dI0rM%&hZUF~LoX}$aTde}DUuqApAR46zOi7(;Wc{-+H^Sk1)=4m zcGpb$%kM}Uw1$6Cx!K||O>gXuok6q^vKM840>s+qjvM~lb8~aZi;Mj*(&k@T8c3c) z-L8D7#r2liE$<>~AkNU~_-Vx6QsdX|*}*9>zOJ`MaakrvGN@l<XpSw;?Z%EHhb?kQ zt~pzyj&?mXhNH9ErSmae6+oA#=i#u~>l9&g!uf&w4Jjp9Unep||4ozz%x@3>aHgdd z5((x(rT82~lY)nr{dpuSKP`|DuZ=Uyh-Uf2VA|?eY$zj!&`h5i)msOpQRM@i;j@Fd z9e+m3=c`86Eo&CIUpwkgohBW%+l>Xk(-P(F28Ok{>d_hP*^mn|n%_p|vhqx}O1e?7 z#7ZcW%1N7>@L(Nqv*DPLp!6}rUbuljv$Pok)nQ5LjnMVOFf)zsZXN|DI<?bp%8{B@ zIlCo1sl9p7O)u`g9^xkk;44%)2UJpA-3}Qp2z6YTXTR>Q5wt;fFn}TZoy~>_SL}5C z!$$(D12eb1^T%H@j}$~6RjK9QI9x)@S9hF1h=F|r(%vIPdp7uBzlLmD9CO$f;l(f6 z39bP#G~`SVU93PsPS9WDd%`iPYG56L+BpbVdYW@)+Tk1dyOgBUXcOAlIc(01U}_uK zY&`i}Z)~@(srzj=-9CB?yqdX->23V~U=~}&twNLLhfz!Nt$rm%Q2IA1&)82utwqpt zm~_T}XS9afkGSkjUYs!Qhm&efjZ;E$LwmopX<Fw2nm*b_Ed#1ZvQs1kXxC9ML_6E; zAFr!>kFHVz^PxQ4gO8aG;hiou<FVh939r1DAuo3IJ}~?Jn{<Z6VT(;WV<1U5Z#CD) zCetx2VnWXMl9-!ljHBLx#UZ&W0T8EcH_ZiwZgn;rWD|t^rxJK!6#m8VD8s-XwiX?K zQlwNGKMf?rsZ1bfqDlUEPcRP(Qid9&?NEvO?;8nG$=>OQAPE4k3$pr`OCY;p!iOnG zMN|5dNWQ}m2;4%DdaY9t^hKD31XIWj(qm7m1=b*NB-0RS6s*l~q><9wh+uiRg@Z06 zCQPd`eOhUx<F_V5nx$=%OEVL|U`WM15qMIRf-WRv2?|Blam>Laf~_S9gn(?!XQx)f z*iayhbP(f}1Q;lw(88FVlhy2fy7)0Fhxa$|rSqrR=Qd==d}T2w;eauE8An8Fd<(g? 
z#0sc=456QLY0p}CtA|(`O<)>qp@sXeQcb@Wmx4C%<HG$ABLrNl2-)*EVf+-u)$x)) zyl6lbqUsDYCu9~Icn2sD5c*_~tG`{rU`-4+=S}<Xssd9ue=~yK9qW&{X#a}?FsYp8 z-2X`3-cps2N`43u{(JB}QsN9X@b_%>5%w&hydDvebtFq&%WIi=kzPDp=p1AM@_l=r zBWO;2aTXQ`-!!4*7cfFA7(xa8^TShh1f;a0+_0J@CxdqM@yu;u_j4DFS2K=QIbY3g zmllF8knMF&I$;JIu!G&k7MPWgw1`LCqJf(tQ4xdGu_M2{&iIOg1YNU<%FIQo1=&zY z??b&78m6-e_kvF}Fuyttt%jqt!qiqj_*$XGeZNReoDtbecp+9CiNI5z$cDIf>(xwg z<>BN0eWfuMi6yU06%3QvrPiTm*n>5LZ~!Iz0tY9sVCkxCUciz`5talATWSdW$@ZZ< z50Z%luM@A<)u9vrZQGk+py@5@8l+WqgG$dhVvu&gi3~%fl?Gf$l(+aW4a%XVZ?^Vs z?Hp9JLq?)x(sY$DPXJu_&;r1e>z)w;%I0c5CrLmH8-c5c5|UO#qomcX&YHP<kYW9D zp6rifb~enEFckhH3B=}n2q$NRNSte^ffsAW!4;1$fi$^+fcj_iwEJ;0=t5*Dv3m|n z=F^|zb%H)~3?*1?bDHOdC}r7#U?fWHsT1S$M_IKYD6w`VCGBlm!)RzQzQ+d{5<&l7 zZv(4oI!xrh6bNL-VPek`O$uxc%(lR>5x|dwEKzn!(}J#v&A8&G0FO4&ZyJXJ8<MO+ zadOtQKSmzDzzSZ)Wu7%0lcrl{C8_oThGx?KSQ@AY2HM~|i#iG8eS$KlmwYTfJ<puo zHy=y20(z`E1qn#gbMF6u;2LgZA`L}^s>7-jsfLj7?0d4sl1uq(GaLy=T`=52Blmks zkYK@2G1cj);WK%Uh1GAYVQL%%_MmC#&fbVCcspp2waAbFz#BldXrUDZf4mLqOyWH) zrMPks(#DJ96cUE3Sm8&CD0EY%acu`9x#eL7lnCWlH7!wDc*(F(ceR_NM#HCOFn<e2 z4QytU{ns%<|93K!zyRl=`Ji)a7;~I~0l4mxpWeah^kK@W04Qj}O$k;Qg+AmADs1MU zA9I?ATgfjhwp!YJ(vwgPYp8~|T~RA>wgB1w&f;O(Y+6YRM@u-1lryag3S?SX;PHND zAt7YQ`mH49xhJ5XoaS_-A9PaEUkgMe7?Z)`cY=O}N3L<ZJ!sNC(*6OixE?fpp_EM| zpj^9RzGF8{)bY6x2vv#pre4D~2Qa7#;TqQh`HYK;_OXO<O{9Aw01jG8)8ZqjKp;O# zExtA!+!{Ctlb_19)G_pkatCj~IgPkLaoil*#|J`vWvE54LDeD&!oe#VmZXw0<P@NS zY|W-p(!tji$*3%tVJfes4uQ(th>r5dJg?5j-IcNQ3LDzO6_5+t1>NCm{Xy(Fohb_0 zn$>g2&H-}8qtX}imbCDLh0s!IsI?q<*mCt(;b0c)g&m?TkONvKvQWIdg?T%A1g!`q zi69AG8X9rSG7$AF7(~2cDx@Uz)u2F*P+*homLd1>rL2ONyv2ipu__V_boLn$d)ZQW z{JLjs3Aw=r#%Mo5@yOEBSp06O{{+s0RLyZ_yTd6lMx@di$h(sy1QO$$SV4SHJ&{NA z6_eL>?4biYTr3T>qgR)5ZAmSVK>!qp<df<!l6>*2ZPjr;&gykZN<bx0a*dP{g^P{~ zZ%>Y1U{mnak~Dj}dtXyAF~y{lm>l0EiWJl#)*QP6cs=)ZLS!vOF$M^rz4!BfGXbYV zA$asFQs$_V(6ZP>rd(e%%rIWbBK@&{QdbhuGSZkY6f6%fiz^^Ef|NK(G|-3SR<_fa z*YYyUG;z<`??nAGDH@y+j$E7=+<O{129D8r6=rfdiGtQB{n^)ov(mxr=|=JVp$G?T z3zAcQ=72re#GgIdnP^lDcT5jWjI6<}E1LYWh$>@_^5!e%@HmIeG_4w4FR(8Jaz*n6 zBKiFc#2C2laAoidZ_%UGI@3_54|s^&egx7j&c}Bzjv21_54U<j$m|KBYIg1RSdx~6 zB}btgO{CvGf+<x-oEz96Z$_Raf?i|PX^tVc%GQ=cwA~pLclTV6_~`{_N$B*T_I@R1 zw3$}X*g>sAy#}I3c$rZ^!rN8u6@Jj7k&{C4TG81_M+0|8k2#b!hHj=kw~V}dTckm) zF#VM%o{h#5E^%@fp#}gBtw1cV>ioa;5tx6Fv5}8l3V3yIUCNb$vhL4gIE2LobREcA z6h{WIQh(LVW^Vw6#*C<dr37JEgzvIjR;kfizAaIZ7G1^s>qA!kno**%hI;z#nHol- zj0jsOQBN1L8X@aB8D`zfOdMCFD0+uX1_+W_aB_AY!GuyGN$}=ZwM;obg-PAfiWb#G zQtX|M%QdCXOvvMS5>1#zLsle%f?nl@b|jz<dh?zjg?Ox&J4`XFzk@QP_yxJ{018V# zh7VWPb%mnvi@b$)wh^m^@1Y-I9hyubUh#F!wrK-FQCw46?5cadXi+qBnnpMUx`R8- ziwNt{nc#{Jjj&Y(Ds-qa1Y5ycjtFbg+%5r`8vh)0&tm|SE0g3kX9RFwS&$LffK?*4 zOpFQ|x5+Y>3`ATY28+YK4a#<ru^$v%vdk3lLUQOs$fR9ltl6K}*{v|M+lCr~Zl8D@ zb%bdKK{e29P~u@wH&iG#bF{?HpXP#`d4K@}Yn92Qg{vI-9;6yW52`ikk<M!jo|<)s zO<A#@rfSC0&_v3SKTqK#mS|XUJHAenfSB4+C+dt0^HeK3iok-x$QZUP^38|5*Zz}u z*o2o->wU12$IRSZmpBoY6*3(1uu#ZP=r&UK$PiTc4B9g$)@xOhUAJZK@q|UzHq;15 zEyb0Ijn1Tl@aD^oo}|`TlS`EzsfXnW=lS=BHv8Z_6w#jGsy^kur}*v(tdp$;i?h(Y zEQ7#7*ga<3f~&CV(_YmD$9mM_-kZ^06*VY-OI*Pzvsws>5V<K6yOz%!G*JbU|0&Cs zZ$79J$BkcF{lIp8r8{&cJ#pULLoz|Xqj8F1X5Ib7P2EDqR&!7z7Tq!=J<G2^5x8Yh zj0iYlpN$mKC&S1cfsi~X8#T}hA_yfj<{+CgL7IO23&&`x+2W)I(^E@}%*D2{=BT@l zHg6C%it$U-)rs(*Ejd-iZxcwFU_3^Q!whR=DB0Kz=iGAec|}TPo}^#}0hG1EA`BEf z(@EHUKn;u>J(OL<0(Q!+^F&ri%;_znF3#%H_+mLO+rHKg3ek!zA@CgEiziri0o3O5 zRK}p7C&4{v#yyPgWXc|x%)m&(lbjG#6TGUCoJ2J+<sT1W!BXI1HQ^`+DW1j<>`2Xq z#-`I?gW`_PCAN4?a2=NdEppL{4+NMf5K0+!gB^L@yAaOSxNXohAp`YDi7&*B45Tqb z8~$^s{zy6&rH9}s%tA$b@}Wo*&px&!g^DsJuLf9~C<q1v13Cz<aWSQ;ol|-Z$|1{v zC>iqRd>Jq7G&&^vnNI{5#F!NV2dMCu2uSxuBB#De$Z>~;DFfIi;ErlHq1gpVL>;bv 
z$PQNiF4-t<RZU1hAt73!FhMf~)O#wlpB${jG7fq#^Dql9*SqmdU4|$N9?bz&JgJgO zqT7hKwITRk^@Bgj+V4RRbXc2&w{k_*yzOf0Hb}4=dFz`A3imQV@k$GgdlZ<s@&GgM ze#*fDI?Gx<f+hvzKU+*kX;CC*iy6SyrF%$%Wr6Y~kgBr6GI|W?U}4_CQvwIM4qx3r zT!B*RScrUQW%KQ*3dLiuzroyPxGv2E^BIl7uC&gW;fV%AGfD(#)qlFy`iAO?GAPg} zXyqZXWhKZhho@P~x1R!`{)mpzA}F%jG2Zk^i=>t{^AJIhu!9Tu(5#vlnH5G;QD@Bm zT5TUn_O;~0n#n?eEuS%$Fop_IyQAV}Whk=4xWF<{X-iv4FnQl?3B9BeGI~&EfH>y| zhx2bXr(4R%ar4!~41gyQ9yYXaM9t`97|<ga0;X6hPv8=^2TLqwAXHLBGR48_M>HN6 zLbRw+RS#9kvUCfZZ}9d7D|OT?H~%Fy;I8&md=&A@e4^k~yjHzy+kn3|KLoxo|5?27 zaTZKi6iXeL#IpwDs9~~8IbjfgbxS=I))9uGp__+0USGGxuu#ZSM|u^1P8uM%QBCnl zM*iz@p0FjY60*5NB?D^>^#{jTWd?%E>7z2l3=551<_{HC@EHSWT+*Q$mdRh0l38Tz zV+v_SwyP0#2=iPT&Q$&2GE#T-O*+lcb%l)q=UR12?M&?L-RT;#bGD9!4+2)v<oJ6h zC@@$Ua?s%UX!6v9<_CqbJ`_wL(k#Lh_a-k&iNazr#YbIX3dXij4gr(rREISo<$S9$ zdGmDS1%l}^2UNi`6YiAnf9Wv1cBw5DYUj4nk6s8-=d+Z=rBWS$fa{1ziv@$DiOwku zDMZ0F&orwBEG}R`AmfS2SrDO+6E;F;e3WUmFqM)24C2bBVlnnby`wmlhA@>`tR{d3 zu6l8nZIB9T0K7+&_c*|WiD66XGRi#W<gWL2Ckm4ZLP;8@U1<qSi`hlQr9~_kC30|# zEQSF=bjJ(EtYWY>x(-Hqsr~Foa*lrxt4)W2K>M;a9K_LHKN;)_vI6o2_l;o54#%Qp zf__vNU^VJcU|0Yc(*@G3+iJY2=!)q1koPoZSFDUF^E4dUVUwZ^(@wCz*cCHGiLDsZ z=rv;~Os%OmF>1+(dN!;HOSa}JqN6HT2-0QdurJG%MM;bpAyU1>aP=LaZAjfzc*O?j zUG)QZj3%Dg#E7Lg<g2z$@MW%p*(@xxW^^;fdax<2#FPN0r$#n4LN@2$oSg>bAyAUs z3gsr42xH4*&<)l|(I~xbQid%Iy9Kpmzddx#4UI!PZe8|gD21!`Gnl8#ls*}0@Y#*S zZ{>bd&4onLv+W*X!#QLS7&qm!ku4M}ZF<s{Xpywocvu;;W*M&U5i3BxX&tk*tz}Xq z6ii6MDAOHed1j}nFq%R1=Mk;NG)W-975B%z#rjhzP}M|bW<iIR(RC6r8qEJH8-uS- zRE{VzU4&{@pVDz2c5SlFPxil0qKY9|1*Xqf@$a^hKM>zNC}UAF4Fnr#>TmjaAwtU) zk5E%{M&e)5Ppo*oLAg>Wl=jUw?fvq#E)YS8GoGTP*dXf@A^q6=B@ptPl~S>t8J5|a zJ<pYQOvPZ9CK#k>K#DDu<O<^NFlTsxsQ5HZ3ZS^7Fv?iZ-A~~r?p3*r@*0iW^={FG z9q?|WW=K`0+8jT?{jj3ajPhov>w9Bm4bfE$&2R{C8zm8er^X4{YG8&Q;IZ}>x`vmh z5_vqhaElED7}42Ou~m`Jc0LdIy`Ls`hR&Vt-krTC?0oTj2I$*EkG~#2zF$%I6h9yJ zKQ?Efc>zdw_J;WefEhm%46d2DD58wgd8Tt5yA&xHRHVD@qGlc$T5Go+pfHh&Qs@2H z+^^+pz2-^Q@8G1>>I5IJt5}-i0W?-_g&l0F)Xe*-T@Xy&BiyiCQnz?Bg415ZZ4vQ+ z{#7Z$gI~gU(LnlJ1`HeSG{SjfrX{Q6K&~9O`9KnL;uT(mC94kH3R$t`9Uzy=h|}du zt{#l~vsSC>Y#3MG@tKZB>69l(iRNiHE&jBF%b3tHpG(a;k+-f(>^Z~OxfxPsDlQIU zs<7vfg`=HajM;xgTf-~A=%G!&SaVC)>Hf{`7A4dhb+#b)6I4Q$j0IYdEg@=69Ifd{ z3$~b~;2J&q_q<kO!sw&CQnF2Qwq{MVBYD-3-UfhmCYKGcIn<bf1WE@nXO5{`)s%7V z7L!ht$k|IokO=oIPzN5#QpX+Au&gG~-#Pyc%tB3zF;FnfWT~4P+)ap5gPT8fs5u&O zEK$Dmw$vg0w+f-$pw40D9$E!+P^FrcZIhyf$pk~nsV6ls&x8Td$rrD*wCuN+2(A=7 zjSR*XlYZ8P6;-Luj!~4a+`pXa*3!fDB6X6<WJK}dq=h-4K_uRslDtoL=B_$mzGm_= zHV8%Hp0OKQ__qoDsRAre!NGNjW@$!jf6{|%d4hu~O(4|P&cOjBUC*4T>_dZ%(;+7U z<DXW7!A(U>CRy19+%$F9kD)Ly&21g?9|fNa?YHN9KY;irT2(Q;-r=?_sQ)A!gpxcd z8-!)vb-78lO35yO(ee-n$0g)nB{t0Jl{XLx*FI7l85->gF0C2PyOa}T4CFCh<5%KS z9UPB8P+rMHc=)VB(Za%@#I%T=RnaX<;Z9cq^&k!mHgVQTS(g!EU@dYjE{E#<$ti^} zQSE&~C<6JfGEm43{2l!AI|t9~l;$($r5EM)GgNVcWuQpLR-GZbHE40Z@bAG6*#0^& zcfojBR=qk6<pamu_6e(EaUVP=3_wpozrqPIV!6qmdYge&E%yACwOFq)3mF1k+uxY% zL56VHx50321yGk;aw<Fge?v-kz8^pNziYogGu6*M->!WO?YuVhr=!t7XS=cKC$Mwn z)VaU@V*B*G3<hUuqd1{5WufTk{NsM$95iolhlD6vb-z<G6K`f1Z!Zo5nG}dyc4s1t z&xgf;@l=n5dhK_gI2QG-3GtW{@{YPkXOd!Z$%w_77FM5mm>K`uho80m&W`^zN^6HK zhrz8(gSg1XbwNBmV7m(Jl!8mgU~V)y_Qg>+(QrHJsFyRD{>6CXAG=hZ1hkXpKgE+S z#$J<;cAg%uEfoi`BW=oJLQB?FIdt77^fjORv92UTu1*QU$Dm$H*p2>MCD<U&iHHG- z$;8?h<5)JxQfP6;-9w4IsnKCWhc?(^_8HfLJ?XlIy@QyPR~e;r0&G=J@Z2SRKT=fM z_Z|Op{CJbRweCcfsy{^5gMf3e@uZ5*6LabwQ*AyXePCE@IfNu`9ItC%KTP2*@uNtQ zp+;-Nn?)wM(0!6(mcZQIJSu$?{Sd9M`sNNh^aA*!@=vcu#O&@Yrm7la=jyA3EY27t z(IO{&`d*0AwJ*o!uhH$G$=E*SP{REx9K7+EX)%++Tbx5_#~OdjAPqwBDb)ZIx)e3m zf`&r9m&Er$x7?4t!0)b48=_GRf6UYu)r(Lx*Z!uI1#7h@Uet4bVj02UWZtlbBlN4_ z{Gf>rw~aZRdQWplx$1#mYgtfo{cCD>LA<lB8bX;VwX0Y1`j1<|p2$nH$Y3%jL#J;f 
za$TeYn<4L^>~)ny+93<u)>s<<MEjwkq2<u#+UVpCZn@wJ&YEtZpea%vT1H`m8)#G> zxmr;SklP$<AeK+NKdt1a9dqQggtptH*l(PR6dzgM2*S4n#}~~7fp!P?mdR5B<tlg~ z9xVa)Xu&k|Wr5a30<%^rFMw8|#H5ySuVVUqy@s}<LpaD*$4xKa*XH-i;TM>6dZ0t= z=KZbIDXs`@D@OAK@;|WW#%kmz2pGp&GlwZ+s7{n@0|w+}up)p)N>e=jYE5nEBIzsH z<GSZ;KU=TgWubvyX+3|BR{7UDUx^A9rQ@}AM)?2U%7kZ|51}~g4(ezz&c(S`j~({o z2W^X}s6f6Ii!?SzMRq8$#{|OUaEo<xt!Qh3QHuU#YnKEKfy3(mfL_|+@d}cz?d^3t zKo;gXNwH8lI+;U#J}LPl4igd}S72e_%Gn0+@F$r5hN8My=gz3GCI~7MrqS0jM1dpK zIVEO(ajeD|bx8eDd~_>7d+xa$un1$*m$<t#Ru4XQ2I8&;p`L2cmX+OQEXnz~Snty= z%Tj<{r-dNC!};wRu{unFr%<952A~5mM;Y+d?i)<GVV-)?fs<3Fc?}9^HJg-)Kvs=Q zCT-j=!1Ghsc}W##S2kLVof_5LNo8s{yc8LQ_W3u}$=ainl`R35qc{Th0_UlRky0iY zamYtKQ9jrpeq2>^i$1gL$}z+sO}T_9s5~jNsa_q$M;;WiN_TKC{^V*Hy7UOl2!w`9 z1O?e3N26@>Eq0{n;Yx#Uk`m~~BXENSJ|Hi8k5Sv8WU!5IS<yn8rU9pgsP0cy2Sr0l z@TC)#E57qFH7$&ux`p2Tljg&x#TuhwsDo+cj44hWABYarGqhGozYi27DF6nM-vqT# z39o8aoDvl!jhx06ELG1muL#VV3KitIV9ixpFI9*GrBlfoEZo5jQFH0cuR(O6IUphi zGq;b})^}wwtRmx@ybLJrk0~qh=fmwT97Wz6DNU8Xo*!Fs?o1E?59WE&u&PRP{!y&= zSBg>c0~J&ycgtkIo`+!~5~u0nS%%818gHJQH!QL3`3tELz7Z;R@|n3~l?JGhdoD6? zeTA<uB=Unc#u)Nmq)Kg5$FB%T$%~&tuxPMSPcM=||0)ZM+jx)rt#-TtL4kC~hjg(% z3H(XUE$6$z$BiX97PeY!OLhP$Q<7(qX^<(sf)CR=*#?UHeL_xlk({dkN56w#bwUS5 zZ9>tjO6fdrMDoe5L_crSR2s950IEW+;#lBK`SQlF3b#b7!MZsgBr+*QHKo-55bb42 zP$V%+yUt^a0t}idG0PTTLjM4Kr}rtyipr4G0gG}b90}gDU#^hL9(+kmjZ1Guu$hg% zXTR$&7qgw>zr;@_jw`so!VRi4`DkO9705W+lm;yD!zzwPP=_f6x>mUI(*bP`m?3L` z#-YYM-+z`fIDWR%zY|qCri9Y<m5)5Ij6#^?wDXp#0I}|jFdMhy-W7Jq$Hk)`TB##j z{!y3Peye{YlufSbSi(ad^$^wVBsPTQTFYy>WbKOjuwbGS3Mr_PnHXD-e|LU;xdF~8 zI&)%W?v7Q!S$i{?&dSCg=&->9(>Y3qkILygFn=Mq(RY9OdtW2tWTdD2b@ffmeSS>h zXI1>Gl$#j_8{b5LS<OKKb)LBSovZ}6A#hrjy<IwS)&0yzS<B!1G7=lAYSg92m+o`B zI6Fzs_c5u@C;NSNjdlg8F8_^#e|7Js{Eez_+oo3ffca0K&Z>4;|6#j$^=gw*KCRTS z#VQbJZd{UGTa=(v|Kk!~@1E0)9}!i^+GXE*E_8)M9|;o+>*2TwYbf-o`)o0-hnjEI zUAA}OWJMahFZ_c?wqF01wS5=l%8)`ei=lz&Dmxe>U;F03d`yMA@UJ+o%23F^2YR>X zdA8PYLX0a@Q~I@M!}se4v-B}q1MtHc!(!l!Q5KX>`qJl^ZBK>)`iQMJ&YExrKe239 z`M-{AV6l7|p|xSL)a-7TRea`t>&^bTUGqzfoR7Tddo}3xVcxgLzS`ZcwKL;57>6g8 zjF=U@!fB)(Bh%R=?#(^GBANQXX7Lv<p5M>@q)EHq2e-c#biF!W|C_w~7kMhr{g+8+ z`eSDg&a^F%v^D5^hJE9e>w7Z6tZq;Bji?o()@Xn}g=g?#t^0Q7;b>o5bw>ab=cCF# z^5{wr_b}1nVqUE^T>fj(xAS2TKFB7f7GY&_owYPze*v36rCXvuV)c{6su=oWxDQdC zasoQdDo#o?y0A>iH$V0tD^hl1P5Xi&c6`$aBY38=ul;2`;mVPkj6hhR!=wR~kAIqU zEHPLM2F$F_YvutvztXLT#jrA@)*VEtFNb$l_xY^la1*$p8RszsHAfMVd)1f(wT|fx z(U&h?vt)2inX|MYM?QJYoIe(a!wCJ^BmBfOAL9v6SN#3O$(z9|$=_-<Wclv^FkxG^ z4qG_RpHOpe7JCLRrxo1ZM}-?$o;h?Y)0n8#`>`McWbbb#r)CpGaL!nqi*u?X=F^E< zsnR*KX`2PCO_(tyhkMihL_-PxDkeAV5n@|(GxwB$ezQ6zS<3O}fI^3X3jzJEyRbOd zI8vc}>~X`%lkhaIkfD5T@niyyvk4ioe)EC?pVD|CRu|R63K(-qz8rdE`h~;n&zCTu z-)jv<a(tQGKjwYQN3zcb6!Q%5SsWPeX_tdVzN17|V?F0QGw6(dVoHhh%=&%ls-gza zXN4#_MU_fOI;>K-e}HHVh^vy&seAg3=%7?uY^w!DW-kokMgVvNd`P4n->!}+Ihm}x zf6gB5(c|&r-xR{87ZR-L26<ODLL^6~@io9>1KBi%Q5s#iTW)^JEmpW9U96r{4MNrY z%`$HHt0GOC$tMOlPU0=NM&&|z+om2S*b@6KxP4PM1|9Ju@j+N<E1rz<*rjGEhn(=p z@{5z1xu@((gx7ooL(RF@$C8p9+yaIL;ld%AtA}YrMb?Q_!{`CNH|S&!jw1LjSUKxL z-r%rWNWH07@5HHsISh+qp*%PdniRO*Hwsrma#DfGYpdR#e^_-^N5*PIKk!48PB1B9 zQlEgky){OP82V~kbDGs-JOk^IWFw>@9g|``vq?|~s#D~|WozC0LIXyizpqBcTfA+J z)M$S1@i4m&PDp2UY%LEcDpH-1Nv9^Ft<{q09=5%Y$asW&@t9||aq+)gy@NR^O50v; z9tM*H#VkEDP6@b!dCAv0!tU^39HaLIM*@=GPYGO|v~(dl$XsHp^XBL08vb0q-R;g< z2|r3&A*wT!d?9r0QZBF?+cy4Gfr5lN1Z`y7GKWdPKB$6Wd@L`oa4v@?LlFF+$RUO6 zU?_G9iSUp5yKA<^DJ0x=3|9W*HHj9ITujw~r0hc1SB)}?Sj9>$1FJ&_w>u|@!MRDX zq?n=)ot$4ni4@g)$p|C&#uuS5>wPOfHV8=RWK6_*``vu8?+Dt+GedF<>I{ynRj8$# z38wbX{qWEJ%b+ArBM<VWzW?47O|1LgL1*!))^~v(z4<06HHg$jH8q5AIsC`X-d(C^ z#*!8l{cl#PkP8tsLMo$@-0A^4zJ~2z@{$@mn%oktFn`Zrwt8<M$H^vHn31n*9<bN& 
zp<|Z}hd|s=MImnTslhjUPkriFhv=i6J-zh8=<LWW4W;e=W7c81!eOxAtZYZD*utyo z@gZ|51$^Bk(K9md4S_{O?0vSC(G_OK3@~Q5%jC(ntN)mZP{@ag@3-WxaeMALV8eU9 zx_U*a4oLK>_rXAs?RioG3AFZGs>+lqzJ&x$4hh48kbf9}14|l4jY&xGMAxuNmI3aF z)Ha8tgxod;BSwBX9o1liO|mlAd;4MNsu|$A)sl+t8@Ut#i?|c%Bg|eApXaluKKaei zxEn@cA3y+Vmkr0Mw+bHa?CeLFbZ5t8VGJAyyuB1d+1+VzF8Hl5#A7yD#h+s7lcj`{ z>z%*fAx)xYIgb`$ue?Uk2FHu}OqSqJs9MoizzvtnvOF8^H4c1|WH}{dD$ukYUD@t? zQMH5wPTP0BUfAdowglmpN~TNtw$5ZykdryIOr_^br<s0=#~A5CSrm~;B-x0k3L{o( zvc2$4lTS~le&#NkbVx9g@_TSqXfR2&*ChNc5QIwiRg}!(p>|PqAz}QzH~6<yJ7(KU zA;-mp$x)mfcnI&P?au8NrW6NhT)dze@~E9|oSf<5i|l~~8bk<2;#0RgQ|Owyea?g{ zQp=5k9`7el+uQAJEZ_bCBBg(SrK3bP9g$afGKv4pvZaEwoToOFI2Hm7*Rs(Y-W!i$ zPWpC@e;j7Q2n4??wM_<XW(IE^^?H=`U}~6+-Ou{Ge~n4x`P87lUF=_nArwlR`I^QS zv0y(&Udu-)AY~S+{u<Vx3<@+3jW6e@#<oBwpI=xvW^wqytNI>ZjtI}$5)6xZFr6yk zoVto?!9I3cd|0oWkFZxFA^m!INjo^7)UPdLSrYeQ#she?P^KzD>3y9ff1dD@k1-oU zKuYy_G4#dVG;zoWfKW(Q%m2=g3LZc}qIZ|Ya{}x+z&cV<&6SD1zrR$6lI==R2jI@L z`rBdBI8ktTqmxyqmS@8%E^TRonnCQYAq1hDu@#DdsDvVW8hNTRV7r)r+xg-UPP|CI zi|oIM^T#L5X8n|s)K=^?Fu-R#5EtVy&$BwUl!X(+{auzja+}HD|HV|=j@GIGPFNQ_ zW_-H4Nint!<hR^{JmzCs*A-MB=zO#&r62`xE|5WDvY$I&nE=J<`S~)()bOyBzu{5e zHl~8F=3aNdSJ88c8QG}0s6uP|K~1X;NmPj?-*9C6txDvu6`CP5rC$PDcIHFCifhT$ zK#VSvAo+lop=hga>~&bKPVU0s;OWCxx`6;w%X+U+z+s@C>CHdcM`Za3p1^l^-x~46 zvZ=%p<fr{Lh#{)(#$8^GbL5M_<-%?gtLGLFg{s$f7CcD7*jA32ZMR&_%Jus!e_^B1 zr}KOL?Ey1n#%#^=_6FY)%Z$>sjk*0ajXrAkfgv6!sfUmuCjg>_Xa>}>XeSw}jwj2+ z>yyWwBkNL306P&1p+*>8Gjld2oWg{vvuLu>Oyxh0BOcdR7Av(ldPqPucn+a3w^pr= zT-9W<_A-s03MN!&RF#4Ktq5r*-T#SzQ{?d3?&W#CpC*IX^EsCD$oe?hQ$raFz_^of zV*6&fpWQ>}{>z~w+tHJS*9rBEljFa2eB_M()`{!WL-Q~9{&LZH`o>z9q<a3#*g<;5 z54-NKVPT2V*_tL03#N*a^Mk#@=jtWxGZ{>7g=T^~M;0el1%GC05cpoh`7n!#I-@2F zEOu}S+>|P(4n5~!-7WaZnLLw>seO2Xvw=$xKY>`=qTn%Q?u<(QzGmg=X;r~Xpd+y< zBJpnBdP|sCrLS@uUiYJbT*e}<MO%5Wfcv4sK$gOQDp%q1|HaxnKv&Xj{oYB(>LeYb zV{~lWNym25v2EM7ZQHi(q+{FGxBEHIdGCAfIqw~JeB+Ld+N;*6U8`#EG3Q+P{U?QF zF1QOx2|I#eHAa^zjj=ed`%jWXrL{5rEJRyB4RJL=so96pwV)O5^9fomGSD|(>x^H; z<Yqqn(fSdY(Dxb3O!NYvm?w8X8jFO@Ec|$Q1~M=Wg}!Jx`c4m%&vDNrV0cD9(yvgi zv%d7(2EdE0h+f%^nUpw7#pdse$0r5|{*<rf>XXrh4JtG<_KbRBH+xFIUSkUamnUO% zvpin;fjw~RMgggtF7+E$h(!BJ4-^4uo!hq7ER;x)UtH`;SPPd5n0<m(AdT>3ibo`l zIBTZM`JyX-+^V$xgq~DsR-Z{KZ^MO6P7vi-f!`JCz@PUZ+@sP)ZWa@o>s;T6@0-$U z<*>qNAT5@pt0)9m=fY^B1%!au7p!!BNSuX<Q-Pgq-@=f~Gh>)Dvvie1M2SU`4^dZ? zB3@^mn*+u*?0D`FGTU4qraO6@I`dFQ$fG6x93rhO$C(5$4cG3Io?g!x%U?y1e6=gC zRs;%2fpo1Ls@q?iuiLdnpC-*Zy)sWV>M<xHN<(9j+S&;m6wGygg@3(4KRrWc$aY>| zgWnhk2!!I5M8pciSe2u_oE20AlSWon_0~ng)@=&q{doUL7Q>E&ZDw?$RxNP6o1=8V z+4}=xl``6v4PqT4mFs|Xtecil`?%IMF~*diO-W;3u4ZjKeWU)YN&eP&V}i-MERPCG z6l@Gt$ve^<&QBaCZ?p+ZEhPRou;E&0X_4MitZ6gebI9BIlj_U6E>Kxbs<kfhRAO$d zk$M(~Jg+6)svvn)OQS3IHD4N@aI$TZ^u;-xiZO&SP(T%2EF5#RN~P~{sI|c{Q7v5k z55k)!f}^v-<>oq%^d1s?+C1x993|!0b905wJX{h`0WG>B7;><WXmQGn>>f9u64MDc z>=iRO=jDfnJ|vSDebFu`UccH5Ci$4o=5N1h6|=4SJ6v{EV$PF#+RiQ(?E)j2Maoq$ z%*Qs=G1RU^thmZSKLs-oMw6i;<li^_i`mAksuEOdI%frInN~mCn3q!p2;MKETNz3K zqxd^-f!i~>FGEqbq~8ZaZ(9|<TWJYeTG;xk@v_wH4ANJd^>b^Qn;=3hzwBV)D!m{; zWI0*J<muERL7gHcPvD18hhO|icxt#p5MIijdktF3%-`s#m&qcVFpir{B`QvWV2Rao z7GfJz8H=4wFhqdsC|!_aC@nPgHKer5LPJCmeqym#Z+|9;*3n2v!Gkw?XHzwB*qKkr zk!yGHFs!Vsa)`;UUJbrg6d+QmPwfv_*LM#8ru{{G$XAgdm>o5hbo=LVjUx<RbXoRl z^~zVuhZ#z^r!<<;$4t9;dP&q`WGhFMm)n+@_=ZlfVnV}q=44orqM3Y~&y1n{<UTYP z9HHzyd+0B;1#mv{d>E4y-WX!&poPB;2Y`iD^eK(RQNxZ%w!*8!efrdt6Ys5A=t_Qo z$hg?xRkv(1Y^sKflp`Ct70YE5CMy*JDd>V41#Dd6f04eF#K;>!$*xOP)ax?zonaX! 
zAM++gEqPK?r!vYe?)_<|h5|Z>-Fc{*-^H8v)25Kh*%cPoC&qdeF8VQY(=|Kg*k@B{ z5Gu!J_NVB#D|$QLc65+<J2iBs-lfcO+=uXqr=!pM+-GB<N4``ROj+NVU~+X$K`497 zvS;oAfjZF1ZfqMQ7rg=Ngu93e|EcqMn+Z4LPlJu8dTb(xM+gRsblu|tWQB293c5J8 zR6nRmztxI|RpVU@i-SUT&7SqR&=is-*_Y6n6Ty+>S{H|e3C`&ohvc6(lCDAOOKNI~ z4LhcWU@6+-xrQ|@<Jk{Y+!rj7CxRHVsUm3|<Uwp+1QHSDn#boz8EQf6IsGJGc0`rz zaYzMF0LviDiLCPxPZS)H9+@xc4ZV*O*Vc+UR3jE)Vr0IFl_0tWZT|o@2u?84?81X5 zzm%ud>T_9)B_$*WGq`b}5_<nqQusX5uP_4Vgg__Fm4r-<G_5_RWV+9;sGU_p82_Hi zkD<O!lKFPSQVltshaI!PkNy5-Y&-J7<xoqj%}_9MM(g!xB8s}>;iF(_6W1-9l~JDV z%L0*<`Gi&Uu!4!qgTB({P<}pMY=M<Lr1CS2#TS1`yh`*{t$EQp-Og_IrUB$HL%~E4 zYdgm+K#E%a#zxM18I~nm%)Gc#vcd6z*KjOKRY8~T?(X~B)*h!hjR|-osNh5)+v`6n ze9w1pXa{#fIJrztIM%2et{FGc^~*@qR!x<73ZzB4SY9L(-9>$tEjZ#<Frm38vwz== zUcrAptxb8}-veF?Q-xVwA1`&<k6m8(s-I(5cCAuQ$yK{q+C!UjUa#82T{~QT??b27 zxDN_*_nV&+Ja0iXAC_%PT`z4<ID^uU_jkvGk8cY`UC_9DOUH0t%$;k;SlQbAPw%JC zkJl@n4`weJ?kL>!F1^k{Pn>6FZ{+P+$C#OkUb3J2ESn$y4w3vTQu2Q)X_bt9f-$f$ z=KTk(=MS&*2kW6@pk?|mz>DEOz)TGPk6<PmK?6q<Jp&P29hd)qWY>8Q-5+qNH~U`N z0-7iLAN<mm8D=~X@V{T*U#<Rgee`Vq#FiNTphN!~sm1UIWde|9|5x<d{|Q_Am#h1C zc<Fz~mHrpW2L0bjwtvNy(lteqWs$w7S`AjoTgVd2RQwCbLXg4ktCiFd{Phz9$|^|` z3sKkfXoN6@rG+d+g$f?@1eK-*;uYW#dwv?Ipo<z5`a+pi@;t|JL_92yTQb;Pj7z0o z&a_Rsr*l32wlfq2^2Mts{7t}%-i3E(7Y5Z$)QnIPYnc=Ennno(LMq6{yDyy+fH^BP zQ;~fXOTAAmlw?olM1~=nWM4ur^XFkNmK5ugE8ZI477v>*-deu^s4^*&{Zn-z2dD%T zVyKBhj$^4Um_h-E=ZXh}$NJa;wypW%>QqPer*hURI!eSC)yd^b#I42}Gm7>z4lBY= zrAmO@4^Zs0=bM#s_yRhNSa<nraIUi!+NEk!-rcXT(=nD-zM))xx~qwPk@h@=fVVcZ z8dJQn!7Gc+a{a|iY7E|{_xQm|t{`snwMiAFxqc9a&Wlq<!#?<-BrJ}~^Co@Ay4ACq z&Rve&^C0xVJmSOkiBG9XRxL>tVvBE+X~@mPTC$pexC;2!{pz6>GB-^eYx5y2jMxO) z!hW;HbTnm@f@TzjCdc|XbxAAs(wxoj+~ffrTl+XIXsB!XVWdbZ9%%i5CfOskDxgN% zMGNV^Ig2nKm&24*&k%4<%?qM#vc$#;G4JxU)e)H{Oc@8#a$Sla^FgBv6DXXd&0|1_ z<K~y+G1@L}pjAg8v;dVsHx|rlhZa=NV{D`~v(U)~B!UDIq+ez{AZvxzLaigZ9(Lqj zf_YILorM7aY3L{B7z*5Nv<mGQ2P|YKHWb||ria_GG_K-CmV{5zYJwUo_vTm{)<r(f zmE%D$NJ;&=qx<K(07*^b)+JfjjR-wYh6#3GeK$hrA`vUJ_XpPuckX^RI)4vkNm0T9 zz_MgE##<l13CK2Bi!Y%!a}(j?oW{OKYoQhs!8El&{g({Knp0i`^Jk7x<kcPM(7~J! 
zPGy~WUN{g6>v0xtFiv=r$q#N#O-4pMC}wplYWgdwHV!#V)88{11tD?1Wr4xY%&ECu zJq>ojyk)rlzDN}<L0^8#G-P0ZTOANyQW+X-N|$9%KEW7{RKid#s9N{4<d7S+Zd;jg zpiof?ihko$P_;^lv0xj+;;;Wsqod^sgWyoMoP`?5bwQSYI6;hGnr>xe9T}vX5UDo@ z2D{!z&Wu14Kl&B5bmdYdzEjkucr9z7;#O)nDWjOtdli{8UK1&_{>b}KE4D-(HWeYJ zHb?p8j>8rYlritFbDlc~vYu#mJNj<81hk%Ha&~&=+R7`6seTHQ&u4DmmJ6DdgB35h zS|8#U76K^WU(f5!Ypf-(DZ>9?zy8Ts($Uf~{g(y%5Bx2|{~P}HU+mZaY_0xh?)LT5 zOCJOX66o_k8RI`~{L9(=m#1N*{dZ#wAYuP9#{U8F{TIVHp{%LACXC>*sZHl`j1N!A z@!H{GmV<lj6Nvh_r^@8ZKENE4arJ0Nj3K6_-osc=3XVJSWC}>qv(nb*_L8v?cm0$A zO(V@mVXGvoIIFHU4IuDLS_v#YmF6%{`9TiA-G2KHOjr_0(QgYavSGNRUDuWu?!UhX z*oZ8^PN^8Cgge-uWqg@dwfQZ$!FhP4Y+#=~%tu~9D0ygq<BD@+9wNk(<?Sp1MCWTu zXBwuSflNBpY{%{T{Wl3inr>RkwMv%5_dPng>v%zk^5_?0f18p?YITEpgj5_EZ|!SO zkr?V#Md>Qhg|O!r6i`$>q<TAhhfw3E-o+XPxZK^3-Ri*U*b3-;4CpDbJn;JtBq#g( z{8o>P33$12<#TR6ZuUhf5~-2JvJYB(BrI6!=&@+oM3Nup(!%6>Y%Mv2x1l7LM?)jj zNHGj6mSu5Sd}odUM(7SM8X|+iutcL<jL><}kM`8l;@8ny?A5b7cJ$LY7#ia6NeI03 z^#s)lxhUJEh&dv3yDkBVtG}W;CxE|l8J-9-1LW^(;6#O8FJB<zg{H|z(CaUNlD_lS z17R6Jmh8GI>?3{=$5QO-r-bOo^MDoR{DO+nZA=JPl-b7!!T8b5siMmv3G#I$Xc_pT z4$|2DSndQ^D6z1ixM81rqK(t{_J>7%-cPkVtnlzQ#wmJlJqk^IF7kK=>jCsx-jj32 z42Ed-2cj`fe!)KU{l_0Gg~x28tE}wZ>|tAy(Z!B5^Yjp)*3)FcHJ(=^;yAWH=y|7Q zTeHu-`y5kyz|%?brR}oV&eocD@WQywcSAEGuof04*O$?_Mr1f&5nK}czM>aaM{V0E z1ssIKoeik)i0I{iov~MBodE6;D=YtAKkE1$Y0}NW9akq)NE8RPB7CjRV$E?NGGP+- z>{P(gdsJnFJN$ux;tV6n^_I*L2r+?vm%|s4UybL4Ydm$kM#p}rdjN#|x7DdoB5~hq zwS~*PTC<}G_7kHa73&~IA79qKImDJ<{VvJm5$}i(ggW)utT7c=nrzg;YJS36(JqNN z66J3*4U#aBbcl!Mz8Ora?X2B``VF!3GiS2xP1ev_BKZ3RFP@KXD>GPXwQb}%8-|Z% zOQ+5j?Wx4D%Sl5;Hcgr`UCqq&gh!!<%%m_T4=)d{vaX^2TUVR9@&loFleG3B%l_$* zH*>CYl?VoDOfK&5(?EF`QykRwzVqdb7ugXk38+MDsTpHB@x@0<7sghg?3s4ped(rc z)s8EbPfC$PxZ=)}mx>6}u$|Ui4W)hP8y4NUv?zmn8zpYoaIN-k*c3!G>B;D|{_iB5 zX=*#Hjx7CMC1RnUomD{^utUH}(;?1Em4yrCCRS=2(+kg8UK*TUYaM6Uoq0JT%_49E zynQqefrkrqGu6AZW=~@`IVh2#15BEfVO>W$s_z1)`xY%9ZRMTpTsrsK5t-kUw5FO6 zE-h7gM|EX?CpHXyNR#$Nf0vTzNZOdBCF@Kdm&X|<xzVCO$8Q5`c*5m7Z5+>KGdg~I zYq|wKm99j->H2r${g)y8%ihs3|F7`#pAq*Tp@4tHn=*e%Bgp93{?Rs|`&;xt&cM#f z!B)?}4v+DV2m>HE6|e%p@Rs&~9e<1_U<V*&ril)}l{2*ZpB*f0^Z>O;dd)vJ@^ATu zzmEK8s4HV@r6+G-kEf1DBO@pbIJbc_K%n6-T?YQYw*r4}#qc!$1;+o=Q~^6W=zmBL zaC76)2szt}$lL#+^8eI{*wNw9{lfwMsT2k@^soKG4AiuEbpJHxjDQ0DU)(+**ku6} znEt+0VLCwJ?+9F&3GkWWpNBA0)8R4wqwwd8|0yf+=bT~|bhOZnf6EPsS+L+S{{3xY z7OZ%Te~ToDS+L<T{zEDNaQuI}G5`HX_*=n(>F>Mxf7J(|2ej(H6fBq+|K)D|^W6U? zUGP8q)Bid8KkEQf&{$~&q!GA|vYBjtO!sKz@Z|t?g0zIXxVZQlfC_lO=%I4<_6Gu_ zEba#jHY3frcvV#7kCRcH?s&bs*zjXJC{`#ntWaxMWY~Q6+)TB*9-|@M&rgq!vz3z? z6=Ng&mqJ&&=dGu~>BrHX?Nmd`0|gRQ$L_wQ0Gpz+?hVJcP}f*-H+hN|2D8fjd95)k zbTSJJtD}r_EcLb)N3Z*b$@8HpTpo}2z5TVOOCHacyW5c-PBO2R+G<ydhU|ggRL{PZ zxN%>6`DbG!W@7|LLj+d?BvAQEj)aO%#!69!t&nYc=(QHPg;t0KmWrk+n`gO8C#h<P z@#-jYt?_DhTpiz?4VE-4Hs#h62FCi#{N~nf)uSFWyFQNRinKkSF*Kz|#h#NAUZWmD zN7pE^bD#9jMW64X*ycl6=b@S%q^>bCzBLraI-pEL$<#!!@Ezdi(gf_=7b<++XOUEJ zm|D<M^<A~MXt;l2pHDnByTX>o0#BrCsPsCP+thWK;#Yj+WVFQ9!piKbb2YkLDRUwf z`N)48G4Q-svj167ixZow9ThB_n0!G%mq!WQM3R?Z;olh5Y7wqyY~p^Ma5Kh0)puG! 
zh!r7VrpN^oV8O5gw>zS^v^!ck5D(j*_;oN*E8y_zr|@~i@4}JXhm_FD=hlz?!kmW2 z?fZuqIA{M=&U$c7pz0uMnYlK|sn^0rG^=w?&xe<}svWP~PtyG;ny9RH(Q&gxrYlk; zW_(>m62oX>eZ@i)#`wj%>g&m$WXB*0mYtniGj+)-OgkB?hNvoqXUXI`IDJ+KlLbiQ zg&4!-INjxl!^NoG#V14ACqwxMQ{_0rr5OFWINc@R$IGzD_0jQ7Fp3URb<VUygRP*) zF2&0*s7{kBa5a3kfu4dyo=~^C3Y~H~y-M87^(=2^##k<;l6lCy%vD!Bw-8i#Nl$G9 zm7}8NZ(*ap#^pz3TG*RU8UR%p*fSKltde#JtT9NBQwC~MT&0)NRMgVsso^C_`De@K zHVYNTYx?+_1rX%QE!2J0H0zpYH+aMpTKcJ46dTs~yn~AI7I=-VrZaxRcc}1or1-AF z1R^Av$Mn1QoWM-r_N5k!q|^<~#~mH48<9y%vjr6A_+<2|x1$5iN_v=w>{EZm?W45P zZv0D%0AbLR)^Pe;E=X5lREUVMv!S}np}Ev;yohZXIDz~;QK&oz=zN+ag+liazTfoX zJp~#A9l{v0C0h!VdPy`U8B{tc)P{)!rm_7+Sdk@f?Az|_I=#Zzx`rd)4oc7}=nKwo z_A){h5faooBiKJCFgqhcgG<8}*{01{9SD92ZcTqhqz#X5jAXf^f*wB^FUP7nN(Ctc z8HEP{<~+{oY_K)D95{U>J-o<&GsL<*b8we!N|r@0q4brR6;t40#Va_LqZu_XT8vx~ zoDWGGn*D)Kw%@VL=u1pl7r$P*(0&rHF-oImcy;X0K;bqw+$<-~F*DLqTkLFW_5yhX z9*hC5$p{6IcS>pqj;DhUI@}5D;co^0jmbS7C<V+%d@krX6RGe!bjKGka15H%+SM$U z>uVR%s2!&zQ&wn2=pGg>KxYpuyZ}TlWaW7Ni!Df@tUC3|*Qa;UWcn?z2W45n+spc3 zakr>SYI0YM&Cuj1Bc@3_Uy-l2__dUcXb|<mG29}-VbZS@M2|8R7}*vFmfSOzxy5wj zeoI>a?8doX;24F|L|;QCo=Xt!OM^zNCILmS-CEsgWkUT%@kx65Wk%&?nn_zLko4%< z5RHShLpWuVrn)gsvfok}I|(#q6=~uFXg;$VIP#C>e(0l@xskS5t!n&l0QLJ{Lxp=( zhOyauYL)wQkZ)$MEMsh_SxaGsvUfR3{I;h7r3AI@a;tpLJ%@SvUcWF-o^IY*F1gdw zr@bz39^PMjW?IT`Cg*p?FbpzQP}WxxxLg)I&Xq}w5*sr++#?R_cs}38y7vMF^c%8m z@~dt9HB}76){v(&=NO^XMvV}p@Gtqw_6W^m%l(VxZp%>^#B^n@^5dL#WkkqTgjxg@ zk(s0FwE((QnLjB`uR|+oZNwF6VUnidzb(QcPeW}j+@MXvd}D+}p@l?YiHNO}l(83G z<kd%iMOh70b|GRE+;iBUk7CqFIZaz`rf&gW$FDP-CTvt&q0W?Ch<nTZ9@5NoHI=hN zsFwz^oY%iu#5b4|EX|XsgeX>ZLSd3&jHvcK)2pEQoy3^izZLIe$*@toi_M1PGjM<6 zw8;mt<x8q4IU8t6Jw-q|RzW*QMC=k93Q=AvO`0K4G>IOH*;xWg!rZE@!O5cEx*x9B z)t!Uxinp(PYO+hYkc_J0qxn+5qwsk$K&m&}2Dv4jqa(H|UotaMJFZ<1zepIUp`4<q zwW(5F&D=)%b@=DueL#OZDp#Aa7O^tDP1&XYcqba;tJLyMxc~Q%d_u4%Y_KW2(PA#0 zg{#9wxcwOz4l8tg{cj#U^iqr5sxxeweOw}&Mbkvp<0Lz=%C_PvT2@+fPy5>qBMm`z zYaE(+W3<r8PkWHZ5OShy2*Js7qU_M8BK*TZlepxtEWUwITMfm?9sZJf;UJ@acCL6w zKob!ARTu4Sb?#UTWzwTv;w>JBKE>%1$G0#3Xx~Q)G=J<N0C6VxLNvsZ(oW@HK#bcR zf!Z8{+nokn9R)$7iwrN5sDPmq#ovcdTHHyQw~{iK;nzxl{G2ljY$I*FO)WAfJmtL_ z+2<<W@8}sUc3TKZE0`W>@e@j-4}%mnGsV?1>7d`oRVAuA7bDge>@Jb&F0ly|xrFc} z%zm%;=ZeWEYzc=c!1h%G?#bR8EkEncIT+75=`Y*vFWQDo(4I?CUr$j+>M7WsEZd)` zLc`O=CO5(=I!)0-)Huf6#ExJ8uFS0Pn;E3%cqgM{LS#zh!0K>Oo8f-KK>3ycxdHE6 z%`e%zSl@L4W>bj9V%LQpTEV<v&_0ko^#ILKvKFNkE{MIP`iXf*sSels1OG%XznilG z3LO*it@AUXAbGU!p3TNyKYI`2llRGF!{iALc!HBNQ0s`|tML<Tv!m?_6t8ucp!S#` zk?2FdF^8ejg~c=i9QcHc;|Om*i-P^6FLP@@^OtjPxH>hwEuu>efYTDG+M>=yJHl4g znq^j+p~ps+Cm^sA8LZ&zClcu|7ws-_37tABFD8B#!XjrQRZ>*Ow=gmhAVsk=qQX-4 zaPtpA>ixYiF*>on+Bdhl-Z$6>f|JuraX<|z4@gYOL26+p!o=VPrpIC8@e1=Ysr#Z8 zTgZ13S%dlkwlQKcqDUCwN1=Rp@#T%;G4rd0#UQf1DLZKmSsPNAs+>4`2nOj7fb1-2 z1AjcePKsDn_)mwQDuZcY%0j4c3icDqkCe!a08y%{E)OSNhmVbc5vs&struKfrB5o$ z+Yaw-)vrwaThV-hfKGn2YG$-#O0;H5v=H#U_8kQ%wrSDRT?kRRNjxX3y`RvyUcq_n z-uKu9&DjP{JNpipC)nQPg)e*QKT*<uvd)7T>uVyH=h>Z9MNzLuOj*XyT9fWC1fC<% zjee^xd+)Y#SL%&ano(6*Qe5a=nVy;Gofw;38yev0CS-)gz&OW6!bML)Pfz}qnvR+n zix>mVgk<V$O7e!L1uG@`HCX&6BJwdZ_yID)8V=Ux?!NB+j^2*%XpPN8ZJ7*XO=I4! 
z{nt^2Ij#|s8lpD4%2F3&yS3GMPn|jk%~0ZH;9<QcEfnFmCKA*d)+&tR9i@Wr)47r6 z&A$>P^R^`MFUc%1;JI-baj`vZP8D2N63*MAq=%nhQEk}FzAz_T`cF(+mmtlE%i5HU z6|=9k4H6Y8D=j%Z!iC@8Z#K_Q$H~iF;>Ma-qLEZx^#0Hn?JtGY^z+sX;2nNS&&8_p z49>U6jkQZ9ClKi4Qs1o7n5c->91tJmBxo$gA{g~<hQ<LiLYIV8O@LyZm(F1E*Xk=j zA<zk)t`$;gZQ*!&Y7X*$5gho^7h{rg&Gt+dK&-l-@Vt-|YI+LDu0oEfUaF>6rnXk9 zzE-NXN}i@pk*YR|nYyKE9H*(3prnx)uZ^6R0P{}3ia$U~K1s+pON@u^8r?$C+r6kY z(61&prL?*%zQzQc@>_11eVw6(spbJ;IBUZhJ$8k_6hRQrP>kG=s`xu}YSr;t{=>lG zT-h^Oi~fs&J-@U13=>wy+Jd9O320`Q31SS`LBdpR83Mg%*|Dpo`TD@7r`w}}!=*c~ zZC;IhFPMQ5xfazI64m7|IoUI8LQaTvKMa;&Evhs$FEu<i`Snf3E!PpMLOFnwP=S*U zOtd_%PV{Rh30xt$-25Ygip+Skodl8bFf&r(lr*645?Pp+v@VR#Pft4?Ew%<D%8+)K zii(5{3$1>qx7gesM69e})v?G*TTX^=q+?`eVx}gex7*}BzT8Lp_#9+vJ(;WO$@58C z7@7#ldE%Qhv(lkpV1z+_b-KE4vc09Pb&_GIX)QLQF~ZPAlFL-$nq_Y1Znei9&eG_> zjaj2EMsxl-9MAt-ZIBtFdg;zY!L~wQ5uPB<A*uSOcQLlE|BKIlaeHR?>;;nNN9OpN zw+X&(w9-89mPK%Ca6@&6{Cb<pLY)jxGkvplmxuWnd48zEcxbqEmcKk)Bgj>pm<lvz zKp7tWy?fZ*d#Hm4w1LIWv3|OA$jbDO7E#PLOb&ePXzom%m@O4Ra%Diy3h@)nQ+9R^ zh^nuV5x^UIc6GbAc5yJbuTje<lm}3Z-3s$whX>g-yfjBxIF_fUARjpp`$~&MXviW) zd)<(};lVagunqS&Om>blj7^C%O^I|uYG9}aOA(q`U2XSA&v}Pdm6PTx0g1Dtgp<&i zx2B-QuJhhgKIDdp!sNSyl}P31n&`>vcE_V~ax1@FO{T=S4=;z>!<ket)ws_G5+pe% zxZU4aJKX_sJ}^_r<B93(u(08JGj(k=-$ax+sa#*tb-NO6Ni<YkG6r!HCU6-+Ni<y& zyWAwc>QvP-efuE!TP!#I&@9C~6YUQ?5@r%YioE(S-k=8HA)VYzQ0ZR6PL^LajfNtX zhh!Zt%04}h$)-IjBclYAOD~VUIK|7$f*p|3Q;qR3BO-p&3A%%r$}_T{F#1*SD^jpu zp;1F!%j@cFW1|e95S!2#8HyR`PNUC{Z)w-VsrY@R_+s6<hdl^2Vc|N_LBcN5irP3O za{LrZ%*4I=_6{_^OICL2mY^`k`Z|1Q?%@6v7sAj|@sXPMZVAx)B6PF1tg1F)EbvW= zmd(wzgGx)qEn=VLD6usDh^(+;uJOjlFJi}!dsF@DkQ&ygE1c8HEwmWZz#MZgHM4-9 zqDUN(6ht~ScwmXMg<Y|>+>`m*%ly)HyU8C}K$y+s<wfbO+)e1CYI=6!8wq~EL(Kut z8RaAx9_Llv(1zCR;R5J7rU6AI*B<x~<KcpO>3=l3Nic<l9;)KYGaKZ$>me3*go?mb zSt1v-lSX+a+wi(r&MExdchGlVk64L7O8lTE<}3sXmlP1^sRWnS?y$hpYO@Dpx^2hc zN?7ME%@iZ_Rs)9G?JW1e35fSAS5!D{cs{LLW2Yvo55+;QwcHfLPu{a&BeD-(K@LXO z4+evMy7%S1tsp!#CpgjEO|3P9sFa)>uN;@Aa*w!MI*R<ResvuIrmcZ+my|OSG6n*I z0OofHyaF#h;cvXehBrUDE<d#@#kZ+3(o4$N=Hb5Iv)+*urPY3psVQ3<#Y3zsMXn3H zT;{S+=DH}hav(eGk|}?`@JJCt)d9X;9b;rUlrVyn{()*>W8jym-PH66zUbtX*<txz z4eYWNo-^IRl$a>UFc{D`spf-?C=Zhte4c;l>(8a;d;f(g1pf_#1scphKc+09El63= zU&OdP5hFHG8OarO#zm9b!HQ~1qb(Wyh?N^*dlCLFN_vWNN_zYgHPFj}scToh*b`tb z-+y{3nqdh}Ae;me7r6zBa6Eyk5&U+bwAh2Qkl;jO7g=n??P~4_3$;L$O_;Bp?C&+q z6}BNSh8j;o)}T<Gmzm2{T(<>uP(#;@#@|CrQ>(HN?wfujK5Q$43&VGPJiG__LBsE< z`zytbxj>@#pMp19mp;pmu(k{-(QLZ-2AAtELF>z0b5ooT7KhT>TiZO`!UT&;j8l}G zTVkYS9N~6sA0dhlRS5`^LN!8^cZylJh`YzhYs6Wpx!K%KN2Jxwi7$6duyT`ro?3ff zGw3x#g!y@Z&Y=%?{od4G`3<eaLqfrVM?FGC)VsJg)jy*;u&go0U0v+rzJC!n5j2I% z>4qdfEr!4Ao;tPFE}R`|R1xdeK#+Tr9t)4oX^<olY&zC<F!BDA<bXvw_~kU|cqJhy z%CT!?*8Sz~_~b0ZBcSS7y&kqANB$b6NpuOy*icEBo~%V=PZLMGrRp+aq5@5#YR66( z6F>W0_rUiGHY{9X3=}E=0cA%X$lTS%X7}UaeuONl8yOW*fWXy6m)iqs&N?kv{9?r7 zLabu@nX3XsPd?0UF3fKCGd$v%J^%fsqNOU!z3~MkFE2uKJs{~M7ACY$o<2IJCK4b8 zWSN<2m`GkIrGrpb#i`Qui43~QQ3Hs3-XS~%a2$A|U4@#+g#=N;)PNFllL&rgEo3Nh z=^kY^I2_JEDR{N{f`2fBKP{e~BDAK0#^(?y?FuqQnVMoKYf){6j_vl@HHCY~qG^qw zw3i^upvj%Lw|nI6%7uy3G-%>rW39pC(>a*hc;0?I^uMKR3!>&lb3eITbM(IOHO@E; z*MIH0i<~d@8CxRZy~_D;Ts@4$#;jJldd_1pVh4wy^M&8Gka4Gn>`Vx1>Ws(;jR`Ol z{W&!JvUAP_$RyAeC)sz1+=ZV<$v;x@bEDzQ@ZtQQ$)4y&ap1T(6-P<5m>P$}J^u5} z;_TXq;(C2t=qJ?rAeILjwxS&horHo5O>x3g*BPSvPOHL3Bef+C<?bJ`5*vO36Dz|| z+o;%p$PfHxgU$JVa3U?)e|uZT-qyzR{DS1y@j(*N4?UsbTwa1-);&M^@e4P2$^daM zN0n!>oAWnabqQxT+OI+u&>0&mQ$`;zD4U?|w-B6fOPZIrr^)aisr0FSQqUmsVw!qt zvL=G2g(A-NT=d=ZD~f&d08hr)^|2aJ;C2e1Ie_A1XY!hb%4isEC(o-e(i-1K7-fkZ z&|Va-X{)<$E$Eq5V6}Y?(MC%9u9=*r;sF>=^R8h`SifD>dRBnmpnm|v+yLAIF2;;+ zfT%N}{<jRDJ~Wm{>}ZP^2K$Pu>yzMsuUnO=35YET8d~-22i)`NV`FyVo)WaEDgymT 
z3^XSBDluNH+Lt;v0bAEwtf0U`OI^C>+t2vto)kiU7;gK8%$%Im#<7+JrkNBrPdX@S zpPWQXEwuwB@CO_d#idjTv>-z(p*;skOwcS8W<A+E(2^tv@&FOOUH$kbr-@LCE}Uc= z)hVX;>y0p^%#W9D8uQCvmccX54h8!!sX11N2?TPJC4v)mfjhaoV7t-B5kX`LeybZU zzAyMi`I-f=-!tCadtmhw6dUgzHk#S|enR08LasLvh_9WFH7(YKnyw6lcOS;Vq?NK2 zJssZp?`eimeJ{WK9+YU@lhv2n%0^%Fyk^UBw8vwwZvEt<nDosS+80T#`*7L$<!Egg zi<O|y7Qo*Qp1N6q=d)xj!LSb$O;KKwT*~2}tHAnB8KJ{(XiZR&wiwQHlnjhByn>c= zIj^`p6asfpR@zwa-$Y1S7u}I}0tS5$>NDf)4i|2K#p5(t%&}ESEHNE~S5-HbXEv~) zJV}~rTlIbcd0;@=(HUQ`^6h^RlNwjsn(5ivOpY)_rb00FB=c<y5abi&8q)&Ck#}ch zriPIrKlbo%Y>02+4@^Dlw(=sTr}y+djGT1fKpTa6LdbA(Vwkl$Ta}nyAGA8jA~2W) zTp|b^a|{q>Gr6mV3NC04)$sRai1rnF^HCvVI%RoX9C!UwtZW2bccGNMR&M5svfN0R zA9okWTj}uoAZ6!}-?NpNQE-(|rbZ*LFR;uS5tI5Sa;JvzkSEfyqldej^Lxb|xRwIH zx*cqmwsvz0HoxcTwH|yBng0Y$Ke=J9@l@rK9Z=r=S*7S04kx|Id35!eME&dB>ineN zS>8$7#zNpHpN*&-`7e0cip++J$c!X)4G#$&X=lrzT#qjW;S9iCKG}Tr_3|b89>CFF zQY%pcq|4;(%#>Z)qnwz+wMeZWxoAa}=Zkk#y52OGscpY-#tJcV(U!ju2veP=%R5;( zZ&ymc7|`tw&&f@O8cs@edg6H_)O-zJ?;gixjSR(cp)zrG(b@fQRG;!r&4d`;gGn=O zH3&q58%)rYF2lIM$;?zs&v>&vc05a?ePF?KE`N2m-k7|a*MCi*D1eJ75hB(ZIesM( znL{-}k-JD*+`-m#m>~$g^M%9j@+npg%@?=^TjMOA4;9twgA7_0uFL+3QkERoHt+#9 z*0GNEYwL8z#%#Ai(IQI2Fal3<Qy92I)YN3?D!p<0nIlFBBsG{4n#~9W`Kq&<mrs&P z&>aGfGw}|R87s>5ZS-^8L$B0mDUs!J6FPFl=jws7jXbYBFT~46VMNCE-dJepDWN8% zBw%W9H_Pe@Zf$d1i{qEi!C;L8!nk0=!-r4p;6eS4+|llZaa^92S(VPY$zI$!vZmfn z(ohkay;6U=#InXXy?ie-ZZaCPbdk~>B2$=6u*EmpP(gp>EZT=8@~X{`WzPCg)dP3} zD;$N%a-8DrxgV#|(!}S;iF2-Yw>Cc)s_|>RH6|73MrQyMO$1DJ<vCCZxopTao>*!- z@U(}AS3f>S&m$Vz)Ju~4zlITn`BKn#5SExSHkjtOn&~<pj1{as)7+zfHGM;`@_NT+ z;%PZOSr|P)m2rdA;_S^eZ+{Oz!&aa|pao9>M=ov2uQq$iL}V;%`2{~7re+YL3ZVQ7 zy;sFoiBba=t4+ZV6w@UdKVkMwD|4uMsF-0Z>!Zv;0htm@Aci@TOrM!&NAbqn8J2K3 zDJ-*WA}R<I9gyZ6>>uDE#YcXWRzPeO$s~erx+m+=2NuYW=g|it-P&EDm^m>kTUrqa z%$B$?1%tveAso*X;61U}Vh*+n8yYF27NQ=KJ`_z)O>L+8TlZ&sTPXv-rhWHQmfh$? 
z2;fpX_kcae-jj?8jU-F8G?7cBtB1ZYdWL(xBN!;R^QU4Ete|(f(=$%+P(V<n|7vnp z%8j-5C}sRg5ud=(mb|AwHm&AzNJ(zVB)tS<34%3M9E_f<VTG$DUEV&Ki`ZRYXmx&o znjRgi>;R=_GU(+-e}$)~w;Tup1S<m(STyb6YNO+7Vq@s?ys_bz*>j5bBq(tcep8uQ zVu9hXo6@jbp=^}hb2hqj_pQ4N2zTZ16CTz?$M2UdW}B=$a_kL^-%cE2&$iNq)+*{^ zY~ThxGt|uk)SGKhzi#4QE^2OKN-yfMuc1lrfeEZB@ELp0Xr*d6CSU8<Ytr`jv9Y?P zODtKIa&_OK#uzw(EPkzy9~s`LVq=W4Fw8zC5lCk6e0#(bdguw*<LR1QeR;KG6dd3X zUS-iZMhD${P3bf_r#BM``z;FzX%rNvPF8$yk)t}O%&dKRfCLyAO03a}JhH3YY^*KK z%{A<8tzU5a=b;$ihDJytyuP>Mh|YVp1<}?oIN75pXorqX!URj&e}!bS<1<cG4LflS zNfi!B6Q=#)_Q#0+I0R>>`IQQL-!9A9k`B@2ncS>MP*U!uWb+E;@VQFQ+7Z#R2VF_h zVVEq>AvkawTPO{Z5MP%n(A#_7QF%=z-=#~BLHZp_L{LW5T*=Z*d$Vfk{qSHglBEKz z2rM-#Bl%O})ktF|8f4q9Y|PG#0_M~#EV;FYyQ{OazM<vT{qo=@G_nL2IPA+l7}jO| z7@Opri=@1T^5K^Jw7I1PN+WGuJ*aw`nnwClE#>}`*!+|-MrTB@Y!o13Oj@PRJJ2ya z00aUn`0tYG8Vccng$|t0_W~^qZz06ykdvnwOHtrk&*4a*5^>2)zM}bv?SV0{sXhE^ zA3Pp!&R;gVO3k^PL%(=h7aX6EbC$9hiK^^-57(7ES(mu^m0_rR1WQ;J%GoP3+5lnU zrX)hyV1wGf`W!0H{qo%9e4x84B6%>;)&duUm^4yWK3q}))|3DnrgLy{Gc$0*BIn#} zc5}L1m>YeO0;6I9l2G9J!P>-9Wzu?fWB>WIbF&B@=pOB{OcMB3vZ?EAz2Y`L<W{8( z2x-R10fUcIy&7fowN482EQbm<28xdbF-{y*zXUycA~zY2+hwt%bSSHIC>z?*LSTJ` zIQ(TWw=tvLdN;cA-JhQ11$a(q=DmlVA}!d2wRVKtFm2|DbfHf3XEXQRq{V5btc}4$ zk5ty&+#pB`Q1f<`%0p7~?%$><emt)~(w1ePP~4iCWuL$pi$PQ?H#W+kuy!?cxSF}0 zKUfr>Hd%R8I$83#Ha0!pn26ech0FSylN}SOEM<D`taYoP_xXE#^kj4q+#FhxTlk?S z+N=5&=a7BrDM^_fqDqOeAs;PC$B;zD#W_z2?Id-2jBS>zIZmsdqpz$lcL${2?o`N@ zZy^G&<w)?yK0m>wgB0rZ*7hWnkC$i5`_;;aAU&jJKt$fY<|tG2{T|wx;<+yH(}m3u zWP^t3SJcYN4MaK&;(hDfF0ldJ#EI+Ok|R*HH6FM7hJ>l!&K4j3?^Fjevi_TZQ*bza zU7Toh+O|2RZrx~my;&byNl+xF0>v+*Z%b7c_0-lmow$iiL0mqnnAoS;n>sh+cGc(8 z#$yeBM=bf!&KmdXR9aYFQ&&)jH=Lp&>6zhK9$(O$T2&KW2p60U(HOmk04AizN5XkG zmRg7WnqmDlgH|abn?bj+h*<Lj-|!Gi)}DVWD>L(Lp1a<+<pi3*>bn}Td1@}!vai}H z>F)eA3s=p@k0u+5X*`vMaU64Zd&95E%UCQ{A8{iH$mh35ybd;q=law2UWkscS7l8y z^qtaAiydxHr-L1CkAHgLE{NYUV0@)X47a~+CU3gd6<ZYQ>6>_hG`0kUf&saOPl?3w z_$(b_uVidLZ^6aaK#=iHu}9Y4KQK7j%Q7_L#nlrd)Nxbk0?RVT#R5VT)-)x^GDBL< zSu*@Cq?(VQV5YeGxr?|%QuL1$gZl?5)gj6QXa&c<WN)4P+96`5C`@`Nj-cpVz|e78 zkkWno4ey=>SBEc>JdD{1_zV)P=Vtl(HOLM3z0h`#?Rd_<N0wuKwO827gCE3DIo?y6 z(}gkCD}r@#NSZ&XQk3tOvcps)Rb|!YhSut`w^vSYwJr8p6oUJm-r*MBQ^%g~&X&Kv zpiFW?TT@O%MswnVQ!X<PWJ)@B6@=VW<@eECjDEn>dOi+Nl%Q?qCQNFUk2iC}gMS|n zApJ3w@*_3#*Gli<Q@6vg<f3EH+;q;4_OXSOiI{@d5=8;?RBp@z$INZnk^tf(4Y;pi zVQ^{JRY@jZormK)Uxr@~RU9lFJl~ZW>ZCcC=W#80-0fdy5bseL-LAH_<4C7A+U>2w z@ONYiVr{LZEy<44t4=bzxZ65C4;N1!NYt*DXnmt1EzqpN8byp)W$kE4sT-X3l$82d zgTq7Z?%)_5_>EIj>3(o{(fo2vRZfal4iXzkQss)4UC82FNa6z%D>?bpw%sstxqGtr zlV*Km5bA_{FVbHXVa|nq1&!EUXg^)(I4#cCNaqj31uWI#=*$TbMVo7ExaAivFj8)6 zj2f5tTpS-?O42=i*#Dm4^G11@l>xh2lQ;3KTpH=~JefBnb9sK|E`N#Z?0I;tr{Hmi zzVLCq+l0H>WdF7F#0Ajhf;N5)KzmTOrvl84rmJkIbToi%y5VJo)ILHO9$>4PJ1Bsa z41v}GV$5PTsA3Fb#~X0eqJ1vuYb?ZSayjjKBbnPGWxiQ5mpO9R+0nKvC72iKh3wBf z)NhK@F8sS$^3)DfO81o6;S*P_wF0H_9yyLcqa#^yHrSf&b<bBW_BvJGiD@-{amvp2 zS_Tu{M@Pyzr%h%o2a)v5Sw3AY?TlTmt<Jm~boBxft}<(nvLb#RZeEvm9>FEjneLTt zZBdzLU2Q>?ZI6zQm)sjZQx_fX*Vl7VU<Q(~{4Yekg%@HI14)OY9EX(>l{sJbsKU4M z<k%U+#gnnQ6W_BsT=M4yXx9J<FP%%M@tg2ysYh0L`o~u!mwJ7r$Bud81*Bk7W5d+5 z<gasNuksU}iW2Se5^eC49-rX-o<7YGrVKLP&Q?bFOL_1$ZyB3xa*tO{*REXECaRNN zYT0Y~`kG>Ha`S2(=KYMUz>ON2H0Fr!&9U9zc-v0c*@q4eIy>cDeiyjxY8M&H+;jq* zY|As)llH_o-|FQ#Cpg^Z;oHm{sSL2Kv-{iL$mASp+f?Q`r(3I^>-XCy{VupbfbY<( zKkNFq0w5ytY70=kVoXc*y$>rllh2Jg1p|`k+KruiSIRn@w98R?dY;&qrsXyYt)9iD z1t`8N!}Su;u4eH0ITe1fKF22a-JfNM&mC&CaH)>us>(kx(0-CZ*pdw~1lH&)9rquQ z4Uj(eJ#W%b`P~vH$<CO)9u?MThva+s6k5fVYvPpnOS<kSyh>}6D#M1JxM3}``3@JM zBt5{vd!e*`kOcWbsnL;P&Z8_SLK4A~zj0qR&2)P$0@M_F{6iP*vN45N3%R|(QIdI7 z=-%?SylNpjwt8>^-?-urUGAJ82kO=jr6nx{AYJO4=VS1nJ-j_UK6{B)4}Z<$?4Ktk 
z(U!dKe$>+l0I8i6rM9~9o*>!5yy+pyR&ESP4vnX4uWf(Mgnz(i{D5|GA~zw0&eojG z{GxeWFW`(I<Q~jlmfWiC`Eh48xIC6bV`x;EdQz5lQo21dN9y)Dn&EOwUGxDCw8>eo zG|Nn7n3xS9dCxb;OZTYq&)XihR-SG=1oS)OGb1{h;Dg;=^ccu_*qofX(G$hlc9#zf za(uL3I0Jyrx9L=!!n0PAoEXowImhFsO25*A%t}v3_wB}^(O55Z98Ni9acy3H`LQCB zlEoFz<k}A6H!#KH^`q1{XM2vD;+HYDjNB|8{Dq~l<9CD08B?C1%wj*aR&JIcGbkwP zHIU^;SU5%ICc{J}|04b706`Tuf_sbe1ULyudx4KR))Y~Sc!kvjL2y3uQz`00F_KH6 zJj`r_`Csw#MoHX2+Y4O~Cyy5!jWvusQ)7~nl0w45Kc)DD`S_%yq=d!9dIm-+ODfg0 z<*yGf7na{p2bVQEYcLY$9D3McG$Kmfm~x5>+!UTKP9GIBSPaI$*jJqW79aqO5URhv zc#3%1e&T$Q4}(D<nyjoG_W*k{60dvT`rzB5bNpV9*+&Jw<@bxn<Njo1Oj%mH-9Pl1 zrc`&s<N0JMaf&Cd%j$K8+v{*4%E|~;SzjRol65|xGB1pJO7leh3{z8e`M282Dp11^ zYkEnQ(E+v*+xZjciIg|o(LkNeip%h@3PctVl87=D5(7R+KUo>JQXT^4S2}uBU|S`8 zXeAwJW}TnRIv-)=a-;0ErLH#DVqQ+Ghek&F`P8*F^b};ntu3sDM1xFAOWk2(VYe0& z_bDY}qS7n^T;H`Usi~zRdw-Yn-0~~j?}wNMYgJWzd0@u@gAV?Q;KF^5KrZ%L8ACkn zqA%Bxo|6#)K~Aczw(0Dki>*w4BdW|0kg4q5I^NpZ=>1hq*=l_?N<|j>)0#(Sz2#FM zt>Roe@8cWXbD8oK&(ac&mY4dr(0E%h&h^ef8a(dC$KCNnP?U`J`_uNqE3S6u>-BS+ z6ToqK+}*8QgdI+8wnqXc0=j-lk)CfQ&MT~R$ZU48vepGYJWwQqk`$)l8tok2jgCrI z3!9bAl~KI23{q=ejFVSEONu>0Mm65>o~W*o0kQ+Mpuw1}$jU{ps3fZ-W2UB||0+$1 z$!rK9gGYnlK!ZQK-9&TF##G1%3CXD_xd=X{#`R>RpXTy-u@}wW%&N2Ekk-nN*B%h8 zB2yITEet6}E>Qm7(i~B)`jpY3?8ne_ud!=ieumJq4b=iR!PSwh-m~jer_u&OV%gDB zOW)NBT?xLwEz6+)fbs!MvL9)4`HO#=?`Y1iH!|kh3exNO<-Dw^OUW4wXItp5%?hwy zXk}(bNmCW@R8y2yQ`M4F1#YNsFK;d{FaA8~?jxP*K5zd#ejneV@5*NbZNv-?e_O=B z>(a9~@bvJ|KDU`jeJ!j@N=(Q%$T!e06p$2?gbItixp{07!IL;Y8_&>`OkbZ5mw<JT zRg^$XKPy*}4o$1$!YB6NTTcM2sZ`hfJXsKDXIJaI<?Ozt<Js}C=C*r~uylOMs&*5I zqiDI;>vmsyN|e{HXjWtvteRa^EeeqE_-5*;-9cfICRcmKXvg;?Janj_aE~i08~KTv zF%-;cXxg*$^1{M*+8G3~g{$ig!Q(eM_y+wJmK3D;j&~k(dg^rP^ze|3ju9Lj(k<+% zNJci9&PGT|@qW@LJw0&2=~{ogv(mBteIC<JiP%yy>uEIRWMiN)XFrX??AD9|VM;fg zD>Dl!!`1i=oRAO%3hH{hio@+8Ji53TZdQA4e71jKX?`ON1mYH_@@o;~$bv*PjTZm( z^`kl|wTrB8oZoqq=TC9Xh;(0f8<P(&x5uAp!sKm-_}&4f2dQ~mY6aU%{RJoD_$Eu! 
z939U`F2+iAsVP*>vUlm*O7rR4o1P%R^Rw{QOk-okC9!73f)k^QsDle{sRhruSHDbf za!fWMK8}Im?UGH$2@5Z}d`qmn-kzMDk^%!)H-}DF;yM}<3f^BHCd@F7!+LvtA0HnY zFE`sB(V6;xd(}SPtfwXK=>lfp>FHpKmS0LU2U;n@mW-4S7rCb-x|_^RZE~Efz0<<# zn#$fE9uBIsn_Ax<&aqrn$*8q(%xHMHDAX;?NHM6ynUxAVGDOzYPi2~z7im#EeJT=G zFbL=k@fS+cka#>^(!@Np9}T|v)~UQd7iD>lPDwr5I<0RE+9n+wPG$PCswAhpJ#{_D zWp%lxIhf?X4;n7*Rot!0(uE-_>id;S6F!59;CEzKbSSe}UA71!AE~Xm`3sZm=jAF_ zmxwo6O;uL;nPqIQsx5DT$HK3>*lr)ZU=GgdlGkf5uc~!Axb07_4@|(xJgsrGMQL}m zeD0CWlUjNGc6-0zCeL_Una%E1v|hpG_AT=GoGZfRd4Id+eLp_T+VpsWH19)_;VVbM zY<GpA;y&kcgB~Xx5W@X<E*%Yhb8mka@MB3juv+&5+KlW(y}DW-a`IRbW*Z2fN?&4u zFDX9K@^&t=(lu~2Fah?xSZg|DVJS*X`ud9PjF^15zYT5=@0Af)N)H3k7SmBznNZQ1 za_^!lNkBlL01cuk^O;M*iBCX4gkPkKAaitaKJc6D&PoCH-XS>!y{6N^%m7%A0vI27 z_{-uZF%8AU<YZ%W?bU?{rI)#r@l|M3TUl!D&H&7g^-gv#lBB}|o>$h$^$a{N_pf7v zEKeNFqcTb=N}9>4A7sVk6cW;sPS+tfFyi47!}O1$2u)-%wc`88vBX6>`ue(vNGP&P z^K-Oz4c52N!II-ul^x7XnhF}lm6d~2PB@e0!Nt`%IeZ@STrwT-KtP)g1*BYrRLtC{ zrV<(w<{A?2{2K1M?z+1Ce0&$ZgWx|n+jEQY;M?HTV`E}&Zf{lBf4hHt42c}&vjcq^ zJ2lmO$nj}0w;muLTrQoRgwEbiNdD?<XvrzYLo>DE!#RK|JvfX{Lbfz7@A`Z~vQNw) zb!$#mR+jZ8OixMk*gr{#;Aq0-<}<jI?VGs3u}_>C8M!4e1ucgrml|Q4kWYF^qEN3Q zFZ)qe{KLh$wWlgmLoI#%Hz(_0PRb?VXXzc~O#iOt>Pq~bOHv+&ia{Jv5#$@r@g*eL z=jP?q+u>*WOBO@_rbi{wC#$3&FP~SWHeFlw3l{T(qtj0qy9vo~{Q}|v!GLFH7ovV4 zgM8{#46H=t18&bd1Pb%362PC6^>}l>Hn+X?#FjQ~nuLl?i%&{e1!0mZYEQ@pO{6Ud z-V=~7&CyY*8|PI0?#^E0cRF<F&2-6{diC!SDtzW|1e-gty={+OFB}OiswU&(;nfCA zC<R+Uo8j|B{~rJqLF&Hvv2Yw<pZ&Z&73F1FS(s^Qsa+iGzS-crcW)UO=y|!hpf|}# zOTx&Gu9EOenVIOR&oMD^^3&4NTALe}7iC|nD9TPxHqzIjrKREG;fc-8&#$WH=H{fQ zKWA!WP?(!pUQ&>rlAx}tOhrX0E+IC%wuU+Np!`-=mZW5)axzk1z5-R@97#z^P*YPW zD#=5D!^6!9V_YeE&jbCCq9Q`D9Ur9Us}CP{pFex?_TA+4Bo!5vxR{8X3}#m)F%@>6 zl9Iy1%@tsVtw96XuNx@HNwcytBU~IYx&hyH>eLVE$q85?{Qid@sA;G<IM}b>7~XyP z^8U`wlQ-|C=Vm1(FVN6ZUyu-SaP*}(uS+g&=@4C4-M18-UTJ6N04@bi0UXSb(BQ>e zw=h?hN2Vr(galxhpkk#(OGE9u@4qJ@C1qe_1gRIkC`Cy{DI+Z<b@2iiV0gq-Ss7{Y z`NU*Y>=*StLNX5;1AQGWIJ3e+0+yyGiE&Xm87aUmJjTaF*jSp22n*8D(Ezbqo9o|v zb!KW3F)^{U<OTGxLRDFGg<}O96yQG(IKc<~ygdr@vdW6{lH;RQl@utcD15x!e=G9< zHk{DIuRnYG1dfcTsEC1)9^jFcmVCHPXL~a#DM>(}@ALsvhxBkeb?OvQ`uy25TwnU= z(L+Z&8~Ch=k-@>&-@JJPikBE0b@(INn(F`kkAE;R(oc<#eDl`S_$U=6g|LVS6EhPl zE7P4@+ut+*?+VAw*~JOe9>x5vTLy*(tSn52zvaRGd$3pEfB(Jd*E@zO4z|{WM1<YF zUC-aXd-?AD$c+)`Zc7WZhg<TW=dmz1y*YmqR|5wdD^n9A&}tTDCU6wcLvLd<ki*vo zpkG5{;Gv_V_2HAJFF$;&t||w2L4S^}s_gR$j=ug;Rw4bf+*;aRIVD{S?PFW_U%Y+x z<S`5ra4w9D4CXdAV061aJ_empTSKk3stjwsVt5Gkb<XxS;AgZgtOdkz26~<dM*<Q` zJ|&yrl&X(E{RFRofI~+BQ~+{3ybbpE0Knjin;R}2d>uLnoO%$w!yl0l7x}OM{%;V6 z!*7A2o<4m#IU)A=z+)Ioh>1M>*818CXt<ob48q+7B6_TwU0q&;gJbVt(?8Jj?)^K| z=#vx=R6sa(oNw=HZzdupGB(uzb$?#J2JM2Mf4DJ>mE`241AW-<F9gHi4?q0C$UqO! 
zVr>AY@*MrS)~=4xnJLhDLn8w=R_4Rac6T1Jv$423+2h)0=*tokVh~6^fBW{vjS-OO z#f7=U&ET|ZYN*xKUxEfp*cw2WS65RR86N?`2Ms)8-bG_AKwel}xO!{5v%3?Xg8;sK z`PD41-Mwe3+U`WGN@7MK;B(45=SG(9!|VP&p1@&IY4P;R^0l#17&4OLqw#-TYf~MZ zaV1@SK2b3)9L&4BIGiCS=a#dG&TV@40mVFwgXhlC%}k9SdEQ|-fc+;SAsM{Vd%!b9 zfj$6`huec&5EM9%YilZs@^t5Dcb^>hp81a+K7jE$A6v>1#XLEgnz~ACd^8m`RdPZM zRzhGu4UP0$+nd4C-@kVkx(P0)gteq3#GF2TDmN?rNao=bKf+-JKP!)$Yey^2Tj_3X ztb>3%yE=Cd_CvU8V5pCkdDI0kywgY^(4eKG1*r#DKRi5yi+Mr*^WZ(AVj^kjXqFeT zq_&*wZ6UfTC@usCf>m?Ag0Qg`4y=EmU*F&ra9m)PaJ57RW)iX}@M-%cRgaWho^2mq zL(7Gsu&~h3<mABc5RBc&VICq@Q5iWdegQ5{b{x4%#~%nt7(}&v6Uv{y`7y}P8}@Kx zZRPl<UPzE1Ozclz@|<{>$6v1Y_Ye~krKcuA^mLr|D@*e*%7BdE(0MX#ZH=7VEVMXa zVgBvgHw+BtpiOQ4CD?a27f0CI^Jm}wncWBX?!xc{10Mt_hc|LTTohFC6?#wu=HZ89 zjpm?OTbQyiGng40;QBe>l$e+pEKH5Dz6<77UO}$BwsvfO&cFaC^98w?Frc63<!;4x zc@72|(7=SG_{H^=fvZ<=GB3afnECp7!_Qe+S~%Rm*})F1bz*V?10%h)h1u1CzTI6c zf7E7eWvQpLEj%O;POF`heN1c=^xUq?t=}{o9li#~5RUMriu}Cd7QmiX);S=jyS#J0 zWq7@+s*0R~Jg2arv%e3#jjg4{)WnTXKYhgdb>IUd!-9!Ph<SxY*m-$zG4F|R`tCF# zC662;qHuEMKAabbn+`XNkBNlitf8jz3GIIlmc{v5pd~gY5`#U&hTuQ3G7q7L<OOjU zf2+%ju|fzRG&3_=-B|tc7#Elz9ER_0s;|byysoZRVQ~R2=3l*f1p%XmhFV2c`P=vJ zdAK;06yy#!0|?I&5YW)kiV6!Jez&Qvnu3C&uC@x5zJC1*R|X?8EC`~x(}d(OQg3gq zV;eGJPoFt+j)DHn*)vUbSUMLk4;LT*`GoYe{*mkY270)dXJKK27>}5Q1O{ttGc=~* zh`At~`mbK;?&-k8JRptq@x;YE#P0%v{Ba4f5bnV+diLzuoXph2%?xyP&YU>|#6m1- zV{c<)Yl(HYRh1P92ni{vslZ+Q{PWLA>6H}R8iayI79r(Dm*+}a#<{q-t?jI;8|rzu zxuDO&S;Lw?G1SvJb@~(u2{9aGZUG@S9v)oGd$~FP&#AK{9Fnf#g$e0J@R7FWhHppA zPd`D>0ecQ-Bn6u{76%{%__yzDf6;kr5IwB6mXL^uhL#4RB&<6F{!m6ns<*Fe@9Ewj zz`UMzaY-R?h}GLb7lWgxrlwL_Q93m}4x7r$!eW!8Bqf-b8D(Upa9z{`%q+}}msS_| zo;-mcj&<<WQdc=mKuko(M@mlF(N=%Bfu6P|upuii2e^8<yJ9`UENm?PVWBPEU3z-D zSm6N+1RD{T5a-}v$GW#`E6eaRP0WmH8|&KJn{e566!XG@zP?^~bY4)9AADndLv4CS z3fM@jM_5x`6?%l6f}E0yBEr*MSw%@sRtoFaF)%SoN=w3U(A|L=1#aJYpr+$N$f_)$ z7Z_JLQqnpRlTn<OlT%t<=@aP3#>R4MdkagVKtqj$jEseYgNdD;PehD^_dFiv|MA0F zB323Kup(`JQ$|L5Tp^*fj3i+1d?-f;CCJ|g#+|zQnm0eb$LKr|b-0<Bs4z1N6Kn?t z@sHT<D+NxHpYMEmRoV9K&0{j(Sci*wQVjFdG}uib#@nc=DVJ52HlXC1eEh~RR_0k) zSyWV&n3)*AiM%j{kC#VQMyjW;^Y)$FG&q>o)7CgcNJ7daLP1G=xuq5>9u$<6%4#a) z6cjFw_E--c6AQDazh7-@tFEp#R^|a|h{^#T9$p@-DGN9(@G}hz^-3$to15zJFfV*O z=3CoaDyzyUQCR})%!9sZ>1Y|78Ha?2LX4oVqkXs;bWb*RHYF7$a6Si}s<~|_5xoqv zqFZX+cva70VZ%U4O?5<U6bA>}Eo^BbZB2D@N(yd%eyHNIay$ZpxS02L2j<TZFpJs+ z7hI5(0dJ1=SV6cgFE0z4i8U=yQCbK)91#@;QXdx`fy>*7i3+i@vM4JnFfuY=<p&yY zadYP8=Pa$v|1mmWS_E+}bYvKXA?`R>!9x-f5@=|s0`s}q>9EgOv%XRnCD_^7jEwYI zn6Wbtn-UQbDz7SET$)4aJT`Bmqosb9h?JT~k(`pYv@jcMZkdvb%FxUdoR}*P<{6op z5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN*(AC!|EGcSiti{i~&r!_B#mDqs>AG^Ii;4=n zssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY8y7d1p{Ws^vgZ1)a)2=jMdVy+%u0yBoSw3d z`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_Oh^KEuF}o<yVC?`nM5pov(KOBSCp5*l?Kt& zR0GF_RfSrc>VbK850@JgBT*4yxMEm9nv;_qbOv<(^{bbM8-NSt;^IutNWD2bb>!rq zqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv9$DF0d3m|b%uT@CBqzo}zrgCpfO+^Jcv0{$ zSeb_i1@<{RFMDG01}^4xwbTiS$<GOD6O&V?CSqwIBX#i{{W)6~XIeU17e_lB%rm-p zdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7*wAq42%Yyw($e8!ULZa(cJ#*3%Gxpwjuaw9 zfAkFW-oD<&WySMrE1;Qn)|Oaxo{yi;#=(w+l%&1oTVGvW*G<f(LUzH%A*R0M@?25t zl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8c$okG4B;6DVN>sPZXRAW6-8Xk8yM)qPr&MJ zU`T=;c5t-q>+g;T3&F)aAkEFqWo=`Dhj{=jIW=i&dhD3Y<MB4yI+~#2^UHIM&GlYB zUPdN{I(pima-XZ7w!WdBql;r@O=U__JR<`=)?EVTVd!yiuwli;Jah_9j@0y&k&&yo z&c2S88X*Y<ouCdODOE;F92TTSguv4}dANfxVm-nj7r?x;hkHd`EiliHi+OU8ab1Xs za4`=@3=TqmQ9(l^iuvW=&pbacKRGq_@bM!&%rh|32ZaW<v^76{{aQg@_B;=_mzyhC z1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU=3Y3*oKT9J-EM#gCR#nh8Thcah`@s&jph=CI zj#gb)&&n0y92h34pn#isC$RHp7=(;GQn`3|)p0Nn-Uh~Z1{}<{qM5g{wrKBc3l9s% z!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A<YYQJn&p+HvkTKby`4!Z3BLY5-bfEGO!e`E 
zN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQWa5K-r$)1oDfAwk~*jGHv)9|UEC8o$sjmH|I z0`o3jp17Guc^hZY`MNq3^SHbXIhme;o(%f(I<5v7oo{SBO6UE&k7ORCX?J%Ab@dtB zRS699!C}Geo$b5Np9cl{!O3N0Vu1G4)KmZo7`d|q1mAuC9hgj@zUy)e))Or*DoD@B zM8~grhFe?SJ~6pwtn~8D`K`x?m{+Btqt!GpuyOZ5ghWUwD&oEl>vWoc1aw~CErp94 zC-dO7j%41_%B-cWDJ&!qAM?E2pqUU4;$a^685<uyCi8gwJTPBTRet}`gU5S2vx_%} zN3ISH4Gdi$7#iwFJzgKUK0F9t^k{cCFFOO`SzJ1gVIG&xLjw*Db_nSD2YP7nFi%QB z$*p{bh&(eb0W0$$;|QepITZ7@xEg$BUI(|gK{GEaD}A)LIR^940Kd1Hni=2O*};{e zMKjNUVgAm8d-r!9*EThH!hmaPte_~*%Em&9Ug=|RW7*r){t@@RvQeoIRwh?(&x^`7 zgiP|BYTgM|BRLHdS0*+NngP&x8ag^n14A2kPef>>jFJ*3I~%TRQfCNBPtyzOxhCOZ z9;NffM=@_{*4)+<`bRK7J~8~y>O6+|%ChdR%bk~7+glnT=s?#xs4eKnu8ua4DQI6@ zkPXbg!sBi5>O4CKTXbyHmHux0%#(8}oF*j0%{-Hv5As;dgP&(*`5iisPVA5!pZO#G z{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62lNr>Tig2{QZ_v>Z%$B!N!z9zUI%OH@Rml`h_ zS@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&hY8e>Xx_i2zm{-QnJa`)n^LYHc;ZZtoae#TO zxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC1o{`m*D#n;QBfYOl#~=CB*d6i;TY!e>pTly zoj;0sbzq*9UG6jycIL&7VqWZP=JELXBfQNQop1bum<Q3o^{c;P{^9+X@7`x+rol01 zWo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>*P!B6RFA=>IjjS^={c>9Ectz*T)0c0*cpEw` zLql5+FZZx06!W;|+qgQOB_cb^D5~e0jE8yh<1xSQZSXJ;I)9{}ADb9Cn)ziE^H`?@ ze96zF@-{$wfUo!7?#{~c!hYS{Seu%-0Vh2_C*w%wxsGD~I66;6N<qRVeH8OP=WzIW z@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah$J@8IL26W$u>jWA+yD~v!>J#zPCDA}@5PlB zP3>*x&vRMW_z^M5QAyc*q_(Hj+^FihiG_LPBbgV`N8r_Yvg7J}SV+)6uk*)Y9y8wt zH}j}LyrircS5^Ub0632f4}SXMImXYw`X`u=iHip2Y4Ca*U>>iZXLA3n^LUv5oVQ{7 z{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3=4PkEHy)m({owvRc%6)#9EM}8tN8cxw8fQ` zjqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m*yrrU~NJT@fWoTq~l+JtXGw+s+KX3DedE4XY zJf6G_9_Gb<i_UMLGjFw3W%!scEyBe-<~ppEm0AB_Z$>K4yv-k?^SWqnb0qU_zQ}X* z=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE9qr&)9d2f6W<o?vBrYKi2tmMwYY_a*d|NAe zofK-$QgKBa6}QIO^SXM-{Pemh&yZ9BAz@W*ExY6Dyr_|98lL?8fn25SQ96H|81@hF zHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L+Emt5qOM8(Ugm{`_|Ko`K{MY4I*)aKFw7JG z0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK2fR%in0atDW=4isewvsF^mI9SSyCL#i|^}v z)jso|*9LCic_1WdPQan2?VXoeHx*mlYi8$c<>G>$`OiAfBx>fJaRl@D@-}s!nQv_j z55?(i#E#0_pmm=8n9RR_{~o?5G{_%Yc)LW3nQw#Ed8})f=I3T0FxS)5E~_j{Oo%<w z&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_^WbfC@OYbJ#jyFs1sI+GCz$7tPdF~~Z5^$^ zypp24ot4GmW<1=StZb|rS{e|7wPKqY{;Qv_s%&U&;o<&jJ?zfjQ)wmpvs{|;4oUG9 zqw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB{X7)l&>Y)@^T&@KLQJHt zu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbgbh2Y%W_t1bIo4AKgv7?j z3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp#cti~Qc=rfz6BPPc=am)Z zZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm4iN)FHf33dq?oen2^AxL ziMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS!Jvu*+X8sSjCiM$?ewU22 z<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAHz>%RrtkY?Dc(`C}0z2{J zdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTCGToz?7vMiHA_6h27x;M? 
zG4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PFu#0u=z<vx{R8ihHd|g0* zZ*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn`C8GF(lq?^%neD5~i$D%Y zA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9Uf`k|xLl_6?>FHj-#PVHw z+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7&J_?;hOG_guIbnEsfEMrE z15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky()hP^g^NR}V>ub<Uys<DZ zEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibwp1yvgs32!!X?D1ojPylX zI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U%lItE31<J~*c&<qiu}XNx zl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUaM>2o(d>hdD<1t@eQb<fp zbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K;-$F|{87wOrit@7HoI>w# zvd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr)w{gKc-)6rfQ&3P~Jxv3B z-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ;3vQP@jVkGgCEl4aI>0< zVt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC=seaDxxKjt6oZ^e^emV5 z1@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7Lt){9B*fTQHA6{4i`w0v~ z9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO})(o**i@agJm|M=<WsEAMk z0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3XNk$FcvT5VD6r3~M9Db5 z7*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T$;Z<TYs>Ae4Xkt259&NH ze`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t&b!}xC*Db}w59s{$sR>b0 z;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U#M8PE;oKjKRKFz|)E-s2? zDJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG|(=nL;`R8BCOA7%!XM5Y@ z1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl!TozM^n)b^d+hGzR$E`Q zzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H&z;6G7l|BQoeGKzSGLx{ z{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXfXJ(~?L!Q9480{DI-4x*< zWTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~UoW>-*l_cz$1=Fgudq&Y7a zky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z4A(lT<1>HE7#7;xfBewg z)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W;-MhxdS_%qsE>8Aw9OvZ# zU32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h*i-~-mdq95<>q<0qgD<&C z95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{8Jz1ZY?`^Xxen)ufZ*)C zySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*CtBNHP4jCEy%j+Vy%_h0|z z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAzi#G8-=^Y`ty-VA_E*tmu zaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=;j&`~EIrkquh)1o5rNMen zC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^71o|QSd%M2*V>+*1y}W+4 zKRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsuNwhT70Fv&Gw#E6`t@YLQ zwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_?Ng;Jn0=%?~qrIDpBUDd> zb4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^LnaoK&VvC4TZ{BSpq5lP zJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv2gKaO2m&8w7A7riO|YO1 z%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA}gfol`4*L~$2DQ*qNFXLI zdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XXIzBcyGzfj;0y_A~qr=yr zXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8<vcb_|2xI^M_TB<4uB=-V ze(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{?!QI_GxVr@jnuNGpM>^?r z&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?kw4<V=V2(EiPhs9|2dc9J z><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhNpl!~{;=<C(Gpa@gC+7S6 zJ24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y=sK(p2Ero00N>`8`gcG4 z5EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=11%b1FVB7{Bmb|*WP*_q3 z(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4W&%pAsHAXhedYU)V&?JF zCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$XNY)97v4g{X-@bbF{{1@; z4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{=dV(9iV>io=UAy<}-NVAl z%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`rpj1raqS!?-s1KnTIukG4 z5Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4rQuG=jx|fre<>TXpNS0|l zn+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{bR)ZK8t}8GkD8Ho8Kgb^< zAh=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd)Avrq=iwYk)av1*0b>IM8 zAUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{RzMpG13p*=22MfnRaTTMm z^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw&p|Y|f;K#_sz|EajTw1iU zhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y=|5JGmzRx<4*&g_M`--j 
zw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A!OC8P@00S)H>*rluS~xp5 z{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u5u7g8&{!WBJ~l48v+MGs zM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3!@?Lo8OvP<g?4cA!^RzK zT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI*fco>c};DMy^C{E)vvvK z0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@&^Rviw+)tlQ%uFPuCX*cO z)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ayw=f6HGkN~StGC}RE-&Qd z=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW*d>Dp!V`*7wK(eriums## zMFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^3yY2>I@oFJ;*`}?rR8Kr z#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?*rgh_pejI$lYzNQo;XbLX zV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@a06i@Ja<5ag%!cWi=JPY zH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}aB)PQ$x?pKVh>D4rm>B-y zkIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y2rw^ikF<;w;PaceHvVzu zC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP4$3M@CMHKgTzwL#Xiov> z@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-UaB3&liKG0dPMyCXA|WZO ztfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B|Eu#JC@3kAoE+Rd=+0CZ zf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2Pj63>1BqZsFe8|PS(B`( zZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EEvr|>o6;Ux!&=i?Qc7UGS z(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0^mX-&49u+vcFv$t$d)7< zQwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9NHLzGkb#*BPMNui~3t|@q z#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k`^DzB<kt?Pc{5+WdOg2* zrL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;&+@_(@Gm21&ereeKgM?!cBH7(h1o2lxUu zC^Rx+t}gINrMdX|`{Wnqws*AT732m4`hBWEMXS-sAo&W53zk<F|7g+^IPRk-4*`UY z&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GMg4VXC#ih9yFJJsp^Kjf3-@b%8UEP<fs>^e7 zvjG2>lH#F3aAa79Mr!M-28Md?+`IF$x$9vSZbf%*XG?2+R$hU<XRHdIzF!c_Cu4C& zoqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiAscB%1@dSGZ3d1`gJ)^dzsl2W>B`Y&JAt5Ru zJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPWSPV`Z100MjU@`hQEQEEeY^*@7K7sz=J2?gU zG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi1(M&)3Haj0&AYcpCPr%NYtpkaV&h|?q9fws zV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1=+1!TZ`|H^@p?1!$fIY^)^4m1jSMukHkDUb z7M2zv$)yVO!OL<A@`@`;K~OEMt}NfUF+MZZ(Au1llNAvY<s0bF@b?V}4US2SPtVQC zEXYqxOO3gdfLwk&a@BC{VCc2CcC`;rPoi^@4~&mBcea;QRi<QT1w=-=crk2TD5kb{ zdS(PoeM1$jt}+%UrmQKV?Hm}J5fYQeb6#hUC>}#ereA6`BqvDY$ic~Fsg;BEozq(x z@7A4LpfOrn8$h>YWThn~CnjEsgGYZUIWZ$E9X4LR+<bLu;pwv{Khq)4UOb<forM3^ z)mPUtt&VB1(AL&8IX$+1Z57nW<@T1kkN^5)ZI_#-X2u`?juDtIkJ!Ka?$MJ+;B-?n z6Cks@dpiKS@Hsft3(dlDq2>3#{L{;=m;d6$i@W#kKvUrUpn_&*CuXip&R?AaRNsI2 z;MMEbU-TL{0l569&mON|zY(6*LSSU_C_3<K(8Wx`MNA?t65@|3I_?$F+AU(ZTgdQ& zs<95<)WVrU^A8HUlw43%(>*i@B-Yl`)zoqMXV%==(b3;GHa8m+7b7h#b?Wq~!$*%G z3Y#el(NEBFdIoyI5#e<$O@rej9lc#G?X8V%Ep45bfnevB76I^RW=u4(4hs9b4<7(r zfvAT@2l|KlhDQfy=4Mu}uiaw)pzr$48*4YNF>U?h=Q?b>^WZ)Nuv-)HfVRK?=+TYa zH<#8|7gny$EiKF}&d)4dnVFxxa&=*8eGSd>vv%|5-0~8fUR!5-O;ba4LtR~SBjCAf zpdT*(GIH(h$R)S8eZ2N|=C$_@PEKr1J2Emo1;}sg=*TH6k4j1Njf|oC1=_lLm^nD> zTUu)wn}}){Y1l>AwvUa>tx8}i?7Z@va<;nkOsA+?N@Qhz!+3Vx#K7dr&lvB`cW(hC zAOnDS=jP$LPlADrSzfuiaqH&OXHUQX(rlR^OJ2Qs4ZiT;(E~7$yQn>Q{P6M9#}MuT zFyIqfW>!G|`{2>z2M-=Uf{pO6Kbhs{haY|f(1PFrxdmz;43<I3{0EwarapZ50Ij`) z81?;oXyNzwKLt&b)YyjO+Mc|co)T6`qUP}z2?^(Q{aMcI?i9uE6frxlZm&zQAky63 zLnEWpvx;i#dPYY9)uXdBqqEb<ll{j9pt#BL{PHqeCr1s8mb{XJf|8=Vk|O9~MW!SM zQ4}5%1rS<TTV1+-otaw-$(RXv14;8K{qbh*<4@@VU%Y(${P}|?NTz$(4y!L-zy3*X zskd)`C7IfmG@3}}!AHz=vCO=L%q+=|AKiL@<OfGnxxtyN-MR&*G{3qswKzXHKljNt z^T)xJezGew%xj<daz4S?>9+p<;<|>Uyu#3=RFBX|CvSf%Dw0thV~iKZSlI@ZU0Hw7 zJ+a2GK|LUfXE~+8t>mQdo@pOe6_MXp+&Z&<^ZqY=^#jkE`P|>ZbBBdL^yvS~md^22 zujI?;4FY&nTrXN)5;2bxF^d&3jovR}yhqSrm#{ILDpkvbU_qu*142SCrDT;=wDj~M zx$~Hrbhai`LNlhJ88`aJ#|p~IT)jNaiI&D@rpD%*29d9sm4%BNEjc5-bD($e+WOkZ zw6GgLP5k{u*duAweund(CR2O$hM7a{&D$>m9(G52jh|#meu^9j&5sNxfC3)D{LPyy z8ykpAURwv`Us+k6UR*>nBQw)wZe~3Hxcud$vCRJtOina(bmUjpB;@1;#V65&!|gqN 
zEXXtiB1v7}NLbe;ET`+%lW((Y2D#)&^42M*6dl-3DD$XMjJ<MgLn_iLhu*$@w++<4 zu&myC6p+y+Z5hd_-~wnDHjNg-N5R5j84}A`EWrNXl<6{hR(LxH2Zo<-bR3XXWpgX^ z;G2(o?#kAz_n&NNYNE0Ia%y%~RD4`mbW~VOG*~!dF|Y-;*EThbOieHoB41zqY3jhu zgvd<TKm2L-z>mn^k_Y+qn=huz{Foy4^(T5~rVHO}<k9ozDB77M4=%QuMhfiOojWMt zS8sj>{Ot15^x^^v_+LtZ`?IW|pOQ><3=YB_1Fncl%Y++9^$)hCdzm?sbuEZ;SOXCg z|KjG!N8i5niLcxzZLLNw!qBS@iJ7vVQa`E7F!ao&MpfT^^n4qre_>g=evcGVsqK== zq3CkXFjNR1dBHdwOhDI<?VJw4{{K_<IH658u(Ses;}#Sik&>QUS>4u$i1uH~l8LT@ zUm)Mk^!K*+b+`BTAfJ8Ru-ZL1&^I<ZHajzYb#Z~22b`HDc57PXdw@Mq8URH!5BM*o zynp)3dB8v6oGFSv?*)<uS_8Y!q;~}H4<CS+-MD-2v#Qy`@XS2m%!J7E^Pp;eGtv1^ z?*o(MJ3B!TCg&H0BqoEfab);gQQVAe9dz(!=QIeK&Z&b}ZrpwLTALKXDQ$yst<-j} z7BmRvl`vyHt$70HW$2mX5?OWY!P9N5{#E6}4?h}FQ*~$=2UMshus%X25$BCU&lv=t zHwrr_MPxmt@vmwOc8rHMo<MM>IQs+y#3rT`mOz{j^7V@Vn7q0;yRtHW?HU*?pa~^E z3IGHKv&+k?_wKLWxp!q{Wo~5|!ThyrfU&iI$OeA%!Gk-9&Up;rM6+jZl{){3|9`Zf zRNEBrPw#)GcK|%1;Sou5@BSxA0}hA)ejNckk^y{aX$nYwVSaS(%J0DWPnNrehbvoJ zbIL2@vvd7p6I=sB?K~LfP85Bjt)`*zQB5*AqIzND@$}mLvs$zhYBZczt+HdDfS!L~ zW(S|J{@(Mt5>^og9ywl#_3z%JVd=JB{|kz1Sg9;Anh)d2t>P+Z9DddyNI*XbK2Kx4 zIi+p?P0R0Z7+)m=a}$z1iO%p0i;B(6DXgjM9UuE*g8PQ%hNPrK{{TO*<dmegu8!ID zwW*~gv;r)YmX~O1s#_5)Cl?p4tgeD`_?RF0v&h-1bC|H-RQL#7pFR6b`_I3G{}(T} zqJ2}wF#Y8dy`v36;DG9xtqhM!8aSVYb%f!;L#I$xbLF>F-f!IqpF`7AO+7sjy`~kF zM5bmi!lTJPfz~v4yuFhy!AefY_?WIwa#`QTqnBl^<6JV<Ql`;3Mx7WwM%E&{dHlL( zL^1nGwSBTSYEFr|^vtBv-fgV@Ri&+O=A54IS%UypIcGq-fPUa<-GEa#KQMk>|9{5@ z{0lZj5a(cEVQJ|?qxc6yADv!Q+SJ|sGgULOFh4jk&d+~h&z?QInT3Uwg^P<*T3WKX ztrblmJ9TwYK;X>R-+aAe#||4?Yask9D@#D9DBCkd&c@F&gMZXHOxQnubpOd?=FrV1 z=ig@<>H^PSz5edQ58r+N;Q^5O$A3NiI6lKvGLP?lL_13FKMPtnH9V8ynKB+xHB$@o z=vWOnESe1ZcWRsN;o+)QAo;4q+<drcNc8IEYvDpOw6VvUnu}<eDv{z_hL#?_eCrsH zyI;y$!=+f8Ud4~~wGAq599rwVa?8{{jO~Ofw>nMTDbawQS=T+i4cEV<y#Mfnym`o3 zgTUP~4!l_3GkO6h0qr_IC$zo!F<yIh0{%rWV!yr*#>C9j-pSt6$2%e>J}bAZp{aj- zd@I$Xm`9Vbvasw0%<tT}6PDqVkB_&Zxe?GZGBbVX@F4*F*I$3Fjm50qdjPls{jjjQ z3P?i;_WJdoh#aP&gpVK}jcq@U>Y%pC{Lh~+t}aJMMTlP%wXwE({rwMUr?83k&)oj- zAL)Jl)~(Ny1~mNo8dJtE%`9yuyGO^VDyu8Z%uQuvrQ3VD;9`G;?V<AU^kj2)H%ju+ z=~=$f@f5!xG<wyuve7a$=GSy^2(6r0zPobw*+o6iBT8hPXN{^;!C5{3gwo-Lp|!Hk z#ewNHK`A^$RmXL`waCeMubjEnTib~JOA0MG|EM;DP2PE*n)@l7FF)4%xHjXMmM5R4 zJGZv?SNLca^H_N!Ykg$abT?W+NJwHzc3DMZXV=Kg%%`p##r#p;qkwi`dMPPMb#>MK z`}YIw1MEdbg@Gpl_B0wr;LIsWN%6YIIsiP_{QCOhwQC^nLDg?Od<36hfItA`J;3wJ zS9hK~MNtY0p9xyTfWV)>ef#qL`zNnnJ$mut;j`yw1y1kWwexR(|2tM&>&GAe7XThr z><?eOeE!}0C$HZ?Xz&T@NNbNC-F)y6CHcEgpIy6i7g1Tz*MJw?y}NSzHb^ff;Ai2q zi%W|*xj4Vs@eKzDN6*l}*1YGi0IPG?u5I1eH8fb&(gsRCAtx{NQVK+`4qkqi6jvjN zUd=3IvBpPreA251Z$0@ow|0<2nkZossmG`jHHp@ANT}{xuI^nbZ=ZYj{zF4cAJ-Yp zU84ANMgbbmmz=}OKFxl)UH(kz9+^9&PUljhe52sPr|o@A%Y#?b<ERGxh`Q@x6)K-; zgn)CN3@KF8$i&3X(cZ%wR6PV`g*El<1N{i*=TOYg4UCT=nBTK|*REam_I8inzPGa{ z0oCu_i_m^(SO|b;aAK@`u)l9)7%BsQwso{Mx3#o)cP!n!)!x$?5fKJj0)PkLzkc^# zR#rOf)!fnq$lth+4z1h+pgwu??#k7xIoX+Uu~DsUEw>)r?;jd~9t7!JcJ0*EP`h^f z=EBM{(EGDD-*xtML$hhA$?Lc8+<*QO#r*8Td{2LGPk-Oqty|NJ3viUOvXYI5k0-9o zW@cvu1o*+0rq(8i#E}HwQxjofp=jsD!^2%xUfR}qd2nhHjoGJ{mr_!by?q$z8EJ5V zQ%hF|CnxGVItps)QVNSBQqz5-;y@GHxOpNZZ)uH?ygE^VlsK|*^X~IE+N20BX&WrP zN|Rc848!nCyWBXuUfMoCFmnx!ScAed+4&S$q^v|tgH;?75{tUF5&M@E6|+DVdFQVb zTsX15yc%vt)ZGrL(hjLocodyZ;Jj6RD-1$vFy<s2!ODu_2HkXMQfhi}SyfBx0CTJ! 
z)iwZQUM9~2=B;gr%XjX!w6}2`-~y_LEvBaU^@opzgf6hNv$3+W>g(%)JU(;g6dN1s z;UkBGf&<|bmM?d<-+uO@sJQUl*)x0gB0KKe$8KdsK;a7f3=3vvc)%XG(yraR4jnp} zo|b$NNe;MoFWBC_?EBcEeG?NSkoS7JI{WtTgKE(AoI1r{R9tZH*>h;Y!`+RIjSc=~ zZ%;aRP5`u)j*fOnND#Ef!omVqz)bMBU*AA)?fwG=1vwUGf%aKhS@tvEF$UL|zka=c zd{j<OW-s$R;s4^|Vx7G`J!4~_{j$m`Km-QGC!s@>mM*TwBnPaSg(A+7Ro#sqTXW;# z%l5J5g9`S7IA1;QIysy4i}>Jz)+@FBD`g!EH|~9YxkXLenT=nKQ=KYh8KLcx+&{Il zjo3e560#b0N!fj^K;C5#a#)pmNQJ_qOy*H?KA_-mNZHvwwkf`P#yv7q(-5fM$<Bk} z9T^*!nUh;l4Z$orZV57AE6)Sw7dLJI2cABC5+DcktE#2~n1@x6Iw0vaHPx>_dU}Sr z3Q#}5eE=2!PW{6}b@jFADpMxyL7j-=;tW^?!nd}zg4WQDUw`urL=dH=MF4fEh@cV> zyJt5j99>-<Q&W>2J2z{>Mre?mhr6Sv>+$QiA)&#r5gI>mfD0<X`4}1*n46h?^|!wv zmC>>X5osxD$x>1h-+c4UUS{KPAoS?z>8Zf#!oq^E@8)^#LE4d!6mJ_C%CD+P&Myj2 zPV<S3r3QqMP$h4PDEX7>)_i(FWvvrWUcd8<DPxzgRCme;F)wHsYVMt1Ke$@awb(qo zy7euTD+{Yf1aZ5C4Ec1t6l`P68QBk>zSsuspDeFlzdof#`AXW3%RGjMN$)%gjtAuJ zx#URuq^*r;DG_B8gs5h9GkaYEGJpfm5g05fuduYfv1@o3sQ$B_|A={@!MXKy5n-WS z%mNyI<=%Y(fzzl9U@)41d2|)vghCr=c64U?;zcnO@XAUG-rim;tSqoQ1j3-eOG=6W zc&H-?oH>or(jZz{KoA-p76Mcco1rynDT%DibOytdSrZ(A#m2@;R74oT_RgL9db-cw zeFzN=hE?Wq_n?Ty<4r)cD=5hOgRlt=J}Ynrag$v;;k0WTYAr1Z!0@mI7~b839vl(` z*GnSVpyw$sFAG`-{sk4lDf~l2vMZ`$GqVF?6Y0U>jtoEO9F1)3f#el%#$PMD;@vZ5 zuRVn5RRrgLQiBdkUd1twUz?FoI@B<@TGqL6b>ru`<~_U<*?ARMWvqqq!SdG8UUAji zp#Ag3!98iWu-Q5L6n=d_P8pj6a-;)tb^v=YK20}idI7(4o*=;&gQ|K@A5irmRx?Y> zYuYXkOpZ??+Gg}9?~%`#zi|UFFD54P5%Utif_d0W8;j{58|m!pLAe(sO?z+8)9=0q zv_l=}3tHP-wX`%~UsyeV?ks5gXYam;I2WY;#{Gv-4IM1f#p%H1=;`8M8EOK0-+cHX zI5-d`d{8)dpS?ga&y?u9WM!o58|w$gMxa-^`RwKTqsL3P?zp<qm|VZ><jE7O_wMHv z<gv1`!a8`?z{D7+GB6PlA*65O*x%CG4t}JptcaeQ5#A)Nq#WcQBRty0H_+D2%go6I zlsv`=l)McW#xJG3_u<R8356YSpOx)0^}Xu^4MOECBOrRM>RGDmUwQN0&tHsu_Tt+! z7mZj1v=3t3r7gqdZK9idXSPB6C(D(q*N#ft$T}t)2bLdJLP#EAdnM<CDlQ;(4(JE& z(V+hg<1Madqib#fRPXE?=o1weo0*eWRo&Rxg~WQ3<6HGSV19OeT}bc(ig_6sDbU8B zFc0WmfAj=dWoii!_TV7Px9`L!#Gyh3XcJ_Ni;FWa-%(U%L`6lSy8|tQIJx)Y+vOWK zR&Q=xyM23pWf?s*w2Z}KzJ32eSV$0BhE;Vn6>yELtc*`YJ-sjwQ7x)pKpKtA%q(0- zWZ2N;1c_t|z0AQw2a#XDGK1FR$B)gdEN8&(A2Hw2+dZ|k1VZk}(Iap&931-z1aq>B zv$*(0)J=2^^kTE~A#{b_(9Xjf?iE7vcr!U2lieybGmnh1r8`$|K9Mx^KBh|3^Qu*K z%0G_ra*uCBhbU*(?tk(7)|8A2){|;1GBzi484`pr6ZedJkDhJg_D`1R)LMs#n!>KD znxtqhc?W<xM6!p~U6C;y4LbWd+}?A#EGkqLLo;JrJ0i`U;vWp%cVbR{L3J%CoWTiZ z4EssX-@G+>b@9ZB<0up{7%iaCt(d>|@X4oDAjzvYZvpIm{d`ac2H@{Ng#V6R%!m@& zbf>$bz-D7-D=jNtzjJr-+B(?E#>VW@B6?^P^8j*CKD(JM!|LNV?{jmrpr?Qe0MX&8 ziEt+7p-pf}U~F($IJAa_8gjgyJD?gj_W{I55X|!d<}*KHp64J>XK(lH%1T31128;9 z*`Lo72@sHuMoR}9l9Wad3Ul!CwW7M=?VNA~qPm^|m#Wiwqu}b!nMdEgy;R(}U(!n1 zJ{yG1X{?_DF|wlTYIW}tXq$KMe{n7sZ9OXM8O(lF%4t0xF|!cY$g*wF{>k$0!w<<d zllF<1mE3X<skt6jqXF7^HQo8NJovHR2gC_1XE3`3jZUf(LDfT0X6Mdu4~c+IJf*O> zs`YYvU*A^DuiU*?Ra=b?(IZ=|t=1kq{EYc6+6I+6i#Kn6!aM+pot+(8L+$9%BS(%N zj))BBJBA1tKx0;R=IZU+3u~*$OZXc%Cg-kv#=JJ>@plO3(UxKL!K?4Gb3S7pd=un1 zig__H(Yb5ark0m;a<f@k5mW=(V`8II(~_Y+w1Dz_W_AYK7M_PK4K0lvoE&gz&?7KU z53$3{UwrGE7<)&>lYIk`@ku8a10qS=1TTWI_@ByjN?0*4{PhPfl+6SA)!jhVDA;Bk zRCA*zG&Z6nzy9D?9#Bbn3oF0s9%&+v7G2mlP?8Yd+CRIE+do-SYR8V+B=09CbLs{1 zY0v@f$1q+ev>7LHetfDlmUBA0gpKx!nJVhy4T!b`XR4!@uUB|<WNJokWp#OD6Fhws z^R~9u_usrXFw}?Ty~yMVEE$>Uz~lh(-J3kW1@k)*+6T<ftggf-#v|kTdv=4WZ|!J< zo?>Kr3Vq9Kc6s^KN&a0(^Sdl9E$+R1y?E`~)$7;iS6AS_$Bu34c`R1@<@*of;$n!j z*|kenR{Fu~?-DO1pkEU}m<%!of>0FmqN2j!Xb^5Ylbuj41YyP9*WYF5B6HNhmQ8Jw znYQT>Jlx%b1GB5ELsOIBWl#acQb~1nRcl*jN!g{`f(T@+CI)&yAOs6C6;zFmxrMT> z(GF#*sBvKP;NpW9Z<0znK{~2B=IMA;AJy<wvW+chnFW#ud->+OUwJy;e*235qS4-S zI{Q^zj$=Im_7+~5+o1iEWqR$--;E=`Qgqp=MCHXWPGG%HV0}*N_@2V~ozV;AylA#d z*m%c9OFmf>Z4*-?8#^ME?&25Z8y%OJTaZ;+euRm6Kodw@hPM|xI~xEVa0u!epaoFb zqHTV{^C;#4;YbY2eE?}>&u)+<pqv46b&Yi$y<O08fc${gfL%E`IRk?N+Pgby>#DD; 
zEML8`apv?XWL%v&o-;Z#ZDC;!&4SqA;o%vcoUpVcppXW+4#y1$@cS9&9i8maxC4CT z&dWFM9(44*A;3JGLt#lF@Hre8L>Haz+BZ5pfBhQliylHlQ@x<3E~B&}C^nwv<3|sP zfEeG30;<MQ*MbP4>sd`AiwZp;xgHpP{>CF|V;@lR20rx?W^ue)o_^`=$Z$=^!pPM- zzxn2*iG?@&F{QmSHU~6Z&*=HcTZUh|^<W#ff3j%1q_CVb+%06XPs-*L&iAygKiC<) zfOCc+e9A6+&*NC7i0sm~=TuB^<^)rFCtEi!T3{$BocQcqoV{asB~kY$dSa_%+qP}n zw(WG%v2EM7JGSkPla7s!bI&`^|IW-aGxx*X`chTvoM%_9wfC;QmwxN?PvRX<TF?7) zWARV`K)K)PmoR~qk<g9CwBu-lE|Z`K#KMY;3(M~13uNf0@(wAbVxCZ+X%KQp$!~BS zi9S<MM^G9J6co^Y#_cXJVzLMujK8;}FE}%)u$#E}A%ea~$dr^&&B5zuhZj9xw~2-U zHyG3PfviDJn=>(Sh4RR+L=*Ewm`sA54fC8lL4TD*2QT=4D&!u7Pl5CafjMzuL;qB~ z%@62N$m5YlfN%A90$i0Vwm?`Y%`?AAL>65k^K!M{ovwK1HKOA!a#SI5Z$4HH*<5Sc z=t#eRNG5$><%=DoEjQz<Zc>{xDW<xJMKcuK;AZLcA#4h2kJi_?$ZCJ<4_btWAcWU| z72x}IV=?*fP^HM`H0Tb8=uSlHE=Wrafm+5v|4iZ?BBi6DVn_@R$MtKq!@M6sU@0S} zrUoXNNA$))=L3xTAO2(UD68}Nyi+xM%AQeI5;*<W=|?IG^lQIAUj<8eFu^S+CMIU} zc_8qS5X~c1WJ^!*_I>@eMaBXa_O-Re*2dm*=FRkhJP!|nG?Wo>DI$*T-<{3H!^3-f z^HJ5r6z&JRU$B6Hm=nzgV)?3S*3(c^7x;nLh2MfG5MU$>Ox!V_LK6{y7>0chy$+c_ z$Hybe&F#tGRajn$!Qz=iMG%s#AgB?N9Nzr}7B7ES&v|w8rQM{s5%kYh<#iL=KzTh# zRsIQkL2HMVpv0pm*r2o7$kpQX{*Nyg2#=DJ%3{u6J5jaQUn;?0cSUW9;&JuNvGToP za*g%W+VKR!{zicP(}6sy!1=|yxB+f-U!-io^65hx0)gxJQo*Vdf5i!P1o?@shDZgt zoCSNWMi|xAR+}2xfg9c4y4BlPWcE3m+IT;FdIKnjww4By0FzMQ936mDLbDZ6Z3r0V z=R5S7S6p24<5FN2k%pKobU1NCtz#8ow&>0oL`0jryOK59OL}UmxRcX7)x)s|(N5Yk z2b3f)&@>l8fs=(>aA<XN23qJ~Z7xi*p|Pexuh+-S;c4#TT-a3h;~C-;6$=j#1j~PV zbU>2R911b8u+ZDww>Gy6g&ji1i=Yt03gxl7$hru4B}xw1--3by?s7F-4C16^*Vx(S z=T|j=VK`$Z@-_C*W>r9t%C^kVth@nY_Y;5Bf=-DPz+Sn=P1M;>d=<B_dG%SIrvjdB z@~%2#tJ<n}8~gA3>YHCTQ>GSTf*t~){3;rFG-w3wYo=QDFMrbhvtBp-`g~?qhfT-% z%-}H1|D7CQP=G9;GESxfO-OD-;IFn|T~C##nd@N??~HRn988L%r5)_+ZuY`VVp`17 zv?NO_ldZcIE*WNNIrjJ0eH;BA9v3Q6>_>u`hlPi|z4PJd!6@W#D$p;cYbyd#0YfF; zH@E%v#!!G}9b&C(XeHTR++Fa5@=w?{=z_VC|81jouY=P<Q?soX3?AyO;~lgGo)S`5 z;L`}a1^W%wZ}&*eT~}AwUEfF9T13G>OD0TFE=boYq*#$C@vno3%QAEOG8+?Lx%mU* zJsZ1;*5TfLZF&7A*gcoI-3p#-<KyJe`Sp2RBzG(;P=PMJdQh$Y_v?EpQku%_Q1uUe z)NZ+)K*7Oa^|8Zw)^FfkHf)O4`A3}~ep^WC_PY~JUrer88I?62l>^ZdmE$0xl|TF; zsUHgV{CylZA;w->Zv7pIloUorQ5^i-n!c6TDg3oBdC>Q=bk<4R_Zr-7`1*NvdQ<({ z4BOXE@z7c{P+6Ilw-fbu_2YcAr6{RmI%j^^Y_5UItRU?533X>ydTmf=sb5NO>!Xi$ zUJSvP#a8l!lVI#`yDRN_bha4I8%NF(!rmCm;QgD>N{TxJM|@4s_bt?4FKfFi%R|jA zdb9qj(+<OpOvS0s6ktKx^$!lKZ(nAQ81A?Sb)peMr9%86reXRY;xmV0(}&~{)V?ya zKfw_YE4!q!AZc_YWwPIF4<aeZe$vuxZ%_h4{ak)2JD9WI8vnb!t^ZEC_Usy3XH?*F zIzG9Y{*<uoJ1{9Ww1NFpUhh8=(={{{S9LW|m$y-twzE)>GKum8XJP5&Rx-UQu`~yB zu7+PdGECJ+vYjV=;E}A`xytWxRtTRh7XeWxwfBw`O|%;vuRj+)SiPSei4FI^3~j8Y zUF)2+x#=tzE86R?J9U%F9W9o5il*L_xXfqdbl65+e9~xAOlgbSWV~@x5S02ugPp)= zoC%(vV`4#vIRoyLS5-nKKgQ+FN=W}3{o!4^E17ffxlp<?vJ-+ET|IS%KK=bIGkH`0 z6?EL62X}G(<}38+)X>sY^VgQ7&)8U2S2t~zt_y9IDr}SqOH(jv5;h9+ascvABuBt+ zYEUm-x*hm~ReqXfeOQL&+orsYY)(Rd+nMpbq}HXf)GD51XQ~8p=zV9a)6HI{n2<XE z%k*toTFwq|rhwP&`{$Z_L?Xuybt;{Z0Q=!D^3p12p=qlAKYfoy-dGJw-oZ-L7i!K^ zX4cs)(H4#G!YVS_QCM&19b`E3ORblKEz_4Vc2e^rU^l{2xLBUl9IU)O9W@PQb-l%4 zM!{6zAx8d_MfoQaudheMrhT}j`7JYq*@=g1mQ}{To)u#ZuW!O5T0829t7|~<xJk(Q zi8xR`vJNo8lST(aElf@#ev4bgXpUmLRP$V`Iz#k(Dmc<@|B{KO;x0KVIZr5-rv9r@ z7MTJ5Ue>yXPy-lhSJZp|`ez>d8hJTgZ^~Qu8@Sy{|2<^j{ZoOw98Bhj6^EhTuxCT! 
z(ojX@2MyEgM4RynW(+7dTFwYe<l^d?3K0LN+m88=34>Tvft!~*8u=o?x5ytQ4zJwY z!c?fGkw_K)x9>LS(1*zZsVZyIJ!~LBwT>?1zbE1l!GEYZ--~M8{t>1A%y=*Spnqkb z7A(3YC2)AVh2{bRBuO<+`+SQJPKi;>@&QS_V0eErjsQ3WWU)VHjrwqoX~i{t)V#-* zM<=%}e@VLoO*{-Zp=aJJ1ZvLwkq^$1b@i9MI!o<;1bXfcX8e!BfBASJc^JIvAB1eX zc>FZG;zGyQHj@fRyUMrgQ~SnD#boSVagDj_S~fw~@_;=7yQ@i2kE*c`Ymx8zEm{W{ zHH|O$B1Xsb(@D~mBPVbAp{O{BeECQpiD8zN%Nu-72p`4ys3Ifc!H{4k?Pn@#NlEol zcvfjBe7H`SDQ9sxm-xyUIs}#s-6P5e`k1G0kg2Bx?uVfVt%pd@g!Y&nn@t%g$4<<> z2Q5ZViYlH%+|BdkKi+woe8{M%2Nf1S?!aGi=|(U$|5Wo=-BYmQ+G5{OWu67DilW?s zgsRQ#@Ncf*8&}dI&HAhQKUk}qgtiTG({9<cVG7G{PnhT-ndiB@zCU3XhUz|H7IIV* z=`^G8{11rM9S;>9lN;t!Ni4@wnNe}Clqvep(Ac?a4{i5T2(wmqver#^Qv*6&r>OSR zIaF<Kj7^S2;{df2p;M?3?TtU1f3{<#a(7W>XFtbEU*>GT&O7GIy}1SmdBekSfWF+G z6U`Ms59Qw@$x@4ca-dgFxL*|0;_BS^T8y~~B8<`o8)wQi_PMhAs~S&xjJfUQ{$piJ zk<}J5zEV}erVICtBLXYTxl+m9>g<gD5b!dZ=-#&uJ5TwpP&rkRsQ1T|)|U5L@Z&18 zuKD4j6QTv#*mdCpx1SI7nYScjSC37w%Ie?a#B=qCzj@?TP4nYqEF<m;NjaWUA<QWx z70)r){qdDc3f@Ef_(rp|Rz&v8Qqg4c)O+!LSB~g?t6XFN6ssgl|4`z-A`;@iczHms zD|I^ny04;*0L^8rAHd`PG-o9E+#S4Oh*zD{egD(rm93tRhXLFwTmbq5rSxCUJq@(x zVUx>Xl8SIi;E776D4Q6~_Me53@lix90#h@RVD^pLzt8neX8b2F%jc1KXR&|Y_M3Sg zO>OGlcHCLg(x0bzskbP@{ywih*Aoqvd7hn3amLd7`IP15fa<&>PBjpwp7aJJFEx;^ zLxP7`9HDWr84}=SX6WknZEW!CC@%^BQ%F&1g$T3cEo@R9exa4Ft}m_W1!<Ah@gp|| z$-q==e0Tn-anRTGB7wODl_rLXRLMnlo$QIx|4Pe?PPKkQ#d9>dQRb@0W|`cQ!h!%V z=Sbbnmu$c;2QxE}wPlr!Le$$_(y{r|OBGBM3x#<AW`Y@8n+V-yk``Eb(T1@-IOLR& zkT5fICuCQWD(-9PatVz*-v=A~ij;H)=<#(+S>_aa44mf;six|Y9PWS&<#`8RXNKV0 z*_r6i3Sn=aXJboQV1D-`U+aNocw>mXBOeNWXt(XQoHIEaTlpijD5Xu-G{GHpAo{_| zQ|*&h)m5M7&QP=C{bpVB_wUuab9$8f9Rc5oJK*CTQD%U0i&?x)^&Ag01MH;nvYdV6 zMScAhZ9R^<F=us?I_g_y@!s(VMui_a$W$8LiBn#$^cM1`WlDdCi;bAM++71avrk{p zgR{IMc{pDJCyhd=%W5UxV%yJ=nGt!=4^>=V%2gWPzk|&>F;{x*F}H7Gx<tOn{X#De z!p1w$_sK;|%>m-gnK_0Vi>vD^+w5#yfR;Y)HdpgU%;oXFa%+9uNe^)Eze0Fi88#{> zb{dtPcp_MYoY7ZTgWVMA=>oOThv7TQJ<V!#l{%sgkwtFUJcC$Iv42_&+mw~rdKqLt za*}US(+y1mfmqzUy!JgjW(qnA|7qhB@=N4KD8C3mSfVT`t#v>?(z6h~0w-H97nk?@ zb#O?E3a?8?OVw5215bbxkv~Vk8w~m?Y84&j%Cp}5`&-d#AMvn5B#se*p3x0AK5cjc z65+4)(G$VS1i$7}=2n5qE9lGP+&F8W%7o~E+eF2&k6@OPZIiN-N38t~`+kd_B~F)% zF=#VFbbeBNSJ~e^tO;b1vw%TxqF1p|5h<+X#M~6k3}qF_$<7$*cY8lSp-yBG2lKC8 z0`YH^)!;cpotc@UU~wzbp{3+`Ah0`pK(Tsz+ki7%m94OIDH_I@<w+0#PFn6cHp&$# zw&VpW*ayn7Rp@m4m9z(!dOrryCgD4q@2aIO+)qnf@2>xGGk83w4K+(mfITbFbK6@F z(EU3ZoOqDCs_y9CSAN&UdTekoHoVW0?Q8Q%w-P(mt7z97+jX{iv;VbVe*kVhuDT&l z&62-1bRB!z0A#u`nxYaLeXHT4Jk9fF=Ups43|OrtX$#Hnup@;kTul!h|3NJWPJLpe zN}=Ei-vb6Wf5aPuJ$Bpg*~j8j_+mWEQY@^iFf%O@+XxbRH|iXcB=8dTl;8)*JZJz& zC573Ea7ebQvbvhC=FYO7YiCbodAX@mgQp$*x|f&58>zVe<W0b@+&Av+q4H&<V%^?$ zanaz!%*=sG>VEEVccZ7WYljGmJu0C~C$ZZOvoeQxDQUPU#UNFYlAl7*k21)?!A!xf zJ0+gKJG<(7snR+`Q+q`NNlY=!_BB&f1qFIM@(EO(wI=RrIKWPT5eCK$6TH&8;>xDD zQCoF@J>f+;x?;wa50I5l5a|}5+EzcN@`4>ku5C&th{gc3FhdnHI|F;se?yX6OK#u! 
zRGf~2L_Q_un(;r&^&r<>X*zE!I;s+*SH{I}D;22sH486)aG<c<D$w6_KRlu%e+{31 zM#RXdgSo2ni$dGQN+DV(Fc6^!hy+TV5u^0K0@@-{+zPY<2m_Fpe;Lc;GckoEBjDrH z^Y;L~!~yF&cg*vG0Ha$_p?l2UmE7DwM9>VpB2xgj6lQ`P)hrNKNw`ilyP(X4))rVJ zG;kf&_1=`#bx?J2AAFCkAHf=AT&W~C9GHF4nH&u>0n(Bgmsy&Y*cer}?@WL4UAH${ zDyL2=p8bYaqK=3_|A)-0^nq$U8V4Nrbv1tt0U!+);2BxT=#XvNE8fQB@ifcf@}-&{ zxWFyjzFcyTV)T>z9o&Ik)4<(Reasg@2BzO2TEm%OYMtK1MQ@3ns`l<J7!#HDpO2*@ zMY<c#=3<HyWLCi~z5T669EKUqIHZuk1}Czc04X025eG9HH9HMGYClpI6sR=vI?+!+ zDe|ob8+$O^a+_zshr{`GPy(tiNXz_j?=K;DTrERGTSrG%Pft@zO;JfoD)I`$g+D5j z!`v-kwdN@)<ro--lk^c&)57L4{&t`tSOFeY{<oAZsN-sE8Y7vy#&U8iPY9)M)7&eS z{+cB)M*Pco2Xvp&Hr@4Zignf6;m%WAvRM)rUTP1v-hm~F?Xm8Wb&PhI;vl=s-k-og z&yQlVKK(C8+6T?Gs~%QVnQ+i%AGS=ejQ8UlGrDBvy3})qk*pRMV|_sk6jKH6q)f@T zJ-mB*-?8P`&0cdItFSB-Wob#m0PqiMlN0Db;zXHt*MA=OhyZbr_26+7Cul41zwtBE zVG%!Wk%S<=pjHrU$m**g(Y%b^@rgP+>Iyr08$+zeUG%}+kY86?18)CB*#Nk2y>*mf z+z2BB9xnAFa)(qG1w{GT7TKxLiqxF=e{4^|zK%eREbna0k0QpZvp*uBJ9!z%-+!yQ zZPm-6k3m&&kG_+01_jJj;--zh6DoTwxKS?>VLDCn-l)1Il1s2AZOSMk><a91cw7;l zRBteROHTz$DvC0wi=R9Vr@)(1TLU%Of25`vAUJZ<ww>xU$f|KvT*7L!6&L84OI^Rd zxsiWOo%+}uM<nl(yZ`QCnFS=&TUsO7Y%GIBI@#;-U-EMd++g5cW8<8mqMrbPGjbBZ zKadyz`T!}EQwX7CfTrOjEjTbR&7VapG#21^i<WSTOghBSKsr1%550$n^9UaeDhv@G z@HH|pg%kv5tBJ(LbLwH56b!O}AxUJDoneE6HL(Z%J4>NErQ(vt?Uy~0vM(}qLq(ls zovH6iRN_GOxeLKbbg3pRZpdKkgUN5U996(=>b7Pz3PK;3&(m}L^me2Emupzd%_5vG zY10;}jO6sE3`17<m8#Q$a?zxAIq1IaB8|`qV87Xr?IfYK@p;05P#?6CmC3)`+Kax? zY(594(8R2`!nDBHDu%s%xxEQuJd-C0<d+x@z7}T38c-Q#6QpNKL1aM2fS877E;E$l zt0?I2kR=)fnGo8{r{e;lgZV;7&IYU$usq@z$Rz~(g606TSOOaVJqEs@#hHL8dAfiU znp?#p^R6z7OpSx9vwY~p0o|#MR1040Ktrrg^}2+lS!*o)HjK36kClHsC1PpKT_smK zzqD3j<!ASUoD(8Fjm?Aw0lih0k=)KuJiJS7d9VHr%PsID=#NJnGfBl4!aIbkk4EY4 z32f0z9@UElY1T{C%Q^Sn=(!$I2Xm(#P-Sd%c6>TL@9=64?sF<`zv44(bCMd1`Bs(W z=ajaV@q+Y}7Ld6?AA&2|iaG-SQg*Ul^84ap|Jnlu=pgR|7_9Xx1u(x13gXy7aR%}L zqobM>0d9ExC-cF8*}%m@d1&&0l{+#hpn;vA7FVq0a3*k_SSEG8k*7xbH9KjG!22Gk z`54H*dHWf<I4vAaZvWjUr&l4X@C31ktnh@$W>fH{Byi{C&eqyWn0(xne#~Ay-zk0S z-Xf*>j!$3Q+&Rs89&F>jIpWB%Eeq%sjE?z~h6iimCn7x50cl|jc`!$1HBG0eLLOSA zc^5jDtKD|Q)n~lWiWt9Yyp>VqD4j5gO?7_D5_%^ba1?V1(E4!;^V06-3~`K9&}!}j z0-g;38WNcT*xX=GDVs)ywq}Eci&*;&3j{79-a%()a>ol+1`37hF0?oIpf&c)u}o2s zuVlJLMYMPk-Ph{Tth&u|JAg3)Xu0vW3-rJ;AK@5d={G*3>M-X%K|Y#oQ;QE(=>2ck zX(ady_scHQrIc*P?8GRx-X|o0ubm~$E%<N4!WcP*vQ#%rwz<(%L|urK$jmTjnoc~W zl88f7z(fDDRy>aSnBFgSnhSAP25wz`7hmfO@ZmPosed*;=}qV5K`gwreh)2nAIAun zW({RBT%zuNr1LAl@Ar5AD3M@{{ned^LV)fq=sek91qN_Snt+WUIa8b_IiUvmK!0KU zQ@Vm)nB+Nx2(k!#z=;+soR!AZyq5l)lJJ@un(Wb<o0)RiM>n2#dkTGZ3VU=50ux^v zn))gqN0%EfTeCREjlWi=FUU!`Xnj(f6kmamg{D)l5nher9PHc$SbOc6KEsuk9iCp# zlhga>=r;+(e#@9*9_I2RCY$03+pLl)ZU8{>?5$~%n?TJYwOM)$^m4lz<dx=mgL1`+ z&cp&L70+3rk=uP}$@bq^+I5;iUNkKM{Pd|?Y$fZ%q((Dh8f($`xekpH0=jdU&D&@} z3rICj*<UVKxPUqWuAtJ^-qcuu1q}A!c(^^HTgF?oiZW9RknaAt%~6W^U?E-^ABx4U z5V*}}DE128)C&J}=MSBn4%KD5^oTP9X_9)_jSxJ;Z2|5!-!aZcoyFmQ1x2V4*z_`y zWwOi_|9V)SeK+CJ!^Pfj&r6~jdCKxYKz2QkSGPR3MA^QNC&Q5lF*F_34f1Gu`H;Mv zqAHYKWq2YV_LYimt2(E)N^H9|FT7R(_o?_Uv(~I!WVKu+ktAINVmCEGqYH(hy0KtW zcO##9k@9WC=zn`h%3}CLZAkVEH$#XE6%&Uyq%V+<+-_lc#^C@}KBMQ5qd1VG01J81 zfR;#%`$#)wPA26QPUTg0jpdQ4187WlyE#*Q@74-Enpr&9xSGYb?qNC)IP4q#`Y{)b zcFFk!+0jj4HQMHy?ncMn2Je{hapXbo+o}b!X%moh8pKZD^XvD70c=HamEY<{+G_hk z+LR(U<2N1Q?Ps5TKHl%o-F1m0_s^Y4&!I0##$)3;>fy4I%z5wVIq_B9Q(9z!%OFP= zUdt89(PVNYh_qadKs}pCQl$+ZWJ7P0V?m%-V?h8l#W{0Nv*IMHP*i@&Lwb~pvXa+8 zr71SC6~5c*fU)GpU1RODB`zC~<{>e5mBMjF78aGkaaBQa907TpzylPSi%=-&n0a!e zDQde3I-3c1^^lKFbgp4+u3_#I-E#+>@)!#fPfxGAQ(={D+7AN~+jD=hk=Kr{Y&K$$ z6N6f_?1Ad*S&9?r#wy^Zey|UG7~YF5%0r2r_2M58z1di|#}Tu?e{_3{en_g%(8Zg; z$>gstnI-B^wW(nF)%}8~>Xs4m1RlW8GIiQy7KoNV{~~o8B2W>llhEl!w~Hq4FT%4S 
zF&=P6eUosedt8?<O-tzpM{b2pC;~AvDNIfo&GtMYPv{FS{6G)A)rPJbaAI{gLPckd z^IH(AxdPRf-RcvF_i{c<c;21OJp{ztP-*l|oJ}Ulie%k%jCF4wpUMi43mz$A^p~C8 zCyB+&o3$Lx-|5Ic!$oJ6l~@)%{@6H}SQz^}+Qkq5{rIab^wWIJ#Cglt@B6;>eXebx zSHErW>+|2Pp?3Ec+i%Ems6@hq4fAVVGDlob_~>a2cdb+gV+^QC5T^PIb+|Hd>NF-q z!8(!sga)(ZmLp}i1h(haM;Rm;+vz%d8ao0->KoViS`se{lS>n_yTBly9ZKfwa5d^2 z*tqxO%Z(EIe^Ng@4O-TzZMtOF&EgxFyCay(EACKWSeemz5x~0W*(qpBtBTxz7y1)- z{+<&cQcOg4Q<-`yC^~dz*TkL@!~aLag{^IJuFUSKf&FvO!&xMxPB+a~&s1i`OMJ}) zBIudB;9q04=MD+=SvyFdey`F_&-d5l6BN7ORKUkz+Q#W1$Vmkcy~`vvzn}v;PI=K3 zv%Dg;NDxM=+*~yVG)8~DG<6~+1>D6Op|aCa0?9Sl$J7>#kse5q#Uq-FR-~q{x{!>i zlaH&F1e}(Wjj7dUS!l^J6vfI5rCMm&YPcnv#7WEWar+pW8)OLw7bNC6&zLm!`#o4G zEjPYuG0Yj%qz?Je{v%dvl!rDYGd!X<OVI#R-lE0=veFAsVPBO8S+35&$FO?Y58awG z@N56nYM*k9J^y%l8Mz4T%m%tT)cMEtB!sfBBWENGUKceI$69;1&?cbJ^ue9J?m4W9 z?pwEMKt6yjVjt+&bSmUz@uy&gibb!Z%K?aISrnx44SW?3o<d>%=+bCxCbQdLupvU| zQ%340R2fm~5kH8vAa58gPW?bmVB`>?PZHY&OHO_DX0AYI6<6<ngy}>*)GT;ZtGG{M zvcp4ljxb#G*Zu`24b+_F*kS6RCoRGGsan7D<<LWW9UW&bw1xwDz$xEk*gF1~f?2;a ztM9+g;eqhEjm%^z^W-HYQ7-SqJzRc*@bS{Zhg8-5P|=mU{EWdGnbr>U`UbyLf_&h< zaGWxC1C%zSIINpA(o3N4epobO%9B92e}+DShCG6XJ_hZfYK+7T?HMWwkKQK*$!BGm z)?t*Qoha8H8r1Hq&{-z`GT?+BMt5W5vvKgnQgm)s0{e%z))6)y^mR13P;#HRUKF@q zG|+M$x!GHKSK6&;(EIx0bL6qX;GgpFy%sO{)p2(#K6d0D;3tmBvJ0oByrpM1fy`ex zv0Sb+L#({c6`{Blt(!VpD#l-NJ{oTfeI72AF%}G}>`A@qQrpj5445Z-+mVWIu-<WR zfb|O@U=<4Z5(!qRGD`=R_4F5J%upP92>1~y8aQ2h?t?dl%!)9QkQ^C!jBFKfQK}vv zZsyrJvcfH{mP~4kC2bE(Yn$RE{DiVF_>W}MCxb2PA?xh`i7ch}Bt(4<0POP#+pIWx z>}Tm7JAAdk;!;{INjPk%ynYtM$|a<V`Hs}eKQwjon0O1f0IUbw_ngH$!O$mhIVb@? zFLpj}cK!j?j#O)?luErxw2<l69`M*;G<(yh?Ph!uo1mtjNi?H?6XsAqg1aa1d|!MU z*svs%y+nD~N-3)A?2fKR`OB^2e6lCvas8gp5`PcW5ArKGWY(esC^fcQ!PE2Q^Sjd` zVI|>~b;zwf7gdQs273Levm{t*jXW%BTNGk>=wuG0RN{~+ZE>27Q7bNkAXu*((5*e0 z_Ls7Nn(Ohwz>I@|@!te}B@{Yh5DUCW7g&szC=_8xKL(&=btH_aC$62@JKcw(*KX`- zpRexy%YJ`<4UGn$6i2sjNvuurVyIM)in{uMoP1fQgDq|aH+L7oz5SR9TdW#mv7x-r zwk+TUfR$5OWzINC&^3FIl)A9h+OV;d(=wHgkd|!{7XTDxR~ukcSsuBg3m>Xfyi&&h zOfvkjX&KVFwtVz8qXPmtHvfR-#jLmg4sz`N_@4M?-2JWF``Y2|#oglr&UAyPZ2nib z;v%c?Ac4pcmEIbK0SK9H@q^0l)PG^zP;2)jC+-B>-ZOwL{WpI&T_m@X;ySrWmo653 zpX~BSc63^IsXaFuUVPOO!zl|>36+hMvGQmCVk<Au*p^3Y+WoPszSH~jd+%)OIQIPz zZY|r#^1<fK)Mbm$L@3|vjCYLohltr7NRmXa$s0V1z9KRHKe)}u2duRREKX~%u&PYK zJUm&1X+(vJaRz(GQ!K))-(AlC_S-ae8NRjOg9(LE{NizjA+n#Z`Au%xcU}H4Y?*i4 zrC&Aaw@m>j&40ULO#en0dRzB^HFq#`b#pN{w*SwSqlpbHI};HT(SI&jIXRhF{$DB^ z>wi)C_=p(g%pEM<tccjyfHV~%Mm29Ib0S79MH3ryQ#T?;Rd*A&|3;CvH?{=+plNLe z{4)zX2N9!`xwWO08!R&sqo|{uql>DOv8g!`uvGJZT2YlZcC{s9|F1ShtleCd%w0qs z?VTJQ%pKf_xQG}ftnJ*)U4UmhV>fd#b5ln%b65cZ*#CNcug#G`NyGpmWI+G-_mYg` zISLaqmboVkB?W5T=^Tju=c&~Ih(p9zWwySWS*C6a)r?^M1?N!JyTXpyp$_b0&~bk! z?R$=d9LLdS@Bc2A|HkZp#KOt>-(~WjH~$amf0hZy|AkE0SpJ7dIDpsxpF;VsM*sf^ z<!T@h06d0>o(gyOz|}$ibMwC(bAWTsJ(>Rl%l~=ofB0i#{qHPu{vTN8`oCbAnUnp0 z;u+Yf|2xmp4!~Y^a{Nyx6S=v#14qz*4UPYL&i^U}*#Gm;Ao`z&#+9FZJ-(&~e!uR? 
z@;jOKia&*a;ZCv+y)ub-Vmv#v^bnZk52B@*7&$*g>PlTQ-H)X4F3&|+7A7Ni-oPyv zBTrIMY#4Y>JnCYttdi1xY`61^oaTR<@4pdE{(wUNiH;8ccip|)dXdw!W?*}@`|04h zq3@BtZT>$_X}!Mx`o8DBU-G{%zTe-TICl}&9N+VIzg_bzJAOTE-EH*V_`jYAtO$NR zUpV&V@2<D+$}95f^?m03`TG3$+Xv(MlJ>{=OK-P4e>dSIvwpXcQm!6|7+B@_`nj=H zfOK>N?w^_G)N#DE`?d5Ow0}I;SEK0bcQBg&nEpCf<$Swa)yMqN`F-7dpke7y`)=Ez z`)&A~K=X&7{?OG2xPqP^eo05^UJO+OKU`nxeBMNUPp{8k(qXhe^Iq)<dp=dS5Ea?^ zjlU7!^V`0gx7VFazg^jXKg^c!+D(7#o{jDkFYzCWj=LgyHs7VEZPObZvTEIj998ms z9pU?N`^>$xISPJcUqKF8zvSVPV~zjg`>rMl{|O)VIuwUQ7_S_a#*I~l22g<OWUi|d z$P>87gO4pPED8|=Q%>Kx-QSAWxFkX}blGq?1)&Ku!urLy<+T^4)VPm@{y~LyX{g^( zwpR${3@iRa|KXwk=I(QT>&)L;NI|e27e3z~TY;0XZ|jABEdNfgZZUR9^m)e*M^G6( zo)<2RT7*v<!PouZuo55XU>^(qQ&IpYxG;`UUg6_BL;yxCJ({o(Da>ng(UHq{?hL%; zXzmMI=a<HPD>+2Ku`GC<;F4;q^;3qy_&q0+cri>L+QB54-LHFIq;H?Ctzlmt&ZB7i zzZDr^1<nn7is9Bb+m?6yOX$IP!)Fs`QTxlyUwh|=XQYJsep;m!*<h*Q0ny=k4tRH) zyDnS~sA2h59L;*Vjc`~lZW>o^Ck!<JeZJnuqgLND<gH1!N8Yy|X`zOC(P4&f;&hM! z`-DfcZS62W;2+gWFTk{^`1m>hjph_)1QQc<aOarNt$Y~G*h0jIGh>O&_G{3*by1sC zKO!3cbCz@z%u4%w)ylbJ&w&c)L<iPs($l!xTnsoXQ{8D|TA!~So7J5QyQHAAFtL(X zWN}c@k3k?+L7P%d+1f&a>_VK2#Eu4Mj2rRA4+f6XR%mqRWtBH#j}PPQ41xOvH2B!` z?tp0*T6)#ow#byu)*(-Wgoj3GU=F}nxN07_w4|mZDq;xwgec!Z!+kS&lm1W;Q|>#5 zJqf<V*~Z5&DcCOT{LXG*ykr46m?FAL+usSqq_(e~1feh^r=}nKPtcvYP(uS+@)-%N z5!=L<5b~2egAy31+1~m%!NT0&5EJ{Nap@Y6UBbF~$T$Yt^Om}in}vJS<TDtPy1029 zZp@Gxo46c&h1+i&cdzLO9k)Hd3>Nvd^OrI^1pmS>wM*KDr!0)1mlfJVq(o5%wy4fJ zO~S56F>;x8Cw^zOM>>tV?@wKxG9N^eYfn#5!|=lSy>w{X6#!a3I>u~*YRGcaq=e`< z&@aWiJDi?wYWt3_(}D|Oy}d$DSdNffue1|!-&2XN{Z?Qu_Y6L;2LfC4h9wb7&Aa1Z z$hhycH^!$jv8?05F7{JcTj)$<-a#c{c&Y)=XC1e#Ma7;C_L~%w#DZs11QAq$rATNa zpdSv_T|d)g)S5qyq$Fv~py^`Ce)>(a4hd6-8)fX$hzA~+h|tL28;1Q50^Ja153H0z z^~6SsP>qSD4kVL)hbI!cgC_UgpdspyvJ4BQQW#>yoze)dL*`1QBhe~apXJIRXS5f? z@%D@aUrbJ#QD^zI)ygDnPld6}*rAkVC4$G2Nq8pmp(+PoOv(`!iEiMUhfjuFPZ0_O z+g!*^uZ6d#LLTiRB`6CrQbMDLx4fXJ+y8VAU{;MBXcEX2%y7zY%8mQVVNJn<WcD?U ziq`rT@obA1()bw0Jmb-wv-Z;rvoV>(HrmFB3|ym`c`YpmZxX~u1QMf!JZgwJ3%KC} zlqEF@Qa^m@z?EVejI$@@mYVnnsgRNT<xgtAJs=UxjJFod2kvWv)3_~|!S7EDN8NS* zCIDE}&hj3BTDPA}HH=CCvQ*$cQm>38QytO+M`M%|TR6XWRCEK`^6!=P?1E@tJ|0Xi z3L(Y*eV<Vb*Zu@+YvgabaLP+~kyR{_qJf2x=>{TldU0L^?XuG$N5(|fj);f(OXllY z7u&qAR?jPI;Wn7g23NfZqfLaN9#d=VY8ZNyV_xy#EwPxWA=>y+2;Xyok`Q5!T#^ba zv07mcH1daV-^HexT;l!EQ!VVTZe!b#7@Y`>wGV-II7$C6ax*s+&N6=JRTmPd^k<4; zp1np5b9_alguh=ItR)hutJ6gz6prZ)m|0GctzcXrNf1y_MAmG-t6LXw<kCc?!6KHM zf`MosstaJ*KalhiHGX&LC4M{fWf^JviF<_T)ZC&oGLIT%9CD+=)97S?7Lyb#J<fo0 z=@?qBzuUQml<ZQFshG7~CoK?x7C*KDuoZe{ML=?SS}(|wFd{|~t71fCl`*L4^=orx z?;qvZzucw<5?I}gv!#qhe@cUJxE&!Xn4pm68*357TXOLv5=#A;+C)bGxpmg_v=wqG zHk{luk0bXPNcB3&m_3dbs<Ac0cT1ACVofv}Bk|mgbp|w6Z3;`RAIr%4nb$EJ8%^vB zz=TCHzBk$<Xq%6a1TF`ISaO*;vBi>u+JSN`a%~0);-N}aoYA#mYU47mdMY7dOb(bQ zpdo~%YEhk@w;YU9MlP~L)$my5jKpQ=SJ+Bxyg*`@bv~5`8$g0L`Ol$G!TX(}%^Rei zNX{&<=Jqed)2xD@s87QHG7Q`XKA?F<npw!hQD7Tzs>N!drF{CI9dH!V{@9O1BhnU) zw9zS8ObZh&1}LYy9yfia?z6E6j5keBfFT^V4BtDM@PzJ!46&CO69M>xXqK#XLXb{& zz}?9FW@MCC55qe6@m#|q@Rh3q$WcUYE3|GL;ib2|EkTlD18QcZs*A6f78|bj@-*lK zG>jJR5NSaz?Q{RQL>XG7!U_#?A6X5#wMVcf7#Tt7F9#SLuFV{!oe6=1C*79eL{J&R z%%US?4+XGhczc#X;BeH_7m}ZbYuUjzz3qwHN^%6r4|JD~(C5-iTf5jGTBn`sR8gVQ zBY;i}u!@MF!ZdEDu+Be&05Q!O$UhilWWN?k$grkDCGUj;ijO@K_Ifd7f5`?0dEk4~ z^@mfpkb(2;N%)W7w$LW#LnGHDJDK~A*dM~9D@AJE2o*9fEjh&#C$y07ivhUksm)7| zVS_;esC5K7@bK#pq0NAmYiZ*cQ4|i}K=YdMK@xbmbWaXN`YX^&5JGCjl0-vSwQR_x z<tVAZg*jTyWn@Eds#4L}up`u7%bkN&c~M*xPxxNlPI{{28I?A5MXO*IcZzx<*9Ss4 z@w(HLa<yycQQd+RO2=d`7i{Q}hKk{2GSKU}3UCz~uOlI?H;TK&+h7KD%;aJD`HKs7 z4T##2%aXy8erstZtjIw(vSE?%OQ?~PG1h{ExWGbAb=rhIAeFNVU-6d?3CF8RF)=u0 zMeXOx5D4m@b0p=58ku4Mlj2cjWpM;O)Bg&chp1cO&Gkf5V~xsWGEw%VNC_n;wy=Z! 
zLia%(D^yP1&~t(d?sB&=){R|T&UYZUMg;><p-@g~!b=Mzu65KV{BqN5NKpYPgH>pz zmMUIyQF?oJ@r9g5qLrrG-`oG1j*BZLpTg$)CR3)O4YT9g6C&t+Xb_=jBZ)IY2Jd@V z_>&Dd8xA92T$QmxmxhzaB{ApuqGN^kO%)r6|Czp;jFFYWdZ}b{bX8gfvl*hoO{Rr8 ztgyP1!Ma|MU7?MC-gz$`m`&B>hJ5Vq%H-AC%r$s|$*(k<&rK4tP94a(9-5O0>BKNb z5C}^==uniJ1{?!Ukdr_6>1SinvAnRowXt%Bwy)_5E23&lxhh+)StApivomz+^nD?} zkSUd|7D*HjvQXj>dm>e#F8#!h*BZ>jSw4`U^9PX0x4EC*eYs|N5<ff}g<*0hMQS;9 zJLAdPl9pXW^0a?g{1Q&9G2!0C1$#5`DHHY`r_FE)yHmBZ8K&>aqPl<Jc_Pd#I#0o5 z1a}H3v!u_qjl~UV7wIz+KPJeI0TJD)@u~`d6OWz}Nz{qWO*tOCH+jmVwm0@P@4aK@ z-`^$=X@?)EM)PSlm2yu|xC}P}aOni&_|_KwX^g`DiHeJQ>|VsLf9GDQ5|Z<95yvGe zDWvaA(WX2)h?5RcH<!B!5*|0I2ALLuWgWT4X;Y&?Z}YZHMP70p_qQKa{cBc*#t!z` z!Y4g~Oce#8SgMgBY%NOOXDY(3kCil`L|ObEl>!hVx9IBTHi`|aLYCwwsBV*XaR#5h zs}n1({X@BLCL!OPF*~V%>sdT$4g*!00v2wK7tV!<HssB3k{tS}QQ;`fvhg0)k_rN5 z!x<cbahwpb;`cS05(H%%{aiCno514$@&+7*Qlj$fx<kt*va+PMti*NCLdlYN^bDP7 z8eA7|hA#=ulN-@B0|s%s8f^G*br`OaodOBYl$B!=DlOp!_`dfbHcvL$Yu+f}qPi$6 zxCy6BVuchPJYkD%J{5$tNCE+ma|fK`GV35Dv}}bX=!NX)7cq-&iK+HLL3fYR>|O_Y z7^YM5Nz5^}B{a=ot5KP^QNwVt#O(1hr(lLVYW5)(Jc4aDiw?eO^m~YU93!~Sly@e- zDO7sSJuY?CL58{|TT=@;SK$Jct3<MK)!oDfT@p%qTZ6b8D*SW3_!u%98Z&dmirBXR z>V78>c-V}eTIYSJn$Ob8N}n_tfgL6i=BQXCK;$l3|JWE@^c>D7F5Y)dn^V7S{^^uW z-XYusPb1BPg@eJYi}?1-k&&$4RGUYg@kcM)Gu}UoO<m5R1z3`O;Wb0*10Tt~Qv_EB zYc@BL1$ic+!-xm$jzteq_2>PXORkNWrTsUP{Tf<u!M22=Gggf-G!aU37EYaC^Kc|p zEP-ci+x~^%DqOb#8I6NGjn!Uo*^HzG^N*=S11_d%#@P)IleZ0vS=+54%{UAzFpO*v z!D5Ij;#g6Lq`x-P$e)d)cZI?VVC^-)t4N?#ELlVBD}?C=2rpe?Y352(n#|8^th1Lo zDq3UiyE^>9IH)GB(AOp-fm?E#Dho3hxljUTtfMSDR9N}=O}G3?s0C$eRlbx^B_Xu+ z;u0)00`n<^13(?T0wb(r)FN)$o!ewiSlrniiay@j^TbjmKF5L1E*i<IJTd6Jz>5!L zZV~L(%5>I{un*A#c-8~F{#4pNq}<?W(zAjHYzvaQiGoxuDD_WoQQ>mX5e?B8XBocc zFx+VErskHjP@~eW?q!ZdZ74nWA{|Qcst;uN7%*x%O`}~!{rfQP_Jkeq3=t#EXsIuh z&8#2e#`c02umjNyY$}hTG1$e*PL#vZW<LEKDN0opEWS+$_A$^*Mn(+KJQETsHM?hw zTGYcfgE4ZHt%Y*FxETyToMu0f;ZfpNiJW00U!q{VmPlOtt6?Ubo2HEro<X~6Jw@gg zrBU>F24K3_1%J!O@TzOW0E&q*ibaW9sbJsJ;Q|y8q*m}S`&dWV_<7z<XB)D_*$C(k zX%fj*)RH|%{p^gP_G=#l$<{4~yfG2%lin(o)eClNX**ybZx!usC#k$D0Hv#KbY3x# zlB$EO{0C`=i<oTdg~-}eFn=AeU1Y_7uv*Uowy(Uxl5C1puRzpQl~yq0z=w(p2A`9- zDD?yy{^ARj)5b#=x~W?2#8fGt`2Gpyt-yD0C0fX84)vgS!;VZg8lF`l!l?cEd%b_S zp(Kk6gNj}e23KB+(q?3ay>jO{81|?5I6bm5yCd^$zpPk#MJpc(G#Mw9&@Z|*s}jrN zSQ^@_1&FoI;Z%Pc0i4+!G=$1oD=AahFpYZ}UUsGuN32U6Bejl<)g-g`y|(Zx8WEF6 zRVL^QK`6w)Rx5_(tUOPD1MDCqGSLxZYZvsaex^YKqG3>~<;o-;Q76dcQYK;*<sasF zI0GoA6Cx<qb?TboYB@HZ5erRz{*dJ^+LcxiGJ{?k&!xvvudJs^uBGd>dk#&6>kGqB ziwmEniy!Bq#3k{x!6|&}@Ge?rd(@LgiPv|u!x3E(SX%l8_!Er{+f0kaYz^esi5KKS z!ke{JpA?i3PYc9t8PzbYU20i4>*zo^#;Y<BRnHt(pk!HV<+FaMu|v%o!Qqn+*Rf4O zR7>YjaE>cwlsK$KIU+CcXt~i0K*`D6H@4`t!Zj2(2VLkisC2S$cJ^dy$<I5u6h8{t z#!?dQpQ6FzU@5>s6=EpT4p|)*$N!>Y36o_LrFt-XQArk+h$}t*9ie3E0P7qybwP7f z7gi~-CReb)Kv^W5DR)Q{Iy>n_{r-mm%Xg2~MyY;&JM;L37=0l}MN%f+83eR}guGNZ zG?wIo%9u(VO8Z>9X3+W)9t<Xtl#&ev7By)zeD;?ry$-f2>dzs3`E(rS{+M?(*YYry z3hT8b$lx_!?ut!vVJ(2)Sn56(q$nvuSwmKZ_q@W*{@!G9Dp5FD^Na^Qk$EYnn53+j z&5~3eo{9AcAcWy$(Ue^R!Cv3l#2|fu^M``VPn25o5fJeHTrFowjMq;l$D*8|Lg51w z1d5~aSh*0O=>oD&6BZH&0B^oXo^w}AFdbVJyAbxC!RmpNHEorFCqH6Vl4afv`3JXZ zmL$0yYX-A!9F3(t{WeY`6~(}wJ!#p&N=<xB?HXCO!V2MKrMe`A87oYtj})=73%uh; z4-G-75$5m4!Fy&iA6!zD@>|L^2UnyDkD**PwmD0N*-`_9w02Tz0LybThXyf+n+12b z5oH*xG_O*n88-6x$~at;-H%wbz784VHm1FzdI}3~eJf+r@UA=egIQ|Pnu9FXnF^Ip zW;#Mn)5tpo3!3?`Xhx2`V_Zb%EF#mELJo?>Qk5+q`Z67|HhXVdQ}!I=jeSxjm^Ymh zj*j(gs-&VxX?RtJ!yKR73^is;=z#)~^|%%(WW>^egtz!W8YP;#nCu+5@Ct@*5@w?X zh>CHf+GN$J67wb4R?TTWw-JvPhr-mrhZLGPvNce~tku9CTg5}ky~7GN74u-o!Ips* z;DiXTR6a&e&l^pA#XPm;_XFoiqf$Ar+;R#i)VV|fBh7k_k>P-8Ooj>I2$VuDY*k6e zbz@rLX!W^J-8GkhU!G)=p#v+mQBf#LyvLs91ECSnHY<YVjlrs5zwkOkoP1E{F)3&^ 
z=`^^*5OpTFi<u=?o9=M=fC|8g$uKFHrETnsmp8^#Gq%Jd!f%#F0iB*8=BR@oeni4K zSo}S*GM&ul&4XWRB*cu#p^mGLdcOO4DCqY*wL5&_djIa`H|gk4@N1B<GyLT1>Ertq zeP8+W$?#)q4vru2<KD@*&<HRaV1~srn-D{iRldM-f#;Ye1CRdWey60BPmbQsvlk>n ztg77YAU^+V<;I|Oiv2q@Wvw>pm+y5vUFjeOJFn6%E=_v&!}J~)w*E1G#2vY3q9xH; zpYe{EWYEBx4Dlg^C_yZU;kFUerWc)P!MJ(Z+60IP*Iglq)VySsFLBwLGp|xkd}SBd zl`6_i<+6u2v*DcWnkEO<wO?Yki%BN+>5pWqjN7(A`k@tUxVX>dR=wyuk7drh5#0PN z8A~;HX9;zL3z*`u?%&M0KgHW4tG^iG%)i+4%QqPQ&h3>XHJWs{p$-sL!<3B&TT?8f z=u953>&XhYnWf>I{0r=Tt;U8oM0=&?nBs2Fne6=GTStBu1lgThG05RuXATxD8^W4B zu4!9W!LwINK3S&VBo##_+Pg>_d@N6!a74$pmc)4P_9r+8JtNLY$vB&>VR~pUDMkZv z;mo=2c+{m#_1@1$kNoc%v}%(km!(&D4g6t^dQPr=nhrJ#JUO?4%-{kG7Ie2jqRR40 zz<x56O6UwKBu8B4`EQ(<YE4e8l0wyim2}UxUX~Y`(`*(Kst;Ek>_IIO$=0;g1B!Dm z^+Bt3vzPH9STe7yz3AdUEtt<$kikmM9?Nvgvl0hW-aIRlTr?R%;SP?@&S04aRwNZ4 zS{&TYc~Mw@b&`y3tKzaLDlQRcXnzA8g~1t~8`yuT1l;MreclHEq`+)d<;X_oyN-~7 z(?~FC%9LC%wgr!s7Wo<##~@~#BRo9!u)o!~@N3t8AY?oTKN2V~=uh$KE%DxETw&s1 zP6(R6lAjw8`2s--${r&l=afno7l))~#2jr)?$C;Ne<#rn;lbmQ=A2giHX#nKN3F-_ zQhzu-qY@>le@F^Pri7>fhuI|DC9J%6_Q_3aJ!f5hQSCfOmn2#NiDqus8)jIClN5;j z9_m6EXn^z*PLyXiXwXtUbjk0Wv@MnVMF5Kh=q(yhIweJ^H2c|TKe(pDS-83$?>lZS zN2Kpyfz26WjEH*|ir7&Ed$p~gwk!B2tZeuD=~M8#{`)gq^TOxt=9jUf@225QEavB2 z4=&>*ZoYyh@7EvPU%f9wp*gx}uIMZ|XnJ~oc^|oltU5bkpi9=g?o})$TbU+0OC!Lh zgc4S~Scnq~5wPHWG^1f(2fQXv#Qp2Syyr#yVjeJAWZ2xZ;_+rgHD@1ZC;s#k=Ip$4 z5`K-*JEAIJ@v73HEOGE$lFke|tbw|w;WIE<nM{p;aTQNC-Ho{z<V|IMG2aHpFIT33 z>}CW`^JPkK))iu0WG3p$BtY-VnzNbFQ~a(TzUdJ8TFC#{P?4k5qy`gU(kv(L!L(2d zHA-+LVfw*hX6KJ}A|GNSvNY@EtwPz-?7XQ*A8I}K58s+I<))3Zi<F#S6|H;{a?L>a z!aegKT3puuo$zzwWQ(%B;Z&VwAWYtyh<m8{w1&Y4d-?%eV<9SYa71Dyj4WY-;P-)H zgwk8`M~O01ozA8on_OzK*A&$pk(HNKOy(Bm5k`OQ?LBVzCFn=>&pxfFxxG1Tbq(h3 zwO1*5ym1(kC2pk5{V<gqf37Wvv7L~q_<q%J;)5DIf{C~p3A5rmydzncx<H!{En=u? z^&m5bG!6EmrecGa<o6-Z{Ez+M@86&HBx6{C*y%6om*E&511)Kbb{fz8=of;da>Ajh z{1HvZnAf3&A(LI6oAY>$K2|OYwSy4rIj{-?>l*hV{Bs^!BH3y6YuAc~PurnBsLOMx zkaDNPXK!Q*zsU!;!rsF<8>&fk!xne!aJ2r455U2}DPYWZFen_}@gNqRx7@<QQKdV# zjUj|K(WyQ0w4)iJcDU3*ublOK+A7Yt<SFQg>~zR*-nx}2Ke4?LMQ#gEELjPI><#U& zP^JYdRPjSU*#I6eLg^MNg6&F#=4{hm0PP~lDQy$JrHq9JO&!Nah%oIgTfY9Ut?$(% zFYuX+AV+qs2ixh>JW;x~%vMR1KM^iWH7HM!u}*YmkJ2R2U8y++jVLP+!~o6I<^+bd z+PZKhve)t_4gYcj9DFS*!h?M?djA}+39fg)k`yh;ChF>q3jVp1i_EneMsw32($is{ zPw=W8KN=tm*%4DygLx|zYi^B+?o#253x?0*mFW7ts;dJ@Ee=F$mj(|*#2NU2Ti)gK z4Uw(y>+?KB73DikvsODkok#z7TJ}>CJ}gL~$lAz*y93}INHp^eOLMuwn^k2;6jC8d zXQ*S0hDfe=M#}o)Qj0a_oDMX6^eDmk?7JVbiQ+Prd3iC{4n1`T<FAFFpJ~xoRNQAR zD+IXP9ni1HQ$gNjgrU462JD%zJ5NKUQDYPbVS=#67zxxL7)^U(pZPLCQBr644hiYB znpKFw)J#aHY(6X^2~s)w%9Q3-H(QUNnbh6OWNSIUl$eC~2evfGJ7H2(ECW_zxPteC z7idR*q)js8{XeX|1C%Arwk=#;uIjRF+qP}9%Qm}gb=kIU+qP}9%YN10{m%XGdFR|W z?s)$jd+*G>GDc*^%AI>f=8Twgkq)^by|I9QO;ydxe^0k5K@)*6;t(LCa3jy4c(xZE zypu~W+QL3}&r;KK>g1c`3l0?v2(UtoK-%P)YfaR_mIPiS#?y|0;{^4;L!9#*rnEvz zWf|G9poTC?222i8-W{(9h=35|Ng*haf8}AQpB*}J3I2U3$%9LcF-%2Q4b{XRSr9kU z8{w~`XQ`BO>(5V=3kW2;25hDjTHYW(E-Xy^YXY0UNHxv0)IWVZn4j&OIZI)sNG=+f zMk&27Zwos}#i=c)65ft#kAM)=)HZBW*O^JLoP=ZS!mpqwvbfNf2fHJ02ythyC`tBe zW_aGQEmjCDkn2IsqCDC03s~<d6CvmQmRp|CA(iob7J?2>n5>Os9xSVDuy%Y_JI}J? zE1-h^f-m33W8#WY<flUFHb=+t5xPhh#|vB+smFbuB(X^uv&bhQD|!OXq{d7+F-P(p zq&Osc^)=d9<!BY04B<Bq!uiS=;5#X&jQ1)JCx-ZN$Wnnd$sUAMVYXS?k2J|eT&Sk8 zW?<~kV^Wgygd|;e-Bw=ZQ7tHyQF)JYg|qBI@duki-Rw0ZN%Uqs$WocoBfb~Ki)+1d z>_UxN%Z41FU-5~`iABEqC{OeJLUHMu)jv1LK)*A@rCZ~Q>F$ATbv*=FQ0NicVUSOT zBEY)!$mDU@g3XJlaOf=ZH?Yul?zXRUFxto;#=SGJUBavj)+$rwpbVpzB4TNh|6qb0 zP_jRO+)vEaw!oI1@N2e14_XGO>#xi9J~W@i^0A&Uj#FkE7f8`nJot&BAH*P|nLS?) 
z5amh_wR$t+S!$DTR50|Wku<pB8-B6rv$P(jXm~}#6dH7|1Fvc$wkjyoR8qwuZIj=P z0TmIOM@EsrK;Lv^-1hO|0&qswmKi1W`$!3lxhsv~w0Pu>1{2Ibg{^4dpoF#+9R$vW zw&TOs^9m6wH6_KTy?b=}{cQ|4z4Wk5W^w>@WDO2#DH93UarD|aK>=o!@1!_mvuO0P z<B^BFinr@&Fe+GCzg>qX#p`BnYK)ZUZA_O(`t$S(<q|?w_7e;D^43N16Is`~S*7R> z{ZN<2qG~|*ZnI$Na*bX#xk#_k!vEX!h&ZdJFkYMP+Xbx7ExQRXJhFhL)2`)o@FJTo z0y+l9-BCS8fAB-c>0ELrCC`$pbl2?hq9j;%=o^=GjqVL|%Qn!Z9+`4FT`j?7Mj%>_ z=JlTGuo7q9x+u0%f6(C_t;^#KOH(L5+NF^Z?ee4E>(!k}$}sg0u>DEB0>IQECZu=T zqQ}ThH@aThu#Fe?%1}BVkql<pb$b@jD4x{ds*os3R+o!%9#fx{2H&jqnR$Bldv4U7 zN>tkr&zmD}%?{_P$q_8H{bO@_^wKWDWa8Gri3}pwhE71CG+mG>+`03|*HiaoN!Q!J zmgn5|XZx$evEPTkPGmXP8MG$ewsv5Qn*E8J0zM~MSD!gP$6`&Yca&cUn!u~{dubE7 z`tFuHZYJ*zb~Tl^_|UQ5%58rgT<T!&$Jw3Fs5FJje$08d-Sxo+SVdOBEsm`)7y0eZ zV)7<-i1mamy%Sj!d_N!PhF2vY{hn+QEg>9{SFGTj6LrXpkP%neGOLFfQ$I)#mZs=! zdr^bGc%UN17vgU>_JhL9H(4@@5VR2uYRc<5ZI6{#;pV5=fFgv(Z}1{-HqZ2qvnlg| zdN4f`_9JjgwtNED@?kMbEu(9K4{w|X@xaVtM@fFR9MZ~ZUko<8LE6)M*wIHG`eUy4 znA`K?mmkkW>lG@9vY&pSg4Qgp)-dc}RC7-zTRIMhMeMG7xoc>yX;cfN$nd1w;ea2A zo}Ua34TkVw>`_?fXB7F2C!<voMbjn|R<jsuP{Rs#w?;j2dSbri3@(_1gx07gu8Dp< zCe;km<Rg!Md3L?$e7fzwL!zCdi3M^nNA$*yLz6jz`g1r%6Y$thN2Nr1OmlO+iedzq zos{!Rp-jbjGHDHHXZJJSpF+MFFW2hJ@T75m4ey)Zlf2g=nWlnGVL^FLIPJ~x9K<mj z=s0GZd{6BmBo|9btJ#$-&#wi3lnbYkS1N;`!6<?`1c*Qbzbp(MzolJ`2uPyFw3wA= z^gt7=0|2Xq4f<unv#n)BN+Rv*o4G@MaDOzno`_%nM1(O>E9=aHk6_O*vJ7xvOEN*G zpG*_#l9iKqgApoE6Q$!&30FRIy@1{EEKk*J_>KmK6?X%sRyJGGyrzQ=I?s9oW?SEZ zMniZ{xEB)Kgd?Rme4$>%CL=hw@Z?}(>L$Gy<}njSS9#{~Hm@K9GmB=1Kf6!j{L`qm zH2qkjc4!aR6L_o>OCD?+w1jy!yKg`xsK&^nYxG3U6pG0{PZo>-MFPy`6Nw`>AwJje zxk+coH>4uHHFYVh2k<UjE07pJzMIe0))FmI1a+yoA=%<ShK~6_ybeN-hC#lD(J-JD z+2PmdMN{?bY%Q9<ueVz1ON@1;#86Jx(EzI#R#00-R23I6GD5Y!VVgR<wZ**A4yLV_ z&`6kU!LVC}LBU~`&fc^HxpfyO7oB0Oe7cSayBN&gjQDdken)5^mj3IkJswflyBM}s za*6;AL>8gNS;OOFC2tnb_Ga6Zm=8I%0L3X%jsU7=5eMkCbu(`gUvBI)oF<}mvE3NJ zF0h<lOcXb_U>2J?T>xyK&_0=SUod7Ok>D5n-9FXm5ESY>3@!Wi98V2FDx&;@sQ6sl zTZKHFP{~3i6{A%EyCXAz&aqy;uz;)^m6TUZff(6yULP&%+8Zt}{dFTJD&U*K@vxBP z=Bw#k_rZ64w^Z>B$Ws`OCV|Ed2B<3c+kvn9uOEfkYS|D^H9faRC?XxVc3N`}Ro=6- zs14TvNdd%8%1J@^3!z`r>|G@~C(WsmQP<Ox1e^%I!zIxx$Sm!#;;LD%lNMImP-PWr zg!sAzvebBfvmdLMh8q01;sSjR?LTr#we!agmKWe89q)Uwb<?GMwu?B(+|fzNi^%wu zuBNcrbHqGAlQ#fr%*=AYj48OJ8WS{~n9I{a6fr6FTI-)rz}jtH7Ex+qKnG=Vvp||) zy>!S(fJ8b#c)KBUh243}1|8b@(cUFYu}7p+u?q@}Xv>uZ5KC>lp{z)*<ei6CZx=hj z58+M+=wDbnWI#lOBfN}JxB%b^PieJJjL&KHV^H5Gv$X=WuU=Z}YG*eDRXG(*yFy&v zbv293Zw`AjWsuP$?EP%D%aJNBx6$lEsnN~cx(@t0CA&WwzEs1!&01VJ<`aGae) zJ?ZTUQ4ZLR5cne&Y5A{Y>0?FsV=HaOuMmdeQ|t$G(3c*AD19RZJcjeI#}rMdi(q;S z#p!Ofw`zM{@zU&KQl%)G_RcJ~UC0^&d?zhiA5Sbau^ap_^Mw<I-5aM;iHHeo8b*>c zMH38P$z$}jA<gnh#Nw?)lLX<*)LEW*CP*hHlHRlC3|qzMiFto=l&UdEv{c5f=ki0Q zc*~1ta#1=dI}y<v@ARz~X+~~($YnYiGT4if0`}t^H2-!1L6=}7j*j6sK^(Htj+Qan zf0EuaLjey$i+kvhWe8qYwapxLMrgd2)8T&SYJR!7iQ?JagD3axDYF;Lpds)GO(61} zTriiDlyTF96vcpp;#kms!Fl1*%S_p<^o>T39R%lfrnE|>PD|ykrd$cP>`Mx<viXY7 zyI1H0ZV$D(n+3krXad31Y0t?lVY9Zwq*Xlld=e(Xs?Q;{iooASzT?W+tFX+{$mZl# z51Z}ZaVx)umcYZZHwHqZ?@c7}IVLS3o3Rd`6ztb%=fLe0ib+22Ur_hW#P?`QnHNUC znQ#Hzn<-KhB6U5Fkv@+4$VQs<!yzPjJ?VL4uNm6q005DRSI8RYga`J*A<()?<2V59 z*g@M<P)rvKzrH?I1e0uwQTk!eF#Fn|Q#p{ad7_e3B$Z@9%g=A91Dk+vFT(|(ny};v z0VxF|y6L+q(_uOpg4uXu;g3FvzY6U>iSow8PNjdPlGK!M`|$&peos_{%QV~K#9SIi z1lzbcYw#wGx95YQs0F1-4h+9KaM<8vdyQ;(^PA5?BjT`^QFVJjjlbi;oP?YNfMc!{ z0)y@J+2SZLR_9mG99hZ5RPusDc~h4Jwv=_%@mfyHA);@k;-vgt(+6@wxnHbYB;lGZ z!)Hk>lO@jtu0G`g(7Y`N99mRErV@N;fe671tQbjCd3C4Nd}(YO3JXUUw#)?xkW$)n zkqib6`BZ1-!8R=2i~kt5qvJ-OD~d%a3NI&l{YNB0MF;l6QnWo!7&ZrHvq%jmpD<*N zrX&AeBHE^6<W!6KQhJupd&v_El`f6Xv++-;eiKGZu9p|s#wbRl_D%H7hY8dnn>RF3 ze{mhSR2e=X4S17pjdM2Q!KygY4BTGXoSD*21$fY-!Qd+R5tWmt<AR9{$XauTs|^&s 
zBUqx*-NjK7b3^-h6n$sla?{Hds)*(FhRaVAs7as#dHUt4n4j_xCXzkxa9H_v?=2o~ zSG&nlIGyjqnfJ{1W1W@c!2oE#r5sp3nQo_cP&wDxw4_@*({b7$AF(ohH;xV*abMc7 zy*jB5vu-cu3?{BEwTUWbJ`C(67kx0R*Y)xW6;7AczcHaJDLCHQ%DpdLP(PADWtFPO zy0WFSQ<U<iB?W-()SeA68LHB&GeKhp7Q&1xvun|^_f_A39iPfFNEz9N<~sgx3gE>P zX`bUhBF~ys%Gp&fJ2@%Oee$;_G=eAGu3l*j5h?RlY{u!h=aWgD!!~O!3FLF#m+MWJ z>s98+TcDKA1^r1<!U1PojozhBYbwd-AwhDavNmRz1#j=KC8;4KGy71w7QDi9F+s;o z4*bSvoAINV!opV|+Aty$@;+mknL!X3<MfWKu}H+i%Ac2aAOpi#7*NOAZ+e(wj%OwT z-7ET$VTEdg?WNy75LSFe?Al?>ti)9&Hvd2}J~2>8La~;+Pfi~yxX{AXE9!~E;wk-R zjXfAtk(|lh`ecO+Yv9(M5?nuBW*thHMEBYN7!GlR$G+Agj7Ug8Qv6y(2bcOA#{`>T z8sX^_uV@@`)=Zb{Wmo>VO=<lp1F6caA+t>0=1+D7A><P!0XK-l0KUU;&q_OmSqw<- z3quor1eMjwVWrU^I!tLdF))zMh0#PSFhOxZ%ydI=oP~)q!QE`X!qBU8)9>dN>1s#t zQi~)XVs6Mqe6D)G51H1m;(0>J?Q?yZ?-X(BEW?<<kCy~;h;*`?XA(fP+`3PDdc9_> ze-wf9*RHtP5GWxA(YJD{@4z(QwCjpJO<Ht%XP#-*qf<tdhQ%VbwG%igS?c`=hxv_m zc8<iD?YglByEzaT1i>c_kNFFIRe|nmR!A8{7D-jzM;{SOzbTCG<6VM0h654H!sJw= zTJU5qN9B;Kmkex`D%zeMYy&Kn`;c_3n~qTTq}DAl#$141MQdK6W^Fuuv;MqE@z!*6 zg4w4mj~YS@WDG^cC(;tyUlJ#8v<XuqG=BY?@mg4Ek-<`|c{AQ~=-b7U`pdgMKv_+y ztv>NoVs5O7W)`O+pEdoe5Jgo>qZ`i+e;S@hvVD^5<ps2wDVQlhU=?&MG)uHvrQb=I zt<eclEp$B@;qNAb<MYDh<~q;x9uj=IJlk6w71h`aOQo$mToPbG9r~j06d)hbl2jSl zJ?;P{<`eE%D;ChM%MT5Gh-NQ_VqFk?{<Rs*iZPwd2tR6-vu*l2e(tHoTqO0honJ0G z1VyrlmaCy#j%{k9Yg~)kaF+vr3T42JCPPFhzHbE-vya(SC8*bQ&I;8suYS0*ET;+* zykADQGM0Ry_`7dGJ2Uz(Ls9mm*n?rWtxDK7IzrY~_I?_CtTnrX4AmC>Jld9K@DR%{ zyO_8tFK}R4F4i%5dbNlUXNbuY_@Ol6mt+Y~4cBlYOWAX;!AqI>n?3b%S>zL@ag(V; z#YteSu{y58?1O4!v9k%raL^s43kr;-h30<7R1R6F@W>)htd5%P&jitWS_vt5uqN;9 z>PAkx^9eZ$?LR$@D=Vv<VzR4OLvED?iPY*-`vW%&UBeM{0d<G`lnFvOP*O>EBu;9a zzvD%hWv^DRz)(HRP(eSX(T+Z5I>a+bqZA|AI3vH@w#39Ybb=HU8n?3~Lx~p6<lB8_ z3>_r*q5i}X&dzg$1f(l~_EqFZpQQ9b7e@mwTsI#0CZc9YWg?04>zHIaygJ;sPeV2F z-j<cVgbYaTryX8(%NFC7dbnsgl8Jk<LPlY-N+E!fKCnsP<`q7m?3FZn-T-oTU8=G{ zm$Bar>oCQb4>3x~lZGa>Np^9sgoOq&@E}&_k$Qd?U!H_rA+@U;6s~WK?J9KiW8{`w zcFKwGmhd1%j@_(;7{WD!17ABDP`rZ%8guVb<~Z&{_{7ukXMOIosqiCzDl3M(-%JRF zrnV4-BSzUX&wyYZ@MJfZ9pX=e0o#PTh>C!zi+H;UchgU!&8K=SBBw_%MyquFlK~{9 zaVSdqIE_?)h)Ms|iicIxJ#?$XLJsYojkvHBk|p_<u$fb#k>uK+P6-oS)4!dPC4Ni0 z1#2#8Xe2i5nj3?p=t|}q*R+ghKUDEtvPPZ?q06U=rgcyRvwIUrMU-owTp(s>1aIW@ zlK}3DsW{@03L<|^gDfYq&4)i#az=b)xneN(IZ0ewE9y{>So|I%hag@8?;gBE25c0P zV4~fH2TO6KNT<{Hb1{~bkOIW$w-dGSJ77uS^GLta2($|vy)1VU5)I<C?wE@C0f(|~ zRtaJJd#V7s<_1aT+i%ut@aa6Pm<0i>cfhfo$cLXtIy!B}LXk5%ug4QnG#w8g1xs7F z?%8Ziiu8aBL^75WHqpaMW^xaPDqBPO`FODfHj3b?&)=;81EldP(N=Zl#p?7syE&Q$ zkN}55h``o%Pg($!bplLHTn#d;OSW0~aAo8};)8CWSyifnuRJ_F4t8uk&vF_Q@J3KT zi9&Za$SV9U_I}e1?uK%4o1Jp5(KOsJZJ`;Kk!Y-%tMC@cigvNSNGE!T`7T>=#;ssL z@=Rv0{~o=D{d`)R^18qO`YcQpW_5kM)agEUdEcvlj$J#n%D5y~?Pci>ZOwVV>JE49 za`(Lto!R0(D9zn(eNOPc1=D_5w=H$Ov_0Vr%0AxToeVy{Etqsc;_fe<Kzp-vuAN|J z>k2%*pFKa`tav?GykvMF^DzAEbq#*vI=6VEXwN#q$V~K>|2$yb`uJB8$v;y{{!f-p z$=D|dBRf;xzoGT~;nDn|_0TiYG5;sQ>o4Nf|3+qF{+pWh|C8WFD`ezsW?&?0ulEy= z_J3#Z{7;Boub<wAKmgzXpWpt~o4?xlmpA{PEB|otzwok{+5aDrl>W(?f2At@_wGUd zSKit`latc5#gOEYe5P8BRw-J@6U)>B3dlo|K<=wmG~oga69daCNfHZD)(mKcF+^mA zt;B>29t?z3rUm1bVBvctjMUJ?j0*iAEGl`Q<2WN8mdC9b9WKXZ(ywOPCOy)*AJ-j> zg#i5U>Iv5g_|UrW?i_wWbQ3khmBd=-M7^d_0RWK-vGX0s<^*EQ3eQwzAIH)h&<H0v zlDm+jizPXhFv$IRIf^I6`sPZu#<#^o<x95KFMJi5mC64px>Wco{4!!_h=ERGX{?yT zzN+U+28PG_+JCKE^CdN@j~&kxY}NEsh|#N)%T<V5O|@o}9cP?YL|jT$zA`;PaLit8 zRVv^M>M>#7<*z}z&06V}YE1cbzd}vNSljr8ar^79Ci+J@@)mwA+E8oE@x}(Pt+vVy z7q4j0`I_G22P?UQc`Vl^)l}yCffze4&zOw+V26@0IV&%k44vy%&ucpOICC$8(Sq`b zk2WSgWhU8lq}7P65GK=*num4dwZHD7zWumgJ<>tqp^al}K7#r#K7qP$(5y8bO%<i2 z9Yv|lxiL;t(u%b-XSbf4JfLUq7^edXaichl7)i|wX&Bfff2>gj&`7svCEGV=^~?9? 
zQOc@UC@7cqB~dqdVq=B4PkGwvh+GqfoD*reJ|)?F@aVz>G8bv{7y$gZ<<)qM?oW5% zs$(F!FHJ#rR*Y(=78I`&EW|a7u*n8Qf&>!89~QhoYlXJLtt0xL4iw%(c~Kpmg@Io} z&`&Hel(;*n6}mA_m`E<{$of^x54XS4xQiQE6Fy0+32JOSnqz6%7WuhWP6k21r48$j z@1O4irL|34m*m|xBMiJ4Cpi2J-3cL!L~T&tAKWrLc>3At13Xow#Rvz!CLyyk-TL}Z zfVV+e0fzman+PB0GW9!N3$vOCp{)(>zhXSmp7JJ`KX;CzsO~_64B>ikDeKJh#sO2> zh_iD4?t(X&{NUcyWMaaLY*DwOVYrfN=aj=djh)db1dfA{1p+lQr{RA6G}!h1EyHd7 zGF7YuZTTtFn33acbwFfEZD_D5U7jQP6n!{S1zo+MYQx`}Q(@G$ZDq!ZQcWc|`i);n z-6kc*ihT@IpdOo6Psi&!oKx9y7D^EJC3*hQ1TlVTx{Zl#WUzihq`@2r)J7i#3mk3y zC=5#J%9UDtr<h&wTGl|tt;}#zMlqAmDiT+`Hey))vColCY>6gRDqKu$j_S)Dr#&7p zQ{G+YJWmdIJ<;q=^xbd?a6QT7?DWiyjdv7t{S-LA@7#esHzXS;8(v7YA=nR0IAH$2 z-Zxt}m`fm2g#RXf{RvEe#V<NK=6}#Ezqqph!YpO}L%sas$^MUm@&AEY`cDDuzYkac zJ<ax_hyD+Xv^V=+)(Vn0`#;E}+ZGt{0RJvL|AR2f!2S>CSm^)NIhH^4;xFg^4aN7L zLgj>(rUQ0Uj8~Q}{cA0;sxYDB-Yg%ELP7&8xy_0ZsRG4oG-^<;AqN3@oD$6^kM0yZ zWA`^u!Pyw4#0mo9`<}awF6+DNq8r#f**xecZ-ysi5j{pXlfkuLH&c7}PrGBB7*kgw zsh@3}sv5M6^b_?{u>E~$4@44*y;U<mdBxRa-#&I9-ge*LgX>)OE{J*dLJAEtaI?T{ zzC;SIJ$Rm<*rU3b6OF=&;`Ac-o)?~W&nR<m$}qW{Wc~0ZeFXcT&{lRdWr7s&JdOmR zs6NP2C2#Ojp?~<O!KxEs;>^wvW=8)e1U-ujQ{^tOfWZ0u_Wcdl>yvHpX01=L%Yz-m zQ*Bz8VO!5Iume}uD|GG}R%YsJDh1D!H&q0<Q7j_^1OI?3VITNQ2kvGBE$*~6`Rg05 zP<dAC2gE66R$UfC!Tw#z=dF3YU11Mu@mS<y^9{1)D<>!V@bl@!)3_&n=ZoFjmMS%= z;KWFYZpRZhn!9`O6x?|&mq{2rf)t&<3ECjv<!u>%Mkuaxp=g@h@?2kY4w&~gtgcXZ z0aiU<96F4KE4u~0BK*!RD34RV=ff7WZ*ZJC^1j&h>93u5ycKfbELpTN#N~W)$Xki) zcL_QyaIJKq+0m=IwHM%=5#cyoJ9R{%6ageoD&Qq#Un5R+iIm{m)E|>@V!+SvMfw`o z(SH32+n!E<T;9LQjr{5)q>g@Ah^mjf+(Ynm*UvaHpM0g=GO7=IRu<kKk>){gK|Y}V ztsk#c{GudvMXiL*7>9Zf0S{v!{gn=ff1J;5cDx2*U6mA=60tl3*+~%*+;y8sa0*ud z5;1`*T^YF7?ssqH_KuR2onIJ1L$c<I9;I!t=)v(2$mKM!H4|x!`?&6&KNgH^C4sC& z4M?zfBb$egx?W|ifZp-5L%p+psB=Sa60`5q2>KDEDtm*0zyK@DK4N7lq5@we%c4!d zMUOUoO<a@k%ZnT-pTUL@f9r=7DE*$I437yke?WmHF9Eoy6pf<;EIgupxg#N{y4H^! 
zti`2-_<;4`>H=$X{E6A(Cj;7s>`26E{>0z4_}~mJUI?VF+d=}u!^#R8hvd;0ho-Yc zR&R+oQ3@J%!#bYK>`Jy2vj%;6bzJz~57H-oS<7vm<S||HVHEhdX{ug4AN-xl7#@25 z6oWS*-s>0{TOD0p?JXbos!2_#M~|#cCXZiQ%dM{7&hFk$cAj69<@+<qcNf0Qs;aH^ zb)AfpESVJ85Rq(|;UlC=Hj#-ms>dC+DK6}gLs;0e;Hd63<8mabAAXqJqo$60aWCA` z^Xy(v;t<9KQ$qJeQAcf2Q0H0kvnOKty6WC_%wT0t3l!cTJLg{Q?U=e=w?({OudV?v zW(-^mHM85_fh>2b^AK}ZaaP`|it3)SIhW|1*>?IBsMtmZ4i5|<AFi|P>~rJ3jTCGP zsmQ>DR&S&EEE8p^>>9|AlCmQaM|q@>Z>pNO`h)g~P9j~IL!xw3N%TqB$twbgK!~&K z1HtJ(ku7o_TdKJNC@j}*vdRW)ex}?HZo`10<5cT_+wG!3u4$<v0Y+oXmDL{d&Lj0m z0VJ*1WxdzshJY4KR(vpV0d8<lGWF#lBYgj)l)fz@B<&-uR0AQEX8}6vR7myX&JHPm zTy+jbtRJnxWrnCAdA8ih;U)|q4;jhXtc901PyY4vk)!~X0vIiixu_(nBfissfO7L* zn2lx(Im1%2y;sQNkJep}*K7qYhiQ;g6}4yBhnO2V`ljf%f@_lC>87Pr-MeVhs-D3b zN@xZ#;nr40?wH=~JjKG2KQeS~>=Xuo1d?Y2VOoBa9!QFUbkvZ>ez)7`m@9$=D+)z# zo#eAac%n_3%IWs#wFc7pey-*RXw%lMUeSa%owid84poFyT#i^PUT8WQz3npsN&3@0 zUoDJKxSkN8lJnsgj}-X}PduM4DhWvYJ7erMk<1GSG`1k1evKgOL5&?0GOEX0X%3%( zp+f_1YWxsM?%acCAbrkEB)QUxI~lnnhmis=D`S1+GBwnrV1g=om*ehAl(um{jXS6F z8K)+2m8`I_=b9X_pL??vk1-@zNYbz2pw4uZ7=~6_Ovt3deZhiA1w9zFXsL5l0%!8< zilU36amcxxT%_eD@!<2Fg2|al#rFK4LbH!O+47o$y%Q#2We#QVYv*$Q;NL~KR;1cn zr`;3`yImsQmt&T`m5xiRtUqM;<mf(h=9^w~TR5rC4ah*;$lS<X_es#Ra0A*4N<4>j zE!m<C4nefbK|y1F`@uTNvuHqQx(&7~B@aYWLkBOJQ<W;uzrThkb49_M5Ky_Aa9X@a z92+Px##m_%xlAn%uC8~*ibqoIeG`wOKZ;Z#>lxAWNV4y>m6Qlo`l}(3XA&a>y?io7 z#qaen1g#s)hSyLe*)W<%|0a5?9)Ll{VXt1!);}RUO@V@IoCOIWXAv2a5n6b5EiD@P zp$Sx4-E`wNvbVf(-l%@Q0q6$un7`x-#}F}Asifvi!(j56T;hC)s3kyP#ClCSDtde} zyPY!ZQH(os?L2lSl%K9Y8Tp|w2Y}+5b4Q3@QLr{8_gfNvk2TLlHNIPn;^~`H1#PJH zs-zwg{F7V4h6&TD?Z&_k6M_uckH+RC3v%`jwXsvT_mwoOw}Opw>OGUIM&Ad+xP-!T z%&fZy;<C;jem5KZK+i2!;7li!MCC1oeAjN6n(ssDbDU$PonL~y@&oM&UJOD_mofUz zrC^$Db@!Z^;9@4x`eECjR{jOxjg$hfXDG8dr)>nMA6<b4-Q8Mb<$(&Tt|8$YK0l%c zfw0Yy;R<^FSa6K)UEf!Z>=>70opBzup&OATVp4S@+dIp1;se3Z7up=9z?6J?ZLE0m z7TYM4tCCQ{RIcQD`soh|Pk-;oQ|9IP<*K;z)x*I(FB>scKLD4o?UnzY!{cC`ou|PR zcM*}Wl#4M7#v^N#Q(8t8CKyd;1<8AyctgO>+eyA(@S#TWG0dnJC#@!Qi$hC(g_MK~ zjOg#|jTIzL$GoSO3EtznqO-WV`Bq(E>Ul(N$b=D6JJ@M9gMvFNDqF;-**y}Ayr@Lp zJlL;<H8URWxG)&pTmR|tf^@N0iK)ygk{XbNnYWXJnJ3Hakj^+ZDn3cIwIq%4HYIxB zMQ8^kn|_I1d$=07+ju$qqYf{IK^S@<z5BeitX5&&@&E@yPVI<XE_O~`PsMhw*)+{K zL;D#m)f}h2R9kP>UL?+AI!{q9z@t}mr7#5`<H}5Gjfv@X1pw13b#%A5D(cvA1`k=| zbGqJanj>3S*C3SZw;yf2myROYXruaSqqJbR`4qWlM2vj06=hwzqgglh2(oB;gM3sF zJ~D+>DvZD@mgPFTKeZ#qcb-k_i24ehj5{dN#DzHRt$}GAeuGeFq(Vds6C=&Uy?%~w zW5~0Mh?D}AnX&hr5|62T`Zw|3^BA|cQf1kuu{DEZ;>#=-b=D=s7IlMOrexFW)%h7k zAu}go6BrhE_%cn_yOH$-r?^V$UGr$u@Nb0Y={fMk^!NS<G{#PKwxTFs)c?SG1I-T2 zy8Ylz)YEN3r0`A)uo;w+>3eO<OL4A8BO~g5{$|LxID7^|)kF@P<>}<`14W+s4ZzgN zPzBNnYz`R5#<0pZoWb)MthhS^1kr3x7_Fo%(g`8nlc12x*-r+RuU^W&qiU^A>nf^$ z=wK;VrEjR>fNHs}YjMG&;_>@{Q6L4@H3@MA<#%B#*9qUnlMTMZJtSo^0y>8kHL}zA zE1=y4^fm9WC^G5Ly=ny}f6cNK78DPrWd1bhy<UMCXJN9rIM<v?s%y!pO7*ZJt@&aB z@EMhm--?V)5g3;Eit{%id}(hUs<VhJIeQWG1D)04k58Uxtr*g~r1Ue7QEcH>1CRH@ zpjP=NC+^F7)|7!9sTY8=7si^d%gs(VxC(j+WgI(<b36LQM|#0dGP=tZ;!h|m8h0q= z?m>}K!^jvG5`IAWj^vc(C7duH2IDxGGLCZU+<Qn0c&2e%Gf6CuGXqwrTzrsxG7`>f zAW=R&nHmSV9ZE9S{JeKlaNFXIoT6kH*R_bz`Ac}cfn==I7~3aAcw0;%0!doz=VVkO z+P`T7Jas9lzQvt_dg&(S1N}U~OoYZO$rI46!i3be`;NK7rg+TN89sdWu!w#g`bra~ z0qcluZk0Zc3yq*8A1DKNawG8*=iE~BfX;iqqBbKJ0CIUE(fX}Pce#^{Zi*Hdx;&i= z6dEh^8&I%$R9uvMdP;64EZV&2g%WDm3xS&MiTLK4l>B4P3xaO)5YP7k^$5`V*DU-C zftmT2?2n`{MjDQ#=01rUW?VjzDdY8YSljwCo*<Gfiwf9u4_%E6Za+K~Btdrd@aV+X zT%oANO)-4SjR~Q8Z*fbEwqBBm=bsjZq*Q=F4H13Q%5Jmu+a4CLan#){PMUWOEg;A1 zkuK3Qsu@zHnG8xm0bg6mQU^0203;#x(hMhXPC_AhttsM|`P6zIJZ*9CLX6KccMh8v zrO2L4RW|{;2`|jXYe4<SOf36+wIe26v*(x3Kw;x$n~PN|?;iSFr(I|v-7bXJU1(>Q z@X3%?&6kFJrnxS_OKXrkheyr(Lqgm(Z|%cwE0W5_-NHcb7R%rsf}I)QU^ady_QI%j 
z=T*?o8FFg-%mh4W5f%hMsSs5+-e&3I+P@c7+qLpx)eUM4!{1{UVzY`;;U76<n9C;3 zGrimDCZfxWsk=?C5jWU2?_3clJx?m0%isaX=a2O31q0Ac)m#;6ETWzYhq-dukiFUM zyOdUg9ld8=R-{43lhL>{k)3pkKiAQ6217}x$mF|0=lWo|Ih-*SM)eD8pfT)=rD~vr zaVWR_tge`w3fyh|*&?(gik4!T!)8N+<(Nc);xR9O<-|$Uk`NWqLAL>u!YYu&c*z0~ z$T%Q4c-YCW`8&B{xg0hGNQ2HTZJL6rD)1!Mps?A>n&RSN=*Sz&tWj)np)F!8q_0vp zN&t~)`!KT7@c|EMZxUTB+VZEUI)_$6L8aGv`_Vj!VNSEzCi``3mR!j1r3Vy6R^3+? z?sfgd7xQ35eHh7h?hK&n0P*&*msaawL}!ZdkJdm+Tk4~spkMe^@jXonIX7h};imA9 z+B{(P95*oUe1sc^aEQ)A_hjOOiYls%_%kDY;%M5I(m45018vmxcDcV%qVipvHFU@@ z@UmTnhVK;t0mK8Sz9^r1--s%QM=NIU2?!W2Y)fb2`;8*r57Y%~$8k_%zy41|BV=F& zr=X*CsA+Q$Gy%_*!iXPqOd4cf%FNN0y^*6wGr&LFuqA^vLA8<dcf-dj-YG18F>YF4 zsN&DSXp<T(8tR&#m2YGU)Fz$lUakIu%&=_n()e*^rt>k?S}GUE0;Yc+*?IRtpJ*~) zhI781JJ^ryc><V7yKz>ko;mw5H<*KIeLmx3zrHsw2}}mb2|0&3y6=JpozDy3tsI_t zZu?DWkXv1N%tJj1!*isa7{htI5FeVg{QTh=jAL*$SZ8f?`9#d)EeH320=?UFw5XvD zZ4nyWgmFNrk2Zo8asb`?dAH;{P2Uh;dqgSHHOU9M>tkLQ1$0_PIF~-COTHDffY(PT zn_v>3J2hYAeLxy?`B=|#!nWW6(k&;#VAWfrsk$l~7vz?Jt?siBUlxdq7-i4KASMFA zaUZImacxj;()8~N{Y6P3{X5bU*VC{Pch*ShMM2L7ax#;bK2|p>A;zolmt7z8V&nV8 z?C9E*F|*mc{Z{j7D;~rSC~ka5<tcRvs|Md6!Y>MyOBUSTDUWv&B(xL;l}Q%d!q``f z%~Sa*S_XB=E*Gsbsgl~VaX-G@oJimVp={QUa52$odn<&|_U`I7Bx^{u$lVWSOS`}d z=t^}QqvUobYlv6$cP4LAO4WH<5U@(gXl03S6ffwF3&^>QA^o7r7`XFuAM8l1&DCkb z<Y=wa52k^=MT_*<LV<K6m+LVkK4j28V`!0DM)C8wVSL-!5)kYK`U403v+~?uAkZHu zjER}~KSkD9{vk*A2h9Hu>`U%1Svfg9`#-vH^nW8q3Puh#PWA>y4tQTmYJXu#f;L}5 zV%Cmd8~y-JU*M9ok)fHMfQ>7p=AV7CvNL>@Flhh5m;RQ%`>W=k2K+geoV|^KqLCw> z=GRvtk*{MLxqgY;{iSIq@b_Bq@3lCd_CE=k{i!bKKo9vJ!zVmEc(lT<j-rZ=e-zFB zl!-df<I(?z(A}Rxk*|jSwOxdfh7OPZ@8*jzeHjdYfqq{@2drNP^FKf``Y+?p7{?#L zjQQ(3<3Fmf(9q*C|A+Bs$N!Mz`*TcjD|!Y<mcNDMz6>Tj)<0+AYZqSzBOc4&`};EJ z@mT(|ZU5!m{2NG;<^K^R`A-UftbaT6e+NzeUyI%S<;!0e-+zkT{dI!=aXI|0dC2<r zwe&wiVHv)Di~rO-{2E~UABo-l_kREPBS_~RAWB**tzU`fZldfan;+9XTRHtWfnC6@ zAuca3VFFRUmX|%$u08<(J^q~iAR!i{8JDlh$^vn6%F`XMcbA*~?1#llrN$K+4U3Fh z&t6-p4mV@8qzC!w@p1MFGNa<`<o{%JwR_!q8J&F`-`P(!q&!d}Qg`efNDHzntLp#e zL<n<>m2_96d||YxJeb!Rvq2-bLbo~2xWLqGYjO6zf0(=&n!@GveBVD<Yr5j~dbztD z>ER;xUa75iqio0?Sf_sWtHg~1^b?qkk(!MW8Vwa%4U|IRFF6)2Ivp!T9=1WU>tWDY z<PlyW7F;TtrfQz$DV?OQA;znt%(ca<*>!V%cQsnlvf5JENEjIFv+$o=yVZ<(%<TF& znJd!udPdil9Tk60N_dTW2pe6a!peOzybyc7hhU!%Wt)d+c9OY4&p>D>jCDeuhLEd? zVC6r=&!r7Ka4b}UIbfAma++GuQTJPQv}(A2;h0Z6v$)1m!~{*GZ>aP>QP|RVni5cc z<YKbM)xpf{s&g~BS}AiO75&J68!_^_S8<dmsKtp*)r|^~PfWffpwFZF)<lw*UlGt4 z)oK-PU~1-Zlkj_tk-G1!f)F!8&_bE}d!QBL3iRHH^3vXD<v=`Ce<I9aqE6t^wS>q; z#CqXK?n6pg<#X%DL19iq<Iep<476*&Dpx(IHb8YSjoe%t_|$7*BdW~>m)FC~T-C03 z?kDL%6m3*iyV$rzBJ(vVA`8C0GKq0Cv7vGyGE@BGUG>eR1o;V2f^}!7&P-jh8uM<( zsxgXM;dwHJ9!{SP+++dbcp>_5IZk&u{BSW!ck$^^_UTam;Z!-!a4C9!E>3p|_IMd4 zg&`Wg8G6xSs@}P7Scnbe*p*}%I`vs{1+JFw4!~2e=o89zSD{Nzr+10FrGfSB%oyvH zOfoOIx25`u*EXCQAL*HWkZM%4;w@AZOk93Mrj?`Rq!B=skt1W#&sEY6!8JzNajGC~ z%Iow}+KO7*JPo`gnSgBh+-Bjzcx_)li$H>0g@rm8ZHumX4x>j5;Ux+6qS#-J&$}q- zZ$UR$8hYcW{6|V}$I9<&%s|4@dCcpz7X%i9w=cDrB&F`Cz8+{G-SEsh+AY91C#R#= zy&WB>HnPLK<e!Es?jNO<4+f`q|OI>YI2xj<coQK6zDuEv@_k1S<w<3;VuKnWD* ziNX{)f#=htDV4g(_}3XEdkVA$Iz-UrOSYA$43cQgGN|=ZXp9pH%wzkDFe6LeICeZZ z^m;{Z^o>Ul4ogrg7z!?a?`MQ5!zE~RMsR#gV01==g_QnUWS_QRb0YX5v^@<2PZu8D z7|D7^4LN=~UXEFHoC;J1Gzts!jq4<<v%%ivYT)dV^yo7G%^36c+{r_}DOnz^gvw8D zR$Pgf4X@xtfp*lgXfbj{Xg)M;XqF70{Gel*$&Z+-E`FnQq5U*oYm`>Q`1&M(k<xu` zxLHAxb7rKaw%FC);syK|Gz1+~n+f7e5ICtJB%U5N_-Hq%N1*lFItI^lkjyuJ;tL_? 
znMkGeuwB2esYbv_tzFIHxqc3zjk<9<a%F`!gdV@b1?e5Xi7Wt62-`Sc{9q4ODyvSt z_VevsG@E`4>Oo!>^zpVmT-+<Fl9}8SXE!!G&WLG}%va{GEq*O!CmKX~a1OUhaGLZl z1u~#Y{f1<Z14ZE#%hF=LdA}`dc>de9Uho8&%gj(qEuLEl8n8htR-1sb*I}*htTLf~ zv-mW<{3@gJD$T5|6+m`$ZHU%M)+wB-Nn6ttC)t0gjDrLcqlz@~0XUyU0~CpDxgYZQ zWp1P`R;L;t;VWO-kD<bSYU9}KeT~Y4Iq)}&SJp9hl&qy+h4ObfDgyRrfu#hs?Fy^> z&pk(ZhTcEWPoI9jvtIF}r%!ud{eF0V?U`vQ|2;XsJBDtQv4Xs@lED3Q!Sh0u#3ZpX zv%@3esE+sZZLE7gNYJn$+b+M_E<jt&NPG=xI&+Q*LSxhfP6q#qzigk-GPXRRSmCxD znNeI{?m9otWlv6&LQS|uNEwMGs$S<y$v9Jj^6Vz8lFm+2neKbiG%Ug*G}1K0_QG$} z>F)?k;K+2~$gB~ub<%Q<VvBr+Xs^htL8?EAn1uG74(6knv{KH}mYW$`zHQ*w8BY^7 zYOK&?N-xB{<zk06Gha{T>=GKJ0WIhCZx!(m=7h-dCaS=TSDjLtWthTiU}t(4G{2LW z@&vTveJmL_>UOc)aef9JOq@0O!nXjXiczowm()`RrehYgb4J82u_F`ZrP8Ju14NS; zAX}U#AScYN+8doN`fvE-dSBl;>96?sDW)d>EEkqjSAMiy>US2oNd8Lhl5L07lFr!? zTa_=JnW!7rZh&7T0?<%SS=8E8si|RUC;K`qadaQp-;ToFW~xK1%3xP^6)@h3%JeF; z{5w1VJ2amV<OvI8%3-vaTW{g|Xc78g<~yei8onWdXAgtSB9HnEyLKP9=vL7*QS~^< zZmg=kq?(S6&fL?%PQyq;u)`XscHS5rWb)HK_z9ST7&}}@@|+k4<e8|zFu)`(1r)1a z5X5#vF;Yi>v_Uw~sK0|7-Z8)g*g@50JA0i6=0ch5XqRM*=aFx5`oziYO8_eNNP#xl zJ{$m7f*)8zEGgYo{w3JB!!fYk38=$a;Pr7ZB%0{(GKm@pa#8$!_@vdHjAbh+OBsHx z6p+N6MNk`Q<85k@CE*$0^~eEt@j*w=V6po`Xj;MaNQ=L462mWWF$;5C9kULG16*~Y zsta*qL!s^xneGz1Akix@FT(8i`T*{je8QIS?*&+X8sB=d_eaamdvgxQb58rqcKVBU zz!P-mQZzSGR1td$b|%XXCaO^J^sy*RFpJJo4B)j+Ft)JbH?UP%l-5~*dQNsTIwnM? zL=SC_7Ihi#CyZ2Y36L7_5NdwN-^KcE2(p-iH5R)q^w0_Ag#h;f?P~^Vhmp6atZ;+v zC)H2PJIi#q-5&-ddi(!AAE4ATlia>I7Y<fL#rA48^_J*8j88rwm;a?maL5~yoPknD z6km;>V4ofBP@sIHzXY+*435YUioo&<h5lDe<5vehA?G~8*UzftIO)gI+Rp-b;R9W# zfwxU`rS;WmiB@gX<f0y9sq4(LsLU{6At@3N*oY2R@b?pm_Lqxwm;4NyIxa6Jeip{0 zU?){kR>ik6F%u+3b}*sFRP}TZ2u18&Uzix3*jVkGTixgz>;uBd>7_iR0aqkN1>z*N zvJhovbO$luH1mA@{xYfmq7z%le;QeX@&U3rVl|>n7~xN;dUOf+M){ZtBV{#+<Y>-8 zT0`E3_)A?uk|PwIl<X@}32=izJicCvcvZNBlZ4t}8i=Ye3bc~rgz6&|64RHAaaEV6 zi@wvx=D-MbVu;QQuD;4Awe@X>&yG3_v%q#Ve^9i1Vzhc@v~)_ec1pDHw|m_?N?<JW zqNlr1qH?o%E;dJh;c<h4i`aea*aYp_1}+E3j_*%Uy~zuJ`{@$M=@M-7K&FP;NacAB zr&Uoj8xd31@w2w12Ma+LaP*@H)n)J9HXbUykt#FlYD>xsy(`l*6TK5-lWRi*yxoLM zQ0V9vxQMuDNoeWG2&w5PiLvl8kj#kYuI40fs5($GVlW|+zat_aBSRj*BW$6e?C$RC z?(Y~J_>b4v%`}$D(bu%*9ol~!S6JejAZozta;PnJF}2&;T=djwa?%bZUIiW1Ytumx zA~cbp)UZ{d7w@VRU{B{pS~mYkkj~qd#=j!BMu+9WWx~bsvO809TS>TRi;^9Fenqik zw*X{GxC)q<v@JoLk(9S98!P5mYa1jgQdL=UdV~(Yzu#(}pN><MyTXk%vqmMUy6h!0 z6zeYq*Y@|(4&)ntO3%ft@(Rhf%8hkMr63UO<JR1&(weA<)*g@?<RWM+#v~Z^X@<lB zF+r0CS5JUoo0rXC4bbT;KPAu$nXVO9Yi;3tdTI_1co7-^?29o=xnX}M4<uGUNO)dI z3Nt_Z%Ew88p;@Y-QKqq0s<~FGu}YDqNtvoKijlglZ5pSolc1uN7_W<zmhk<ZfDM0u zlwy*QX_god%`LixvbTFtXP{q0VM=9nS#pi}Tgtk^GRFpE4Rg%{+;G;WD_ZOdfjOKI zp0PNEF?BIEWNOvPTK>bp(OlUxd5htTk)wdC<_t4t#@d3j(J63dml=Ev$YH`%Z5bSc zSlNl2wdKaZmY4gZk<*n2pM749VlRl1355<dAc^`iU{3Z7yRZv<9huP*lvS0M_LY|B zmVlwTr1b_|RT$^DB$RJShh{pS*QbWHlLT(y-0lGp!9^B)*)Bpzco-QeaVlDncZsYl zOFEaP7iVXk&Q{xl5oL&bOGQN@#)UTP=`D7*hY>3)n02i3veuK~o9XD8nHZ_bXzg}+ zk1zL;zP^W<I!~7B28#UBR>o$+3SRh@ENt}1=;*(|VO*|nn(S}sYF*?QYg&s<Xid=d z5fw63xMx|~d0HKDhqJUgaAVeJicwu9hT{d+H3nJ0s+aD}l<X@Em0=0uoRX>~e2TI3 z173U&irX{8XD<=GJ~GGGe9Z9mqgCelwyi=^LmH|(6gS$`7V6}9n;DvIyF4w&DDuOU z#>2v8vjP;M8-cFl#MK}%0?Y6i?md3py@xq@LK<1^o*1UfhOSJLwTNM~VQ}JOMe}6p z#cZqT*WG?)Ig~ubIOE{p1gnCHjQF-`;83@B>k#+-_BCqxl<E+Isat8@`{*#6mXG!r z6UX}O4Co^V>_BCa2o*`xWWO8SFFeE!0;=KuH?xED3{z9$Oj9Dgum%W<(NctVR#)5o z@pImhP35HJN?_vbDB&a|#;rMUvD>`QlrM#Gq6o#_U?pPtg*IC9hQrCIg2KuVH?t{m zo};Uw_Hbr33=N)(fdpx;2_BC(woZ?)`}}Xrp^vBLufrn78_hJe(fkuplB5cKMK|p# zbS2Rcamnb!Nf_VC2uh;qlQ<M6@ztlQmKoXyDc)jv7=~sk=b7oq@JLul2r2XG0eygt zKtnrum?6@=MO>_5v`vO0RfpuAF3Uc>PROS{D<h)>RZFjq0bSx1<v|Wf8K}p2S>WLj z^n&lert(azC{2D8{D>6lS8CMK)bYMP-`p(w$|;i27#W5U<UwmFfN$;4!=;S9Qhd2# 
z+rtrzlCW@-=p^M3X+vY05;=YbA#UbTeR~I--z6`<bW2beV|x=mG<SIaiVJ3Jt^7#C zcenKQ^hM}yYh6`s##G>!6fK{d>j06Kid)1n%UNP=`Vm=S!&2jek6*-rANQsX<COZV zQC}pdl}C6nrhz5qUS?(iKSh~1A}N@3Xz<V)XB(?xZMi4&wU_0k>vl^ZvVbs~+1s1S zN41;KSKa*l)GreBke7xNmMh9dC_K))x}goV+4JYuqhlICRC4X1FEJi2u(#nyqq`LI zudpL^d_@+c{B{HQ;*Ky;=qhWZf_Bm<uVg#EpVo6q5(iF(9vcxW5r~N&G{jtm0O8Vt zlDw6mvbr5sm^y8aAk4Sz=-de#Jf)f9ggzSIAoe=TJ#hl#{mYe=&KjQ2>ee`D$m_#! zkZP^B#PO5&t=NeiLspPN&<sOBpq}pi_--o*&nyW}wf9nMEx;<JC&w$tWvM+P?v{=t zu{E!6B0zMt;O>%gMncDcfZ#y<kG`$I%1#8BF0tdyk8UVVt;+CkX^r%fGPQYn9Q15- zBt_}8Utnm<*GBOY>&sB+e_Jm5xlrb|D86ziKm0RO@qXcvGL*Ulbf-GT#Cj-U1Tp;s z#mLUcKU24<=@WF(#U-=D8e0S8suh+i-N>AnDA+jU>)E85A1b2!m!i<~{7YYdE)D<t z4-8>g1awwNkbwM{vV^u^RiOY;)AB_0*dSFzH<X#5+B8l!)Kgk*$)Lw<Ja9XUuy;|i zQ&dy3<DV!&-cHP2dy2(gU*Ymy!duw_Q)mM2G>Ev!JxG-E30Q+*-HFO-AKFTa3z0)~ zu@Secx#L%u6})`HeC=d^uW_!39Yrz3coLEprTV<wT&D7d{nrC(*qX`sdst~|RTkWR z6ItTJjw+}KY}d!bd$2zw?7n7zO5E5_aJ2q2&_>(RXZaDfmLV0|t)G4&<%Ua;hH^JN zlox}=VRVkRb`Q7TLnNifDa$Rb(Ni*xaXWU7;U$Nv1cgW;8o|mt#cf+8J>nF#;%qeB z?QW+d((2|Umpdldct}6bY<+GR4VuAz`FjG-p$&JfZ|SbAL#ptSP_p9Dj8GHxF0M`W z&!`V9YmM<#7ytA)xQv?!p2FpFM^u~^$KUfvo!V{}$qqBAh;?ru$o-uj3ya2Olq3~m zKGt?P@h(Af$SNE1a+Y+mk`Nr_+%+=m@p5-^dY<7KSaqUV4^@$)c!S&|wgh2ntRlid z-XglMjicLAb(JttfhtwC>mY)GpM9Z!=ywhED_m*}7$Oi3c~=p@((R|+-q&2E5%Q>R zBouf-0yi^#9#4ol+q4kL%Mq(f@rs>i?g}6S#a|9{zZ~{H!y}$K^50)7TB@=<8ef3& z@*=c10+UW-zlZfHGDOGJM1G0KT4$yjCsGv3=)sj$ajA8EA^~r4)_mRfyn}fO;yCd} zy9qZ@2n!+q(f~-vO(OV_wUD90t$&=^;B+(tq2%4>2l~MR`m}g<2G^Pj9G^p=vM0nG zWp0kHszbdMHn!L2&=l?|kE%0*++KnthpKSV-tL*Vr|?UXwm};Q3v&$?pWeyb&g=H$ zq5myiR|q99n&-*Gmb3STzj4NCxE`kKE^@xqcWjA-?>gthdG#m~3!_@)`Z<r)gaZ_e z-Vb)iO3s4;yfY!Vsq=pU;y@k0jh%{-g=<Jk$H@BP?LELgD!-FW!{-#gA)_op$*oG> zwF1cJ)C!}J_mH+r0h5zhc1;(Nv32L<LhawEpt8-Vv}7L&_!hK>H})?%r{XJNm8I@o zOC#%g(Im9KZ>F|qTGu;Qz$lVT-h)HU*~kgu790|qoCfmVIe7KP&6}8n;o(qpa&dm` z?j~UA=9eTWV@1fKKq6pp!6?KbrrtlRCn&o+uYRa|coJe;On7@R4n=JWo!;5cXa4#X z-vqq#2R!`OUGMYjm=FG%GMa-<3IkI~U1M5NX+&zKM|iY7GQiZ?Rm0TM$|E$t{@RVD zd+QIL#T9pRY5SiRG-i-<Q$SS7x|G8sk8Tu&yr+y^a$@ChM1G&8Z{h6+yC*RHU6`L( zeNfZ8fUz`{-SgUB`BVztGWG=;-gW2XJv5yX$CmGvwA@fZWC|NclFGSB89Lc|diX@e zf_txOX#;X57Z<S7{tfx-_qL#><W>quSr9VGkqR0r+9e>8S^~3sLvnia8;6FbZelU{ z^Z%6j!I>jM3qEw!o}r<dw#&JdHPM+lelZCyfuWXeURvf>2KF9lRo9va7l&qVMdWn| z8U&veHe*rr1gV#GDuqWu<7AY#cg>0`zaE%zS<f~7@!pFQp8op4FKq8%Zivx%b*1I{ znxStIl_FBksYK1QhT@{*1>=zEjfW|fgWBGCg8Ct+q+O*99i9Aq!3<=TRkrl@-ncn~ zN(mk#2;H|LOWW(qx7M?YYT3n%z~_?-o2ogd_~-P5<Xwp^9jxyihn8r;pTnBEIWn>N z+j2YT<Ue#<aQJyumlARcf)i4}=h%4q=-b#EI3mMxI*MDy>w9koq&J_}3p&N8&n)jI z?@}S{SPX*CXBZFOUd=5hxn|Tmsm07Q`vj@Kc-eXOrUAVyu5obX@{LV!r}Qemk~X;z zw4Rf5=hgB7OOstUY#dl7ZW?t)+F9P%(cT*gekQlFx?^AfeHjRIF=*~MLFN7>Cup#8 zYh!Y8&dD>5ng=z(m_gpfBBUasXfUF1Amh?d&&U)A{^TL@aV7H)=4yVk;G>Pu(Bx!& zXLn%@1hz%Ni76hTkv3kwx>h!N_FldzjTtpVd5t4>p=A)#o<6U~rS321S|R07B;{1X zq!tK~zEMDVcH@MZZ*fF=;|W)P+49q;pL-@Y_v6;gx-lWc2vO4%{h(Gh6(3d=&&rPR z?i*{i5tqcxqR+@66wRC*kjTKeq{2&eJ=cZ~d*<J&y1+a{xc#GJy5{~QY)S;2>hd<R zepx+H#aH7>uT^%84Nu=Z=<xrSE@16%?drsMQ+H2sZ9_^yaY#}sU=LBPp0%xxotIl& zeN16bcy5QHeFCM7GXcMWph2{>Q>mnV0g!)A*^fgf*ebj>EWcmMJQ_x_6T1GQ7pHFR z*7Pl6fG_WwyI>y6rWv9a*a9L(E$f_9cWrp#u21@9G2_V7QqGEI&Q895Ab=3D4vgKv z%pW+8iXM3_X?l5K<NkJA&w!eq2g>9Nn`*jch8GRSm0eG(8SWmQo?4up{Br4b?3*z5 zHyU`X`+RWT&hXS^`;{xD4UMTqC1J_w`zA-z7HJhy?w8i$99?zMB8v2a4I#gQs9B1n zL-7UMJgC%)NM6GThuB85ph^|1n3KE3{sqgu-4|fs>(Oy*<F!?7k6dPrAWh#!0li3S zX~*!Kw&D3ZkwpWVh%5>@XB8CizQ901O)W%x2Ms)y^awT^k9z#d|IB@3s%O`hH}BsH zj>&=W_AI+9qpYKee{odFP;&KXMaTG+v6-ob&*bBuZ}1J<-^^Y+V1eewv2_s4M78$* zfwIQtwBl0e<L+paqi<`kVeM(;lkF5;W8#;`rx$cq#GFjnT+A|4!Zsh67dB6)R`d}y zi*bl;5-^HHgcP4J_LnNJ-~QM<w2E2g4<c_KRL-m(sNvHn>yS?+?G%>NdUNw}O4YC- 
zvVdI9RnyARH6Sn|HM6Xt36-Kb6tR9ATH+5d=jZS!|H|!+wYyuXn8Xf?B8!46GQA_I zYBaTKq`GTjc;?Hc+xai`7!GRu7_Wc*95p|=>+03Y<`z(U2wptGqHMhVjO-j#Ef6pU znFbW9xumdb_?{6ornz7b%!^uOiCJe0nx=r-OWGuwh1RfY2If_qOt$|E<!Y{>E<Y_c z46d|~Yy{<WGpPs2JC|ztHqy!<%)B$#9y}{<pK*<?BbId&({*(A4+u-iD5|Xo;HOZQ z2H$*`V={)NQ_Q0>hZFO2@D*!!Hdk(MB16)tc{PXx3^fqx(WM~xH*y-rx<+PaR~Dug z=O^~3(9h4|nuKt~pnz{z0sEUBgI7`Bz7T~yTJ3!d?HuGyk&2Efnr>-QmNB#n?q~T8 z`Sc>itaF4dGeoU2dGupwm3>qYSuzf3?5aK++xJdr`zw@{JG+?VWXsT6dFL!DZZ%KW z3vL4&&!~tPm|SlkUQevN&Y}^(rDkvK<`$fgl2>&JV`;{Jdou7KV`JEc0{{$A2ZpE6 zb$W4OdTDWHdHLSngPy_bAoxT~@{IDXo~dmab>o?}<E>X`puz0Q;`9>0zW^WE=l>53 zX<>t4avHl)QxuZ&3xnd55y9bB?q1p!){=(aGByeF_6huY!Ng*g<f0a$mYKp9>4Iiy z!sclVY60vz!OE^#j2c0@4)G_v{maX<*Y6uqH>_KN$F`9TL!Uxky(pc47D>xAk_$G0 zX^j(W4?*N5&0`pq9L$_uJ;S52%PTtu2Y;)O9%tsj<hPtJq3iO^mF2nBm76Oo>vwNK z4UWoT7B)D;qb+G38CE!uRX<VMJ~=qKxVW)8ySf4&#MB8i{Kv^Ajei}w9!S)GyN58e z)pv9iR9}kA&I^c5bPfnLb3tgDS&Qm=iJC@<n?^7wd!9b8$0+9pIxk?F4E4NGJh_|) zw|<zoRU)aZdscD#33dP4GB~}Bx#SA2CcS2uQ#(Y(^OB}d1C^AsnnS|E?Y*w?EmOZ@ zG8rcw8%IP?NNiSaYhT~5*Zt?_#*R0w7pDc9pPiWhO#5eC(U1e-bF0g9Yby)uYm1n= zu|B`PzO=Qu^L+2-@`{eB9|4O3<pmq>w97e7(|Ju(ZCB^uQ_yT~Z56gKv$O<Tn_66) z__8eJ_({~yR~f#Ih*6INqobGFE~DmQX6E=r#n~bKO`Kd*j4XLHJo)uQxwZU>C2R=J z>z&t+2H^RO69i0?m^Fe(q!GM^kqnx_LR$Vmef)R=-oL7BJ$Q-&esHDf>MA^{IcG>% z<$%busrYcIdxNVPTYZR3Zl+RnS2S?|8y}gL32__*bo+tlSJPmA<2?2;^rt5FX<uFj zNe9knS6AoPRsmg11;{aVd3$SRdmAb|-?)2s@72@F`c4HcHvmjjKPWi2H?L{B?DBN~ z_~P=`28Mi)eb^?3{Hfn!cyQT!%*T7LU$1Iu%POyoO3%Vvt1_~8RMa=;RB>fh_ob0{ zJ1t;DeZh|3H0iui9FJivuVE~;ycdZKf<-%oP#WPHkbA=3zodNl_)|ka>MzuQ$ajuy zIYwUMM!Onu(<EXE>!i}2<$F&{FVApj1xV;v+k1NlC#4pn<5sMRnxpmn&?_kU-S;r7 zpz160YipqD3us9PuvfOWP!%}8wY_?4d+qjZsOz_HeXe(IKYF&e_ws2-bPf%l4xylt zj#o}<?RZ}ERAc|#%*yJ@)&^_}O+H5Uv5<eT^B(6q*Z7xLItQ;-G_|A`mtkfmSb2D( zRu}4;(91c}DY%nfv^&GA!=mB?v~%l4azWLNAi3y7DCI&T?{!WV@!;Xk34wopx&83j z{zNheU4@OK<z31(koDvj>{abz0r>85=m@o3Y7Q2zuHF$b871X#qQ6PSAC}GW<EiKb zTH8N+^|h7H!oInILLDvUP*H^6LDh{rcMjIgyLaJn>n^(9yZiFp%h{DxB`pLYt1`Qq zS4e(eVe8HEj+qhI=&jA=FXYedYd(5pL0=#^i2V_%{$HQWf4)}L+S^-HTMuT>FDAj! zKiJIGUC-KDNY#u)(urKk@dp7TG9eQl{b)|z2o9Za4xKPoji57Pwx=as{vl@P8&PyZ z;GdqJ{M0_Qg1PDnB5xg9C2X3a8`y$c>nG#X&^JA?{wTL`Owb@$)yy7jd~{|GnAjUP zZ~l|X5ESk6=s<OG5$$Bqw0{wH4DhSBP%^$x_}0C9`0K{Kd-r#Dp1pYy5}gZzPbOyJ z65p8Ldb6l)rekDjegn3-g^C3LdyLOVlRtsi=N#Fc;rZYVl(7dXK+igkvi0^ec63oQ zwjh(VB@nYdEn)#tHj|PUr*1f#RtT$RFg(&JA-@x`{)dP)leE+BljkQG{+VU<_D)&H zEanPScEgx}VWhIhB{h#4G6_3}kfK{VFPn!}^}O;x<ZV5@f)i5<Yw8Bk6AXUetljYg z@;>!N;lXmuq2+y_cC@g6i~9D12gj@+_?vg`XdC+waHxr!geO*yg5Xzm&yFpwuixHY z+Sq`9p~A3o9PEz}eDq`I;2`+D<bvYhgk(fen3cP?p^crmmeqHnHa`fNoIbAyQqQg( z%Ay&>tRBFm9srL-a-QD{nxBDyHnsMI!#}gM4=q>sE;bIXfXM56=Zl!9=>(#jJiWZj z)tSwarTeK>Lu{Hp8s>JM;ZdoDCFooY4vUKooAIxgX@9=w%;Eyb`{!77ZFPSAS2MFu zJ2uoGZa;i<`_U2g7BqYC;Ng=e&)&RraEl@0P^FS`a8GV2>71|Xp1(f3zJBM{>h>mD z@YgVckB)5@a0RwUXE;%FFDEXwcjQ)9!#IeHigWZ0G<8B~nwl{vJAW@=dYV_4kXMIE z#g|z<kWtm2LDiQ*1xcyu|6g+6XL<Dabc0Vg{4+(q2>qvI+DA5`3i||&<JG+D)xGPe zq+Fariyu6H+dICYiAWdIwzcy@Mx|$0x3-VWO#jx*%-5G<=l10toq_x;@1LCv+Oea} z`B$orf%(p($58L>?0i%2A9#M}@nckd^cdCt@#D82-?wxQs_J=>3K|M%`-K<wS9C43 zUR#`4-dMf01>buhpNX>f2WI9RT5NSx1*yjbvth~Us5uF!8EAI$1{SB~Jx}o)5wI#! 
zU$kdX^QTuq(kXe<Dtgk&yPsAI`cKuM|DzTXRnU1t<DXM5b<bAyETD|d@Vc@?s+4`9 zb^yxBpObMMUA(jT@OgA$uYi`Dv4fLWcywN6&A{mBSE1{-Yd7YOGB%%qzq!0Lzq-13 zK;AL5Lri*5+xJmNgS~ry?{j^KsZXAKu6w(Wp6tK7hpxN#c6RSR-ht2Vy?pWU(~kvZ zO$<W%B*LcdNzKKV=gK?hu1v0>0^}`>;G<GHGfPXD96px)pS^jc)I%`a+|yflsV*V6 zATTb;*+0b0*-h8Nf=|ooKa@jG3!0LPTc1<$p;7RpmUpL?bEB4VA(!{~4}<vsYY@+6 zo&4<C$wI0>Azkw{w{fEB+G^YIdRon}n0c}~vO&YQk@BLWvUSvhXKw~)wl!SSCG~6^ zeEdMW>N+~d=H|Y>QhD5^dE;NFXi)OLzP_}%`33l2&CEgC@9jJW4d3Vd(bFeT0djbJ z{N%~b(<i&np6xt)x`TQh^Y~=H4TgO9_+7N@zy9!cX=77b)d|8$HP^Jl_Sy2z`L>~@ z#Z4Grw!Y->S7F~U*pFjlejM*=tZ`JjeyFOY6@uBwv`mnCYY(KMt%H(*>1p-A|Em%C zy;?90nszE#S4tTdN+}0&DaSL0v1C^1#O5i*C-=ko-!CnL^Cg#2(QEtYrm<g%q<w*= zZ<DND9;t*)dgYb-PhTfj42tS|8`wF5$fp&T_6-kz6}tW=hl2@PF)13<71dP;Bo8Y5 zKJd5Ez<<_uv~E9yiq><W8bvs!K7R%km_JxC{G;H1vIo83!5)VEJ(TBp^mzBhv)$)U zO>IL6d9=9Hks<kgfPH=c!X(<~VALMv?U$D@sr%#pv@b&dTI#2ox_dzC6LSmvV-r!X z-o;JF%$#4_;s4Q${tulf3VC-bX(tMxUDBQcUFoz#co0Q=h(fEFx?g_&^l!%4%a<=^ zXQrC!tDEYo;mOk{d;iLvdGju({ssWwJhWEQHZ5kBpyFAp?cdC*<SnG_yY}GO`0{;2 zWWJQ1jlH)Ih<tftGb%qhcX&P+mP<L~*yd%SfnWMOg#hj0F<EzvpV^mpH0@tJ%#+Wm z4lo1KPtcIV<C7OJaMiu%&v&0a-|qq_*+-e5J(M+iyaR*9&p&@Cs%W9%(>^O;XdF;d z(lHP2ZfJHLW$)2yKZl;EgIc;iH-~Ep2`<|M*!K(#RW`R~mR5wPWVnSySbHM%ZS9o} zObB#C{?j;_NGp_7+Lc5GK`QG`F6T)m??s~;pb*qxl+<UJJ^<m>zX$5~?%uYsG(XSF zO-)TjMn+0bK}JDAPEJlnNlC%Q$*!rcT3=J~=kL|n{1(R8T)wdxmeX~?ItyHls(Uq| zsHJ;U`R?oYRh>6w%mQ?+?A${lk_(Dk`}%%IescdF0#l0%5G2m7uFQP}{AWwE{owvV z;4yX`v>L-L2H=BWKYjJ;$;($yU%Z5h`7f$s-hKJ%*Ot#elfQ5K(LR6Y)sJsy7FT3c zod`J8q%5P0F3(-+TkINLS=iiK+1dcL$2gp+uV>~QXF2WoJnBw>4d}(QaoKsu$XExT z024=7EfX^#1H>t-99E+c8V)fs76EcrAqrMu3N~R%HevpYDpJZivZ{J<@kxJo+UI9y zWTY;#u`(+u$hx^WrX<ES*Vn*{%Zu}E&Gnh7Nk~sOLp>c{ZZ0lPj>6omKl4IV_2r3b zh+eO)0`@u{1qz5t5P3fB5GqOgu8}3sd1O+(oS~hK2P!9DTHkneVjNd0@<>-Rg`QV{ zy3D%1wtx;=S8v_gk62L(|8PH0-MRl{7p?7Iqy7>N`KwpYKiAhUUVmHRMa;XX9`WME zn-3q}{`Be1hmTNS{rKV4`yXGv`|<g^chBCuef0F%?u+No-@J}aDq_E2PABc)mef?) 
zy-?r3I5@M0_Vz271SY1NA7|p|^vcRjutsaE)2l1}H^weq?#wE$3QbA(2#>b%K<ZiB zDH)mzxfcs-*-=uE*;trZm>Qa!7+|WIk)E}MDGwJ1I~Nb^^WPVEpc+)2iHV`DsqTpB zE+2k0GSEGJ`gBO3-=DK@J5OGulwZX}uPIeS!lsF+6s1=kxwwt2S@@m3SA#RR^<3k0 ztn3iM;R(3~%{{%CTfyUZvsk+WCT_jCvNFHA3a$p_;ZZYW_7(mCCgqNC?_fgELFm(i z!j6&gZwP<+`t7T?zgBn!9^pmwyKkPoeA&^_R$W<EU0GUHQCeAEQdt4Dw7jgitgNX2 z>cGP%&%o`x|LOhA;xfOaIRv&Eh>ZN!86dx7bY*^heP8Wy&6@kBvzL~Yq-CUKWTrz+ zNlgihj`H&Nw?`mM?d;5*T}_<b)Qn6Fe3Dg65uEI-SWk+ZvppdteL>xoUw-*pV*l#Z zOFBAQzVkf5^D#f-fp9)^_DpVe`k%0SLz64H^<%(%*Z7uGWUaVWHbk$|)>)^8Onnk+ zc3-_KZJScE@HBHl_(sR)R8;jJNKuae7M;h4JZ5Ua;`;hGLF>MU2jCxKfCoQ=&c^%- z{EL@|RsA6ChwJNi?|1h0m>3y|h>5^PepM+c&YU?TCoBEx!%xt~?mT+5^WxdP-5t~# zb5<p8O`rJ6k?P*XwxQ*lD{J#>D-h3Oj6I&837vXq{01W<0|^Q7;jcS;_Kc9Is14Fz z)6CMu)z{SCivtJq9<C0j2+7q<kS|_B(DC<MCoXwWf|G;ow|+`XU5%WAd~s?1e|z^z zn?@?T=OKWFs^*%b;98~O*TkjjOCf33&^x{P=y_;XvznPLcv1A6;a1d~;op7(0o2kD za5c-zsHu``s6@@m))vOq{F<ftqVQm89-(R92ma{+4|AAm%p<1mzk;c6fBg7(cbA5S z+TX|P*^|Asm8Jdq_RVV<>5Ia`g7^3Kw(sA&_ZT(A+<&t7^4*)5<YFpbEiy3+r`Y<c zp2fN=i`QpY=hi;YOu)BnYH^`=aDbbOv$w1L_V(tlU$?sa_U#*aSt%}F9&0Z@Ju6#n zYbO~kOEy-fy`4wfTkBA_Hr6l|eu1r(*%=}-W+Bz7n-f2M`kP68U-xAKg0oot`KO<L z%Fj;sa(CU{#&VbD=g*#kI9QmO{P@55lb^qOn{{cV{l;e3<gN7DQ89}YRb+#zPb2vS zM<KnSt-V(_Z|}K86zE#pdxS<N<)flku&GP8Z_lo;gKM4N+FID&29gimd2-?A`WnX5 zVk)#>*xFjYb$jK`-NnrfQ25(ByN_SJx&P!T)F*F$+<X1*J}RB~+0vlGRdCrKK7yxx z|I?=*KmGjiK>g|GpWl7>2yDX>P(iE}8oYW3Ek5l%dkV}a#YG?f`Kro_{CvFqLxVS$ z=5O790N;+j7Vz-h$9H|#Mmfcd&z#pc52>i=UVwO~Z(?P3WoZF|?%TKLH#eu2mk#?J zcoCTA<>3Z&4}YD8sxk`)yOF)Ku9dC2ne};DO(G&9R0;!KQ2(MUJRu??Bq66_5K!pu z?Zw<F^lwxQ^>vt;7_q)(Yh?jrD*+(^$O6{iB`+(@&d#>DvheOlEPrfwXz+@+yQ`YA z;>8Q%aN?lC6HRs1Ab+39@zD<-K49r(pMD-1=&LL(EHBQlC@H8aD@;#Kckzkx3rmbl z$TIgx5Vt8%vP<O@k|n2MWMpCy6BSX{)YC9?wQ%zah)v9|uDv!jT~u7CuCB(<cOK5H zh^Vlkp+S99Jy5+5007^0r6)ZjEjB(DYD`>AY+OuyLR@@ed}3lkK|$`q#s(-nyrQTe z54#Opl97?}^YdBV+<gB2$4BU-IspG@cek<rQbArey5<z-@7L_=iqiGXO<=yYxuK{a z7iw`q9_9(E=I4}`6mD#+zyJ6Xbe5ebd$^caS5bl^oSB<_<?5A<JGUP`L1phz!_Dp< zbe7S{$@3y=1gr`wPRTjVQ<wUeD_gHBt0=3eD0z6g!LOcNTAbb7JUGUoneXT2;lj#1 zoE=(vdL<n_O;ZakGfNRw0}>wT?+7XX-|x@-=l`7gpEJbYeXb<`I88)AL4S^4F}tw( z8#eZKcS;L$%}tDCr6t8gh52}SB*aB!q%N8n8<rI2K7RBND{Szo=K2~q>EG0{qP)85 zaxk0FCM`K$K~6?k5Y8=+<VA58N4tj)e$V1`c6L_49_w55^mJU@oC=Ea(5c&7u-wTl zAvO{s-{O+OyZ68PJC(UvY0S)wuw^wBrQiVHjMOCfv2Yw<pZ&Z&73F1FS(s^Qsa+iG zzS-crcW)UO=y|!hpf|}#OTx&Gu9EOenVIOR&oMD^^3&4NTALe}7iC|nD9TPxHqzIj zrKREG;fc-8&#$WH=H{fQKWA!WP?(!pUQ&>rlAx}tOhrX0E+IC%wuU+Np!`-=mZW5) zaxzk1z5-R@97#z^P*YPWD#=5D!^6!9V_YeE&jbCCq9Q`D9Ur9Us}CP{pFex?_TA+4 zBo!5vxR{8X3}#m)F%@>6l9Iy1%@tsVtw96XuNx@HNwcytBU~IYx&hyH>eLVE$q85? 
z{Qid@sA;G<IM}b>7~XyP^8U`wlQ-|C=Vm1(FVN6ZUyu-SaP*}(uS+g&=@4C4-M18- zUTJ6N04@bi0UXSb(BQ>ew=h?hN2Vr(galxhpkk#(OGE9u@4qJ@C1qe_1gRIkC`Cy{ zDI+Z<b@2iiV0gq-Ss7{Y`NU*Y>=*StLNX5;1AQGWIJ3e+0+yyGiE&Xm87aUmJjTaF z*jSp22n*8D(Ezbqo9o|vb!KW3F)^{U<OTGxLRDFGg<}O96yQG(IKc<~ygdr@vdW6{ zlH;RQl@utcD15x!e=G9<Hk{DIuRnYG1dfcTsEC1)9^jFcmVCHPXL~a#DM>(}@ALsv zhxBkeb?OvQ`uy25TwnU=(L+Z&8~Ch=k-@>&-@JJPikBE0b@(INn(F`kkAE;R(oc<# zeDl`S_$U=6g|LVS6EhPlE7P4@+ut+*?+VAw*~JOe9>x5vTLy*(tSn52zvaRGd$3pE zfB(Jd*E@zO4z|{WM1<YFUC-aXd-?AD$c+)`Zc7WZhg<TW=dmz1y*YmqR|5wdD^n9A z&}tTDCU6wcLvLd<ki*vopkG5{;Gv_V_2HAJFF$;&t||w2L4S^}s_gR$j=ug;Rw4bf z+*;aRIVD{S?PFW_U%Y+x<S`5ra4w9D4CXdAV061aJ_empTSKk3stjwsVt5Gkb<XxS z;AgZgtOdkz26~<dM*<Q`J|&yrl&X(E{RFRofI~+BQ~+{3ybbpE0Knjin;R}2d>uLn zoO%$w!yl0l7x}OM{%;V6!*7A2o<4m#IU)A=z+)Ioh>1M>*818CXt<ob48q+7B6_Tw zU0q&;gJbVt(?8Jj?)^K|=#vx=R6sa(oNw=HZzdupGB(uzb$?#J2JM2Mf4DJ>mE`24 z1AW-<F9gHi4?q0C$UqO!Vr>AY@*MrS)~=4xnJLhDLn8w=R_4Rac6T1Jv$423+2h)0 z=*tokVh~6^fBW{vjS-OO#f7=U&ET|ZYN*xKUxEfp*cw2WS65RR86N?`2Ms)8-bG_A zKwel}xO!{5v%3?Xg8;sK`PD41-Mwe3+U`WGN@7MK;B(45=SG(9!|VP&p1@&IY4P;R z^0l#17&4OLqw#-TYf~MZaV1@SK2b3)9L&4BIGiCS=a#dG&TV@40mVFwgXhlC%}k9S zdEQ|-fc+;SAsM{Vd%!b9fj$6`huec&5EM9%YilZs@^t5Dcb^>hp81a+K7jE$A6v>1 z#XLEgnz~ACd^8m`RdPZMRzhGu4UP0$+nd4C-@kVkx(P0)gteq3#GF2TDmN?rNao=b zKf+-JKP!)$Yey^2Tj_3Xtb>3%yE=Cd_CvU8V5pCkdDI0kywgY^(4eKG1*r#DKRi5y zi+Mr*^WZ(AVj^kjXqFeTq_&*wZ6UfTC@usCf>m?Ag0Qg`4y=EmU*F&ra9m)PaJ57R zW)iX}@M-%cRgaWho^2mqL(7Gsu&~h3<mABc5RBc&VICq@Q5iWdegQ5{b{x4%#~%nt z7(}&v6Uv{y`7y}P8}@KxZRPl<UPzE1Ozclz@|<{>$6v1Y_Ye~krKcuA^mLr|D@*e* z%7BdE(0MX#ZH=7VEVMXaVgBvgHw+BtpiOQ4CD?a27f0CI^Jm}wncWBX?!xc{10Mt_ zhc|LTTohFC6?#wu=HZ89jpm?OTbQyiGng40;QBe>l$e+pEKH5Dz6<77UO}$BwsvfO z&cFaC^98w?Frc63<!;4xc@72|(7=SG_{H^=fvZ<=GB3afnECp7!_Qe+S~%Rm*})F1 zbz*V?10%h)h1u1CzTI6cf7E7eWvQpLEj%O;POF`heN1c=^xUq?t=}{o9li#~5RUMr ziu}Cd7QmiX);S=jyS#J0Wq7@+s*0R~Jg2arv%e3#jjg4{)WnTXKYhgdb>IUd!-9!P zh<SxY*m-$zG4F|R`tCF#C662;qHuEMKAabbn+`XNkBNlitf8jz3GIIlmc{v5pd~gY z5`#U&hTuQ3G7q7L<OOjUf2+%ju|fzRG&3_=-B|tc7#Elz9ER_0s;|byysoZRVQ~R2 z=3l*f1p%XmhFV2c`P=vJdAK;06yy#!0|?I&5YW)kiV6!Jez&Qvnu3C&uC@x5zJC1* zR|X?8EC`~x(}d(OQg3gqV;eGJPoFt+j)DHn*)vUbSUMLk4;LT*`GoYe{*mkY270)d zXJKK27>}5Q1O{ttGc=~*h`At~`mbK;?&-k8JRptq@x;YE#P0%v{Ba4f5bnV+diLzu zoXph2%?xyP&YU>|#6m1-V{c<)Yl(HYRh1P92ni{vslZ+Q{PWLA>6H}R8iayI79r(D zm*+}a#<{q-t?jI;8|rzuxuDO&S;Lw?G1SvJb@~(u2{9aGZUG@S9v)oGd$~FP&#AK{ z9Fnf#g$e0J@R7FWhHppAPd`D>0ecQ-Bn6u{76%{%__yzDf6;kr5IwB6mXL^uhL#4R zB&<6F{!m6ns<*Fe@9Ewjz`UMzaY-R?h}GLb7lWgxrlwL_Q93m}4x7r$!eW!8Bqf-b z8D(Upa9z{`%q+}}msS_|o;-mcj&<<WQdc=mKuko(M@mlF(N=%Bfu6P|upuii2e^8< zyJ9`UENm?PVWBPEU3z-DSm6N+1RD{T5a-}v$GW#`E6eaRP0WmH8|&KJn{e566!XG@ zzP?^~bY4)9AADndLv4CS3fM@jM_5x`6?%l6f}E0yBEr*MSw%@sRtoFaF)%SoN=w3U z(A|L=1#aJYpr+$N$f_)$7Z_JLQqnpRlTn<OlT%t<=@aP3#>R4MdkagVKtqj$jEseY zgNdD;PehD^_dFiv|MA0FB323Kup(`JQ$|L5Tp^*fj3i+1d?-f;CCJ|g#+|zQnm0eb z$LKr|b-0<Bs4z1N6Kn?t@sHT<D+NxHpYMEmRoV9K&0{j(Sci*wQVjFdG}uib#@nc= zDVJ52HlXC1eEh~RR_0k)SyWV&n3)*AiM%j{kC#VQMyjW;^Y)$FG&q>o)7CgcNJ7da zLP1G=xuq5>9u$<6%4#a)6cjFw_E--c6AQDazh7-@tFEp#R^|a|h{^#T9$p@-DGN9( z@G}hz^-3$to15zJFfV*O=3CoaDyzyUQCR})%!9sZ>1Y|78Ha?2LX4oVqkXs;bWb*R zHYF7$a6Si}s<~|_5xoqvqFZX+cva70VZ%U4O?5<U6bA>}Eo^BbZB2D@N(yd%eyHNI zay$ZpxS02L2j<TZFpJs+7hI5(0dJ1=SV6cgFE0z4i8U=yQCbK)91#@;QXdx`fy>*7 zi3+i@vM4JnFfuY=<p&yYadYP8=Pa$v|1mmWS_E+}bYvKXA?`R>!9x-f5@=|s0`s}q z>9EgOv%XRnCD_^7jEwYIn6Wbtn-UQbDz7SET$)4aJT`Bmqosb9h?JT~k(`pYv@jcM zZkdvb%FxUdoR}*P<{6op5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN*(AC!|EGcSiti{i~ z&r!_B#mDqs>AG^Ii;4=nssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY8y7d1p{Ws^vgZ1) za)2=jMdVy+%u0yBoSw3d`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_Oh^KEuF}o<yVC?` 
znM5pov(KOBSCp5*l?Kt&R0GF_RfSrc>VbK850@JgBT*4yxMEm9nv;_qbOv<(^{bbM z8-NSt;^IutNWD2bb>!rqqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv9$DF0d3m|b%uT@C zBqzo}zrgCpfO+^Jcv0{$Seb_i1@<{RFMDG01}^4xwbTiS$<GOD6O&V?CSqwIBX#i{ z{W)6~XIeU17e_lB%rm-pdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7*wAq42%Yyw($e8! zULZa(cJ#*3%Gxpwjuaw9fAkFW-oD<&WySMrE1;Qn)|Oaxo{yi;#=(w+l%&1oTVGvW z*G<f(LUzH%A*R0M@?25tl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8c$okG4B;6DVN>sP zZXRAW6-8Xk8yM)qPr&MJU`T=;c5t-q>+g;T3&F)aAkEFqWo=`Dhj{=jIW=i&dhD3Y z<MB4yI+~#2^UHIM&GlYBUPdN{I(pima-XZ7w!WdBql;r@O=U__JR<`=)?EVTVd!yi zuwli;Jah_9j@0y&k&&yo&c2S88X*Y<ouCdODOE;F92TTSguv4}dANfxVm-nj7r?x; zhkHd`EiliHi+OU8ab1Xsa4`=@3=TqmQ9(l^iuvW=&pbacKRGq_@bM!&%rh|32ZaW< zv^76{{aQg@_B;=_mzyhC1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU=3Y3*oKT9J-EM#gC zR#nh8Thcah`@s&jph=CIj#gb)&&n0y92h34pn#isC$RHp7=(;GQn`3|)p0Nn-Uh~Z z1{}<{qM5g{wrKBc3l9s%!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A<YYQJn&p+HvkTKb zy`4!Z3BLY5-bfEGO!e`EN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQWa5K-r$)1oDfAwk~ z*jGHv)9|UEC8o$sjmH|I0`o3jp17Guc^hZY`MNq3^SHbXIhme;o(%f(I<5v7oo{SB zO6UE&k7ORCX?J%Ab@dtBRS699!C}Geo$b5Np9cl{!O3N0Vu1G4)KmZo7`d|q1mAuC z9hgj@zUy)e))Or*DoD@BM8~grhFe?SJ~6pwtn~8D`K`x?m{+Btqt!GpuyOZ5ghWUw zD&oEl>vWoc1aw~CErp94C-dO7j%41_%B-cWDJ&!qAM?E2pqUU4;$a^685<uyCi8gw zJTPBTRet}`gU5S2vx_%}N3ISH4Gdi$7#iwFJzgKUK0F9t^k{cCFFOO`SzJ1gVIG&x zLjw*Db_nSD2YP7nFi%QB$*p{bh&(eb0W0$$;|QepITZ7@xEg$BUI(|gK{GEaD}A)L zIR^940Kd1Hni=2O*};{eMKjNUVgAm8d-r!9*EThH!hmaPte_~*%Em&9Ug=|RW7*r) z{t@@RvQeoIRwh?(&x^`7giP|BYTgM|BRLHdS0*+NngP&x8ag^n14A2kPef>>jFJ*3 zI~%TRQfCNBPtyzOxhCOZ9;NffM=@_{*4)+<`bRK7J~8~y>O6+|%ChdR%bk~7+glnT z=s?#xs4eKnu8ua4DQI6@kPXbg!sBi5>O4CKTXbyHmHux0%#(8}oF*j0%{-Hv5As;d zgP&(*`5iisPVA5!pZO#G{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62lNr>Tig2{QZ_v>Z% z$B!N!z9zUI%OH@Rml`h_S@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&hY8e>Xx_i2zm{-Qn zJa`)n^LYHc;ZZtoae#TOxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC1o{`m*D#n;QBfYO zl#~=CB*d6i;TY!e>pTlyoj;0sbzq*9UG6jycIL&7VqWZP=JELXBfQNQop1bum<Q3o z^{c;P{^9+X@7`x+rol01Wo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>*P!B6RFA=>IjjS^= z{c>9Ectz*T)0c0*cpEw`Lql5+FZZx06!W;|+qgQOB_cb^D5~e0jE8yh<1xSQZSXJ; zI)9{}ADb9Cn)ziE^H`?@e96zF@-{$wfUo!7?#{~c!hYS{Seu%-0Vh2_C*w%wxsGD~ zI66;6N<qRVeH8OP=WzIW@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah$J@8IL26W$u>jWA z+yD~v!>J#zPCDA}@5PlBP3>*x&vRMW_z^M5QAyc*q_(Hj+^FihiG_LPBbgV`N8r_Y zvg7J}SV+)6uk*)Y9y8wtH}j}LyrircS5^Ub0632f4}SXMImXYw`X`u=iHip2Y4Ca* zU>>iZXLA3n^LUv5oVQ{7{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3=4PkEHy)m({owvR zc%6)#9EM}8tN8cxw8fQ`jqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m*yrrU~NJT@fWoTq~ zl+JtXGw+s+KX3DedE4XYJf6G_9_Gb<i_UMLGjFw3W%!scEyBe-<~ppEm0AB_Z$>K4 zyv-k?^SWqnb0qU_zQ}X*=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE9qr&)9d2f6W<o?v zBrYKi2tmMwYY_a*d|NAeofK-$QgKBa6}QIO^SXM-{Pemh&yZ9BAz@W*ExY6Dyr_|9 z8lL?8fn25SQ96H|81@hFHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L+Emt5qOM8(Ugm{` z_|Ko`K{MY4I*)aKFw7JG0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK2fR%in0atDW=4is zewvsF^mI9SSyCL#i|^}v)jso|*9LCic_1WdPQan2?VXoeHx*mlYi8$c<>G>$`OiAf zBx>fJaRl@D@-}s!nQv_j55?(i#E#0_pmm=8n9RR_{~o?5G{_%Yc)LW3nQw#Ed8})f z=I3T0FxS)5E~_j{Oo%<w&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_^WbfC@OYbJ#jyFs z1sI+GCz$7tPdF~~Z5^$^ypp24ot4GmW<1=StZb|rS{e|7wPKqY{;Qv_s%&U&;o<&j zJ?zfjQ)wmpvs{|;4oUG9qw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB z{X7)l&>Y)@^T&@KLQJHtu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbg zbh2Y%W_t1bIo4AKgv7?j3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp# zcti~Qc=rfz6BPPc=am)ZZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm z4iN)FHf33dq?oen2^AxLiMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS! 
zJvu*+X8sSjCiM$?ewU22<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAH zz>%RrtkY?Dc(`C}0z2{Jdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTC zGToz?7vMiHA_6h27x;M?G4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PF zu#0u=z<vx{R8ihHd|g0*Z*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn` zC8GF(lq?^%neD5~i$D%YA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9U zf`k|xLl_6?>FHj-#PVHw+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7& zJ_?;hOG_guIbnEsfEMrE15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky( z)hP^g^NR}V>ub<Uys<DZEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibw zp1yvgs32!!X?D1ojPylXI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U% zlItE31<J~*c&<qiu}XNxl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUa zM>2o(d>hdD<1t@eQb<fpbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K; z-$F|{87wOrit@7HoI>w#vd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr) zw{gKc-)6rfQ&3P~Jxv3B-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ z;3vQP@jVkGgCEl4aI>0<Vt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC z=seaDxxKjt6oZ^e^emV51@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7L zt){9B*fTQHA6{4i`w0v~9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO}) z(o**i@agJm|M=<WsEAMk0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3 zXNk$FcvT5VD6r3~M9Db57*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T z$;Z<TYs>Ae4Xkt259&NHe`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t& zb!}xC*Db}w59s{$sR>b0;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U# zM8PE;oKjKRKFz|)E-s2?DJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG| z(=nL;`R8BCOA7%!XM5Y@1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl z!TozM^n)b^d+hGzR$E`QzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H z&z;6G7l|BQoeGKzSGLx{{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXf zXJ(~?L!Q9480{DI-4x*<WTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~Uo zW>-*l_cz$1=Fgudq&Y7aky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z z4A(lT<1>HE7#7;xfBewg)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W; z-MhxdS_%qsE>8Aw9OvZ#U32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h* zi-~-mdq95<>q<0qgD<&C95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{ z8Jz1ZY?`^Xxen)ufZ*)CySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*Ct zBNHP4jCEy%j+Vy%_h0|z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAz zi#G8-=^Y`ty-VA_E*tmuaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=; zj&`~EIrkquh)1o5rNMenC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^7 z1o|QSd%M2*V>+*1y}W+4KRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsu zNwhT70Fv&Gw#E6`t@YLQwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_? zNg;Jn0=%?~qrIDpBUDd>b4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^ zLnaoK&VvC4TZ{BSpq5lPJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv z2gKaO2m&8w7A7riO|YO1%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA} zgfol`4*L~$2DQ*qNFXLIdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XX zIzBcyGzfj;0y_A~qr=yrXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8< zvcb_|2xI^M_TB<4uB=-Ve(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{? 
z!QI_GxVr@jnuNGpM>^?r&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?k zw4<V=V2(EiPhs9|2dc9J><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhN zpl!~{;=<C(Gpa@gC+7S6J24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y z=sK(p2Ero00N>`8`gcG45EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=1 z1%b1FVB7{Bmb|*WP*_q3(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4 zW&%pAsHAXhedYU)V&?JFCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$X zNY)97v4g{X-@bbF{{1@;4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{= zdV(9iV>io=UAy<}-NVAl%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`r zpj1raqS!?-s1KnTIukG45Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4r zQuG=jx|fre<>TXpNS0|ln+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{b zR)ZK8t}8GkD8Ho8Kgb^<Ah=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd) zAvrq=iwYk)av1*0b>IM8AUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{R zzMpG13p*=22MfnRaTTMm^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw& zp|Y|f;K#_sz|EajTw1iUhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y z=|5JGmzRx<4*&g_M`--jw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A! zOC8P@00S)H>*rluS~xp5{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u z5u7g8&{!WBJ~l48v+MGsM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3 z!@?Lo8OvP<g?4cA!^RzKT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI z*fco>c};DMy^C{E)vvvK0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@& z^Rviw+)tlQ%uFPuCX*cO)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ay zw=f6HGkN~StGC}RE-&Qd=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW* zd>Dp!V`*7wK(eriums##MFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^ z3yY2>I@oFJ;*`}?rR8Kr#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?* zrgh_pejI$lYzNQo;XbLXV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@ za06i@Ja<5ag%!cWi=JPYH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}a zB)PQ$x?pKVh>D4rm>B-ykIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y z2rw^ikF<;w;PaceHvVzuC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP z4$3M@CMHKgTzwL#Xiov>@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-U zaB3&liKG0dPMyCXA|WZOtfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B z|Eu#JC@3kAoE+Rd=+0CZf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2 zPj63>1BqZsFe8|PS(B`(ZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EE zvr|>o6;Ux!&=i?Qc7UGS(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0 z^mX-&49u+vcFv$t$d)7<Qwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9N zHLzGkb#*BPMNui~3t|@q#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k z`^DzB<kt?Pc{5+WdOg2*rL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;%a!V{|25wC#!0 zvF&thTb-n1+qP}nPC6aiw$-t1+qT~M?z?ZiU#HG4j8Qdeui9(Qx#qG)e36imf|Tg= z^|_p<z3NRR!V+YEfqDUDr(|U|tfs$_C~A9oeLuiFYIbV`1_4O$H=lAr(Tdp-qF0zG z*i<!)Z}T}+&Z`0nM37)0$H3*W3Iw-yk@Jvr`V|5LKMc5-m=N9qmzs+Qi`p<5^ga~t z?Kd#DyMwuPHq$D~$DJG-0N-hedwD|k#_FNJEzip#Lp`rII_?|V&JECQMcwSoq^x{Y zbpyK0Dq*s_^^nFXj2wPvd1LI;TVsuog|9nlyFODDZBn35Nm7oKS?I^yf_p*14Db;1 z6*L!>byl;J(Xen66H^oOGZWOE1izHNxZi|qB<^L;A`1)q$X~Rp!^z@;;YHKxGre)r z2D<<u8Nn+4fUg}ME{IpATL&S7K9IhEJY<73#iVk}is2Fpj;k$*#O;Y6FMETN9W7$y zzb((K<{Fp{_l@%NN%!`Q4BG13^B*0hq_mV=Y*ZvXEG$H+ynUHZru+}=Hn!YvJz4HY z3e2XLY%39C;8>g3Q`FQjQZa}}i(u~i#vNhqB55Rr<moD_X}4RM_*oWb)-n^4fbs4S z_hkC!86FxwRw7z%dUBL@L{3^NrY+fD+n3jI(f(5Zq^ZTV2~kci-zgC>I*!e)IZaum z*w_q0<afI}leIQwakHzZDY%S}FnVP*GX_O*B~|_<9##f2O45*%u2~!<fv`leo+aB) z3i16BAn{O^67-lguZ4ZRl%<u+q^XH3*kTM6_>obN>qZ8N`B8B(k<#D;>-;yoT`YD_ z8z=<kiKlgWbJDYdzn2!(mYJ(!Eox_$mRPy?ntg517e)@BQrqLc-;8b2GKx)y2)|>I z|1iX@^t@lqCW-AhE3u#wHFS9K0NQW3I2T|i`yKKQpL;&$$E(u5_XeXnp05OzcD`~0 zO-(6GtEH#(vcQzL{a$t!&zJQca5zC)KT4(vYV>-LvloBq8{biK-&oBIO81LOlhC8= z#Zhrzq0g+*8MbPT+ys`_A#5%5r~kS~gy~4umRFhQV}<^?v^2lC;P7g;v;kC%Z*#D7 z;G$xZ($Tq`J(LZI#Sy_|P{Gd1%{Y2OdiEjjy3ol4?UUkn>CjzP#n{l<WTge@AE#n~ z+?bn6v(_QNymufH0<;54!+t}(g}sG}L75$ys@BwO8h+(ozfIy=aa1$eeC{7}TX){? 
z@PpTCq2Bs8_k5qs#;kLsEvaFuR!`B<P0&tF(`u)u=X5!`7+CngPu?}S-XCq;p=9c6 znR&@53A+gE=s1z?v{~g6(tk5MUS5unwY2zjKDiz>JtN4<TFdY-GxI@=*j=2AP|?Im zO5Ddp=e<F!wR`^I;yl;Vw8G8^x5Tno5nXPMi<zjB)Tg@WyvuZKZhw$2Db<uTHt`*B zcisHgdoVVQbv@~32lA5$%-<at+S=p-1EMu_+u9w@kLJzfdcNnZ(}|>XyFB2#Z+2$E z7~?xi`P@HlZs&)Qu0R-fP&G|e1;5{SX7`~G2?PRTZfXS550AqLeZgk}cR<=8ED7OK zg+@Q1{)QCud%pmxx`MDCKfTO%pPvOFWRezELig9QJO5^*s7mA?-D3XuaqRM`F?D{0 zHh7K3#8YisBUMW@x3MWqS}+fE-8lFBEe5h!(7;mAFh7#?ISi58I(qwY$E-ym1|OZ; znHU%d#9k9gP7Fkge?lst5uidKX{u}KwEJeIYr%~E`QZ*GaZmMmYkIXWcM7W4>HYEX z8bl#p?6rbW!JymoC1G3EV&_MezLv`RSNy0?Y{I#2qBD4cb~KJ~)6@R|N=&J(FTHZJ z9ijwStj65LDZnN0Jj#AQ+?V28Ps1`{mGkPz-?w_&{Pnfh9$MJhKU1h2HzN*|WYbV5 zD&QMMV-BgWLZ;|w_|5Ru4b=$o5k;DD74=me6kcar0L?EFw$(Pc8-Ttx3_g!w(|=z# zw)>pILIfU8zQ<FiZdGay#*g7a-IcW&G<8v!;!v5Q(3xVk7z{hbX7tl)nKGzV#)I{R ziWR|r{bDErT9-6FJdm{A|4DalCRhH&Qk;j)@isgRGd8B(#XA|1I%HZrbd&*^P?Mel z<O}JzNV}YJ-EP<3)`lBvaRp>PK1EkZEf#!3?{mnNydFc-g)b-989)hK#k^v2j3s;{ z@t`N;;3)h-3Hm-JGiY#e!=-e!pbqpjG_rNIMVLuh=1pu(dB0mf4h!ohz7eElS)H8^ z)Ruo996$_G%mxiy^7+t?u!50D=xmFtFO$)hoG&HWHCREm9f;p8QI(+KugFr~mnuYe zIob$Q&ryeMb^ElyFZ9$_Zh4}`&yv|-kCyF#nq#uZVDyXi^&4QLjBC*Ler+e8GJC4J z^FTp`=GZyo#jBV&(LC&A#CctfEmc0}Sesks*L^x#x~eLBYpXjet4p)0a~d+$LNo(; z$Y^O9MpP0#y-$m?-pSw*=@o$wi>KbWk?lmN*T)Qo;_;6^Bu|eNo@T|c+tvrO_s5iL z=|2(ao6~>8T&!jBQ0}PN`9gkN+|51gfx^7#N&3GxROaR+O1r=z^y7n7hf9tJ<BC5b zHWzuNb0xaag*&Q)A)|ip9gT`@cRxndY*5%#{ig4qsX&NGug_K#C>v*dm?-+Kvh4!| zM@#(-G}?XR&=kuq6UXvS9H$tSp$R?uIPs(fm*Xxo5qiJAm7X~BWY}vPK|S^{&<GL| zv$Zrfm%_rI^u`h+ssr_iwx=GRo1a};o}Y<wd^T&>_&8@c*tywxSbSU@0@9%})5%<| zG!MM|`Q&i`qO6E73`L!OZ{r@l0=t=1k^I*XiIAFL9n*zw;5Ueb{%M=7Ue^uftNk|< zygL)YBlq{YmcA^ruX5L5Ne4VULP+Gtm?1yOgK8a&U$hi9qHXY<Z%mI_q5n>m1*zF? z_&jN5p!HGN1UXbKxYl5>LLa3t8u3+mfF7Hir0=6>xSaRR+f;t391b7~;3z1$m}5gJ zmr~U7{~|{|j`=l>4W7dR!$T=*%vW?<V+rRP9mbw5XkX0v4i5g0ycjK2Fcb0aF$(af zvz@b}p4smb2a7iM2blm2035Pme7xfMs%ERr&BVmS)&#rnqshv@uE}o%()S){5+6S0 z0k<R4U}l@b{T=@8*O;Tcj&>t=?N{su;LYDfmT<cMqL2VX2nYD$gYYJTcw1P&i2EA& zI$w=l_rA)dc7%q}XlXi)(d;)keHx)^g9%^qVjan%`Q%p>=iMsEFmvU58tmUbtbI0U zHlkXxW~x=y8)f@v`{|oP<<x#{-M6EtB0~~z=|((WZxtAvHtT-FMp~(t(K7u-+#jvl zqc+F$T_O?}6oBTJGwR|hviX)7zqZ5DBLj0nP2`K10vdlsPEJr&mYw4hz8eO<haQC% z7K(fW1ZoeG3!AHBS09_oipUy4X_Hga%q`2ckm)EeF{>-9107%f5WY5ytd=|dsohq7 zf!5C9dzXHn?g{?JWN69vQ5s_WJVfm(fd94Qf0kYa7bl6snG@m?yxS*SBCq0f*rK#$ zw+WpiJg+3f$A{EajGiJ=Y|7YQFv=gW#Ol`Bl|<hsDLee~r&`WYV|%C|&7^8yn|VpG zc$V{b78d{SC6Z2$k9IQ=rgw=ZpL%vn*@*UsiNd+9|6YNjD!Zf1>}!o6fGU@nV28i& zj>V?G5~l#8ug<sMFysd;y9m;;zP^FQ>92wpC}F}ZUFEff%}*8;mO4fOPNu18Kf~#4 zzK-;e5MY>#Qqt6{Z>vI=A>wgyQ4rjjSl;&TG8#s})v_Y`prMfxETLRy5uF;EW)AXO z-~vVfU+3>pqobKg^5bnYoHNjAy{@mT0YsoiV;3a;b=Z&ZgOh(inK|y-M}$Z6S^!p) zgN-+7H=UV4flUea{zM&~llCv%o110E)$VS&D$#5CzYXtJ3-O1uL#y3iERA);fB#0A zr&A~FX@#Z|bbFs;#dqk9X$};rXXp;bJvPBp+u&)QcRG$Q@JC+;Q@3!-Y*NkoXKO6; zb-^$96n1{!1a*3hXax0cG9z@VZ?kxUP=H)=^HVf-y)x-~xqyC&G8p!BBM$wWSlAlf znjhvNA?K%Qsmv?$^Y*4$EchdqR99bn0WopC(T1A3n-mw<lGlbj!_3U$b8<rr_*mBS ztBM({-0}jnf_~Q4w!?@ELqEMe=7i%F=LN?R5ysdE#Gd6gkA6(&FjUiviHU~mp*7Vt zdwzZ*nwQi@JWX$W-4LGod@fZ@CqlA*yx}MfF-+3hkavg;mr+;HOiTb<lN7CcweJ}E zg<R&{-8>jOxp7F|&7Nv>&)%Z-5V3kLHq_5DvmPxw+e24ns&{udKCg=MO_NF*XPU*7 zEA?sceFx9&hUPWZPYni~vI$Y~il_{`bTy5Py4Pg3Eo!ZYe@em7aekMw2gz)C<otc7 z%485&dT(Xl|C36QQ_R!S{H*q3j&ZN<L2Bh=yyVQGDJU*(DCw#pYGk%NGBHt7jW_tA zuvOsis_zM`PH=Hq{Cs}~y9B!s6%7fX0C{4?5#5=fl90G<AL0h^u6(?`?KN2uT?;MR z@p^;j==%!-4)}WS$JN}jc|{dqU~+zF{`2q6>G65L`+TRU(dF~J-P>(AHiMA8-e|hF z^ZVve)jA(0{Kf<js9YcdS0`Vaz{9o$&2N63iCe`HY<k}yk-c`v<>+)cVJc|MEc;__ zbagnsvx_s3@0jg2MoC3+wsH2X-oisciA~S+E_EK)0xF!At8F${*jUvU=O88di76N! 
z6k$Ya5OjT0(bP!8bth?!>xRT!yyq$)?f72VTU_IX|56&Dlns}nvPCR8i<e$$RbS~) z+vRvYe+dgq5iVm->g2yT{RY9AO3NG=<Dc|x)A^D<B}nHfls#6=_b%A!FS+!Q9uHER z-!Fyy3~vV}dDJ-;!TwI!a8%0kBme$Q&2A0gD)WJxgs?qcf_*|C9m`Kl!$-#v|JiQv zTCUc4dUB21>TYSE09m}=jGFN^SBLgDvp1~ts_n5e#-axEYj$a5YiVk9bEDntT-=(+ zMn;Cz1oem7>G6HmRaND_SlpNcAK7k=L#WyS%jNgDzZzSt;95<0s8gu2p=%-7xF*E$ zCjdXycD-C!kneDPKk6T2o_TNJ*mikx7H+U88$EBi-thZE8T`2D_m=aslk?kNtJ0c} zg0)YsuTP8#i=(KXFf}y;2Y=~HEnXrI|Ca#1t%Pb|kY|28I5422s=K!2;bCuy60=&8 zmZqqf)a8S9ifv&Zpd_URML}`K!wqA3rU(;7syOdcuqTMR*6Km>)6OC$zLQ%aAepn? z{{HHoW33RH-U64lOEO8*6K>!}s$+w*Q+JJDfYAv)Mm?yXW%U(Jy|7Y5I!FZ%1tqGz z8Tma84ZT4S!{fLHO`Ef>yA#J&Y+)0jY!0DJ(H$*IcBfOthX(GKCF9XgyuPu>J;+UG zb45!}2MraebAVGN!6G}rNSAyX?5N5neBAA-xv|9oe+RTYjIOx&<uNQSB$&Rop#dmu zJv%=S;N^EZLc_&9-z5zT3Iy9lO=WOv-u8XUFKxZqSY`2gBH_7OAGS3S72JjSfP*zP zb?+r;ovYsLK)~nzlbz~~B!STC#;~&0b@9HE=Yxt$8vpFs&i(#$9b`H4n8`h;r>t#% zz3OD%H?SEK69fNcVkgJ#CJSgxjg55`7e{Iw>hA|t>y^G~(c2cZyZi!!fZ)O~4S3Sf z(0F3mW`bP))8pIa=dCO&YszrXyZgPjXPDK+$7f`e`&mCmEeH-dNRcO;oJwCk^b@Kw zJ%nT`US%_eK+9==cl@dDFbL);kW-KEnwu-*A?jr(aEYerlg0!emgCCDw@p-LJv~M* zMk$Nh1Uv6CaYLY~K4wKg52YT*RbthrN^r{r$x%JUQ#_=<K>A3*kv|vMl6iHNznQAr zbBK>wX&$L`Yji4T7atopEj>v~0ltrx+x7>uJ`M1(V{+I6pn%0XC`3>X73IaCf5KGa zxrt8M*cNL9JUv6(VB`gQeE=8$+0OxCm&c|PY9;bZp{($|JveoU{+8zEz@?cOJ->T+ z5u~CbQGiQncrFT3hZzSYo{kQ_l2Q^r4~JF%Z}a$!u#SR~^V{<l;=I0EWE=r|e^~0? zS1cUtKN;!1k1ajKDYNxZ$QD!<ql$_s+urw+x~BW~L=PvYr)9loz)IWN>m!dVp|yl~ zG&@|Zv6^y;DFzrh+53p-BFNx)c`~~Q%Gac`C2vn5lPXj>-JbhsDx<VoWw$XXCI)jX z%d0FsfmY4grWM}K_D8y88rTXkaEFc%xL4;+G;5;?>{Z0<?iI+(9NHS)9{!@=uaaxA z<Gi6^64$&iu}}w>z|`Qquqwu!3fTlTxYX2z#Y+d2*&l2g8H<*xrna%W&dMFD^s6EJ zJKQ6;ukFzV*&t760u;yhaLWfIV7?SqpYgB1hco(4@Go3*S7+0=Jy9mKn=wh4tu0;J zO4?c;@c!IhDalBHdnqY&CRfbt9FeAmAQBD%2B<P(Mjo35RYc{4Gvq*$1}a{_;Lwny zvIth+(^S@*XZ!TF4>UY{sLGMUb8h(HzKmca4duvtQ0)(Rej0m7ha?@I-9{jSPDl5n z_ofyCYil!z01pLxS&C#e_m~POCD(-hf<W)3`VbbbrztmNX#g`sJ?-0#g%dczi?O~4 zI6$`_(xd6v+hj4V;R`-d0YCOt3__699b}5r%Se#tUSMq1Txzp-**|VO;_F0R^*~oQ zsSLU8yIY#oby!@_{k$LS<Fja~UFCX5^Ysys#m)e8ECh3O1$Z&?%WznDg7i3L6ZK&R zm3zt&%0CjRvPg|vt<fV&$I6kcnLbcnCrtn8aR6gvASb^=zDMZ^^jML?jXEX7J-s*= zYQy{~<7GEJf87n$D2#sstGTY#)X1u@r_SehMQi?UcHJGuiCOqH^cf}(k54-{c-nh- za=NFFZ3*hR?fpW`$%#bfEzjwe(<R6>oqW<^Y*KiRdk!`9J6Em!_2TfYU|6c<s<nOg zk9N=b!6+(l?S71YP*-2q&+Wd{`wy#>nGqS8xX^BIWqWQF!MCuoGCVs2dFy9YA>IKY zDeu}K$IQ~?_{1ngGVc&{awk0I5HPKI-W_lG5rS`@lXA)Y*mVvL|Lc&Jl*qH{Or!i< z&*Oe`S})rBX>XkcGe#Y}5z%m?h;HD}CO^Z=tXb%tVmzf{IaYFggqs`}DwR<6mpFle z7uj(TH9e6F!U@#XZ0<MwK3%n0ft4+Y^7RGo2`V~fMtYK(%I3;y$Uo1deXU1ceP34} z9q}8`NDpjms+pl2M#K{xzXxy)6U!e+Pdh~8E!Y1j{^k9GFh@0xj*4Po*j7+B6%iH9 z?*O9<;%s}~TR+6*P_?tw?(p&QHNoa%ZHyi??DqK<QWr<k3{`PqL${Bqtc;cu;p<{V zRG=%@6U2#`n4r}bGX}PP2ahm@?V9KlhDy-uVGdf_AC8->q&4fkB4pa0UFU0R1pjq4 zGD~XDjEDFB1Kg=Ts-g8obV0@${sCycPV5}AH8VAUdxie%&Ygt|KWUSil_f5^u82p? 
zjz59*fxlRC{=QtrWlSLU)-ldbB#hE*>B{f+eJME_W+};%mYJQx85<>-5%oFRD=yn+ z|4A<OWI;15{BM7C#ot08#{=chEm9x0sFT2i2haV^wXx;YPD`zfF3e)2#X~ZgwMPiI z>1HVFE962)nc4Tl6z*LD=zGED;d1*gVxJO?35zawrs_}6)6Hm!3ehN#Ljk$)`Sg~D zgxt9n4E~PQnFvQ&b1vz~?=>|tqoJeG?QsI)9o#EpLqivHB%bDI_W(zE{*H9IfwO&% zYbU3M+!B8n!0c{WggyCDo{YVOzO}tNXJcq6j$>yQPc9C}Gsr(Nq=|cwBNhR}>|-K1 zGrQ-DF-c)1Fh*XH)tqV6A4u`BzonpX6O(@<!Z_XBHRZSOUS`^Z=eK8rq?ONm6zrT$ zr2~`yF6E6jy*bHMe%#<mi^?2OruzJIOP6aD^9;9ZKL>hD8$RlD><M3$F$+FIrUs<t zk$Rh%uhBbTA~MRB^c0bnvf+qW)<&RO0J7%6w)xt~`%<?B_3sC*8Dj@TjBGiI(4w%s z>G703VuZgeZ&7V)`J<zk-nQyQ%|@4+idGE5EU8vy)e-^kNAxS*8e|H06o;K}KQ91S zHH5Dze0efUDi%u+8xD8O6AFzbXr$_TbauH8YgI8K^@7Vmeq8N6)X9Ey>jr%^n`-db zS8-kM#BW64je5Wl>A}P_+h(;~oSFtC0W>s_mBH4G3=5Rz6Z7~R{qMYi8ozan>@Yfb zx){7nJbQ~7Qbsw%x`7Ej@AC{SFR{TSAFza531IsW5{Y$ihQPX)oH1ufbtcA-M<|Bm zNwC+&O~FOV6;nKArveIA_WYkcUps09@)91fG(Beo79?>PIxC#=iAY;(0v$2y0vd{z z!?9~WuPMl#&LHkAW<V~78FeWdu_+vUy~|lsowz%KW1b?@sn7P{!<)g)$_vtJfKFA$ zmcMOHTs(|Wd4US#=H{j6?M@EHg|Ne}k%J$!*y*Re2LPbm1aA+-JrD94)WmdU^*@Rs z(*0L2*PwE{OY@*?Lt{@EpK&W!kJkl>FCFpw4q7@opmSB1`{f!C+CElO+vypOwK?;k zRee8vM+7cb!W&ncq*PVk`p&OW7|3$<PzC?MF+&E*quuA{_wSe^+43Z1$<u)w3~nxO zn;erkE%c_!*d(PlpCa>aYzBcfR8$BnCGD8VgZxtjW-KaJTBZ`KMH_+8#+Brdo`OW+ zu*~SnF|lnS_0_WoCgr6L`u96~L6~-llH5Yv<=jW7?#hIVH_G)fbmr+0iBMjzM;&?| z+}pnIk>B~VTTa~XD%!C|d>~I4y6M0VOOlD_<<xczX^*1wYMIF4Afv2)mz=(j?Ac3p z|7+GP>_Mk_v($?Fu&5Nj2C5CJ5-BbZNy^n8<^e8#+8?cW6;0pZ2oFJ2aqZ9+OdcLw z0D(R98BQ*Q{A#|9Z~r~z5y-AM+_(?G!g$&?o-8*L#L*?j1qjQ}IQW0~FZt_madCkK zfv$D%09mD)N?N_HI)|eNDr4;|u51==78?Ttgvr2_o!wpR?9AH6pSkgnZ3x5g&^Pbc zNBCJ6v#ns>2UFu5KqG~RF5+I^Oko2F;u|hJOcBhit2GX8UcXPAiztF$!b)Aa#q<S@ zB$VvgNBd&HS%kG630gj+up9`TcMf#5h=SoGY9qk%{Td>*Fd~${gXDiiF?HzF%=SIK z9I=-f?jKp?>3KaWj0oX)XDaGqW`|<co<4aU(F)b;c+|&j@vArQD6Gw7*`PD7mm51m zn|f}n7FfaV@^=a)_8z16n%-A03cr|ZK30~tKLS_sqR`@C(A1}-%zU}fKG{gP<xcH- zf2u1O@CPXL+WQI-nClB(t4`Pr)oL*cxI)Y>Iyy3MU%Wtud@5{_QpjZs_80{swH9`Q zYyR#r0<{CB#y~~}?Pb_(2P6I&Zi#VpL;8X<nF6zhiytiDeSkzk0o4$+a=L%s^>ve= z=YNecQ4_!tXumcY9h)bE^hz{3LxjmF&{jLc!5w%cFWi65=Ovr97d8&kBM4^Ci4E-~ zca!7aC7aDH0T0*Y`Utozk!yr7m7igH`yG*giNwR%a(lAqmR*OAH^*L%z_s>JK45vJ zZmB8p{vnq5eVHS2h_=vxue3&GSTC32C=x}VdyV^7vj=`nKx3$;(oss|Tc_VNEEqnl z5-b<rrvr=8Z;LWnDzjGWU$E9_gx0Kt_yDL`EVNf5&j2YcH6?vQSQxHPlQrhuAUtz1 zF%=cC$UM9|7CHx@-+TWb;!#}f`FX2k{FpJRBF}&F+~z|n4CJ-npDu%byE4KqBqSvK z?Qw<YB_WzYEX|M@<>~o4vO>c0AEmLf!q&jvao|b!gggrihSZb%<(N+#)4M&Dg@=dt z_U5UihAGqwb~kGZ0WmF{0rc`!Qmdh+qRRCFu@1WdktM*0?;E{kI)Nr40MQHm9(Wxv zd5Vieke=R=xh*xj6oJ7rfeI%iS%g<6B-y_`8fy=ISIK;J_NG}QzZUSzQsQwISw()` zOHuHGIj6D4ikIis7O2%+tK)36e}Cr90@|ZwrZAiERgISK^cMZ*tG=W%Lw3D<Vqg4T zHN3)lY-)W3VS6LM_Od05%yoG2D5!-U+Wl2LYxek|0Rhi>cp+=vhQDZ!I*9a0TS=q{ z{G0{3FNNz@S5+A4TZ8N0+&I_Rl&1GM7+HGUe|i8Y2G-~M69HpT;Own{6GG!fPz{J5 zOi#DyldrhACWl49Dk4=8Dd;fb+A6zp!VKZ9Q;6_p7Z-U8w3oD$6j6Ko8Or-ZSE8-d zCw3??9w0OqPLA^zH+SFs`V=(J*1|-HWL0%pl}@{diQUb_(IKzC8087#6BP>&5D3F} za<E5|*$@KJH#^(i(6c<f4TT;2lLuZlngz;rX^v$M@Jf{Azq<|v1zhE-HR{GnNG-Fm z&de;S0%bVEhBB2l(8lFJlgj4wkiXe|#4g8vO1W*nlL0#=s@IXHUiga6p))E|+>g23 zn`G_P2If`e@0K>-cNN#Z*W*T}A_A`bA$*Fec+_YFF3U#hH7^^f|9!9PeLtU=RAAC@ zKL4;A<#Z<b>*gZyD~|kBgeD}jB=A$7wWy)YR?BkLjdQ>`Ck`UTQP&9ab}@coA~7mp zuAi5pk<8Fq4-*SDGaEkgy=$i1!Q(_Fius5)b~Saiv2oZR+8cuWmjdL)w5)zXl>VR; z?U~+ud!x@qvk11((lZxpDd@<3M1B^s3Or}3<9l1J+G*u5Ra0y327`loYkdc8grk7e z;{VhKZ^V9s@!395cG1!ja?$Zru=pjbt1cNTCmpC|A6y_ulyGGG%W;9JWr3BEx5VV0 z;f|HfP<?;ruBxQw0_={{#Cj3WsqSHH;Oy!wHi9dL1(-k=UEV8IJo|pnM@Uc_?<;N4 zMQ)c!^XKmMR~*`&{p|$KWxe_+&1b53(VIL9=gu}X9TDjQ1yq(eRCWY2RQA32Ccd!y z#9k<vv-c6)_-Gpm>6Le&Q&Q+p@`9kJrnL2hHleRwvAv#``O`L<p4Xrbz1Ppvlk1At z2AG}}^82RzzOu5^?5)V7rH`|<#{9(AiOiXQ#?!Tw#<`)dkEmNy63hLX^S$EQ>mNNd 
zGa~Tb%vNH@90bEhElxBmQ5hmQZ|s@#@H@lI{daGIi^(qZ>~WP{-#1W4?iSXUX8UUE zbjH2qC#`y`>2l+r$-suRD<ACU-`-5F(Oj|jDnx^X@_G0JjQ{9RL?`z}Cicm~sk|kp zyujfR%G$;MLQ-prNoKrR?M0CP^rE5OT%`bn_&EA1*qX527(CzH)O;sixwQ|hFvxP+ zA0A&$eEzoT*)uHAvxNCnSm`|w(bCfsRdUi*k+D>eur`$yH;i-xXJ&5Wk~g|8G&2En zsDN8K&`Z%lw3;Ek=N7Bpy3FZvkPVwE`30gvYU2?hoM7EQQgbG>w{$l(7!&4q5mHw{ zv)ndidEJ&foWIjsec~*hHB=z^7)7=A`yz*d!*&yO?oqW~F1a~!jp5o^RzQ4%8atlB zARRm>)6kR_a}wM>yS$K6W|-51g^;cj{r+8}J&B|LDNmv-q78xzT_t6bF75p-J!wtn z6?DXp8+UHy`YYt|M9<7f&39eQb9ne~dk4*5Ek~MiC74J<=K3JiL~LZH1%D(jL_0ty z6{x!w?H1hL5+C(~4h;RmO?~!i1_z;^)#S)dV$*zaN;&tT17$oJ^sa;X$yzsKba1ub zMcSqm4M!_DqyOvX-BaZq0+C&-3Z-VSzs<idvZ8Va!3oOVjh=^m53JgGk05!fb7hBd zV~dQ&DAT%kAw@}zNUXQB)}J^t^Gz51jT0Bq*5WgRVAn$8xL9sf>?}N8t(CRK)!hYP z`azW7!TNq<`8mg<udfHhMm@MiIgOKq843H#X5|LHPjb<ESJz?TO|8|$6_ucPTqI<C zMC`~PfA=uK6NmakObw62J4H>S)rPPgE4Z(e93Xn#WbJ4-eI=tPxe5;o&*BRtsE$;N zBmRKD7dI`#R{{oF<h0+vo=sw2BQ7RtjCiU$fy=G5&H-HyFIln@Fv$ZJ9Qqo)uGQZc zdWyeLs2Qh5n++B*qd~dQG6#P|%q^WN0{wqFteN&1F^Gj_xp=sukk0+R^Zk%x@k&fg zjRYI(h?MX<y*EJzJ`DFrl~@w*U;+p#G_@F>ABlqno>4Qu=ajkp!i)Tv@Lu>pkE9-F zO*_PX<M4C{PW$_dk*c2bco*!Q5F;Ds01|n?@HYM&0B{I?#-L2;_uw4Th^l!idkilO zjcuCwO1J<a9(tURQ;$UgWe2{9dxwbXnu~7D`IZg-uG_syzk@JePj^ID-B+Ex;7vzY zFXKy2bbJkC@i4T@9P1wCZ_E@-hVDhD=-c)MLv(dlm}9Wp%4C(uN}JF|nf6ZMYQT_D zT<#Y!I;M|iqLwrnSv`uJTtCvq16>4$@!u?-pff_aNR9_Z$zQJY@x~H9M#5$klplGg zWqLyUD}?FNrWex*uM8mrU`fzj!n~jl**d!E+VbE&7~0U<2y~2S4;e8T6ams~#9TYj zB6OsvqM5`U+>d@EZ6`_l4075~p>ZR&e1#Xz1miQ0l}9RW0;N}`yLyT<%&_I;CAK7# z&BpsjSpsjIiF4E|uPV<l=GXDft7JwUQmOyQ&A#1!L=8wjO=tIPgr4iEc!ruvQ;w!l z55e)>BUrTFm$r_rnv5qhABv|(#=cS{>pVeYXD#11-;KjhncqrT)ZdQxX>y*R+Dv3p zHaj!a+YyZbl#c~Zpn^44H`X>bW5lzzQKhCnhl^gOt-j7$XG%Oc`v`f$LUDjxZr8Ee zBA|=n=s>K<w3iIX>IwA<W13!`8C;1l)kB0*SYqRh8^t`8b$nIeX$&(pzubK+uFJ7l zLB^FS$y&DKzOjd6g*ueUyO^Jzvh4$2h7w$QR$yi*-et?iOB1v=jA*QQt^__V)2kcq z&)Xmxkqn$>KX7|_QJ;7U!?(3r1<K8zA4Z=l2K`JT#w!~h#$p(77fDL+<nw-<KvHra zg54cnIws@Y$BnEuNT`Qryv*m1C5^uq+_h&4-!(}`_(QRXG4~E6?8+e`JjclZvYe<| z0MI?9%>-zU!@U4*zsG5Pfv1k3RXx0l%#J%R*H_jWT5fu9^Duts59Fff%sXmmwf%a> z{zOHg!hmDNbYWHz>dlSW!I2>ZECM5Ak|4I#s-vfxdSkxhmxZ&4?9-Tyx7`Nr2P4bs zw=Eau)U>B@9;$VU(4(iNry8REVz<-NagG=|AJ5|KOi;~tgz;L!l;dvyr1@IX6-e-4 z(*rbiRy_i|^gmi!J*%sHnhNtm&$7viO%S1GJbCp>|6XV$Dr$-<yFnVIG=0bnK>lDV z*S$M<sqXc(zx>8rhe{Q}L@eX{d6ndb(fdlngHE|}Ov!yPwp#3@&1#m^n9K|hCv8X7 z!JDMZCjDcw@9(;K1~O51Lt*Qhm%9>}FcvaX-;YryYz-oG$1xgU=S53~mZ0DhLPEmy z^sV4+G0NDl`HOipvK&ur@JmwCNg(6voV>sx_z*C|6I?;rCHAisGKBjbe1!>ucWZ03 z_ir#;!wf5H@+{N48`*Lf4E-B@#4XuC(0z+lhuO5@>G0x);GDR|&-ziW$UWf?7VZkq z)bjS4R2TZnEsr;g%A=#pcZak{ms<kfV;8{3JAz~%#X6H{v(gzJ=pV4-x{DIF)fbhO zSG1K_s=CajHLA#O$+<f_6pT_IGLZ39+GG3dZi#iIPqXCSR!2(_6Y1MpI3~}Yz<UQ7 zIkGU`cn)gW5XYr5-nnM4!O1}x&<`bC9*QMuo}>N-&FD*Qw&<HT5iKHbq+Y=nTOorj z=)0u+`38T{hV)Fm)w!jW#Z5L=PC#Q1SF@AJ1LnfWk@Rv8SK>YFyKgYJ6a8x0=vJMA zJ$E>>pac5SQjoJ89c_R*`aihV5;x;YE&0|cJ)~dPtZsoUC)i%5|167(t=x4p9yrL> zsAvbqfJQ9N?(Vy;u9LZ~dCwa7gnYlV!WCZlA<U5H<(FF_ALy8gUV)RXo0HSy{VFIp zS&_%FwXys%;GWyxp2&~g|Lq6*5^6au#p09p%==sZYY*{1+Xx(e0&V?kE_|A>ctk?q zm7!ySvUs0{6Q(BqvP<ZT!>m{f&$9R^|C<E4;g2BZ!cD{C;|HwWRhwSZu6Yi}^I>RX zLUcY-d?%^SE|&P85mSJEaH3a{p<m)yNeNlWYJU_IA;;RHCEjg(d<5H&e%YFQZ4-!o zD=r01>uF9-<_C$IlMc)$%>a$v;rt6!TAKSDV9Tw99E#8|hRu!x0k9I%Pce~Bh%tpP zP(hwhc1?mOo3Er@xKz8*2$u2R8N8Q`&0#+3qT08;4{Jdqna!wwmHFBJ2Doi{XahPr zlfa4lxyq{#?!0BT9W93Y`(wg-%vitH9<|D_Q{3~nJ+Pgo8rFJWb9eh-S7Iw_vz5*G zszO$<Cv-t3>Y~WYu+i6RKT1;FuD9Mr(n5jVS`ydMtoK`!DZ`Yt(edw<GhtOm2g_x1 zFY#S};N}c^V6eq(`aF4>ehQtB{52B^ZO=<jjlkB2gx-!kgCq&KKs_Nq0hs~y2Pq>r zJ{AhjP*PA))>7MA&~|F;Dk~{5vafZshFfuWH+>@&^&7kP_sx3a+8iiZKrGPeZV?p@ zN=Q%dE2HY=8gbEoEWWZ0C*PqIys#I!X*DjkjT4uEjg$*i`c>#92>l?56cof5<g``j 
z*4frx-A$R=DxA_S96(}(VZ5uBtR%qS<(fmF<e)x!TgeV~3{)5x)Q<8<Xo)Hq;YO}k z19pVwq-hHn7C%51KS3lKJ*%307)x@u7&td67$NHXjYIVmjjeTUgr5h**5{qSb0|6N z1c*EfOVr}LO0*$YoT%GwN?Xelq83L)Z^~q;cGdDOP}q@~Z)EANJMJIQk-q+&eTGL% zs(?AE@Cid($A}}C%F+{|`~M0MKP5))eFZd!Cp+hA_!IggE%+M9;4?A?C&A<6)A4ly zSz`Z{ty`uU0f7DusNfxD_hMF700QVAynG`7mpEp;H02b~S4pT^I3u^%k;V$xBQ#(I z)#=WN#i?IuZWnxqwHMw3WJJC&D-2kD(UuhTV-%z@JvO~4HK8uDde?#O_`7;%s7P9c zR5aratxyF4o^Io(d(l1RN)!$_?(0&{GCV*6%-=1djKMa;s9Us|(e-hP-SJB)Ent>Q zs%4?@4q4xe>>b>eO-<LuO=Z{{Uh+q;Zj`D6!T1WDp`-RZ8)en)TM#BH&9kSO9eJ8F z&e~kEJ>=iq8#<dC*H{c=>Jdmm{#6bnXMR#%ZX$LjRw_1XI@DgozfhnONGn8MfFh(D zRaUkj*o9^{{}0=<tH5|vZ;-~B!){+e7hH8cJu5pqCpR}EGc{o`3QDq4z1a=Lv432R zU{xl`$t4&VdSi6q;}b$A(SFvT!C3yT<$gC5ji@8atg3_QS_ab6i;wU{&J$dVWqxXf zKlJ$)@b+jwqbxgWoaL%3G{PLl*QNgcK6h8XxAF)mOlXO5iKu3<P8S8)X7cz13Ozpx ze)i~m+0opqEnjxAph|{;Hh8k8gZ+6w%rvG=VyaF#qaXa+=xCrLfPriz%axce_O^p} zN9R4f5VO{8qG=xb7g<3<jL;wa!@}?wI)FGqvc>7y^$r0b3bGP3f@}|M4t^9jIT0F; za)T%c@ddRAZ~3#P91_jlzy+VEt+hI@wYx6ZV#HAg%o*u*vC02tBhnJUb>pF_@WYuf z!vFq4J3MPZX^vl*k9CfX5-nfZo)2Yn9OiWpYH(p|b!G@5MuqJG9^KwuSLW_p*?GN2 z8hse5oNMTvj3dy0x(qjU=$%l(b=H|`jtJ9!jOSX(If3jqOX8ZO0{k}rHoNO3;c>+( z{kOz;keHk>y^84Z<G*A$BPt7ECi_fk^alh-dcvwrg&IjImXcFQnWpp{J$=62_nQmJ zcl^ZD@-RGUo6M!Ni+KtVUt?wgZ@Ibv5@Bzn&3D1a-gk|GcZH2}ii&;=G|tG32me5% z2j~FAkxw85lK^UZV>IAEVVWPadPoew?glOX1c`KjzLxag_zd(89?k<?6sQnHnE%&c z-#B6*td$xfC-;e~QDP9tEQT17WyT*%9IVkD=+3{g9m%B^)Xu&(hzj0FRJEnmX4OXC zi;)R^6=#kF$5BOUFu1|}P4|YKteHxHo0Ltp3S{^mPS3}unu*OioiC@*#_Ks)Ez<gR zR7tUkPf7Z}VV6qwdkXnu8YQ5+R&&&X$AH}iJ=Wv+rn;w5TS6VsG8RU^4hwg>I^&s4 zoIFG0f>NVg1M_IMmW7sjh>>*eL=ayQ9DH@m)@5Ka%ql>~n2bP=gaI)D$5gB*&0Ct= z+bTuW4>BsamP5-4Li^(j9Vr8_n9KZtqbnUB<PDk$tYY!6dwvLbL5np6k#}<h$ulvJ zLE>3j_%%KPuEPAG84ILS>nLa4*?>T-XT{2IG2^Bfy3HRFb|{O_+=U{k4ef=On!f6b zF)~xTfe!HzZU)9e0)Xyvvj{E+C~ltl=ImF$+J#2ALG*_M_Q}M8bD=H5r3d{qmw48w zdDn`$+*FJCiiONO5A-b8$i3;4R;XfDT5Dd-u6H<PTbF4?XWzJVtIWi@0^TKgnQ8g; z1-w9Q`B@|`(EFg$=KR)xBZ@Yb3qEf=Y~LMFfF{zGzwUCcyg$=RzW|Ok6h{CzP#sk- z2XMygJ)Q{yOa;v4$v~6&FW&xy0s`!OG`J#-`;!4H#F8m9b=;K_uNjH!1Ri(5%tu$| z&BI5}(SG(|Z1ZTBj85@qsT+t5WT_iO2CJ+G1%V3(SBCm}{Mf^q#6!l?*;dhG$2uwX zcU;=s+SW<tQ-3qp^#OaPRk44!KveXn1RPi+9}(fcCP*Vg@VyBti%}YRInux!^}FDi zbk(LEt`5Vwdicm?-HoIYd(o(2Op3$XU%_|6K06Ude|40bA1^J=4iJZkxlJaXAmAAQ zAdtxD&*}_wLQy|Bus#(el+V(um&<<v@eVpQnKhET*q0|*eXg;#1FgDaf@Oq?bSc?B z^h=!w!DYDy&Ah`ns}-mbK+B4=nxzA_`3S=pPP_ITQu%S_8R)6jJU(|{j^6url}ds? 
zd$-^yQAELd$VQB8<#9{`_}ZFRTZjAB%Zrw#FHUjBWSt&LLC}Iojz|x6pl-vHFAU!| z0^IjLsmI}{3~T$UP@juB(Q|3>IeJ^1ga2zb8h^I*OshXD31sG}^0{xc{y2ocFs?0@ z<P>)CA)Q$S#_#XmAtHfjo6B2QS%0k?(3zh{vh?6))c&i1(ndJ-(t@=z0e(XGC$zcU zKa!^5!+(b31CG^MV9iy>XVi73<%O1&(WDNRosAWWKRWO{T9WB1lG&mf;Td^T(Nvar z+1s6YSQ|t!uKm>0y+MvkgliI;#d&iDP1WqX_3<j@reUVf!CI<Lb?7hEt?{(G9_`;h zhrUT5b{mJ~vN0DPFj?h}S*PTUaQy*tr*HLRTm;IlDGd_Cpck7JAg|O<s}zgow1%co zDR>UDbzCk3^H$HpsaL7G*-<nEa1$raF=Z_GW2y}ZsVw;;XPVUd@aPVqmTyD3jUW}= z#Ydb@u>REqoPkA6-6=8rvlwhaaj-i?Hw-surNu_3ARWE2YeVESL4rJzp5$}w!LVyj zP;8|<DW!gC4j-DCtx5~lY2l~35+pS+tHF4BoBUkO-oqSqnsfi2bMsNdvFRiuilvxJ zpSzf!yw~8+!$jV1&I%*zxQnwvK(^fum)70Zg;~E3$HEW^G1Tl-bhD|uc@f?1BTE&W zB)KE*cjfbM%G<`*3#~fT&fS*)cPaReQx+_oKdU$kBS_i_M6N3Xhi3Cav|_-<ZwEiK zBV?M1(T}<Zi=%mkElGCt)`E%i<P!d^N}MAdIN!i<kH7-TJ%`R9hj1W=0H!j+{*4hB zcM;YK9E=K!912TpstbeTd(fCJ*3(A#9!;g%)Khpcv6XYp9sg)uVX?3IYK9##TEu4J zrH0mk-Dn#sJL>GZYdxYzMv(eFZpvqWP8fomQ6sc@pIyDj_hHM4Dt4;qYbfsuYEb;T z9=UD}YdQVw@$`6q>Znc_ynAX(d<uC<G#DPyR0)$3W6FL<&x|YY7+3!pumEy!?!Hi( z6!lY@1c8RL4wz>Xi77V2fvjq;vCr~%tIqPHCOf3>sFfc7%@dZHca<38q$uOjRjiLm zXoBl7-(x7ec2QmaY>dqSq`LkdzD#Dn{23bghyAjY{4gByFrFLu$Q%VjK!;6|>WolZ z4AEH)xhe)cHKVfhVzTtI9%-LiX%&W<8M(W<UF`G9tx{2RNvzKNL<V15+cQ{+L5_8+ zj5GQwuBOP3q3g<li~9Z^@PBabtdXwrY%J%`K=)<?tu8yv-rk|jVY&hF9z9170(-+F zt)FJ7UdrP^GE2L;krj=DWbxbpuVNLNBxZ=ljpqoh+Hh2aibQle;m!Q9yYsLNNQ`@& zA@4+-i7uyw3!@_1{=pj|L-GL3baKNJ2ICz!$YZ+Pb05%s59NW&TAUcI)ezw+gPcbA z3eEtP1?QT0;+@RTLhg456IXuGW>jkJV+X^rpZQYGng&|e50Axphq({r(K-tbE@Q-^ zB@OC!ChxRAKf{El6y%xbTsJIj4NVO^AFSj49eo^W2zr?;8#=6e`+VOuy-zpKcI!0v ze|<i0>uGeXvvxv`KqU~yubNzG{j|e%gNvHLa8Xa8H^6`z17WN=SAi`SrAlQ)5UBpO z8((Xj)Oeua9MAgH^dN~SX*E%ePi;*gM|JHKS4HA(YItGz^EM#RbBluMDomLw6DIck z@M5*l=2;xYO}BA{%CcQ*#W=2(sUw`Jr1Tc~2MZHA4?I{q9UD1yQF*>gXPzH%Tjw-C zkz4|jv*P$;ZvMUln;Q1G2>vrQC$@&+nF5=eDz?{-tHZD0YOPc&Z6nD=chO}-h`=YV z+~>Lqw=EK?(-x2(oo@N9uJ5n0M<_O*asQA0)YX%IkmFKrI>#|=J^@>FoRa);CK)-Z zUx64+($f_f&=|cn5>yEk<gn*&gbMZt@g!GZALHvk^tD0qO&`!4)g#orRRkrC>^+^# zf5WQVTN;_K6^9fqK#?!LP$-8KFNK-0{yuK}ci1wF<_uZL&IyTm#yu>7{eA~lM8k!z zRPf^zYD|-CVD|y5DbiJgf(Z`6gE_yCF?&vR7D@gEn6NL&fGkw}!N)Lv*$r8r*7a%m zRBsu#i#hvvc^NzpXv+Xn9V&by+P?)et|BIX>%PvZCJZ-qaiWbvq3M9zf8DWL5ZyIx zQiFT|DPm9P*EC9`B+<tp+0r@pgNr_hCn;pakyU&}SMEF^zNn%oY)0c7UzlJ)^l^O^ zL&`sqD&Z)^>X6qArYAli$3LVIppO$;1PV{QwI?q@r{tFIP(n2$?<;3rD->PEFxlXs z+6L**d#jFsMFW+mnbw$^=!x^NK1vqvyqR>+?gxkIvrS>Zd%!7KqhCLKPR6X+n$q!G zVRwc9+(cqDlYDd+{9Phr&-Jh582;m>kr%PN<G!>#YvBolB_g#2$odAol!1KUzObJ# zbpYhoBH1nKRnrQgZ&6I^Fl9&}Ts}h{K!YDZLmmQmP*n$`2X^%2g@*1D17-gHHLAua zLOWKd+SjexRirgb^3~;l{)g_&%4=!sjV0&MAP@G8x7->!68Lp6Hd}a?u#z9JlHb>O z7O~b{bX(LRr`!Gd;(6e@O7E9^|GgY1@YQ;IBRYKG;_oAh$-E7#uCT6cJ&MGaH@Z+F zKS`{x!Wk~N9;KBsR3ySzdNvei0DTrF{%1G{RKbmE$+4=JsQ@tZ^KDB!uGV79r545~ zn1Dsl|La$fV!3e|u&t+`5W^q2fqQ?H5aED{sxwc#aU>S_!T6+zfI}p6|MMc1xG-b4 zw!uX%QRO63D=Y~cU|HKZ2jM4_sqTMIHf=1(tOl~i8j!$TbVoweV++7O8@2iyONad= z(PfRV98ge1qb>%E4VB%?j8Ha@SUS_1Qno=|J%foidjr6_w|dW<yA=p|6qSbJ_i<<A z^<d-cQ)*4IfJ!dY9zzSBXzBuw2|}|mdfaTlC$S8y_e!K50vt1i_z>Jag6DYSTf&4U z8tx>>z?6woUS+hl*U4P0ALjggBp%V}`YiNwMg1VVghgV>--A+Sy%9J$TR6Kt$rq9r zYFvR_-*Hs>9Y9Z~Gk%%~L#3LHMP-FdECZdyj+jCmJgy;1y*gyhsT&C6ehs?5BiZs& z>|c2`(jV|=uWzK2pr?>rQv_m`2k{(>!3>!^6cMElN=oy$KGo=z16!NRK-BWJ4b9W# zonLY1_t(Hs&~ZUj%lhx-aUKlCiXmYqPmtp;i!`vg^`M51eAqXX@zA;QVP;E;y9~2j z9spPgrFr_K-EZ0kSCS$})+$R@<`NplqCwK)HR4==oYYb+ObYV@S5)48xuSdW$i^7` zhGpY`=9Srlw=pfy$gu$hh6l68<~z`?<Kuhun_;_CtNXRp#ht6m6P)oHPr>9!tMvSD z-d;SB9V(p#GCj~T-4un=`owQ`L{EMDI5YMb+s4hGHLa5`j5dNxUT%fVuw4rau19L& zBO@xcqsWE}4KJ>Ip8kZHv5?YI+(6;8cdm&C2)1R@7<GJXt88_9eeax(AI7}z!!Bod zn%!Hz89A==8VcqZpYjaTp!_nv1xftfZTJR<tRqK^{|vkKaF4ZokHujD7FwPzkc}q= 
zKY<`yIzn$_cY;Oux3k^hdAC`0oBmtlJ%~^U*(VO?A4JB}6`$dC%eLbN{klnqb=qaU zPV+c$()`aKhO}?^fwvV`7!zA#Cuc_!1DpT0><leoIGBhSiT>MQ;ox9o{(qzFoc}w@ z%S*%{ZDMQYY)-_^1sqc(Vo>(5Hz8tBmov09F>)qiP;xPJ{+|&E8v`@o4mAs7;K58B zY(xy=CKhJq&M-{Ct1$@MS=%`(*&7&{5CNMt|92e;TW1qTdpm0bXA>f4M;G9!N-_pc zRzw`E|9Af{3uh;J6Gve?8+$uj6I*8(;8lKGSOZT2{{DBCh>4M%u?Z1BKg|E$lKa}F zk4z1|nk#;<*73soPmM)Ca=*N-BuzR6648V>HfV_fFf$aQ`RHhA9|Wp0t)E&Si6iZ9 zb1=+|`Yt>H>yG+vq{7%g;5hK83e^7=7WHB~pPgqmJg>cXA{cIfLT^Mxg*{(&Y&V@} zb}j2#U2cEcx~=NCrfr)1|DV)u@8_QH>F<}E@AL2Xw?~d`_+`8Iob7L?Y_nG1`}Nz^ z?rXo-WBx^fucvdno}BHKmTeh19_^mb?2WI_kE5O+ZZD}D24C9SB{|#i$LTfObrjMy zK&!ZNyVuXv^<2b*YjD5xZ2Q*3_3f|ur@-CA>7GhCZ=b!PoQJg6>2img?eZR`kGAit zhCNj?+p2e~R;_Qnr+DfOf|`9NPhjXg+y9b?)H)xi0KTzbD!d*<K98?YUlO4-UfHiU zgk7IX>j-jed<NeL?>Wui4Vx?WM&C|so%d6PJk}E*+owaj#PfXn!Xr)yZVk6-shf1V z`z-2r!3SmBUkCU;T%OY}&2|DG8JCa)7BAVjWLP85yx$chVP0^duLH4&gmDUysa#m) zXaHH*Hm2%o{%rm`Jh+&Gy!>DhFom?Oo89#|)e9m7J;zns6A<c9eJo#wb@!c6`MO;! z^bbn33q76I;+;Gw2N+Qlo%{RV>)X$n^;16!L0N$oT(}%RY*`M%p7j^L;hbCT>baN! z;ioMh903LNI3Czgs$aYs@ZK(a`(^lud%IY0pJMztL3yzZGO{0M!Tdi&(xM3S5JTP9 z=Il7Vr%%Bf52nAMHGNg@n#dsh5C0Eq?*L`X_vCw)ZTpsO+qP}nzGd6CZQHha%jPW` zuYSGWynfyN@0odP<}fq&I%{R@%>3?sBI6Sw1yaMesMKunn5H{+$3`zw2-%CWKLKp* zbH@e$?YX%*<i*K$7-@4{mIj>XP`4`=YVmi=?3QN{H4ta$Z2T-@Z>jNX_x#|D=$DSS zdU07Ma59KrWN3~p&h5sIBfBkfNUk|+qqbH(G=`(I>Xq{eZ54nHchAFNv)38I=7jSD z*Beqwu%32gh~AqBHJIPtufv&^R!Ah62j$`mU=4C^9=7L^to*b<0=zbkEJNz$5B+H? z(AZE048fT`Rm!&x3d70=ID=;gF+0ADl+Rc7tXq~WaKCocp8`GFs<#^perF|0+YR(< zbJe3WTC*XSq|~N{<}z}Owu(AYutbU|lS)aOoA6*AaI@i<ksx$2!(O<7KWAw(1ggW5 z;v0eMhe2i<@7+8KOmu3e-;^U2jZ$_?cv5@wpqp;oeLcia@b_1!QVxiOn3^3jToCHG z5chuFT_Z??&R_t2_B*Q$A+G4z`iGA=R0n2md*@G}TpkIq9I9f=@i<&U%U5?Ce~7+) z1Jd3jM0+;)V86OdS{zf@7QrQ`%mn9vC>nC6hYnVt00#)@_?}Qqsw!BAfL0CymafK} znO68l{w@W{G}?p~b`GmE1DNUtHY<1j)*I{XYwCX6O}CHk0*^-SVtO0jG0bACm{n-f z{4i=sz7=Rv1ciT-(v1BC)LH}`yK!gycSdWt{fNun<mD;DemIH7)Hnqs7qs_Fn}&5B zK+{LtsAWJEadwJ0Kg~MorATL+{o~*2-lOZ3z<ekV_uv!8LwKhvjd<+$WP)q&Wys52 z-4D!u|0eArG1y|0&KO8yj$4iOvB`7{i<pp$y(Fe4YNM!kATdbJN&twnwwvaHLbp1b z4blk$zB6&WFmnH5c$8tF4_k|l-zk#Hji37BVwA=Z)X~Jhy(gFk1t>!G({?CD{Pzt7 zDP`{TLXh}@)&*Gn%f*r1FyX_LqM|AMi6!1)@cC~cNW9i5@%tjoLW0R<2kEdURRe1f z*psOV)eF{U*waYpY=p5q+`>T?5)-CX7(cDl(|@%lLz<;+lSwh*!(d3pJ>h$jmx3%L zWC;jH*0ImQB!aCa@rM9!%x9-o!`P4`jC2s;l?3R^qtL*ZU69u7eY*HDD24Yo@TT*n z+2=N7$9!cmCE<WEco{`Rs(%Z*wZ!tPehi_Xaca$4c&mk28cty9Z=r?zuToCG7MFrF z@ZrM!Y>eP{ts-E{<ACu~5L3fT{_vs(k&mh~$efT~Y~UFnM?mP4IjR131%ov<*qk@% zzpDyN;V@+YxjWGtanU-C17K7>%envgyX7re390CZAnv~h-y<nTUju*7S|4H09LnPn z5m`sP)U~{pnHTBB&56!V$}iWq=Q)Dr)E8%Af$&WoN_Gh&xPl>A&_6#sRfkVPBf<r% zQF1zHM;Fi37Ir^($#6a6XqEHT>~>`#&;r?B=cFB`zX3bgZDfI22}y%^#3d5ADI66s zNEJH*>UGXr6eQr9O;~0wTrI$gLUJGKwa_q~O|Tbys*d^9X<#)Rr5UEW`oY@@E#~`0 zV(g5_R>A|Z;z$Ud`b0X!xm&Mlf-46fcl?#cR3w_bGF324YL{Avo?#Ex49pIg016I{ zZ^7JE*}Q-yogyRw9JbUD_><;AX&yKe30^y1wW~ur{@b=ULtn#N#5G8>>IRjLVMIUe zfCCwZQZo&xkT7rYVH$*8Q_pPe-P$>*Xor+o(YWb4VICi-@Sz0&Q?`3X5HOpw`GPnB zEo=m?B1%w70gZx2r#frq?m?R6%XzXtj>*{|Q`|u4w*(-o^C6t9AtF((fjVBS89QhE zFL9*F4FuHRn`hmRn?aYtLy6sUSkj;V<gXKSnPVuys+-f?H-st67WgAkqEDR|XFpzw z4FU1BBS|T5lNtsC{qa3s$dCxS_j(&x4U=I)|D`}cGj?Np=4cWiYarGI_Kg5O9Axpb zGwK#}4Qz%LH+gupiGGtf6xfhtb@J2mru{Lp@C6p|Do*pP;g~d?GAjwy7ceyA_Q%pd zT`-Ua-&xd281GY*Io;$FvFUlH?7sO}$`z0kl_^MoG+pQZ4+zfTMn;lQM5sEfO5th< zanHUdTP)d>KQ_aWa8w1uE!486Qv&!4ehR5hM-89Jd(13;V+~W|z_15RLwELuoWa{c zgDgb`_y9Zsl#3RcLGUNrAkM_z(~=4+2O(`dI8Gs9xC#}1B#44HW$J(JU?jFY%m5Ri z{HmtKD+{ma7wWEeb5yB$RrTj@;i!PjY_k72M(CL)L-7xA9GVY0w}vsr>Fa~*Ecxjk ztWF=MobiKzB;1r>g^}w)&Y;3(4*D^rdAOB;VzJiJ<dd9+s#`-fyzPouiLnOA^mi5y z(`3^~SU6h3S)`n6R*)mpzygi;GYJYJL)LF4G0i;z{v@-eCHbJ0l=@mAB*vHw7P}Mh 
zD?D<I+wDP<@{#fnaK-hY?hB=8A_n2y74;pvX`+hHg+QoEv^Vh@wmE=7l@C|{%b(A% zxM&|s5Z6SqCk((&Lt#>U1QiJAN1@5vhJ#xJ2VwkEvXm-@4pH{t4JfA(7ch>CUF+mP zu&)fY2sWr%I6)|QMctA_LYj;mM1Zx~L{cjFZ$&aHGiI3bYpFw^5*MPQ+zI!q^GSDQ zES>y@mQV%c!gfJ-_*#Du8%}46e6~jQ9I|tOZ1Jen<-8>g{9qxpWEyHMdmgrI{dG8) z#d=|fNDJhErm+kZ4^Ls<jxK&HLP;WULYKOF+_E%8Ju?O&kEk*UF<mtXpd%F6WV>a^ zJ$xyPz!gvNpg^p$I6bX>M#NsWBp#p6Icq|0u)Y!64-JhhC56T3mU_&89;9N9Gus_b zfiWVPPEXdIB+j20-^2p!gX)Pqny-+&u5Aw;*x_PnpcTEklxs_3feZ{ljz~7C1|z{6 zzuHzE=i{tam!t?-0wvo>AzrxXDF624=mj<fPbERUx4ZW>6%$iTGKtClO{_pp6=Kc4 z%a7M{UnfY~LKvfu0MdIu|0fgRY$ybeZbi}@RRUTDo6v;wi<$|>D_OWd_IK(^B3eco z)1|!S;Z<=3<VKJp2eCT(knGBK8q-={W|;==dHbD+e<pc@Gs2OJ6TN#+Bm2M!I*<HJ zE(c-I8ihaGT5wi6m_6+%o<9`9fNeo?%Fj7q4>s|8k7gzs6~i6VLjxmgaO;{nzbvB4 zh`qe|nkhWaAu~<0M#l^63xQ0*e1TAIKLarat~*=_{L)+GXtmBHl<@-|BDWubWQ*hR z-HUyOGycP^UH~$CLa>@mt38&uC1J@?Fh>K))JGtt%8+9N8~Dx8vqZpaj4I7B<W|Yr za)_oogZ%EE^YK@D!Fduo9f-YOi5X3%RWx=`t6;Cb$Pr#<6rj*{m3xIBv`FNnV7z8@ zcGA(no#A5+g^humNzW|<&)ybEP%BJ-C5mUGk+@5o>}9Av0J~-&mREKDpZW;Q-^kd= zM=k|CI=3$6ia}ZT7cuNYV*EM|q%8^~16Zk`HM7|pfT1xX%3vu$7#88XY?f83G?s5m z<RnGcF~@z#Dqk~-l-5vBrk<%`#7c;;h2r(JA*&HGo|9qLy-Y-LMG7K!$fN*4(hE+` z&Lfymio^-td@7bH7iTc3JDSlV8b}Jg({Z^bbeRcx>`x*IvuMZ)q)^bST+ojAR6%dv z6C@Ci^|FU4X7zVaX5^rd>kc5WbYs8Z%DS#m<Uz?=Xl5I+T6iD&5!Rte<>M7z*KC_M z5ER5Tq(rZ~=Zh9aBB!Z^QlLAy(!2<<9-Z;8Y0(H;m7zk1Dnqd4tz`+ZCe7^<kg0xM zfb4k;U~*;>zvhepTvQfh1U6umh%OVMg2Zhy&m{vA6^O#(ux*2|US{kE1(z%{2D}g- z`VcT`6&Y#t=XG|=&+N9LhM?Ogo<tpCnn6$wH0zgm=+_Muiq0G@vGJw3AZH$6z`$B% zGHT)~MZO2A#L$6gPI{#C7=fo|-C<Ky?5C-iF*h`ku;<T{JBcP5RNRiQQzsy%w$zC@ zBf~t^ii{#KqcAXpEen71BJZ{TWSlkTq0oFEtmHN`H`gIbgk^yYhdeA4^b@>|)HyN$ z5juzVjEVJH)nL<UnR`5Cmaz>r#8FLgWn`r_?jX4NvZEueHPYZzp+o9ne!_V+-Oyqi zoQERZ6Ij)w*!L9MJ%x3$wP1D@oR^{JKM1?WY+G;@QhC~|x@2FETHJdx+^eDj;cJO2 zIAc-`K@lV~VPw<vnS&;*VDvv@-tx@{QDndIORFE)uCH{5&ZHyCn|nyc?{_pxG03dD zpSY=8$k=KQYQ&;lhNNQ#4HSl37Qu*sBl6iuA$c;0+~E(&gR)TtsUU<<G-C>~DHEXX z|8?mYO*vbf)L?REX_2|uR@NMK*U{z;%t}6fg}OQs{xc<~tS~i(ln%yYz&OmXMuw7! 
z-Eht=2cK7<P~uJsmgh%VD=fl5!84hJ-3O?Fk)?yOi&(%;xpkh%3W+(pMbyDreHvdZ z$7S8u+(98+ks$z@<9+c2%PxT0T%O7p6!65q2g$gH(V0xy1Ct&YNqCYKgld9UF_aar z2BP@wAtX==G^{EV<six37=j(C(a_j*7OY>~(YeGLuK}*@QlLpDQt^QR69r5mt){;t zr*jv=(Hgf6k|wCH7AgLPxRHT0W?;j20o5N#%dGei9EDk^U{5v_Y3$j@nj~LQ#^}`m zYZC=Qudh!F!8tCfShaITr%o|sIS?gH)|@Zxg`Gx=WIywX0D~B_g6{wo{t^M{zDVfQ zR|z@p&@iPB`vlZc?It+8Ac3gO*$>&l!q+7e#igPF2~bFYRw#tuOb+#)3hgHgE53|_ z-pe%1%)|L^G*g!$!i-0KKp9V>sGR6F;%#jJzE}O=PrPP2=z$Jvlkirqppv&;P1Obo zb|YtfGePcN22i}xLhT*}CZ;sN#Iv7ruz=3ImXDx84tZ>g=_n<F#AGo8uyy4gl3-b& zbOorQB)^Ov12R~cH}I6ePNvOUcZ|zlN)-!{@2q6L9aW)l;`Jw(s|?qr8Gk;bG1!&H z88bXle`rP#AFcX#*IM6DT~P)(8aa&|B({t=ndR^_OZoOwAk=S>F&YE~7CVNUJ}KeU zvSw~V2x2yHejn;p^CGjtXiBP#dC=ANp=4i6UaXlc6xi|^b8#c65Y;<ME*APCJB&*# zedV^al?3DW-ImZRN<qU1C3=VpK5#hyW^>x5j2t&#UCaP@VxeIJ3rEz9KKcP&{2?In zrSb$$A$zdIVtN8a1tb$3tbRnJaY00j8Wpur<t$6Lu=xgWU$9a~jdF8P$pLrOr{bfC zSEf^Wr{cBhUE79VYx6_k3-h1F3m@mf1Vyn_fl1tJFplcRyA%`p@z=LhLt!0Z80tEC zxa0M8Tl5Qs%ylH!@fRck0vpxjpQL1<kMjgAX_b)89m*M4Yp6f{7^_5&UpakLhL~ZY zp3C&1%mO~64~<JQRKq+8S}Bo5$~GpSR%E*xVTUl!sqReK4=ycvSKp-F3|&{)7;vFk zr`XQO*4~}2E;DEASopwi6;1YQ?-T_F3quwfJRePtYS8?kFxH2hF+_@4i2UC8MKMuG zG^Y5dD@@+V7Rn)D@`CcPCZwEqRXT5;maIS^UHX7BcxJ+#;{6XThSx5YrF`w&R{GHk z0qT5~qL^f=10YZxAxW`7a5UisxdFKdxW>6g)quq%3@~Io5g9Wg6mr5w=!}mNjV7iN z^6x=hnN%!>zNmK;r_vC{GK<v&u)tL>j<O9B0d)ZH(d0dLFd-t?lDdpCk2%@Dd%F{b z$@rnfjnl3)_$I|{!eUawmW$#!IEEI(070}T3q~xWur@jlhPtW!Y)G<>zY(iVh5<qP zveg~L&|W|3?FzC2@&)z{VM!0iqNRg=To+(9YEWQU05B#CBw4rBcvI08(eokiX-uwI z8B^wII5NY=MHwcYV1KYHW(X5oF{aUL#!whrQ*UBalM!`oSQ3_O&6Pz)m9G(`%FJP3 zmMe>r7%)O4dx_xcJ3!iyx+(FB_0hZP2ksb*J+X-pOK-?lZJprDTnDq6nP<&tXNq-U zQ(B2A02rSdSyc&GolQA9^~pk@B)H_ujWH3%mdBtQtdXKodfOxoTIhERYDrB!bj%Hm zLOX6<_Gc)Bs`fLOrppvR8K{4;8HL}<no`b%MAEVD9$~{dWZ)Y$<+GA56f16e(v)Zt zx7c`C8L?y;tnU%YL%wOAu(qvbk|z{QNWdu39%OlDrzta-LG<SluEjKoBfu5+$Gye+ zQ_54;L}g|{hnCTH5;Ew|gO-iKS0^e(6qzhSHLFc&I}f`y+2$wv-zQPV5U&E!Wvuvj zTge@W?H-gdE1Cp?4K(#P{ag^C<qAirsW~I@ujr>%Jl-IjDddX#W}EhY`I?uAz(g5O zQIf2X^@)&vtp4H%`OS){*v|CJtj(SmN;@W^FiR8klGMP(mWr|k@pqUr+<=t48pZ`s zTu~ThEEn!)a1-|`oQ8RghV8nyXhIHnw^1`B%2REQAK-piQE7&GGgS4xu`&ke$_8dQ z__&P{h(J^01gteMLl5v+`wLye%TtNm9-O$v`uq&&tSZ<l$mcts2YlX7lRHBfPIvFl z-V=7dcs>Jk?V%@Mk00N!sCx>Zk9r@Qv(P*MNO$%I`T77ee#RJ_GjUOb8Kv`#7dUn) zk}#-9ciTnH+|o4GZasiu!WE^?`?0xS%YSv7Ct1FOlUAz}e7vq>sf!2DSh(bOuqjhB z@27TwF?Eh`!){63;?3~SdJVRP#RB?QB?%5dh47*Q^|tisH{7X(^2SU`R>uKd*>Cd! z#plE-ya-BG9k}GPV#_;#uapp{%a>d|81!bXR@GQBuD#<k9SzedPLUGL({5V)X$F@u zp<_OmnzbWuU6<H$hOu)qB+Zmv97I)MFCYs?JG&UNe~Yw+SANk!n|!h4mafwt&+Zl_ z)EjoTAot@}LY9mLT97UwYEB%jX-f&T7^mPGKKu8)R${{Fp}bPCPI9znO|&C<)sWl< zfORI94X`@Ym;eV#1u<ogsae&OaqbqAOq9sli$@R(^(;^Y9?4L}9a1x|CeYnE{|U@O zO^eZ&H^^kJn;P6rh*E`{KXa%#8gVR9y7RWwCOKY(P-;+PH**iIf;p&C$;!4#(Zpng zA>q)K9GGXsfav6nS6o{5+e-vj44y^?V~t5a@4||zRAa*^%2(=NPIYVPVSJH1&15tr z|8Ua898f0|YfeetCp~vp88BZnei<8tB6iQ%jV%1rg#J_k7AWuFx<tJ+Bf3B7!MQxa zPMO9ZYHR1<0GzICPFVJ#&dTAC6M=E8nV^4D5tB(;b_q94)%A0SC@{@!9rKu+*M;WW z^SvK{=m%3&2(NdzZ42r@4F{$mOUedjo_AetlBrU(3t+H3#KCb1Ij+QpS-tiKB<9>l ziX%m%ImM+h!+Do<f{cMY!E5|Ve5!-v_6N)>c?b`ml`mRY7!;otwzDd_MJe3rN}w9V zfx#xqIxXumBnYfUuEk|nxj#K47b2{^PY6XI11$rA-1xQgtNhNvGdrdEoN4Jrsr?*P z41XCglA%?5h;|KHj5qvyumiTg4$NI3UWP@tPF?B1F}Hofs#wei4+;aIr=VZ{lnAlh z_;<a{z^W!&{>oac*O-MgzK*RaCR>mJ9QJK6Tw4Lu)t0RC4&R@UlAZ6zPrmQk@6SxN z3(vQ|J_dGP8+y~x=%2IQ*mM)vxw2|pUw^QDdR_*Dv$Rm0P#LpOw6%}99ykWg+uI=_ zidNn46wSn%>BrlP!+<CG<CfhS3F7l%F<?B^BB5UU-6u{&d}~5H<^;W??$H?~nO!ns zai)dTW*%n7|MdOJ+J0yI^)*Ukhb)W1r9_Rm$jW(1G(BLu3gnc6OG|HVI63ykUO3Tk zJL;&LGnxLyaN{4lRGtL5ljc9goi55&laF?h9<L=C2eBh%!fZ@K+EqF9w@vVCKKEl? 
zQJPGR0+^Rxt(2e}-BdYPKhBAe9*NP|+85(QCdg87amL+4k*ulFVMCiH*kbk>*McqS zZwp%o5ebhHO6dgHs;<C=OZtAKh?MX9ug~$5O|sUyQx(eo5E&1Aj={#$Dq2s>se4S- z`H1v^VbSFf;<$0Vu6?~Q`M1Q6A_e*y%?)p6>EuH9N%C2Ib9eKo^iA|bw7%+_JM7R) zppVMmz3LIOyR(=ostld0ui`Q|V~~W49PsITA&P%}**8H)w}U2Q`;<Zn_N#F4#$%>M zjSFva4y7Dx{4Imj3BadR0*q->R9Old3Uyx+-v`}tKlTE@yFP6QM=|^{Q(sgrL(yFO zn^G36RiAiJFZhV01%i`#!WxdyuY>c0COX_U=5Xpg%^hW{2SC@dpk({kRPTa#W?j_< zGgE3;ujTX}w}L&9mu8W{q)&&=-iT$pNCq}T-b2~yDhahh7PhUi)Q?5_p`oE=(dOD{ zWe;vS;R?>1ZlIvaQyp4HVS^i}l^;1<QS_199BUw!&$>UY<fa{SWHkl1+a%d;oQo75 zncwikw*<x)%>@8=2ltlAQUYZwcpx4v0q)U)sprcAt&8|)tx{e9S_Ko6TE@MK>GE|O z+Kvw4AX^<by?kGr-z$e-VAAOT53QT`w^FA#Bebj-%oE6d!(JGvlAR)8oM_G*rih|C zQLql^la;{=12j^Y;OSLsXh9cAUCW%*J!kvbdYP7m270CS{5e|XTkCu!ELfC^*U}#0 z`*SNDo^3va;;b{Mt;sMK=UzQ_*#9eNTUc2c@~v37u{kQTLy;{e5GIF9w4-Z9OB0Mj z<R?wH1V{)RR{sa|(hj#*kW_7NuiF8#5cg?{h4Rtq9P0CF$!{^3kO0{N3w>9PHUJNQ z{ONBf%FA`Gj0$V~pfVw9Jxv1?I1=qMBBmF|YK&2b)E~!3w>-4xp34EV5H?+jyE{Ym z;A3YX?rIR~nL15b*<Hqxte=bZKFzWWIoRK{5X5&lzg<HXhbizB3beuibU>ykecsxA z{V6xhGcQ_jGKw^>L4M6<<1%5$s&R>=jr#?7K5{!R$>QwFMvJjC!<swEOm&BsBE!%= z|E4+_dvwyWC4l89_Q1Wsd8%QglnDkLvQbZz4_1hu7{%P8&n&xg3{glEPC;@?Pl{~H zS4Xjt2l=eh9o);`x#|WkJ^VBLq2b~|K{m+IDBHY?9VxoFQXrco_&V_jTws9@$cx@% zR5mCXtm9i&G>|4~KxrW=`%~3H(U9W2>4fD9@4Spn3!`Ulp?AkpyuWC$#;ED*U|Kn1 ziWA2Nq62jetd-O60|kf+fPm#TK`fNRtC|(2L_|m;r*Q>J)iTX10<)$<1=ufHauwH0 z<>NqTm9qv5cW^^gT{`n?5FMxw2#LVV?IX7JT$v53NI5630*d=%%1Zosak~pgk@rSQ zQ|127k1aWOCJ2KEb3dtDRi!!qIM(|sMJf0I3#yX4rL$iz!Y~nu(sb}FL*-PAHcu}a zmRR@v1yu>&2oySb&D^m{160X87wI{_!dK`M`9K<D40tY6CAX>KSNJ96#Lgg?)mf;f z7fGQ(%fjL|-s4PFPc|UPk?werF4rf4KFPRbeK&Zyuq4L9R*P*(4<MyW@+>m-Go@C3 z!L&}cf#80hl965}r|SLI>)=zF(1uZ+Q1Gf!yvQ4oc(N<e%iA=O!feBbs*tTX;eS)Q zx-qE2EzxYSZq5geOiEEnDfK@@dsz|?PR!D(^VlK>gJw+3vi&8lcL2WA`xInFX+YwD zMKKeO1n=1|TgYh-z9g#3sk<W3%u3g@-}Q%+$xh)o@spAL8t#u!g9>#%+8Aa9GL9C7 zJ~RBVvf~ldVM>9H6|UTLK$`<*$QnT7P-C9&vE>YopY611q6+(zV7i{tkq4Gx2&1f4 z-cl7ntUCkD#_hOwg<bMV@#u$U>d2OV)YZ1%>YoTD<G-}b;USN@h-!A?8$z<J<+YqL zc13+yFwqHx<dn&b46P@oonK#W02kz)IkD1rC(7U~y_t;XW#bRD*x-Tb?4`p;<#ZjG zpa^bs-CzFRf01!A($oFA`X=T+KPG==RUB8!&J2T%Zz8~~=AeK$Pu!R$E5dE?pOs~A zmrh)FKl4)5^7X!q#D=OEcIon_``j+hPLlC{OzQE<e4qbCyM|Pg`^Ncoeeb69jjCtc zrds-dd8|ikRXeQruwA@*y~!Y#R%*~<6$m&tF2SZHg5Rn4aRsk?&tb-gh$?99vTr>X zy27r9go%aqaMFY|6#CSCzL?fS#k=Y*)4OoGA_d+T{=qF%uXoGRz6*S9K(3NS-#~bs z9gLB$b#q`orp#6NM+{eaDCGEo&h2@gwKbdo<J!c8ZtdCN{qKWW`WTHq_~DE}F;K=R zGs-7j>2u7sC;b3j#MT=}O*p-uXf})7A4gWOSl*1#+OSwEHn*!PUUR?oX8+u-`6UL9 zM;`RO8g%<G@7oh!t!~%anQ<J9!&6HJ%!*#2G?I>y>1<;6<{lv7Og+%qUyGN|@8`eM zq}=a=+g}U1ULF4)Pu?9zp2>0jVbq@f*x7?KX$vH24f>v8+j!;to=h;S+f#WXY=x*b z9H2|#9(-8qzMXkE+SgLq;m5@JsIredy4J-#Omw)MS8WZK`&#tvd>DievWclhSeaaB zDGk_Pz~)Qo7VnQ({Uo+3hQ1u`LsX-ffKIcDlN5<AEK~H&k3D8V%1*3lUogOqZyI3$ z&s6fYzp5u#IZ~D84-0gd)Ti|EPm_uz0&Bs5ne}<iJYeHfy!EgcR)W;LgDCZ7_s;6R zn6(^k0yi+@IDw#IFCuiW8WX3|Hn}1E^2KYG2+k>UmJ(pkC##wB$6|LFp*w$spLphF zIOXn&zrQ?v(|;xYQ>}_D_Z<KxWXsxN3&-)p7<)6@({nnl;PyVs-@tOup<9{6M5W%3 z1?eMue=|BY8zX{q#Nu3DP!=(rP1H)3&Y4ZyEMRTIj43+YoAf6di2GMDx?ztH*`k}d zrv&ty)iKIYj6VkyIt*O$>vi3Q#kt0j2<Bst8%&;tr*Vc1<#UN8<FlVnNQ?HH7ZmuE z#tX8zs1#Pfm`m{H&>7J!9A<yMgaMkaH5khBW^(=9-nV=t{cJ!n&j6prf$^SpIauU9 zN@Ow8b<Q(`&gdti5Kqsn-<PT?Y5;kbkD^sju7sq;Dup`+h{k}pE(x8wr`w1QN~OWJ zT2Nr}!Vqc%0B?W~iL~S0)ixm`m2vma*`qmnJX!pcLeTU=j5XaL=gLZe<j6R_2JqNG zI!$hvMjP&yo1b!v6|O)VtLs#QP&I$EjNARHK;35ii2;t2cnhvxxlrD=sf!7=#C8jA z-_(skOY}%|5Ej~sC#^JgrBTW*D>SnF;$&v-DYFvcH6KA=bK&)|q$mrwfMG$fa7gOv zVbV~Mbt>5~dhp8|WU>cG0ely%oMj<za9A~@-o&eS;!NHghS{-D4xA8865Q?^g)<>J zslfQPRd>%ntU9YBV>O~5=pjlwn1mpykKf(i8Y4v%eYLGQ&FV3pp5;iQ5z>H`QK6p6 zIH&{FDRSbfweEeP0VB}gSH0pb-nK?^G{5&`n9T<#q_aA<mKy{Wsm{>2Qv=b~YRO~| 
z+ulcbJVLH`%(L34_&8VhU`~?4wwH^W-Z()aOV^A;9PVIV;<b*TJ3JW2@O{A%pSbr^ z99KInU62+sm&od(`T4nqFPC?ByK`3DkAg;!@*E{!5M8U36YR#ejW3nIAYl$c3)!~J zVG>{;MBX4imWM|umtBKC2!2rbklb}J6g!1j=*RutHQVA867D(%EBEo5L<31Cs-jO^ zcB$j5N)bh*Y^9ok)gg%6ofAaw+@w%aOx}l1#wV^wg6h3wh>?5ai%^*LzLg&v1gLmA zCTzX^Zob%e1a0V<A+ZH@4#(Ll*wW1iQ~UdV_-Fp5Uy`Su2l-Ope{X^&+I{b!z4%n? zyFiEDd=r!!MB<{78bYuf{u5vAF4;3<NrQ_1Co5IZg%BDcl|fN<^?>b{y6qpbk{Ube z+!D<&f6rjndT&6-$tD??k*~kpV6WjrCoUNdfw-Xxf?Q-%gKzepdQ`6t(MLIZy6J_{ z*^yc5irf7sEW@;g!(gT?tVb-^LaS==A#*7OyxqjnGt%!3fklLDeYTa+6=p{CFlM*Q zWXZOx$4rDMWWz-FTe5#~d+ym`!+XBEdPOJ?h;^&?!9bAhxl;iWXzaIClqi&a3-OyA z5{3mJf71g6mNbkS5fkHytYMWb1Gpnn*&LD(aM|dO82aUORD%sR$w>d*+Ydum$pF`> zmQZlt$R!V0#GObVVe*RjyqG=n$!~_n-7pOM00f|N*>IeCtKjC!&VGbRcXmt`!oY#R z+e<N!*_{^Sgx?B7JYkhl_^FaTSxPXu-f8*{X&g1nakL0~?KOfnI9|+ayaazr*^0gb zZm?XI<=Jqre&CZN!yztRfu`l?%6i|6swv2S*1q%g!b+R4B>=ZnGF{TQbuOKPoXoCi zA~jz+&G=I_#83yyqKH&H$wn+y2(eOw^@VquY<fENGk4LrL!5zx&x5l<ol&yACgD$k z093lKf<z8Cm5YiCF@x#e;Ga^hm~Ail92a9oM=>&>A-touJ2y~FNp_OBcmXryQ9GSD zS(C#TnF9+nh!Bj#r*1jM&^0ytoC#N?mK%9po=@(!x7*uT-u(ka3jh8}NAYZ0La*>- zV*i<COL-|-Pc0}hECd+NWy3eTH*SNR^z9n|ILw3*2tHRTn+%%F44yiw^(gDX)G!;n zAAR2ci%IDD)S$Or>|ci=7)q1*n#LNjU_VAy%S*s7X%?#X8rGl$0yqx+OV&}9b%9nc zzp!r1;_!h-<vqL{5uT$Z7#8zjI+fo!brscuZS1W0uwEx0VXs77>h<u7W^g{KUrXAu zB<{nE8{pAGiLwNx_jQu&dBRUF#%u@yDb?r2z!!JZ*dZSPm|UV-&NM$NcmM&3&Rqu2 z31H6w){&BOu1w_p{iQmTbXS}z0C%3n-wu=7iJaXVowPc&JR4SFX-fmd3}SZ;Aqd@! zwNMyXITYE`&{KsT+r=2%&KHMZ;zi<Jc>hI=FFs*5>!*gGmO`h#{x60DF;Q;wJgYNH z88}f~)3V%=+f2UxFUHb#v{rd=g1X=_qqE&j^093|zvUL>F&~q<uAurr=c7eQc}W20 z0%;^h`?-si2@sr~pOHDHhMT$k4Ug)!F%^6@_iy)m6&<Ijp^d7G3bd9V)U?Wwc$H}K z4STlVs(21-p&3F``W296XFde1n5JwE#ON|Hk`H(pik8a8UWeuC<Sq;jo*sOq8!!-+ zjQ0vT90uyS?);N|M3#@hDSUVLts!?Tt8y%Ue%c@X7{cmq+~w6cN8SirPV6?(dM<tu zsCq4DfrAu`ZKasmcFWbQT))rq7glOLTEACQ511h{CTs4uH~5xVCX}vi%<ZRX^ijJH z46#57U4#r-eqc>RGr*QbJBd&=JQ+qFpFFM{8JA*w*ojaGRf6c6ne!>36h>6-MdOWT zO8;>jvADjnSjolFLww4?3kdnSwQ4owswU&Lmud7=Fu_8@stoLJ1xPcg{!av)B8Sg* zFVDaGY0`K-pJO?XERT~tH58!$7<bZ6tl!M{vwP@Vf7rEUI(oA3I-#C%a{RYWj+}qJ zb>jN;P#@>sUo9F<-&pGqSI>VLIY_PeVb}dJC@fJtU(*0&##B~xez2GST)m=sCWXnZ z&`5A+&*Gr0;LA)60^e)67-lwBW6)rR#SSikn^NJ>rsEi_y9GZzmt&MRu@5hB)^`cw z!xwE^6gZ*Col(x;*Qh)@t15U2bR;rCB-*W8ZwV8v^i^uZ>we^y%~-^>Xe$rqcR!RL z$dVsW;VfLHk|_Z5AT8%aFs;Mv)u1z%;`fpyJyG49G|okI4Ahp=6qa3hs@M!&=e?Sy z=b-@k;J44#FQc^b7mPEG&VhQ&USnYt0>L_eAa5xZwXzB1;~UP#G8F;Rbq$ysqg>>j zPsa3#duCjx-eP|natwx-SQo!>nzSf)mrW=-l1fSm7M4_M;2D%Rgb6LNGWUsn;k0_m zy4~am1yiD6{%w1<PL4Bt|C<WJFiZ9ij0maTjS&a}@)oaSgH;5vu%ML0ji~M~8bHoz zcA-q7^I1NzM3UV3Uia(XqA9zIrgKIz)dgb~*}`oP4n<+qGi5<f$m1aX<0$VM2gL;} zD4r{0GXW&kjhZp#i4b~h8BcL=(4OUq6dQ0M2_Wn&V+g$E=?kI#ynvGMznA9FmsVNo zCy3H3q+jBmsHObw26xBIn>b0l;S`Pq{wxnlc#YN(%n+xmg89U{xvulcpxT~&=lugd z^S1h>5CRSBo_2)F$RP|JTpD|DZMR)|;x98+J-#^?+D({L(G?L1$emq;F3Q#h`cZIq z7#Ei)OnL5GoABGi!6A_RGKkobm>Y`pe;0&RK;=->H2e&aaSU4{_`g0SDdIViajeYF zHS2`V4)Rrxxd+I>H>l$rIl#BTGkA{4Cj01#^v)VQQ{pWJIaIZm6zeyqvbLKpTb1t3 zx2IYBDhp{K#X%?0RQ+PCVFRV`3MX2zHN%ts0Ge(_RFoR6CRn!NzlMKYy=c6B8Uj?- zXV@E(%%&71m}%v5De>DfYzR}<wzqil-U?*mi>5iI%3WW<s+)tG0|eK?Ccv`Bsn-OY zMcA915jVg#krUmu5}sa`thF_IXZ4d3&==a@<Eg48Tv;pc6#gOw5z=KSg{B1kij$(w z&g=gTP;NQ>8)w}L)_v`%c@Wv+%~-q_l0UE^n?)(UrwvKJK_$;_sN3T}J^m`Szw`2X z#VI6)Rjf)K(|U4S3sdt(%#NoD<Xbo!VImDOTIq8qsElLMt~ObtzGp$Wfo0?CH|tu4 z5aH)_TnAJ6j~9RcBV=#h@NG2Kkqmbv;=V%}*G^a1*2XbFlb@}Ae}u8lYKT|I+5!=B z?QI|Xm+Bh=c&?jme4#-DGUNqv+B88pZPYb+@=Nm#g6L}A;(O?7PSJLMlYB14w0Yu8 z260&`I9r0Qy9mdK`eedFvMB;=cg3<IQ$>kofGM?8E;=Hr=nI>(R@W<GoPl<7IzGJF zCx?cK%l=YwzG9b$w`omHtxJ4f-A35GiV(4SQ^rv6ma%&jk{*!WXn+b~7$;f=*`DNC zy(=_+TxH%y-8vlg(>yioOD5gKbB<FIqYPRZik&O!+kJa{QgaVz8IfrhYZ{DL>3osH 
zclPK}+90|Io=9GyGZYYgF|5Cm0OkypAEpEbNXZ}5VL(xJV`?)gw8&G^-Ke@K|3OW) zlt+6uhH`RXc@GEty7nEW9gQflDipKdWs2D)X{sdv%7!3j!P|ccfaLzlU=|Le<~3%h z81<S4%(IPAPWq9cmA`0e(U|3x4M<vPqJoUz^qgoE_3{@=I+W13d&2zkkGJ1|jeCyS z@yts<^WPB}fy{SUkQ7I{VRYi}!T?Tk(!^jHSk0OG^%OPza{Ap=@M<pdERey5r4TS5 zMyaJE4C#zj`N}&i)Ce-uhvR_kVKi)?{19CoG<%igF#X&7+hqHt35VF_8Jx)`%kXR% zMR^K_iXl-mBM@>XaHINZ!~6i#=D37Yr++IkBAs+q;VokRTzEXK!NVnantSffB~9{9 z#xqoFRZ}yidEe3$G+j@sz_h-7D(|V5_nIx{To_X!LoBnKGL*xYP&&Fw=j;kOTQhVk ze~1)lUtHB0k4y;lCk(QR*uDtyT-g=*ne{KDsoz=3=4NTPM)V4FygZUbIpXioJ#r9} zuw*lxUVM1Uze@DFgB~jhWJHvpCU-70BA-CzC9mT{%HyzZ2n=#OsVKC_b9$4imPec_ zdb#C9NuL>lm|9z;IUjdybr5rfIPuGZIG;e1dofQQC%U?wrou7vy6>mcv9#S!U&X6C zzkcVjGb=FwEfdRHPus<fDO<=t8LRG$78T(q6x%65sJ%kl00qh5*I;buE{Qi9_VjVK z4x<2#h7p5r?w_>-sOScno4Ffh+m`RL^8b=m2uljNg=JH%4gKro<#n`Y?|qTql8ir& z21Xpdw?$qZaCLA;Ke8Xr&0}%SwMpB2%e;eOTuG|AVX4YjEGO2>_9m0!CGNjw!<D#> z1;sm)_vdcn2LAhHbJpka@#nofTaw%R_138O-0S<O@jZFt)FJDZR(p`EH@dUv`>r?E zyU#QDIeKCL>q&X>ap!xQ?<17%%eHg1_pS2<Z$$3-@!@Rb`D59v7wXsH>KUvrYtQBx zcAlQ#%jd=G^X<CNlhs?c7b-8K$AEk23-_hf2W4088CFh;ufq2c+s@a2g-HG*Qu2Sd zrdvMw4a&s9T=-wfo`2(Y{vmr9nCMylo$UFCuK8!{|4#P&Pa^0471_hc!us!I&%fe& z{wKx5`mZS-{C`dHK>hC&&+vd4oF6^{K;QTGqNLq9B0U3!i5nytDRRx}ETGQksd+!3 zZP-_NrjD|4x>ghUG+*rn+hE1J?6&cNCe%Z~QC~aNdzPp)>)}Svzvz~KjQytr*x3Fr z+UehjqyN+=Km5}FKeQ8_u!*aMk%^e2fycjMp8hlC^zZ!x_1{xYSvuk<3MhWF9VQ!; z?G!1M>OsX6;V7Vwb*fqjLB=V;l{KU(C1{&QbRt-yaw0b3BE?Te!m4vZNy_ku{gNi? z7~&=+0gzTTe6NXI(NAkrwoFdfQ?glq=R0S-vUr~VIGG9q1mHIj{UPMX=*53<iiGSV zZbK+fu+5Ks&!h$bCKKl1Ka$H2###`Wug*J7pgp1$Np+@hqrenTbuMR=|L5r}k(%IN zAk~r7nFLcL)zP&4(_~Sp@K4jV;!oqxN<1wI@L2+_4NJt&&kLl2qZ0fbf4)14q_k*G zoi7ya)eTfhFzeE)R7pC_wdYlw=UvuC-6~XnY6U=YE?n)@C=v)6Fk?RyZNhpk*yvSg z&ieJe!_39o+66@L1R8Fn1jab?mHd2kqSssEPmbK!>{J=A{H4X@Z~aUfso@Fbwceaj zS6v(eX6m`VU^X3uA5F#Ps<~=4c5U3atnWGCD!2;82q`2v*_!^AonhCNQ75rMn#n|I z8`D+L`I(LZ>OXFr=%Vn_C9=1jz(7k(qc0z|Y0t$`$13Z@Qt5DQP0^Nj;H)k>{3%Eq zHgI%K)P;h)RT@K%q2Yrv4sKOA)vN_*p<l6)8(g%B^!GSP-|z_s<JP$*?xRR)sh04o z%G?;2Z^e>#A*(W^B3}xfSe{1ZCTp7nK%BDvI~A|z@f)P}6qx=eE8TB4tU8x=G@mmZ z<V~xHnPz0dWKv{(D?Z@O5_^%3aYJt>N?+l^*zTT^;Ga~&FRbxYzxL3p_2ONyQQSCC z4QpAR?jtjK%39cxzsc$d>+QVS66n}h1i07FMnEBCj2llMUmt>Hbj&+e6+E}2jeMA< zIRlJ;6G4@V*`a?vd1ia@4skF9d8^5Y6Ak}FGjlNC`v*=#bi&vGMcge;M@?~?2b^w3 z*i47fHG~fRWjfQD^(9=obd9B~>&Acz<9>3h>?!oc16STkwD}F~hCh?`^t-jy%#07! zs&QS@cs;|xC7)#uH@ig`0uLz{6lQ)=^Y_imNH6q9w&$Pg4DoV|wU-=ICeDwIVbN9f z(UI0H1<tf{%&{0%OpW5&tw38Y#R>b)^?4U6b=A<g4*_KjyYzS)j!A66CR{oLT_0!! 
zm&&zVv=E+ailUQg5`u~>J2U&3P{ZUHqeW1dtwBmw1iGXNIJAoOzv@Xn;tpk-xx>}> zvSX>)Wz2pXDBMXp$PrDaekZyK<ytTq2=NX1YHtr*j`$$Vg%3STy!jAK#0z_I4`byZ zO{6mmbMv=$zOgJ#vk(IQi${(;Q0!dn_+fR%;QH7IAOipPy4|_OUIm>c`mg%+Z^HC% zvQAIW^1l@9AGQ6@3dZ(-pkV*Ks{aT5`tQ~1KQNj9LCxR4eT{(uAOOAr|AjIC?Bl-~ z^S=ftHpYKp3e*3ADa?%jK0dMjEdOuh>HlY2uKF1O0RR97@*d@Ep!o8B7JnYQR#-^@ z|H`<3_WB>=7&-pear%d${ZGe<{T~ANhll-tVq2%RwVZHSkbG8a)4bQARE*^>Xt&qu z<S#=4Nb;Y8(GS@B5%lWhT-m;TdUKeV78GJ*Y{c;)DY&v`GgS3@Z7uFDZff^d`@ufg zEI*)xm_2V~;n~avdqXGp63JSbv$o-wKYMsgs$!ATNG}g~)Vq_Y6!4gQU}xSLLR^OJ zW^H9>YuxN)b>Z1gUNFU&O)oxf*xh0C+|F;C?mnFGzSXplLxmldMvC?x3||f_q3;Gy z)ZEYOamyNjsgauH#@W5B8cJ{=c3$HG!hN<gCM|kdu<-#fb&c24JEX*(-34i5<yXwM zb?=jENTzP|)OUZq+~3EA)IQ63-&xOn)b;fi3=EEm9(>+hLaGje$E+^EVBw|qu80OP zj-Rn(1Vqq*0TbOk>vrSiY~^ox{#~@QuJ=BFV=J?BxBf_@FdLQo+`R8EfefURPsOcr z9dufIg66WkynmhCi~lxv|GK|<K2bzkmUKK-%k_fn*WKDTgK*WzVJZp2jiL`^qmLTt zk)uQ5Qfk{wzqYY-k%k`(!TZqV^$JH+9O*_gU$U$XHcLc<ih$s&-BV(En%C3v3r*D6 z9)ofJisu9|<_g)Jgc52DnD>yLPeZ-Af|CF3CuJ1nk^DR_(<xXqPojh8unnY$BNU-Z z<2MPWAB{_ML^x;xoyG{F@EA5|qd68QBu%y3p#hk~_m%~{TlACPv#z<YVS4oninp(6 z+=b=rr~H&jlkNRI*zR~VZ~PhMgx29Oic|6VppB<p{<(4xx0RZ>Jx1kV2<h`COp@pf z4we3D6@oR6!Bs(7B^m-j;JCu$2f-n|BF*3r9!46?i={Sh-!^?pNGWOE+MbNt0;IM@ zCALFnpA7M*2%gYwMsF}1sRCRr9=v!>asa25*z1qFK}AD+!1=3FgDW$``ZAQ9CjUd> z;*3g-qxoo5M6C@ul{Pcgp&w3l^$x^lcPF8tm|f*h+7ubTjgEN1%Qq&Ap{QAs*DMJB zfMRPtLI|lST4yKtc!I;VwP+0ETL|dXx*ls+i7{^ot@9GJed_24z6V9<b9R5+B%K^+ z{;<LeGKT>07?g6TcGJ|AG={jVWAlCD>dzFs6P=CIxb8Pg$Fg1JVljfkMpNOh%ghB( zKCHiQCBk2#6qc+WJ6(61ukd#?MmNLe$_$C7Uk59vAIF|<J}$3kCns!XrJa?Ti9h+e zb8~fa|6J0}(7Fwp(dlO0pX71t>GX7Uw6pK^;zIpGh}O?u%QfmafU(>*m&uLElqlgb ze=}(Qx{>ZO+LP=LAZ_a}j3NbR9p70L&pukNg-J`|_20<NS(W>nw#G+Zya=;j8}f}& z`K)?dsq*FV<odFIY5aVAeCgWe$-({m?bA(9EnT-87-=1%=9)NuY%HoD9e6>6Bcpq* ztfO(wH(f;e?|OA4af%{|gf?m$SraPUVLzGC5>O7ZN-#g=()x)@d%HPG5OmPfXb@=r zafJdxV(uP;N%Shs0LUDEMGm6^j0!>DG2w8*E(e8L*kym#<Ov^l1wfY~Ks78__Y73K zMG6E2JhqGVP#Geob5z_~r&fO{k#_0?78wnY%8J;p$N^UxpeDqsD$PO`sRg(kd8$jk z=z)X--C|YG&Qy>^EW5=P4fk?sZj7*e{B2e%;|Hgi6{{xGad8vgWvoscgdzcNd~uBf zO?;rl5M}0-YtSP6*!A|F6@^EGG-Zp+R8b@H_uY2hhh{iaM1GzM7Fz6R0=PC(Rok}; zok2!7h-4ezmok#db2%x4sVj)cOJp%k<Wcw$86v>PuSln44egqsGc$(Tt*l|w8|e_r zpv=r+)Z4@{Jcx-dprTOF%sxmba;YwJ#4ydoE{ZU==ui=8Q%jCsYKV9qZuV5CVF+;E zvXaaAk=^xpv>_Ah)u4!SoB8aU{-7iqohK0kn<Y{NrrOHtCamePHIxJjs7@q*J9p}o z2`qHND%@Cn=-rN=B+N*jIdiM%`@DI4w0@1O_>iAqZT12ma@6r#Y3JRxK|JunlrC2w z1CEAePj9ODh5<Oqfv}7Wd1m6HtX7a+)m}YC?9D!S`Y3AE_C%^<uF}oOKjCp<;B+JD zHqo+H?9Ar8xDJMQr*m=3Dtg#Bab?s(tAO#<@pCeKc=3Y3@cOYBta-DOEgKUxkF{}g zh^rO?+$2~J7Oq-_iM}FD8cA>S;*h3~q~eXJLBM<w$sIw0$S_ukd<8X0cc)Ty60!p6 zbM&h5jwArQ_wIQf;?*@-Fi)B`Ez?7y_iYu77mK1gU>l-J8a#*Zu8{8dlA6IUH6nh4 z`EGC`dV0{p#%!(L-k5=!wNg1Mq0SBg>IZR}UOOBxu^jWH=tFDfQ)^S1^`FYt?AwCb zRimmVaED~3PR2|)@yM@dltY0ZYx3>tR^G_W5G}m>C8X>xp|yf{V*UW2t?i7xMXXvy z0^NliI83En9gU}*TkAYAoY&J<nhC|mZY`3E?$c~Gf0NUs980qM$>YxJo*Ab|vT4Fr z#BdkwvYiC9SycwH1>KG#9gWS7(&UFMqeBL$5Iz`$Yp|gACFjVv22xB8?oaCkhhWej zWvjzlJoA~bJ>1n+%rztq2jiO}2r9w$J&M8=sHMCp*O1A@5+>xW^hC%ZN)%U9X>I{Y z7*4~W_cuj#?9Ui9NP(gE&jLx$LmdiyR$q?rWtWi1`g!nWJA}T$N5@A{fD_6{WqYoq z8C2_opE-Du*%&HN=igF<VBzAmu0iXO<BJTKXX1VbAz>~Q#uj5#ux!Abwa2ocK<~S7 zo$yaxltXJF_`VrPE#i7^Ucq%LR=6dIJP(w0aA^lqY~KnyhmOUEgEKO@^Lmyu;tNlE zLG%y~c|j4+a?IF47u0t{>3c9Z_-G-}Ai~681L*M_C1cRu^5cnk0(K#u!P1%s{Ec83 z8lp8{nWkKSCqNujFwzj33XTgyzH<@q`onIqk~QVsJaQ9C7CM!L6}v+g+LiHLv>Ec$ z6d_BJ4|H7coNcX)n5>+x7iV^@Y%?E*R2)9H6TQ(!WQFgR2Xs6VYzbrwBPT%h-up55 zC_AzoLFe#Xf9dWqSRK0rz+7m*;=;1dr4d;ZvVfSKPi1IUom-IMih5g1figr_kpGys zJ3bbsj%_u%6;OVN5Z!!YO7JxJYi4*_(B?ZjhV{BMvYT1Be}H<?kS7}~_xT_O)PaC& zgW1zAq7*87<a8uZ`AN_@b2n;4MU&5tho#@Keg=WgM9ym0a2m<QEPF1CONmD~NSSp^ 
zs67`};e~N5gAZvv{ltgDCbHfRJIp<8eia=8`SRG1?-6^!3Az>Ec2g8lmYp;Aecmt( z+*f#$mmQJK8vS+ffoBS1R7SUXQxBBTA<^ueWlViiMwhpM;ev+RDssdYgzy%zn}@KR z$xDZhjknAPTgZnjbvQ#dfyc>U?7Wr!5D#1t-ejzjm;~K-o|qnbXIu#=v!75I=hZ{7 z2On#&@+^x^&5xC<y$F%K-g8V=o`EYP&trm}2(J}FN_<7Sq0`E}N8ivwV9Xr`OLXNA z>oYN|Id~JpaKwHa0|>iE*$*ZP)$XcLU7P4C9}9CT2-^0C{YZ*4vYXc&<<{1YhDCWN zp&Nc#t@j)M)P09R;Zp_U?3jx7WPRG~#tYQ)0^eMqDdf?d)Jb+3>k6bV5N2*^hc3)) z9|zOI_53gT&|-^@U(uql37{7pT)>^V7wC)Z6h&K;$zcxYZ8Q6zf#xJj+t&C*G-Q)2 zC($-hQrW{gQ+0DPt8n!4Ml^w1LQc36j@-M(MF2upP?Ts@+J@~FgDeC@iX~mDRw(nK zjX;=_Y2XrIb9oIfzs5lt5$-w-d@#*$0mHb4!2e+HoTDRayFMM89izjJZ6_UfY}>YN zJL%ZAZFg)Z9ixJd?aA}J&%5UPzL_;^@ZYS|s#R6HPSxRk&b{y7KKt4p#j8yfgv$xL z<=$<#HR=Z3s9BSt7z3?)6=7u7;I$Sk!O~wQ^ttm85uf(zPM?PyB~diVv7goO=3GsA zY&5Km8%uCjF``~uoTL-D8{;?Ber#?U6%;MvSmE=+E1a8dFsv_Wn7>kvth=}?+X`3B z_cB)yNH?F|*jDNQ*|1jKT$r_}BNN5=lLimCZOcYNMul!zIL$qRmRXn657uNmRdH14 zsuq@7n;PB+Ya#jb<TlRD>D~K3l$KiiDb%LY?ijawLBf=>i`awD+_l9iFa-ThCRXR| zm{`CD<N@X7N-vh65eC1Sy5zL$i^Wx<G;@IyAPS?@sW{Kv4{k*uMW3%!g_b*G8naL; zbgoD+`0MxSmKDQU)deR+D&w!fm`M+Iy(4-0dZ!WF^LNCelyIw?Gz~5jPbakNCq_}A zXLnxF8>#JxYy=WKaG(unFbwQ~DRCX$F7kwEa<!3LFXmoXpEfaIg77gW;_eV7)xV%i z6w=pX@VedJTFdN7L1rbU<^655aE|y=?aQRkcz6gI_{9uRGzUKm4taa#XJv0jzYYX* zJP<44pet0EPC&8ADdC}@lsFy|Rqug~F3{$E6BM^aHtCNG-R44>Gu!rWd95*q<M1LN z4pu@M;Zz*sG_Gj5sz<;?|1Kf=7*r)mTLxaUmO^(3<lF@`$!d+v<(+8{tl4lIR}FRT z6bVir+WQ^bfs(5VL_4w7H_QlwZs1l2gswRe%4%+kEjqRc!?(oa5H6r;A9}|fU*`|F z5jkv0QIXvsV`;f5Z#Jm{x@m$1t0~VT!U;$VvJ~M95Qn><goTW$D9&@yH+Y4|JQBBz zFB2*HH2&u2ueoE4xfWs#Kb^6M>3ox;NT64|+((L=CGv*pil?XRQ7^|bes&%|5(^FI zyUeqYVBrB5%DN?(BYtOBN-Bw00eM`v0;JdMg5_?A!K5>WJXsCh6Rc3M;%cmWslM>m zVLJR#O7ce6pty@fW4)gX3(&zpx#U?VCPkSCY1wEI<?e*51sbsZGQ-jdTMIOaT^#C} z(o-!!?YnqS>`yg#Yrv2qRK{xhTcv0Jm+1&3B~jOM-o%f=GBW-&X=Hn~ToRrY>2t~= z@tLv_2G8Kw@Yrg@=xvOP^<Tt_gDIuuCbQga^hSQoYc3J(=|bV#E(oKfHbxNq4AX=0 z#a6D@<D-g6+;PNIiUC$Sa<`Q`XCU0wW%Dn)iw%@m)otqfcve=YQ8>(;1*<rTxJo2A zR5VauCO|#fL8{axt}rPRp_1I@&zUY^Tw9&gmfLn}09S?!e!~iz>knl$GlTeR8y;;G z<A5^Q@AD{K6qb!EHHnmLL4rmV>*z;P>%W;9v?Zj)c$5R#Zg%l*Bj+xV6|T_qu1c#- zx(F@J6b4?`87IyT+W5_6y+ypU52x38Q;6d*{{+zTbyiaqXih1soq01E_lC2{FNV#4 zAf2D>hr$_IA?AQ&AT+q4iQ*0Q9@*QLpY1hD@3<Um%ERh5JDrA!-Ga2B6<`Y|zpq4r zPjgz&X5^0otT@E8Y}Vy~8>-_9#^0p{sl$JV-RnIh`9!BvN&5CAzjVs+uoyCv^*$^s z?Y%i+-SEL~DdA+zk$gKI`%aGb1IP#?*&o%xBTvBxJb-#AdAUvC%8_iZ0)hLihUn;s zxx_@W&Tk8Go`qCAKK<;48c0s|cUQYAFNV_GWstE+NL3{5oGDdyk|>#H=E`%`Sd~F` z6;X1jj2+~H@;5x_Hj!Y&9XxVGGXMAQlS0b;XLjEh4G_6S7W%`$ZD%=<)OgR=y<|JV zi&g@gjlk_#Qgm!5Tipe?+I=OiYz=^iF;I8k275lvtQGKCa{B?8<uFP3$q5WTjGn}l z)q07dI*A##3%=0)xvLFgeM4#M2spmR)YTM*r8M+r1;K-w;dQGki;2mhuF8i>;{v%k z7S<^?S-4SF;WCl6=)2+yf^-y#{PG)HQh6t#nQLLhI+v|$uU$6g)=9c?X5!Nof2dcf zhIC%On5)_eT*awXQ{HJ)PIZ-**wWw9j<&#>5t8r*egPFgsiw%TrHn<(h4qKu8jspQ zT?0&v`ED$Qvs#Ske*J9CkV*4Dp_;v}1d0s+zvw59D^Jx4Boc}h{rwC(0Nc~}3@1rX zsT3|zCh7n1WUC@R9~P&tdEa_TL=-7U!oPy&+9jc3h%{FcnP!C0(N_2#eV%jxU#&dm zBw=_rT8^lV^vvg*-puqr3TA(cMSlrM%q$%LJ$~^YX>I=s;>iBZVw2Ug{{W{EF#IE* z$s0M?IN2K*IS{bZ|4VZcwE2LrSvxuqu>AvflQJ?i(-W|9h0*xy7i=7iA9#93t-ly+ z|M24e{^!3HELnRS0|g^T0u2H>Ss{@RG@Oy^2NCXX5S+k2?}GolixFu3Cn@f)@ROhe z1I)j|EWEq~bi%HVq6&_G(cu1?ChEXI!0;~;++Pz#K34Sa??u>X=?Pf=*&q>y596<m zm;VTsuz!5!_-DIBm_7`Kzm&wkgf*5AgX!M}D=h;7%fAhdzy9-Y{f(KH9)|H>qUm3Y z{5Rq5ueA}gVqhg;{|8L>VXzUf{}bQ(FxUy$|6Yf`f5rmC{;!#T%;G=nwf{-fjs5>5 z!t}ooHUF8-zrt=G2kh@{{=X4wPka6Tk~8}WZgV%cz?A+HI{)iq3h0<^OZ<P*EdN@G zznkUXZT7EEfA64wH<W)6g6#h%lJx%rLXhL1BlTbR!Y4xT6CwDC5d1_4ej)@v5rUrx z!B2$XCqnQOA^3?9{6q+TA_PAXf}aS%PlVtnLhutI_=ynwL<oK&1V0gip9sNEgy1Ja z@Dm~Ui4gom2!0|2KM{hT2*FQ;;3q=x6CwDC5d1_4ej)@v5rUrx!B2$XCqnQOA^3?9 z{6q+TA_PAXf}aS%PlVtnLhutI`2Q0^@b7$`zjAo~6GHIsY^VR4+4E2S*#9O%@IN4$ 
zSeXA)Uf4g`Oky8-Y4)}@mVc##IodlJ5zzhDT%P|OP~M+Ta7E3PwkD7F%NV=Kmir8^ zHZFfIFc&Cm=(Dpkgdp^f*Rx(4SKmOO-T<xv@K6h~%(G`DC4mH4rRmP+o3o7oj)M}# zGUH11#zm&hNAJxvhs!ZKvi*XLp9%Kz(xYM=6#vQS?(n|$HadAbys@8ZOueH*q3PV+ zmlEVqQqjNS`V#IQFX5p;^~7XRwLh;tW`jv#g=KS?d5WXa-s<dgdpCJHG=<OS^}4se z)_l(A{d99Z(#uWZvr<>%PSuz*_?za@zY0GA#9v@GPI5L*Xf#Y{HAoVjzw}VJ_;{=g zZP*6Yu9s1Jkym(yL~yBin!06{w``K8mV}_5D$kancE{cM)zxT8(`r+GJ#lcX-y&dc z?OG$|KCAofXs%et`w>e^W>oAkIq^B>E_`&28aMC3@bt&yEi}h`82dc*M{fHiR_2$+ zqIf5?X=vHnC^r5B!aTa5ea9k2gnc$CMW?9+Z8iT@N2|u$C(ik#6N?L61ssSZhQ=zN zBl%5zrzruYdv0cHd~KYp?s|8V^ObTJGSRn!mk}fHTV+S_!aBV8G@Y1GxuldcB8Gfw z;AYbNg37?Am^Q0O15-24%fzcOCYt_}N@AQSK?@}w*dQyW75Lo|rKR1`s==Rd14#(K zlC*;W7vdtPQNN2u^6pZ@s~+3l_KR{Go3?N7;^18aSGgM?w18?tXl3Wxp{AaTnlNlm zxxMe6=Bjsm^4`hzW9VYCJARB?B(YqOp|BF_E0G$<k{Bu#p)vnlys5dI6sI@>O|<Un z(w?bLQDxc5Ts1~lE&7u}sfX8ZgEU!)GG2r=T!Gh9fjnG--cxcslyf{(a4=PYH(Z7_ zkcZb(`gOb<htd#}&<v~iAWiR&PI#yd%-Fd^ITp=HN+rIg-!{-gi0A|QR(Fw0ZkJD~ zhoyn_^~@OCxpWF2g^#7$iuV?h>UXje`(TxrScPl27=(m^s4Oc-%Sj`kY9mLcVz*VY zPQf)MnQ`i1Evk!*GP=q-x_otlWa+>hxx5zPqMurR{uV(*dGZVO2wE21^PEQa*uqQV zYQ^ypO^-Y1STDhsxaxZ2$NT`rmqVpjRTfZTseG2-b*DrYg4a)VIHYAB7=E6Z;62DJ z+FGq(ct^*h7k!<b7&bD)d=&47D;{rURSx4Ise;5I58A^SFL|KdMKNKbBCf_7ZU9T^ z>z|_b<q$*)^WVZ1xWMMqrKl8p$oYRWO7s?L4t9!Q$(3#?QX3@GnPt-GrP3NF5t+vi z6yrpfzHn}Pa_aSoT<RN-d^sq^sAMcWz1qtRS3*iu?~3Alo51dh3J)!dSmc<tV0R+Y z7229cK&Fq3ZHi{Qp@A7c9<RWuK1>5G2OUKO1?E1=?rO9*IUhW^Cj*=nycpwL|8eq^ zYfh2FET#6BofT8$V<#v)lBXNBEMAOW5t<K6ADSg6q}cCVX7(qcuK&4Sw$O3>Q*)F~ z-T2}tkcrA;Zn#BWf@@}^wXVd~-r@=B5F!){LW>zXffX^iG4v+`VhCU-xL2SJ_%}B1 zbg(oqKgp?(^Gvkj@9-V}4>U7aa$9$cSf0N_Sd&hIwrqKk4Y6lLq#%PMu*d=srLc|j zsV+x|VtGy4g}-0lqS^FIa4*`jps$bZ!QyUlwe;k!7>BXhVP;&jM1c~2UCDD9$G2bT zcg~SkiB6LNWuOMsX~3xVcyN^7@vN=p8@F3BhJUVH8w8KgxXlbTRe$mb!Gknv#%mE# z^*OB7pHw9_Y?K^lRGep4ou`|%w*kqFt_{&S$v8z)H*0B_;-v&Em2;B9U{{kR-GLRb zszacXFAu;RKFy7^$7|OReyJ~L(H$z<qcM)p*;B9DpM!d_cxD^pK+j%^D3ZI$RTi*6 z2`VG1>yTgNf9wV18~W&C9Y0*XvYqp0WK8><U){Yv_s+CdTusjJjA0pNuAr^2B=Wc| zc%7<{nj|%4b$Uht>iOPZ#(MUG1q~Z>><Vh^0<~0)#MV%!v*wtg)kjT`qzTXY%lC*a z<0}G7<gY8xn8fsDFA5S|c4b8=RfSuHlu%h?8niV^PO`+QPA<c%=<Otw=wXwm5x*?L zqfSF_EnHzt!+v3gLZgR5V~dKfmy&h-vH0B(^BHY5SjFudv(TQ?{(KCxX6i}$atmWC z@H%0=@icLh`U-89)I!2b-q)}emW!#}9b$uY(B=Gr&0_vvxuG(ANy^A#)yGt3nWo6< zU$cA)TV6>`c>~)B-j<A;bh<h0xZZ>JCr+CEkXu30eo%6Nl{QcXW#ANca7D#0aiD$6 zPoqmW28ty$K(qLhh?Y3FYHxJB7_c6I?{jhEq`%_puaK7FRv|2_rgU$)G~g_9nqp)c zm1Bq6n!(i>UtJ)Tm86r<VL(_c0@PSRRovEGrJ-(VC-Xck4!8{(=s@RbH`OLlVYDkh z4;=5pV1AZfzKRU|8dg9I{(uWU<uF>pqqlGYScKo7f#tHnBsBcu)ypWo$g4KPq1DeL zx>-E^t!A8bCtk%~LRH&Fd+uR>yK$s3#9@s~D}Rh0Cgoud>IhQ)2M1DU%G?i5m=jTf zVW3HTN;o$EVCb#J64cH>DT7GR(EtZ`f<vGQ$o=ZG4vu<HoP~0k(Qb)WFMwZ3#>CO} zQy|9IkwPu<JtQFRM1RP}cryB_f-}f*heI&CBM66+po_y07);UOWl~jewBnz)k&{+8 z(w1#xtmTAtlAz*q7QyXgP1k9~mc%FDFGluxO7=T@f0cMFgryfwkF*8|Co@Js{je~{ z*EZ{9+{ah@R(&c)Vkp#8D&13R7c6=X=}nyT+7QSSS3ukv30sKkuMXUsvo~7tr!V(l zJok8@e0!jH8!Ay}E>&YaRRyKDaC@?Rf1(<LKp&UV1gH2U)c{%Z2zwLv=lWL_R>j|} zpuI;snVl1&Q=$hpfJGgq+X*9;Ya-M}f-kkYayRk*>w>K2kWD4-3%&G0`JrI_pnDoY zTHzF}$}2pOd&vzG^Ul(p?zablNj?Ere+H@a%p|r>{|JXDV0`s%G4&DeJNTKhPazkf zKy<(tnv#iL|LtcDVWNFbtV5yFrT!B19t#u-W7rqg2y}*sxTcQ|d_vZF<of`dqT{4L zYuf-T$f+-Uy*k0xw{y*pPD`|Ui#8AA5LZommQ{I%5f@c~h{#6tS0(?zH_?HLA3ddR z;ZuhdB_xl+IFuY@%1SDPRwibGWM~d1G&m|=9)V#feZLnbMkm%+`{!2I`+xO=;^p>H z9neB4kYRvwky%-YvM_mo8*rI<J;Odt>OX167x5oQ*P_3HZ;V)tC=o{mP^kdUKwhZs zvk)Y$exW*=bCT6kw4+3*$xCpCVUdxytDJx}3j8G0OBJh*6n7F={gn=`B8(2N=s2Nr zPmRhP87W!a?d78H^tLfLLX#A#{e-Wt{7z$i-RZlnhQK1Q70VwSE0+|jmK7_N8mpBW zD-3+Ab3+A&YhL_t6ZWmb>?b$7V}S6uLE&lq-q-j<t=UFy2ggp>2e`hJ1(3ZAakLC^ z_IXfKLoL*be23%e7~1uyDeIrJwq*MY!KX+JqhD&uUwdpkmHVQVXVg@elot9{re`Ml 
zCdMY$h6ee1h?(KAuukz&@G+AyGg7{!WuPa;Bget8pqRUwlfGbR!%6=@2$i^sioTBy zy@QIfg@?1dxvjswVRYa>T;niPU#7rX)0}tc&^@fQ#5X}vN7ms~UFv4;u(dhut=Hh9 z8%jD41~h2VLlb{#CPlAhuf{6bQ7-&CofmD{qMIm{za>R@PGOCO$cxX6kLzuBqUgSo zc-kH#GyM3BZpUE(!kTy<I5BBkiZUZ1XIDN}!nxM|>szsk@{-d%eB|xzX3P9^f`aTh ze!Q7A25I$KAGzU=fiftq0AH=3@52unc{tVHp#@fX@eXN}M1uW18k^Oc6P2-AgA%{E ziJD4qh(>)|VDP|AFr}c>5~11WWir_UwfifMiS$CJ>x5O?TDcw`T0#P!ga$$S<IGYo zIUXs3NYwTd9~Y9t%}*q7(kQVt%GA}%)z`{2*2>gZDbqEm($q(>)3&rs6STAwl{J%o z>Y%14!oCu*6AqG5P7*WEk`Q3J$F@@S^(<-+4yemdDX%U|tg!&6{+3_nTxY6fsl7uQ z&fajvj9(!#M-n117Naz#DftSMR(-Tqa5o5;D}SVDHGDF16mZp;VZq5<TW~fy2FvO; zLyiMKNSvxGM`HX@e&lX#xjwk*?Qw79bnf}xKEGC>58TLvQkw>ZRBag~H)n=J*af+s z+-M2Ts#;U)T+?e)z|dU6dL5}coC`P^9XR>GOxx??*syMr$Q_EuBQPqY*y4MRix4US zc4lgVvL?(;5*zE1_L=GF$w`;9)z+`5a+KYr;$jiwBAeeCt#;Q3Q7bDr^=xu7){~JN z8CY3a*l8)49d`NmPq)#2eg|3F50+{M3j9)5#%99u-h`H{><nmFSP@VNE*F>0_Sf`v zF0xFuZ6zjjCRq9?@>$9}v#cGwZI1ZE*_xgBaci_C7_Q>OKLviP|6+x#S-LS(w68Q& zLL^FXO0E(2Ey2|feDd2b>Bx$lJwx$+%Nk$vH6zrIRi6L8Wfhth+E~-6u->k^P%q2Z z!q{Tl?PWPeSrD!`9v&%^9jE}`1bUGmrV4`{R8GKn>ltzL8t&u;V`Q~+WSAinwlYoL z`UATin~M-PmN!c;ZcA0a{+dx@OyU^(gp-pCvKk>e3V6f7p?>$;Ap!RKIcE8o`T&}_ zM{(W<aF9dyo$e3^&-&y9^eq>1UwQEx2CAsZUJsOiWT+i9T;uH(i-YqFb92&6a}tBF zIykz~Qj}J9cl+((V?My9YSMBgC~0<-coGKt+8nIJecpG<kJ30vgmU*+6-vda7G}!2 z!_la`{EDu-*_0SB;C!efl0_9;o%eJwQHpzl*YkzF%k#s}2WAPoKQ@0J7BOCLp{<MM zpNNqllkYFS>`<mJjfGA~!6He<1}-Nmjb%vYl%FJ2o2p)B?D$3b63@#xG)pzlLQhUW z%1TO1mEQp33uXin*2T*Lo#7+mVvV3>G8C;cB<FNi{_cH5G3`|q9V4hxc76!r@>4+$ z{D6#+W{i&&8TpG|$PMIFzKIo;iEg28w9tTJlct8Y&&8jOjq-LG?Zl?&aO_}DIzs_M zYlmKLrLQX`XY01ToFV9m3ztbwk`B=}w5F-i<0sH!W}Y?IH(&+ba&k-8L`8A7myttr z2e;4okjB<Z_q5+{mOe&b#2&WR)iq|!h5pI0a(Q_U(CKOT#hkNTA850;=t>*bT3<rK zVot(@7c~T@w1_5sk=!<3;l;Q{*0@{gnFYdBC6cJ*5VE0P2iAC7xRq<my;;wFtWVw7 zn*z~=#5pWJKGeP{J;Z)$=6{a;qahCXXt@x%V_bwH6MSkK+c8?a+&+em=|C|lbq9VV z1o&V+hHp(Ck}MJ705w7dR-=Lr1LTs<a8dYbYt+IHvKa3ayYFt+bBf~oPKKWAQ7cg> zNpG|y+(kf<Qi2kERS+^domM#7?T+9q*Bw|qiR-*&Sz^S#>cG&uT@_w<K|cd3loU@I zA5ZGnIB6*w!tqe+tT)97Q}(PlzBz`jpoU@^hJwRA-1>jNt|UIOBs$jGO{=qjtdg1> zuNs%3@r=4zI*k6Rad8<1uA_-`lbkyeHU<ic1RekYUO|+Z2sB;dAebLrSD0Fr=HJvD z=_6xq_wwBDUGGee(e60K){?7>;Um$Prql;sE_YifcV84+IglH6%Tl;qxTgxE>4ey> zi8HYtN*qDSctba`GYZJkX>NXpSafm8>a_l<4u0N-$em$iPVz0pIP_y|Qo|1yRS}^e z^f>?2KafYu|Eh~EjQ9nM4F){0Ag(;IJw!z)P}H;{2`fHW1;rhG#!ZXX$%bZ1vpogk zkewH4dlB&_MrMk7N@n~WJ=n*IrF&PQ#QWoN`6}+CWPu|zfpi>9QtS~d%Jl%IPW0P} z+G-EpN|GCeQ*^Njzq_R~BHRjDE^)qYa-h#RPsEP01bRFfRg+3>UUn`^Y2E%~fEvDL zGX5G~mR6mOblXgxbhoVnA%fWbcJ~?*0E4)v5vZIn<_3j1a01a}TlOe7!rnThOt<Og zA6j9!1Y;<B$xC(mt0bJ>(bn$n8a7lya-6Ed(i$r@^ANvt`w&@Ts9I2n47v%jqD#!S zRl+ktK{LTd-NWvBIx4+>PGY%pf}NM_{lwPylF6V2G9thWYz}j{`}d~K%5NBDK2j<+ z0@@LpZ+(kvQv)+<gUgy@yfr0mp8IDB6CqRh+#V<j(_)0Xo@rBC9U?j5CYA9XjYN4@ z8S#ji+(yZgq2^=l2NSR2qz7y=p-(5tM=OaTG0xp1vz|{kN5_9My@INbG#cP4a}_Sp zntv=on;I*NFjBON?rGuav{s)dPE=w@R_{27U=!w?>L2)Dz(qt#j)6f3A))Ok09m@b z+3mjF-HuSi^q`_63lh1T>GOI)&)KGjN}P>YorzU$Kk`(98Yo0K%tbitzDGtqau&Qk zRkl`Vdp13R=I2Lgtp_C^$HRvAD=@~!)kb^sCs}8u87EN|N$Vk%S97a&zoUX}a@BtL zJ+F}7f_P4RvF^gnl)^%25$ZsRdC5e&*$bJ<Jo<-OjZT0WXholPe~33$h=;|K6Qs5@ zu%Ed^%DX}=G3Mr2D%v!g;bXh~4$YBXav0hpXdR`fvKaEG9UWfzyYdkdbd6egxHxNw zgbYsRcHY--cLOgOIzs6AvAhqSwp@Kr{7o}X!wm@CH_`KDeq&3d-!F3CoL2$SxY#wy z7mxXDCY%tE4E~7QR<fRqP+f^3&0SHM;c-D0Kg5TIpLR}pdUz?j6Que9sNID5)B+=w z;v0=1!+?31{Mh^0CJEq#1XX7#jJR4Sz@EVAW=T%nL`j1oKFkApLkQa)9Y^sFm0n`u znU)0ciTljAhAx|;CKL4~E|nhgc*%_b!HJb&=xub|56=(cYJ<b|_SZyuO5paktfRf1 z_vsm_?$JT=H*y2v;k@rey0*RK1B68zeB~d0FITm9iHGYKeGN%h4|)V)E11lUl_`_A zC$vqlj%!HnmnE(9>ys43w=~AI0BIOd1u-oHbvZL3^CD5#1|G(q`4y%9`H!BAx%+K3 zs?g&EF>4Ub#lh@38=cuW)<NOB;z--ie&QHw)S!;yNG*H)eOn>#>_VIEQ^<BQ`d6)# 
zY*nuhInB40IdT1ZRr^s9W`pq#9DCzqAMh||e);e^6B~ZZ@*Be7d|+@}#jrS6+}$69 z2K_y%&CNjVNii^LX5Zi+Pwty?iuROYeyAZaj>N%WQ?8N_#H)kUdkEUQU*m)X7g_5w zJYK*5eC$mn7J%h-T*%7JO=}u!O=Ou#<?v>JruEBBves5VP=>g}GgDehgTx3iwh`WQ zg2D#NMq@RQy8$arcA^Xt<=-{@+3YeAPSuT<Vy8C6@_M-ufs*z1)I(=^u4^4S<LXqn z|CE+%gPcetKUpd?Q6IdMw+p@-dlVHykr=SL;pYEDSX`i02=_Jf#j_XAFj1-L=5C{f zBS0J)j~HsbnMh*oWUP6yKHPj|FtP`b0GnRMQTzb73Ea~Pqxo8K{xu}Yv^Tpyt&M}R z_HoUg>u`_HQN#AZO)2?{J&Zq!d=FsR_33bJ8Hb&y-~OY*{jU=b8;An7>?K&v!Qv_E zbJBBp!c$c^|0xqp#0~8U8nRa7d9KpIai(Xm(r(ukx4R<XPU<Q<+x@F3Dcj;3%B~O5 z2dN<|!2z&v1uT)E#b$}CN@|VmB(kcuu{^VZ1MN-PT-Rox3*?0b<-lNi#?HV0MnYy< zV{c(#Z#Ox@6rBdi(woA+F-TNElxIr+p(F3k%1#ZVLcQ$~UfGdcBOaK0*Kg%VO;7I` zdYQQB!Gkpk_l8m6<;JmUceN?Ay5DJcQAA;}2)acPJLei9&1Ugd4;7x#0n`ciWxwq! z_7$K*#dXPjcXQqiOtrBSa^Hnk@maZ=E6(;HWxd~B9B*SF?1z$@Lw(IrW<|qSMw=Rq zzC6RRXhKdNn8=$NCP1CYz>OX5X({NFaN=1CM({Y;ENkoG7HWCTH)uO}`(`N)mT`Q= zTI;RGBR8nBD_*VS90@P8$$fb7o=mHIYIAx#;HuyvV`n8O&Tscao>CW4t}?5!GCDI^ zL(@xAPsY_cB+m<^Fp>$l+b@T|p+TXvzzaCmM|vejkZhT<gO$2lXOtUTqz<L+Ef1sE z`gHMzM&Fn2Jgr?9Z>$I_4`UgGNQCAjL&3$$b-POD$%tWhcusyY+;~#D%bUO#sTLt} zy=NSsJvtoEjmFH~O>g(jS!2pKEempV4>sMr%_tZH{#T-wOgYvWURIWRM&^tCk@KH) z`a3pk*NSIP+l|SKdBf*KszUgvQehIkk)vl)(K&Q8H2JgS#T{HNrx~K~8-IAhZod+> z@B+b0@HOs|`S2gw{ZJvxBK0}qXyqvh?SpUNW1Z_5x?3kRb{4yhN>(wN#!&=Pn<BuS zKg`XBE;5?7AGzX$LDNF1VK_|CP%pZAzVl1b2zf%nb0^(EvEszIzl^?*dl{6OEG4m> zZ^A^6_+8vlw^Qa<<cIm#DUQh6-<k>!JtWqqmIh7j?Pl9tz^`qNYjf%P9sH_wLK+up zynFMj`*qN;BY(JiW}1+1V^OVlYPOefimGL>lRQ+6;ix>2A-SwM&ZyAGil2hPDpRa{ zgUk|P7i#r|K3pgeHJkn}nX-EGZJE0vT<rjn$OcbwvI4JUdyf1hR)*vhHEGV>;o44o zp@y)|S94NnZgl1&&_u*iUy%!)n8$%y>y4wngGdhmT#&zyo<=pcYm}x8AVd&F_){@< z5|^4YH<}l;S?IeSj1{gu(%oVrn7?3F`@G_^@U@;CFN_|b%X&a*bNA(0cDzRZ!BwO| zqK8O@KrL%6sIhp+LS`;%)kPeSP&W!w`>3N`<hv@d`YkPJvBn(YKq*7A=^b|8yegNL zkA@Ylsv*V_Y~PWo6mpm=+5C}pc9dYegJ}tmo60)dE~=6^$q8l7$?*;mN@9e(tP*mo zST+e_(=)|@F}P4+JfATH<=Wu_&BBFM#oC5QaJJNqB@_&f1?gy}kl=yM9{X3Dh_Q(Z zdJ#H+?5=ovYHB+z&~_ll*G2{SIsI#ZirhvQQV@^&sTbTS?w(X!cr-<twV8YpLj%m2 z$s_#Z4bfnQLm&;4P$i?=je%*Rmm-oHBZAqVGG3guduh{Ws-KBm?J0YPW7Fzx0BTBW z7MUejYfzl2l2FVPO&fe|nTn3dJmj82W1G_h^o-be6(?u|vtK?Qj28q32Ft;qKyb2< z!Nt=~?sj_aW_HGIj~g2SS-mF&4?>bxkylk&rB+x@yQz)4l`1AVy?;h`Zoc$%1L3a# z-Vx!<^a6D4vD@VoP~)#)19lRSd$*O(v{%s=<AXOCS)s4)pkLg32lSKn^3d~=Qv1-4 z{Ef}}4$R=pf&Xy!j#jBfV)M5lT#|LXj*ZnXo#V){RcQDRHO0XTW(#O{l4ts&i;XeI z!?OCBMWI+A^6!yM7+@yij%R4`^yk-&QE@>)`jp4wnH==&HD}P}o?J~N?zb)^rc+U! zxY+Q+M~~{Ev$FT+)0p95DYM5a@yV_7a<I3ywA6C8w}Idf%tJH342_UR`Fw4|`!Vm+ z9zx%+;Npm;s1rUm2^%Wqhycanz;BwS7IEwzmL?LGE<#W4@z+I^900*dr<(?M+abr@ zngQADozkL2R9fMoZ1)W9^uEf--Wk=p2UA7bX`G_KB{X;)UnB#X__IDuu&?j5v+9yY zp<AC3i|i|osF3UrOJ!>doz3c{SHQuqXtqj>V(_%=%#?S@XA{ktSkP^U@-YVsD%cZ? 
zh?KTk-tMlhhQ`)w&+~(;@aR%};0Ta?aGdjoF%GFYHz@@x6~LClw57EbS`&SJ1DHm- zx@N{h9rgZ$*!+|VR##N0T+D~Zn7m4xf1qc4@Zkuo5WY%fXevfhyD@V=-U_xhzJ!rj zLQS4zF2z7>Jw~E{NhYMQ`2Q$C?g);9OY0R-f8+CdaRu4vF0<ru4F~bIEj&7=<Syed z`JuY+J6vD-U|Z@DP>!YH6)I_6B=4xgYzKscpPB@1hYRNT><3Vp)AiovzGJv4CcQJ$ z(S{I%oHS8U0W2v4Ye|9+GdQ_<SQvTWP;zg!c(|M|%#A)tgVV49Nh<P@vp2I<o3;J9 za(sW-xmtt>_KfvfCJlZm-PCurUGW$n@~GDNaB0RUKg7qWKFxB*Iv2%xHh`j?k<xu( zoC_C?u8>!6^d|FBha7H<9(A=Ibz=uc7@WT_SD-BRHg>E>-$r+V=fi`7Am1_Fyzj6} zv=xVl_Krw9w%yz}edv>d*{ppJ840>6TT}2KhpKBH9*|^(==r<K6=7-lw=dIF<d5t3 z^yN9nRM!?3ImfW3Vvsc|O--_B?A?u>?iL=WcUC1Q%{E?DF4lbRP0jaLW<MMe;Ik2O zbK;^^q|HxVwXYQo-hYpe9*-_USi(s0irm%4`qW(G0XUZ)l2te%tCg7>3ow%Pj7e49 zT=SJNj?<>cIA$qY60{q*`pf(Cc0d~(PK53G7orGS4~5A01&Gd_q|q<8wkM(ee7sv< zFIL`!7@@R+qVo5(Mp<HS_b~n_o$3QWoY@^hHENn8pjTCGATwZ*?Azvbiw)u@O<eAl z9)hW_@p;@fCQkKrwfYHsr8$t53*7uz1eXiK;zYa4w%rMB+eZ8I)%w^<q7n%W7-2bM zdz#7*ZymjpiL2;T<mJQ4iG8}gsZ$GHcSC+10`|~X<kB~t>~Wti<%QKX4Mhz^<0(4Q z-Wk5-@dd4^RrMbWkwUX!nxmJHz{HG%D0r`?((6zNnYIX-^vY2=O!`g5BwBBT#sC~S zM}e*EtgM%L-Uk2HV;CZvuj(Y0X?Zxy{^}=WyYtg*JhgA+&32O01gZ<;c$S`y#t13P zIBYg=2_s0Tr`Lzyo$QcL4W}D?ke%T!%9~{wyJQ{~J3Sswesy}>|1|=4L;jKp>n}rU zy!~Y}Wz)UB#Hz@^(99dOsWm7Z9LOVbN;HAbZwY|AlDYl31^=@alH&UmXLQ}|9h0-8 z98(iPLIVj>Jui(uupDbb{D*78o~{g4ZcNWTOF`I;Qu`JX%97A9ca{Jk!+c9My1kQD z8=^jdQFQK4@zpD+8zNze!DfW!3W?48kdD(sly5t(zVBJ_bo!$xz*-zb%%H$|Z&s{d zf?o073UBw?kLT`tWji<2_(VM21wal}5Im&2oSEW0BiWXOr3aEJ$M|olI88;<R8?<o zXs<5&`sDW2*yH|*LGrvY0BjLFbnf}@Yy}z$$)+T>H|IuWwj?dM<gxNWrDpI}L&{H8 zeI3oi8u$pU7vKTK2-#<C!lq^W`LZ_N1@`mRHz3le+|#lmRQV2{cmN_&ijTnZGPpZC z#uid1;tHQjl>{x*c(D(hv$o|*gGdfF;SnMt;L|Uwlg)g(0OLC#!_NR!Co3oKR~4pu z87|g&d}}^W$LAU3TXbfRi>;6HSW_Dvj<ykmJF<oG_O>$C6h|4=$64LH?Ooo0#p63t z^@}BX|BrH87`EU|q9*Ke4s>L+jV^o2%Khx2k>L(E@XSsErfF#m<Xk><y6$Nz$+0RS zVuQ(QJh5^M+58L1{NQ4x$L~7!8zycy4~_vc>@Q5hT~M#ZhO45ic`(mlQM(HrCkve? zB?X!p0ulJ!K5JZExnVyr=9(I>1w;x>RGOP&#wFhu$H$kF^#OPLUo-t)sL!)A;Z|$& zCmvPGqWvBx^M_>5Pycu-oa4KC11=2|y>2iU-Y$2W@i&_t5z3C;K89Q{rq4kbcPfrF zAF<I4)eY6oM(|A!g6y!mdr0F0Ty;w)Mex!gu=)=lvxEb>1k2R<3Ie@&pGW2r2f2n) zUZ=rC_PSVwf0n{+j?#U0v^`rH_DN<T=RF_&i_)~4z;3nzt<#k9Ep<-h#6?@3VA)Tv zTxX!sk?aIJT&<4!#|t+{y=vd2^xA*~6<0@XqluowLzUc<W(&51Xhzm-zwXu!=I*vO z*Y6t)4T6&HvTOHpq5+*AKIaZzp`|ieo>d<0F<F1Q+e50_@132`c{lu~&N@9WFXv*w zjilfNp1$=Jorz5hCId#f09BGzxgdKqkz4uloJ<lDDY(2zui2e$1@nURYrp3-TuW&Q zn+fP?M^^X-##f}4`ut_aj`)5GO2ekbM`&a#T;|GM6ePJ6CpqLN*%2n+KOhD?yjvhm z8D+lwSs4*1<0I6%W^T62J6bhgyKq;Zs7Z0F<E-QFZ;rdl%dc~q4=}NTFll1ZoFlol z#Pxjn-hRx<IdpK))g|xtyU=Y{r`S~Xs_UbZZG8lP(3u$LUp+tNhJfD$yv)pz$pYKE zdcN$9OwN(DPi38Qd$a|(f4zP%?1m5i=sUC;{&c@xeAF9NP+x%V6JuFw=(}6FntW`^ zEgY1>)M@J4yHL^Fq+gCPF!07bH?Oc$Z1XNDD@5~O8E%k_b+<q)$gK>B_d7DX?RhUp ze(Y4Ihfi~+R8tYh!Vsr`w5J$i3a&L&J{mZn7$m#zf83;_3AiRnmYcD7J}j!$2`ljO zE3!$b(84PXlycuse3sE6Q-KRV_P|-@@E<NlOTL3g^g(NTBMk|FR%f8Vo=00yg8GI? 
z`NDhAJk#T|=-gn<7Z|?ikb^DEUc~DIftJFj%J7oE<x>aQxz&po{KE4S;CADBKUlvG zkdd+ygmP<WnU5oU^z!xcdhh$T3ecU$+doZCrZ0WoeQTf-1X4dPPHXe{evIM(`(l71 zSG6%HH8h@~v$p*{6ZwXiNe<)YLTN?@lcP181)_D-An1xD>=`Ohp3<h{{dQyXYk4f0 z&e)_V?YKPsxNLi5j?CkIG}G;xw)hPKXp_4^d6t#NI4NgGt^9OzyljuA;I#d2Yvtj} zOVF_EXI4~aGh(Qxn*kFgABT%8FJ_Vi$L{ihQLdlP6L%2M={AF!OJw#+vJ3O64)=J% zR2c$2=&a0iZ2xWo2A%CfCt%7Yn`iSJgnUIbHJj%ri+cxXz~B_04|$ns?)Drnm98nS zto$qk;+eIn^H-zu8FRjntdaorHeR+63utKCHPGdIICv%2X5%E~z+%IeAR$!`qFbxe zM0iOkN5Qu__S7F#KNVLKg&_DTPo!x9Vx;H7`Pex~^SVFhO_F(mwimh~kMGYmnrfN( zrpBbCq=ZF8#HIN~`1z%!rA5TVdIv|UN~_d$6fObh3(GI)zm_$-YO#{$oO(H7HKWQr zSaM4WJrp0$PVSX5*^I_P94r6)79{#m2sNIcyhXk3-|;{cB4CleO;%Nndx5{0NYuY> zz431`IDc)x?x%s+3ee^Ax;-8lQ<2f>2n>IuE7RZbdOTiAn&M07wt1f6^#Lrz*qETJ z7%FB$vCkJ!=SR>^X&r0)!PZh+{;j^U3e-5np7Bw>;Q-f!<Me_1SlSo<aIoHP#cg<4 z6*3zLMO1|bg^3V!fTA2%IUfldfq@Yn*j||sMp+MrRZpB%?=6B-ew4GW%-!x%%*SOF zU}9oeKwDSKNJTOHQRl7b+b@=-rJjhfh-<5f+tgCAA2Mu$JYTh~X=$aS`+isOT?;7Q z?uVI&YFAf++;QT8!-R?>x$&MNQHniR#gR<A8OnEN<Yq=eQj+PYZ@N0^<El_z{ZL^F z%2M%d8*l4s@<mWru~}b@QI&&vx8;*vZ+$n!s65rlfBOReSgta~x3om3?W3_RJl<Y{ zceyi|j)=eUc5^fl5+keg`mnw5jIY!6eEHb!^3id5-Q294MF6HYJEA`V0o}UNWT#t6 z^NK5-vYXxP?DfHScT_20q($lYCOd~WqoYzaA{OOy<y5b1zi742#wn{|q{Qx_Vw$eL zAFHiVV5PyfV!&Fg$jQg9sHUi<V5g;H>XxO(Wi<v-AYwprVIcmw-o$Xt!B)%+3(Ku6 zJqtae#rI}roaXU*a{Q6AnO$$gC8J&NQ)f`9nnFpiuPCepwNT}2YfDsx+CyfiN&r*y zt>&&{#UG^JZRl3;37*arjow|CdewGNQtQsvI>zocm@0_<Z8;{xJG3`o(*0<=b6tUH z{=>O|zUa718z`U0r_=K0Ze>?+ylvr|cAJm;g;r)}l(p18p6W_+>T25ZYQT*R9ThDV z6(#S-J^f@;J*OS-N3Y{MA63IRz?!f_BVQJ=2)Yd%jl8|QbWZIi(w>Xzlamq)j0%hl ziv*>_q@W|BudeQ!MF}KN|BPp9NoB0hM@qtZ#w$r8XZ)#9l?hL;=OHBV;$KhvxTjJ> z|NVGDf|FCd>zcdgnt^Y}&z9HWPSV=>DZ9o)FoCM|)}Y68=^;tMu(CyoRj7J)QLQ*g z((8-4vrZ?KRl0oL1+xSHgUAp-QSlc4hg|eKdgf3lx3PKe&eIbc|4CN}<QBfZFC<^U z<gXW)mx$yLrB{N}kdqUabC<ij6ilqp(6Am6ZzT$f$qWu+GOE|(ewpdPGj8{W>z$R( z^{?~T4$36fQrQoqamO2jO}YE&R2J73R7g|$kvv)1(3$S0FA&7UpwQ5l+tpkicagCr zCGfL4bK|oE3rq7G5ulLQcvT3+P$LVHv2@x3)0g)eWVCK_{s{r6&EDb?T2UGPo_1z$ zJ|6et=^~Wv07BoOvV*kzE%n0frGdg@2|}}_X|B%4LpM|9`m|IUSGk*vZRPol?M-h` z;Q3iZTb8l0lG1pK5}}FFMf6__FKLC3c^A57c)4bqkZ(u8hz=>{<HSX0-TtLEJ}(ch zF3G{ctD8e7D+!&AiG{CEcM}#^M-hE}{`dEHP3N2K_n0gLzkTZNuh!F3_Vhm@@Qe(w z#mi4+S%Ym<5lbd2fJNRZ$)09QbGuv@Ti^7E`sVW2ySsyGo#wWeyHgxDH40j7JPSHL z9x4qh3o<Mk30CEz&P>rYjT6~s)<t?WZ@<dK6)Yk~W5R{fbQC_Xr*ttdoqHn?|9aKe z$Kq`7(JASBdzbZ%U-rodfT=8hcGZ;Bmxu1hgzRqjbSJZd*I&j<dzCk<atsluN`?Vt zGQ^MIqJ*6}m7OYVHs`HEsE6un9)TjH`}uh)HKh{GHd9sA0T!8?tLn=e5b=oXZuZ*; zPuRca^eG#3mRHrgoILg?*9RxyWgphK+GBJ&Ti^F6=E-b)etW#0@ls|!tjy;0DcP>z z^ZFNiz0Vcn^S!=Yet$gzWN&&sKw0*q$nsa9VRyJg((s=0c)*O44GQDGJ(i7zzj$`M z3I?zxAK0w>0BuHhp<i6A54m_PiEs=?PGu~yA(oaLYWunt+vpoP8<_$7pRF|m*w~7b zk`bP9U6E4&``ZwXh(4LYWsI<p?Qxy;Rf&~tskd%wQba^ViZGyBvhR6R+=N6#-w2EK zkz^0gP6vNe+}J3>-8!YDV%ByUSr`EuPyrJH4}&ajlF(62Oinhn)Lop3Qu|oCm|lc8 zx0k2Y?F_>1*zV-?p-4F`5cp(|T+Sfk^XeWMWqadcAC^<oP}5CTlT(yXQc224xm<=_ z!Ae9*4l~~WKx(Ftt&`YCjVCGAGc?pkMnRKXnxCV0XtceC36&bJuIglE(NfebsjB)l z<$^a^5n57{o6GN|z$4p<2n4k0R7l1{OvB2HZZ4@QX{jmcDWK`8@2RgZz|Viy_X~oY zyCbiJ0I?ljBR($f>iSx3{kP}a+mPsC0VmMAsY`S1n>@cZYuf?p!THjk<M7$r2`SyK z#@5^t0t|CIe!K(dvID@+WK?U*ithIZ6vw1YGLM!N6%{#u;*8XEul?i1D6VFFUVfu< zx&DbWJjbM&k&$a6bFc~+O6d`f35DdRWGann%5w7Z5^^`!w%+P2P4$fR-`woKa#PPg z4olxCcg9zBcXyJnJW>j<G)xl6N}ylxj?SSd-Z#&uUI6bIPuWZZn_gAl-r1#uzVrJ; z>oBy}JmIk3IlG9%I!s7K8Wxfa3I#p7x_uiEHY%V^!@@~IJ>d1eL87v}DE(-h>_?mP zb$K0a$M*E;)1)*U+WgWYsz{SGF?+&x7^3YVh~85>j?T`?J$NS?H#d%&zcXOMuck}a zG-`g2&=9hIA==!D?`yyBe&R}ORWlnOkE}6bK`Yz>+Kin4_E2k!tDF7W!GpGMq^6c) zW>%_&`=@JVtKG#03-i#ewKa!r<cCDxaCBAtg+yxVVd)eDgWc8K?5uh^PBMm(P8s>A zv<&O)Qf^`4_Re<L#N<?U*}=g<K{20+xuI-I`bXyxn-!l^RVFDcOhHS?+TIk*ahvm^ 
ztejL}SYXd6uhRvhw2dn!CN8U>KtVYo=-b~v0d1g3;XVcW8k<KXQ1cbP08oyI?~={u zae24Cck>MmEwLpfGy~39g@ggkIV(QCuYUqfH@K1?bfp@Hd|0f$j*(asm!S=K)9H8~ z@n{5EmGI>)1i!?rgoAy#dK2@*DY|-_oJ?RlF9H~okkCy}%j?7<+1cI{vR6ycS|1^4 zsjN4nW@?+60Dx%0rGatBW2d(UHJa&Fl0=J00|UWylXLV7g(U<}+t0hn2uX}|4Z%t= zy2ypageiz=NSrP~Fm+uA6xFcIE*=LA75`w|@(3^@VuN~prD3n{697pnA`P>X<G`Zm zq2c~DB($uW76Ao2J2L}RtC4=P>FH!_{)K^|wqXzardB{n%`wOT90zra50SkW^5ZDM zT-S|;U&rs}D<z_mmy&yS{Pv-`24c;a?yu(acmj)#EGY@~i^JQBOHA@?kuvdMqM#7_ zo^yT!tPpZz%jI;8D*N=Z!lbG5bT}g@%7yb#5f<0myR_Ni1{$7>iR1J@yqb3W{u<*$ z&oC<!72|omIR8M7MiNvL3WdUiLPa%~sj;@n40H}NG_f)WJ&!l|QmFwE3{gQX8PdJq z`|44nvgz~Qd9=h8Oo7m}7$_9D-l;XU+2!$g_BtGed}u8JTOXovl?|w+XY1tfK~VvP z#Gz6^A)YozHzu`3offn)V2bTug2>SkG}y|`&%fPHD5m4{tf-=dbon@Ms>=Rc!$fzz z$te@Q*8f&t)#f14f;~0RxYz0My4z!bL$RfejD$O2Wz}_T=jDaSl+zHbY$}7Z=+bwE zx^<eoW8E@ops4^Ku!C^Vf)WM!BF;;azV)*#hVK=u+SgZ56i5-sp1TA7?jERTD?A;C z_N(VtPN)obq|Sc)I3$e6>i0Jrq>z3azq~A7FHR1^yN4bFOo`Ai?w7@<RT$Hjj1IWI zb#Nm0igYb4?Z5zobKN<<>FcvsuKB|RR7Az^?9LfB=jJw~M5K(Y^Y2bR+T9SR{@Fe) z43UX@dj+6g%ja}p{hNq<FFnKG5vTR7fQqkhUt1TeqGwVmrriXIL0Od#`X`)S@M`O8 zTR2yCS4|GbzI@R~`oZh-eD^B~IreZcsI`A$tHIp7WMM(aA3|ao`+KsAiUJlEK5*|x z+(~~!*nsA{wyI`}yG46_J4#jnP?zW90(JFzr%BoBnNWqez6jx#!^dD>B<ZaG<c&A0 zM^Db|@@Dj^u~rU~F@7ii(nrfe;cJE+iIx|y90M)xczZj01|caRsXlbl%KaXAkUKH} zFgY_J8HMk*p~vp~N=8VC)iLYQ!zAR#3ltl&7NCKI${u53K^cDK2rzZY#=XyajKSme zz2wB#7)2iB=15Iz6SwqaMq^R37l2b1(B*@Vy3trm@15Q3cEM<arY#O^EciB%!RtlI zM)msRU>lXMPY;}rDkj}RlxkAr$l9tRz?Z9?(*^NPu*F=rBqq^q9-O{o?gzniPGLTH zDWnP?H+Nu!8~@w&7Ehi&k=c=H5=L!7Q^p@F!Zr$WS};g|C5JBmnp$*(3ZERE%`G%r zP2D8hMj;5}FLmS>X3r!_F+Vl4tnjr3$AWTeYCb}^dp#FOk?TWxctpx_SRAz8z_ilN zxUJ$t_)tGEg&I9kMzgyf8HT5D?raUL_8*G1f5hTT{*lqS%5Y5?(y6+;M?yg<EGFf@ zam=`*lDMWH^^M$`c4M@%ipuy+-N3G^_jbC*1_uYok+OOvS$1K=5M5rqHraejU5C6s z>$?C0#RsDZVYBDgL{+>*bC(4@*bXs>I>=GgNU5V|O2NX}dg9pnnUaD@Eg4nY1$&^Z z&0p2g<ZS6Z*#ks&4c?kcpOX#Pw;ptMLx8}Z?g=b-7`#pND_5~)r0ao9d3<u~pHfp( zXaC)^(u+{xq;C%bO5E8Pw6fsWwXQ|&GjD8VR9ak5!~G3Ox;!(%OVSec=6BUt<t8)E z*Di=5r09#n@RyDq^qwr`MGk`E_6~<GBiGKdvhWOl?&6x9`fkLIsLH~@`GF`IKCgi% zr!7$8e*ZABP`4oAWj8lFhVwAnr!!_kk{Dc~JX`yH-VBV;bOg4Dn6SXzirk_r=u;nr zq`^nE91>R2F9AFQ!CQQ_jEK1*d*F7MKILN(wE|H1%uGz`>#DDpm>4MpxWVUEW!1k1 zJF2Rb_3VIY4|F=+!GZIGgVlwV9@p;A-CEcaQTGtoK%u6-AWcutgu}q%c{i6<rTP2k zRW-pRc6q<Ts6Y@iQbXmn&INu{eYdfxG{9lhm&xXEi1vFMUtyARQsu;;L&U&;e+;~7 zc#AuEf^ct1G11fW$Ban{1cR`!WMVGb4s9jKc&gSuIy>s==O&1{xf~ee9ykZ_f3btE zMLO%Hcfb3yk}*EtKLO|H^f3kj9T1>4Gq5o*un2Q%R!T&JhJvOjE$z{IdAXhvJ4!_T zjUDJOf=;+EoD2|G|Md|it30hJ!#Q?qY0a7HQ|nLTW0;@gaC$<8hGxJuv<4<*#lzDa z_h{vKpDL~Yh8thC9Rz(Od=b}l5NsnlF2lz^4eWtn@(nzl4IPpI?4kMG(r3oa9Eqo* zDJeEIG&l(&?^^{I=$ovFNLh2W#re?&eosf1vz-{BZRo<<cZ`7i;-VsXbvaHJ9)$GK zu|EBTf^Q@#DOg5^g|=GrpySlzQOU{}_gUP2_|Z-m3-T%)Qr{(ohD61sS7wxbTKuw_ zNNWhG=pb9%Tif+{`lv0b-}G{6*0tQ-+f=uKVZm^alaT;hx<hTv7F1&wOj6^K<~S#- zYw5NtPN>&?{vU~EA5y4nEWW3Km!Orq$bSPDLFm2;*hs8LSW{gUdW4*UoRW$n!qZ(@ zMM+Lp3hUP~FfmF>OTuu_-GLefZr^#JrsF}#sw|)v7*{z`(mD~7QJj{OQ(9f=6X?gr z#&T<W3rnIvLyd%tjD>@PiJhHKM2v&?JRau%@xxgnRte{@B5i$BMn-yEA)&O4Bw+7+ zC`Sh+$lnLXox1v(H$T3|=sXa0xS5!!Ff$7iYzGGMkJ#=j1x}Km?|gYx+4k+tV=~`Z zhl_bq4D-}9*i9hD+o-82msOTFpyZl-{Kha==2=)-R8*CinHax`yfB52mq%7cs;95> z_MO`_IGESd);L2*LdqmUK}mhNr4}n56qJ<8YAWOu6fTbTSPvZ&3$v%cUu|ovuC6v# z<^gGl$^jl8ULLF|3pgzBGYt&&N-N8oo9gf|FMK@aTiaVItI8-*Spw|LgT86$Xc?Os zhlGbhjG(WheYhEPPd0WoB^4!bJ_ntuxos#By$rLWTWZ~SRnKB!!$3(*bwq3w2M60N zY-u8GO?7fg3T}RWsN%A6JOYBanD=%E=Fbo?i`oSjT#%FjZ;tg?LAWh1FAJK9H7!t4 zS_nHF5fuhf9~T{g%iD;F3bC@XC@U*4GBRN02O4m3bLQsfEUnD{F*;vb1aU5OWEh1Z z?l@S%LlP1aXlSSc^SRmSu+LbtzET$@*xA{PjPzNUu`>^w5)lz9uPR?$nnUS4HgBV& zrGA!(l$uA8oRYS*FdJ)bnUadi(99H^m@5wE8JU?7NUy5K1|1yCqfQeAxr(Z?sHg~5 
zoyRoL)z>L3DQax2#m~IYQOw81$Mjz5x^kt9iVC}`0P}R`=&bFm1A+spYHKcDlyI=I z!p1xs7dMxosS%vA=K8O4fH4V0<XmdZN{GOmp0bYlvi9+`+yZuX*6pngT*pvUQc^}q zS<~1|NCJ1R(#`q1(*$RkL@a%?&!6X4l$XJk2GP`11IL9`g<6~Hfq8cimm3o!Q4wLd zVpu?$lan2E26X=QtCxoxfD7f~;!Mv-y*WE|<m8{Dm?zcI)haD70jmIF4p?$>aj>z0 zq_AQhS=m{6dAZHZO~BhEC&od)!0N|<dH5iBQSdNWnTH4k_BlH*dt&kiF6MQ$)Cq{m z&k1T1lT)WAVrd{Fb@3eiIa?QJS~^-6M>`zMGrD+sRMgkQ?}G6i7t-3g+EP*%@iDKb zT~J)u&~WJpo%cu5(&1rVAU-j6^v2N2+A<A}6e2`_^bGXgzTU-U#q(<`pqX~omRNP3 zkDt%R!H$HKq`l=^UtL|-P0Xf3cEQFWroQCzTv6+klCtXh>LOO=sc2{v)YJ_ut*tze z60)**nE(C^;TZ;DQ}1+c9$qySMO@4q80f-J!0K&aNP-=9aJ22~?~Vuy!Noiv&CSha zZDWClc>pXqHEC*k?3m2s@iy8znxNtH%X5v*^<F++Mka<jdfK3JpR1m>zM-C@i(_R? zWlB;!BLh9wT>|D|=y7ncVa3HfbP7(6)bx~*k*m1QzK)g}AqfSYpbjA^RYpo27NkXl zz|%T;xPvfaJ;ERtz`V1EdqrI>Fwc&Qd2*0(U5JTrF%L%!4nlrWK|>>o`Q_iwJU=i$ zIW_k1@gqFUGceKzg$A~?H9vj*T0vg+JP)^*n=4oagp&hQHy1}AFLydR8ZcuZ^{|~6 z&#_zzl$BFIOCv-qWNHysRnRtD(l&AX!49^dNsXG0R$W)m$`#=p7$&KpfSY+Iu=8ga zgp52=xp;WhaWD_w2F7;=9L%?(nYXgGXzy$b4-3Y_JP!}IoxL?a<~cbMlM}`#M~|HF zb_Da}WI8&U<&~wg3)4Nlok=MPzWzSmNDnVe_3?y9q_0<8d@S_SjI<;M9L!5z6z2x! z9c@`~Gta@vo{$uO^=cp3S3Jzq@Ts3ArpQc<#~Pvn^DbVVxS2<J8)wk@x;hl|xV#NH znVx~34Epjqt_B#LZ)`kD=l#5oWFDkxcXtPM^%>h$2@LbWVZrU4?YqyP2L<}U$z^0> zfcDhXQ~(JWxw8ZW-+libm`tF)>v9X$6D=+(NYBVb$FF&YTU*{fF}Y@}^zzO5t;dI$ zSEZq&)if}$arZ!kL`W$r;=T^+beez!bY9;rg^L>}^We3PWZu%stfj3fEF=&g^Ss=k znGg@+VIKGy8y`L<^LYF`Fkewse*e*f$9p@oi#LZyt_}?i3|$`>8tO+qULUwVJP2R( zXm>X+I|JfbTsn_o9+%ET0}c*$2<Z9;dT8-5Pf9__t$c=vJTol;EAt@Z2&DHp6!W&Y z8hmD62e-FDGcPMEeYCeZ2J_GWzqgs18Q<C2!Ihy!GtYow{?3DY_jexGHZ^#{fNN^3 zpeWDE#zKl->0@tW+1u6r5%;~aQK=7BCRcCIi^?{HO!Ay+-U(GBISmt6CN>V50nm9G zIyy}QLmPKbL};Xpk`gC78?I|oX9!78(+lajCgEWorSrx|F>h(s+}0HOM=(D=G5pW! zJcjwovhJ?SotInNTN)wgK-W5`E$GLtjy8}fXkT2A4a~p7<8AQjJUa(lbZpd>{%-ut zlXEJZCM3hnJd>Lb@>tA+pJ!$H9XgLr?2sLw`6K=O<n$OYk1LA#n)&;8AKvfk>-G!q z1qO&oh~apG$$7H(>t*-Hj~*VrCb%EVAdsGy8ZQ}H`4g}z^6L197xrh=j<;P~e)r>t z1I*KE85r8Sd%B^RSH{mgcpD7!c>KKKQ95sNfO)LBN(}S%4%W<gy-h+=!tt4Js>63p zs;s;O`WM94Fql$NQ68+6loTW+#F$m#80PWoJPTf(KZ<#EV4jp+?lcj0=EaU;UhHe; z@%Z^8yv-M#Z~TLp2hqUwtG{CY;r*BI-e+Z|!7*oLWiBoJ_HOX~y<P0=tc|r*c=~%u zRpr1?4=Xz_5xo?RtTQtGa$4<pMd!@Zmv6s#8#*mRLt76o_pm4w^SI{QxH_ICB0I|{ zs^^-Fhk5ejF~9F^@GuWLf25xun;1Eo`DGOISf>Ph$<L$mHb8rTulL^W&dTz_e%;(y zo0_-*Cp|wW<4ESYj$-~eI!{DOLBb||6!SjkaQJ!gV=_;HW?uG>Vg5*OgJymgub=<S z{Da-c+qbqsYE+f60M^#r021`WsUNUTI@<5=#g!FJ?QQ4Jb6ME<5i!Y8N!fd(wx`tG zsOq|jg?Z&8nHSMV;MIAu<LZ1^NYFp8^T%NxGv5X`^Qb|*q^uZMRsnVZIFAeue){4$ z#?Qa{Czy|kiw5Rt@Om3y9<QHga{sLJc$oj3w_*GJG3*!SkH}RD9jWsPiE&eCZ-dv* z{~E*QW~ak99-gHA;Ql>$os66uhGVR&`1kX)#g&zf?X3{)K7M#7F1ehTMS)b@(k8M7 zu<xF{rJ|%rMMJG+Xk>Sk&U@@L@0N@|Z}WwD+vDgwp1chn=EZ)C&TpVIZ?#or_?Ry( z!o@u1I;@qIS^r>fMk>y{%^#xkx@d26B=c^*$aD1Pj-&IxJBFqBL%q#0m`CY+TSwdT zw{M*t?ci7)Zf0p_LPSg?E+GyGLBNG;5d6%1TPu2<6l%^=aYY*yx5nA?x_Zd`^tvg} zkW>L7VO4D{yW{G-sF7zHp8WiQT&3+%I)9uP_7CtjFJ3&SrlC&8Hh=8Eby%G9ZSLIK zf}s}LRMu3Yu1Wn~=7oj$&!6W(Gv5U|k9B`A%oG0sI)BtPDe~icn_9f{Dh2V+t31d& zuGypqyiFUJd2lsmMuu2^nwSXmbUArhQXI^S@9TWkKJ%Z~25#SZAS7u{z@et?otIiS z6<geEX6J0>;)0*~&pOW}YUZ7B1oQavHg%twZ*2?@#p!Ltj>_Ahb)Nm0%)fvC9=<3v z$RAsHyF`kaZ-dr(tZSF%=Vl-<*VEH3t1L@Qh&|HJ<H}V6A^iO3qcPqF?=>l6Rw*>| z@&6R_;B9p9c$;Izu=&LW7@hwonCFjAI4<*T9j(B;lA^qwmBry^JlveDY^)ku8W4iD zVw)NMtDmo`Y-nxa;r?np?9Sd(X(ju!T$=I@N%0k<@uh>_5lOB=p-0BB1k7Sqepy`H zJZir?S4l#A?7YnpG3+t@JQU#29NUER$B!ODOr)-^npcnuPx7#>D`{z{B_<{|F*U5H zDlg2>rlO|Y#I~{+lecknvSVRpdhz@@)>8(A#Ky-A4));lHfZK?#jszz&9P!w-J|{d zQP*LM3yz$7;OCEopR>GpL=5|Q_Xuwj6#7}`l@;Y}EX@x$gLs^qhug&56n4G^Kl7Cp z9annK^YYBkP9JP=@4;gZ5d%UtWm$)$n6m2$6(fF$xl*{V!=jicCL?5#un*4X7Z8$> zy6`)g&(268CLy-5vu<iVIzNwQ{tvh&^$U7_myERJ;nzR@^b_oZf}&hzb_S@&;aMC| 
zQW9f<c^g}+%9@IXx@r)>k)c7X(`k5kxL|AoJMrUtEQ?u~nHi;|B>{kI*ZOej{IQuw z9y4!)hk1P?135Vv+{sKb-J_Tn;6E=S0x_%?_<0yH@i0Fz1v>v2&wURVnLy`5+Pm5@ zI&Xtb=LH4MvvaW9IXS?vi*@b5ehgbwQQkLvT|j_ua_rk>#W&|xSW$nnSx3$xDXREd zT={i5RdrStMm#!C#CpLcqWFT8EFZR+?W@a+Kn_SEFz;k%bGU&Y618s8(b=K-$Xq2V zhW%sw{4c0`edvPxVtwm^gcuw{7zgR;>0ZCY@?Cn`n&9VM-Ce3LRo$GOqM)QmOUB|l zl;maU>CfrtXu_rrzdJcT3Y|quOCu>cVR(3e7Vq2xQg&JNHL3XD!8~TZjSF6H1IHZj zlarSd5a8$L;yB!3W_kkEDGYS;iwf%NYtT!)u`n+z$j`;a38(uU9W7QncVNEFjYp3j z;>uMT>giHZQz3o58=D)RzJ8;qAZKG~cDR|0^hH`aT0>(4h-3$^bmP%^6!X`I1o*Kq z55>#F;}jXoS#BLsv&hJj>mCsW%F3#Eu1OKGN_fVU=^0tj(a{`U2?bUG_>q;Brlg|u zKsX=X2n<^gz9Bpit?kW6GJo`Z8_@aVF<)L%NK8z0rKjWYTg!{{(vlazcwDM1J^b#Q z*RO$OZf-6wAJ2xyx>s-CLQ6RrEGrs{^0MHZLho?0$I{uYEzBq>DXna*($Z64Y{KJh z2uR4uITcWTJ{3Rn=kUz8alt#^X1^m-P*7k!O#^-1z`U`kQC?AgTU!(SuJ&duBcPX? zD{O%px(~J`9vJ4)>!f1fC%^mgJrg5?AJXG+vzm%xfSHkz0oaI(iLx*^#kvG9KPLlo z#ZX9q?=XA&2M5Faz{oJ@Jk}Apy}1SygPck9ESL5L^Qh2*tBK`Tx9;y@n{VTInvj%) zUD7w9JTxvFhWmy~SZ-LYrm76sGcwX2URVM92@FFV=ZW;_>TVAY4aV<n?2gp=V>AEm z!w2we!1gb{vB1G6BqY?*Quhz=>FR3#`03}Uh)@Cof`PuS!;K$3yuY%%aQJi#^mc(X z2?+^0Iy>eU=FZGc;qf+SiOH#WRS8Hau+OVR$vD0kR_qwL%7yvah|r*?PoH32Qh`eD z<!8fiW@T%YSC~IGIR>Z6$I}gK%k8ZVtaH>4>O3%iWBmG0KmW|l$zh3O8Sjhd&u(pR zG0>m0v9=5k3+BVIJ`O~6ZDkqPEyctS==}Am2~knu?DS-;EuTN#xxKy0E2K!stSD?6 z>6h6RUT|e%>CR#1e>h7-!6hA>Qc>DI&BDqqE{bI-C7f~CtG(SFY(tn1DH$pO;^XIi zZKxmMInvJ`bxkUMd~)Q|F_{1P=U>W83jsW5d)wm!@8V#0_Uu`4aWNA!lc<=8(a~!k zK7M@i{3*n$5c*-2n#1M6{d+L<gCz!g?C#}OTVJ!ju||vIz6U7giO6X9wTQ`S(vxHH z<!$Kk==>4hW@%xTjEoG=oyIX2i5y*>3W^I?w${P^;kngkPiMQKz7Ezk1sI)=jgKC^ zF$AZ;+|(Fr_^@A~hr&K*W~GBep1`&k?HBaj6yYFbrY7O?b<Updg%#zaH)o8E4Ml_m z@O&LN7dsgPAE>>sNw|MzS5S8MH`t@*&z~lwIWHHHSzXdL6%ZCrN=kD4TUGmbdz?Of zT2xHL+|n#EDtvZfCN?Sz*E*@=Gk?q&7TVl@{LtLg=!a83D9Xz|d-~+akC+-8o;rOB zLNHKE2)gqMa#z=vQD*qvyT-;^3JP*APWEve=j8xhbNqZfU{+OCl}yZxBO=2FuJ%5B z^Z+mOq_l!Ml(dXk*NljZiF}@WKz|PFN;GtXFS$w_F)S!Ebbk^O;*vwNSx^=tJDre- zP*U=ui<@g%W!dWHT2*Bkoa-!Xnz^;P4(Et~;OxD-w-3LIaB_f<0mD4l!sXRv)XHKN zrT6c#->?FDgtW9|W?l{>6C(hOb!CK(md5}0U;pLq;)LtBV`A8$iLrv>B0@p}B?URG z{SgWmCmSm#Cl!y<86Mq>Ht{~`9U-~BOWVIL8~65b`JYo~nT1uNa_aJ%#ySQkOpWzV zojP?K<qYuk2H*i>I1BcUcDeaE_a8loN3DmY!Fo?8_`RR!9xyY$di4scntf(|V)U3d zjD7n2Y44RTQ!^tNYrq%;`XT#!yT18jI<H>6yneMmIKUSUKAd;xp_*Fi#wLb=!2w{# z_Rx(%>Q^^bqoTt>x=Bb$v^3NJlJ1VS#rfH-_0{#Y6$s?}x;rvblVoL3xAXzvl~oks zhdU#jic5>vHddcKe@013A$3s#ytIp>y_<_8R8NF+NPrJNA2%ThIgOwW^*JUbdC34D zPcJuTZ+90@1XO1~q$ij|CKi;=g8>Fxi}XOCmQ*-9V%An5J>8iY>6uuVU5?7zzz#sn zg<A8ds@&7rwzaXovAzli#N5OP0v~1;CM|7Eu%Hdi4Qt?D?%g$k$dQQ9&e{^%z{jCI zbmu@{Bp7T0LPAggCwp6jGmH!l`xSNuwa`*XASNz)Xyhv9?~lYpM}YN(F0UXbV`*jr zKOEB-cHYa~m6C#7SVSl~J~lWs2z}!MI{3+>!`Gl^!R|HJ*GOIv2djDJ%oz!B5g#vi z%&*2A0i>rJ`nRKEnZk;)!O>v|WB>p5-U2MHtXmU)?#w@P@9ppFjv@&Vh(Qt}M9|>w z6h+|_P(T5N7F7j>6z*=p-Q7L7y9EiFgt%KrI_Y%JtW$?1bbo2P-QWCk=dxK(J?9+u z*~i}X?sx4vwU;H)0-h1HqoSl>jyDERVcu;As<Q*^4yVC)lK-rT7~3JCJ%WZZR^gPG z`rs^3Ac0PH*4`ejveM$9ZO+N!!qUn!szwGU=KK3QF&K?~`}ds{I74%BDlN<%A02|b zf!>pusfntxA~*!_A+QSQI;;)`!Xm!_-{zM3cR&0P6&?)y0JjnN>f?<<7T`I8t`od) z-rm6m`Z#M#!q)#XE4V-ffwO;L+y)(%yt=$lSW*blsJO&Mxbtx45D)<>&Ye920utm3 z_#m(!^!umJoKjO$K>%-N0!pl?q;PG0<@=9f=JC@f9bIjpcC@rKjvPG#90-Qw=s~Bn zg(cX+$pKUh2hzf50QPK1)(MHRgTsB_zIyfk{W}m3Adrq9KX&BEA<hE_I5-dNVP)U3 zdoK&yehwbKLn2t-GZ!Ixf*ZbLH_MJ)yZ7wf!@|nS#?B57D<~uck5x%o@$~6aN0_rr z06ab!tcr+=*i*>A{=N;MR7~Qc*hMj@51|=46EEBlPA)D%!3%2Ys>XPuknoU>?hfc5 zZ{54w+TLnlqz|_Xsl&Wd^co?$my?&}<Ku-$mT5ej2Epb*9wfX~Q3?(V?i=iV_7cH- z??AT)!$Vp|iVulcp$5-ZgBTXBD=;J|zogJV$R8pgxL!CSdSk&E;cUgkFP=Jm5-J}+ z{Ps)N?m|W|KQaigPs`0AIXeoA3LiRh82-z3-~e18JS}JwZsxxI`;VVGeO5^5Fz>Pb zCuH}Y*ORmibGy`LWaGiUpKCV@J1aW}3&%lm6{E29s`RSi)f;zj+*kz&0acwnCjdv> 
z&%6ofB*vTfgkwK9H#eN2va%xJ$H>IM&7D?UTC}o;%mmfcPy;-0=FDk6zM~M+eKNRF z$h9lUdw95tiHe*)eG+czKUR>JmyL`L|NWRpX#CZ;6H}vk1vy^co@VBzYHG^BLT3a{ z!x<hwaU38dD0D$qPDWEp9nKa211#a|=UrS{I6F7}W=nqrN4$Pxy}G6%C?pVMkFK7M zimEa^M@cCO)Z`W9fZL%FoG#YTSRWWZHZHoe>++*V57F-W`L{1?8)|57R1>_Rq@2=O zQQ3Wm&wRsvkcImc`%$68!Wcdo%UuVBc5w2;#vN>2oIE^-czI8pK7CeDP)<q7j_eeh zoD>orWkN7hP*#+Ymy=c4G&u!%O>K<5i*r%cuf2N!m=~9n(9zYw>FFvcDu`Yb6%iAW zl#&F90^ot<U1%<OMFrFIv&eMZPoGZAOeCcylN{{S)m0@VCBT;@B_-6<RWRC`SS-fC zP+#9rPY3!uZLO`Qjny)@FbB*tdH%(#x8E%;FXZIsxO=&S#MK2Qhs9`NG&Qs|5YtrG z&{D^n;X}hiY8&fwiwZn^7>0ObX<2DNvapD-1l(9f1qEd#RSh+DO${X#Ww_W+7cPt3 zgw1QuE-LID85)_Mtf;RGi;gBb*lFwHl+{$F<zz*~FP;|`I(_aO+)O@xetv<o!qT!L z(lUq7h_dr5vkK}f*~NOMb>oPB9DKrT2hZ-|KB=r@5s^`qRXzOt<*PSuUxTy&p$qp1 z#421qxV)6KB=X!PB>)3(17RaPcR+=O6~V)co?n<ZJ3sU4^(!>4T)V!Scq!i6)*33n z9fO*m4Dc2a8D3RW31ki=xwQhiU};5&iiwz*82;gp%nT3EUAw;4*i;uA8|}$(cW|;N z+E~GJF*C;lrGvqOg{3(NFfVV9w2Tzs^P9Ie{&D6gP&l{m-s<XZPf1Pk3-F=PT<jc3 z&=j0D(+HN<mLxmmB_`+&$|_1GCPzVBeG;f>PXXue>FG+awpG<JJ1(NUn@50yUyA2~ z+Ho1P(`w|syb?Qk&h9*LYA4r;qx`2%oxdO=At|e@qHApI?&q74n-ias4Dp(&B>_(` zHzAngKN``F<Qp7V)7-58tMeWxC@GMf9NawU&QuqIl_j9dz}V2p#0bvZ!^<PLFmH5n z?B@OZXsQLE`tIJY<g{c@Z%>i~iC{@EBbb6&ldP$36dylthL0C4IFcPvbNaXd^pcsA zIXN@&{M&EeeE0sz^JkN@Q&rU!Q87`_6q!bLfS%jY9=yrcfn@7o=jP#-o}D=`GSojZ zR8U$BoKJFcG$okp85-d9b@hx4%&iD^&Y)4qmLwZf3&Pe5#}mxZYcHv;>K_{!yD|&N zuWqc5OHQKuFl?P1jLb|muvkTPbtwf!Q7P#QViyI)CB+q$g=OSVh^uKDSn~>Dk*}*9 z;^;|j=59$x1l2iDOItYm#pbu<*AAn3Ghe=XJ->LRw7fVnD%``%9j?@dWMgG*2@f4W z4hA&n=xh)8Nli-zNxXi2?b9soz?L8wKsNXX_yRU4G%{kYF7QdEx%m0}<QL|)ceLac z<OT%#eX2l3tI^0H`3j2*mRA=4Xwnln?xQCU0fdds^*MRj35jvx5up%W!cl^P{9!pU zF%J5I*0!d_rMVX`U;I+@aNHN)zJxkm-IuGX%X4zG0RNYg;-NusWLSnqYU`>7hI;SZ zyYsWT>tPjcMR#v!OKW{rUV*)5tO}mKUl7YDV{t~Ed=}#_Y#7KTh}&~qcK0cz-6s{! zN+`&wX<&`<1bYVx!#g28qqe1~yskDSD>FJFAu1t0G9Iz$g!riVc<_bXlH#7x5ko_L zQ0%%`3{D#Z9E>brG5R<xgmtWJtU#?kf&Sn-IR*JK@o_<6!2!X6p%G!1Qj@A`tH-A& zH}2jAlHbe;_~OOQySGOsMr!J7(z7yR<71+tBjVy?VY#xpqV;l1OIuTQZB<EmX=z0% ze10<M&Vb}^+}?QcdNcFLqi4_7ZmbWD3^cVil~-35mKGt&r3&-G%W?|xiYrP%P%W&k zEZ?{>J~P$O+MJP-6%iBV8|csQ_YDXQj!BG9&&|m!$WKg5jk%P7Tz))q)o|@#=(V?Y zwGU5EqH~fDjE^;UwwF{@retRYL`J%JF>G8YrnYu^W&}-rLlvy9G8QMMtSO@H92lDs z5|hSrUT2Re9z#i{UurcZCrIPS!O3N*m4o%2(_0zu)}33RF<M$1K(}OMr6naNCSHnz zM}H|fF(WG-HeSBme06Ez>9Z$4(;?4ZJfE4Jg#XsnSJyJFj%l#a*48vRJ+^*r71YS( z_LjPj|N3NYmz$<$#vlKV5tuKJ*uVSk(UV8ubW<}EAhWxBI{>=yIXKh{&BAe^<@dk* z)61=w|Ki1qyZ7!uQ{euff@WqXX0A-mU!4O~-+%bv)$7+^^cpw;xcsNj9<N`&5uVjT zU}W+rI`C`I#Z1CQOd>84;*Tjh?iJA5En>J^$nb)yu@2tU!kI$z4+^`KTu@chJv0a; z*4ES2)N%P|*4)|A(cd>VHyaZdBP}g;>h!6@M~@&1n<)#?PtbCD2718};dL!dgX1F| zy<IKst&MFhZJn2aVCR+=0q|&MOf<0$3j4bc9{^l|sE0-e`iJ_4M+ax-W>&AS-D3Wr z@A}OfYd5YjZT;itI&8f2;64PfTNCksw!i=A(T&?Tm)2JoR<6!1EzB&=&n#S-nV-FK zbzy0J4bAeicJt=k@)De0TW5PsQ$uw_U0rh{;JIs{A1?nga_#NNCAYVIy!LkHwf7EA zPHas(GBP~{$ZzcE$SEt2N=fsLjG_7k+PZp}IXLTET5B1bh-w&V*hSa2kB!W&N?<AM zyz-oKwz~98r>I&=WMzKCcy`^yz~su$81Kz@ZviAA1Autv=Ha<df`N=#Ub(t)>*mvE zPrv`tY?&ZSUcGq@zVP7D12B-gs6BZ6@bS~f5bgmm;1gPARzUyz;L+m;4<0{)jqtBO zndRq)AASVTg5UwU1!^A*mO;t<2bzVZK79B9t-XU7_5FKj;rI7H1x=IG*oNcUp1hi# z5>`o~=J6K^3FmeFS<dV36vgipF*~kquS>8X(%jucBcs!^ifZe6Mn?hFqq8%kv(w0v z{l^8MxXJPS@-kZ|M-7aYypn=~lA^qlBIsd7rX&VY6dn@=5L#GUUAlgqnOh3Um<f0T zN%JZF@n-JhPw4_*ynOup`GY4&rhC{9t1n)^{z-1Bw{L$Xnc9{#nn>osN6d7w%)Erm zEXj`_-FkrJ2S-!6!I`Yxx&@~+zq&HDI6pZ*_sKT%$HA3;vMV#pYoGaYKEc`Pw*LO& zx`w2@!qB8tkI+abZ+|N)l2ILFj2FgO*#?zeS%1(yvBs}KJs^r_Ii<m^<fQMOX&+V< zk>6L`I<tQB{x5y?1J9cI+~2`-hlM}%=>N->&hb^R<jdy`0(ewhFIrv_F^>~5ixn}A z-Y;UjN6=uGurZq|Rm+56L8ejzLP9U4WR+F4^z<UR^O%`*wkA|UGp3;#H~PoN3d+h{ zy*$l{md0kL#^##_k*}GRg^L?4IU~Jupm*`w`r5~|up2*3{QX7PBWcurhV!2$Q+xG> 
znM3W(+b;qhc1L@SpJYjXiW~^dj|?Y(0v^Hq&6_J58;DC@TL<J{Sy`T5TtqS>Gt*^m zW<3A6{N<#v%>NEdPBe6M<X6`u<m3g#C((n$?LB=g$TR~YNnPJaSl1;ir|Z^}Z?kI# zx#URl)+whH9oSAN^Qcjby>e|sD$**4-oAae4b;D|tloMQkkKV=8Of>O0%#XDjTXX3 z!NOq~63baE!2aKq=`wm&csmCNhM#YA9FSFIb1U@Vn~!_$%GRv+pKNGqqOtvQYIass zd|X&`R9H+jSU6%aum!f)HZ_b)O)wK8Utj%c>cGu}$V}Kj{Au>UkI3JW2l@4zFQ&`< zm?HM|Cwgb53*T(y(evjh+L<H|F1DFQ3hdgQJ1F2+Z+-^+?DEp|;sOfzUrK=cv#g+> zl1z0B4#FJ+u82y@gd0fp54NRynK_bmEr@bh0}&Jd;^xUm-@f&UuiPhXtwt`w(5nuK znX;ZzKdH+w^vtD3Ro{K|d>g2LVOhF<j}%g=?UKr&=yJ|5R0tn=!8jaDK-Z7$oDRVL z|5Nrjp-nchv;ukK78D+llAc>x-PVVQ_Fu}9iLQcQAm7gP_qO+SxA*rTpMBl1+C4bX zH#RypJ2QQCae<i!oS7wdYg*)cfIUze07Wzp_%EfrfBMULz(3)fDT+St1(F6@1G~?p zcLeYcAApzLxO?xjs@cNu%sk-Cgvj&rplW_I(fLpB1C!)CJ3$a8=NE+}CWEkXWcXWA z+>C7<bns^9Gzgl`se@N;+<o?1n-sw*ZG&;G)ON2HGzjLEFk?Nfc>?EU=$YdZS#|5d z(`~H&RprAEKN?X}b!Zs}RH!GgK0+oD=Z!+o83dm<3Ogu8WId(vuWAf-jE6R!Kyaow z`ve5UCZ-gYK%5Wq^@{+Qyt+8MvNC_|8W=2~2_-)Y00aiJ%gd|x?yugtcV%T|Ze<z4 z{IzR<v9*8527dFwgFA@Mc?{r0vuAFVI{%3Of3%-e+Z6Cm?|-Iu06e1M5lM6J{wGNT z4u}AL9RWO&0eop`3P^roesu22@4)#_mb-?BD_dG~$}8iubNyozTmwVxJQ(Iq6n&zt zrlIjsO)@#6dST=7^xFNiTC@{tG@MthvSXfro_}Cw2cNM1-t)Q=RuKjsIbMnN@7|+f z>9$_~3yN!4sVp&?597(L;woqye%2sJKtBjRPh-6~rEULB%kOU(UnK)`6OuiN&hQM2 zip|U^tf}iAANyj0`-bL*q@+au06(zgl%%$<j@k9Ksih^f0xXo4muPCLTM;cM7Z<Lq zu7Yy-m>>DG$l0oMn6Tee_y}B|J^M`i&%cEK7caM>eN)CT{pAzAqYXmffa;m843A10 zIG=@egyF$Mr%+XM<+oGbZ`}x=L(@}DJv|V;rWKV$re-k0qscyj)--p#y^}7%N>0c4 zn66K9S>MK^mu0QvTr$>DrqMV?oftkw)*`%l{JLjEG5blieX=%cPKmno%%sxZZLI!P zrLAw~oSyGlg8)`JXF$7ve&A`{fKxa>Fn(SCf5!&=3pPX$=U`xAY3V|v_y<EDonBPh z)ZP6vRWq?LKR7YY&wpaio;|yng@u)ci;GiQTC%yV6-^*Jb#+ld;LO+Ge7$4G4jWr* zAp9#UOF*Y6+cQPZ#?LZ?f7Cfl*gt-B|H)(K(9I_2-)9=?0?%K){_evM-+lk#0g(B} ze?9y-KEqTpkMDg%J4)|A3tBfdJd@#>G9FPiQw#IxSPeKVnhg4PYMbui;i^_3`KrX+ ze7I>y^y=kn;X*UCvB#R4i)fiDk>Xp1mL9%*>ll!`U&>m;rC6I@#gFy14JvIMTI;-W z%hW!M?Sv|~I!)av(SV*=*FC)r*T1B^|L}vndB|CVz}+$qyjb5edI2W^?K(auw7vN; zUVC){{zWfhzrGK~#LU#*$==h)J0d1NE4QqnsegQYE7hZzN0YI#u<QlQ@7%c)mf@3+ zkGG+@5zsO+GkxgrAprc>Uw^HQ#jM_Y0JsAEu&}xcNJ9wr`t_fP9Hya!k02k7Z9k6c zpti~U&z~=@E=Na2h+h=7v9@~s{SRoTu!;81-2U(%>3#jyt<RDMH2nG+Q^qgNENv#c zN5`out1HaRO=V=I+k3j;Vt<A0q4MzbWOH{nO7hX^S-#Qn6u%%edeyVC(K0mV*K}|Q zt(;iCyK?v0MLo|WN@Sd8jjB_@Sv~)R(&2`owX)8|f$23tDLh0~$9286$jNxGoVnFo z+lc*33N1MQs5XO5-g%#z`zf3+Ki2!WHshF<C!eM}x3>3J_-GdMSa~CBePq^jH(EeQ zNMcHMSw&-K*T~Gwr>-2u{88ScfOcSdDJe;Hb=Cd*_XF(%>_tU|fhPg>G#W+V%qdAp z@w&!306f_I`ugIvYas7I)o(m}1fO7lKmg=D!1K#jcb+^&Q3?v530lN}z@NW;`||z! zC$C;Tdhz1nv*%|8PVd~c^KXCuJ62ok#~=R}03KEB4_~}|{@wd0uirpu@CoWjYmXk? 
zeDDw@`MXb_UAuD^QCZN}fEV1oyK?(BNG~SfXW_JqON%(UIKSEP4F?BD&(Ofuyyvh0 zt8>?`ZQa;4G+5Qr21-65Col9;3Pi6CUVfGoS0jjC%`9ZG#z%F0(yIn<J^41bc927w zC}9$*$EXuEiPm&TsP0>??p-QxpL_TILqkg+*BQ-SqWE)00UFMioWsgK&3?IE{!Hl} znLDIT=Tf44qu|1)?R`wkgICkzs0RIry6a&TDxYbDfODP<DOA(Q#Kg|g-oqPIJp^Ti zHTCTS{RrmgP|VK_jE^Ch-?Mwyu3h%_c8}h^x3eb!)$iSl(0*uG2!LmBVyt_xzi(t1 zDg%GEb+k3NwX}D4EZw}--qRTo5e8ZUfCu2ee)nEhRyyp}+|mTd-?)zst=t2kK6&%* z%GIkm*_m;%QLSw)w;tT@9~yuj1nFFM?bOszyLS8L!pbtx`?ELSb@p^avuUZx>$mUR zfBq81{OrPfPk(Puf8W}zThogRaFnvLl8uLtC$7w9W@iKh_`#N@)+UI=kp$mU6JcSY zXy?Vl!(CQh+SYk_aB32b*{7G6Qc{z>eHiH(X>fs4OIHUcC+a&o3To<73X3CB(|x1j zKoi=yc_JilX^oJ)I#Gd?II?i_?(;X=qzEo)8!Wv_lUjNV!|+SH+&H{m+CD!pa}AAH zgTgb}`4m{BtVB$MRU8r$i@LTE`<E0Ivp^Pk=dTo8II+IG8g56_-43bJ4yjUj6rE1s zyj6WG3_@x!<|G`!%8KF!-E?SDYI<>5RZHstbF3cKHUMK@CeH)rt!;?QckZ^dw{acd z0;-2Crl$DyhmVDXF0iw+v9hx2>+69$K6B<28yoB4BZq^61K|^vFL$=ze)giMxbWQB zGkf+TJMP=ZZe>M4;R^f=3ub0`z#h2LuHCy19XgnvmV6LN4!CzO*xtSD``Dp<6B8ql z_j<ZI`}XgHYS8tZI>ld9TyXE%b7;ZC-HnZn4gO_sPdax_0JN5lj&?{$5VXd^!U9*o zOz^i~-#~Be{sRRCITmJt_E}k3_A}ox2G^Lse!YKuR8CH2FY`R%|Kj3eoxMFhV`HHG zvdStz1O~+?p+l6GF0RHT2dtTeBF>Oi-HjewbK~L5_Oa!I3ig6HUp?<SIh*v0_~3%p zE4BSAWgQDQ?tOl_MNQk8jbDvZohoG+q3x2~Kee)r*gszqvKn?t*?p}*-enMSSe1H6 zg~Fpu=23D!px|&w+1Wm}DZYBfJu*|%5UAeC&V%6{85@_GlUq>@!7Ms%2{K?S&jaQc zH*Nq2o<4mNAP4lTs-^;%hgFa|An7$V)vrH#dWN|QP(Q$Z02TmF{li0b^|k0KQzq>} zorvP%3|Ixix3;!|*3gY#fAbAQ5T&I>0ClK{pb`+fXE!JuU0oehQ<EJ#H*3O1XpozS zyQ8P;@$0uCp~0{b8b5G=3o5|*7#bOvo0)#~x4$8k(Xs~-X(?&RQc@D%eDlp-X5(-m z^yum7sle*O!h*2x=6UWx+L4eHZyOlOuc}GTFA7gi^NEb52857MC2xr+`IG9_e0o7; ztrJgPzw?YKW0$a0cghDbFK8HQ?wwyhxLVP**gU+t^(~Yu3#&&2al3^K`E<M#Y-7wB z*$<w+*aq#NEU#X_KBY$aO4^RgJcfr!?>q{Q2juO!<VgFZt&M0Y5oHsEsAhFDdtCxD zfCJAF7%VBTu(ZChYj_x_{<EI{h<TvFx%G7sVWC~j0vdkh-hBaq)2ItzFq(jQbQR!) zLK|pybY}YEMKKic%1R2}-d-%MEU-HS!l1xQN{Rq@s3QoRIgQcMAX-^K5E>p90#py1 zp*3kKiLA_Y2E&tC6C8oX#>PriL>R&L&Yk*ty3gNz2n`K}RpxQ`poqoeO+d6OD9HPR zun7%5D{uyJlU+OEv}+q`EiDPa@UR6K-ra*991;ZAOCs5z=P55Q3t9;N1r@+4{6j;s zE2?5Mvjbui>A~TS3_s``jcn|J<P~tnUn{%f-7{vdJ%s301m}KIgAPhw#W9axn~_jD z)G)YO*12$X<L9~NJ-idyc@<b?tcCHx^48H_an;+P{qx1aJ!!YF**W_Zetkbq8Jh!g zqyutx0DCY#O*d(J0l#yeAi)=ds(Mc!Q1u{IGfT^B+Aa@Fj!z=mX7niUk<XaFaRV?f zCMNO`^Af*;dDu%Ei|HR5>Fn!4xfdi&dvDLv@4g4LLmlV~TH9N-v@~E}SUrF4ENJ^@ z@4kmP7o`5i{fAHu9W2tt>A>aa>Ed7+Y65!SeE1<aI1nX#P&jv=y+AR~l<2!;Wu)sH z>j%b0pjW#2?B)8S$4j^FxVq7pT)*q&$rG#h?&lTcv9htkI(XK=#2BbDFcA?Uq;KNb z-_qF*ex$6dh@P7f-XyK09ONG(Jle%K(ALe%%*h3mJjMu=ybTw|FQvTq;mfxPg&lC8 zmF+V1z3T)GLgg$YAbPFpS*q(_dGp=RUyOYA;@dM9jaUS<4`SS<EyLw)qMLhXwn6(R z%ayCwj!N6eIwl(jmLFC^NFHH(CFg@GE+BOd=m+l6p#KfyEv{#yYi<El@9Z1s6BQSm znUhyl-PqZM#CntCTlG9(es+CbNbmxRc^Mfg(8ixI59nQg^aNRDY6%eb;2_Jl@5Cp> zp+W^{6J(5wi!(3ZQB-C`MMa{!11*C%x%cAR<r_CvZ*E+>eS3ao89g+#jKyNUeg8pN zNDx|vRdqELaE+|2j88;8y)X|^EvjEY8jZ}%EL=xq*wEwziDV1C%)vtkkzc<ugVy86 zkIk$sXTa_sG2hYKJ+-t1Lhi`XBXBYt9Qz3bbFz!GxcEiXO>_<PVzcuhbcNo~&chq- z6+-fOGdUfT-6}LQkBqUUJ6CT$ku>x^rb^TEs#SH$KaTNok8ebWC}-C0fARa)l#B}2 zlWHt7HYao$5`-`l_l$dwo^9jyPnPJ^T8D_5!mg{Dq-ZXA2Y@<6vWL}Okue+%I{P`? z-gCMvDpVCiGh<sjBF&xR9}L}hVorWRbuB2I!3kyz`$^B=yft}s@x+PaC=@XmEuhe? 
zn7{V$$){By$*VVS0qlMKd{71k;O{_$|BhYEh!Wa#r@NxSW@Be7Eh}EXb9eFDI@rp_ z#_ZA}dT12$0CG@1yO}M+>f<->b91wxr+^9o(c!6ya3<!VO>jwIY;agOw1$Qna=e{8 zpc*�mMfT%<}=}Ge2UU=O9mKZ};rVN<&ivFg!%rpU)Es5Ri^WO9va0ltvE<bMW%D zqPpSjoNxr9x}E`-s?&L+;OfqqN8i4^RNT2=(n{Gr8-&eite*lgvZCv1b?*{rn|JSj zaV{5aJu2%N%zjnMX+0k?vk=$FvTe}*$@1>Q56Lx?_KBC3+;R`8xgJ)d0or*r-TAaU z__5vx#0e~CFuMhfPO1_?)k9Eb=gx2siGWT#rLefF^>TY(-&V}8+`U&-Ta6CUBU`Mk z)*d|kjQK6v29-LCH*bEzJOGKEogG?3?dZ`XM~)tjhz#dDh6oryV^((N>h0SLYpcjh z_!~DS=dOIlyf)_XcL?UumSOe5tM9UNK4Tty6XZ9Fc`-53xog*^mX~vKvsqaYR0G;$ zVxv>jlA%7dfbx81b_UxPo`)?BEsY$U9B^sSBQQ@7vBS(?eCwMSdq>5SeFKs4NhcQr zB1zi>FM_f7pUQMfSTQjC^#?DM%>((>-9XhS*k&A5bE79THlifI{@_<0P)T_UE5GU< zX(EpnUD!BKk`Uh7Kf8_FKUq?0$Bx=0?<Xd6>IL#?&;jkoFkUCL87Fame5y2-b2__( zjrNL}D(d15h_(c0s-u^$S9o+}YDR8lb$MeGJbe`Nwzk&y-@G?4)Q9E0$m9tu8JX$8 z<N)&Bn>@b-^E(mR2h7i`uEZzCBjfpdc7v*K?P!CZVq|&>eamZhdHK{y{#{7(yDTj& z?!A1yc<tKN>(}R3SKz<Lj&16BELQvF`w!ycVu-ZawM$l3`oZh(5-%m7UlTx>3^E3S zP!#i`qQc;45N<n@olq?VVa46o-(}|_bJW0=O>L8zw&@W(+}(o%v#YB^Q<LCjPyxhJ zNp*EqYg=YX*`?fq2xP1#26{js1Pd}1RE>_gg|e>E4rQvSabWY{;)54&l1e&3I;uM6 z>3CEh)$mlZjV)-I1(FAQ`R2P{c{<;I`-=af(cW`9`&C?yV?6=(7G9a#p#76&dhO2N zjU&HOblIsy<;5^gV7*UZeNO85p2GQ^(F^3fXtqn(c*jLcK3NlO6H_A_J0g|t;uqu_ z9haC}kX2fKgo$}T6G&W!w--A*8vq_~2<jT31yI?dZGOV@DCPm-NDRw;0BK~;ZjdFQ zoB?umjddNpUC?oW{D9VgT{$^91A_wEyE|&@s;{gpU%jz$=JY9KT%9?dGdeSEVPOu< zg4p2U;TfKsu(TwgkOsL9#|;SZ`x)jPo$S%L1AOGp%Qx;Gbo9L;z&xBoVM!tIIUE;6 z7oG0fH#$6j{Tl3x9zsJ?y`ZKpqqHI@HlF6=M-PdB7~hHls>V^*f(W7OSxq8~3OyjX z9vFW9#v^HCA5iiJKJ^l2alBfde(CMVa81X;$kjW)`R1gFg*W>#rM)sX2Q*yI==sQ7 zhF`n&U>mo8vS_=cu$(j8Eo8D!%H|Z#_q47**crWmbA};&$}W4)<5;DL?9#U9R7`N@ z1XFt_TQ@ISU??b@`0QNh+>!A*mc9G-v7^I)08zkjenB2+U_kFy%x}i9TQEO%1<}=V zax&lS_y&djQQjjbPo6-(V}cO4q`U-LM!#bMZvrY75fK8wFWtHWAtZ=u7Uu9H_!`ZX z3fdbTgg<=b5GN-GvxC^_<LkZt@R47D?-tCjfir?|+RcoPp-m9^Aj!}m7cf7$u+TR; zg62>|!s|VIPMzY1%K^VY18_JQ2zyWPpMbqio;pbl47GOk0PUwwv;}5R&@uW(iK6Wg zJ2<!T=+$?gu@xNBHmVLe+VrZ!YVKHST1or-N0R^TpG4&p)UzE^+9_hPPnB{+i!Pw= z8=6|b4cb3h#;>e$OIiP2!e)nr4Tr1)pdEd?7%+d{D3nW_uv^50Tgi!A(Md?d0%wj0 zdy+fDBP=Q?E|G_a8-2BG7xKjoGNvIZDPCAyh`x>pp8#OM^VeU0t){Me{qa*|^_$IA zP(JAQxqTx;$|_3x_U(g4b}$Rnk&%&_zjh5gA|TKYsGCX2JHP(sYY-TK$t!DX8Ce<N zaXXoM2I_}|246}-a-t!Pz~9cD92^|_`g&7W7eT-S^3i4m1<ym96Z3OZSFb85DWH7; zY=`gxP97GJ!O8J4=m;Pl0bhpy!}-Hy_K%MaOwZs=@u+9*L<$^b2Z$dc)eTf1BIEJq z%DRU8Rh;>Bd@I@}?>~Q?Q#*7-*%A5Tl~H>N>nm&+kWe~YKd@Tbu{gAF`}aJZ4<A09 zl`>^Lt;wq3z@<h#uI(vj5q9nNHr4;xV(u9AwYc>GHToVIdu}C)pmF$lqp%Cc;X<a- zXZ8G91r0dm>`rL89o29X#Srx@tRPZz@bZIr)`(!Pt*c{TYy@EN@N}<jsF}ZUV`_Qx z{doW<!`susk^ssF1U9$;=pSU&0$BwKG4}Bl&G{P}W#y#~4t7>VO9+P{u&r;ZADcto z=>>?kb#+i_6!@2wl|^uH&?xieb|BuC_BIDcJ1Z+oh$b4E8=&K8@96@eZfRvfBH32g zRxMn=F*bK)a&e)urV{oA{Sz1*0D=lV=;Y!eoU?@m!O_vav%lBR-v=3lBs&dGj57yz z*3vW50Qn%3fZ^K*2W#3cSF~P^NlVu?G|<r2R@Kx{$6`&aZ3twlp^crknT493!69|$ ze^a2jMw9^Hr&jNZ>3bbgbcWDX&N}s=I^89@u3>1cs{3kf|MIKve(%d;W<)=BK7~C} zRvao69(C7aSg-ipmTl1f$+CX^_Aylomm2+;He;W>lYm~J5I$1KBvJ?;C2AgjRMBa# zD4tiHc3zKh8tWm0BN|v)gM4*l_%R}5qcXD7ODjs7Tib_6hG%ET=6>?FE#MqD6CjQJ zkYsjdc5Q8O<Hq8~2E>)4v(sB^LItP^C`G@|-TL}2pcH9@X<J`tN4KN9p!E^=L2H8R zF|j@R*{$HXH)dB?w!Q-l2l}}C0<;V#23tP;U<9-o`U-vD<WzNATS0YQdQoXYPF`3_ zhEGg_TX2M<Pau)zZbEX@F}Kt-Fg&Ma^Dpu=HDXxf;PS1fuj~WzIAv_r9CNYs%ERjJ z>W+yyjWf0VE2Zu8Q>*uW|I>*{C}iVP+%1aVr%XPe;=-%tZt9x64cb3nvWr^xDUe0* zp(k{_xfESQOruc~GmjHC4rdcK;*hmHhx3xa2MX%@$QzK1ZAnbn`+I~&h9oAVgZ8zT z+mX3J{^3XBzxdPXxhuc<gS%h({pslMtNF7EzqcmywW7hPsg|DJissh*>e{rTlDN#A z(8OfVuqb4phWYgq-p)za!dlb7NL<_ezbnv04SX_dN9Q*l$L6<lE7*&hL>l@woYM{9 z$1=k5`aso`buKoKtiAv62j+NbZ0%>`SKBRQz^drDU)h;Y%l(wLd;i${HfaBRF?NhO zj`LL@#UD_i9m6ohEfPh|<3!BkC5S0}O3r%(4GyWgNSX#JT1QHo`fK8eCM0_+D&5&9 
z0EK;Weo<~^HSl5g@bItylJ0l9!`LrQIB@0X?aci~Yro=RqhD$Y{ZZJ!<V0IvUqw@M zUR6zMQAuoOPDo;kcLXvrxj9f{@1$pC197Vi*5qH6T@P!}Bl9{(uHNmRy>U*5&Zj~F zRU>VAi9?ZW!pN`cLE_f(&c%&~f9!>^t1H*J&T8+upvR)%z^>?YNS(%`O7)8=+D7c3 zFE?)86VmlovQLz@irO#lbWT4=+$tH6FJYMkpPXW5EEmlLaGt89C{4#Wc{3j^ycP5N zS-O)q!uFv_sfoD-ITclv%`KPv`k9m3UHJmYe>xk|*zf*@_2^$`V^@CLM|#J{n!39y znp<)!s#6MzVluLW6OtJbG1Pz%dk=2_yeR^BTP)rT;?_MX)LjZL)bO&N*_$^Xe{0Ri z;F7h|rj@EX<#8yGB?#ehXz1F$FpPw*f9#jtODi|H1TcX4-SYOV@(v(xxK$`xgn(_t z{w1ZVaX`!{z}&y|qInpw(|LnXDeF{m%VcSrw2Nl3`$1AlS&A6?>bN8t(9^Z-!nDln zjBH5I#RKm_NE;Y;DK;}Zy|}EfrmnuD17L$F1cYFJ%A%j=$@%%@A!ENArJuCTADfZm z*Jr9>e)dm{U+(XRh_$$`A*-wc962H-Js>s_{F>qy1d_(Wg=S*sh`dE+j911Pa;rM- zQ*i~rcTR6C-F`}oC^@9$r0S5PNh#)5riqvY`=+(m4Xgmew@+OA{`X~#`HAezuVXu| zyi3@4m%PJXd3#PJrvpmlGa4?hU%lBz>|ats5-Kpx$+ltD7mNZq6r9BgiPF~TQp8kQ zyKFJzFcu+0PB}X{ixBI8e4>95jvS|DW@A9Ku^>|&ynNjP!~A06BU3Yy@(O_-t6Ezx z_w^x!Klc-y|I$=Be~6`jF&y|k0_IEg(Pq0xMjAW2K+k}HM<Z6~zkH+O=phkgp8%3O z1039#<Nz8TB#jczV80sWD@9j&RC&kL#>(Aieo1u)<n1mRh3R?K9#nBXiSu!ZuCMIA zTGPb61#}!uk}fJ{vY45fnVDI#n3<WG!D42xm>Dgym@H-{i<$YgXa1R;y*qo)-TU4- zucgzKP}!Xk8I{$U-&YZ>>-=5UyOaL=;eK8oh;Dk%y1OA8PHx^N=bUJGnkJIrsONb$ zJ*uA=DHsgh3(nuJJLZkhG~8egL9eTl)uO6w!<yteeu&fn4H?Jfe-NQz_-Z9;%aV~b zAS)>RL^`{pi$pic%;5<>A%KhGxKoxEcB79sk@7VbF{hw>D>yFK7us7U$dENVn@V_Q z2<-<=g6b0C1-Z-B)yvRP1oK7Lfzm;sV?@2nipin~lw~90+J+LPBS8_%ChFwA_aA9L zO4?&k(18q#8?oanI&;AvpT4j9t>!LJc44-ouRP5RTR~oGM@-pbviCbj;Ds}BmU{VF z?GeV}GQMSn%(zn~b%5Ob(;YgxU;1Gxw`V=<RA0?2%uJSYG>v))j_(%1vhB94ZEVGK zJdyc8G9xPXnIc*D0SYT;>9*x&9R8QZwTxxM^?08a=MjqSL^frM3qylF;RulGq2Li@ zh~~=r>iR~EWX=|f%&+(1;-@L=kCV3PQcuo40-o?NYyg(qb@+7wsEgwFzC^KEFBt&U z6Y3SgFgrgnybxt-01u<E!onUmj(I5W{HVm$9A;{Hx_MhzQ(&=%h$~l-x9Y%oVUNHJ zb1YYMwKzUz+XH$UN^tF2hMA^#l`kJJOVC+2rm^O^5O_P!sA;@CZ3k~cGIXAK!|CNk zdEhCE*wSGYsIYjv8-1u8^f!$huWGy-i($Z7ATGsKEPy_OpyWOPy*W5{O2)m78(C?T z(ul}<nkyVj8h`zM(~&K5(<~bq0Ldc3+}oe9qkxF;7$*mm<4n~G1l3d4f{*Gn+zZ6* ze?Mg)@X#5&qK{jd-Ff5f_RLyK%S{hv5zY_w_NDkS`-U3o>t2J?&qQURqQFDt3=vjQ z>W%f8!I2>ZOnhS#;$XIw>faBw4JLetPxB{{xyLc<FFTFgcg9vVFPpB+sc8@6JXC8G zVZR?1A8HAImbf1uk8{M(`FfS)W`k(GB8=A&q#Sk!B+b>4EJJ{WnC+vov+CpHW~6It z_pGe&X(`PKJ<2C5H-m?n^At3w3_Q_DRn`_)bptoaX!(*E0;glB*1tM>tMB%7Jc(nh zL8gjgAeM8IUL?7r_de6`piwR#QgZK)t&}+Hu$m_|B{Rdr$=XwO@+RrA$wE)|Wv*Fd zeIe{_ENWZz_D}&8!TiG12R+J!rAdh9G)4o6ylBPH8XR&&KtPa@u^F-@K^glocQ%Jg zmgj{9c1}Vv2|#>ZlIJ-D?*gZJLMkb{BnH|bLb+eTmYKkLH#bLnGeg)Kr&(E(XP92y z$(Fic=wIj~ugUs@Z(FT9&8LixhZoibXC*aB8%DXJc17M;xGTL<D>`aZUFoYfJzp%V ze*ZpybxezLy~gJ~bOm~QMUd{JSYr}vQ8~c{Ne4ZwKPzQhc~V<`MqQ4js?T0rrHcBL zp1rY0MlbUv10GMMJ#@(JmRdu4H&5<ubFvaOmA$TmWAf?=x^<LOAPeV>=b)Akby_Uv zoo(?RoE($`c~imRp;)Bm`Tes|>)W{w+qbJ1QEfsWq+Y=%J0ZhOsGFq1xyAso#*A$J zmD$DRg$*`VPN1e9t`=w0JB;~}-?B?RT#2`^uYMui&h#tgqnq_g4%`vUf{tj5i@`1m zbhLpQXajI<rS2wG+KO$_`bff;tnNW9M_Arw16Cy^)*gCUcN}D^RJ8qL07on?9v(Yx zZj<?K1&^9|1bpH-5lT<|;O1ZE6qni{?&z2ap8*$JHz%j(>qT%#vNDfTTT{h(;4OE6 z1ED{Azza0mB1#1<#lnNm^y^FEa}UvgT_m;vzK+2q7amP`JR*VL^3b6`dAx7q5mPgN z`8m|tK~AitS9yGNz*U06@LMo*(S}jU;T`7Aifyl1*Bpn_=`fTD0U93(p0mu4E|z%G z$X`G|!3dv4hlC|DlM-^0zosiGLyWb5lX|uF^%ZPK61Fq_*uoe4R9+08($|`tEDRR2 zAnBh=ng%#_hYR?w*4onN2wPz-<XDV~K5Tv%1OzK3`w$c5j2Kh&1R3lFY2Pe(wDC;R zg+sOT4Z$k@GmH1UsU_T3Lrmwo_ii<KB)bJAQ<a}BGthm*QwON?M-mv(Pp*oZ{Tm;- zEho$2pFd;5d(2rsR`0dTu~Iw=w>+_&e>JZ5KIiZB!7j&E*5#_2^Hql~V@>D*Pt-?~ zmt&!=)xDLbx?gU-il&7DqO~Nhq1x=VB~ym0>Y(A>s%FEgjSg1G=bz)bLF42NdZM$% zZ1_HSnY|00j%1pPhIJHVq()*HKtOFpoj?!=o}nD!BLh!^1OS(ln;Z&-WT_~rscL`S zoY!$~?<y}XHFl_Tw}D&s@GyHJ5%V9r4Dic&;o9ggok#qx-Q6lC5}c5c(N|8@%QfO^ za9?s^7eT&FDR|}}devr9VizYV1skOhq#|78EeLfdjT9Wr80@@R<o=_*qo$iOwM`_Y zTO^R!7~N#&YqE*}f0tVxzKWy9=yer4=ple%U|2WGBc&~-WQ-HFRs*yxG%HK{onhe( 
zc;Ovbs>!Rm*_W|2f0Kc8gMtyfKENbQU)jV)&sOBIUt(>}<ui|x!(M>UtElvAoOh`X z#IiGW`&C(6MMCt#h}c!RJk`$Ef-_|HFU(i+beEmCcW6i-11Ikh-=x(*oz?h6plo6! z5zOT23DE+C10|1%zVto=wL~Pl<ZA{H1R%}(8Oq@?GKM6<<KfZqbpcS~faT3=rfC5n zgDVih8;tISoSZ-ekaXNaV<0X`jCfhfUjV;KLNy{;`6W&?)_@qHfy*e)H^wZ^KUHRT zz_wX?;Vpqj6pM1g0Z(7FCq+Y#0ykyEW)!C;)JN6qIMN+{)@%<I%c_xxWxb#lsUg7A zt&@5b-%>6|V}s#5FXk=514)4fxJQ;V*ku`ai?uMi-Tz{D`cO#=oZ*sboiDoiV&F~o z3TDUlRnOI3ZP*818oF06THO(Ue3{P3NoS6YvikZZ7z2go(aYSPJk14rbvD@nA~XMr z&i2YJ7Ttt;1VWI1g#*cjpM;m2ke!K@ijA5Mr57<15=07Vna~@k80ku#l`R-{zQsM@ z&F<tPC?3TJxM}*J+fUFHM?+uV+TPyT-QC#ytB3>zC0UvN%)0W}09O-ewP|v4DLT6T z7+u8pgplbse;bex%mB9v|0{|nlo3@{^}!5nLs{8{d-!6P39f~5|F1>R27L3lyR`4o zR-Lsj3N@9Q;f~{LGMVD19;&z2o`FRPtud~VH4HWxV!&HWp6>vn=i7JE9^DUnn%l2S z=Upr)(%~SDUaT3Q>8}UbCbUURH7O_bgPBcEhPndiUyS9s5;G)TwsCLhe1_*^R=Z8L zEW$FsC`m~W1c1F+8XZCf5+z8tIzPJIAOML0F9(l&ae%S_`yDqq5f*`bg(wLA0l5Hg zMOs?{f$CxCibvSqR#VW{T_0jO;-m}eg7m!59B{QBWd+1_<*B6v?LrV4aC@c`k<+g- z%P+#mI?G0hTBz#4hrBTk^E?PSIKR0vJ%kXW#&!pf=HQ_xck`+0vQ{gLHVj$8HS|ix z5fm_0j*~j{N}%L6<3cq{h~Y5CbE)EzKqk(TxGJp#zs0}B?siUaSh+&~DK#D}p&&x9 zCU$r~kPK%`WeHfxK9U%x17pihShuTDBdNqvatf)^l%1kw%ysyEav}MRA9-0FL?msI zx&G*4{sk0YYi<c|wK5MJ>0qnFcgDxwcZrUBfrWjHf_4aSoRJ+5_J&9gqzfeZ<p^9b z3Fxc-7!4SJnC8!{5gG$ze}x)<ghbL$Uq>=9J`J^vi+u+d4I%^{9`G^PH;xztYyB0G zll#ccI58M_23>;CDl6Rz8*_9U>PM!0XL8vYwTquEqLL31Rb5$)d5y8pLR3Ou<%tvi zVRZ3V7@Uxw&9_EBShH1tu2MF>R(^r+;q<zHsGZoT*Zpt~Yr33;)h20JLy?x4c$cQn z3_n+K*i|YV(<}wqv7V(CJOtWl)Mq`6Z?1nBwIk34DQ98y@3i!wt2dd>#x5{2`Cew6 zZ)ow2t#!V&0emEbI}zAV6dO+iqiqSW7-kiqV@yV%M?wdmfMY7rm*p+X?`@MI{0TfN zxSB`H2}}$9frgX?w2;qyhpi_YAM68?4S0$rp#JeL@Ch~62w2hG3An)2A_j?Pab9?Q z1Wb+jO)C}vr`A)>c(4IBV!bMt#U)IdW9T-ZrR<Rx9=VG|QyV*q&b9nB7GmUn?F2c- zN4guD2nhgnSC~g~IYM&t%(dh```67k!40C_?XyoNem@o3Bv`yNNOO&6jh=I>oXt<Q zoU5GAzVSrMaf{lWI%<O~VWqX<)#`eMQ?+xQQg-o+%dpN)tpCots3<q3xHgX)q@y^4 z#07F2T-H+97WkW@o#l+r2N%n48w5xTX){1?saG+8>FK8cwhbglAUA*=)t~_6g4=sI z9Srm<aJE1WiY#E^niLYS!Olm6Bigh#8MsU&oibg|T_yFLmAHoQc>`Ga=*hi!`szD5 z%<PYC{N5p>Qzk8Q2eySMa|h32mG`8;cje&9(pZZhyIYmI%UV3yEWYntBcc9`OPgKY zJj#Cf*}`?X&z@~v63{IW{q0=}4z!7nkYG;>xQQX;))a-sIE}mlsehLGRq#Z%dcz(^ zm*G?+V&uI3N?L`zc+@B+#qlLm@Rgv?UeqZ-1NjR2snx|1`~We(+0+XdEDH#*k;oXp z>H>2_(J<J*_A6MZkfm2YpZ^T}739}s&Pe`3Ux8rFspjf7l=`+QrZEcAxpc>num%r; z>rySMMW;zl8-OE#niFR|Lk9@+5sp5bcIh>w27Tfc<n^^>eD<~it@q_3l^Ad4X5LAv zn1c0yjp&QD=OHoB$L8GEHMmdxf^V|)B`Gc#tW!fN2-@Jukr`o*)a|&6MG<?(K)1aQ z8gbZa!#aLy)Td(3^jz9}PCk~WU;{12<BwKeX$>c(LCieWzPC*_ZwK&aCUqs!oFcBi zB-0Ck`TMhXh*030?fJEve1P^9$TaD1d3rE&>VTCXS!3)5S;0EFKz|{;Bij6K=%gw5 z2+{~VphFE7SPS*>X${>eMWH2CRGIxH7ZatDw@zHo)?~WMWVYxgct+k-RJBE3_6`>w z)<!Y(OMi_FAK=4Mk=n!-N#1-xv#$=_2DnuUQ!rDfpsm%%y7cE7Hn=)n_YSY`L!ZRp zJ59q1xft_z7_171tiKeEaRPu8j$azaxbRioQW~X(LC!WRfuE@#Rwx!MXpPJuQ*a&S z>$zO}=d2%xQ!i5Wa-(VR;U<n;V#--=$J84UQdtT|PPC{E;L#kztX_uln}931OMY`Y z!v@sga|RVRcc;Yg&!Dpf$H8tBUNKyumX#Qr0eAMst`3n;2Mh8@dy&s}gut#oK(dwb zq?GxmIlgISx2eqAq(vO-NfFn=tc2j|Z}4-q_zZK@Yt0Tk<`<$wV9`lOmdG%bJ$5la z_^iUAg^RvkofJjYbC=`-18=z>EUvk)iLibijD;f-pntVj)61pq=0)^yh$>TZmgbJU z-BB#Os%RfyE3)qVdg`$Vbd!ST^vjZkleC(%D3Z7XU-YsnXlSM&R67QA{Ce;`H&U*J z2<><GV97UL5i8<t{nZen0)>Qu6{%CCeU~d3?h#m^3a_CPh#_o<As{n3k$|R1^qWW< zB@RZV1rDV}Hud?z@m(kkSDPtgJkRDb9qM1WFtJs$Eu90jZm?LFe6_<)=&cgd@iIfJ zfM~RhRh{+r-F2SdMn;f+dR|q`kWLr@pHL&T`<z_7#`j?<h$;V2GtgAs5!9p*z8tx1 z3vWGs@A2|{edw%77`%CCPkab{N;DiE(NYVSkzmSwMazz>=p5G|4V(wwKlPX|ONu6y zB}Sm(tOu;K2_=+U;DA?jR@rCxyVYm-QIZ`qw!fAgW)_IZ&ACYpaZ;4?=qWeEBs9Zy zTI@0uUAn3-y*I^X0j0W$51%KqpOc0~rL&)xksm}r9K>@2ROTcY3Nmb(RBw#ZYJ|pW z#8uhvr4^l{ACse>b5HxwMyoW;%*fr<?dniaVV#PsM{IrKFFN?#){(_Z1bnDhZIabj zdGU+<5URcc@T2}`57+>l2Wym@A{)!;BfxvJp>~%&MsM%X#xPyKWRJd+C%%KxZ*5X@ 
z6mQk>V7bMe{HV&NL9%#mAny`2nj~iMruD~2?YamQgvvxTI+2aSv76KIEC}>l>>-~- z?1?Vt`7`5U+Mk10LPq3)7#ZY7M+_$0?huD``KP`hd!DNO=XKaI+AE<VzYO!5;43); z)#hDl<B7Jj-;20k9ZlW%#ad9Pbq*bk#z+fgT(k_eFYoS43J&t`$iL~%JGzb$iIp~L z*qgr6lD>zF{8Cb6o^@NdvNJL>^t!W&8~FY9TT{^6bjiqZ&ByoirulWMWu{xV<>$xy z<CeZ==Njt|h!MyHg7_8F3vE(+9Cx_r33OMD6naB+$T48X+EX>y5;3Y&Mg)Nx;hp$8 zlcc77C6{>ChvqwJL}}}Z8a!$nd<CjY=eTNO4>O}PBhu@@Ag@gdri*Y@s%)6p*MqZ_ zBHKqvWOu!$Wh$!<nPro>I;PGDrqZ(OFVHMZXgu(s9dvBu)WsEru0IO=iQ0cm@e?W} zAh{@y-{%+ZIkJ7l8W+WTq~^rZG&)gYb63am-ga{o4yn;jwbn6~UhoiGG6D~J;L3lj zuXNueraEo~?$Pa5-0b@N7`umL^BoU(`<c3O^b`27jGNAB42w^|4h_4saGXg_fl4?C zy;*ju5*-S?w^oWOfr1?N^o2mlVLzVu0`zTs4cb5lxX|nl)k!1rtB;zXw6TMivxPXU zhJ%%{#cD}t(L5yi!V`sRXvt!@Iji_#)4)ONFschg5j!UY#tHYZ6xQnvXfX{Jp2~OV zW5_Wrvi_Yr%;qRJO$sJB1W)F|KE~Wx^%*3^C%}SzQ4V;%G93@y;%O&zZA#C#^<ATN z+&<>y?dfUoG_XAj0ClMGjp&FAW?e*1it9bkswWILcX6VQLZa$|Iegr(TN2(hZ%_lj z0U%;8sOL0Fq$IKXVEM9HkNvYg@CTVMh$Aa_%5K~RLVVH1(O8ToSAH-df@tFgYDSdl zQECy$L>ds63}#2Zz=zPX2vCO!tpY_yK01@<AioqAZ;->ZqHe2Z+$xn_$1vF7AlnD& zPkXC>10EWvI?lGi&_YX`gY{LheC5rigYwuv$e3vk2grb3x=O!x@R*EIyZKAkf0^A4 z{(S?9(Oml8Lr}a_&Vg&7^br2-sfib{qVu+_BWL~roh3506@dB%Ka~T&;XJV)F?9kd zu12w2HmIi+L0u!8)nmvJgS)<m-hqVNfrQ=#ZKJ3Ue(T@XR}>n$Neq(9%rve+FGf97 zs@~J9-chDCPx8~_fEqw^Vdb^5^TAYbY*Yk	e9&8wvW@ADbz<NmwonTrTWuI*DBE zF1{}ARM6{we)8IPTcP()zWrQ^6ZmMmz7iYWcMb3r!(iTm)lgc~u^C0;D;S+GRh%SJ zTIP&USc}$987da#D?1sAGlV(`mrNfH22paST6C)JW%>>@P5QDa8CPey=~@Tl8-mXw z81Nw+tXyG|1_<lvFT{|p(0?0%94ZnxQGMctJC4KxKNz1B8F+wX5pY_p78h>f-afd% zC8nB0VvQ+f3wW$;oP*#U(oFA<X4A%k&1)fQZGaM(i*JYtd+dO)PDZUWW9hISq`GYI zR0F>k(`ZP*VnOEiG9#4FA(l<IrIfEz*GyyJ&RhXu-dewA&t40J-iygX^80$Q@p`iH z^{KR_SVAV3>x`j>Of+|a#RQ|;8sBd;;t^W~HFzgd4*?xAh5F)O--G4(;90?hB^qrf z$ib9LQeI@Wb=1q9tsUf%-V=@JcD)z*yP>?1ox>ur6z)Q*vR(-soy?zH9~BBI3N<Z5 ztZh4~hzHWs>5d;K!ceK_Vp3UuA(DeiVn<9N3K`cFqh1-Z;M5C(@wfz8+m>#9Dha5% z82K5PzS}qQ1HY$;TuT&uh6nK!lfnE8c^D#cAEb<yxB=DZg(F+LYk&08r7g|F`Hg?c zkI#?(q2R;s(XDIZOXED~%9TSR&R)QWAC_sLvunYPorSP3$m3zN6~oL{6gOGs`8+_N zrIZ#KllJ1Yjc&xnPOQ~dtjwh}jKzZ_C96dFKngO8bucN+cU;j0w-w4B$s_Ay^y^kl z{aP31cRnVx07s6E$S^z@wYHx@_MLB^qn`|0KeW4_+gv@ky1c*`FL9Mje`}YWW)|$m z6WXKDS$?4hxJ)-grnEWopBd5D*gDLPJ;btg4`5CE!52;&$)%{UOlH)fjS1HyGyj$q zo!VJ!%Y}*?S20I_#LQSkX(ef>^xiw$%mdhL%cU{yeA`mn?DqcLJ{~`adEJ9u%JMS5 zwR$mjTH`em%riOW8KyxNHn|2)6z?{Af%~GXK!o=QyLxwvxpa%kVF?;mks*+aD+51) zAYV2@Z)<;qNs#%Y!|`#aMSY9@Q}Z>LK<JBa9QFWs*24v#(PitF(>nc{X{Sxvd4q1t zIN+lBTZSR+6Tbgt*$u|j&cxZp$<)yHPs-lN3Wk%5kdg3Df`x;Fk@=sc%zu^g@)9!0 zn%bGWSP*iu0LqjJ8B{$TObHn@6pXA)ja>*CR9uZ*{#GJoYiJIr_-bhaIGLG^osdD& z)Y9C-1%`=`LB!t1-buy5(Abm^5UTmlr>MvoI$INRvHv|MZ0X{xXzC<lZ|h)hXKLp{ z$Vtc`ZfWCU>I6927`m8>ni|`in8NV$!~E5KkJZ7S5(oi=NI<=xpL5doCtnzuFiqW| zDacW3j;4Th-;XT%fbAka%CmG;O)|8bC?^GKPdWN4UgbAU_OxK`f)0AyX<oC%W!d*v zyZ?<^{s{X=EgT&G(8=HW`lsk$bi&2)|A|f*L`?x<{!AsD3_S@M{%>^g`sr^13Iql8 z3H)D0`CA|V^qc9Q!^`z=hL`JqGQ6zJ|1rAkfb_qN@BeFA&ij~vfq;O1=HDyW!U*I8 zl7QE?HFh%4zZ32s<HXGM&v9b?Yv}%Uoc^-9|0COF;$Zt1=>V4a-$^HB2QW|v`#<(c z=;GuGF#i93>|p-YjtT$Oj?c4oB~g`71ExAmSE<{nQY$q=im4(|!SCzTwUI(hQbQ|i z$Wu$u*Nhp&u*DU`Y$e5tAB;uSr$v%g5x?|Fn`&T6nwA7XTh|CYC-KBQEKk_6IA2W2 zXJ5{A{_@G@dt7%m69oz)Y$91F7R2l!ymO9*?j>zQDo?a4h<nYV1p=iI<q|wlC<w)# z6`QHfKT4!OpchMXp>n6fl1y_cXIA|4=OUGs7+5INk=&UKS0vNXv;e5GtW^3_b)gKX z1Y{EE$v}@2>22Af0L=?!Lcb*jIsuLyMKaoSM=qzz4jP8)WLS0SRqA9N7CJL(E;Ftx z;_emd0OmI|_w4yrjWUslAuG;Z(HgwhtgU{9)>J_6E8KK~oqbRgU$D_?YH+NJKndWj z6QkaWaD3>>cB{%{@sb`(u=PE8sD>|`-)8NXhWgw9C`<RnDXUpO;&2)cPtAF&iCg39 zS$+2&PvLnuX4rSK!;Q&L`CpuRavEf|$iK2s+eY-1bnOgEfer3g5A{&_8Im~L4&h*> zCNUNc+H|JlY2#FN<7jkwHYVuHJ8+lg9M=oe2MwKElJsDpuT@4+V(A26OhQ|gj<jll zS{N5?75e9FqXRt;Ggkc~A$WB!NPDSLTdJi3s<Ku`6<e_tT`8)JXej5y#}+2hcq!V( 
zfxb-GTuvnDdwPS_9)U6fFh*|<>^j$WbiZR<lr`(9U(G1QDdZ>y)&ihwB@SX8qei~Y z)c&I1<GQ*_LIK3EC-wvyf*p)%{RCGWRCg{kqgu9y+vqI5vKEe%Pl`I?dV8O?L<Y`9 zA>NhaA#f-;lg6X_=etljU5kz-C9lmGV?UNj?jRFy5|~m6dyMx7uN)u#0WPKx->-6# zB!k!j?p&<5fx(kdop81wQ8#mw-zIo1f{xarY$qca8o~!IS&nt5{E6q!+~TO~x-elP zcpuy=yTAM6L#l2h*?Pme6aGqn@NR82Hy1#&Zd}naS;=&CEnu6*%V`mX!bi>nhntzx z^1gZ+>VbXB@mjyglq|<we#$jt;eJ~k6kpO99%{{2;!Z!o8i`fM(k!ms2)5%<9&_ki znQ^7jP!ErP6H?W*&q%Q48pjcC!ecPh^Mgfltz6DS597O_Dmt7bBdW-@H+P5)H%f^$ zo&$&5=%;2!Vn`lCK(AQ2)JX1@bSzuT8?3&SA4$t8V+~kE<xSQ_iE26uIMhol*M`eP zN@ytf`f|tPL<q+E{jPhSzW}O<bap5HZloNniTu~>^vt!re;ixW6qHcl+<_Ax3?~mK zVMLt?qyY{Rn9$$;ueYvomcXY-{>|k4XJY!><jly(_RksYANS;cnZW=NA^v`=`8T)4 z|1_OU>gp!pu_gL0)t33LFn(!MJkh=#%3RT957Th7G1Bv}Y+aiU?p$+ey?)xd9PMZ& zO=)m(!~ta&!5mttQ1#1|EgqPf`;-$<VBhv{9I9m$`%ZCr=i<%uZrqXE?cpRb_q-uG zuJ3**d%7nq3wyo?TJJ^mwUF2REi?Pk8P0Ujp22R4L2s*v%bRUOZ`T}mCcFA}Zf~0n zs!Js<EhSFi@cPrrdN-N!*J?Uzj=r3}kFS{L%L#-&Uo)thdw^Ks0~J=dKdYt+LWutj z5je`bqcweL|Dp{LsEJ2{tNF>2$sNA_$UVx~%f#AgH}iZ*#-=dnXK>FaSMhOH|JOX; z99DnYtJ<<FriK|00Y2ZLg)7L|rCC{4zM9Wf>MuV@OZQBmh`Tf4l6ybT7AlXAYb9GZ zUT-j3mi?RGa0WQ|*!_MbH}2Jb3^$IJghKQP>9{ql%1!03eKs`?@#l4A<A+X_n15z} zyvRBgddauA-4FT)))QP@v4C||ZDpwZL@+Ds!=V&6-a7mWGZfP7w~)T6y>OC@TY|^+ z1nU1pC+>iSsGP4^8H}1suUw`o`s4|p#XZaa@|fG(KSFE2FL`yw(o8nuhS0Z!R$>SK z{WiOhDRg^HI3EFwev;|;&&%&?exZ_$G97-q4HS}&2vl0l$U5#|i_0s@HN4r6U@{?u zR{ZrFCE$o$1FBGZ?liqO=6TL!oTn|vxGv{qcDHIhL5PaBP9jp=n7G1NDYkwKUp$-| z6->f{ZqOZ>2G=9JI^{0uM02Q<Fy3LHA&gy@6ZQ?W0%s=@z9X%>(}3ZaTXe#llu$%$ z_wAR7KOi}!mmlm@kwlPB@?$Su)oqx$e8D1nSX+-q@Q$tAq!I7Q+oRrTld|W1#Ox{2 zKTzc3jOL{rV8{L4HoUVZ4=a-zgE~Kwo7CJGIf_oldkkHem*ZB$lv0h)!ra|bP~)&r z9S>^M)Z#a6i})KRE~m@(=Ta6wkkzRfjHAU=(GCm$(zY`>6f&#nFust6k((i=cLW^K z&``lJVU&bLTgSa?tBmkj@Y*b66Ox3b$(tprR{SvZQU1k^GExX<ry~-D#<pw%cG{_% zWlRR>is3hMgxS^0(d#xD9K6CM4Os-N?93}l50}M>h&*Gpj&1Y34ULqDElIHr@-2=U zAI=Y<AEwxEW~>eAQW9@_QSKj?r?2B#r#(Kdp5Bgr{?6WR{?~(FF>0smE^Fy)E=w8o zf3)ojK<+=kyc|rPFJOJHr9K1MgPja~;P&I0$KRS+4>_#U^5R7R9ydA&N9<iw9F*<@ zA#WQci6Mt%9sQJLflvJ^z|u#27h3jty!Y~(1oRa$)P)=Q`ZDa3U7et5H(I^h=X!kQ z{&_I^_;{=P^_62Tsd34Cia}op4a40=^F%21*0F$emw9fR<NHe0I@31EBYxk04<Dsv z>^E(`02G<>9a40rkH?=<4OYd`mL*_38j*236`IhP#3tHl-go_}AT3e5eL<1J*T$`3 zGM+8vW4|MgsQZj!xyaNgH}6sDMTEKyT~4TgUWf;8&|D`RQ;0c(r{V2stk>^Qsfbuf ztTCc6qFvRZB1qG|wWjRKwTjC+G?m?c*m;(xvk2!|cRMXsC*V>DZ}=Rvlmb0~0>M~O z#(@o*T9>cMh$L))&={pGizxsPg70DTUvR^i_!f1p<HTE0<#!ZqOa}<|aW*r>oyr@C zPG^t%RrwI_XKiEC_^zp7a>_8rMC!u8h;GDquEWB}Z!FBF)VVa6aCOIGiA6$*Zusn* zGBDu8`)okl6FsbMXHDwefjBQ$0Z0wCe()IEOO9uclGFGFeQKV3msGr}TV6ri&c`Hb zrwI>Y_?fL#k2fz1iskr0pz>kTVtq#&8qXc(?1Cm#gF9jMsz$i-d)tCvn4_ytVa+2H zYkvI>$q`2wMH;E;p+$xMNbD*P%Hx-Y(7M|5HY1cKI*YomkP%9*&g&}hwdPkOENU6Y z`(lswV+Nu)w0RHJ#AW+J8hcnL7?T-i4)d)!A8P&Qm~kl+n7peMZFgK6M2`W`B=%Rt z)~7hwo=ly%OyCpF3R<)jU;Au<(c%|EpKg1>N?lH<a%PqD;m2)aZ&6zUm}`;~+6f-j zP^|4%MW{2R_=OzX#UrO;7|MLA1G5;t$G&|oD{W}K2QYlLq({9Ns#FBWs#o+HBtt%Q z(<kWyFvf_8cFD2QMZqVBY&qCL*R&u`pMlhTg%Ic!_Rb^kNc=Mq-=dco1)w2d@?ziD zqki^qj6<#ZM^9>RfM*1pg~&R1JF`lhdKoy-5%jlZUP%!%)OC%M5PcRTJ0c_vSS*Zb z=C=&x(uddTWqZ<1W*|&EoAX}nZzH?Qbb+n2*_8riyQ}b<L62GD7vhRwM){5JBM|mv zVRvpvuj}Ulx8Y5->!6!XaL1F&&yn7<qc19`P$pbrY%V%CS2ET(^7i0j^D-Y)ASl*g zCS2xD=3(0DpqFtqw|@7pk7io<v)w6jm;RjheU_58nvq5Bn(b-R2nWfwZ;u<Z=&OP} z!=`q{NJuPLsw%(6wyUYDq8jam^S&W#9<F!@xX`dOCaZMp?7d1DeW4fj5h312HsSZ) zczVPU>VvKw-4!+wXMm2YXop!A&%V;ZHD?KE7=Vv*)M0~QGD`XaPB2uG!5o8!J7_HE zW*_23zc}KPD?&kPrljwb<v$S~>ZCHVM++j}ZosT&<h#zco*6@akMJRpstW5OV|=7< zfKj|cw28W_*BKS42{)t~rFTIYf;5#_iz;xjT{99%2_Nr2;z_RIqgxmkIdk0}%wR)V zkdApqq*5&Ooz&7!(L<Z6#%mtUup}^(jUR(9hZSc(Q<}-uE%S?j&9-U^1ex@22!~k; z_AHbop+42X4H?d4!&A>P4*t_?76MxIz)00lRyGDCry@;aOiZo{R1Od(I>=3m2jQ@7 
zrNuLqGSDFhAhqKGJ=%gc<_EA+bn>v1?T`VRI~jY+GVKOM`p49$fsc^P4Ypjj1~d1S zUUi&=AE}c>HWG)MNK5BsaiMAo$&LHI>l?B0U55=_tA&Y@tQH)%g!9YN9?4_F;-rsV z%{QLDtUDoiV&aYM^@;7Ri%|7mOCFPtd68luZyc}hgpT==SY%4L5~3i=V&Ka*zcGZq z?49}45TjU|7@OmZV}%iqeKTJRc&xy{>W`0kz37c^bKN@hJ<*HJ(S8JQ^Ps9$hU%T* zuj?R0N)u$iq_$(7lo~<-Dej<5sgjW^w`$5C<H+kn!4R|tK2|~Us5d6C`TTfoq+oA3 zG)A?dJ~W<t&5bLMo7d4Q+`0Ctaw47@F8T1WA+!6YVLLSpBWBNg;8Ue|*FG(l%RVNB z7R3^nY;drcr_m3pQ!)U2DOe!tay@dI%QVpu0wnEIme=0BWuxA#2=km;W+>8I|7~)} zEvE6~4i;ztYwhY<x~Xn=V!IhhmPf*d+q5>!hV-JrYpcqUL`wt6H`69%DMe5ksyQYH z3S@UV$+r1e1iFdq^+pnB5*Z}VAciZw&RqX=1tV|<1({+Ovm6pxAb>D7A-AG!zc2OR zBX!MuZ@Eg!xnfq42R?5Cp5EK2G1ZvVI4EUT{=ulN|6sU+0`iBwc0xB=S9))#C3igr z|LSasqZJY28}E~Y)e%iEx}9B)Wny(YXSl`Chh*<dqb<pBjLo12Kh=e31$zW=`8X@F zFVz~dPprAThK6VmTssrt%(|=>*610WulMeNtEFoH6IkzdYF~MMhS~SP-#BqT^qt6o zQx3xg!A{(}XL77|D5L}$43-|1?SI0dsS`Nk^QO0*%udNf%Fbpt;=#ro4y{DYPoKhJ zNa2vI_Tlk%>KxE>bLo(*EYe*H)QiyA&z3q{sHK-jy=$5Hpnls_{UsJT7_QFmpkP(( zL3hK|faqv%Q{09YuAVFLizYDL9XbMkpxwCm$k7ih8;&WzNtcS=4I#V9vek(KAsao8 z=G)i}^_RS!Yv>J&n0>A?=dWJHoR@1986qvV1CHdsb&(n(yDW>3D4d-P%FHZ(h}-&B zp-w?o$5AtRqJ}71a9&+rJ7z~1z8O^v3}E|cm<WI8O6ZTb176Im{gHHz(k5bMLOpEd z1W_6zE7rn>?+}X(6Nzi2BW^XEt<{yC`89I19vcI(6kSA~j*)fjiMl?mp)Y7`qywcx z*ABm_1I^<X*#-LxO6KjVVXLQqYxKYsWTPFPwlE|<k=sSc052_6N;tL`yF&BCOa@B! z4P{w*pYvrE)F`V3HUwrEfm=)WV;a0*pl`_fayYI!aN!<RLA7}+vIrH=t*9<WFu%=j zmG_dn#?b>SIe{}XL^**T$U2Q##PnmwK@}$Z(MSEZ759CeA|L#Xu`3-lO3{~5NA`qH zG?J)*pB#!~G@+{*trN}&HPVW)EcsV44xQ37tqa82?4iq7L^le|ytt^;Vq5I`nYZOc z9#kh%ssg+}wJ7NaN6bPfFtx!StjsNl!P9jMkoHo6Zo@d!Ye2Hx!mw-{<|B<+CVJJZ zp>ibEmbMV<kw<XN5i#IkLM<{G+U85^C!obB;$maPq`__@szY^0&c2&lRo}U_cImtJ z%(b!gDy=@%_nrXvN)yFmjRE(D?V_BNV}T7Td@oh!Eblw4G*md3T&Mh~OJvc}zLi{( zp2@;kdP;70{h_QAmEk-L*}_<xQdn!{>7(Y|;2LAiq>Rrso?1JCiuLV)_VfgsVL%o` z!9;a5;u+;4U8q|v`lkvpI6s;NyZ7AVyoZS~v;s|N#P<yA(_|igc1+xv)r3lrS0w`n z(Nk1;C_bU)+}di?z<c4TumnGgkWGKG?>ZY5EoqqF5ZSt&jc*0Pg6ma`jSiWE9KFfa zqgb%|a530Zc2m9?WNQR**NgAMPETS_nI29~4Y498J*_mx%s}(QRg8j<+TInicX4$e zZjIde**oxM{ieCZ=huFS$9&9Rqy52=a|>nsUeG87J48M4Ee?TKA5}D6g{FSGisRbD zrA|2l2|V4Olbcg<1Q;vqIeM>F<_EIedy`IeIjS!fW+J~%H5KZdJc>cPmI|fbBXC3( zP#?Q>_PJb+l^sYG>aH~GTa1q#7<tX0LrkeI<|X<U1iw7&z(_-dZa=$~>LoUMw;>^w zZ{@GjI04)C>?*i2wB`cdhuEGYKqi5CI6+xmz|fa~AAqBtiY$i~3pHBO@7?<cDZ7)K zNM=wmy%&#Rj3}Y?R%t}CsM#sPxW3vN1!NrXzJXN&EpCtdjz_1~{5TeiZK?O(zageH z=*CgaBFt=zLh{1F2T}9`YbShCIwa3RZO%~YB)Iz|wyjMHIO5Lh1c4x;D9(b-`!LQr z%?ZMX{KWMnpF_X{Asw*gmn9Y>TDud{<0T%}qa!F>#y=7x0hjqz_HRfmBiyz0nLt=& zo))I>fln?tMf>M)wg`<$BseT(tcVm|-#4&r-0ddsE_P7L2_D-6Z8ImR1j<mZy<;Yw zQfYt8lRh7meQ_*8(>lP#q7(j*W;|!5R*nKJAS1v0{5J3%mIt|?#7hh#>TKVK(0#N~ z=`?Tayja4&jlLbWrVttl1w?owB$4mVVA3Nrf;iBJ7R<ggF)}ZrPA8!jZ1)F`nRtR5 z<w(`@-BwA%<ZagjS1TxFBEQSj_v%EyIYdF{+jS+f__hsa3eK|;AzST35>t9#NJOAI z<aO!kSXGk^#jJH#OqfVG17^?8g?<Q25f$F=J;;)rd6vIdvI8s78p`*+{($L=1MHkV z-PaH$`F!s?&}gyP4~3X~%o&3bI6siudXlxT8#e{NFs%k#@^(jSLr#7uTg>Pj_?iLJ z7u|)6bpx#ii4W>X!8(f75DDjm5X+Dc#?rDx7Rc`BO>wx7Nz;LcAJ-`hv?S+|W#ZNR z2BS#iLET8>zIsCbNe;OUBr!JSPjOpvSjxXB@af$;c<{!l8vbg=uvMVGxe=&^7#B0z zKW*9uYK{U=lC<-+ys-Q?b;;&-4X#EQFgCCdgM=<kUuSp^`HgoMbe~iPL84Yt!%Od8 zVB9q|7{$R)8M(2~qqy95da;<MS^l>+vG>JbIMnXp+RD_q%j#!`@^ZujvQBxcD9>jx z)YZVXk@6%hZF7>6oBB=Z>BnZ`JYk14vnJuY-wyNL$Sxjw*Y`x>V@*guzL+C9u>Inv z38c;U^x*VtMvf410az|O(G-D)6X%l|3J6$whE8)0#tpUUuHJ#Vyf%{Hwjp-Cq-X+8 z3J)wQoFGes$%;w5Zztz1tTRnuk}VIIpNiVON{>vxg3|#C@0Bl+U%e4o+lBk+Qm9ii zpC*v-OaQhgt)%&Ldr!qUEEm-9dofeo8$E2Xw8Q4*?5(On3^mX24kIUhYOzp7vwF+; z%7wH8#o@_2vo|X<r|MWF&A~N2$z!(hfl#hde{GQE0U`ZGUzxGm@3k(^=QEnV;$C3N zM4Z;nEu7LrZ+RMkUCiBasV1dJ{uBdvlzVq(>Q8IT(S?fPa+MUkog^$Q=)&^Ljj=|u zv_FYJqx~+<(nO)k7~#N16y=w~v<i1`;Q^K+)0coBv*VdMbg1{B&C2_0HvKf_N8#Zi 
z`6fpmr`-(m5Br~6DQ$kk?uVf8!AU#A@ko8icdsOYhp?yJt{u5REkV@9(RFzfuo^`B z3VXm+KPADDw(c(V9kUPTZb+&n(-9o~+5_5<@4oyZa_haAe>r?K0z^K!pr`@c%}4gj z$f*zAb~JC!C19H<xuJx31x4HedZ_{Ptwv)NklUd~Yz67OGR6NY+7hh$L?7p7nspOF zcoof(0y#q(cni^OtWCCYFW9iB8Lt7Q7Yz+t#53y_zP${(vt-+~mDC@li>uFpKThP# zGp~Ye#X@3ehOe##{Xu*P+ODtVPUJLtV~|=S`=R#m4vM6THm{i{1I0d?G*6-g`a!tE z7L?c@oRRk&V`VZwz>$zx&HH1GC)wqBfSn5+QlLq*!RLF_&zhR~g7KYHgnntl&UYVe z>96P)WN(@X$@7wH`}r5*(;VKdKu<c&QRU$R<g~*TB(;QoE%Y2SB6p2G`?(owN^Z_l zy&3g2M8p^ADRNQj^7G+JvqW(T!DamQ<3|X=MI!inP90}%Ri<JBZ`%QQu$hMtnEEk= zjMH(DZ3PATAn$@|F}P&ByOlrXkbLR45fw7USXe!a*a2adaumwY2!KCWA6zKR@A1y4 z5DpCTtGFyG#r2Hg6e8Fl9+Q3$TBLz~!yf6h)rx`3v^Zi5%L9^Fqyfu=h1&mlmKDN2 z`W22@;X9n~R^4F-dBz9_9Ip!#!dw``RrWH8!UHKr2i;pja5|iT&%`^f*(De5UTSOQ z8xY)$$dB<kwc$b0nBl}(_JM|<7)A)v1o$;KbiX&U>jZNai-ig8(COWmeRK=_6+*5l zr6)V0y|2YV>&O(%h>%p^Vz(;plwv%fX6QgO{pPWle4JCM`!wU|+Yn42AA*fChVzKG zt{#GGM1m%ZmLqnmJ#vU{D#u%pB1$nv7`}4POUnD~A@M+qa9y91;}NsTQbP-T!?H3G zB53*2zp|;LT0{*2P5-orEkJ||2_4Q!;&~80iD5vAnq?Yz=$-b(*El{6M4Pbk#2`$s zQAcba((Wx^S=Wwu2&F;`e}^?jTlaE91kPlfU94*|6qsNH{$M*Go^Vq5>YW6bp-Hy{ zOg)OCp>sFY+moiRwV^XH?bnd>RgO*WJ~)8&X7Uv8tLQgx6Rj9#>+`Br2<rDt%08y} zz9FPV-wyA&J!<LqWg)syyIMi=W~K-$400?YNOL#`P1@;jsWs=O|2O2;tvElHSQ%M+ z=h#mk$7$rBPEDHg?-{&SfgQHR>8}&fY-aqsn9yT{9CQpNRbcvK5>Zcf%FSBWs2~LV zH=%h<zyb{WSvIqKu$ZWi25)8FJ5NT^%h7_vy4)dFR@q+(QPWm9cuYSN`r)%Ta&YaA z<btxKW~^IhqdLn{^_4~`H8!?=>RkxdKUOld8aiV`@jT-|S2l>cFoHJDYHU^Q9L_Ec znW^j^r}-*uks>nrC|9^8)r@$mLBkk-p$8r2h+Vy#q^qIL@i1veG({NMJwf~UiNise zCN1si3|#-%RGc2!;5AD;rGKGJeeR*{V_^LT77CzH;#kr29qwubS4vrjcXIwH%K161 zBWb=%fj@77A?oI7;fqa`g;f1T>Xd%J9im6ke7JL;qQSR{H|o@WoU#EctA7XjTwI=` zvwcO?baU)Eob%Sd$?OT5Ua(h^1cEGyqbGoSzwbKBFxG8ddSOv;j1?XA>j$o7xGetG z5dB(m+#PssdUb~v4J0Y<Ck2@f_BQ3O^YTde$Ij}&p>4l2cPvljmgj+{w6|wMTe9U0 z!Tnh5@`CIq0o~wiM!i61Z>W|THT@MFFwC5AqUkFdyqU{E47DTy{a0MuIY!C{In3@v zM3GxDzppKx<avbq;jkr20m7!jGT^EhM<@*XG|da!en=Tvm*0lEBx^D!)=9G}y9Q*~ z5pA(UIrfv^7$>mvmUqJ-%!AB=E5U%I?rruyfu-jZtN$xP^LOCo4@ASx%Fg;vh=%iD zK<2-r%8GxZKPVbH{oyY#{e`A0n>yROIvJZf6EZO}{vE&(u?N7$b}r6@Tz_C502Kn@ z7t_*E*xmz1`_CC10C1k2iCOm#sPh*`<L^!Xbm0#mr08UCtYYdys0~PpiUar<rXBzq z#^2Z%!han_{yIt#>i#G6`cHEaXC|1xqv8DggbZRHE)ps(e*o-1brQ}@giL?nb7jD{ z1DF?pp8l;|oQ0l|km(O;0MK1=RzQaNZ^Q=xsLcV$u>B>hI42;(^_Q69On}TEE&XZ2 z21v8~qdhx46CvB*GXNR?$t3yH0V!K1E`X-~I#bG)8Su4?fA0{Wjv3H9#=jUNfE+U* z_qQ(q&_e!`w*EB#hqnGAuP`zGPv{N*hwK$5roUwU?_vM{pXe)p7x#}2{)4~rcSZge zz5Gv@5&s+7$6p`wcUlkAKWydyMlt7ofqxhu{e<@gTial2f4Ct3>uU<=gkxL!-(9Q! 
zfKypn{^R2LV=@0l?EeXI=YK~C`AhEqN(K3cCjPSC|5Xz(|DNIWpP<}xU%6VmuWop~ z+K2P6q?!x<<o*SlNm_JD#9|3?Y*14DpytSgbKkzn`XW%3Ym;ifC608s&%!V>8o2TV zt~nXFlZaqJ!*SqJeb>k=D(=N{IXTU4d|Z9~fnc-_0<|6;9sYRHxz&7{-L<4=eZKW> z=f0xrmbPK~ABWU#pU0lhsn4gp&(qJ>mwS#a_$B+-ysb~?T=O=++qLVJ?o0pYL;eMU zkB3wHp1iH))-5>&9-W@|-1U$5x8FU`?oX-fh95dxrFmQNhZ(h7^%SzTfWVg(_RsGt zYx#)#mtg)GxejdyYg-?44?#NzQ$1A*KEAs{d3R~gQx%R^TNOP_Z|$EKjl1gRcGa)e zZQ7su5AoFN__ce^UH~HK$=-)ll=f+VC4hhTp~mY;=zIVC@F5jO<DL6#OVIVMvWB3* z#%K76@S4~1*|@RnVEpOK_T%<f5s%Hp+t%^W4$&Onp2&zZf_vk2TIvR!-X4p_O~`&Z z_s2e-FPGQUQ;WU8Th=*5zvWXd4jJaiBkyM=akw{J*mHj@B0-!|R4Nx{1uBp{Y&%m; z4Sz2G4K7^F_kzL@QBb9{&8wZYIQ26^1bwF!yCY!gFau0KhBc4vFva>EOtd#j)H8kE zwvz1vNJkhkWZm1_-plLv>9u2jOF?;oRvfrIe=K<pf}XV}zTv!UotoL0evyYwUu*#- zv^XBvFe+hQO?V&I-Mw-=#N8cCxOWMD?BIe}208h+lMsGr(X?oS0>m(n)meK^pQ&T8 zrv0f8C@nwrn`ScbfCCw@8i6^LX3P6@y^$LZ0LBm9gStBgYU6jqgZSySwl?6y&9NVC z`@1Y1G~cmqTOr)?a>M+ZZw@UOci?#RIBI9U@nidB@0f%@*H@#sEDJOREFd~O*ADl3 zb<2s<4kawlg1u2kyB-$P$wmF#<&eG#NSC+!Zok>*7-?<H<&NhCIW<&YCpt|3MT{0A zV25D;S4%52GTfbN@hPY#B`+Vx<4|@%dN2`w8&|dw?ZTVEq&0Y4I1{GuuU>WPmkuhU z$~y$ZM@I>Jfy~tRXN~M@wrt3NcC@Ko7ajHM)!BgK5|zyc#^vei;a}QQVQ1vDW=0lr z3e0xOy5HbQlu^f3lGoM{AvzGIBC(>u7-9!~@PYwW+6;x}IIsLd<o0HmmCk=Ng9;au z))_G2L_?>N(-N87-rVo57k^t11#o5g5w4O8CMEIJ9t9x;ZB&GJx9+A9tU<Rwh%x7t z-If?n{CMTfPZFjBE3ds1;Pxw@3{(M4x#jmLLSoBDSG-`DfkOlG&OKCn4rG6yhFp3) zOT-4z8MxdS_fK(jlq^qO>|h}-F!0eGk=QhKhz=p`TqJBg&1rM($kl>vDzZuRF>Rb& zb{8fH^;H~p-hz!6_Uq@g-L|VPKfPH#&AhpcHi6%8bFC8A;mI?DXe9;K;K@-`flVsE z9L8XlqZl|%IukxKTO%EYTzAILj+k~M$uuWMsi1jaeV^JiZSsMd-r9z(f~rV!QY8iH zR?yDGI@=uXFRObFE>eRFU_3oS51IE7ozFEBa9&f0E_@fD&$jj6u=)a<bOt2gi%mOY zp-H)}HCIN)GcYY<!%laSS(<2#zrBJ;Kyz0DK^?bUH5Yz&ud`hxA0rYtmc);s3@k=O z9Rzu^v+VFrl~!$hH;|N|Hi4pzA@%khW9b*73O7jKq81C>H4>(lyU`Cr76e%lVhb#n zL~+MLj8OR&LlsCW^$JHQcnwA7vqDYS8)Y6AN-5vZfHSThT!X}!LQAYsxcrMVos7X& z6x-815^OdpaZ-)>-C84qpfv^BJbjZwhJ_FgQ#$^E(2KGZY&J1lNI1HVa~duQVmVnb z40LrSC#@RJmJ(^Gg9N`M$UqU54$l0Pyk_U!HGoMavaf+ZLm=HDuOTP)Bby}|7lO&h zI4WA>Q`o&FPEh@A0OOcjd&<)HYnYYM7?!~XdSu`d_2hGLDOiI59s-~+O3<x}h$Ej1 zE<j1*D}KrwU<Xa{Tb*InnCx5w-%m;;q+Yqh>Q6TacoW058PmR-s^C;EGbXT`L;WFF z?cec0%&Nz^w}6L7e5EU)l>?9@19uR+r6m|^5O3M*qa0Yn`8=bd>qzH+EG%c`NBi({ zV{noSD)jDn4WT>t##>q<ebR<goWTh%VhR`b%?wV|5t7k~@xW`A9QE5XB(Sta+)ke{ zUHo#g&i!b1KerTWfo`vJ)`>7!h41e&w#2H0ru%ZhBNn_S`Yo!TCT<Aa=Y+o~M93|N zxXePdT8JH$>^9tIwqY`dXeabY1M8#H(0cHjR)qS}8-FXTg#QPbiOUy`5<aL!Ct}F7 z2l4^#?Rs@nJO#w~-yi8LMdB%o6NQ81_Gxt(nGO)mpq#*o;E<4nmaIQ2n`g0QQ$?ge zBjy`|0YPh2W<ax$5p@#Oe{|?1eA@M78ff~8xrJy|U7;~B4H=~GaiPFbYo&vHC(fU{ zn*`(3(l=jzwQ&h4+9D@aHfg#@oFN4He%AtoCExW+7&wQ!`IIyfJz@x<;+wFH5;_%~ zZguvrn>$&y50~-2corAKEJ;HVZz*7Qmwg0zqc0?Rh8p;B=A7IK1d_<(t4L_xYsX#p zYawT%14&)e*s|||l+R-fStF>S>T8p{SH!6cmV`s!#2@}2#?C2NlptEq+qP}nwr$(C z&bDpawr$(CZJ+Ivok}XHo12@;!%S7bcFpucukZi6FfM+Xt4%?P^%E&+AJbY!LxYI} zKFH8W`p*ViSWVLrqJZTfKywZg2bLI8AR8dIMUKrtejH?p@(Y?)bWLo=)jtaGXp;k` z@hGsNDH;^#m(7Rc<PnRk;MH6f*(0&(y5-iAYHwg@CLJ$jL3&^yjec{eQ!qZ~DD!$L zXW}yp%sKrFaa5}yXR6bX0O@)z178qaBTY=CVTe%mSXCl55E7pKuXb2+soS<AQE=3S zBds)YX48TMi~fpf&L@rEDF-a9{^O0)6Tq-X&BKolMqDAg!9%RYh6Dh-fmBPDTEXyV zyC5zkJ~L8^t4E>jyg1IG;kb&G{-lUP_vIRQ_Art=9_D~aQ2y035>-Vv42$)*`?+c~ zd};;@4{+2#=C(Q8PLcX%DNq7~oW~YJE^Xn=@dgIqy3780M{6_3sTTquAc^;-Sm6}< zkh7?;SwsHJ=^lSdL9y8CXbVWs!!&H58b9_$t;N{_We2)SMrd<rB`uw-;4D)wwJIr) zX<>mT2AG9}kRcm(l9}gU0Yw8;=t#flq@;fqiAXS}Lc|{h{fkcA;`e*eq<y6W0^M*u zX!^q_n@K>p_Qm|h@0+O;@*ohZlN?OFM{JK^P!%FHes5zLmzEsj2;-Yc4@3YsXemug zPN0GS{VBEh+Hr7e;UG*fGoh(t=@I3QK7ewYZ~^1FIkeA?g!;=-i(!LnL=uHVRyC|h zC1uDdKm^%ZOr@kl?kZDISun#@-pd?=l(`X|<j;8CUCw%{;^-ANwS_Ao7k3MLBGw0j z*>Sp36>>Cd=8;_j<x0k+uNSOn;fIQ#rP5LBIP$UO8g3)NEH{ceMOz^UwM=B8czKKR 
z_w)$b5K5DP6T3Au;#XuK8dxxhc*RsmN$6`p0G*(~raG)bpWw?_1#fsuh6LkOBpB!% zG9wRir11E4FWD0FLJW-2e!U%#rKNu-M$=9OE`wDqaOQd<C^1H*G8o8vk|hL^5}H|o zeNjD;#|jivHgp`IgF0QU47Fp{mh<dLEs=o%C=kh~)L|s~64u&l;(cAz>ywoLOQGbN zC?$%PoD@D@oxH)O;i)BQ4)zazrekAENT)D4{*fqBP>0%Z><i%aKGh46wGzb|Ab|8e zEo^52TnvZe(XUEbph`l^ViTEi{m?MOc&CW`wu94Flh88LnXeVBj&DjTAvc4SI7u|n zhvimx)0x-vv&uDbFFPJZ1F|R@T@X%Oof+JFn>Yr~(0LVR^Eiou*C_+o*F&;1z#Qnt z@B*L+2ki<|Qh&bz2e3)61KQabR19}a4^52hp`BZrg7U~}V~&cJTjq#($E<X%T3v6j z9|Uqmi$x;&!%W0jxSj}Q@M|B@leK!&Fs3hfh`a#=(jCs1Pj8M{u7s~Y4T6w4lR`D@ z+8uEut%=J{Lb;kqX1;={)kd6~*uWn~o~44`<J9R+p%2P7R>QPCnG}ytTrc<;g_p_b z^dJuYrRKC*)-l+@Z9;tpq9=G+(SX9c)$W!4(4tXOLJ3+iImsu3k47)Kl(vR{OnV;~ zc@K6-gWF&Rs!%+ej3r#-<*vgF064UQu)J#uwi_Zby^yhyPh1Ombst<Sl!CLLu3|Za z#RYU7$yyah2eHyXYv*z{0mEWPRlrh%F)SnY*{!P8Xste$DM*WNV^8~$RexrcsBEBK z%{<e>Nt6*`izFK8Lf0Z?J*UEL`k0C1ixov5k;wppWfq-XTt+dWlt>bN_*Jb^uP$KH z_OxO|HIWqiX5#Zq>9Z2^IbKB*=g^Q9$)KRuxS^d0sDnRzCP^V)8sv^s%^Mz}%qc)2 zHylA=>BsTm%DZn-6hO&aY3G`-TKS#_5H_I66cQBQ*X^1&5fsHWrNwT07K)cdqh@G? zQ=vP#)4hqXUR(%n>Cgz<RG`9!t3t6AY~+ZrrY!6ek*V>oKn^?xF}box-g8F*uBr+% zgBr0)#a4(>LE^Vq=2HNP3&mh@*mptLt}_pVLrPbe0^dlEeF>Sgi;Xo0^1FHzX7}4s zL(v_Q&Z18+%^|1;TMSA)4C;rA#AZ*H+4<95k+Y64U|_AYn6z+}qdtRGW9dP(raUru zjlt8hAF(Mb57Sl6SsI&3ISLjioW+t1D<38{Xc7_ATI)q!kYQfyM8^<VP#77*S495t zAs=-7`e&Q)Qfhq;Rq>cxSm+Wb!LmX|Kpqzf`3pTn>7E#Z2wy^b#>RQCX|n6K&cB?q z$l8S&;i#p$F|pB^bQ0eG*wd5L8EbN>(j)b<yyE;d+tg+sT7V)t5M0xzJoFUbKZkX; zvt)4*T99QBI0}ElY+rN}R((CFzUJ77UOM<NI;f@w;ctyEykJ%fMG+!5Wn$Ozorfl> zWD2-o+3_m?QR2AwPj499ZK!gG&Y~yIpMOpv7;rL9HO#7in!K-H%-m@SZo;Blfuv^v z4HAJ{5ygmvBlg`)C4DuF+7k%Phq6@zsU(6>GG`99Ef=I2z`u5ip_(g6ZZy5Hvdmg) zFK>x{>}>Y|W}}$6L0y}S_)W>FD$PtFWkT>6F^)5BkfCJbHeK>6z!wxLm3fjw6a-M# zi;6K&@Jy#*4*_an<mjR7BNwq#A6zE0Lt`%<5Os0ZUMH3+aM=#E_E3mcWeI`i`QALi zatfigR;DwD1U(6!Kr)|Tbf;1ez+?tT6JO<opqk-TjpQV1fGE8@gaylhM%09(9i@1h zLb0PX8=IOhLJUeeyO!A!G{JRT3$@5aE58t6qJb%8)D8CJbss}H+v0aY(uEAvqa=P1 zH#3pO4Q=_ae(x^nSd^YaqA`mU9mt2HOg#J9k`*e;nY<feZKEL=3=HTXxF*Dus`oDF zH7JLz2BT%jTMA^nvD4|09A>`}U=U+h2^^s!-XbC0mx!GEs~{&F8>bCmUx7Mn{s_%2 zN+RlT4M29X@^{NdbE|4X0u&LV6$ulxP(XdALHo<WO03|Z_c4#K@N#_`&(>#(vf$Ai zQ6-QnsU-aw^|3JoKd5;QAXzsX@<4~RP5h`(RL$S5p>Br+yO+1QpQLav2Pj!>rE!l2 z6IUK&<~>Y3T101AFF??wfIPLsbdnZDVz!(G*tu~JO|&Xhz5!HKR#-ug1sN*JAAC*Z zAlKomKgAU&qmF|pa8b6{jjmKY^WF~OF2{9kAy~+43UQ-#!Hh^U7@k!kK&$cUUhf~S zFV3Vuqo9?C#FmvHw;GvYt=N4Hg7Oj_r$ta?wP(EVmljDYZ{Z<=AYlg=@TFO^C^j#O zp`y-Q0A1@CPVuwi!<x-TfvuRekT8Y{RePl3W@RY0$GFBaP-#zJO*Hx3Zw<Sl5;A&L zW`MZj2Zsx2v7lSd%>Co1hZzV@B0OSf>4ciu&oHP*FbqVoT#?8n>;RTj!a%5`h-8X` zHGpV5A%tjItEwKRl5O=Te4)|D53I~dv%&&YYS3Nnwd5r7o%vkBxn#X&->wmVePI}U zapAjU@#`{#usDu7D4Ay+#!16upK{V5;r4-gIJ`3)Lqj(occP(whhedZrJnRQ;fgd+ zaI=Qun~WUvWr46Yy$Z6WQza8?9rbq~<CGZ)s%Cx%0WvK$@|eF=Sixrvpm9luYgwj1 zt0c3@*vA#pi|y7T?GYBZG+d|#z-6Q!8=7@mpzDj80<W~{l{%Q%J9;uSWasUiik=0m zW61Fj&QV~nFyx@Y3((}Lhb)eY;(RHXLZw-RDV|K;l#+zSVoOfC!xfC}pd16IuBeV{ zLo4{!Wbzm2$O{ECWR9ppW+&Y#Key>Hy!WZC6zb-8GEUwIQ5Uk6#HG?40fFj?NJ|7m zVu-FN3@JpxH7_-*2Q9B*fFToz$ypGgkP|n<W_^`uwJ?>Dy@qgQ)36x(qd!rc%R-sT zE!Pskg4Vn_%Qr~{f9KuCQVux4go$BG>odze=H>1V_9u%{2*OC3X545AOiS2B#HB^7 zmLzg<j4Ve0g6Yl{jakKDZFL=u^wI{{k>s4b5Nk|F073e5G#tgz-oF{_3$p_Y1P_g1 z$&SZkWP*QN7htvOP+(X9Fs6&7*$*{%(=nAX3!$Is%x+kj(-!GCvLhzNnWkM}+t`(} zL`iKJGw8MBC`@f>_pxdzh<diHiOY5tDxza5w+PbZ7O-zCRmI7S7@<;q#BdFrAniy! zRCpx@=-my2kBlas*u;os_vCAK&hX`KLpdxgbLMojC3>)_ZN!uSOs`FBYJ_YqW}ICH z<e^ZK+zJ&Ym<Z!5<Is&ZNHHjV?NWxV4Eu$3WM&?^7KX-Qoe!>uvy{TshndVX<x1a- zH2Cbs5f5@^RP&)x^lbYl*l>=S1jfw;Y-EciN?V?^rCKDdwjS2Ttl5Se2gC}HA6jQ@ z?dw?-iG`DrFv@gC*`7J+Dvah31NlVjvCR?)a3urrA8`Rx3RJbxS=rEG<#b&{j0Ov! 
z<>T-*Ny?GMrb|#Q>eD(dBW}%h1t|ef$yBi<Ye4jws{uXM@<-zPN98O^ra@qX%>&K9 zKSWrC;t6V6?r6e0`nff)4+vK(h0>w<mV<wR)-@t9apr5Z6dPni5~M#{fCNH8i&7f4 z3&RRqi|3W{o~anj@+5;44RDE-l3ZcJBjzj*AQhjcNg)(>G)6h=mHP$U<dZ7b?@X~# zhu#C4up{0>^em~$bi30RxIb2Ox>5ctbwgjAtRcFJp*ao#Zj&S;(DVc$TP@7+Gd$Me zV)w|(bP|sT7jB7x03$k^Dz+-}<=*!Zzt8K`-td+4<EM+yq`e=W?;w3g*xAp^*S~kv z1I6zb{jaS#XkGxMM+d_K1Atk76AZ4|_-LZcvIV9q9Q#x$7*wRk-QpG=8Csh^y@25& zm1Qo6ad|&0cX};Ttp7ri*M28#yl>-ZN(RwbxfS-XsnW8ZruTs{bx&}^A4vZsm=j#| z8SaXR2M(-B5gvgG<HZ2#?-(#_y3+{fkDHdRO#r%aJQM&*%!^lg6PB(yaw}xVRdfR1 zC?n2PEW3Fy>d#rPsk33+`XppI8D&tOBPCg+-?s+P4y|B9$9^xj=tMoZEwks2VCQ8@ znX9-uimAe0K^Bd5bu;F8iMB;l{m?_3{;=kiZP1<0?H4CD7<IKG4-iyAmW~Hmk}V@@ zO`fdlNDH=_q~aR=4d{KZ!i3RBd8cHX;%v*F>_GCaC4C44>q;pfWOJ-F1rCx9X3iQ{ zx2`Sc+AkrUER}PRh$Ip2U8D{=k)@75reRr2q<?hT4$4MNk2O#*%wnmZ9@<ZgR)br( zaI8HUbt+YU^s&+*JzaxPZdB(mcMq$EIjUC8&aq9^!eoLW<<yfJTwub0=;BLIT3+!# zNCH<1nL!3)i_N&~#)_^|XU8ZmP##!G`_tOX^d@zl#biYB<*bD{s6iy&lA3ZzcImD< zXt8ecHa-MJ;-0x5RkYoV{#pqZq~PebOtU;Ib~xq1wKB;;l`arwXYc3;oS|nyRQ{#G z#_5<FiE*lxXmDQ{n?+WB4L3vG{mZ)trT^K$Jf+}srTypmIRHTXE4r>2(ct*d9z1X! z0Zd7roCD0V;I`5%Tdiat$Y^zpgX0={T7?a>cIyL3!gYugPliT&j!SEf^C{&F84G!a z*YuP0S`Wt)0GMC;91$_6P`tP}BrzjmZ(aO=Qnc5dNIirDgH4=$Ufyj)7*vN`hs&Y* zbbdi0OjP%j7=}O&S`GraiNA+m@#yH8liG60y!@uzafvEUumTvx*rqc~w+=1N7x8bX z6Lz2;%v~@+mQ}A_L;1)luVd1>MBEn-3Im|Ga6sXl7_q{{tKpaP(PA%HU61n~x0E5! zwKKzH4>p9ueh7hUFNC_;kyF{@-wrL^`}gwA|F7=fcb58<=f|C|p}qH}{!9$|_goJ) z{UmmtoI3Z<Hnwl?+fYchHi|PUQ#Oi@&MEgZ=a5B52P8!Cn){=Yxp)i1L`O+D@RUIO ziaQfwLIErWjHh}O)cb(@<e8{nZK%h*kWcgzI+GNOYi1nIjIjFb^X$ZSKYsS^Cp-Sn z7_B|B90s>C4dN0T*ER9Xpxqjfb1E(!gN4!5_zy?XWaGn_lV0vr#t-9tK-_XgGT>f% zz%);W7<+92+Eqq^wp2XCp0p{82`yQ7)$m=r(9c5N*M^b|xjH2<AA@??@3?@ON{B(c zGZ6z4lZlNV#+huemC(|xyN42abCcty4sD3#++SQv_T;-(_D*6_US*WBNw76N!7JB{ z!zfW{zfb({iL)*8w)%5bs)0~h4+74irt@k#Pt55jOtpo`jKLAHl~9uS3B2w@{cwek zq_1K{hFYyn9~POEBKIkZIRXoJi|C9k^kcOCn)^rWuxp^NDz83`$hrMFOjR|;uC;dw zS)6f5q9soFjDt|6UqoUHbZj?xDz0BSjPS4;2X7*FM$Dw>0q0oSsW!kWSc4FJS~bvw zE>(@Su(3$*E$MUUPu|x-(7*0)TcR<H0L-*E)$1@cw}Ix=MH{tOUeqgoVj01Z6yEU0 z6ZG4Vg5b%{Kb!M74W1TGay5gX>)B9p1M6y!!Mt;B8bVpAb!)fs`Y$^np2*8{$Y3(( z!xtYUa^0kZTcMv}?DbVd+M$cPHdq>`q65&-&~j+=?R0WS4_t7Cm(BN3&=hHotz)nu zjWjARTx}=@$n8$G5Gxlw-`4UoPPuYgLc8r!?DsCkiZ3i51Q9!e6H69?fcrxSE99v` za+SOgFIE6gXdyHU<v}*Z0&~`>ZvbsVNy)7f-X-(}dX4QT$8eBsPFvo7?=7EIBX2Mn z^nk}UEr&a4(_E3-){GX3<X*5>#%ko}2pDHtv&X4osLqsZg9hZ~up$6Wl%{z4HJaMc z#nQL3XZ3$`{O!EW%EN-Z(|fm1*7(=E-iZpAq!YAtM)|iNWFm4bhEZH}hjg?U=i}XL z#*YW^gLg$#R3JY}M4DQnqdJw?V}oFFxy3rWSGBdkC`GGq4kbZC;jjk2pqKY}yo06d z`uhGHAq(@Ir&_9<oX?~FJumeVhY1aoE3`Cl<7@};2q2jG2Ss(g!JS!YLl9gpOrx)5 zhyq8db3x4f=2U|*=9u=|_~=o9_B?PsViCrsFLifktQmUg3c_6rM!nFWEiZq}T$b~9 zwK=3+k);5;OAkf-g!A7wVs)GbPo+dF3PcBFjyB+{J2aU7gL&ai2To3z?mZ-+)nZaE z0$DvFnY{V52+vPp?=4l5Q`KZSeqmJmD3zt*_*QHb)*sMZFYAC#R=y0d63r2G5VSx& zf|NSRh(kW+iSoq;@vFF&SNxrAUx6V8Y04!;LFGxAL-p<?KKiVXUABjN?Ukot=-MkV zD-aeT5gcrb9D}mUx73-chbs-TMM|KXfWQqF^o+dZGfr)blF2r)V@(Tbnhum6s(LtG z6C496!Iwc)q4>$i)Vw%$@h9x@RGJT;7Hgb_p&q7<GqxmYVlXC1&(KCC<0(jxq!0*L zehb7>C8D}TaavTAG-?J{uuMJ6qB1CZI!ut`nl(>pqf8+lgia-UsAvy2RL!-kpcc`Q z=7@+G%)%jZSKp1ru$qi(>L##cAhx_TfDgB)Xbkyav@A{jZee`cr7KYcJcQ>}!@4@% z<+rguP$fpm4_H{8(j$}eeie?1NSv;VXB8%|YP@xR)ws-d5Fn&R_(7=H#b@r0RTij5 z?zzOk^%Jqmki-wt6l=(PohG$Qov<n(B`<yf!J@%RJ+njx4O$)^zxf$&rgpXoL4owh zhjhI$1@ukME$6q%$BiX99==v$M|K1$Q<`s?WsoJkiVxE^)eeID@0^_MIweitOTUv} zby5dLZBo&@TIni(RPxonR6l>qR2s9L0IE{1@=V}E`R3lR8n;xd(Wa#UI4U_+HMK0@ z80~FYP$Vf^yWV4m0t}idDccTTLjMSSukSV3n#z#W5sPv*0tw!8K(2_(0eo3ZjZ1G; zu!W7j_pp1Li`icBH0hg(;}&jPxKWj+0Bszz5*bIE(trhiM8)X@>NvGf*BV!TCa~QR zGjtuGX}Brh@6>7*$KP(oEJ>APS|~$b`NRXuD3nP~JAb(vAkLi;X7gdfr_w&<tYqv< 
zD{XWqAo^z4e{DNb+2oFnB_i}i4^iD-VpCYIt)h-g*1otO3nnJ9h=MAGiLvd>tn26J z55N^gS8kll<CzLLYhM=AW%<N29X5DS21nV*Nd<i;CMd!m`ktQvpF3on%!~~G?*7U7 z@2@HR?8?(Bx!Do0i7f<}wOkYsm&tpx6eYM#fs68--LlEsp1*vQb^LvAqj6!XM%{XR z8NLrob5rDeUsL*gvi~mc&~72s<^SQ}-#-0O{)eh>*REFfjCrb0XI(d<|GZnWcDuzW zpI&CzY8?bPKOxDkElSX(|8)be_rz(=kBBN{<9cW_AGXS&kA#VZ^?cThH5~TZbGek> zOU<|DF59<wzA6pgAMwQ_+o1oz+OZFOYe=D*&Cp16n-hXjpnZR2F|NX0v@MRSG8}sP zO#kQa0$W=IA;zt#DgF9i!_T{C^Nev?1MuTn!xEs(F&30>`m(>VyPgb#^pQIsoV5`Q z{$e?-^4m^qU~znzVRhkg)a-w5s`)JZH(CPnx)+uiIbV3u4{FgJ!hIgj{Iq-A>SiZ! zFpkfy7%?mRgwshoM`v<K+*^8qM6&cj=kS-V|9)P2rAxa%g><|ZcE3B_olZTTMqS8r zZ!_u4eC-{;nYIU!wgvy2W#4?~`ZtwmUVotaLDU9OXEaEk$}{x5-t#c~d~&F*x+j2% z^HuE-b#kkRdz|EWy`a_>A^)@F*Y!LEA8Z?2hp;-e!CDr0xQNZ4+9NR#x%N$BT>^bQ z(vPT4ISHL^9WNysQ&g_xR}go~ij<R7+p%bfozOhW2%e?v=Wx?NxO$={BM=_sIAuWP z8;~v?M-0}A0W;_Oo^`~|uk_$yIid`y^$1bs$KjLRb2Vo*(hP2B&Upqw%~4F`UOg^B ztz&vm^y7!uA{mlf?jkM7Q9xchAArT-I7)x{0zdhekMW$RJK^d2{KMd#WV=QUS^i%j zn6MpNryU&UuVaD_ivt6f^D1uNi^4rD&pf)dX>4@b(|E7}vd=#z=N1!0aLzcK>no~a z=8MTXsj_+V8QVpyEtqj7$0yT)BtwaSYNkKfqr`UT=I*J11LpNivXm2l1B)C7uLbnG zAH(C_;z@-HuqO<s&Lh&fLWc{u#Zw44E+=Kg1}q8-eajMrSY1_%Dq$=n`Eu!v=@*Z4 zzTd(D&DI-@<oL3<f3NqgUdX;1Q7kgS=Wt+rW?YY!_)d~ojrCme&7m^~h$$s9vKkJh ztBV^!{whS%DXLUK(qWasodU#QK-`vwO+V3Z#ssI)Vp}gNGJ9hPHvxb*!iPrL^X==H zl9S202jm{mp1hnbZKo18zmZ_gG|IcN5h6J;O{@dFG?L9w7^Txi{K+dweZY!Pq>IyY zu0^O`xL?8Tc~_)qH~Gc@$4Pns*Qi>oXy4Mq1Y2f*0C#Ba!Js34AwCKZYr~UK9>38n z<B$^`U3qghxA2r*jr3lKWT?IJ{#sU&gImO~BwRcubMr85tjs=_Y8*Sl_W_ye#Zd&` z2diLR%pV$23vDp<?wh<&uz+E4Dv}2$LX!fw|A)ern37y*^4_L*5D;FI-I=)-IRNw= ztrJ2@nA|Vm?qGwFDu%w+-jZ(plEA=vBH09KNXMkuz-$uSiRv6RdDB+^x!8yi6yT>( z`H^5(D>YWocQ(TAixb*a6IaIrf{IjcWYVRHXlK1_dVuZVD>4x&Uo!4lV_b5Yr*||j zMQPW^&BI`lsF<y1&M5(Rv>^FjPuLR?f@Ac#=tMx$_bq{|lb#_&2bo7~ebw^!Z!Lcw z-~MjboP<9mtq|2EN`Vl%b{QAgy<Izhnm}RVJc2f|UAf~Fz#)i&VL}`)uW%lRCPOg% zkjOEG+fW#GDv9uK`@4It)j2f6Z5&qq>phtkl3YyHfTa9d*H4WynpnkJEfcF#2)8FU zn8Br4v9yGuADx_ELWvaBXW0lN@7@ogDEo7#ATAhC>3m$oX7|%#ss9Ap$TL%N2kH`z zt4*l2hY6<6>uKaS|1v1e*T{!_YZ!PkMHB0Ja@1LRt@B%?M{l_gP75Y=RZR;eT#5Kq zi*%RjowcGxMc>X&6LKYjMo42+l3P1s$JemiCNHhEr^zeT3J>rMVQcUKbed|Gg&F<1 z;{kh*7(R2&bPU1`QxxJRpC0;f@YJV%cZ@m7J<!W2iphz})==6VIAa~5D;fbaV`V#G z#TH&uPY9h)E#&JViJ6u8Yz!(UV(+)Bim5a=W`Hq&SRqfbTRUYYLLna^e%g_{!|i?I zfQ{(=>FyJyIwH}lIRpbicHl_^NThYxQB|f?@hc)|c1#=*g!Ezn3My?JGbSOy6J5tD zT>)@Mq_#aKCFHg>7&Y?G?W_SCYL=C`J2(tSSIq?1t&vo8-^`;3T*95q7-ja3{JxsI z@GWS8#@#du{{jS{cHMNE{;1^P&dGU!$#8K>5yrrQz&l7al--{Z=Yrn}M?7PbRg@R8 zm?|Tj+UPR-gfxks<2+e{z4abN8=5HLGg*c|r)ooA1vgwN&-QG5(m3)>mgSU?sYKIu za$|eyL)8)zxaipXd1IqX+!2IZE}bdu-?@}YMNZ++GL>E^n_(&;B{kB8vMeT(NVXME z6Gp7kWP9VAA)lE^`_5Z3>6Bn3<@eyK)L@e8s7>516oks~Q<Ti*p>|buC1Er>7}_q= zj@|WE$aOVgauO#88pb<mfBXZADaAn=pCD+CJZ7&OFK2rECVOOw1`&#p^x7lO6t=GJ zkUQyy)OxR=$NSCG{_*e-$9H&yNEt9t<s^|qN8}xmLJ}~$Vx=H0=cx@Pj)efjwPN&v z_rYVBo3UFP5RaKS3c>G2ZJSA(mC0LAy%B9QlooDl|9d_k-eD4XzBcObmITye2!+vR zy{EH9E;@{p*YObwNSTMJzlS#}g8)uI<I6dzu`SZc7ZlZxTOL32s(waPAi{IDhQMMT z&7=vqq^+S^vX5Vs95?*pkO!p_((lJNv_lKY1KKiHrSV_pJOD42%2cH&eeYA`e<%Ir zW6g&VkkWkL4E=DoOdJaUfGH$v<jo4ALk1C$=-p-UoB<9TVV$U`=F3GtKi_J?$o3_u z192Bv1MD$roGCbb(8+4jDso^Imv=Nl%pvyI5rWap*@{GfRl<-xjXYHuuw6~S?fq~F zC*LGLMGoJ@`4bZ7vgz5)wH3Pz4DcC`#Km|l@~tndWZ}ec&C2seAF}udewfNS(ApHh z3F||~jW71MD8_dI{a0F%$9+xfyMr5oTuzpx6r=!L3T2R(9Okc9CqZy}e<O2jEe}h@ z2Ojl9QyTbM-d)dUH9eP@k*%7mDzvsg)Qsw|M73DTJx7lJnnW&JkvT$h#to2FR{;d9 zxRzWk#MlZ6k}r5Uini+JL8sN))IJOjo<4llA7CJAS)Wx3I1JQFy@gkY$ZTK1bNHT~ z2P2+1HkCMng7j^JSfZL9+?BO>C%#BrF6?%(25tdSs0M8p!J|}+UFF!h4y(27Jpb>C zH#Qo5I{$Yw513(dW*eT55BSzNW|Zz-%-z=+^fCJ{4DldIJ%mg-0bngebHLUmd&w|$ zJXt1Q-+b;|S=SN**vT*mHNu$M*~@9+R3=oNC6mn-s(=X`@%aAoIH{$vV*;w7D+q=8 
z^%`~L>SmMmw;A*_Frgx&>P+l^ijd~g1K$WZ#g5+{-kx`d=`wh|-{ZM2tS?i&wUl81 z7>_c}Z2wrE<_^%gw>flVJA1S7x}g5z<Ob}Vow(qCbm98;(wyc!-7Fc;+}r4q)GYiM zJ4&zmW7lsR7L_Vpu4@9aV5%s&JUb|Suieo8C4<SU)J$~e$mXP~<j+bA20v)L8euU} zXVheY#SSTjn^xu2q30Z`e*izflxLDLb%-c*F>np$ClG625<H{Kn^h?|)cn=?sV;mA zaw0ZGB;K#zXbl&u@>6cd>v<87%Ur^>Y_A9ra6eWU%vKmw<tkdCmMsMHAgka)FssMx z)1<eM7VwrLJ5$@5GRZ@93eu6*5|LYct=tOR;JcY&;H3ol5^%^dD5tUx5Q;a6$%T5! zS!ZPu2En>~rf4k_v$hT5=O4+zG7|;Ta|@gwr&{7$NWt`t|I4&Nv%~Q*>=Xhoxgl}y zJY`woE|*w*BAuKXA|j>S$UCH91QS+jZQ&dD#%2AM{jkLu2Bu8O;$?TSL4h;!<V6i( zlr6UnBTA-!Zw!Kfyu;_zXdOu+A|x$&FQ$i23&=IYA)G~gIma)aM4GqI=YHE)JZ)du ze91(vwrIjCSG4QFsU(7Wp(5l7c^WKm8tqf-sI-U$#d~97E{LSISv#&W8Oney>nQ;a z+PgBDY6~tb351<(0)e+Ob0vJ3A6Oc3cWnWEZJn)ghA6X2_9NkmS|;Fb_;|{)g_Fz| zLFrT&!1}C=*JKmP0&%`3R6wGa=eCdns^i&zIWXwE@GH0mA=tR#X-}kr9Lm_qt$6^~ z{?M&2@iuGS>z8|_(~L<SQyH0v+|^Cws$yek5DoW;adnNtobSG~1;0BI5(+6Gi-;YC zxv9i(w<w|tDvzSB>2HLLW7HNY@bfK2nZSjNV{Lw^RWE#TRG@asGe`lxNfYnH3BCiK z$$LsZ)z3hzf6?ffnqVcwsiw26)UY+3z1w`<ru<~FJHzT<RYVIZ0Xl`I<{xVV8zhZa zG}(r&6_LCRXtotuS!TSJXw^>e9`SYarup%01W?tG>0m@Ums*%;uARrNEMUjDDMD4( z(dx<fAecoUmhO}$e|rO~VF7Ld5K;%52+J0)Q5$#>>0o+6(g@p3LHyW8bbejB-rnSs zJwQgtP~`B0r>35GW23THgii({tjAaeO$GWBFHMt^Ki~yWVKw80vtbSEzW&-Wglzd? zBGCsa5Y(8%s+`c<j%3iNnr}bc<8h>saFaIBb$z?)92(0eUaf&?GqtOYsdX=I&sz=h zPb3FnG95BT`Fk(8oO8;)E=9AUcTuE~b@RuIZ9P+%==(Olleyxzi+}hPdaz*hF&5`U zjyoFp)Tx4NuP0(>>lCOZz}|2;%2aPX%%^K(i3qv=afpqt_JIJN_s1@w$gmL^@(MY9 zhA@II`j#T)t>qp;Y%PE3Gi)ulcz2*#A&+v#B55|0q&y9rJyFkHlyg*LDseHz3<0*M zaz%-`veYWjjK(<+9T8RRjon4N`<*D>P$wmW0N(tYQ`6M-a5<$wsoTTHthTn!H6g!# zGyF+am_(yFb2wzj#622GA4q>JP?ad03oVoUK<c8w4VoanDu1(n1CHi(fd=+1i+=KN zu5&VzELu5=y&LMsQ%6E_OD||Su~|1;I*fSPLb2n&oUxPiA#@Ks(flG8C?JLs*Z^fg z%voxGOi2uo(rvR5Krsyy8gps1sB^OY==$h@AuaXP7YBC63JPEa4@ZLfjy>i*&1msz z6mzd~rJT}qwNd~TBM|eD-8(`c`8!$6q7l^mrc71iKC{3D_Hn8ye^RuHH!W>i^ZfEb zDQhiMkWrl8GtJ^Yfg&l#Qd)OU82o?)hfUb{zp;Cs`56}hd!nO|1&)hS5=i$<&H~*S zz{$>97_5V9xzqTs(KB!7|C$TmEkyqcX0l@`1}=nCY3qtWx?okk^Nk2Mfz0;fI3jx( zk2s_}$J7MR-6T8Ccv<{2-F<7uA#wc+&TN})bTNXWG7Uq`n52~%1UVbDS@XJSafE4m zTFRw6u#*&-LAIv&5xH<FGLhcs;hHkTGymwCF7+tu8K%9arIp%pXk`YPp)Xx%*3dDX z|60d)%N~0vf~lA(p4CGY#_2~S6H~2waf6(r6}D3_Oa^o)q2_`|E{ys+23bwwP>gt~ z;)eW}?T*RJ{~~p3tE@*eW)(U?0ZFm~(JSnL0>m^t#ay?K0G{ejnL%&JV>OYSm<rVN z(Un&88>pi6ePUQ;0`?CAqdadK3LWyi{*;>4374vVUIlUTccu`g_6}L@*CTs9#C#D> z!io^iH_+5U?5oF_o?e%kNbG{%=lM(=UC--J$=V*iS3U=eG9%CmiJZ-hef+qJrNXO; z+TK`kF+pO9y)uOQJG3oOuq;6>#-`q~M3Yf(KUdob3eZ?M3Ha9GMF)VYUa*C^yK#<P z#Xg$=zMNusa_9pryINh?owv95$$^8<RY7YC!2}u@NyNbpMNQz%(Idm?VFVAa<t6tP zUCRT@9)?L3nbxM28h?qrcpv+RY^t|Jz`8AW(gqe3-)#Q&<K#X3zqhS9-<Ox)XJxK5 zukYuhN&j!3-;3tIse9*6xj*T3M|t{VdrN+w`s00vyhGn(R}T2EDoZbW|7Q5V!svhO zy4LzWy58_c<^R4sUyT0!S~2f~!arWSfc0bR-MYZe*B5&GzIy-ru;Ke^{gLC1%E#m} z=pOdQb8Y=a)tz^Nm7D6P`0s>$@8>^5B>xd9`G2kSDyIH{GIO#N{ol}@|4!`ucZ`RT znSu3xDcSxH0_;E0J^z!e`~Q-AW%^~&{+CoSE6aZX=Kf2%=l{fc{<mQ9e=YQS-2<wk zv(fqM7yA(BINSa=+ozK|kQ?L=gdODV?JZmg+VA^zfY#kV7+@fXdl)p_nmp(BQ&m+c zNkMhK=kxh?H;D7JT&2>iMyq9&dGFnKFVp#9ik|$WI6FDXNl9)}l9Td3Ec&{ApL|WP ze$Jnr=2|jdsgY@Wj!tBSIaSq-9=VYsJrkw9l&L?Mt!qz~^``7GC~YzA&vS0DwYxgp z{9ayXZ^q{E`F*~RPqy0b_<cW~pC$%)DE&4X>piJk@<+C5-vev$lYjz+787I^6GSE> zL^eZY&;%>aMawRyDpAMnQ5*-D^j7&qH%Nuo%I0a>7x^k@X&Xohny3pM2pSGO-M-yT z*K}<6ly*`^riQG8mbRX><NoIM{ah@S>HEH8>dH?_zNe*p#=S;PZqeWrzM0%eyuUzl zE=O=IL$<rhJz(Y_wUj2hqRvAqG{mqAo)Q+)hn%>Ss=%GF%c{7}t>|e6Zo1gEynJvi zr(Rj#<0xZ;r82hE`dujP8M)30ss80*vBTHH&h2aRG{4)Z`a>@MQ~Why>ieSRB306e zmzb#^7p|C^eoMqyL<87HR#aRQ+#1(u8*OZ1>HU!MIK@mmbX7x)9V2Y5$_pK0%e(=5 zG@-h7G+8^63^SYxH=3#!a&|8zb`!H*I#KwV5n21*`EycQ(9(MF@|pna9=yrZ45kZE zA4aFJ)CDp3S=x$jf5YSZ`mt1Z=vVlU{3MP(F0Wf++B%i>o*bEt&`6ccES}UvwG@>l 
zdG)#eVOEOr0yxF4w^whWDP4p0FlW;YO{4TWoyrhz$R1&~1bMm?bG#a_zZ!A89Ie0n zaxDLHtoU@U8gIN3bGQ(%zXEr<3Y*FVgU}MQ>@?HxT0b(}9%|}Nx(bu_D!m3@C*T0! zElm6kZNIPdPeHF=g_n)7-P6Jp`<+}mKc%0I=7#S+f`$P3l~bsCT)grVOdMQNaZIkQ zi_NSlK%J=zbD75`d5`cGv-~tos4n$=b|rmHBYlw;L7H4}zG7j!Xlb%;K%jL9QK8aG z6P&Jf-!hl!Uo6ozDb2FPsMhyGG|aEi2OKTK=}W;gm9KNvZw*#pQQ0Ea?Zz7-YvHGl zMr^W5FZ2L!4A6c=Rz2Mg5WI`a$@{^c9&~&8aem5wCL3Nqm9@^(KpDcsVQ+fl*<Xdg zeWh^`;$rS*+8$>%a!<+PPE}w;%F85?%G@B!S+dkB{S<=ROwt1-IwL(|n2HtqDm2Dv z^p-iah8c8bsYF(Z!)4g96<=Hj-du)*Vh=`U6G*2O=rv3wH;>0Tk*WwOTD>t`KQmap zF_GbwQLCKu)*P-x1|s|OaEJ`i@vX7!&$Lj}m($hQb?2GDRlt+*z<@j#dA%)8=654k zf632oi@(gUpRQfK72DDkF)C;R6&8ObiZ}>LE|lmeZOT?-H$;{rvc?uE2q{l`)>#5c zX_}IEDp$HMlXWKPwao4>f|;qkmd4waq`4O+IvUH}ovc3~&cVVl!E{+5e|7xRTEdeV z;ls`jLkEO90k^UE=0oKG1xas2+!kV0wj&P%e^V=vw9dYE$-+SAh*te1J%y@Ldt&dX zXkkVdK(Q47Dp7m48w1WTm8$y8`@n#~Rm=IW&;itSVShh|)77K0I=R^+Nlr7%^PGe> z>0(vE#`4cfPLffySGQ=}6xZ3HN?>D}Oh6PTJQyn9M79pA-IslNlj}$KX5kA|9!nD) zjbvUCSfCc2L|r24LFcWetJ;+2-SW%q>bsoUyDZDDP5}AItucC6dDm!~HeGECy!4>8 zDlRf8tUB`4SCC>hEie>{^<k*<kEMyOM7?@Kq+gp;gR#<MTC>FbW3AegC5SKUPxdKJ zw7j*bQpM*2H6f>~kV>M)Zlz7Z_kpt_6F&pY%eTjG_B+1p?0LVt$Jg)AfrXCh$Jyn> zDNNIx4b+{D6kd-NpBr^D^VHVd9`Bg5CjNh4Q~k%G!X_>Gj>Ywk!MYlzl3OVAxl1gN zT9f7oa)ft+Rma3OiPgd7N>A0O%#ub5_r*znjugbHG(<Z@R8iRCn)QAS2y&&UuO1?6 z862fm8KBeV;gMEhQRX4{S02&lp^;c1P#GXl*<%u$WEET_Rs~EjK2bMA)jdd9M2=lg zmg87-GOn`L+nG85cL<xz=80RiHt2F?SCYO8aU<GU@8=2*iH);>*NcYt$^=IX!sYo> z)et4?E~zbZED*JDbNx!%zsW54f;$O**34S<`#2rB|An5+T(t!tb^v8cP;r7(G*gFU zW0!Pu$0V+CqLLJ4(r1|g#FH7LT3@H2rYvnbnO?32?F8Zb-9Nh;ZTJT&XQq2piz;ZU z{<T>fb`!fv|Mg_ccSPyP=I%+XE0)bo)lcd+CM**JXsM<y>ujsl*0OPw{~VV(dkGot zM&s?W&?8l6a;&-wp6*3w`IK9Kj1I<)C?*Dd!vURho-F4zT)976g*{n-=C;QmG(qwi zV3J$q(_G-x9pV+=E1M^&pC&s@RCkir(6iTDdOJC2nP>@f-s09Rnqq)Te>;Y_09TUW zL<mn`lHh{65*Hc=n8l}pVGj(2+;1sI=?RuKjs~6#a`q%R2bcjrsk`mwZ1Tolsgj@U zlkV_23n<T?xp?{rM#r5f(WN*>0N_ap1aC<sXP7I#1)p|42XVXrbG{0>KM#Y#5FcMB z(*Q*+OMZ!-wSAVe=_F^XB5afamRhn7?ILe|$}F=Xz7n{fIN>cn=@}R;_gaa_Dw&_? z2og<Wih_`^w!+u5>|r{=*CeUCkt8({>93IMuW$?%zXSIr&i`%>=1nLj?udpi!3op? z9LPVOtiB#BIGrxI9IiSTE<1oo(O=5Y-pNo$9w<4OtvZ>hLnko8p)$uVyUH*|)VaXg z!%5!3RcBM#W&<9$ILzsp5uX!3wLe?cXMUM6RevHvX(2#rFi?C>4BQcBvjT4|_gooZ z5Ge`=83I1m4$+OI>`>d_1wT$}o>_L2>+yU!4Nmn7db}Q?Hnf!9zqu9-Q%1-2ZMX1~ z8az!-KcQ5NQYJd(4^PiQYa&UmCrok5k9RIneK1;sJZ6PJW{N;!i$Y_JN@)EJ;4=zt z69U8RDlW5uY@NewKsWxdO<Dx|BzHQ$K})=DpRN%797j`ckxgxZ2?s@)h{#@iv_^24 zM0~hfqQAl;a_+pkob+83n~IZMO;w%H*4$E<9M##J7F*rND>wpqaC>EDa%N|9XlZk2 zXmkh|uV9e+lnz3f937aO+}2v0mDvl_nA_6l6Z&J;=tD2DRPZvk0qqBLcfxi;l{hAd zTK()6=!^PqE}V?*D2j^}7kL9^7jl%Qk~CKYCOO5gzZgi1P%@!mhGbo|l&h4+Xcnlt zC>pGa%Z&P88WfgaY`d<{=Z}%=&+f<sZECpQ2fmToKU%w|9{&SPI98$kc)`$k#ngDs z+<4iHc-@S6QNS1dXKD}}tFpJ}2$E{cWF8KeAkk^#lAFY1+{6^!#TFiCmmcUhn8EZF zpyO;Q)NCn^Wnc>vU6kq~=gYb{x}BIgyW~X&@{^U&8wAElr24AwetU1V!C18gO^r3x zmBEeqg_*&bsoAZu5&nK+78p#-8+>GZj5LhwbfnB|wA4hz1SnQyD|aihFLXT^ISIIM z>BpGZzp>%35HSw0FpkeJO)t+(&VuJ#oR(Val$cvO%g)^f=QTF?=Ez!z`dk`oeJtG$ z_BR7f+T8SGsdu4g&AJSb#7J#qXbl{7nB|9RCAjm2u{P}nDY8ZTvV?b(c9`&d_$>H1 zzK&Nao*OAQU2*c`@1JOnoYp{WDR;p$vkn!=3(|^?Ra50$TV10hW$J2cu76>pUtadw zm*<m|74Gm8E$z_B>TU-qOeBUYA#{WMbwdQk-?9s_>wLqDZ3`2fGpUG#hj_L3>U3sm z;&n%)M|p@^%dv?j{oA4NK+Q2^Av9AUIhN&f*n{<ksxOHQ!{-}CH99-E-`?88f<Ht? 
zfQAw*GafkKDMLs#Pg34j(ju*{er>d=u(T_+w5qhWDz&#NwKl1;w5c<-Cb2U2buE%~ z^-|PyQj_&jvQnVGi8u&H$f;(DSr$nNFg)Wss0aI3^+txZl;+en*QK{u0W-Fh*12|= z8(14&5yta&-7yk3h^!Dq2+Sm@%xKGTp)%_(wu)az&X%g)DLYI)OkITBwHH{ibGBC8 zOfNxl`z#R?Ku=TV8mkbPB&sev?QC{N_I$nmn!4V33pf=uC=Y^~np5e~0+DI10~O>i zaEksxY@#q-gR!mC(Y@30*%LCclD6AHsEgzVOhW@qJGIpFxxX}NoF(#v;PncQ2`jS} z$p0gPLV%T%k));r^_<Giwx)M$adUOm>t?$@8dHUQv{qIoW>#vyo!#O1bQ-g<f!)Ne zC~r3#y_=1hn~Rm1j?wK{^!MW>HXz_MSMSY6(^y$h*4E5YRLPgnhK+*}6%#WG0`AZK zLz~kRL*pL>=7!F4b9!@3BV?sqHQq(GZoW<z{P8@U9{hwYx^i@Psqti?ZLLu@@cOl9 zOBJUY6IFPkB-gZhDgSaDqu`H#lk)D|=*3%P-=Ey+Eq_Zwqj<GtfqmQX%<z`_9_5`b zjg=+^{&uEzhdv*hDXQW~mFdW6`Mh9d*jC{CBuNb@tdJ@KrWfz1=kG{YA1G7X!wZva z`G}2qiVg{^E-Y?BoOr%m!-Rbeqo$`{b7JXBtSc@qZtyy|*ciZFW9O!$C+8&Sr_Z?c zOPW(imVT9Gzq8YPdI9=#Y&^TGE8w33@DsIF5_A-C^W%Pq!02#CNSKzFM^<OI1(vqd zg|<{iQ7uq3)3q4gyuPlN^Y@}N``THXjgZvEN#a>3tS2jwa?fS|xd1A&R57Zf(OTr{ z8(oa_9p{TlC8Z4mPs=$;zO%cr?r2sGEG@p9krY{;89whXj$ZHIJRguX;_s!^=eU^J zPCH#=yx>fnG`Z4H*+aJ)Lq$AfQaUDS8Wvy`QAIps8kf>6q2^rOI#c&3)mI`P)7T>Q zGAjcG0T~+^F?CThkUxkiSVS)$D`d8x*dIGMUGuS6^)W@)+p2%Q7nJipwXt!+>Xmor zK!1{z6+utQnP{i@*$@$t48xwm=Zeg2sm%>a3}QuwRa$kl_5ALycXz9Pt-w-RV<WLb zz3EMa2<@B)cvNvW%5QfZ2Drk|QdS;PU1gkO?ddEsVyCYlB`v+{pPoUA`xF(|o`^~l z93G;_mQG(j@xjgPRR7WmJg@y$U&LMxc6IfZEG2<y@rs3o&X8G|_+?y++!b~fKe09T zYz_W|gk@ZWNxwQ}u9;D-Mq&k>e4?ufEo=!datkYj8LFf)X<_7Jqo;Ow`#3dQ>jSx; zgKQstPkTbKCB*rxettCm>ixt4npW4Bfw5qx{B+##JaK<SqLcjUTe{HOeLQ|k$1H%j z^v2TwQUZJsKa-zUFB#UT$TLksWj52|Zezsqo=9=nIy;n-Zt^(abVmUXyCoH=6IT=O zotTXn<kTNJQl3(PXjx%t{#r13{T^Fvy)G9})~9Yv-jp4_%3Miee=R`Bquy#CypZIe zYE_l1miMcsEiO9B=14r0M!P*p!t`TXP7;^!4U}*UlW<U&x0gVHryAlb8=_0yqs&HY z@LJi~>Dp;|TJM<Wwewh9?fZuqP<<VQ=d^-}h$&!T1kj)}zzul$nP7`GPJ-ph9p$-A zIl(=hi9vFfE+6lcft{YTIKA#0EM3LMIDS$iIVvN-^(v2*D$iBPjZ?*Ok6h)KmA}*x zv^`)4^$F&7V<{8J**|Eej;2Am`fY9hz*hhK$?dVj)dIcigy+dNwIU@6GYkK%P3i?< zVydH*Mc$V`hK3921iuZiMB$Mz*`Yv#ixa9+y28{&g2gSWQ!x`m)sa2X7Cdz6T<vM+ zbh^^P&N=uH4p!lx<K*XP=H#dUp@sUnvi2P*m;3%6m)}x;s@B*dGYFTVq-9>A;@oc_ zT14BfG`7dEwlX}(T;i*(_<ij?QIWQYiYd#Dv%`aCg<_6W<&e{9C_2=d%L+@msyj}< z1#0A$`Sf>WWoBI-!b=-P>g$0zm>7KD&+B(s5ET5ecCcE~lm`UH@D*6AL*={T1V_i1 z8vUL}V0g938kC8`10VIxXn7=qi-Y6q6Lh$=%rteijU8r2&N+V1!8xMzSe>v4Ib<t% zb+4pDhqQN+vQCn{mY3txd`wo;lJt7d3<n?izbgm-2WI1T@TedkkR^=qzU@8zjcq73 zelluy0=fxWlEKxjx#0!Pk#(IZzWQ<x?~~i4nXoy09xr6&c}c<}@65UVZn6AG^O{7j z7NWw(>_m7B9@8|LaI2}V)0uB6vQu{X@Q<ssi;a}9IJdruMemR2i_7aApOCr>?Pi#o z0_6wPHi<Pz3o|t_Cdv-+V_iJ`j=H;)nHqGNx<h9%EW-R7qtn29n5bx(DG<mI1k^)i z02@ya$D^OumkG+aeiSrBVIogUBR(I<C5NnV>Dvk0TgjS(citLcW92C4r6}j4f6+1T zT*coXH63+%-mM?NMMW{XJ0WS8iO`Wl%1rSI4Y9wltX*!VSt?bjoFPJ09gjxeKNOHX z?uOsI=NsHt7|)eI-cz)VN>l_jN(&&RFpbC{ZzV^K*XTUA#r13fQpK+;5bTEy>}~bx z3ZXL-B)Nb{?MQ?*&dLf?U5|Dza_VTvxh>j95nXQrwYvgE0bS{)yW6MeNGVF1zC{-g z2YU;ikkQr3(f8@+b@(e=Uj(fvp6|`ufqU>nuyw(8ycw?VId-`+U}}v_;J)C;ZSyP^ z2diG~{=JCZoC^$`F%bU1R>7MIqBkY1tv4noG9koTLTYUM<M4*}*D|&*Np|QArH`<P zMrfi&YPSVw{A?MjDE@DJt2AIzl7^csdP0Ni*|E^gUU`1wOnI{jKGYjpa~S(8J!jb= zwP8xht*$ijmFEIUbFY1AtGU)1w|YNCqReiP@XW?I<N+GaZ{`Q~xXbDOGCGr$9(-`1 z;Ns-ydvi->aB-SOLSZaAUMN6h;4nZjOjx?hU-g^!a@YBmd$}VSY0J2KF~Eu1Lgnmk z%$fgupzeWmKY{ant?Ax9U8TeSWHMz2$w2`tOX?bHDO!qHm5RGJ^D^}>Z>SC}|3)&F zzMsvQ60a-x+!542&X%8fXe?&&&dLHR6P?LJ#Bp{gA>C!sx=uzX4kEsJCH4n5;9cYl z-@55}8a}`6H2(&cl%1zdy>}I;U8Yx1tli%|;AP1{`pr61nzt1MO`x#F6DK<)F}XH8 zJ>Nt|0=??2EP<WK{u=<|Kpnpq7qHU)4f*T$wxFiuRtiX25HiV;3K}ZfB_NVo0<(KV za(eR{hlZwZVlnyi|CIT`nIl3AK6KTdp`n_#%ej>`(V01ZF$pe#p_Xo5TIN;;_8w_f z*O~_xhh}d@<aG!d1fLZ)V^Q=3sh4#sg-1c-WR$me&5A3(9++`i&o%w=-is5S{`$Z# zZ0}%hh|zd;rRDmXp>GkDB2vz&M9s5?;-cdP<B;i%hbfhV+TMAB`XQ&JU8M{io&0>k z3}ls6w)FPixH*GL2_7Q|-M1o3+w04>*0YLg*~N^&=aUPYsyV0l=k$c+U5PCntnVF% 
zmT1AB!<xD|GO_vFay#hcKXhAg_<2>A5^@TH6H>tE*m(Nr+t?d8BExbzid)9(dv6A$ zH=oxFI>o2YEbk`oQX%bF41&*R7!TfF%`GRnX4E^W#mqDN1gXDx*?IP+0lh4)ad74G zjZJW;^eVoRHn|YAo|ALu)$##LlU+A#99Sl98g)k6S>D*u-Wv&iCbzP>V_*P%83=PR zXzn;c<^ClnXs~i?V{&oM$uo|c2Q|T%LEgn8q#~kdFrsiE<I+&i$P@_v<RS8LCG!vF zYJRieqm9te<YawkcVP_#wnf2-DITGbHeSBERyKO}UcM=f88t(BjU#rUWf0PyKCj26 z?l0$BA>~jc<y69?76_5PQ9yZi<Aj-SaYTCK30HsF^3$iEdnPvb<JQc&F(JbUQPULt zpjI{&A66C5%8v2w8*8=^m&DDY&&VJY&72&N$iTRy!b^2M*M<&z=HIHiz&u2_{i9>L z=KdsXN(7wh@;0%4Sv^t3SK~^rRd$RGPv1P~@c)-CVC`@1>cn_ccTaI`LrOt$NKz_b z4^ge2wXKewms?zYOkq!WZik|M0;P;I0l$HuLA10}sib`Ykbh3uk3%QeD!eu<zhBBc z8b-1cy8faUr*7@m^etk5FYlVWU>?h+8KM{10wP8&>zq?}ZFu3XPx@ss<H*xe&WdKv zPQHF1fDo|`jNQP@A2^PR9(gTkdU;{v{&ri>fSR5M%H#{1YPw~H7Y)XhT~DhS?jD|= zTAZEya_M&Ln=tk_8hEVxd~n{*@YH1cl`EwUjj2T?Vae(HCP&j2X%$lLm)7DOU3JkS ziu8gFA-{pBS&F1X@devFsMLx`Uc(57*haIUN)@Y^le@+K1<SqN7hvG)(Q#|zwN-78 zTxN|RP2WZVy+~?l$MBrC;rTm}MFX0MEDAYi6%_Ekz(7GwEkt|=4Lp|g2sRs!di=`& z%zb03XV;cD@81fJ$${|pEW0YBtfPs4aa74ra`k9M$M}`8nW=@(<l~=j@D1DF%w9WS zf#$}sbr8%%wf6pjvc~4L;!^12?r4*vZ)>k%?P=tb?G#;O;+My#7j#y{oJ`nU%raBL zHXoQ5HczKk^bs|SafodaFp5Nk6rV8mmnyH{{@6UUidp6lB5xj4&a57&;nOJVkWVG; z6qeI^bMtXZ)vzJ5fLzX1)5_5`ATS~|v#g;Bm7+Nmv3?s`;tw$A=kO^1%I%G{yIZN4 z#14xhi-IdMy(6h=G_`7^x@%&1=F6qq`7iYt4r=@uuYdg<H9xuQ>eb5T7EpT#UOd90 zY`p!9>>N}r5HJRr1{A8fq_AuFo)I*rxnK{>i&|xgS!WBHrhwW@+9aBV*05>@=2e_b zw*L#|YObL!KP@&4uC$MA1m$!ysRzhAmumSo(#jyryffDxJS%RWagD7bmUR=;b#(R* z2usN*s;vj$r%;v#-+Y*3GKQs7%%d`g6Z3QM6>E1kS8i`2L(-{vHHZWZH4y31r6Bk> zavH|EMrLPM7N!>GC-$e%&(GnSgmA>5fNxg;`<oquS5e-+5QRNj?R^aG9OO-rijFCo zZfR1MF|-QqXZa2J^diNqbA&B3M6EJ;^kZn1eN+%xG7f3%sy-Xr_fBa0E0mQxyO`u; z%g|bR=PW92HBZ+IZUY<7sE8PtTyGy<PprJoq7lHQW^e807MzfhS9J+vX~uthGVmZ{ zW7viR01QwEhNsYVdU0WTX>n$G`QF}xp26!N_(V+djPkCYscji`<C(SNtygED!R*T7 z^b)|o03X@s{|^jlVS`|D8oN<b6q51_gW{7B!Qoc!UfLGcl7`+gHVN|f3H*A&#A24@ zq86f-nZg$7f@W#L=4lLS0qi=#%C1?A8bP`a@h80f%geLZ?;BA!tXqP|wvi1(pF&=} zD4l>7Ny{{n3pRmijT37RLF6UPV;Gej%$!|4!=tjxD?0`Uf2)ulXXe1<x12Ab>+;Q& z<+;_Bn=32pcW*%rj>=&cHaNqhEomMZRydGVKT+B~IXJnvxUo9Bx&j}>)Cn~F$H^v* ze;v9WNYsD3hcL9&cXSn0Uy94l3y4j04hS`KL1>y;i|TrbnnsD6MldLQo<6U~DCY(` zFJPJs^}JC$xts^LewesbBB`u<R&o0Yb^qEjIK7Rz<O;4Py=ItGJ4D6vlBQ1sm6WrZ zL&C!Cy{_>sQ@>&|87CbZM?_FaY*ub-U*E6S{paS!jyJ6rrv;jyotXbj`)6FykOSd! 
ztIKn1D+}vui<r8xKEJ-cw6(eOeDCJ+ijJut0gD3V1sm_Q%Q;Qcc}-JoSLfhU&}?pP z6}B+5v;<q5T3np?vMlBJNz~6*8NQ8(QI7+oqnFw)qvl~|=J-U#*&+Q+oLp3lEO|6M z`SnA&wfu=CYzWTlo!5^B;Q5Rb1Wc2dHG)W_5xj<x44T10TK+$M{CEQ1zp89Kc!~mk zaHZ+$Dm<z=XGmD(fXK6{_;9IvgR2=^eTYnMrc!iQG;shMADNa3aU2A6`+?_I(_nt% zJoYj4rzZAkUtR`D2hL|#SLfGO0bNW5$T4+!duwHT8!9~CxO;c+)zixQP6aJD08CUr zC^)w_uW7pM@^t_B;_}u8hJ28H*d~Vjso!FFaM^pz$9u0|uWD(_DzA)6&%#`*GO~A6 z)Hml;ab;EarIB|#Enq}_!H(ZF>AX=Kk6|pYVJx-07l{mlMLUF08sQp{d&1toq<r}J zQ$s)MFVuj@caCj2Mqc7ZyBcxRBw`8cq|%<{drwO*&v0l3Na$GGdwT~br52;(R;-Dd zqxJmID=7Kh_b{uV>MQeWYoO{2Xh{dKSGKlL6*#}Oy?SeV?e=Y`>$h)xu6J%fdbYRs z@@YtP4h^3Up`ekDS59i}cwX~VWB=UD%IeD225bsVK1TMjkbkiA9_KpO_?K5Y2d`E% zwWJr9VP+;+d3d8%7wVeO%Q@01xRYMAJHxBPqT&OzbL&NNLDh{Qx#&bF<w7Fwbxs!X z;Ni{*fq#Cv{qWiTL^23ng^i=-UCK3(_2d`qRqbK{`0jD&2(?^l4i>Ji-VrewCFO9U zze&X(md)|wspte++dq5twUy7pzPW)y9WCZiQH0+?)r~uM4%W@Pcj0mCF1p^k`|{n( z*_Bl#Ed(K}GP{~rNPb^o>&^0xnGx9Nt<B{x<j?JEK6+$9Um!S${Sm4DU!Tl>zE;)R z+gnsy4`$CVCc)7^*v!>k&)Ql@)r>^aiCoI@2LU58Arl_`XinV-4xMlgoiJ97pfh5& zrzKtfA!g?rQFKD!pPrul)IPL=x#|icZyj1CY?`7Q*n(Q?C*#!6H$AcbD7SG;&>&dV z%pPofbY>2i*c&%*{*%cN6z%iqKy`5u?PSoje-U;J@T<2_GQLmv*1dc9>&Cr%_jh-m zy?GH5oeP3bCT8Ih-<aQev#4#RV`OQ51Gc$^iUk0BjL%1tKY`ch9NC@W`QQzdu?HzY z&pM8>_4YG%bWt<5Ad|Eu5VJllVgXS$lad#wZaAA(2&-l=Jklv4zZ0?khln+kwA1dB z=O-BcnPv6%PFcq+<_c7H!<c|!q_W2)HIEuH2|I_7qFXyJn}=5Qyz)TgZ9Tn$6H^Oo z>ITsh41V9N-SGqRKJ`W6!E(%@<$a%aw6K4R`u2kd$E+aun|JPL8~YG&sEM0|CsvPw z;8%6ejxDaQ-`-x@*noeb!mx52?2iz9^ke7XAo#uHg5uzWWJFMymAkj0jh(ob)pw#c zKM0weKCcH-&#oQHq8Y@j9>Am?0FOj+p5F_apMiijwf2O=KeMzCEm!w0HV&?U$m@IO zi<qbB1frZgy}ZlSnaz=<`>9n!Y??kA=60UpQK^L`=v)mBi;E4L@voO@f4=9;;sVI~ z=U8=Zb$<O<GqX=SHq;+(KYVoi(Gm3)G<)#i;gcuN-n?{hiy`7rrIK=RPi`scoUiJh zzdpOZe&^Qe_9j~J*D!*Qj%^ok1-3_LI8k#iCoZ*j<W^S0IEajjbMy@~bwX&GnlUIl ze=lHqnpc;QSBFW(msvfKQPrP8)t5m9NvZ1pUvl1OdGz>ngHJg8Gey1#{ikHwM>e7g z`vi^S)x7J~z3Zr?T%1CSA3T5CJHDZbNEg$#wevzorDs>SwvWtA|JKaR*Oy}F_T?R& zf&47*pPdZav7^oTSE`PI`Oc%qQ19*Rd{gfqcz);cV^n?g7}fsq<F_B*w{#Aw>Uoh0 z8VYFpg%|c$bS<=ATbx+lSiQ9c-+LgRiL&<xX677PY;{xxsmBDfVae&JISHs4Xm;`j z7N_MsPw^WOuqsktv}aKBr&mGJDS6W>deX|fpH>U{Pt~CRqZSfX(0M}RpHnV%&sOy; zpp4D%y0Sy6lzpLg0LsaqlW`nfytDc6d30f~fR>xFgOgWybY5l6!06~#q3gG6H|CBq zHlKmNxx6&Ly1IBk-Z8X8OnOk;_fbcKy?cM}bA5=ZPo8|Pd%KUG?7zE*uDkbkcJDsk zfzR%}eDU$qj|F8-3_|)O!lv#?&Bd4J$~)(-Os=5<<SmTgqf$CEOG}s>K9>ETy?Lb6 zLonOi(_46{E+MxdFfPg2Kg7)0P1nMLPs`~)ltWGnnv#oKpHuOnQShXecc+$fqn2?Y zm-qM&gZTez5YJ_u{OsAuLaIL@UGp@zaiZzkYTNL7TFtPSd9pgPLBqF^@}i@%b<~4r zZw6<!HC)pr^=uq`{6M?vIy%SZ=DxmCdEBLW<6oy}Q1ZUMzO=dd1^8dh%t6}k?K}nz z-{<_%(<e{?a(I0F<jKy{C%ey{?L2$BgL)nF_+-BghJ5(=U9{}K{_u8bV^dny3BpM= z*R;a++49c$wxOlPO&DLczU1&%Vc#&=k7Hwg9PetZaa6i~sH&wEg4xKlOptnO52T^3 zgOY*iY4yPWs}cFVS}+Zob}Cs{N*NbQDF<>X$1{epWLD|K<|)M|_rv+$FD--fC6`gr zYy0S?v0sU#eSxNLldN4Hsf0~><(2zSUnf@#it2kC*g1m8rxlm>4G(`6y8b4Ig9%zO zDH_xj)l~>24=VgV@VC*xf7W)iZa;*I)^ng5ML4EDe+CtpKUgvRqu_tC2fg6I9)|oq zl;?T$c=yG#-RDnDZ9@oow7ArfA^ClPeSQDJB--a-)E?#SmzOZ9`{Vw!FGB!Y>Zh8z zdqC<Fa|`@q6H%_-#ZAY|oL}4F|Iv*851l9qd3P#lCkmil(w+ic>9j+55Jh~5LaUg% zUw;1dZ^qcmmoH{#rkd)jo9e3J$<rr$|H_?t^Dd|U1_0kYv{uqKEoPRW;#sTh-^{Ay zEu`(c_Tbt0@_j>OzLcJgy|)jDe0gItDnB`Qcs>}GOF84%=4GLQU-~?S0PW#1S$B+| z*_U@T?O#01lh3LSFay$0(2&F9lNT>=)xGD>cb`4q?*b^<N12~Jlr?(11B1oSKYu8y zXrbZLJ}Y2o98glyF%Rx;Xm%ZC@6l>Mhn}c|TDm?rhieH5F53gx_Y4hHHn(P$R)nWy zxP?Smdm{C1?UfBo2y{dK(>R$(E0k2)l|%+XD(g-z=Se2-MWY&^5Y%9l)MuAI0O8fY z2kQ6k-nOwcKhMieO-)5cMoLaWMnOSNPEJNiNx{X*uBonCUsLht@737+7RK0IzOfmW z({;f*3tWw=do`h`rF&HQ?(6qeoi}C70(7nH+(ROg3yNF&`hG`#a{nFzQ;Q1_B+jm` z%zXv?XG^pF;Qm42F?Joa8pACH;Dca4ef8?e%U4fdyo8GRFREhRefjFwmd`(vzi<1| zK7Z%ck8fudS7cP32sqTFETfAq&t2+U>>6EJ*xXv#+5ol3IGm}kXXYGdIqmp7>P~<S 
z=*6>f*?GvwSO=c~6GvAq6Eh(L#3`#BR-+Ia4lyzo0diI$3RYnXHepIOVg8FMQp!5A zs(NwpNq=|R=VxbRq%N|tGAk*_y16)}B*rz@*T9R*i}P*G^_i(jNKZFIJsn<dE-p@v z!rZJs^Fmbh<%w#DUazeJ_BtK~3W!P&c|PqBDoOjUktNW1WKzAHp`DEfDkooB-*|Om z99JsxNLMq3o>zdn%(}j|fDT$$Z{6CDSWyc9a6eGpx&LGrt?gf<{t^xOt5?rI*Vivz ze_P>2%)6)_@#4js4<FwC^y$rqk5FIz_~F(2A78%v@%g)V&)&R!^z_;8i|5bZypB#P zV!vQcC+*;t)KuBMP~X2eIJ1WK_A8hKCZ?MoXX5De%F0c!Mr*6nt1JCC#x7m%%qp)6 zO-c6%kGAqa>RH<<8JY{Z7Yl3IQBsiESeRIt8k(CJV5*ssp0$N34;KeJ7Z2?7-xqkG z8dRN$iJ`5j?uh9wAAU44&^>+nbV#7zpR;c}PhO;yU&TbPDOE$lrirK&rB@xfxQ(n? z_?^91gEP1FT;p`C>=41>3AqK$J-wJ)!Q*$cSi1uzZoRp(GQYYCt_J1dQ8Q%r75)Jx z<&JUhU_#JA=+lG3j*;<i2!Hwd?W?!HR(J&-;YIYjZ=St;+0oHfU0GILSz1+5T3KFF zSpl`QysWsatf>F$z{4lc!0o*M>HW;&GQXrb1hyK8jQrLaAiraDWqy5qU+r<tn){}+ zmzI^JWu#?frbA6hO$m#R^78k$M<7h??982AO`P4-j7$uCl2uF*ob0SvPl}tfJs~B1 zLEV*Ke)(Hs|LWCCIyzdu^E|-wF+bvga6WVPOm24ipRjvFlPkIPW59gZ_?A;-t+-V- zM6c4;S*L|eeG+PRU%e}Bn^LmyG;=}tM#tw=RP`T7QI7u>oyUkgW@^FW`uaCP>%NBv z;2&au2S0<(#{3HWi<gI0{UGg!>+5&#clP#}7#WC&iNHpFRVgXXoH-*WEB)%jPte8g zJbJYA;@Q339n>0gRwZstpZLm=>fXh+q2-$^Yx8R>5YJ+aJ)WNloqA~e1|uT_2?_Dx zuRDA8jF70P4borJ%+kcw*VNvN0|)aSt`4UN$<<7dFJ3~>@%LLNE_qRclY{NIeo9MS zjhupfacTa4d-qD4Mk>4KA%KOd=9;76TBYIF#HH#>A!*mpJH7end1zL%nwc$lQS_YQ zR@9u~-+luD)Y1=dHOtGWsgi4`M9s?97RJ^5nx*-o@L*{kp=sX-{^<b^bC_z(Bc|@Z zf~jwR{P=ixmxhMg-^c6OlfAW-rTzN$&1)Iyi^9T!_xJX;@87%k7&XM)f3o-T-J6)? zVk%xOGBFFM*!rrT#kwnt*JoGf);`Znz_)B_aiMo`fSZf6x2yg3_U5l&x4QiH?HhSn zDK1_fYcD@ND_d=ACmAhEHddy+ok!bS>rl5g)-V-*fvuI<86q-fA=Rmy6F+_Wn@N3N z_hkZtvsnH4r=Nbx&rbJpcirB`a+l`k&z^!fSeTjo_`ms+pTByWb!nvi#%9;#t@PSa zF^d#cWP_?tBl!hKA-$ljy;nDH@3}-2=vv!*ghnRiqoP-^sY|zS&#te7Yn|WPTG-wO zk`LW^a^dFs8phLNDzsnN+FHJKd*#mE#mx;+_}e?Xk6*pH|KusuCvSh;d;RV{DxLV* z(xAdsaM>R|f~S4|)2AOl{rvGj{pshQ-+lN9Y{L^!L97)Tym|*MKJ7hw3d|?PMIZk8 zs>+J|e7yZbgEyDvZ{2?Y-;TZ(@bKNocYW7JImL|6oYyxGsi^2)fOw~GVr6z^X#s-n z+qdU8H>Z}D4*MK<5t!%Y;RbXMf1QS^G7AU0k-f97m94s&^?6xMA|fJG3IknG|Dr2A zAtE9qA*W&xQ0VUM#oQ_MZ&VERb(onLvA$(%WdUO=0U-g%0@mLpFDuQ?&bGL+@a{(} ze{6SX@QSy)tD3Un#S7wa;-JD4O?A~Ef1k<m(GMR!VCiL_ejXa=t1K-nFV3$hDX1zd zOixXB@rm*aON>m&GWSRjw<%DvOXU=jC8uFzWMUB$6;apJ(=c<jaPta?P0X*Zy*4#n zR9vX8uEx)I9?q<YsIZ}-L48v_P`wWT0N-_`Cp{xAHa-?=Ok7NCTugjITzq1DVq!u; zLGHrF1}HqdqNpGbyA4~Ck&*KA^I6^8eE$B&N9d$F0RL!rx3T_GL0&ex<`m}d*X-(w z()G<vV7|4vp{O7iYH>jx<_W6i=aiQeZfvZ-|M(MhmYpYixR_U0QGz3!nVWs(>XnT< zw;w)1W$#hL&F&s_meI+{^CD^ltO_bl$vMqam-?40TdyjsD66O_d3d_Puby05oZZ|! 
zIL4uw@8{*=!pc0H9a?&NB^^CYQwuFKOA%EA5+3RA2r2*H@6Y__|D5`tGsNG0t|b3B zO+-LJe~w=<yRiBjHuiRRN(*z%O^jrvCB;OA`FMFG#6@MKE}9t|mK5ebe)JG4Z1Ab( z`WiUt-_)|Ayt?XgFq_aOEjeC6PDWS|&MlARMR6BLyN3^c&*F4;c2>Y1>s$2nbX?q= z3X1a3soPty+{rB=HWDJ=;*!F<_rLl(mAP4I%*>3iWi=J0-~iu@)Fk+^a2#Nt{k%OD z<z-n}m}zOLT^#Jb+2Ff(Zy6ZqdAYfuH_1p#!pM%UlJHBJndqs{F)?xS)6&vfn;VxG zWnZc&%1%!<($}G-rQzb?iOtT>ud3$e=A@@TXKG|nn44K%QjngKpsuP+MMWttAvU|V zhB@`1{8m<$q-3OWGE!f@0#)H0Nl8jjQ&TA_$wPp{!_5g}Tq%0b1O1SqB0{hoAEf82 z4<C1*KYQ`^-Q@Hn6&01Zn24MVW>+OK6?UGIlETBy6<~&~K?B*Z8z{(0v$8THTpTgF z0pE4%)DP*&30NWg{)Zo^X{b3k*stFh-hKJ<{?5*mH}9tBW+f#r(9lv}kPvZj^rbkj zOD=Bd5M5W@w-lXTX=mpEE(J~j9L$i=;Kf_FFjtmGrY3}h1Ynn-Vx>h(L+!iozb7Fj zWng3ksTaN|MM*^|BP}I$@d6lNc*Im$8ENqO#AH<L7xg_tG7lO9eH|@0v%*3GmZm0& zaZx!LDZni}#>Yh1SelCn3)0ci0I^$}>)(8JW@-{KF|oAd1@y5(Rata}V+9)&;6D#I z!3X`kJqq)(%8K)n<D*rT6ey`Ee7xL$EAs$0oY2FsKYRKFj*O_Nh=GwF;E|P<e7H?# zdow91NkE|Q^Z`?c^l&?M>J(7={Mj>HU;60LLq|Is_^gSM!NJ$xym<qPmlzv$_#@hy z>i_+Ze=sr9PmPa!^VZb(C>14zu!sl~GZQN-)16z}-!uU43dhaa#R=3N#r&;X28IT# zEKG;L<-z@Xuvg!I|GnwgJBBF^w$_A1gx$Se&)>d#`R@J5jS=W>OAE7yTk@ahu`oBi zIe!yZ0|y%`QxhZ5Y8GZDa1_u(Z(}o%!`B9&UqfTyp`)Yq;ghE?KYXmNDhGE#e~zxI z?DGnazWz~GA^o%5TH0PYC0z^cV_WxMynXiMF$@!ME{u!}<~BB9bh|!22Axq`L#?)| z3~Rn(cnI}%&h|FoXS6M>1;lX%dY%YJ0uo9-C7a-ss*gYY1h0UALq`Bq0CGIM4fgi{ zz~G9T8!jDu9Xbe{dJw(CACV9j`LF-}ZxDyWZ-JtoK7BelA@=ydV;D<_i9Gz)`q~O; zxSYHU!rcWTdaRpWU0#HPWA9+oKhX2;{X5j?lN1kBKsa`sZ|`bvCL$&>Hq`%he_p=^ z?Sh|wxG{{C<m99Sec10W1jFACKm5SRKo8JjZ2+h89R0c0u8z@}DbRUCBLg;8=EKc) zcOJ2`vA8<f<JxEF%Mub|5J*0M`}W3-5s>J`g}KAc;IwLLsMXb9f(A?28bFs<S5p}o z9|6G!4LoArMPn^MURYeXdTYD0yAz&+0KR<r)hw^wy=SW0?nJCgVn!k0bILpCMwafw z>;68Tz+q8o@$|~_wXsncGLqt>@qb-wQyrXfC0%_!Q86wY%)7fdoFOLXma~Y?ZF=_s z#XO9I=g!g1OpPCT-eEX^{U;$I8NAYaz%xXFJ^+x1+k;yW6gZD-YbuKJbmwSypB(p| z`HvnxfblvXTgnl|JUN+~x=L((G!->fazYGNLSR1)jr3dFo59lGzjqh92`;CEwWK7( zoIZUjH!J-}=HV1S!eIqJE03FNM=Q=->27VTgMd4`I(HBDL%3>SsE?I-)CDlS(?}rD zprxY)sRvg-JUoPpc|rd3;60*ZB5CPpmKU+4ww&y3A-X9jE(8aHRdc?Au(1{ntbd?i z-{2K+Tws=PwL}JH60#`pY5OHrkCa@VZ697k%Y~t^u+Y%t<iPL{jNQj!9wJsz896R~ z0WMB<9Jxxz9|%YoM74Yq%AdaZG04vw_Hbiu<@l#wNRS^)>`!0voOqbWU#|A|5EB!n zrzS!4be#4pOY<<wfQ;bKc`|Knjhx&pv^Zd4{_Wd03=HR>O>O-p*mpM<N7&l)XW#yr z-3RyX!tewG9|S3fH*!H-6jbpQdQb!A;fG_5=Ac+xn6fZ4m>C=3`Z?f~n3xzWOpUR= z3+7f{L9V>Ec5HsmzyK%n1-Y3ppr7aEZpC(a4h9;~z=Wju#r2het5<L`FTe+w`TBXo z&skYoINZS5!49l-VsZilBfYhS+0}u*-CZny)MjmEsi(6oJR}fKtDTd5Ol%bN+^);5 z-!vN?z6Qq-j_{?5{Ji28z@AptIUuLIymP)~c)hBsikyNxr?8;2zYn~Pt)<1(#Enls zeZ=~8-~%JWf{96pd4)yTd3kX$?}>2w?ld7Kj~pVRaB}57oEM0j4mXRBiG<^<p{DW) z?SBrI#ravFB{n7!gFVEC;6Jf4521(T1#uXEtILbALI@u;Gc#J<SpD%B7nmR%hVN~v zug1l^uC7*LaRDyoU%h$-0i%Y7T18d)+xPEzxHy#*<PJ9j2+tA_(9qF}3JV^7x2dk0 zf`X#1whEQLe*Fqp1|u>o2%@>ugyb+%Z*Q$*8!}=~pE+}mf&R?dGfj0^Iu|bw7a#xm zg!Hujk?Z;fdbpTpVPS$8kC=o625W3HG^XK*xgebSuU_fy>A=H0AdU3##Kk<s?*fAS zaS5>y?!hp6_Uzf5%+$lp40LtQoH+x;LM&-xZ)0O?iFLPCl@$pH2`Q<mz+L?O^Uq1? 
[GIT binary patch payload: base85-encoded literal/delta data for the binary files changed by this patch; not reproducible as readable text]
znMdEgy;R(}U(!n1J{yG1X{?_DF|wlTYIW}tXq$KMe{n7sZ9OXM8O(lF%4t0xF|!cY z$g*wF{>k$0!w<<dllF<1mE3X<skt6jqXF7^HQo8NJovHR2gC_1XE3`3jZUf(LDfT0 zX6Mdu4~c+IJf*O>s`YYvU*A^DuiU*?Ra=b?(IZ=|t=1kq{EYc6+6I+6i#Kn6!aM+p zot+(8L+$9%BS(%Nj))BBJBA1tKx0;R=IZU+3u~*$OZXc%Cg-kv#=JJ>@plO3(UxKL z!K?4Gb3S7pd=un1ig__H(Yb5ark0m;a<f@k5mW=(V`8II(~_Y+w1Dz_W_AYK7M_PK z4K0lvoE&gz&?7KU53$3{UwrGE7<)&>lYIk`@ku8a10qS=1TTWI_@ByjN?0*4{PhPf zl+6SA)!jhVDA;BkRCA*zG&Z6nzy9D?9#Bbn3oF0s9%&+v7G2mlP?8Yd+CRIE+do-S zYR8V+B=09CbLs{1Y0v@f$1q+ev>7LHetfDlmUBA0gpKx!nJVhy4T!b`XR4!@uUB|< zWNJokWp#OD6Fhws^R~9u_usrXFw}?Ty~yMVEE$>Uz~lh(-J3kW1@k)*+6T<ftggf- z#v|kTdv=4WZ|!J<o?>Kr3Vq9Kc6s^KN&a0(^Sdl9E$+R1y?E`~)$7;iS6AS_$Bu34 zc`R1@<@*of;$n!j*|kenR{Fu~?-DO1pkEU}m<%!of>0FmqN2j!Xb^5Ylbuj41YyP9 z*WYF5B6HNhmQ8JwnYQT>Jlx%b1GB5ELsOIBWl#acQb~1nRcl*jN!g{`f(T@+CI)&y zAOs6C6;zFmxrMT>(GF#*sBvKP;NpW9Z<0znK{~2B=IMA;AJy<wvW+chnFW#ud->+O zUwJy;e*235qS4-SI{Q^zj$=Im_7+~5+o1iEWqR$--;E=`Qgqp=MCHXWPGG%HV0}*N z_@2V~ozV;AylA#d*m%c9OFmf>Z4*-?8#^ME?&25Z8y%OJTaZ;+euRm6Kodw@hPM|x zI~xEVa0u!epaoFbqHTV{^C;#4;YbY2eE?}>&u)+<pqv46b&Yi$y<O08fc${gfL%E` zIRk?N+Pgby>#DD;EML8`apv?XWL%v&o-;Z#ZDC;!&4SqA;o%vcoUpVcppXW+4#y1$ z@cS9&9i8maxC4CT&dWFM9(44*A;3JGLt#lF@Hre8L>Haz+BZ5pfBhQliylHlQ@x<3 zE~B&}C^nwv<3|sPfEeG30;<MQ*MbP4>sd`AiwZp;xgHpP{>CF|V;@lR20rx?W^ue) zo_^`=$Z$=^!pPM-zxn2*iG?@&F{QmSHU~6Z&*=HcTZUh|^<W#ff3j%1q_CVb+%06X zPs-*L&iAygKiC<)fOCc+e9A6+&*NC7i0sm~=TuB^<^)rFCtEi!T3{$BocQcq=-iR< zI+nfr_OYYGfB;dzaDG7^Xkb9^R?Kh4uv;)cb_LPZa&j`??Dz(S{ZZZ{Cr_S0zhi<B zxTL%UT1LNP0&fB;77-Bwz%Sjp10f`cY8K}3BlsH4l?vJ$9fUu8<Paw(2eX6N>Er9Y z{_v4sfbSN}uYogyaN5m`j-gEu`5?*AAQvz{xv<bTI)dg<L&EDldrqC=hsy!KKm%|% z83=n%@SlLaPM$hR4Ggt*^#JXsPqYPQP|z{@Mv0>B5IZ=x@#xiep0O1i(l)9NIokB9 z!)oqWYFbJA{6~`i?Vm*D6x6dFQ`#wFvQL$AM2jw<?;Dz0zYW?yS;nuda!XnNUBYIE zgbjzR1E3v!yBIKk-YAqyoUmKOgj>mpThU2K!vbfH2z!z{!y_yzC@ztQhZ}vhY!~vy z4Kk)7DJfo9T!_Ao2%i98!1LE%f32pjdj0WJWc8cPRZu?Y_qlx|L&_>j`}XaFMs_d@ z)RB>qn!k1pJR%^_52%|-$veOP=4%ibfXORsYZ+M?;Bh;ddIsu;ga%(qLUN)Zjlkc| zog5q-`uciPR~JFR1M<;k1qIJTn-lYMQ&+DlDJh_R0c?lx0Ztwkkip6EG3W>&9sys5 z|HJviW%iGc4ouJBP4TE_?L-P3We12KBGnC4A0p%N=E}N;`&FFzbbKq?ChtFgol`q> zMA;Gf;+0W*3hOIu7?4mpTtBc{+OasaaQpW>oev*AoRu<VJ*~;A;J~FuJ+AF3XAySo z_BPf3*<$V(^|iS50X6y_8GCLeilA}$d84ok#^FMy(P#DiSp^L^<?K#qxgFJT6U7kq zEUX|>bMW$mc-DwuuC1$MU~B|n@bGl6ZK#>Qabs$E^Zj`MC&Sy*!jb^W2Lv{_0O%iN z)dE=s2{HEZ70vk@8)fCC4i0u!L`w*VA+W7)svny}-suI1wsmz-X%zUEm6b(saL_38 z<#r(6mi9IWM>{JkONb^Knj4_wXz%F)p>AnqK_b~!*H$fDzcDs<WpZ($vZfOD1^p8k z8~}m}J?P}(BAl~@1;NqLzO%pA&))|bgd{r+PK+}LcGl7}(g67&lYrsd2M24~E?2Z( zj!8?`H8jxB)>hTjP{(3TtZfKns-caYwwZ;Rp1~n?=YLb6xki)#;HOsais^eDQgnvU zRn9u~pgP?px~^ept*ZNKZU6GC?|$#gV`fA@c0PqYQdS%)6drZgV_2{F+?H+7{>id_ z{q`|c3YQxFm^Nddypw=lpb$P%$Rtt-A0=uYe^k+FuPB~ZopxT2aT@C(gCiPPS%Z9a zWcV>6W1}*%(@QH#n_Jt5M}}u-$L4<Wwk_ZsI1?a^{E%dJW_E3DapT6~#s<WdqqEan zYeEI62`ELs&)xd^E}#@?glSt}Xh*lByP)+E_d#od>oKuC`q{1ExHo23SGK+b3<vtS z`vSBKCk9(S{a^&N8Ttx+-{e$vTU$YOU3yVzLQY;-N`_BNf?IHeqfa1_=59iA)G@cz zG%!4;W%DocG&N#a<KXhGr?2b-@;GH|)f{uN^vc8P?&^+-IgK;5{VS#I^HZz$e*e>n zNhoCFQ`{|z-=|DIpyI--<!<VlybanvU$To@_bHG?@S!Jkytx!zL`<Vm6Elw!HV$VK zHsX-AJ%{s>zy}KI`^X!RjBQCw*!z2gMusFNql5Ofm)nuKLH^-K;=lOQ>A5Sv`GdP( z`TgnW@2mN<3ct4|^R=SEsi~Hp-iqed{Oa1YqLR4GoY2H%&#)+DpoaPN6W-2A*TP!U zz(`!%{J$&EL=Aj0Ye(ld9>?alb1T@3n?xG=Hk{K9;Kwq;^7=s4lyxpPkF34_@CW92 zX>9Fh<5$})WWcKExL?_sPs{z3wtN5B{5EL+d@**6IgayHAjKb0p&i38#4Qp<&ErJO z<0XhGd`ixH1Pu<Ux=5M^Dq2TMoBC_wi6$g_D=OXDCjf<ga(+>6Wi{|&_wewq|B~)^ zy2IEnPB?Jo=k3h>Mr*&~VxwPb3jI;oz~n?*UtdL2b6!<VYEemSW==?AigyGuGPyZW zWACJAWdm`m4A$gdm0b^O&?ECYN3P!OpS^KTht8)$0aYVyd5J@jY{JN|>Otbx^3KJL zhkxvav8yZBxz1|uxuD0Q;J~ivbV!}Xqe}IQDcVNtpD#CV-4oLFR<ci&wu;&>?{rQ- 
zNZcwJkS}4G1fQH@W-J%Y1aO|Jq$o|tIC(Q4ExZ-;`&qh^H^TOzNvVms1vwQ}mCY@e z`}&!a+Fkhq$bUK;(%A3*h4tuPXJc1>+edoG$C|pkE1Fw!E2>ipiefUdgA<Y&5i!(& z5PJ`A0K6#zcv~#q4C2;3D%4#HF4XX{p4po>AAf7j$l#K-)25ZGI^}UFkR=G=acJn; zzA%i0u7B*8-AgMsxCAhO`Q7sNtnv;ZZ@5(`T7-aY#Qr6vs&PQfD8Ssm^rCqfu+w>i zP$}zFam!?Bo3x8&vHL+%N?D2+`s%nO8qm|V?83Cn?2K$k(8UAqK}Z`IcPTbAJH5E9 zu%@oQqXS@rC<KIHf6Aht=gIl`<RN3f8>OGL%^#bQ<JV`ZVSe^cj9>2WhlsVft|6<e z0vtIaB|RWE5&W9s7X*^V!i8pH=ZL&TW{g+H8FH&S?o)9Ez;{k>EZu%eizqpy<fQ75 zqe&^|R;G!V1pB77*A1)y!?#ad`~LT3jrob}%&%iRuDnawc$d7xUU_>?C8q;Q<TDyB zuV1~{M(kfwLJ}%4&dIi6)fbEcITW1535n9y=~Bd0S-Wg8<1iK>Lryt6Ig1eMfPA8V z5sn<EWoBbQw6P#l9lU(q0>k`b;v-WtlJW|H9;;egFZcB!gg^Haod42PIe&<ye=!{R zJp$%S_0eX#M@AYuyFkx?fJY-%=)ZiU;^-j}WS;<%I|CftnB)K&9wdzt&S1Y9<ts&3 zdQ^GG)W*u)XMRa_2juN88inb3)gDxFJ&E&iiLS5gzFO0_+&;bW;>~~Lhkkl^4|ZPp zUE+k@a&~)U?Kl-3S!76u6z!jEzf}Jhl@~9*)pLyTy3}G7Sau5M1M#esRf?=_hO|w( zyhE-4#$%75{vj0?eOi)ROeH0<5>HRU5bX3Vi56s<ojZf-9}J2&^ioPfb}o=)Nqu8O zXD2B9q3Nk#LF3pTnM`Kv&yo2rg~J1r2)#o*0`#6;UYT4_1br4<2wV;=FckEgjhiQd zOf?}n;0Q!5BNIiOA-g*Df5^G$ljAQ>TnC1a$ZkEP>~tQ>(Dkh5Rd+wA?q=a%QqVFB zswSsyA}F?G<K~?|`Z$u(D%kl|_X=q5kha++W4lY1#3pC|cL|HbO7_p5zuboFUsZaB zW=tr_mny~#J@XD}xbvvE%GqVhk}~B;nTn42CzM_G2pOH%^S1KK4bEusOKGz4%fSHW zKp4MTIOr2?%$z8;bT6`RAjkkz=RoKOgjw0#+T7iZNS)skOV0hrrTEWl{_42Ur+GNR zA3KMJ8aq2dz2{fe0KF&X<VU4uz$Gxkqd~w!7iR71VeUjRvIT}GY8e{KV~zHyyZ);j zS;sN9`11VK+fOrV29K#zPHMUv`ZkCdhH<G-b!h47HRAyI#*uZu*b2}m`%bCLsF7UU zV=HSr?mztPnXa?*>$wHA5MguC{2RbL6ZSi#h~Hc^KdNZ=^vU+C#eZdq%dE5X%8f4@ z)gq@HP@xIv1}Hk^1M=nVa+DnNkI31xNmz)R_|u}wlgs;~^17Tu^0iEffCn>2GQh); z;ZF|^_m52gopUL-00_URwjLC6$G`wOfA!B`KlZzW*<T#1oV)UScKI}~EIKX(Xoo%s z{1K)1oboCNSL3pB!je)!q=DyBnL{*0D&5@C#mL4E7#?F}B7-&At>*TxvSdBy_|)o& zp6QL;`r)%W^kZtS`rdUimY4R+J1N^IhUWIx_OBqqrg!P-%QuZJeR`&zoB~?=4$H6~ zmFK>Ilhd#x*aifIX5<t$G+!PZnV4H!zuDP6T2a$M_fC{jwFQP}JE5{u#P};otFNW3 zz5%pLT74y90hkw5vHNj*R^q?9_(m7o2bM$^52-jL?vr;G!1*gX6)4!}C_3cI*`{+` zG-s2vlrjwrOm8W>Jd<8EObN}wn%iib5g@*^q`2C9cvAyI7!fhS3CS@TSs)d1$}3A7 znrbh%gM{cGA0L^S-h|&rD31Qq?9&t_pL!Vt?IR=2Jv|U~l+-ss^nuVjgsUjMN5r@W zg*y8L*f9k>-rmW;nuIm8&@?oZ(J}jXCD*+QF8VHs@g>89^S5ieXU}5k$JN|G(nwh( zvdKA$;zRs1I;(n?s=BY1buO&ldHQLr=H~7DahHm9@g6+qbpZVP4obl%+c8D9Q|cT7 zm;>jr?58!^_*K~X6xfa_v7FZ0DT@C}((-Q-7N~tC0n3&w=Ww3BN!t+pXOzIWa%x0n zO!2S`F?zp(^Kng2C5JqaH!4nra@MIl(zbiW&BaXu{8Jk{C)O%E=EAdDjO|=CjPUwI zTOfQ8z(DwJL1Eqq@{^*|vw%gjnVetQ($?73-8D2g0JOh3wsU3l7u3k+OxM3TlJg7u zeuO%6F4?Kc-my_|I*2kraTV6qgLF?VEP+@R#0<EncVw(vP`Ha<5JK-XcY-tM8GB~L zYNn=ZcwU?IHznFBEW^q-KfZK$c;R+-&7ioU_gNhuJw}~~QTRRuXA#p-*SH4sEixea z)!R?LI5>Ix?t_GsQaf_Ew5sia^E!ZVAb&Q1`Uw@*6RIpH)%IM_`}(5!zahkKi5g%Y zTww1x9V>^B4<CNm#^*n&kbSf0F||Ia?Fj7UosMaGsgR3+@R3i){DU&2-D0Na^cdmU zZNO%|(>HSK#|UH}6+I&zb4$FvGms>S&H%y$Ao)bbLP!{sk)4!Rm{C#&%DIe5`)z%F zK)Db?Ze{-89!39B)`QPqTFuR(gGFc~!_$+{v$PKkfTLBnw!v`=s%t@Ur4$y&XXk<d z3rS4&jf!&*jimSogL(&VwLs|I`6Im>DdCI`YLNe%Jngi$hkaN@T=Bs4+JnfPwiB9E zJ~f(wcinkh0K2@C1R=sXs;=bnd`;hSX~)9CjfcNyH2&K4Td5h9c%t8V8FS9F+Q9js z;lT^oj;pYqRA)V{h1eO)UIEOpizcZV72B}<r<Ql`-upzChUIiKlG_w)<G7S4hg4h@ z9rIM23suR*s!pKo9d-#DpT&6=HI3bU_IhUhLF>qxXGDg&p_!J68DIto--_x6B7y7^ z=pGUQ&;oz~sU~FSL4=!AQB_o1SKicIbGf~#r>A3Zuy<?>*m!tm`Xfbu%I|=Fo%%W5 z!>7~_qg%eWMMHy;nVG?<DZn|%bzt?T?rw-C;3z=sa76I9WTtM9NJ$HdPl8iIq<cUJ z^hywegD``0-lTWDxw@{tD3-WGmA*rPB4ZY0;a8MXKR&&7pB__jSjq9MHUsBg#gAdI z$va6~MNwlLircOL;6c(%ETb>g`~lVd0ZFuat+jnHr=Z?1INc{O1+jo+hF?--Y+h|c z_rpiq({%rzC^v6EAbO;ySC4zATn5;4D!K5exT=wh)yPHgsZJ^3SE1|@HW4!LPA%`d z^Yj%E{>1A2h>S{A19Me9LkMh*NDgLBE+7?vLV-x>ArZj&0Jeyfbf8&)AAk_leql`= zFffFZ4ILdVJ-zJ%{eals@iEXp0ATO}K=K#9)t#Mz4g#1QqGJF#NEeVyD9-EJ+d-~F zo1mt0D{C@K%YoPvbMj*|bHY>7;RN6`zzt~u$WWx6hc}#|xs!_t$pN&Pj+v#Fk+F)d 
z;c+eJ|E}Q1rA$>LMVtB*q*V?LE!-q{rSmA*iy8zQ`PQG;4FE|4fOm<hFK(M-zD2e$ zcJ<CSa{g17fzdfjx74Ka5ny{I`vh(^S6+2@RmXe{N(sPTi&k+S%h)Yqa$M6jD6R4O zgBKGk_j+e<CYN*?+R~J82AZJoEp702juvDp$Ob?@**6e$J|jFjAT}{HDK#oB6TlCe z2LeeD*dTiVr4aB|G&O?@)V6nk{BG**f{+t@0kqKNzJ5S;8(3doYj1CJcXwlFS3^f9 zC?%+0(cDtX#5uGBt!0#yrxq4p$}Nb?%t2+0e{2GT7=U)FfAA-?6UbCFR>hfHVN6Wb z^$n$T%sJJme^;alU_I0v;w=4(YP+v=Pp+$3hjGi>OPWUOcvSOid4Z&nw~cj)t}kqv zMFHP2dHshUe*B{(zg_-sEAv;^?Y%P-N{0fodNi2>nEU0NWGs_0u9fO8#hSEoVO>8K z5o1m{l9@;P;2iQTe8<#{+P-C4Xoi{*QAtN1fCT($V&@2fA(2YAbN6=g4gf^~*n`Ls za0qAw=8w(HhKLgI4?qR}2h|N~B(JIl6nJ5E9U!^5uA#WTv4m;Ja$N&-5c$=$fbiUk zY5+O#d0J5kbRUU;^Xy!Rox@YoK0`e;(lsa)qEv7_65Y!R`Napt)ZW?9#tw~DHS`S? zaClx#^8ZwH+pkO&#|Nr8#``5Vf`HGd8<xiVA5nJEbScsx7jViu?^AS<B}Oxn+DcIk z-#*_qcI_YMtJyC9Ny~#rPeU$MMd$Uzlni4jX(x2N_akGN0UAuqYq?b_5L1pQI6+h^ zsOuG()4G2DIS_u!=z419h?CzXb-cYY&H#|FXK4*;+0+4vaBW;Y>^(NY?-m^H85-#m z73UwD7@Uv<{0}YwT>wZC=mkKN09RkiD+G4{JbyA+g^d7j@Dz~f5F^7bup`9F&<Nlh z8uSc{0zn0bfd0k7%MXGJYgZ3AI_P#|l7j)!7Dpgzo0w`E7|GxWyqZqCl&I_~bbf6Q zQG5_7u&AzQu48=Fijl#q>T(|Ei=|fz8-&8O^J2U-oKpbsmEBjX5e>gMdiD0N%-ykF z{&R-3@6rofBeL5QN=MB7N-mm*?Ne~#QKHIOrvl+?(kejb0O7M=H0O}DvG&PsA6){e zGcbR<X?QKMWYC-%p<-mCsAHgEXpA+t)VHz)$eB8l0e(ap9atB**V!ikG(RmclnyQs z76r@<F{WQkJgA_+xJw|y0m(qxpDZ{5ED8P<5SswipfdCtUQCdKd;**UrGtR6_w<4I z6pms6P<L_xp=D@|5Ife?Ld(bmRWkfqj$bRgu_)1aHC)f=daKw*M`X2)E!_!8ZxYq_ zI;2FFv$~}1T7Fp7jZMzsf^m?EcVT2fKLEa}XNfs+2Rb{`wx0Qau++2+#TE4AHqDZw z>p|Wh*7RVPb&xcR({`)Upq6RVtJIwGc@>>@isH}cxCf>*ENnblz55JQZgtO6bbgPS zYn+m?qdeA7P0vu<)Et6bP|*-M0`r5YwxYO!<|nx`>^*!O8Ghgb09p{<fL_<YP=GP` z!ao{p2RDH_6u%&{PXKfrz~|6|0M3EZp-l)SfYtFN2O}FhJu4eTwByZz*da=l(6KtK zN%@<S+a7uHAr%*1P5OC#UvhBK<jURV;Uz6nB&c@*Z3d`!1)DTZc_$uqH#wVF+mOoi z+DY`A*6QA+^}E|6D*um{(%RnWoc6-DIq&2)oNK0lzCVkM{YeZ1v^^jn<5~f<FNE`D z6EkC%CaMs_(klBv>P)TOZyQ}N>sW}$?XmDmQ#Pf^VvUt?`kFxdAHi=#a)4M8@Co?0 zba4gc51_SWdI1ER0ApZmFn~043g{=Y#lzdqgF&J*z+a#~>;?M*&dr_3pw+>lK(6aq z+TaLQ;AI$N6A<l6y2e6SI}UZ%uM}vzWSzL=?YR`4k89B_+|yf!7RQ(Gdd5`=>Ux5D zSGLR2^QslV`tFt@9ntdCa!I8pH03oR5o>wpA^`s0(^uOJ|6ieuOfIICbQd(w#+43R z1(b=IhH)yn98jc)<6{8x!1h>=8db+!epM>#MRP7WTRn1oY3l^UwqwipYI>Kl>ZgJ- zJFR{5v~2w44eVsFrb;+Nb$uhy)gXHS|G>r|Ka6ed@pevtTkrzVB<4;o1ZOIM*@8@C z8nR44uuXw~K^^EUpuP!{&kcwqh#dguSThS~R?E;>UC&TK*Hjcs<kxg$QKtO69C^2t zEvu9@i?j_eJeCwy);u=4bUQS&MHcUWSebkQ=chxjlD0@Zpg?9<aK2z1tWVDh&+7xX zt?6AZ?O1FaT)p*h`~AlMA_d{J$4{OY)btlLPv<nwP-7c_08i=qvdTFgRCblKNyh^0 z-KzolD)!kY)o3hYW(SoVaZa&0b;HwZ_s5p*mR_EVD;^BY=yZ*#GxN$;vksOsqR8r6 zDqsy2afWL8#-OwTt2(CUICBett{ymorM145jXu#9HG_`};2`iX@D!+~V`c%nz>eC+ zCeVXGJ4$*c(z=8T+V%%Es5_OYf0uFmTAa93)NGfC>0U`9uL{|O9ADi&IktSqH?dmM z#P^`06NnitN{OaRF{t;wGWLAh3>Ak&`|#?dim^JTo&f>hcjeaGciTfW|K-Yu4<8m+ zZ&hBND!x1qcyNlSQFBT<iSyx5qVTG@%iE-DyI1RY)&TOA9dgcTGdLuzIHZXpx*kOD ztb)eTp@mzG1IzKn{ho<UwjmYfzQv}DJVR<C;9uH=A)!x^#Mw#f5ae|53OdLO0IGV% z>ITLd2F3tpO+$dRi5BvSY*9BbQPnq5)-_ht#mnLd7j<mUV;y<5s9Y+}914!C(zd(A zEOrYS?>(=_au&Bo(2z^U+Q2?4v#h(ZZ@#d3jL66k!g=tj(Im`b0qQCaxqRyG`;fsI zPeoF^RbW|E;Xqj@Lhs1$xQDmt{l9v7`}Vu8k;U@%xsvvU_>y7A$XZp0OGh!D`xTsz zs=LcrB!Scc<m-6UsFRB%Od<{|II&zbKOjd^whRH;T-r9#KE77bJ`<F2nc!Eb=bE8I zP1ANs#yBUbJ6=+<jghqsyJ+Gsr0;cB*Zs7Pn*f$}M#t@xHti&aikLR-s3zrz2IY`C zg<FksK!pOwWs|XE6Sra&G-5fc!+KhaonMWeU!C<V_P8uj$1*TFy}osDp{i?^5?&%} z>VHJV`MA29G$8@!Uae%C!K?1hCg%vTsstgz!oLK>40tq(c2G4l>-XP(|6yCbZx_J8 z#@)v)!$`kha(NzbV&+>UXA^r=%Y#kU;h>7Ew8bTmIskj7RV$M+1+?7viCeIU6Ar7A z6)i*EqRR5?hnof#^BTr{k{ZmtbJXl(r7Xh5@IhiGfft!3V(2eq;CBY+Euiat0^@m9 z!|kvdja%7;L(XBJv<<5`fmPIWuaF_&oc*La`*CG<K1H?@DqQDrV#*|<OLSUcTkrJs zrlIAa^d^j5)NysnAw{RNSZ`(fEF}GhL(Wl9GSYV3YHng?VS4mzkHqHu=2<{{W!GYH z+kAch^0oWVw$Z*_w!;4X_upSxyW2Fp2FM4IACTE)?pq{p8+Qzg%ocT6l`dfx2g(63 
zuj^F{rbQ{1Fb+SiLgA3GWRtYyQFM?r_O<j%4@zsyt{p0D9#1LnbC0becxGbkVkGf_ zf_mO3G46*{DVz$9`{nG}q;2;|5_ev-*ePndOW0(Oura%cF{h~U5eYLf6*~<Bx-B&( zDy6ETb-bZ}A-TNIJ|It#5Oh+@^`N5TaWz*N%S0XbDmWQgV(L*fcUBpD;C69Bgsxkb zYg}VW)p$kMVg-WsIiUBkt9RZaDe1QXzg@mi-o5|+%Jl~|eanFS!j{?S!U5-~IxTX_ z89hI46&k0a%P~z)DMEr4wM@^e76=x=uSqG9CngK)`X5nn<d7on1N_U|31D0SqGt3I za#)F1d{sb7V{m$NXhsWIaB5>{+NOo1)CVTj_$O3_rq+jNG)CvN#O1fg6m$lpHaG?s z>N>|s8T;}Br7JldQ=<v#2g+I{!{PK8wTgCGCov2Tc`~BGFrLy@(GXDhr*~x5O##%a zm>m{q_~C`y&t84E{aCllZ;k5w`1r-^fw`Nt{VP>H$XrH1o9>CtMqc^SR*}cGy*L!f z2NYcdaQ^Z(X&~~o-6{e620rzA2>F*N5R*mqgHEW@c;xKC1@=o356IaaQg%9_K|PD5 ziy1Iv@O}z}Ko!eiRbnWZidBdbAy@|QFJ|a{0p|%!4hZLww?Cxld|b`#0?tp?@)Cwx ziu0_|cB@i#$`>^Z2f4*A>%gt%Drg+6>6~mAS{0Jh3w={*`#hX22zcb>nbEa}&t82R zr`j&t<@b~quinmGf7mv54gK8^FlS2jctA#{O>jBHr{@j)__RC^s=M>3x}DVa7Bh*K zx5>~TgZQZd{2TZ-=rOwhP}+(%X|h(AqzLg6X0aDdA_aB*h4cc1^a6$Sg5VQY#f-z? zUr<5D@{)p0nwn#-7Nr#DQLW=%t>IEEXOn(jHweTPbTm9_ZhRO|akEeZ_Z-)_hRA~c z<jT>c@{!o0f!LzKwCai4zNPV{yH8%e`S^O|cG)g}l=J`Bk3asn@$f~*<aL03MHe#P z3s_OxTx`h@J)z0er%=ryQP?<$Puq)ImByt+J*eu&k6{Svhe(*mg91@^Db{kWz%io$ z&?NfaNRO~--gU4FeS{t}K!E;2gHozwmn~@?e?c#pPs8(o5`{y7%&qEr9Lo?h4aJa? ziGk%1Xn^abRgZ^cce%yYIYm^HBWoB*Z3Rt}%QqkW2WDQ{F5Bgo%7bUG2Cr-Ykus(7 zA|StR02%7ZYnqNN8TLuNY#mgAqopfYM++GTp1?8=snZXr(72VU+$yfn7aY;>02uRY z`<&A8KZW&0Ob0%Fcr`qEH9f&oxRq%Kl&M_G6okyRy#$Se6m4Vm-LoAcYJ#$Rk}Adk z=P`wS9`W@ApL`u^3Wkzm;E_!VsYod9o4C5MP3+s{pCt7m$X&htq<?l3{3Y!R5Hq6U z2UIlhKj`Yz>hb8JK~O=o*aq9+a#KcuE-hWl>5>X5R-PCsYY`z$2$MDsl_7-7Sw$+@ z#%MTRf(m%=0^8t9xA>-jtnSFdftcdK$h;oEq$XNK1<^YX>liO*5hjKY62S*cT1H?g z$?kDAxpl*f*Y9mB_U-b2ro8|D!^VT>ql>rOM%T+b7E-ImLvwlo=l*Gz1Jc`rv%8`T z`Vz~A(yGTYYA5oWXP`R(1E(lxnayuTEVFhZrE)Z_YBVyZD=?+SBc|FRpa4%x*02hd zGW0o%q4TOzc$CQ8$}Ze$w8L8NXY_nzEW^z_vl0qB7q4yqf^WP0|H=O^-@IGC`M7m> zIlX!WFz*~uW#(I`>zbkBkRWdpEol)hVICr85-4owf6>TaSl{QIj^`O|cYrjPygi33 zi8)YD*d=1RTNn(#OVs?Ei<Y~kZ8;PikE+uy=rS;*C~8PyQT@QuwL2ev*q$q6yZql( z-oAVP;OWcdjr-#Z*PD80^J@BIa@#y&K%ZpkJI5;$LS*p)l19E_`V0|WPaz%m3)=Ma z7}xU{H)#Wgs+m8Y97_$%4o;}ZE^Tk<7$2Ivx^eg6wpQOR|5^KkA3+g)j~c`UoA&NK zV(+)#a@;Q4WxH&b?Xq3A%XZl=+hx0KmoJz94a54_b_!)~WOH<KWnpa!Wo~3|VrmL8 zGB+SFAa7!73Ntr1Ffs~dZe(v_Y6>wjG$1e_Z(?c+JUk#TOl59obZ8(kGBqGDAW|SN zRC#b^ATL-?Vrpe$bRaKNbz*dRaAhDbNo`?gWgstCX=HS0ATc*JATLN|X=iA33Nbb~ zATL92Y;ST?aA9L*AT=N`AW|SNNp5sya&T{KVRU66baHiNATLu*VRLIBF*7qDFd$MO zFG6W_b5Lb+LvL+xZ*FC7bP6~iFGgu>bY*fNFGg%(bY(<kV{c?-AU-|{b98cLVQmU{ z+U<Q!uN_x*9l%@X)cYh^)qf^`q27~PEm0yxk&iV9Ny7tY5+Kuzj7)tFDXCi|Tl|v9 zx_8nFGB99uJAvDBV0R-!83_zG94Vt%nxv(`8FcFFoU`}Z`@9Zf+yVΠIW%?z`W$ z>zuvUUTf{&{cd;JU3Qn<Wp~+Kc9-2{ciCN*zI^w;|Ih#Upa1Ls`}*}a-woGq`=8(c zbDn3oe)HS$*}m_8`R6{>KmPXXZ@-`4*tg$JPdi+{{hr+Z{ru*?33vFG++qG)->0A7 zkms4N-?1l5uku}bs&D!I+1sEG<nIK3nVxXC<=fx>)AoYl<<GyYPkepi#-H?;`Om6x z@ZYMxG5opz?z7@nXKt<gYq(qgU-pnH+#avvb9%V(@ap6KNs3$R!>f;~)BaUFc_4lD z`S$)vhAVrX`E4J){ZrcxPc=PZ{|@_~w*Rx;*l=V2b2Ys4&ZIkR%>3W!#^E8;J>x^j za}Ia2@t2m}Ened@Sn`DAsXG4T9ep`^&-k`)Km42R2E%Fio*N|?$oD+|t0ISc42Jo` zjt?J?QHqv17^)NiP7Ap)uC6|wKJpw4jiaHWghT^Tm)`K<+mBZ4AP!%0*>30oDu!Sf zelOd2;D%7K;TRSCC=~2pUAU&;p$BmaMG{@}^T6-%{m|R#e`!EYKernUpMM!nHy^*- z_&vv;@m#Lbjl)Cs-hCQBQ5BCn3x+8|JXsBVb>icx9S`E+7!3VKJ{p6ejxSA4{OW{0 z2}GBRAE6J1c-qo$IDF6P1hnMO{s>xrR`Rta1%TyC1g<~RQ?+UJm|n>8H!ww`3BMHL z$^&eE@;eXzdfVH?d^$Jei*4qpm;<13u;5RJY46=<@jcTOgB--^$?1yW)U*EJ>f`An zA6<P|rKp<&l>~r`hn{X6@!H>VIBpY$pJ+C-TwAdV7;<!0?R4iZtN>j+zVzg^eCZI| z%5?NQ`Vy9fUtEiZU;0HX27uuo<j%IY2@(wSRHf?!StvX`H}}Szp_qdq9=JK2Vl37C zV9jd<kB2cBLe9WeD*C!9VypBHVfS%IZ$Fxr{rOm&@im+WfVmaV;&fbA@!HaW>84ZO zvM|`tt;fz=&xY(KEM{V8O;0jzQeBJ<BT^L0Q<U4=4+gpp&xsj}1TT)od>w;f{#>;P z6>-#3`XP4wVVjsf^3fCwjhDNC%yKI&Q=PVnl)1ITcV3Y5u?XLM`2L4eo&-rpS8o#z 
zgcZp{JmRqOvjnnT!-6oK`!;PN9Y*Qd{C-FZx*4CjySw#Zpd|xO6;ih5$qtgX{=JYu zhG1~A65;g-xt#~Y!B|b0kTZzgA?yWB$~wYE789F!T^fTyIRpjC;X{s>RtCC3Tb>Xa zURls*TZ!7C;3*}|7Rp;2o;Gwb23Fb|Hawx-`Zn?Tm*Tkay?$QOJz1aAnYcgZ4AZwq zYloE4#TBNZYXy{wDS@<8&Tz^K*orSWWV@07nzxB?#&zM#?C3G#i6vRg$tejt3w`*w zS!|#HwEVcoYJ#jJot8*IE!h?9MZ36g_3y2}<?fb)p$d6;h37Kl4|DBVksJG!A?|(E z3GCor{29T4v|PkT4W(tfjtL|=@mbl(%b6Hyj)v>ulryv}V~qO(M+rKs00`;0@U^0& zcd%{RCdwf{=|~=kD1oI96vYDkZfUxEmP%B!{a{E(Dr5*OjU;=0$}_`3DTiQ~-zr6c z;$lK7P4Zuc5<mjkK{bJ?NE3G%4~SEYjVXa7N&5^SK=BAAeAy}>z=~&vg(yzLFB{8N zgRT%bl`JAyqS($jm?bZ5L>hT_i3HM<lFd~z{cBj6AGehWXPDk6OEVx07)p@#AbKdL z1k#e6K|+hLIM57}2H2E9h6CEsotLc)Z72}Nx+5N@f?!Y-EevPtCa&TAb$~HbiNUfy znLn6tpR%2f{+u(E90oCbV~UGd|3Y`Rk3d%Z#24v0ThD2ISB7e0${AqY7Kwq^D$V(& zrv$P;9S(+SVv0a^tRXj`IShbNM^_$|{Cq3~P>ir)o61bnvOU5m5D>sk=dJ&C1`T9k z+tFpg@~nfFIc6~g^5<X1bzA0;05VkRp7Z_p-+fA}2~vO%NP*!GzDh?ht`GAzv5Igs zhdp_Ui?Afryw%v7po@GwIu|%5Ku^Hodd3!Wz>sNa5dSQPC+rMD))+#f!O_LcuplKZ zMLP{yrs%_OFOf62i1X0wG49TCYo7kKckO9Gwh6hgb6kjE+6}|KV`&+y2`v%jJ4J)r zM2w2VE04wneCj@=gh6(lBB*FYtwA;vCG&@T(z4B+A>oDSSQ-AiVQa;VT8LKH{64n} zM}Ph$WONZXracJOav}$p_9nzS;IUR^9ZwIC=Kh#7q(+t2%%a66Z<nwans5fS12_Sa z1qTNpX*9g6wbB|)mP1Jch}5!!OJ2QG(F2<j4_uK}yt-VG|8BpUU|D@dc7$52@D(pH z#$cG^ITH*jT9^W(BB0as%>+1FUuW3+Y;=UA-zFqdWVG&*(I5h&^tJ#QPQA`T0i8Ov z>m-sEh{g`8j6zFM7AY-Vt(?yC^h`DWbj!hzGjw5_NMS>JNdY!=<PJ_^5hI^rSRRjO zI69FZND|B15EXmd>Am&agzQAblfBRyO#Oi<`N}Vw$P|TE+s!=iB9_%@AjXVF_PZGA zp+imDK}gu;N=toZt}$U?$>BZ;hKeuyv26`mWyK<a)Pn(MIAm}%jU@tX0yfe(+JZkE z6G*7(EVdU}8!^^*P!AT$!DWyX4ThCiDCp|6;>ah7(lrOHI%u55j+kAjYe`o41{P$w z^{0bg1_ZKy&lSrIedrX>UX|xZ&CxTRz|oH?)&%EO%n1OPUUb3y2s*{HG9`x*3a}ch zM6C!&dcgK>8cvqnZN-ZYE271=EKX+3K_JqAQI~V&vi+6eG&O+8vdqZ?4db-L@^E50 zh2Dh2HKbu606l^#(`j0S59i(lbR>PvN>SG1hPXW(bB2f=QL2C?5kl~&SnqEPN!@v8 z0h0=Vtj$QPqwO%#u<qcVRxCbNVA1goD*|V2o!oMYUuKmGK*BlXXv1{3h%=C2U<X~) zfL`O*&E%HpKm-Jm@TMAwC|?QA6%CuifHRnRccujzHm@zACFqA(Yznga;6-akHiAvT zyQRe~oh?aea%v7~mg-unC=)FW0?ENMLP8S>vE7t2(DngEfmALf{4PpM{?Z~O7|Vr6 z@<D*3<#v$Zz7|V<OM!xR9eFIkhbgoq1UleGf5`B(E0LcF5Ui7MWqieL;|vv0iCFJI zp)u3baE~F7v?bw005~lvWu@f`g8_gkT0XcO9j*=tWEq+ZD~>M_PUHFlp0gbRkUKbA z=i@@as1>9Qgsnu9Lxt8@Y9&cbCny9#Hnn9+ONH;Ml@&A@h*bHfaf4Gk5pqxGJo$9z zy{nHeP}*BVstMBGqP>aO!Gt#)yOvO$S*_3$bb?N$$4l(dYAp}LqYFxy6|Xp;8&0wA zi3Vxeqq;@53Bp=rO$t3dqoChjAh!^vlLL~xSXhwNObD?w7$QAJR3#)Ytpovb3I@x$ zYKHR<r!_(CJ*C4zk5ou7E^wNP;hjnzKV9lJlAncOV-~vLViQYC8b5cJ=0NI%RcIW~ zy@@Fp#!8tmC%u$NK$DTQH3NPXdK1T?QI*+Ta0`RFb!uT-jn~wlZY60G0{|!yC(Bn1 zNj{O+xUG<WbXTyHQURt4PO~XUqtkLw`u1{s2FwpDNi5;u;r`5yj-@5b894tWQ79{h zY&hUR9=`LiLMFB%j$jZ3zw^=Dn*iy=h8{20N@x{H3r!m$Wjg*WGYou{M8S`Hm)4UO znwT@}P-^7urK$<qgi<*qSQo@j*4~&i*r1!JSsm)R@<oB0D6(`A<#lr~c)qhZ!siz~ zP|lw@B81o}fj8KNoS6o2F2^2$3L(O7qLr4kE0J&p%6s80&W#lqco}(F7@Wi1?ku6G zimYQeskQDiiI8!dm|Ct~d<OmyCsAn9B2VI)5snVMiBkvceMRNhuw{ob{0|79!4M_g zIraN|IL<ne{CBZI37yJ9tv6e_k0iE|)N(?eSrTS`L6)pyIocZo`eJ&fL43$7m~w{k zQ*3I)Exnp3^7A_NADN=+lou}qaDb*~Et_kN8-%w)zhFh>9-E8-L*A@-s(=eci_1ch zT8*8Q<-+n}^`0qhVRvP|@i9H&-6e##48f}udb49lb&yW%hhP9WT7w#Vt)bkpiWz$o z8x!SqqCH*lb*WN>ob&6BI73H3U2!J1QO3d=mj$lRo!SA1j>c35mV_8-iQqSCtX3^* z`qU^Tr0$O9z!O#e&QdCD3if7tmxv@&5e=hAu`Y(!icNaUh-|+zBaoy~Me-9S0EA4^ zb98jZ846M)l6^l_YL@Hi442<pjYU}!QNPWQpJgwblAt*DMUu}J6Hz7#3)edfav&>& z`hChJ2=%c}<d$c#@(O1t1qs=41Pw399}cLz?i5f3C$=rmvl_NO^uZ9>3noyJQTf<z zwAv6+M_Efo?!D2Z(?yHTEJKzHx;vPBA{zB{Anq;}A-7Zths3Lf8&GUcA{xtRZ;}%$ zAL|6+dBPbwn<V+3#sKT9qMC!U8m30oBNYUY+ceOX0VASD4IDS#1UBrN;)I2!)iQ$k zB;<Y}GFzl$S;3&Yy-?2JxD|#MaFgeZ<r!xPD#Ep3rg>nn#G^*e<<vJnm~|7I;}{GL zYnw7!9aD?@gjJ3&1X{~^nLT3%mz?q&DXQX_RcAD^v?Vy9(I|69lVPgy$=NKD5tp{G zMRXGk_OC_95Hu7qF^JVf|2`Apxe*E%WIZWb`@^d|XJ}|$Ba;m^35f~hqe6f}@rzyM 
zVFW|!3wn-^eAihwUAEBm=rm1khhiL7mUc2WE@ZkP@cwTvB(GyxI#n+czBKk6_h#B# zH^b2iBH=;TUn$~xN8soUb8cxgbVAWhFhJvo^BK6(c0*P6;jHX9*^JZS`eNa%D+E8b zkfP}`R)!QpCuK4>T7J+ABC0Zh=``Jcp#)Mm@PL@H!rrl~cng~^BcRaql_0@#W0ql? zu=C3Bu+o~{wS==8F4YMyGzEi14%I~%iVh=w+Lk5uVT<2DhM)><Rs^ad2vTP=gl(un zEWsb_a*Zm_rIfN|>1t`4)3~U$jPkm;eFHWq$?X-_%86j0B`T_BWC=`#9x)i?nrssa zO^@1ipQ#7YQ7KbAl!Z`06xgGr7!)35%MIcHt_)5u3U7+i8<z2O%AAId>G2U=9M|^A z)2SUc;#%JnBGyeI0?<DAdIp`M3ftApn!`bQAoB#8^9)_fmf;3W!p4&JPC^Q_4^?7L zNUZ`XdwD}arvk-RLyU1sJhO%yi&?U>wCROlrMkP+Hj!BeTy>&aCq=6K5Dbh1DNI*j z-%nlghB>#8-UOIJU{{Ms{t?=m63Ag~KkEv?i!L-$^o5KWqfu}t#EWElz&4ans;Dx2 zvJGvF2ryt^E(kiwMpCTb=`UC*#A?EfOeeLWOne)dE)sCg{SXWhj@BS?3W@lN33$^Y zbHJ+!$#JsGU=8*Hx~+FY&(cW|TspxCx-~z%O^iELSqT86Ar_-UAhjq8`<DxVP7O%a z92dVc#WX!S`(w_qnng4oEaNJXB~nzAcgB5eVF%%@^noPUX2W?G4Q-P8sZmv+-mNRR z2?p>_Z1Bn`c&GrS*0wBoj0Q(j!ZSVMmgCYFG}xgKSttqSZW(e*MG`Y<&H&x*c!rW{ zqEqbwRZ~#a7mftOqoBg}k~k+^KCtE;K&LB@2%&USXx@yfQRjTzg*&Jnb+sVTnzMy= zEp!=)lVHToQXm$sd%W1d#IU5AC>AI!PYD}MNGEE=%{8gs_Jaz0MaV4>Q8jNd@W4w% zm#DQoA_ycm2S9!-*Jz|?qm3#nn$ZQ<xx|%!YCamyoD>bI&S*$u3WiqlDmyhWq;DAP z8emkonAVbH`{1^R?J7cI^iwbh>puq$fwgEZ)S8}me_k1a4<tjyVQF#|n!qr^ULeE* zDAcKvIzw;<lcg{rQc)6R92&t9W644hX|7dQhg6(ucZkuleSZe0a#^Wp1xmtrR`#Xk ziup6>P;;f&t>A95AK1~v2h!2~rPBQBg(0MmD}$6g*bH)5WZ)^vV3F?eE5wMph!|L1 zpdHDvu-!1yqcpH3?vd*yf<fA?DE%fU1@+M(wwS94wYpTA8rT&bK&OvWFd(bV<){&w zX;_~#{8Tju&R`22CB&{Y%LS`RoF+HOP?)4{*NSft(K=XkD!~U#O7gL^T(t|ZqqBnR zTCh^NGB>%snOIHGZgQjaKx>UBAK~Z}3>p|t3kRVVPb<S{<D-v$C^CjiG(#x!WcX5( zLq?9J<-CYcV{Qs@g3IeF<gSLPKG#g3(Jm*VL77bBDuvF<cq#kbE*N~^D{4@$(A}Bk z_#qY1oKi<hmvI3Cup%X;L4}PX>nLF;MF&~xS**fo><j}5ks~KG5egHM+K0}5Q!QE< zQxki`9Zi=SF~E%b6mzGBGN@_Sk_Lm<d^xDvB|%sKeaDsII0i!_4W_V~sCm#%@8RIe zqm>|sB(u$SEg)s3H$+EEL~7GWo*ZIn#Q=mZ=h9;}Mh$IUabjMV!8a04a(fZ2WyJvm zz@1oeM;7`0FmIxqf}ug;Vhtwb$c;>dhcOriu2%{M8UPGs(j}bntscycs*TZx`<OF! z8k)>#m>f;TWTcv9y9V4Fs?H*lw;0VAuE-QJx0mpaR+SN6Z8ehAZfI0R$5ieROQ>iK z_|>bVlrb2FO1~oxvAP7f61^%OrC=AlvBL5(WO^GT5vTAc*KTtUsCL7hG&Ij=F3zQ1 z4VJefDF8C|vo=;CHgsk=yI?1V3Q0Rqsbm=t$koUTvTPEK6u-DiVYV>fqOT@qd0l8> zV~4u&b>hw`L#*POGtH<{{V^;bH)DzMPG&05hKnyY;N=?*ahf1ww4pX8(xp<{dM&0} zB(`mNYhyK>VcFp$Pzm~4=Qg<5n<$c^%1I1UF5{efotRWHX9&TdBG`_!NDvOC!I1io zfhtfcu8f<U3x}vKyCN}Q(FLf;53Q3^ilk-J3bj|vTy({Dv~Ho5f%B9qjwIItFPheY zy=zb7N8sbAG*V@Q2Ew$#v>({fhpAEJ6_=jIk@*+sYdw7gI+iF>;%D1%fT3FK5d$Nd z_KZq439*w2fHr|h5TUhFmm732)i$+y>r>xlMhw)-FiI=~rD{@6qLK0$&O8AsK3Qa< z3OkG#s5R?&=?=>CRXSpzvtqek@fJgI9`TIMB~;9~a{LE?8jP4?pw26?zmH8}7gS+q z93UODNf83f$ssnb48-&g8sgHt#nsG{Jb5}DrC>lY7dBNJRTJvp{o_A<_RHVI>vQt^ zbbZQie;$6qFS&>3{`LI-`4!<&{q<k`+s_L<021<WVWD6E&VXbXI?j-cBATbsGV2^~ zmP!m267t@pwLDBMY<Io^h(xNVbmEVn{?+eZwaYdCg_PH=l74*dk1VCa7BxFi-y15I zoAb=z0~uZA9f<KIcadix>Azv#L`Q<b*GeJd1w$T<0bku<Fxq%5L!iiIrq{^<b~y2& z0Z7nCs(c})*Ks>goR6ux1MO20&8gIOc`;wlYu8sc818+Mn{r~ADd-ZDXqfP}fi1(; z84Hg6)U{lT@pjZVp2ZuVno4I>b#X>j4eJS`$Gf~SoqI*MiL3rE3uXQ_pQqU_=Fi}y zlCfgDwiCf1s|lvagJ~wz5n9US*j!6Nwq%waV)uc*`KuWWUljQ%Hp@A;oXWWpe6A(& zf(E;lsKPdJu4My*ON29<$X9Ews5;=KCCa8wa7c<IL%z~0gXK*tkmM{h*OD*tbliiS z6_}1-P+^-iu*}2Yl8jak(dlun<;HTRQ}TUkTqWk$2vf3GIA?f=tPJC<Rh*q|mRcDy z3?(^UO2W}H7zn#Qky6yvfZ>w|QiaVE1~!hF>bx3^t5-J|q@h#6)t7g+zB2eq=$kTP zDExC;8Nyg1N41uf;wI{NRl;c4WcbL#3M6=%;ESW&v={cO27^#>cGN7?&PL+Pc{<g~ zI4YPxhi-3iaRZrNXd<ZmST;Fvo{AXeT9RP!s*al`sO%2SE4(uFIfIyY*%{_2K6Nes zdi%iuBLZ0kQHil}@wkM+=!pX<CzPE7G|_g|v`ws1Z-Oyu<QyDzhUTjq4A<^`0VF!& z5|AbqE$AIBXB_)Va|w<K=N_~EllHI<Jb?kAru2!4&rqb&(!)s2L~m=P@f4%qypk)! 
z91I&Hoam^$Vj+XC6R#aORrBcSC_^Hz^OA=UCk3bk3ECguAF1+jdYzWF>NC{%Q@QFD zM<CS!i!rxc#4gwiM?Q)F!@3Q@um*TRkxeyTuvk;$a-X@%Yo$ki9ts!$zM{cU=pzxS zWP7n~!q-|iq1M=se8_1`AYE=|88?Js4jb`>4!EKU?cGjP-#^@jrr-bd{XhS&|NWa+ z>w5a{eqnEX+F#9$7yZw@8!yTmpH5dh{@fdWzWBq1oLdxg6*8O@TwLZm^f|+5xw#1l zq}O=zQfEiCFv+>4hy%+&kkxoHA(5dC7z}z>iwgO{c*^HRf3Ajk&_aET^A|EoG<BMf z9L+;l&h*a7+`u24-upKn{>LqE6HXXAQ!EkFHahGh&BAWi0&|ufE-+|f%gFvXqsp@J z$8uht%bET$@PUuisgwcVn1ReZnMOCRp%&|zky}cT2;WO(G-NF%ysO0TxI+HXpZwWU zOea?<13oZUry;!;W>ke>kaHq15;A0Le;DUYgla<5&UkrJC$zJ1+FUJ#Y0vi^X*ZPb zwl}&XB|TFVr^*J`UP0@1nc|B@OMm+x{mJLsC%3TZRVu-TO?e<W!?WnDE_xZv^BGpr zikZU2M%9KSkjWmr;$MhR`jh;mQ82Dr+I=)km7{peD9<2hcxa57+ZW^(z^(A|8;9%y z{HuGvSc=c!&ly!#F}v6KNKG8b2_n-u51HYHQty8_+Xctogv*b>Q->kqtQ;Q6j?G47 zqwyT%OLDG(YJ^xJ2h3H1WG<FgHKMYkUig#y!*`$j;e-FY{cR%07=am=_*LwO7Iwk3 zmeOoi_B|ErKO;;*g_S*svgH@<g`tGXx_8>p9I<+6a!#$n1=ySlPQln#@`OFlc3482 zmao_DPha)jg?baz&l3hr=)~#zBu=~~!rO-Xhc~dRB3p*i-fS9J=0(8^3kyyb(6}y6 z<MBEUqUyBp3JWNgakj?|g|aME^*Xl{U=z4<t_aoXz5Q!X&2pYjT0-8qN;mLyq*3)W z`XGtjLCMo-K>^^y;ngRWgHEbF2=!_J^A?3H(WrxLq(IMWmiPd-LX(uX$$X_Rp<c4M z<>U?tw{qKjfBCihtHt;XnJ)q4Y_;Oum&`hfTWc|Bk|%o&>tj|Y=nxp^TF&H_Mip}@ zHo{;hs0~B_vngdBU#(eN3#3c#P3N%poq%q9W~hgQe3-u6<<~#hyZIub(@T+CT*g1# z@l1)GXv7qBUBg^jG0>2Bt;poTAB5gSR8$H2r9`u}jElNbH;#i0o;yamyw+P<1}R0W z9O6j?h7KCR{0r3IJbZ*pufM-{;}b(X=$2_z<>=5A_vog3M+}C7PNHdGb~(5Jd4V9! z{|YMX**lu5Y#@ZFLo8ofVH6G}T<Ie-_;Rfn$8ncqIH|o*3wq&o<1|AXFQ#~SF|EV( zyMrCqgca#nEvTsSn$%8!b!_4-)lDb{@0f-W`woEMVl{Eh2bL)oqk<O!GmKz9ui{|L zcNyt?E(a$mn0&)PTD4@TL<y|PNtD|2(hol<Z+uFnovX8H$mwFP@=BXnarmTShroff zuuX6mCaBZ^)r>fU;e*jD#S)gvF&rnydKCON2ps_DpQQbqZ>bnY31vD$C@Oj>ohtcq zN5=F}oTuL%?0cVBVRgPh&OnEWNQ8uK6O9z!KGV9EUL8vW+a(}fkq|ougY*;AeaI_q z6q+{4-D@ogWtak(hE?Lst%QvUNIsb&sZskrGPKgi>34_n=1V>wEgHxyFt7}_IgX{1 z$-<3;USVuhne&4|B%%TXPum1(REey$QOrd}C5z1+L8n)nXsUyp%!ffZ>@}ZK*{4vD z1TIvZ!=v9FhE{dEp{@~eEaM_0254}K-d}b!VXP)P%k6@t!H%e=fj%9*qsJ5B#;2E0 z@6pKAbi0y72ZcQLSZl19bl|a8!K+3oKLMhwmAy=z`Rj-o5hIvg9%_eARb$)e>$21~ z;ekR{A^IUvyFO=l8mEF*CwkK`I{t~)Fq1z7vyNds?3YU3E0NYfN>4}W2sBtVE6vj; z3k9f%klOo@W>)9g2q+TrJ`(KN%L4r;J5GPvK06vo$cWdaZYJXiOs1e|n_!zu)*lSF z%eVv`|L7+s?3I^adtbUgRmxlpR?1O)tWxWs#!2>XreC1jWlI^jAPTBZs^>uZQ|<6! 
ztR1FWvTU`X1B;ZFRhFlL<QDkUK}3_BTd;ZEC<Y5MlbmiJNMGXz-@o>RYbs$SaT+Pk zi4qTb!A_$(a0k>zRytnRLA5q7zT&*xIx}xk=9B$0IPMPIL$XyYp%%y)suLVrDPS}Y z#Z+?T3gnieU27dr&4Rdb8HU&Zv&6HYf97h=9Dr`kW|LJo%tD!8Q{{OYVumtKTcFge z0FQVv4BGL@eX4Jj=cUK|T9?M%fsF0mfY;oLQ)KThG>L}wUJ+MsNZLbAx2dl>O>d;Y z8Vrq+qbMqsGBLO3X1o6WcL3`syPl6s^5;|sHNTrO>Zr-|E*l4fnK-A#<*6^a83hn` zFTMVOeeV+-nwgn^yuixP{rt-xoT}!lPR_*!$=eVN*Pavvbjt8%l~NAcK<TKR-lxj$ zz4tyTuRp)|#*c?pV!U2HnSSxp&&wx1{L5cHP5<ie7VZgGPyZYr?(=t3{}o?uxK^k1 z8RlOuYp=y$^xmb{?%Od>n5SX3Yl8vM$w@a`MIgIh{Ou23^Eqcf5fwsgb>eK$ht@b> z5*ZpA^yjo1#E161>eHCMD?ZnFO~2CU)=LM#iTpfGv0w2ux!?otVJKCcFtQ@<orM^o zTkzv($W%L{+(#W$#D?bdFL(FRHn)i(817|dFWC2C`|tE;naC|*2jtFSr2?AAG!*?W zr}vKDdN9H-irxA-u8A;!Mx8ZJ+;TPsk3O1*uZWK;H+Su<K4^g1wSk|!(bO?H^*tBi zt`~5KeevgiTfKI#&dD4Y<mhTK8LGcSm?gT#&7CB8wY~yGn_mUbAJgpj`|5j`OL+5z zx%r~J`Eu{(%kt)n=}$Y{GF;94-{B5rxPv9Pg#XSr+W9*F%aUiX;Z^z~w+OFd!Y`IQ z!}Qp_@y_(+;#*bUKp7nTtZ<9v?p___lX2|PR=0^y{?mWE^urH?ZH})H*2~#7r-I_r z8$Xu4NWqHN{UmFp3+%<f5mzb73z%z=N=1#Ms8WBSkLEQJos+J)(qS8sw8k+9n^S*q z?Xe-&<yK5Uh=Xy<U@Crrm`jf%2DTUs&wlxv<2OH2@p);*Qwdt~2&aEIeVo1P&uYcA z2VrM9=Ljn}q#}5%$Ve+(W$+^Ye;&0-g`TK%OF=lHC$7+e8aQ#rFY5IV%J)7o=sdiU z^X%yQVEH86tyU9H|AGcXZZ^7Z4mo{eLwz)GFgkPA9l!NZ@C`iB7i(pXjF<DsgkTeW z|1xv6WDy5Bj~wjlDx@>%%CAbN&}YqU(i+<g$Wn3hWx<nSNP(;}cN@kdZWm{GmV&`& zurf_4$@hYzal-6CU%c{&kamzILZKVUVaw==m^y~Up*u&FAUNvEOh&<IqN0AMkwP_f zRimm5Xh}YvFJmv#<emNahyiBUvSLm?n>*VN25R*t{jwBjng`Du41LXY<I_InlQm;r zbf9Mon!zI}NST|l;!CWgvIO@~j4n}Bs|hX|rw---jTi{-riaY)FWQZSmn|D>(or*f z7(=rF2eJ=_i*G*QTxBOFO?ZKx;VtF$=hNJlA+-1;8qKm#b~Ygrax%%-0QIsa%_w4+ zE{S)ap_cI)iBT?(UURMxtkLk*9liNcEVyL-7zZ4a@dsF|(y6%HUKs||H}MB>w7nQE zBlRQWh=;cxOjF41S*JKoL&nwkb7yFJP1cHh(TXsx>wNswQce!i7-=EW<R*4`WwNTA z=t{E3;~#wl%f1{@2jB*&HPWEN#a4!~WqiNN=}>44G;*U)2O<_q2XFrrI+B%?qGb8E zUg3d=t(>}=*NVXc^o(4EB_Wi+KzML$7?wsC*SNKqYxR*ZHRVaO31KcWQL!^*gt`@T zi^}b{u=~=o7=wX-SgQJwZmvqlp}*(FH+~$3yRDC}JOmXIuwrDpSrKk))MeotaDGI| zich7;daYxn=AT~U&`K$8zdJlIWRg*wUS~N-4&%{D`LH3qiG>_u`_ghCB)|Pg9bA~1 zLM{oPBWvrm_xG+ppFZH;yU$2~DJ?=O>J*_u7h9(~2JmjUKbJtFlF$%a6K<$+%K+j8 zP+^geJv~F8I9V`+55q*{D0ah#8<r$Ph}A^A&$e@hiFU{hPyG3mEeR(^RbV8j>|K9W zDU2gjYgU>Xx<Vbjo`f)Tv{9y|D8LsdKS)v~6@AoV7@zQe5Tl&?-Jy?!0aED5L~P#s zXw$&u3u1bjN!<$S4m!6&w!Jb8uY2>w<<~!8rl44$3HY(W^JNxBz4LKg)Ap}_(k~aa z@PwCyC3RJoh9T97V#*|VO1{o&Efp8soR>m%A`1|gF;Y&~<2N5zZrmrPu5T=#rdo)B zdWANzeF1XIv`q}g{_i{n`H95mb((R59fwguJ15M;`fz$*EBSGa<(}bQnWK%Ji=0?e z-ofWJ#V(`824*!j<uw~a*H@8-(3YY;y(Ep!O#8Bfq#`%KZmW%|XJar7XYti1m2TJO zGa?ix#Uu0GPVXJQ^EeHOzW%(wMJnSYUajH=1QT#PmjIG2aNSi?DO7)>AhdCk#X$*s zFam?7vd3d2BpyZB8m83%co8dY<Ru|HZD7V?fS$Uo2E(*XOz+|1h!<6w2VJd6QFz*) zD1y@+%9+M9e2V?+&*^@lwF@2EVu<_!04sIca?JXwJUgA8^$eMGa+O0E90(ramSIic z%||*9-G~wAHce4aL}<&WA<NmjX8Q?bjL$jc(+%!?#umfLr9NcT59lhl7uE-1)u^0$ zvh!Huew0l)NKC30TXJ?b^S>2ZLO|)c-~RYEE|T3r4%DX2roi3mOqLUsI9g>((WlKa zr6na|T?%QWCP<WRN0&npt64VqKFuf1&6oY3(`33xF(p5FI;vPQO1Z9*+@e7WnSW79 zo;)jcRdpmWX5qu!r(2HRd{CZsWHNF`Cj!JC<+$>91sO^>C6JLpXA{S7U64*?<oHeF zX%+~E7?bwBPcnztS8$%nb`rMmP+mR#Jh=Mt@sI4ibyOtJmhTIVJ2dXnNaOC_xVyW% zL*wr5?(XjH?rx1cH17WB-;tR!bLQT6-@1RjML|_&WUi`=9Th9~-jVhF1lRTs0;zX* zsjYB26@goDJfZi*ys5O9q>CD)5C%Ln+q~`*&J%}DM)G=vcNBWe066zga*Gto)D+Gt z^5rn|-ozjatB>({`y8FX<*r6+J>R<u?MEPG>SGd9$c*(6NhKFPkEl_g(qmAK95CQ8 z6t1MLBGU|&RCaFFkjd^1r^0h^F#;TGgFg)V&Uhk^ec~dr3G>iV{%*BKHvCqBu-N17 zDP`|;T(_FIX+hMB5eLAni5yu0Qs?6+$^D3zRD@9<{MST}2OUrBRehUm0HE(8<x+;( zVg5bvU#XoWaO?oKY+!83$fk+}o}VAe1Bo|<$$hY=8NIF0DeS(pxT6x6Cl;r}$jq&& z02_gCF2Vbu8ZqVa1IY&>y6Czn&|o^~gIRfE;g39sJo9fq2ysWpOs0LTBd8|Rs;!Mn zyCWpXVVq@lWGVqGh;3MuIdGZE-Tg*a*o@LB4TfLkKcsiGx%z!*9nfpO0ddH~psLNU z+Sh(>R#aLPz&=O(E1mV!$-)RQR>y~Dj;P>ZD1O2rzpPCJTg*Ife=ehD6V$a(bWng& 
z^MV{#=o2mzjK5$>_gWOrV9GUuuS-7tV%nMw4kM&0SphycPx#dXtO!X>VP&huba8YO z8Vg4YuGA6e3%P{*!gp9S<YUe0JL`}%58gw#_V!C%jz}i?NWAQ%U)m7_<?Yz>i&3^* zA=qq~O@h_zJOYr_YWBQ4325tb5tGfPi)oo&uf-2c6k1eXkA}|BeMa==98XVh4UzOn zZR_akcjKsoRxfBmz9O3NDUv)ust87a4YO7vfl4?MbetYp>=_ac`FJoRf#8bx;T03d zV|)p8$m+BDEA?dF!&pL5T}6?ivxB>MWW6Wg(o;+2N{D53`b!VvsEMFIa&^m6FyCb$ zjKsQM;j!{;UYp%q&bO1qaXMayGHw}fM>{G=0|C&k#O;{g8LlU{P}zU6s7thTq~Wwe z-eYBWuN~~!<36=wdvs9jXI`Jq>WyERYY>)Czv<bCEqGy8{nE)TkUd^f0c1dzm$kpK zmVRA4rMxGG&MZ-habihhB`e`hP4olXsyXRr&{v{WVSvH(FMu6WU{$AP?X9{5J3N-6 z6F0C9&au~a@Z-i4Y?|dgAkCbR&)!xkJvu7OdGNLUW`OW*vue2^NU+pXt_i38mPayW z7Tct$*q_H~SGp%nx<`R6cb;4#2h^Fkm<`^r3cXW>+E|RoO_caRadlKb6T#M3T}+i< za^|jNHE@~hbex8h6!?Y5I$gVn%*^{olzw;y<Zb#ABOM<w#_`SfhC)Fz3tw*T{&WmO zfiIdreWr%UX1S*0(LJK>>6R(hSf2W9{ozEGg)i(zO^Tf)WAgUI;u8G%MdfNZd!==u z0}ITIJt802%pTG%S6Kr=<wzM^EsvJJWA$IVl7s7}N&bQsAlACj1BORj<Fc(W3nAqH zAtrJmsEJDn$TrTxm-_8^l3OU2D08~g>8vwv%&MgBn2tnoMxRkKciov)mLK^@{)Y?1 zz8}win0tkd><k7Z=c&FC?^nf@iXr)t02)jQ7hy1vj`@)U3ot&BFPLfi;5hT+Cw$vk zJ_W(&zm1`Oo24lqAc!vzzX`h_7xFmiT<tTgV#RR<liKEZGv3JI)S8DdfFCaY$R^ax z{5c&DqVCdl+}-0bZK+)d&RetWVnra27(m;?uCfK&blIjQ{4inG;hAxwUWZN|UJ??6 z*xE+mAaAat9R_=acJdpEKFeus6>h!XKLCP90s%7=eMOe$e1=~EL<&hs#Y-0vOSdtE z=j~OLG@1<&%gpFlwTka(CtGo!qvt!=3PqGHE7%%X3g<q_Xcx^lt)m*3glN+rtcvP$ zvem0&Y3p^r8|AKz*T)&XN^>b8gh5786ulzMVSL4Kaz`35RfFSx0UEA`loaYM#+WwY zJqEv=KBzps>H?Hjr&#L}O(x{T7^!8l%kfy!uJDspHaEC%UGk>l2`1SlN}Zj;C>w(r z1Nc|M#K15`DOdO$g;*OL5!S%eegAgVNO1VOV5zCrJ*}G<pC;G(8b?tn=G0t%BNvw# zm`{_o5Q+@sElP|cJ*(RlpxAWW6>HfH#%bxUz8BHtNnf}Vg2%Teol!2jqv@-5jY5`H zU%T^;a`b6pckA!71-pO<CZRHAbo0@5HFVVrAuG-@;CKFX_>m-taJknFzarLAtIBwl z>W&%y8pf43SEi*DK7!Y?s22L-k6HZfmw>Hl-KW7wTN3Pnkn0wCY%5KEOAA{cRUYQ* z?E$(fvpy~ja}xxJrKfF7T*W7NuuMnG=v<u|M2Hi_q;dRUs<5-~@elPE@Pdn3v(JHx z8F}m7b<&xn<HoTQDTGCdV9YU^P6Dh0%A+we@rLj)?IrWF^d$wRK86%_nWzZJf)C90 zYHg1MQ9A1J$#`%^udFHt4%>6_*|KfU?uHc=l@8HaRVzW)3Vejhbt!%RYx+)MU$wqy z4f-e$1hJu{kZg$_RsV#-iz>}psal4mxSOVcc}S%mxzDhRqmw`>LbCda{B+$M9arB0 zQuNKRjVTFQsBk*Z<~@CIFR2&R8Al*1*B<f<O+Jjb954C=xfi+!8gRic!+t<PWqk@G zF_h3l;?1zCFz;SfrG#5+X4>NKK+?`Ocva0C^cyN+LS;xsu0^ux1xbnp0P?!PM*i#P z_+O;XCD3#Gk+W)36!ba`eWsa*$VR=0P>LT^)hLa!ih4xNRFQ!PusRM@@;Z5PMQsWw zom`-Cy`!yHV506LHe9ljkGwYo1|YI+W<-U*UeMX`w4njT*{Py2_AF+M;ogOfKODZ- z<vbb--1DX|W61bS2a&01@I%;Rls<Cx^VI@RbYa;bI_vdY$KQmP`%Rw4*^IjyzZ<MS z)L{`i+=J0ur0E{@Bgv0Jlhekkruafk_^y=Str+j1TkIFGX>_l}h9na&$~=WkAM+0< z)i^uEk8@02IV6c*Nw@^6Evl*})Nh*_f+TB+<rr2sk7eCea-A_p9P^{gqzI+9lLfMR z5{QSFX&jv*rmF_7W%m((*%nr`$06ZE{`eVW8KHF^!m<2M#CxW5Izz9cgw@r;c9rl2 zsA%c0BE<-<fm`2!4T9p0G&=F%$j;?xG<%&FVo1J`ff!slPzt<$DK2;%?vo#eafGLp z;!H%MLY&eXRW#jWQ_#vR{ucL|@&jFMjX2}win$7WDi<qy{s-3Um(i_=JLdyU%~nJH zh-uB|!|_O}_Pe+I#SL87EEWbi+As5jlIG)9QA6@3(s%lb8-saycrp1_a^OmjP!?bO zB=9QGRy5~?Yjr!i*c$thz6=Htf~{^JH3KMU`WYKJ>7`p1Z!+=VO3DPq1zf@~D^>=c zyScgTZCSgYWH-d)4Wob(25+r>FZVg!xuO}^4(8xAIc8s_s=s8|K+`WJR$Vbw<j$88 z>STVBNN^MOUb0}1UB-aqn#lTfHF5#>{;)dfaeMpmoS!Vn?0kEw)w=KWyj6K0y|8PM zbWEz;$<!L$nDu<t8tUBU?0p?PvBtfVpS|69ALo7vq<*t(UF>{neZU!zy1%_S8n}O% zH|m7M-CaC_@nq^)J;Kb=`tk63@_2u_>~UxIl<tPiMd#e(6!^gL+w6s`E%OK?Bf(SV zeUEwL?Z1mi{#8=)zba`KkG_M@vohrVJ9W<=rOjVDA9@<b|4{eP)BYQE4?W%gfx3r* z?mvlpKKB2UxQCkG;HQb6fsn0^GamJSRq){bdj${VzbJURX#oKM0D7`+r7R%1vp#k| z9-C$uaR7k-#>Bs>{l`Q))_<y({?ZTqIrg8`OZ0yvP5(F4OaEfvf7dYm&n83u&!VL? z4Phi1B(KR9gB7x7(u7iFzkJeQB#_%GMKyRo{RIEg3gUzUlvO=y0SrMY0SjS){5w5< z#VNixc{qe_Q3GW(VS@r62(t?A$5{68yQMKpdb_hR$+Yw7)(N*X&ih|>hWr3Ncy-@? 
z5%8dO;@#MVLUa)}!57C^W=B4!QUCyv@U!ylNoD(E%m_@EXC1~+?NJFN+LJnxq6;V5 z7t=}q`Lh>EjPcG9Yl&-(gU%Ccshj^eWl}2h=hT_($H|XEG!+rhQ4Ey@W5~zbbHx0^ zV!UlXwk>&LYLtieC$iScI*LT-RY_%vL@mbZ(+c*}4$Fd$C5j*Te+agj(~SyQd_Elp z%$vMb7?&9ftrFEqudZk4sc1_ppAb%8-IWC22z%~=kC#@|YE!(?feVX`GW~^fDs-O4 z*SLWS&Ok2n)d^+A**+lpj<XX6!(O<-L`?RI(?<QDwJX1?J9gM}P6N>ba)}Ps#@{6; zSTrS+i7dWOq#`v9Y07APL|1^?w<`ylNL<vhEKLW{P$J`~^LtI|Q&AL=@*0uk8tiLh zRK+b=i?cSra+3OWZ0%z;At5g1h7cntxgqua8)XhvD*+m47A&NCXDvd#oez>%Jc2<v zG|mXSND~^$MZC&VSB9k<F{B+x%5=%U&jpUmk0W!CG>rlvjG3R0MQb^`0#_aa(R_gB zT$wSd9GX!)j<66{%|a&X5eedn5w*>@fmREw1zLu6-R;Oc`Ew)NI|}?i=)ez5(d4*W zsO4JG4wy)etjM~RjCa?eshmX(%<=CeRRq;mZcQ=NEDO9G%SQts;1c?^hqsS6{t_C- zEsHWP>*0DH^y6$k`mWy~3x%vuU+-Mf-MIQ#Y5m-lB!s{9fBZO^mEqdkcO1MG+Tu&d z)$DlK7>BXX;cAG*co20>VBb0Yk;bGa!QAhkkz`fvXpli1caEhUxt=&+@@ugcu27D6 z6G?Zjjg3Y|+{k9N%c}azDK-w-j8oX@4gBCZUo%0Vr)O1NFCGRup<dEmex0QV7o#mb zWEj%3y{z;LE-DWWG^WY0B^{#=MJS@H<X5iwTC&TIShp@uJCG|Y21dQ`%Bxr<M_aIt zV*aSZrq<E)fP!}@UCKlW;5;MEI~XUzFG;g9vW^JUjgQcq1%Y1cC1Zl8jvIkRDOo;O zj_VM%DO%0!FTa)?N=z?e@LECQh|@p}sXO#K(2OZogHC~uuE|z<x?#7)17^s*>6qin z2CpNW*^0UuDh942o|u`MzO?d;WUQM6=k=c5v*m<jVQ0Y$s?rD3#)Jpv{p<g7;}UZb zWb)g;8^Zn}wxp$@Vf^PHM#uW^25FlAgF%drk?B81>HoW9^z*x?J`eyn!27=)lK)il zFR$~TTn^K}vxenw*8J}@o#V=*wyR7?YdhqY%-BYPKEcdW8fMJt_9-S9t+Nu$3?M4v zq|?O3O-GCOJ~1MYU-d>s%uZ<Y@_Iw<w7?Qu{5LjH{02}ZF0#8XfaAoot}ejJHgC!d zqvpF-jM72M=qQuwmySTi_n#Aza$#}t;&TKYQ8pgQu9?T#D6>W`F^16`evl=Z#^Xtg z&{<GgRmweB5HtafLQAkTMp16vjDPK9rM8)$bmnn^n+g(%t5@G9b%d}>*-D;Kw99R% z5HVt|fCvXhgA+XC{07&5h(O-t0YMN4btX_6X6Vz4ZMbRV<_Q30#ax=9!SfYmc1s7G zYQS$xw7VZ&wHH^vDjpbK{5i6hP?siI9NIbnP~*!Wp`V=qWRnwK3*W{nNef!#ihIvR zcO3a~k{w;yQ!f$ts_uo?)>pi*5Q#~vi$27M2YXnOzsZ|k0Jt^J=<#V|*IL+KA%Zm) zonU&~epT|73&4FZ;I$DK_wg_i`x-LLr7j*R$@r*_waJ4u7l4Zs#+zV>2qK7B6Wk+! z>5tQorkSZNVNp%8F#zB%K<x8Pt!t0G2FlvUwA=*>US%^lcfoocaNPl*22fZm+y;9i zmaIV=EuQHpaY)aLQC{ua5T`aWATX*t&7$3qE>9_Z7)-*FgB%`MVfZNkb2LbqTS?jp zpvi7QT$0(a3v=j_&rFgkxzIP#>P_hp<%4}^U}<q#19V)NxUWT_3r)O5j%)Iw+C>&p zGlM7;C}F}k-olo@=Ei2Su&}d&DU5Z#8@9s9qXR+PZcap1yG%*SG3`bP0`yWU1dIX( zYA}a0H035Ojq@E_7ugKwGCbLYi``)?60@hb&Qm)@;j_A+vWn`0L8r6!Sz{PvuNlD1 z>X&7d!^dHgw@{?@1LPKo=V8T-SmI!xJp@m+`y1$Yo6H^Z7Xewck}CaHF6raxSf~ad z{vlg1Zp>K=>?+u-={CdZ)h`}=8KG_LBg$FA8D<lJVJ659s_;uY;JHClK$C8c%Uq3# zjFO^=;p~Bin6fF7J`L7|`kK>=Nj@T!r57@9tcrJ3KofGJ*ictKR|WovA3J1cK}?6R zWYH;!@NCl@FVYn&e-r7}jw4={d?X%Ais+cxQXo1uaz4IrlDnu8&=)LYlWO4f=UcRf z<D1Lu?%r|hb=#GSoTAB&)a0v#Dx+AZ>E`n?1zhLO(6u2Wb*TXc*J;e+QaFo4jwf9h zd(2LT5C|^rp!28+kSR6sTK{VI>TPWd3JbBYbW^x$@T@+IQCxTuNh9DCXgf&1lGX3f zU2?cPArZD!pPuMIW+5&MMft?|Q1dOL69O#_ZV!%Et6ldYMZII#YJXG#*%M#FjYh&i zVZ<`y!XeI=JPJ4Wxd`rkDCZx}UD;RCeynqm3#e(94IOTF9K(vxoqnO(Iz?8^QL~E$ zp(m?!?LMg`FG-VbFefWYO+Pg%s>!p55}oCwv@`bS>ooJ`*Es|_)bDYQs}q_OR(%eG z7d0X!@HECZE6U!;M(jaH?c+6{72jvd$WAa4PI88=Lf*yd@0T#m+k)H`%T5NAdToic zwkGJdMI$JcejQyfZA&-D7zL>f=0~pfQ+IZHt}dEEhQ7GSxcQW+k6)iS#A_mVkZ&H0 zFs~~Nw1EjESMDpQGS9P_EyQPb)s$g=Kd^2bss-6){PmdQM0LypH86Lruv6Kq@6DOO z3<Vxy!_i*(95@7Pm4=OuxEi&3CKb)yEH8zy(oTc3C?MMcn9dc(ov4&8np#4U^>SEh zG(@Av#ySL(!MliFdxw|oVpV9QuADZh#$aM~@0Kum4XtPZ?_J<mI(5@%mgZw5tr$7V z=3K-4oK0l0fJmih^s-TOL(iRBr>=80J9Trr`w)F%96|`&)T4IBhn0DO+c&!-a5jwQ z5hY%o>ArVyMsd`fxtpvtuQA|Tv>mmb8rH#}QWMdMr*OOkUC?=|58P<hIiP3?!Zl78 zDL1QFM%=WzZxGL%xv8gG_PUGiM_$GJvr>^OD{tYEyCeNQ#cf%tjJg`F&PAkusu~@M zk3E;C_M+-aBB_w1=}tj~2{U^Q>ZJ3{jMnbodDK6S<u4D!$i(tbk4pcKZ0Ju~@HdEo z^k2jRX&u`Sy1)nE;P3uO*1*on!B)?}?gK^ehmY`)HSk$|zzQtw{~Y<_$z&m^Ki0D} z(fMKJgs1k$-7&M$eV`oaH2$y%{=ruG>%IR-L!@o3^yCce@zn6BrTGOvu5aM<!A|%K znegN9TfV<<MesEK#asWWE}tDO<lnFfTwHk60#5cqa`u0?34hKB+0o+B{w=%tb5ii5 zs(&39q^F|!FzRmx2r_&W{-Qm6)LoGIqrmug%7P%vM}hV4frTLLN8ulCnek))A8DZ= 
z6BR8U<KI5)!^VH&AO2K8#Ny+FY3ct2EFVqvQDXV1&z~IgZza}`(%-KoVnP2=C5FE& z`AGWyrM3QDyu<K+C*Jv6%l!i{!SMI*^ncFO{sk}bFUe_tj}WXE9y~zT`}?e@)iDAM zEt-KN1PL)>#o;8N#_OSRH=t$ETXCv}yneD;-S=_s$`j_^l4r?v{T)@v8=t+dX7cAW zL2;(tm5zVYZ~rp)Pmf_{{%0@xt0Di<i~fP)_{hKiYTf@=D31R{vi@Hh=I`I&|Cfe2 z?e_kYr}yIBW^Zf)EBwje|LbEC;Fx(+^xv8N$C^JTGSL5r*?)S>e>404Ru2DnyZ?Lk z{&xZi<KKJle{G3R0?8+V<dZ=1Ng(+okbDwIJ_#hB1d>kz$tQv2lR)xGAo(Pad=f}L z2_&Bcl1~E3CxPUXK=Mf-`6Q5h5=cG?B%cJ5PXfs&f#j1w@<|~1B#?X(NInT9p9GRm z0?8+V<dZ=1Ng(+okbDwIJ_#hB1d>kz$tQv2lR)xGAo(Pad=f}L2_&Bcl1~E3CxPVu zR|FD<e`?$Rg+TI`0_?x3d;U_^{X?4fzgb_H?n8_A&-H~F8U95u_dlw8{;QnlKX4@f zGn?agJBYmca!Z5j`(>ofMALnmdkeb{JFp|TCB)g;8LU6b$Lm=)rIVK*K({Y@A4rfH zN&4Bdg2InjX@#lw=bN*2U)KF1`4Yo&)%pecjYp4-6uZk&YLdOYw76JXS;-L*R?>ed zbhde1dl;O&9p2bZ)+gVQBT}~S>`CykDk$k*v40J5i4k*^BY&bdtJs^<9JN9twLrHz zOh3g`Yi<7Nd3!f;Iyi~T?f$yEx7v8l?eTPTJ>1Pf>bYD~<w9Pc)&Gm~(We48_KVMt znP~BuX#SC4{uO_56yD-Pfx_d_667H(B%5wJ%>^!jWg@=C!YPWT8LpBE%4#CKTJjug zyy|V2pRY~^i|Q5|vTO1EqrGOnv#Zx?k@p#$Z%4C*S{{$+8d4)7kBRZmk#`{@s}xu{ z5BjIVkGBx4bHOZg5KRt}m+0wV>kDEWkf$J|tHYUj_wjS6{rBt(<YD)iCFC6@=QUM) zR_rb6Z=cxa5>Cu6u;eg76KLxzJdb2IbR8ytDBN=}SmJ78W^~rN7@aSdI+6&z<-H6W zc-$)5i{{th#H46N2FWBOoe|LHQUEp*=jN6BHAJ>pgy|WZxLw9yjnY&0o|J#X4Cgab z;DqwGpkIdB8CF=_8L8-xgYHX!9Z1mhKe!MTJPrR<Fr0Ij98&Sv^0rryUEi>EdlwDk z<hR062dV*36-XsL+X_DUT+o1Ob;{v!_cU9%?V0mVvKL7mnb{^hW|qKsL4wGHud6_8 z7)7M7P=L%3w{TN+IU!1V1Qc)C(V;nAo21ORoxWm-qFnGhiA)El*9v|jA91VzeW(nl zs|;bN2&Jp&crfdDFmHdd3}>hWy)Or+s~CH%6q8IJ4c`R4a6d)ow^m4y73Ao-SSdQ? zNm4nky7v~qL!i(D%4TPQV|IsUv8%bB<@NL^^SNXaH>sz&%Cg5MyfP2TiEV&VWR%=B zbR=wSUU-Iuz4?RzK&62_eWCLTNju*vz0?>*fCl+RS_yS|4Rx+6UZSL5mP}5QKtY^_ zw~v`WL5}QvEv$xF=Ny~CJ%+%ds7hf>Xv5<+3i?aHC6=nr*fH;c{L7)jt1=^yfJ83i zubNW=GrsGm8cgC6S5$8|G>|R?Moo=oV4S1lk&B-8c2p~=A#T!l{bkp;k_x-AFUfr0 z0v|Mo(q3|aItwC$g#?`p)tnE^C9mUzY)e52<mL!N<k*4dQYFacyT0@Oq7&=RSMP5Z zM3*Vvl&8>3q&7*X)Jdi?Odv3g=_|yHD1Kqva%0o!5xmqj9R9jrj9N~Ye|oi>9-;sr zui6pL_BM{u5grm$61u=TWyaz_pv}KI1&crv7S#~Jd_xI2c05*wS$UWOR0=c#2L#A* zl-W^lYjoa!a!+z_miJ<adHvhLO{OtP2CbOFM|ws?o|^?P|45d4#Jq4JVwry~ICXI5 zJ3i@N`x1i>5k+m>TFHFdah&=HwW{I8ksm#|>+DdItQh<BaC1$Oldahk_#tQzI;aK% zL@X0rVtr5?EnMKic0l)!7QkN^TvGv(fV@Pf{6D87<bQ>1`+UINfD>Cfn?!Pa?1CG# zVl|~p3#`7mg@*Cb+5-yC1CR+={XErX4U{jfO1bdy?pZLIdI{)8UgGoewBBFXDXf&7 z*b!khG&xL<ZWPN?;H@coE@34cK)L%FW)bf&;adWvN09=EWQzk$<`KixY`T8CDW(7W z%Bha;2${n~UtKwllON_wy?TrW0eO$zYVAoyeBFA{aa!4Vdc}FFNoxy$)X3@}wS$yH z7)7IonlVn2?_wz%F(gJMNx~g)9+N64()Xo4$it`E;no<<D*Ugtc}?1b1-q1nF<HB+ z6??PbFJ{loqpT>Ii=hQFH`$6mY)|}42x{77S9l-04|4TAwb73su3nkXxzf_6JkPK0 zUZ1<Co6D{y=C()C4bqp9*Oucso#)+8m57ZJ8Zz45!VhY>-(N<%b_4kI>$7a~s%-o; zlnq2yk)|?c86Z?gjNm2l&v{FCznRCB`4!1tmm$-O=t^JY#X9at3y~=cH1jJUF-6vC zsui7Nh?1XNhE&klh$+xOB~HP8U4TKFg4mqDLY;#8$^ec`1CGob9#bnJZ7;mQqmTBC zyb_?~Ovu2$>##Q$$)KKmlDgDH*9^FZUu!t^twD8}DnnvE_9X{9xQX#%GJE@*UMkR1 zZr?^B??85t6nBCmf=J~txk<V)f+}`~XMWQwu`!ol3*OtJVS`pDs}1{mz~1;tqc=kH zmlR<#R^Z|~a{n~U{5JORm_=4(!rT<<R6~F$Vm)ND-|@)tvn#d+#|yq|zPO$jHx9bX zUOsXuNzP>g(kcq~=8Jtl1y7R<jKi~Rkebuj+hZ#8Br+1TV%zla3k3n{%g75`8Y|RP z&26NfheQu<{rlQbI9rW1iInJUO3(eqI#3y&C6}(k{IG-bzJWYoflS(s6mjazUmPsJ z>`g<lTcP3We|7JslU(3ZnP%1K<rLZ|oFc3mBi@ctvK3R-w9=e?*xRZft`D?ZW!K0Z zrGZR(*abfVlNDx#4@#O9W`jHt`Y{ABflCI>>=OX7Szm<I?kAxa1~lSp=Yn?#Fb=j? 
zdDg~S>xMaBDmBt6*6e=ZU6eL{bp7OqiangK@qHH_fFs@qtUiW>W-{*#Y|QQu*yae- z?!^D%Fc1<=XlRL883eg7?lx?~;zrWEg@maTzeXHLbk;1Om89W1rO^D_3D3pw9%s>B zd-p(*>wIu({?u@@uRtPQD7dhhDXykTJKY|x3Ss4`2$4R2SFvPQu}y%`Ihe<{tk*g} z&gi^v&0$daSU#$N-C4UMWxso}_s6o2`%1U^3b(-HwPusm){>PFyYsguO83SqQSo%K z$c!)xPm=Wz)Q>PWu;SLRm6+szF#&ZSZKt=73rz~`TOBND(cg|6C|wgE)#H7w)|R=6 z@mb?zG6icWa+&X@;m-{M?giRa^VbL=ZB|_71lvul8=w0r+3s??@0Z}|d-c1YT*pLg z^Yphspd2c;N0YIqXwQCJ(jKWys2ssQcTiF~N-ber6@I*JR+L@7!lmva#4aN^B3<xT zrce~x(CCH_2R<(SbC{=(S>ArahpDBH>C34XOsy*3CgHjIhtm?O+@#7uJ;YMcoMBR& zro%##BOtI68Yt)OBNXZ@6YeT@4w*bGD<XOnz$9ZOQB+XEw=go{BSE$^qQq2kcl8TK z?D;i6J~F<x(mT7d);rJ(gp=JvzE1@%M}i8(PGVsu#7OT7qQ`FH{tWdrq5Gs6Q^0#1 zQH}BjvOa7vtne+|mt5)K?8^)JeFm(!#Q>7MDH};OX)9u=imVu0FgnTiR;3f*`X6!l zI>{oHVWJMA$^)q&N&+Y_^7i9O_Y_DBVPWExo$ijh4sYxI!;}d@noqd8itm(`*X>?g zDzJ<{Hluh0qGS@HR5GF@lA|<|qXYnNwQk6Pu}ljeZh{HROyW3L?0p5s^zu(*cClmP zHD>BL?Cjg29-w=Y=D+Nwi6W<ovdjS)>uVsD<=P!rMpCVXPg=&!Sd;9{2b{vwj(n{u zeeJSxQ|yUQoK{g@RG9Bso|+!-86TZk9qi}s`o;i_j(&=Zh>MnpmX`E2B@HDZ1|b@f z5z*Ail=uZz6IxOjHc0F$JmNkg=ng#G8V1_t=C=0shR%-naFx|Wb%_*xRejE`P5ZFi z9M=d@6+w$ld9jnB&D!d;yH<^zdNAQU;Gj-}2IAY-Mq-p|mP+)ZZN+@-shkM&Chd5M z+)WAmb5cumI4)cUTr3Zp6M2{A_|w)%siDVb6dP8vFHG_0e&Z9?#fZ~lGB%~7MQp3B z1B8W2ii-~SFk!d18%=Xlv2xPqxG^S{sKk|LJ>T_(`%1tye7!XMd4?X+axg1Bg7PeK zV(e1L2>5zA)ix^C$IGKM`o#t~2pWno2}Zn{AaOv9&?LZB;vrb(q|%xFG<(aA33P&{ zY6O&9n%N&7ngacv`1`-~Mw=vGvObdf6RGUQKh7tHn4XAXrjTK%m8hzgs;-u(t(K^+ zkfo}Tr>Kr#q-<&!$7*QCE2<~NX(6S?L%kBP;P;b|O?+dRA;Lp*iE1YA=~~e2?^BhX zR9smSTV(`H{w2G_wnksgSbYaSl)3JN7PCxX3eS&cC_-jPS%eLlQhBtRch`R~Tlz@a ztp8+S|HDabnh`U7b^fQpF>pqw2|_f;e*9!jDLkEU>5+@2`C9*mhwHt8!?_!eZEm$( z4~T&gnI`2IVwI&Y*;&)90*(l^-whU_Eh^PD&eh#Fe(0NuS+2oXhOh%Bq5vlDn`pXU z9P8Ii5V(MIy849&7Mk&7Ir1anVWcO=Dyl=?Brr2AYMvRNo}6_2wAdU7FGbv0EG!f> zEU@~O)@*aVAHKYdS<5UVWjPVHo`#;0fsvAg)@GA?|8yJS?Y*C&`CzW1C&w#cVQ3;C z>w$01#6pXVjvfjQ>v(b5XnRdl<0wsE-BM&kZG^6iD4U_kIm6V()nbo3l&Rj18@)<Z zgz6+Z6!+to>Hrg1)#8nbyluI@0vtiCLt>SvR}q%3-;?)VQCmjX%o(D`TgKR`mkGXZ zl;RxErbSRnP<>Uq+*+&he62Kh6J3*ar@Q$mSzd_zSV)*urk@;41JFgRh%zLGe<>c_ zty}2LYlwq8q=Ciuk$##~@bc96W?_t040e31D6R~h=uKta+G{$oQL$r;6E-$>uu9m7 zaKLpvyV{*=yI82}=g6gFihT%%F8MjngZ(UO9_m9(9Ltjvpto$WJ;en=R3ssz-7avS zupk=<==$3$M!TQW42=oXjR~{@svsx^i{ToXovpWrkGTg{6%*#m{s}W9-zFe2u1$f9 zT;{wcy~zv{1j%*=DiF&~HPDjQ?2blcWtX*GOeRIR4$cSL!Wfk?RJl(3<0Ux8x!hh@ zI@~_ud_czF`(xARAwk2nCaRh!-tkB=64~Cu%Qi)t;wXsNBy^%g48T%?;waigHrWY$ zmC4E_y0!tbml!U(!5Q*7Mw;(<#7xBB$aCwycmW%L26u2VLZo>LI$FYN7!5`!4azv2 zmA-o%kxsc+L`3o_m7E`bag39b0of;^qa5XCLO}Sc6L<qQnQLT0Zls;B9l_rx-=MCh z>3Q*ceZ90*N;AG8A_ODAjavT)zNK9^hXVF;(b<}HH(MY|{QPBtgScIU6_s&v#MlXh zh>2U(^$l=dr;N<vH9<kN^<~)L?EdXDE|{UE!aWtw&EiM)_084Vva-sAA>Su4N+u`A z4k9%Lw~%dyz1Y(DEu!3tsoD!4zmN?-_C*EOAtkgyS1`MUOJE_oo+<iPa(W&=S%D}# zF_2_%VBZpF6RUi6sXODjhv}*FdgDh#{<kbfPfrRjrLJ$@DyF}WeIh{jxvALUI3gYS z!(u(F>RVBp+?_vC$5epGq?&ziB0OARPyM$BS8>MBkOLKbIVOX=Ha&!*_7EYMN=u~t zHj+q>BpV)Q%UOBRJqLZaweaO|#Dq60B8~!pFbO^}?g~&Tt#%7c%~pF5#_Kk8&iFO1 zk_?e=UaEi)I~`^2IR0_IWeV~q^^YgDt87%Hbs;!NHI^G9_({7Ktc3PK%Sb_J`avMj z54S!%*X7?%%n6P)c2a80z$zpr#wy07DBZ$u77rt^)h;f>LA2E2ZxXYIgGYgY;6Z#3 z0GHvU#{G;JS@GsZ*5oEvBzZT~hkHmETHW3Dy4TthBQ@JjF*IaqBDsllCCPLFmr9-I zOI;R3miJ|boHOKZ=kLjbDceD}s-lf72jhnk)80@FYz%xev>F@VK^GhyGukb&RYA^M z;5gC@Oo<2s4TC;%lPX^5@Ul=j{>Qne-o6|v-dAl50l2T|%#a{{dC{fut$|AXenQ4& z3Ft8aN{B8f)6N=H4px+t>a9tjhb&z1TMKYEky4WslTu^vC;^@hjGa4jMIIl+<*TTt zf*B_NIQ(${QK4&q5c>nLD#0%Y3X5GB3vmuaHlc+E+|H)<&=3m*nfSSyiM}4g96=kh zB8ag>Bz1C?IqBI9g*Drc1T|#UXzVqlB&9MF{<iUZ!rhh<s32VD+uduRFC^TqnxA6q zs53ZP-w9}gb;+a5FiZ2GBK3x|Pf(fuBBZ|bB^UYWKv4*dy|vBVHB^w8_!xPaxg~mX z`XO%n)**t}U?m?v2}A=}S%-*qvzS|~oO-O4s;kZQRCsFbtk_cfI13la`-!#JCB0q~ 
zSg5Z%@GRO;=dTT|<zJAB+{EO}cvQobggpzZlYP@F{Y&bjTvbKRZhL33<AIa79Il9R zQzG~~ZYh(SZGu@LM&&WC^#nOrX)$nU90rNvL8hav`{S>o#QV%rK~E=%N6Ybnkv}_! zXWX7{j*fq)yZcujsntQ3XUkn8HwrI87#k`I(vda`?P}m?HCLX;kC&s0S8m%0V&G?; z>hAkoK!=8jj{-yZ!y|9Y0hqfu+w8pE-42sRb|Il4@DaF}=yJJ3%vz@giJc8woQafg zJ#v-<>B)uK&4$|TyoZH9vgN%#l{Z&rx-~oj<>rQKtobJ%$3TVj%F#tdS4Vj8CRk>q z7$%SvNb10sR&pqJz9Ru|uvdS?J+ELMd^ir=Q7!_FWCHxip{fA!If(??ne*w2oVte@ z^$rKq5b~a_KA>+*pbrZtC-5yPz;W3GiaY#_k*22TN}7}#A)`CJc8y`~GN_uv$Zf?) z(x|ehZEfzkJF=l-)b$!TSeUDD__PkDHXhe+cYQBuTKp)vQCtsh*6ck`ybaS1Lv^s7 zHxY9s-lL1eJQvw-KUWSSurR6=FCKH5jo3iJXno+eETrA&z&qjt8#}_&L!$l7ghdC3 zp0-apyST_YV<mbIkUH^mDSix>i>}vy89JDQ%#FH_Y7hgAjaB|Bff`-yaIpL1bfYM% zX1u6Q9~bfgr7n>9j+(V_n_MS8|4c*d+lk9GVO@t+L4%R%BD+%8_ZabYU%v6>A&4y$ ztdGbK^lF{e>2_c|HOX&lOWNMn#^dyiSo>%{k?^~oz)%hkfwpz`_dfiBb?(xSxR<@s zqsZ0itFD^3lPe9ZfCXgw`tqdF+Y|BzaN9K)$IGI|`SnQ>+*=A=imxOjkerByo~n!q zziEMxQynK=*W9u~@7#wcW9WQa3D0*ufy?MecC<5j&O~7_jIxvCksoe}>-`pKiR9l_ z7^Y#XyJyYsk(qC`bqdx>Li4JTl&S3gF`edB%@DtKy`uRj54lcv2ZFKw(FU9h>0dvh z&iJ}t(!BbRm_jil%_8V*%PuYt{QW+zm8K>@w#2BYRWolek0<vHSp~a_kishPbi>h* z7-TC%crmJ9YF+tkU9K?$0}3p4X&<lO;~u+{zx{yXvY*e$&Q56<ZH{N0PG<F>g`o1z zPO#Ke-B$#?!!c1<OaVg;G_(@fbpXcz&O~O?leqydPIMsi7vkN~k85-s4<YZwNwQIy zWPH6`4@Jy)d+MS#Ki9Slns#!?-+M~QwnB&}kew*zAFmDA&e;Lki8=}oB#rl7S$Fn% z!Y|Cz$cM&Ge{t)E){j?cxVc+zV)Ye;!1)Hg)<__>dNSI$P#a>p+#l9;5DS%B!dm!n zaO1bD5lo3)c8(pGVBDSAo6^EcSN*tZ%YL}aZLeni;H;4N)fUnRQMT(~$?55EbqSM& zpx5?ejm&`)S1Zsw=FCMXw*JCNigV&~S^QIFXrD<VG`MxmaY~YA!#Vci{xSMz;NniF zW#_vBz;=oX8|%HRa0%<e8?ug%Ngw#Sj99ya`71!NSPf=#EM;O#3<tp#mG!0Rbxa5k z;>Ma5J#7GYbZ|Rb<1-fCy*DBf<0@M-JzJZJVfu&^FvjjA-t~ThJc1l!nvZ$pof+xL zAtdm(UHmH>qHDN)Q;*us-0-QXU43^WM;#d827&HiQk?8)Ce4l(MJAU!%?{FVbVfes z@NYk}4d7=oxGD$p&!`Vn@%N+&_vCx>P{5-*WO$r^?)W8J+3>sUKqz@GU(FU~x)L+p z?<|b9(Bk)k%giFZW+^fu<0>LgjznCZVVX4{B=(KxOb+28ji+Hn4Rtl;^@ureE(XB5 z?r)T|baC)Ez2@q*?7tD3ivp(|UolmCsBp^kEA5C@D*OzCk=o!mym(Kf(mu61J??Xo zbCj~N;1lJw5tb#>hLb7Js4tI5PgGNP7uS(;vJA{||B@d@57_CQ#amY=SDfb#809Is z9LYzrMApVc(Wy1UfgxCf*z%TxT4;H?a6_r<MSY&qs*N*RfS!Z8^o2l>@+3{p(ZXq~ zLh8wYc4uf-b|S=ZLbAgH&kMd9Hf*hH43{M$1jm`u#Kl=>=k2H3q*qD?*vKwas%eWs z04mHtyoOXM`Wa3}hH6^+i|x_R->Ecr%ot8(&u-T16Bl#(&++8>FyX}lL^{Jq&%{Er zC??3VXNe2jSQ-w~1R*y*F!-I`MJgeAe3u}r97S^>!kWF{flGq5S)$0LNwKZ{Zy=-X zYpB|rC(|}&JM{_{k?MxwcoG|efbGJjCW9Ag4O@@w(E>mzK@^azM#xAP9bG)U5|sRI zU@#mBH{eW|kuEPI?_=(IB}R)0%;y`B5yRdWcNDE;xn;S*o;LEs(zdt80)r3n)yc*F zle;^aRu?d<8)KU6+TQyE)ei7u{PlNl-ZcaJb=$IsJ7>nRxmISCI;SSPv8PBHdfSPE zg{bz5eQDxL>SJ_rJxsVss7z9YiZ=+1p*BGlUui=4{g5(g?h?r=H{O;w>OxfZ;Rvj7 z<R{8-ineCIpF~L!ogyX7y4YRYh|X8x*LbN<D9nybf81yyV5}|6hKSE$MXL6|RNaQ7 zIXJlZ{yuUVUf-%#oYV&!N)YNpPS^ge*p#8(G_S=>*J*z=fAx|277f<)1+CKa6^oI( z`Q&(hWFJM^6<m{}C&#?)HS9N*JS990Xfh~LNpoJ6*+T{bLqW4P+*qipL9oilN_+)g zD`G2zDgFyprl9)@Y2pp<P<y5o*;L$=Owbi|k><dA_VmSIL+pvBk4!Tocw=q!i#QzQ zmYFu;<=+w<5N93i@8H12hQF7TgKZW{CxC9aCF#)x<jala(gh-3+g%`=IWj3(S`qNg z6gx8p0YfvwA5G`uJuus1473Ou8Y!U^pd66g6;4e~Zl(BH_hormDFHsGV*4t|tarfs zbE=-YL!V;pN<@c5kfvCg$R^O%L7o{s!aUv(^q1NBQPT65(>dSh8OOWJ!>iE2n*1)| z!d$(VG=3(Ji)U|5+SMPOQguF{AhTqYT7<F$!kjD$LQ7J&!qt>2Yn#YH=*l;=I^9P} zi;7WlfY37;@N}iSz|+%P3IGCtmIezboN{on(Qz@cF?4=hU-!-EKEZq77rzR-s>mp| zKzG<luHPwFGRo@yJ+giCwW|{VclqEQ4%$S=SKAh&RYnde<_gMpI~JjPOYux|1!W;7 zV4aQ$;_43K#ihGXH(@siB_|=d2j$4e(4=SI1lknvH(U2eg-RF(Zwu@tN!#n_Xzk)T zrVMkLn$KWEGz?$n56$-P>0T%zqYN=nOx`Bph-PrSyF}x9Xz^HMX&RipxizEY?4V$t zrBOIW`(3+@Y1G*#SL5+}&GYf8<m4xgR=h9~BRVKdEWNpuCOGJdEKv&FGAmrHEX_?# z)oiUTUvT^8AQ)Z-hl#^Iv0HG2=R8{jY3k-3?UCiRLPjT`f+Xx=!5Qs%jZ;)Yk6nUO z1cOrrX}-Ju8AQJyfU;3*r$FDf$#68Mfi-$0H7O7jm$@q1JVQ9Vuh6lyhd1v+RuH!v zCdsk$_g}{pNCCyi)u!<E^qjU=TvEz)>e8W;U}FmLOADJTTAFEXR4%?A><>gRm!lSf 
zq-3Tiy^B8^sZU1%ZP}HM+L@6<otT9twN!I;c68L$H($G*?_Y&P6ypMhe%S-TJg*yN zm6&ywkh4%a*p!<xx3oZRpsB3`R!db?PkX4L*n1F}n^Z#Y2oI8p{D>G6SEzFLbqx1E z0)b`xSBW%r`7jD+29C#DzUKOuU?OwyiIeojNYKs4FhpSS*d#_D;XH)4fN1EH?jNde z-0m+<U)DQI%sHJxzIa&YA03l%l&~5JEAM#?)fPWk7rXkFqN}+FiCY%P+AA^G0AS)K zCqUR>0oy-&A1KXgd+c!B(cTmi-<fD>f{K7m7%3?oEGhzOh=UB#Iyk$U8MtDSacnfX zI-bwZjyy?%P%;CE%X5EcX=JW6Y59F+|NgLjwE!C67UjM~9Pm=Sq3dM5>^e5+TB-FB z(u|UQOg>KbsF%{!ILgm4AIRGnDBR~qJF-)1^SgIPY%m<P$zVn5P*mzr)VHAqL;DD@ z`$=PLVMMw1tas+QJv_+qaUWC9c?~&6Sg;CeZVR?z*vt~@LY(ByWbC<0iBV5l8-oZR zDzCb_f|2B-<nAbz1*hcRzD$vSe_Xq#Da|@2zcw?=I)*Y90jpAKXplx`>8x*eF>^h= zvnV=gv~sU-wB&YaXuQ8N5w?eg$%M_$ijGi{G(C0Fyq4E{|1~ypJhA|44k^JUcvl_e zS#^zbz_$31sKf?VsmM^DhnlEkNUZGal&gq(oH8}aIz!qNt69h1TiTnm4OC}$B4Eoq zACA|2$p3xs2f?|6B+BLH)&zvNr$_Va#qt|J9k_;nc<!FY2xHXkF6wWEQ(eG^Gn+&3 zdUaD+l!}UV1X^^WJ?orKk$&8S@yng!LtxcaZr9uT_{pA*X73-^l>5>$ej6W`V0VOF z7;kmlvN@q@S#N#5S{q%CS0JJU#xJF7O;Hl|(9$^>zlumkSUM~p-=p50JT>ET(dX5| zV+ndiD1Ot*9P{i@oL^a0lUIW?oTMi1p5|T}o7b3JQ5BvK<DUsuAGrhr{6>e5i1TVJ zxdslKZVj7GqZpn=uiH>ar16Gtcz`Km|6?;VBjaU`tIntS7?QvWTb0N>B?oiKNA-kc zXKsp_v-<6OqmB3!p7Q({j=7t?A#BnTCbQLB>@Ymi>GdIxgAKx|{#2bO!cXXn(ne{z z4ylKQcGriKfp*vXKN+|)!q;>tA1PwPt*;wN8!oj)76p3xCLTZy&Hf=E0Ip$^Lb2T5 ziw9WC>06JRFmcsjq&$;s5jD4W^gr!o=o|22>xkfMxhQo3Wtd`PK0*_gR7LPoLmG}5 zQv6QD>bJlk#@M>qv)BU?w6|n~+dD~>L5h7y`JcT>UOIU-gG7vx7<3Tqfl)ahQ^%=+ zinnc7Ji8X0?LLTdP-e%V(}>U>8)a*kKv!J10$bg-W7&J|nLq2QJVT%Ee8C3G@E%ef z&x|pj;jN2;Q~gL3BYifN93~?uD=IhEHCL9rJhOYMY_Wbv!n@t*9c<z~wD0<CZ~E!; zODDy*HfD#XHzmwF<}h)CC#P{$g2_%+V2|XW_kG-2&%*(T<hRY(fJ({q_F}5P^XuiV zt%IXfx~F1-t?(K;aXko4Dm((tN#kg58=X%ckIsKCR^T&F;lkMenXx5P>`!#44g(t+ z3X^(SnP}qKaWJ<1W$5`p*}=lW<5h{iR*Icz4%d>~&Hi~B;TDC#^<r}?mSl3h&E7f` ze_J{~#@1TOlJqF8@;IZDtF^=9VBz?VSoLC&#wRku0@WI%LCA<j#*UhVs@`!|QL&dL zC@jS828O}mhjB^@?RR!hYHgPkrNk(uK#~4L70xJ``ApvVL|zb);^TKM+jS%7n+JPe zDV7&{fe!H3Lj4sXrX0v;;P9RKwv+kxlcGHJv>&0k9G<J}9ofOcsIv|A*FOaFjg%T2 zBFDtv7skdG6Lk;n_OR2vUntHq)1g<YbH^W*N+P@;Cvpd+&rg55$(`dmc^q8o$-Cd6 z&A(mlG~#YF+QXI{JAb5HkjBscsCP>Clpps-)0Eeh+v`C#T=6o4Ywp1e_pwyX9ppiZ z2Z3upV$32|h$3|3pI4wLg?pS*mzW4uWU^XyM$*@XO1v|q&a-4LGb62;icn8d^I7k? zC|?z(oPX?O%27E?D&A6Lg^gdd)bN$Wxo7_b7#YrtwZYP8t9`t1w%4ijN=U8tja728 z*EAUKIy_X$K4~;#-jAST%JlARZe!?dX>sCNr>)}?cadJbml5)9clA8Ca}O$(%5bZ2 zZH>(M-Psyg(R%;$=Q-E9_vBf-+vVkKB#40o^p7XPo`N%x@&3ev5%z-$@rvv(yOd#@ zxw35ZVq!^HTnVq4?ap~~d^D@SX4RaEsqh=|XsCvlx%<YJB^G;pq(+ap<M<?@Qer~Y zGUYC_WiIj(919cdauaOu6Yn43d>`J;;3o~zUw$tS|0v<c*SKbAw8=SIF<rfIQ5~;J za;{;k;q7gVzRJn1ahUTpvH~?~U{s$ax;4jgd*Nw4W@8)N-|y&<b^ev_yrWfUEPd7S z;bfa1K_0Zm$9PxHPdPwgHV$5<XGx?1ZJpd+c84csNm?f}PB~m#d|j}wAM`t60zQ0) zM&0kuw~LQ8oa9vJA$mj@7wdZNmairr8?y8JCD61QI(9FVbT(*~BK7n<u+B}(Y~))! 
zic0d4eU^vn#G_oy;PSG|ePg_jOm4g0OA#L1RcT;Sev+vuiK3&5l7iWi4$=oy>nk4h z?UVMC-1k0iP*eI|6D7(_n>`;ERBHw2xqBB_#g=K{6#GfI?8QGzX^|*Fha9_NF0uLy z6(T3z!N7SUx4aPt`a-DEl48su&ntiv!jZjjT{KR2c`p2{v*h**S+L8(5MU|b@&rXr z;#Q`8$=&p<0c+pv#tC@gj5~0?ak}rXT|1DHu;2rCu4|f$#(#A8a(934AzV4op2OKY zO-!UIe%^VjqviupJuXaXapgHiw1axlLzJml@0S=HOVe82dY=w^gG>Jo>Fh{mLIRnk zF_ZB{<EW0$30}Z0=tpT%i<ZaRjn%-?Xd<<tQ9;UaY3gyw*6=Ke>-$K$^EFlB8z{gA zN1fsf6QyB7*2ij_ryFA>yOepSt#_Nt4_EGd`W<l@;q8rZL2k}^^km$uj!s-?31X}} zOZx`d-dax_{s5<2v?`8anahce45wNgW3iJZurxq3Qd3dAJF%$L*7NNLla8638|PoX zFAF7Sa>g;bv;q0{PjY*HFEP&En&l$bHpY^couP$0vo!vRZE!wq${m<d<g41k#T;k` z0YSA2v~&**qrlW?n4suasNdwzuk1>2YjGM6BMxrQ_cqItEKD9JzY@<6%1d@4Np&DX zd@hiSkp(}e9XDr`$OW)9-wAenf41IG&A>f5Dj^{uASfs*$t%dqD=8@{C?e9`KU`5< zp{gZ!d2l|z^nx<5q~1}Do-pgs%?70&UgFA_U6k)C|9EzCuaM4cF!sg1{P!<Df{z(O zwdW@fArISkoG)^rQ1FBk6%}LdATLH@wXd6RyqmN?vFk8;DM2@VwYlAIkB3K<q_o=n zLLRA0bl2S<j~5drxnn!6o~OAy59TASj8K&H<<r4g=JF_VL#ZY;j@5o+Xs9gxQe9pF zs2^lWE3P!!$1-9)ec(8j^ny9;ueDir9vV^x%LG6aQldnp#|P>oEyYsIg~x!Ur9%O< zRm6u>)PZEu5oOYO3nh~sVXG-|vAGoSbX++wGSbhZs;Q<UCmm{OW+@;XU|d}63LOo- zwiv%nE*24%V&>z-*0iLel8os2RmOe&L;iLz*fdD9vi!>(8x9C$kSM$}*C{-i$a6(B z(Uh~kY<pUEdN>#viI(bylY=gn64{lo61{(hl2^-EOGkqjtg4dL+DfFd4CK2txAa=` zyFP09saEdWSD43ArAh9^MQTk?wJm|M)*_tC?fz6a-1WDcqw&B<X|30Xt@&qMt&Zo* z$5zJ=$K`%=vwRkMFuBnd@o^)dQ(KbcbTeU2ez{$GqmzZDHsJ1#JPDY%AQjhW`|xID zM50R2taP@N{FQltO5<#dtP)Z}<Q_b-;fm*2Wt9{?1*#bp%4}IiHfmWpNjV84B^6D( zBsn^x-k%f>6^tDf?)UWus#6w*e0p$jc6sqx&=D1`2Ls&{r~8w=aMng<trfeJW?r0D zKYt~u0$)!-a1m0z5_WS_c$vyWdb^S@edDeAj(yp0`0g!;W{`2t_9V6L{|Dhf9>3#v zvsk+WCT_jCvNFHA3a$p_;ZZYW_7(mCCgqNC?_fgELFm(i!j6&gZwP<+`t7T?zgBn! z9^pmwyKkPoeA&^_R$W<EU0GUHQCeAEQdt4Dw7jgitgNX2>cGP%&%o`x|LOhA;xfOa zIRv&Eh>ZN!86dx7bY*^heP8Wy&6@kBvzL~Yq-CUKWTrz+Nlgihj`H&Nw?`mM?d;5* zT}_<b)Qn6Fe3Dg65uEI-SWk+ZvppdteL>xoUw-*pV*l#ZOFBAQzVkf5^D#f-fp9)^ z_DpVe`k%0SLz64H^<%(%*Z7uGWUaVWHbk$|)>)^8Onnk+c3-_KZJScE@HBHl_(sR) zR8;jJNKuae7M;h4JZ5Ua;`;hGLF>MU2jCxKfCoQ=&c^%-{EL@|RsA6ChwJNi?|1h0 zm>3y|h>5^PepM+c&YU?TCoBEx!%xt~?mT+5^WxdP-5t~#b5<p8O`rJ6k?P*XwxQ*l zD{J#>D-h3Oj6I&837vXq{01W<0|^Q7;jcS;_Kc9Is14Fz)6CMu)z{SCivtJq9<C0j z2+7q<kS|_B(DC<MCoXwWf|G;ow|+`XU5%WAd~s?1e|z^zn?@?T=OKWFs^*%b;98~O z*TkjjOCf33&^x{P=y_;XvznPLcv1A6;a1d~;op7(0o2kDa5c-zsHu``s6@@m))vOq z{F<ftqVQm89-(R92ma{+4|AAm%p<1mzk;c6fBg7(cbA5S+TX|P*^|Asm8Jdq_RVV< z>5Ia`g7^3Kw(sA&_ZT(A+<&t7^4*)5<YFpbEiy3+r`Y<cp2fN=i`QpY=hi;YOu)Bn zYH^`=aDbbOv$w1L_V(tlU$?sa_U#*aSt%}F9&0Z@Ju6#nYbO~kOEy-fy`4wfTkBA_ zHr6l|eu1r(*%=}-W+Bz7n-f2M`kP68U-xAKg0oot`KO<L%Fj;sa(CU{#&VbD=g*#k zI9QmO{P@55lb^qOn{{cV{l;e3<gN7DQ89}YRb+#zPb2vSM<KnSt-V(_Z|}K86zE#p zdxS<N<)flku&GP8Z_lo;gKM4N+FID&29gimd2-?A`WnX5Vk)#>*xFjYb$jK`-Nnrf zQ25(ByN_SJx&P!T)F*F$+<X1*J}RB~+0vlGRdCrKK7yxx|I?=*KmGjiK>g|GpWl7> z2yDX>P(iE}8oYW3Ek5l%dkV}a#YG?f`Kro_{CvFqLxVS$=5O790N;+j7Vz-h$9H|# zMmfcd&z#pc52>i=UVwO~Z(?P3WoZF|?%TKLH#eu2mk#?JcoCTA<>3Z&4}YD8sxk`) zyOF)Ku9dC2ne};DO(G&9R0;!KQ2(MUJRu??Bq66_5K!pu?Zw<F^lwxQ^>vt;7_q)( zYh?jrD*+(^$O6{iB`+(@&d#>DvheOlEPrfwXz+@+yQ`YA;>8Q%aN?lC6HRs1Ab+39 z@zD<-K49r(pMD-1=&LL(EHBQlC@H8aD@;#Kckzkx3rmbl$TIgx5Vt8%vP<O@k|n2M zWMpCy6BSX{)YC9?wQ%zah)v9|uDv!jT~u7CuCB(<cOK5Hh^Vlkp+S99Jy5+5007^0 zr6)ZjEjB(DYD`>AY+OuyLR@@ed}3lkK|$`q#s(-nyrQTe54#Opl97?}^YdBV+<gB2 z$4BU-IspG@cek<rQbArey5<z-@7L_=iqiGXO<=yYxuK{a7iw`q9_9(E=I4}`6mD#+ zzyJ6Xbe5ebd$^caS5bl^oSB<_<?5A<JGUP`L1phz!_Dp<be7S{$@3y=1gr`wPRTjV zQ<wUeD_gHBt0=3eD0z6g!LOcNTAbb7JUGUoneXT2;lj#1oE=(vdL<n_O;ZakGfNRw z0}>wT?+7XX-|x@-=l`7gpEJbYeXb<`I88)AL4S^4F}tw(8#eZKcS;L$%}tDCr6t8g zh52}SB*aB!q%N8n8<rI2K7RBND{Szo=K2~q>EG0{qP)85axk0FCM`K$K~6?k5Y8=+ z<VA58N4tj)e$V1`c6L_49_w55^mJU@oC=Ea(5c&7u-wTlAvO{s-{O+OyZ68PJC(Uv 
zY0S)wuw^wBrQiVHjMOCfv2Yw<pZ&Z&73F1FS(s^Qsa+iGzS-crcW)UO=y|!hpf|}# zOTx&Gu9EOenVIOR&oMD^^3&4NTALe}7iC|nD9TPxHqzIjrKREG;fc-8&#$WH=H{fQ zKWA!WP?(!pUQ&>rlAx}tOhrX0E+IC%wuU+Np!`-=mZW5)axzk1z5-R@97#z^P*YPW zD#=5D!^6!9V_YeE&jbCCq9Q`D9Ur9Us}CP{pFex?_TA+4Bo!5vxR{8X3}#m)F%@>6 zl9Iy1%@tsVtw96XuNx@HNwcytBU~IYx&hyH>eLVE$q85?{Qid@sA;G<IM}b>7~XyP z^8U`wlQ-|C=Vm1(FVN6ZUyu-SaP*}(uS+g&=@4C4-M18-UTJ6N04@bi0UXSb(BQ>e zw=h?hN2Vr(galxhpkk#(OGE9u@4qJ@C1qe_1gRIkC`Cy{DI+Z<b@2iiV0gq-Ss7{Y z`NU*Y>=*StLNX5;1AQGWIJ3e+0+yyGiE&Xm87aUmJjTaF*jSp22n*8D(Ezbqo9o|v zb!KW3F)^{U<OTGxLRDFGg<}O96yQG(IKc<~ygdr@vdW6{lH;RQl@utcD15x!e=G9< zHk{DIuRnYG1dfcTsEC1)9^jFcmVCHPXL~a#DM>(}@ALsvhxBkeb?OvQ`uy25TwnU= z(L+Z&8~Ch=k-@>&-@JJPikBE0b@(INn(F`kkAE;R(oc<#eDl`S_$U=6g|LVS6EhPl zE7P4@+ut+*?+VAw*~JOe9>x5vTLy*(tSn52zvaRGd$3pEfB(Jd*E@zO4z|{WM1<YF zUC-aXd-?AD$c+)`Zc7WZhg<TW=dmz1y*YmqR|5wdD^n9A&}tTDCU6wcLvLd<ki*vo zpkG5{;Gv_V_2HAJFF$;&t||w2L4S^}s_gR$j=ug;Rw4bf+*;aRIVD{S?PFW_U%Y+x z<S`5ra4w9D4CXdAV061aJ_empTSKk3stjwsVt5Gkb<XxS;AgZgtOdkz26~<dM*<Q` zJ|&yrl&X(E{RFRofI~+BQ~+{3ybbpE0Knjin;R}2d>uLnoO%$w!yl0l7x}OM{%;V6 z!*7A2o<4m#IU)A=z+)Ioh>1M>*818CXt<ob48q+7B6_TwU0q&;gJbVt(?8Jj?)^K| z=#vx=R6sa(oNw=HZzdupGB(uzb$?#J2JM2Mf4DJ>mE`241AW-<F9gHi4?q0C$UqO! zVr>AY@*MrS)~=4xnJLhDLn8w=R_4Rac6T1Jv$423+2h)0=*tokVh~6^fBW{vjS-OO z#f7=U&ET|ZYN*xKUxEfp*cw2WS65RR86N?`2Ms)8-bG_AKwel}xO!{5v%3?Xg8;sK z`PD41-Mwe3+U`WGN@7MK;B(45=SG(9!|VP&p1@&IY4P;R^0l#17&4OLqw#-TYf~MZ zaV1@SK2b3)9L&4BIGiCS=a#dG&TV@40mVFwgXhlC%}k9SdEQ|-fc+;SAsM{Vd%!b9 zfj$6`huec&5EM9%YilZs@^t5Dcb^>hp81a+K7jE$A6v>1#XLEgnz~ACd^8m`RdPZM zRzhGu4UP0$+nd4C-@kVkx(P0)gteq3#GF2TDmN?rNao=bKf+-JKP!)$Yey^2Tj_3X ztb>3%yE=Cd_CvU8V5pCkdDI0kywgY^(4eKG1*r#DKRi5yi+Mr*^WZ(AVj^kjXqFeT zq_&*wZ6UfTC@usCf>m?Ag0Qg`4y=EmU*F&ra9m)PaJ57RW)iX}@M-%cRgaWho^2mq zL(7Gsu&~h3<mABc5RBc&VICq@Q5iWdegQ5{b{x4%#~%nt7(}&v6Uv{y`7y}P8}@Kx zZRPl<UPzE1Ozclz@|<{>$6v1Y_Ye~krKcuA^mLr|D@*e*%7BdE(0MX#ZH=7VEVMXa zVgBvgHw+BtpiOQ4CD?a27f0CI^Jm}wncWBX?!xc{10Mt_hc|LTTohFC6?#wu=HZ89 zjpm?OTbQyiGng40;QBe>l$e+pEKH5Dz6<77UO}$BwsvfO&cFaC^98w?Frc63<!;4x zc@72|(7=SG_{H^=fvZ<=GB3afnECp7!_Qe+S~%Rm*})F1bz*V?10%h)h1u1CzTI6c zf7E7eWvQpLEj%O;POF`heN1c=^xUq?t=}{o9li#~5RUMriu}Cd7QmiX);S=jyS#J0 zWq7@+s*0R~Jg2arv%e3#jjg4{)WnTXKYhgdb>IUd!-9!Ph<SxY*m-$zG4F|R`tCF# zC662;qHuEMKAabbn+`XNkBNlitf8jz3GIIlmc{v5pd~gY5`#U&hTuQ3G7q7L<OOjU zf2+%ju|fzRG&3_=-B|tc7#Elz9ER_0s;|byysoZRVQ~R2=3l*f1p%XmhFV2c`P=vJ zdAK;06yy#!0|?I&5YW)kiV6!Jez&Qvnu3C&uC@x5zJC1*R|X?8EC`~x(}d(OQg3gq zV;eGJPoFt+j)DHn*)vUbSUMLk4;LT*`GoYe{*mkY270)dXJKK27>}5Q1O{ttGc=~* zh`At~`mbK;?&-k8JRptq@x;YE#P0%v{Ba4f5bnV+diLzuoXph2%?xyP&YU>|#6m1- zV{c<)Yl(HYRh1P92ni{vslZ+Q{PWLA>6H}R8iayI79r(Dm*+}a#<{q-t?jI;8|rzu zxuDO&S;Lw?G1SvJb@~(u2{9aGZUG@S9v)oGd$~FP&#AK{9Fnf#g$e0J@R7FWhHppA zPd`D>0ecQ-Bn6u{76%{%__yzDf6;kr5IwB6mXL^uhL#4RB&<6F{!m6ns<*Fe@9Ewj zz`UMzaY-R?h}GLb7lWgxrlwL_Q93m}4x7r$!eW!8Bqf-b8D(Upa9z{`%q+}}msS_| zo;-mcj&<<WQdc=mKuko(M@mlF(N=%Bfu6P|upuii2e^8<yJ9`UENm?PVWBPEU3z-D zSm6N+1RD{T5a-}v$GW#`E6eaRP0WmH8|&KJn{e566!XG@zP?^~bY4)9AADndLv4CS z3fM@jM_5x`6?%l6f}E0yBEr*MSw%@sRtoFaF)%SoN=w3U(A|L=1#aJYpr+$N$f_)$ z7Z_JLQqnpRlTn<OlT%t<=@aP3#>R4MdkagVKtqj$jEseYgNdD;PehD^_dFiv|MA0F zB323Kup(`JQ$|L5Tp^*fj3i+1d?-f;CCJ|g#+|zQnm0eb$LKr|b-0<Bs4z1N6Kn?t z@sHT<D+NxHpYMEmRoV9K&0{j(Sci*wQVjFdG}uib#@nc=DVJ52HlXC1eEh~RR_0k) zSyWV&n3)*AiM%j{kC#VQMyjW;^Y)$FG&q>o)7CgcNJ7daLP1G=xuq5>9u$<6%4#a) z6cjFw_E--c6AQDazh7-@tFEp#R^|a|h{^#T9$p@-DGN9(@G}hz^-3$to15zJFfV*O z=3CoaDyzyUQCR})%!9sZ>1Y|78Ha?2LX4oVqkXs;bWb*RHYF7$a6Si}s<~|_5xoqv zqFZX+cva70VZ%U4O?5<U6bA>}Eo^BbZB2D@N(yd%eyHNIay$ZpxS02L2j<TZFpJs+ z7hI5(0dJ1=SV6cgFE0z4i8U=yQCbK)91#@;QXdx`fy>*7i3+i@vM4JnFfuY=<p&yY 
zadYP8=Pa$v|1mmWS_E+}bYvKXA?`R>!9x-f5@=|s0`s}q>9EgOv%XRnCD_^7jEwYI zn6Wbtn-UQbDz7SET$)4aJT`Bmqosb9h?JT~k(`pYv@jcMZkdvb%FxUdoR}*P<{6op z5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN*(AC!|EGcSiti{i~&r!_B#mDqs>AG^Ii;4=n zssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY8y7d1p{Ws^vgZ1)a)2=jMdVy+%u0yBoSw3d z`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_Oh^KEuF}o<yVC?`nM5pov(KOBSCp5*l?Kt& zR0GF_RfSrc>VbK850@JgBT*4yxMEm9nv;_qbOv<(^{bbM8-NSt;^IutNWD2bb>!rq zqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv9$DF0d3m|b%uT@CBqzo}zrgCpfO+^Jcv0{$ zSeb_i1@<{RFMDG01}^4xwbTiS$<GOD6O&V?CSqwIBX#i{{W)6~XIeU17e_lB%rm-p zdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7*wAq42%Yyw($e8!ULZa(cJ#*3%Gxpwjuaw9 zfAkFW-oD<&WySMrE1;Qn)|Oaxo{yi;#=(w+l%&1oTVGvW*G<f(LUzH%A*R0M@?25t zl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8c$okG4B;6DVN>sPZXRAW6-8Xk8yM)qPr&MJ zU`T=;c5t-q>+g;T3&F)aAkEFqWo=`Dhj{=jIW=i&dhD3Y<MB4yI+~#2^UHIM&GlYB zUPdN{I(pima-XZ7w!WdBql;r@O=U__JR<`=)?EVTVd!yiuwli;Jah_9j@0y&k&&yo z&c2S88X*Y<ouCdODOE;F92TTSguv4}dANfxVm-nj7r?x;hkHd`EiliHi+OU8ab1Xs za4`=@3=TqmQ9(l^iuvW=&pbacKRGq_@bM!&%rh|32ZaW<v^76{{aQg@_B;=_mzyhC z1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU=3Y3*oKT9J-EM#gCR#nh8Thcah`@s&jph=CI zj#gb)&&n0y92h34pn#isC$RHp7=(;GQn`3|)p0Nn-Uh~Z1{}<{qM5g{wrKBc3l9s% z!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A<YYQJn&p+HvkTKby`4!Z3BLY5-bfEGO!e`E zN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQWa5K-r$)1oDfAwk~*jGHv)9|UEC8o$sjmH|I z0`o3jp17Guc^hZY`MNq3^SHbXIhme;o(%f(I<5v7oo{SBO6UE&k7ORCX?J%Ab@dtB zRS699!C}Geo$b5Np9cl{!O3N0Vu1G4)KmZo7`d|q1mAuC9hgj@zUy)e))Or*DoD@B zM8~grhFe?SJ~6pwtn~8D`K`x?m{+Btqt!GpuyOZ5ghWUwD&oEl>vWoc1aw~CErp94 zC-dO7j%41_%B-cWDJ&!qAM?E2pqUU4;$a^685<uyCi8gwJTPBTRet}`gU5S2vx_%} zN3ISH4Gdi$7#iwFJzgKUK0F9t^k{cCFFOO`SzJ1gVIG&xLjw*Db_nSD2YP7nFi%QB z$*p{bh&(eb0W0$$;|QepITZ7@xEg$BUI(|gK{GEaD}A)LIR^940Kd1Hni=2O*};{e zMKjNUVgAm8d-r!9*EThH!hmaPte_~*%Em&9Ug=|RW7*r){t@@RvQeoIRwh?(&x^`7 zgiP|BYTgM|BRLHdS0*+NngP&x8ag^n14A2kPef>>jFJ*3I~%TRQfCNBPtyzOxhCOZ z9;NffM=@_{*4)+<`bRK7J~8~y>O6+|%ChdR%bk~7+glnT=s?#xs4eKnu8ua4DQI6@ zkPXbg!sBi5>O4CKTXbyHmHux0%#(8}oF*j0%{-Hv5As;dgP&(*`5iisPVA5!pZO#G z{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62lNr>Tig2{QZ_v>Z%$B!N!z9zUI%OH@Rml`h_ zS@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&hY8e>Xx_i2zm{-QnJa`)n^LYHc;ZZtoae#TO zxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC1o{`m*D#n;QBfYOl#~=CB*d6i;TY!e>pTly zoj;0sbzq*9UG6jycIL&7VqWZP=JELXBfQNQop1bum<Q3o^{c;P{^9+X@7`x+rol01 zWo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>*P!B6RFA=>IjjS^={c>9Ectz*T)0c0*cpEw` zLql5+FZZx06!W;|+qgQOB_cb^D5~e0jE8yh<1xSQZSXJ;I)9{}ADb9Cn)ziE^H`?@ ze96zF@-{$wfUo!7?#{~c!hYS{Seu%-0Vh2_C*w%wxsGD~I66;6N<qRVeH8OP=WzIW z@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah$J@8IL26W$u>jWA+yD~v!>J#zPCDA}@5PlB zP3>*x&vRMW_z^M5QAyc*q_(Hj+^FihiG_LPBbgV`N8r_Yvg7J}SV+)6uk*)Y9y8wt zH}j}LyrircS5^Ub0632f4}SXMImXYw`X`u=iHip2Y4Ca*U>>iZXLA3n^LUv5oVQ{7 z{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3=4PkEHy)m({owvRc%6)#9EM}8tN8cxw8fQ` zjqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m*yrrU~NJT@fWoTq~l+JtXGw+s+KX3DedE4XY zJf6G_9_Gb<i_UMLGjFw3W%!scEyBe-<~ppEm0AB_Z$>K4yv-k?^SWqnb0qU_zQ}X* z=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE9qr&)9d2f6W<o?vBrYKi2tmMwYY_a*d|NAe zofK-$QgKBa6}QIO^SXM-{Pemh&yZ9BAz@W*ExY6Dyr_|98lL?8fn25SQ96H|81@hF zHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L+Emt5qOM8(Ugm{`_|Ko`K{MY4I*)aKFw7JG z0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK2fR%in0atDW=4isewvsF^mI9SSyCL#i|^}v z)jso|*9LCic_1WdPQan2?VXoeHx*mlYi8$c<>G>$`OiAfBx>fJaRl@D@-}s!nQv_j z55?(i#E#0_pmm=8n9RR_{~o?5G{_%Yc)LW3nQw#Ed8})f=I3T0FxS)5E~_j{Oo%<w z&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_^WbfC@OYbJ#jyFs1sI+GCz$7tPdF~~Z5^$^ zypp24ot4GmW<1=StZb|rS{e|7wPKqY{;Qv_s%&U&;o<&jJ?zfjQ)wmpvs{|;4oUG9 zqw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB{X7)l&>Y)@^T&@KLQJHt zu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbgbh2Y%W_t1bIo4AKgv7?j z3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp#cti~Qc=rfz6BPPc=am)Z 
zZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm4iN)FHf33dq?oen2^AxL ziMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS!Jvu*+X8sSjCiM$?ewU22 z<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAHz>%RrtkY?Dc(`C}0z2{J zdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTCGToz?7vMiHA_6h27x;M? zG4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PFu#0u=z<vx{R8ihHd|g0* zZ*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn`C8GF(lq?^%neD5~i$D%Y zA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9Uf`k|xLl_6?>FHj-#PVHw z+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7&J_?;hOG_guIbnEsfEMrE z15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky()hP^g^NR}V>ub<Uys<DZ zEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibwp1yvgs32!!X?D1ojPylX zI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U%lItE31<J~*c&<qiu}XNx zl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUaM>2o(d>hdD<1t@eQb<fp zbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K;-$F|{87wOrit@7HoI>w# zvd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr)w{gKc-)6rfQ&3P~Jxv3B z-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ;3vQP@jVkGgCEl4aI>0< zVt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC=seaDxxKjt6oZ^e^emV5 z1@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7Lt){9B*fTQHA6{4i`w0v~ z9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO})(o**i@agJm|M=<WsEAMk z0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3XNk$FcvT5VD6r3~M9Db5 z7*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T$;Z<TYs>Ae4Xkt259&NH ze`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t&b!}xC*Db}w59s{$sR>b0 z;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U#M8PE;oKjKRKFz|)E-s2? zDJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG|(=nL;`R8BCOA7%!XM5Y@ z1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl!TozM^n)b^d+hGzR$E`Q zzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H&z;6G7l|BQoeGKzSGLx{ z{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXfXJ(~?L!Q9480{DI-4x*< zWTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~UoW>-*l_cz$1=Fgudq&Y7a zky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z4A(lT<1>HE7#7;xfBewg z)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W;-MhxdS_%qsE>8Aw9OvZ# zU32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h*i-~-mdq95<>q<0qgD<&C z95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{8Jz1ZY?`^Xxen)ufZ*)C zySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*CtBNHP4jCEy%j+Vy%_h0|z z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAzi#G8-=^Y`ty-VA_E*tmu zaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=;j&`~EIrkquh)1o5rNMen zC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^71o|QSd%M2*V>+*1y}W+4 zKRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsuNwhT70Fv&Gw#E6`t@YLQ zwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_?Ng;Jn0=%?~qrIDpBUDd> zb4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^LnaoK&VvC4TZ{BSpq5lP zJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv2gKaO2m&8w7A7riO|YO1 z%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA}gfol`4*L~$2DQ*qNFXLI zdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XXIzBcyGzfj;0y_A~qr=yr zXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8<vcb_|2xI^M_TB<4uB=-V ze(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{?!QI_GxVr@jnuNGpM>^?r z&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?kw4<V=V2(EiPhs9|2dc9J z><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhNpl!~{;=<C(Gpa@gC+7S6 zJ24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y=sK(p2Ero00N>`8`gcG4 z5EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=11%b1FVB7{Bmb|*WP*_q3 z(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4W&%pAsHAXhedYU)V&?JF zCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$XNY)97v4g{X-@bbF{{1@; z4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{=dV(9iV>io=UAy<}-NVAl z%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`rpj1raqS!?-s1KnTIukG4 z5Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4rQuG=jx|fre<>TXpNS0|l zn+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{bR)ZK8t}8GkD8Ho8Kgb^< 
zAh=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd)Avrq=iwYk)av1*0b>IM8 zAUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{RzMpG13p*=22MfnRaTTMm z^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw&p|Y|f;K#_sz|EajTw1iU zhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y=|5JGmzRx<4*&g_M`--j zw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A!OC8P@00S)H>*rluS~xp5 z{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u5u7g8&{!WBJ~l48v+MGs zM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3!@?Lo8OvP<g?4cA!^RzK zT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI*fco>c};DMy^C{E)vvvK z0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@&^Rviw+)tlQ%uFPuCX*cO z)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ayw=f6HGkN~StGC}RE-&Qd z=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW*d>Dp!V`*7wK(eriums## zMFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^3yY2>I@oFJ;*`}?rR8Kr z#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?*rgh_pejI$lYzNQo;XbLX zV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@a06i@Ja<5ag%!cWi=JPY zH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}aB)PQ$x?pKVh>D4rm>B-y zkIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y2rw^ikF<;w;PaceHvVzu zC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP4$3M@CMHKgTzwL#Xiov> z@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-UaB3&liKG0dPMyCXA|WZO ztfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B|Eu#JC@3kAoE+Rd=+0CZ zf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2Pj63>1BqZsFe8|PS(B`( zZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EEvr|>o6;Ux!&=i?Qc7UGS z(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0^mX-&49u+vcFv$t$d)7< zQwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9NHLzGkb#*BPMNui~3t|@q z#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k`^DzB<kt?Pc{5+WdOg2* zrL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;&+@_(@Gm21&ereeKgM?!cBH7(h1o2lxUu zC^Rx+t}gINrMdX|`{Wnqws*AT732m4`hBWEMXS-sAo&W53zk<F|7g+^IPRk-4*`UY z&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GMg4VXC#ih9yFJJsp^Kjf3-@b%8UEP<fs>^e7 zvjG2>lH#F3aAa79Mr!M-28Md?+`IF$x$9vSZbf%*XG?2+R$hU<XRHdIzF!c_Cu4C& zoqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiAscB%1@dSGZ3d1`gJ)^dzsl2W>B`Y&JAt5Ru zJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPWSPV`Z100MjU@`hQEQEEeY^*@7K7sz=J2?gU zG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi1(M&)3Haj0&AYcpCPr%NYtpkaV&h|?q9fws zV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1=+1!TZ`|H^@p?1!$fIY^)^4m1jSMukHkDUb z7M2zv$)yVO!OL<A@`@`;K~OEMt}NfUF+MZZ(Au1llNAvY<s0bF@b?V}4US2SPtVQC zEXYqxOO3gdfLwk&a@BC{VCc2CcC`;rPoi^@4~&mBcea;QRi<QT1w=-=crk2TD5kb{ zdS(PoeM1$jt}+%UrmQKV?Hm}J5fYQeb6#hUC>}#ereA6`BqvDY$ic~Fsg;BEozq(x z@7A4LpfOrn8$h>YWThn~CnjEsgGYZUIWZ$E9X4LR+<bLu;pwv{Khq)4UOb<forM3^ z)mPUtt&VB1(AL&8IX$+1Z57nW<@T1kkN^5)ZI_#-X2u`?juDtIkJ!Ka?$MJ+;B-?n z6Cks@dpiKS@Hsft3(dlDq2>3#{L{;=m;d6$i@W#kKvUrUpn_&*CuXip&R?AaRNsI2 z;MMEbU-TL{0l569&mON|zY(6*LSSU_C_3<K(8Wx`MNA?t65@|3I_?$F+AU(ZTgdQ& zs<95<)WVrU^A8HUlw43%(>*i@B-Yl`)zoqMXV%==(b3;GHa8m+7b7h#b?Wq~!$*%G z3Y#el(NEBFdIoyI5#e<$O@rej9lc#G?X8V%Ep45bfnevB76I^RW=u4(4hs9b4<7(r zfvAT@2l|KlhDQfy=4Mu}uiaw)pzr$48*4YNF>U?h=Q?b>^WZ)Nuv-)HfVRK?=+TYa zH<#8|7gny$EiKF}&d)4dnVFxxa&=*8eGSd>vv%|5-0~8fUR!5-O;ba4LtR~SBjCAf zpdT*(GIH(h$R)S8eZ2N|=C$_@PEKr1J2Emo1;}sg=*TH6k4j1Njf|oC1=_lLm^nD> zTUu)wn}}){Y1l>AwvUa>tx8}i?7Z@va<;nkOsA+?N@Qhz!+3Vx#K7dr&lvB`cW(hC zAOnDS=jP$LPlADrSzfuiaqH&OXHUQX(rlR^OJ2Qs4ZiT;(E~7$yQn>Q{P6M9#}MuT zFyIqfW>!G|`{2>z2M-=Uf{pO6Kbhs{haY|f(1PFrxdmz;43<I3{0EwarapZ50Ij`) z81?;oXyNzwKLt&b)YyjO+Mc|co)T6`qUP}z2?^(Q{aMcI?i9uE6frxlZm&zQAky63 zLnEWpvx;i#dPYY9)uXdBqqEb<ll{j9pt#BL{PHqeCr1s8mb{XJf|8=Vk|O9~MW!SM zQ4}5%1rS<TTV1+-otaw-$(RXv14;8K{qbh*<4@@VU%Y(${P}|?NTz$(4y!L-zy3*X zskd)`C7IfmG@3}}!AHz=vCO=L%q+=|AKiL@<OfGnxxtyN-MR&*G{3qswKzXHKljNt z^T)xJezGew%xj<daz4S?>9+p<;<|>Uyu#3=RFBX|CvSf%Dw0thV~iKZSlI@ZU0Hw7 zJ+a2GK|LUfXE~+8t>mQdo@pOe6_MXp+&Z&<^ZqY=^#jkE`P|>ZbBBdL^yvS~md^22 
zujI?;4FY&nTrXN)5;2bxF^d&3jovR}yhqSrm#{ILDpkvbU_qu*142SCrDT;=wDj~M zx$~Hrbhai`LNlhJ88`aJ#|p~IT)jNaiI&D@rpD%*29d9sm4%BNEjc5-bD($e+WOkZ zw6GgLP5k{u*duAweund(CR2O$hM7a{&D$>m9(G52jh|#meu^9j&5sNxfC3)D{LPyy z8ykpAURwv`Us+k6UR*>nBQw)wZe~3Hxcud$vCRJtOina(bmUjpB;@1;#V65&!|gqN zEXXtiB1v7}NLbe;ET`+%lW((Y2D#)&^42M*6dl-3DD$XMjJ<MgLn_iLhu*$@w++<4 zu&myC6p+y+Z5hd_-~wnDHjNg-N5R5j84}A`EWrNXl<6{hR(LxH2Zo<-bR3XXWpgX^ z;G2(o?#kAz_n&NNYNE0Ia%y%~RD4`mbW~VOG*~!dF|Y-;*EThbOieHoB41zqY3jhu zgvd<TKm2L-z>mn^k_Y+qn=huz{Foy4^(T5~rVHO}<k9ozDB77M4=%QuMhfiOojWMt zS8sj>{Ot15^x^^v_+LtZ`?IW|pOQ><3=YB_1Fncl%Y++9^$)hCdzm?sbuEZ;SOXCg z|KjG!N8i5niLcxzZLLNw!qBS@iJ7vVQa`E7F!ao&MpfT^^n4qre_>g=evcGVsqK== zq3CkXFjNR1dBHdwOhDI<?VJw4{{K_<IH658u(Ses;}#Sik&>QUS>4u$i1uH~l8LT@ zUm)Mk^!K*+b+`BTAfJ8Ru-ZL1&^I<ZHajzYb#Z~22b`HDc57PXdw@Mq8URH!5BM*o zynp)3dB8v6oGFSv?*)<uS_8Y!q;~}H4<CS+-MD-2v#Qy`@XS2m%!J7E^Pp;eGtv1^ z?*o(MJ3B!TCg&H0BqoEfab);gQQVAe9dz(!=QIeK&Z&b}ZrpwLTALKXDQ$yst<-j} z7BmRvl`vyHt$70HW$2mX5?OWY!P9N5{#E6}4?h}FQ*~$=2UMshus%X25$BCU&lv=t zHwrr_MPxmt@vmwOc8rHMo<MM>IQs+y#3rT`mOz{j^7V@Vn7q0;yRtHW?HU*?pa~^E z3IGHKv&+k?_wKLWxp!q{Wo~5|!ThyrfU&iI$OeA%!Gk-9&Up;rM6+jZl{){3|9`Zf zRNEBrPw#)GcK|%1;Sou5@BSxA0}hA)ejNckk^y{aX$nYwVSaS(%J0DWPnNrehbvoJ zbIL2@vvd7p6I=sB?K~LfP85Bjt)`*zQB5*AqIzND@$}mLvs$zhYBZczt+HdDfS!L~ zW(S|J{@(Mt5>^og9ywl#_3z%JVd=JB{|kz1Sg9;Anh)d2t>P+Z9DddyNI*XbK2Kx4 zIi+p?P0R0Z7+)m=a}$z1iO%p0i;B(6DXgjM9UuE*g8PQ%hNPrK{{TO*<dmegu8!ID zwW*~gv;r)YmX~O1s#_5)Cl?p4tgeD`_?RF0v&h-1bC|H-RQL#7pFR6b`_I3G{}(T} zqJ2}wF#Y8dy`v36;DG9xtqhM!8aSVYb%f!;L#I$xbLF>F-f!IqpF`7AO+7sjy`~kF zM5bmi!lTJPfz~v4yuFhy!AefY_?WIwa#`QTqnBl^<6JV<Ql`;3Mx7WwM%E&{dHlL( zL^1nGwSBTSYEFr|^vtBv-fgV@Ri&+O=A54IS%UypIcGq-fPUa<-GEa#KQMk>|9{5@ z{0lZj5a(cEVQJ|?qxc6yADv!Q+SJ|sGgULOFh4jk&d+~h&z?QInT3Uwg^P<*T3WKX ztrblmJ9TwYK;X>R-+aAe#||4?Yask9D@#D9DBCkd&c@F&gMZXHOxQnubpOd?=FrV1 z=ig@<>H^PSz5edQ58r+N;Q^5O$A3NiI6lKvGLP?lL_13FKMPtnH9V8ynKB+xHB$@o z=vWOnESe1ZcWRsN;o+)QAo;4q+<drcNc8IEYvDpOw6VvUnu}<eDv{z_hL#?_eCrsH zyI;y$!=+f8Ud4~~wGAq599rwVa?8{{jO~Ofw>nMTDbawQS=T+i4cEV<y#Mfnym`o3 zgTUP~4!l_3GkO6h0qr_IC$zo!F<yIh0{%rWV!yr*#>C9j-pSt6$2%e>J}bAZp{aj- zd@I$Xm`9Vbvasw0%<tT}6PDqVkB_&Zxe?GZGBbVX@F4*F*I$3Fjm50qdjPls{jjjQ z3P?i;_WJdoh#aP&gpVK}jcq@U>Y%pC{Lh~+t}aJMMTlP%wXwE({rwMUr?83k&)oj- zAL)Jl)~(Ny1~mNo8dJtE%`9yuyGO^VDyu8Z%uQuvrQ3VD;9`G;?V<AU^kj2)H%ju+ z=~=$f@f5!xG<wyuve7a$=GSy^2(6r0zPobw*+o6iBT8hPXN{^;!C5{3gwo-Lp|!Hk z#ewNHK`A^$RmXL`waCeMubjEnTib~JOA0MG|EM;DP2PE*n)@l7FF)4%xHjXMmM5R4 zJGZv?SNLca^H_N!Ykg$abT?W+NJwHzc3DMZXV=Kg%%`p##r#p;qkwi`dMPPMb#>MK z`}YIw1MEdbg@Gpl_B0wr;LIsWN%6YIIsiP_{QCOhwQC^nLDg?Od<36hfItA`J;3wJ zS9hK~MNtY0p9xyTfWV)>ef#qL`zNnnJ$mut;j`yw1y1kWwexR(|2tM&>&GAe7XThr z><?eOeE!}0C$HZ?Xz&T@NNbNC-F)y6CHcEgpIy6i7g1Tz*MJw?y}NSzHb^ff;Ai2q zi%W|*xj4Vs@eKzDN6*l}*1YGi0IPG?u5I1eH8fb&(gsRCAtx{NQVK+`4qkqi6jvjN zUd=3IvBpPreA251Z$0@ow|0<2nkZossmG`jHHp@ANT}{xuI^nbZ=ZYj{zF4cAJ-Yp zU84ANMgbbmmz=}OKFxl)UH(kz9+^9&PUljhe52sPr|o@A%Y#?b<ERGxh`Q@x6)K-; zgn)CN3@KF8$i&3X(cZ%wR6PV`g*El<1N{i*=TOYg4UCT=nBTK|*REam_I8inzPGa{ z0oCu_i_m^(SO|b;aAK@`u)l9)7%BsQwso{Mx3#o)cP!n!)!x$?5fKJj0)PkLzkc^# zR#rOf)!fnq$lth+4z1h+pgwu??#k7xIoX+Uu~DsUEw>)r?;jd~9t7!JcJ0*EP`h^f z=EBM{(EGDD-*xtML$hhA$?Lc8+<*QO#r*8Td{2LGPk-Oqty|NJ3viUOvXYI5k0-9o zW@cvu1o*+0rq(8i#E}HwQxjofp=jsD!^2%xUfR}qd2nhHjoGJ{mr_!by?q$z8EJ5V zQ%hF|CnxGVItps)QVNSBQqz5-;y@GHxOpNZZ)uH?ygE^VlsK|*^X~IE+N20BX&WrP zN|Rc848!nCyWBXuUfMoCFmnx!ScAed+4&S$q^v|tgH;?75{tUF5&M@E6|+DVdFQVb zTsX15yc%vt)ZGrL(hjLocodyZ;Jj6RD-1$vFy<s2!ODu_2HkXMQfhi}SyfBx0CTJ! 
z)iwZQUM9~2=B;gr%XjX!w6}2`-~y_LEvBaU^@opzgf6hNv$3+W>g(%)JU(;g6dN1s z;UkBGf&<|bmM?d<-+uO@sJQUl*)x0gB0KKe$8KdsK;a7f3=3vvc)%XG(yraR4jnp} zo|b$NNe;MoFWBC_?EBcEeG?NSkoS7JI{WtTgKE(AoI1r{R9tZH*>h;Y!`+RIjSc=~ zZ%;aRP5`u)j*fOnND#Ef!omVqz)bMBU*AA)?fwG=1vwUGf%aKhS@tvEF$UL|zka=c zd{j<OW-s$R;s4^|Vx7G`J!4~_{j$m`Km-QGC!s@>mM*TwBnPaSg(A+7Ro#sqTXW;# z%l5J5g9`S7IA1;QIysy4i}>Jz)+@FBD`g!EH|~9YxkXLenT=nKQ=KYh8KLcx+&{Il zjo3e560#b0N!fj^K;C5#a#)pmNQJ_qOy*H?KA_-mNZHvwwkf`P#yv7q(-5fM$<Bk} z9T^*!nUh;l4Z$orZV57AE6)Sw7dLJI2cABC5+DcktE#2~n1@x6Iw0vaHPx>_dU}Sr z3Q#}5eE=2!PW{6}b@jFADpMxyL7j-=;tW^?!nd}zg4WQDUw`urL=dH=MF4fEh@cV> zyJt5j99>-<Q&W>2J2z{>Mre?mhr6Sv>+$QiA)&#r5gI>mfD0<X`4}1*n46h?^|!wv zmC>>X5osxD$x>1h-+c4UUS{KPAoS?z>8Zf#!oq^E@8)^#LE4d!6mJ_C%CD+P&Myj2 zPV<S3r3QqMP$h4PDEX7>)_i(FWvvrWUcd8<DPxzgRCme;F)wHsYVMt1Ke$@awb(qo zy7euTD+{Yf1aZ5C4Ec1t6l`P68QBk>zSsuspDeFlzdof#`AXW3%RGjMN$)%gjtAuJ zx#URuq^*r;DG_B8gs5h9GkaYEGJpfm5g05fuduYfv1@o3sQ$B_|A={@!MXKy5n-WS z%mNyI<=%Y(fzzl9U@)41d2|)vghCr=c64U?;zcnO@XAUG-rim;tSqoQ1j3-eOG=6W zc&H-?oH>or(jZz{KoA-p76Mcco1rynDT%DibOytdSrZ(A#m2@;R74oT_RgL9db-cw zeFzN=hE?Wq_n?Ty<4r)cD=5hOgRlt=J}Ynrag$v;;k0WTYAr1Z!0@mI7~b839vl(` z*GnSVpyw$sFAG`-{sk4lDf~l2vMZ`$GqVF?6Y0U>jtoEO9F1)3f#el%#$PMD;@vZ5 zuRVn5RRrgLQiBdkUd1twUz?FoI@B<@TGqL6b>ru`<~_U<*?ARMWvqqq!SdG8UUAji zp#Ag3!98iWu-Q5L6n=d_P8pj6a-;)tb^v=YK20}idI7(4o*=;&gQ|K@A5irmRx?Y> zYuYXkOpZ??+Gg}9?~%`#zi|UFFD54P5%Utif_d0W8;j{58|m!pLAe(sO?z+8)9=0q zv_l=}3tHP-wX`%~UsyeV?ks5gXYam;I2WY;#{Gv-4IM1f#p%H1=;`8M8EOK0-+cHX zI5-d`d{8)dpS?ga&y?u9WM!o58|w$gMxa-^`RwKTqsL3P?zp<qm|VZ><jE7O_wMHv z<gv1`!a8`?z{D7+GB6PlA*65O*x%CG4t}JptcaeQ5#A)Nq#WcQBRty0H_+D2%go6I zlsv`=l)McW#xJG3_u<R8356YSpOx)0^}Xu^4MOECBOrRM>RGDmUwQN0&tHsu_Tt+! z7mZj1v=3t3r7gqdZK9idXSPB6C(D(q*N#ft$T}t)2bLdJLP#EAdnM<CDlQ;(4(JE& z(V+hg<1Madqib#fRPXE?=o1yJy>oPBU9&eDTivnUv2D9!+qP{d9jjy8R>!t&+xFf4 zocBGr_k4eRW85SoYfbGvYwo?O>Q^;q6?%GVf~xYy@=9>ON5Y=w1Gk=!i?_D;HAsXz zCML!7U^YGCv9|9$n7XkAG}7ZX!C3QEKUsgSAh;Q_(XYrzCb}(oB@<y0k-T<Lnn3o} zr`@#!Y&I2J8?AP4Pak7U9_EH$0|uSmUjl04NE#u^&Mc^QQ56-xWQBP;=n>^<%5?d$ zqQ=LmwZx2;r}x3ajbJ;*dxRk4bh{V>7x#u@CMu{+yD#w>wr19N8XLd_FNbG{?HF-z z-k`x8>muu0o<-)Ro#5{Qq1OqWf;Xop`>-!j1#jJ$IB*j-D4CgJe$^IosoL_!G2inR zNzUDsDLaqyMc+8Y*b0Y|n=W4Z-n=a){(@Odu%KpSC3C|39z>7)^vg3Q%X;roHu-p7 zBQz|nx2im?0N~?+^y3t+3tiBTW59vueB)T(bZn!hR6-SEGSuWEn#kP6hud&9kns_4 zrXkPhfi{7AlK{lMU~zZ8=?~u{M`6IA$(gSF-Sv1qQmjlc0`Q>#pL>6J$w5GDU-1Wh zMQe|TeP4AdZqMsBF*K#3q0;Gc1lT(`mq!N&&u57|%uw!t9OQW0Q)&86_t>r+9qV(7 z{a}D*c1pwTejMgX+ezqI*{QKNgoI!@{K@3X!D4#??iWKEzXLvG;xouPB9Jw;eL5eN z6jB7G=N4Yco<fF3ij5YPg2awb6h(w_yuNMBYumZVumj6$%K}a*n{&_KKAlVjfd3Aq z^;X?k$rWDgpb7JeY!HUJyfX{utM6v%u2;VHwCL76lxLXZKFXu!JopUtNK3<Y*3+LO zw*VkA!jkY9o|3fgfLPj!r;-o6>dvz9(!l*(y9wD3O>Iiw4jv^__FZ5>NY3PF(hf1) zPlmg&rlst`!BcljWxRU5LseNb3Sov=v!ZelkNf@C3(YD-5@#fvtxqpE5a84hu7=RX z@eHw8G+uNV+!0p@6b8Scip$~Y#Tu+-`LNV8HrtP*DzCvmtcN$QP=_<g`VT$jS9OlO zhIn4c`)m>J3=A`^mP<v+DF7#cy81UI&{ac&e8su=T;2x1TQ9&KzcsY1P#SoeD4Yx& zJM(E`dRfHUzHwczvvdqkv4I3{(6}23K>83Oi8U~~fZFHmQ71_?2Kx60NV>!c(3gb` z{sr=76C5SSd@^R%Jd&=@ZB;xu33phku2Vd7q8K#oWp=rEq|H^n_NX;Jbp?x|=v9&{ zvLBA8;J4<}z!yXGI%Ex)WcEH@Wz5Nrob5qTj}fVqr@QcBO<<;F`6<<aNL5A_qBh3P z?uN+RfE~z9O^Z)kf7s~fLk~KJ_o1mVQ%}10fdJtqxVymbxW27Ij!#uo^^*+}@4a}s z1eV!em<47T7<s^Wk6F67zsyT~YKz~sQ`67@B3E^|U9193+efOZ+g(G^)~D{&DsKmG zhyZ0JtYM{5N=4<h=j`e`9chj(GXHNlMu-48l)JpV-fc4^8?J;*IT|qifsLgt<0B%+ z`R-(C>x9&%lW*MX8-bwp<>f+(3EReU!2JpU#G-7eX(F*wxE=syRPh7CgP#BlmJwAc zD!Mheu4)FsxU9rp?{0fH5ZyLjl2d@QjPvlsO$mSDTB$CI#w-;)9@6vWuwB=kbIa!~ z;wx`v(~<K{Su5I*2lx?9Clw%A5{x}ACby!9yA+&ON`(&w=w<XeWc9pdPM@=SUovN4 z_y3qRNiDk#iAeFPBU>XY5@T}_C0*{K?_=Yp{ML+B*6;}mcjreI*9vJy=i<T!!n1=q 
z#mWJfTgkKb>AfRA1l|#c8}kM-H=44ECC$kIc5sez2AJii?fu^U7X7q2I5<E9K~~$j z0A8i)3ToYsTKmI$3L~vdjw~ilCTo3tgo%LV?VTOWtc;ollAKtGR)isVsOvY(L)^^s znHEs5{mC&lKp=(t4#IBk3?Y39!fOs3bYb+&%T+c`Zr=~A^GG~FA;pfIBHH{0B68NO z!#y#;Q-rY<0a7+7zZ3wKdj^PV5edUX(29rQ^EpUtZip{;3&H#F-NgQnMwZXX#ju_9 zQ19>pSJ%s7L3l9R8$)3SBP%4c*3|LKux5yE`-2{Kvu~YQdqGVG(>je&o$Tl#%H&f+ z72h&uho55zq1Py_=hU8BVc7X>(~;6oyF)NVPclt5It@K?@{H$mt>g8$8_wj8x5wK2 zK0hG&ZaW_VJTpE1E0uBE!5U3^J{Rzr1qTPlt@CGy;1BstVlvq*{w~7+q?Up|U>c%b zh9I^elxW|+fppVtw1E<mgjt{+UK2lKO(emrV&ew!dF>;SkwMl6E}!h3b$njO>H1xv zjaU0K1=y`l{EE(%MtUI_nI=G|=WDH*X5$PvloRSb<MEWq+zlNA?&1fvW5<N@l)cXO z>yXLflz@k8ba?=}D3)yiH<6oWcohxLyFlV%Z@xKRaLuYk#hGO-L*Q7wFYB|oRI|{K zczYL%|GLN)K0ukT$5mXVFsPGFauANB&AGx()98X<<x?N5u5gf2|I+R?2@Qe|tpLrz z^=`+Y_uV8<l**{l><`i$3D=yF5bpyqjfV1!=jtP-rX;703k}8gZnQ$b9e`&nBBY=I z95N5<jE2ev((AtaYw;+m^7yz>G<ry%P?qC8erok576SOS-ySalMP2A&=i}ny(z;yW zxrqp-5lhk~M!32@4=s@}{6?rOEiu(Gx9z!7Js?g)gCKMz1s(DTqq?^yGjVWmUSB;F zRnY~yL2qYFz`>`4(gCr26;-P#DJXKhfvrNXfo1T}VtYnz7>=O`@PKtgzWQGJj2~lS z5PnW=OW%~3UI@eB7(<5P6D`0i;S=rM9FDeyyeVhAIC)X6{<z}v%~a%a5?=ZCvYRCD z33Eneg%K;qsl`{Lv0BUCVE6XKoe2n!l99w{%u_j1w%uJQ%2RbgVfxMG;*oXXYsKIa z<Ds$T0hr|#57X0zG$P0T**(7oc5p|qXvXy6T^$^r{oq{2tQB{`4tW6Sfx3b~0U*u- z-4?_2sw&G3^{l}3uCJY{txHn7><um4?>^jt$okgidgFmcA;DN%fR6Ev7C_X&p&1@; zP$yonv5gN30jG#mgr%TD32Q2C%ka~MHc!CAnw*{G%u$|Gl9EL1?54@@4qON}lOI_j z#kc^bxv(EtY1lb?W>+U5xi;p;0z@k+ODZ&4T@0+Q#t!zmbw%GF!9S2OaDW0}c#ijX zi8AVg!Fy(AI_tZZrnVq4gGjjGWqvV1x-8Bz%>um;B>3&DK|%twT-64hXbGt$7Ut>c zMHRp{oFN103Tr5%GC+{ZrqtlHtR6z=BVWawR?$SD?P8Uyh!am-1*ec{<w?$m9L^2W zwkmzI%Ca{L>#y7LE1#<|Lla>>7v5kV1r;1h6g=l8L$&JX_2j?uy3Xh0kwF<I1?%HC zt6}z^1V5b|BwmGK5(OxHQVTp^r5W>T@+{R%7o8Y;tTVzuVk|ZFKrd&bX9gm}e8##t zDJscy&9zXm5L45kL!a9wnr$3*WP+&oSR)q`7i(+#y}{i<i2fviUrf_d5WEDMT%>Di z<Mov`2gN+dTvOLftU13u=i%FvfMviLLoLtiO67J7n~ADwQzs}K<ZH_tNCO-hgeLEY z9#{kBD~$KnzLK-1rhv1yhrGF<jE<UQi0sb*O}n6cS%SDj8$pM8hUR%@dhTN5JGxtD z76Y}t?c2)Y>T}Rrc4Mmr9LL)G(Z18m)97%HC?)`aF1)x?EPwL(nhTd8H`-HNr-|4q z{>huO+gpBMbDH)C@GR@rMQS`z#EM+!k~#fpMbQ@inJ<sb6obr)V2aGT8{5bedKcdf z33K{3j2-*STH@#O8z53r2+5E9z{kdvwYXM+&mFPduIIUvR;sR-z;@l2kCWr8@|Sv; zuI3+ijd?w#rO8>F5r>QKr>hNl@h#&S)BQ$MHRMJ)AukWeo0Aety&7}f;#zC(T~yP; z@Lr6TVn=LvLx;_dRLha+!dS1Y8FTR4LyWz*ulx&%&a|vC6&+vKkcV#ORu`sws%tbx z-DSrux+|%&V;_ltgtW`=tY%+c3@*PoqVJRm2Jq!_ar@}|X}*h0><N$Wk%m!tNlto# z!6THmiKjtOYKTduzgq5w{~+<CqTE;^0}A$b@R7GMX1UgXy1uUdiobMi>szLiVYfRt zx)}cuwd~q8$k(-i`H)}k-WS%?)fG{6)KQkUke9GBkr6kDa0O#zY~_$Myecp?2DLAT zTin-8(nhqLCcfhotJ=KC?y#2$oh%jvRwlM~4;PBF>K(2=71&+8og9b?^*s-+EvH&) zowT@W%^Awu?yfp^63-mWmwbq%SQS0brem|&K%RY2sgq4?iddz)a+2W_U#G;3rPEIZ z%g!(`p+=tov&$+gAeSCucW1(<`Gb1*rrwso*87+%Q5xO~&Vj0&G(nT{_L`cos{H~o z?8}KgyL|N-{BW#mYN+b7Cgw3Tl-AZxm8R)HRi+3NVZc}yh#ZgkjbYvo$rI5Q=nn;m zn<n)p-0mU|<-9fw?fi9J)=D}XzOUuP@OFITTv1XP=Yc(WEGg8Ez1i_<C;hLWD&O;z z4Jj(N7BG6hmyO%UidzH%+ZJVVjUYek{!h}vGJF1U^6vGn`#g7ynmPACIf^qS`!OT) z^oB^2+BX3ON%aVf*V7gftm(PN^WKK>^Iult(*vMa0^-;ht`w|HTpcYHHAPjO`Jj4% z<X}O1zN2~BM<XvU`-FyF*oD~*6Zq+IdrPKe`aX}czjQCJLc<zcstC&~KyWySNO=fY zzrCmJqJzZ`_6D069EJT6G5MuBi0M$yd8ud*-t8)5OTFPE8A;AjuwQT*n=e6es8SUE z8|<y9aS6TxsIOU8>+SQ&IO-+*e7xF_tLhIx-Aeh>r{nG^Ls|?fxzB_}Tdmu%B6_Z? 
zAo!h<esZKqe*yg$2nR~W0Cf25;)wzv{!hCV!yY{vp^yv*7e^%0nV(ml@3&~2Vq+6S z{)SotMchAL8z6n}2D`+HO!2oc{&?jYnsiSOgh6~y$QfU=N*umng}w|p&paT9QV%mG z?P8)>T<!c*etu%aD#u-3`Mbx2-;A<>;<-R^)_?B<Vd0TPeV^3p!aAT5QT0%AADSN= z-7xi$a0ZxoXt9D%+!yea?0LfP?8B?7&pS2dn%8+dZgwYp_d|U=+z?%KUbJ_EHXK|$ zjV{<xan+5)Ls2fWt-6%H(38;VIu{&&-L%acpsKmR9D&|cBq~Q#Scf!7xBU^S0va@o z$@wHiMfcW-*ZfIJTK8R6wioIAo+cd4C@qsK@Dv{|g6&>GQqYAq)=0wJP{@>w{5|)i zR99ec89()>$@x^=3tey@XaZD+5I4wumbOl+mK>NjniiB60u4RNeR@<nng34~LXK@H zVH#p&kqp9i&IjM&*5iadI$15qkeFc`o`Q2Hys_zribG{rzLHCm9bJWKM%c0+#WqCb zO-6f%nS8J8@w1f6FUn6aW>>LIE2M_)Qpx>4Out;ABl{#Dr?R@%L(X)SJwi-=l8>ZN z4#M%=A(*$^m9&hm7>~s>9*Cz#M8A+FYCl3@W-i?|-HyRen%zj5*WHZuXs{n6TaRau zH#yPO*%Ax`DIM`2Lk6j@tgo(bM2Tl^Axlku3>7|4S$>|jOc%Se_uzAdhF}4FxgAHU z3qT!Yhx=lMCf%d}ubx1+5W305ss5!fLmhYsnFS`+m|@goY5Qk6j`|Qo)AQ~7!kR3T zB}7cAqKriw_A6@`Mu>f>oU_@<3CkYP^I)8F*D}mB*_%x1SV^4Lx*?S%*CpTkMQT<3 z-B~Mm1CqYu%sX~BH}WG_LD-fSGhdn6)BVU}`GBu+_*g~#{b&>&_5x8cj$AJEF$6j1 z0qE_)g+n6FUCi)Gy@Xm=`tw}gXu{ZA{%u=^&~4+-a6d>UF~;t`xE)zUgr^v3piD=K zW+14pk|sP9hoNpDPTz+qJ-)~Gz!hDb@{IOdPnQ?wYHCheFtbozsP}J$PZ_tAP^x=% z4!!XT0tNm@3aLWO!jv29GXuke2pD*VMnr)uE0u?j)pbTZN6+)8;aMk9>#sZYocD$n zRj-@QjL9jFV_Xz#WFd!-i;vX=y+y7kCu3|;G~OOXSs5T2ZwO;G_(?~dehG6m#LEz1 zK_>estjxN2IH|uiHM>?;cr@ha1fFCP6&k@qOu2IF6#Jj4B+9D`D>{K2q%^!q^?`q* zE7iW)d#dbqwLOcXuR$gYqa&8GlUydaqIJJeaiNkgACYtJkFFFsYB8H8G$b;@!~L|S zXy;DQVfhI?(UZ1jmi~>Pv%a8Z)zeK8R0!i6Ll5)_1Ex9ws>3K1AoHRHU2|a2F+M(i zYU*atmKb^T=iK=m3Td_nCfEfr@dUu*>y$Xp#((cW%@tHm-XYfC0ujvl2DZ!q&b_%g z(w!E>Qa{bioH)bq=1RKM0Ym#r8-7FD7kJlf*={;za5A*8&Oa-zPEt3*5wR=u&cs>n zkzCeRo$O3ovFZM5UU7JM@n)YA;e3O~edG-E{)QmgL$<~s(xiBb1M(a6sP?>=W#w6U z`2}S;nxZyiag`$COLF$s_B&dMH!1K~GWC&NR;R=o(uZkccZ-9Cu<_5E8aM`zu7Epx zX<5=x?pQWTnP7*-QtsI%&w+^nX^?kCY%a1zO0L7+dW~NfS}eb=UxhUZypX#2pKS#6 zH=%A5^5*LOMCwyBbXR5<mlrlznAw3Ex;UB~jqlOthYx=)b#cVs!M^zfaXQkjl#XoH z%G+^<G4k7^E-nT-$<k2!tD*M8wG_J=RcOk!MCu|5UNO4{Fdbuhn)F)~6<NCJq~EiV zu2N9<jRFF(IJvp)xVTK@wB$ai<Kpv(W`@Z>^Madxo0D5=fw-q(BzOTlY@O`v?r)cY zL5T`n4lNC37yfsges%=DtbVW1sEf#D)MN{fTGMZ@c`sdr{WjrPdU#rTR~)!hp|Obg zKFfnge5JA8^~Vg2yrmaV=Leb5<{qW7k$%^4vP18Ij0GD8MMw7-J1f@RCLMEZ4rfD9 zM);^a#JG-9e>#|ANx~<Adcg=@ga-x1F%sf36IFl9D?p64{*rjJ_V(s)MG~|z{@lV7 z`BGR6oYK{pn8*thF(dAqOPB@(c8BxJS8i_Vv4<_Q6tFKuK^rnX3IKwY`1u$W;fNSj z@C+H~0cqRFf4uQR+<{H8^9#Wu_A8zHqM<3&TTMjkru%+1a5$q0IZcU|CC%S;!(9ug z{Z9fIVJ}Bn)&8xQ^p=D9P;YNkXqPGT=jwxIDQ1#e-j+M2<7EA6_e;)B5A1Suc}<p* zDNkkaGUm7r@OW+Hk5Ww3wVL<hWY??BH{p~JK(?0nH59A8mPGPUB`s9kJEaU*<&l9h znVbt;7ijG40e3W(s15H&50ejpv*9#T;gGi6)Z}nXJqW0+h*Jn6|8wMHyzju%Ab!B5 zKa7q9g3=Y`m6bGAH|Mn+TRTdNiw*5+T&>`i-P}xGiA8)zul#&6UpY4VisupYH9MO{ zgaYGIQ+rA&x;ciO^&W~YZNh$Rlk=b3317Du71_jyOTb3R1}F*^c=AKtOCkjZ(g!+j z7P$UtZL8`ePi_%P>J;)PGDI`lQB73j<Lz+C##6Lc8@Z`q1w8`nFwn0V;gZl4kvGJS zSgQis7MT4>olm#$4!rOIEYaXm+2~DQoU=*CzClJ0Uh8KRqN`wJrDHAh)F-w!=k%3L z&SuL;;89Sl8sk~41+nZ%*?L{lQWh7vFf4LiDnqfOntT48^&8{049!*h-90MOXaDI( z*e^+CP)B7RAt<XTaRd_?S^`u*L4WZR!f)L#KuuwZPC4p+_<l(9KKjzQ^z=aq@VK}% zJRJZpvETCM4Z}1aklr;2|1EmwLT08v0?2QiJVPK3arD@q<dcB7N&;0v={ZFXRF;4o zq5jLrj<<$Pj=hSrJ7C+)-SFnX!*T_gp@7pDtqGCPBft%*(W!;WakUXuJN7h3Usc<K zg+G;vMbckU3X~DxY1T>H3h&65BeB4+Uly~M;DIDS{anLK>1@&sJ4Kr4T^=S`9X=IP z{AW0%n&%5{zv+3BzJb}WsOmVoDi3+VOG0<+M5@^1jV;p{IB3nWkXPQk2BITVJ$abg z{z!4cTAfX_gGkG{rm?<uiAFP`9ERZMU139V;w9$hBw%G=reL9@LGDIOg9MR4S|;!W zDnz<gVP*-0oo{mWd$&2g42VVc0&bW-==9-t##YnSwY0T$bagc}RTUB=BPT7<omp2H z?dNC!tu#(dEJj1q9i<5y8y7JC<!c2JgyH8>=6g-nfIO_ktTK?QssHom!UKGv(>TXM zsjq4Qv>wkq&Mx&wq(ytRlWbMFdZ_)_npB$TnVZs`rMrJYTyvCjcom&hstE8FgZl?y zqvv}*NtgDgE!Cas(nSXovScVoy$5qD=<m0K3?u3UhN`4f+JUqN2Yqcmv~Pwo9Pz1Q zuiH4cG+smVQLCNC8fGDB-{d94@cqEv%?*yA{0ZYEn;oBAZV`Y)fR_V@zu7^VfgQ$7 
zjE97MzeeN-|AbtCw;-u5gFtc9cg7`XZK=v_>8uShA9m0NbwYYsX!N^YkFWsZxOUf& zhjzjb_q#jS3d`(MoaGhbVV-3nN6AyN<N3ZZ2J<oiIWWJuGChb8rOa{<k80<pBYpd& z<g`}(6Lko(jAQVPlr6w-suVkU@C{$yWyXnOmH^#ul<P{-DUMW>DSlN_9)62=i`C@< z|EPS0_Df<cP)t^cR$1iep+6DMkir~*WS@u)e*<Iv9Jg#$rbJSVCTAB=qAEE<O`U7= z`QkwG89Vl{I0#GFB6a@L!8i#NTWx9%Z?Q5D9ByZ=#dFTX+H-}5bBT#{f{c0u2%M1- z3-*pk3#1Js{_Pl?KLJQpca#bYurbY-Q7t$M$o3i~_85t{kG6)me{33R8wcwiE)qll zJk;-Vpl1v*0M=3!k)89{#V|e)cm_?3z#{#(1s2B0Hq@UqnfAnzb4n*4Yeac3B#N4n zD$^=MuZ4)Xp7K)%yralMRT%7`-o`tFKg=15K-Wnds^#C{yVyM*9;?SUYPCNdLmIAT zVKs^C){rH|#y=!!(?Ty4?RMq!M%9Z!b}VNp`Hz5h>UEiqVjF88M{Mx5K}wnEecR35 zXljk7Gq7?EjPgqibM(!Au{6&&*MSeGa>fJu2xH-@p|>mnz%Vl(4Sga4EfN~|I2=Qf z?oaNLobDDWf?nVe{?%-1c3^7gPgJCIpoJX9dn}!wv4LJ78GutPezi~c{?91U2EcNz z4#2s_W>H97i}QkG!(hsc?;6nncWN#9j2jEU5bIICEGlNy7)7%IEn)k8;fb?AIJv&9 z;6lSkZ6Qi}awot(Hr!R;NPrKhv&=M{!yb~8YpyBl#kXd@0d4^GexG$BKL1Q$6Myku zFU2{QIdaaWd^RW9e6D;x<JKKD(<Nee>bM26h?&}oTchI*PRYi3O2Nq|Ce<<{zBZqG zQBHbFZfzbXKuc~0i38*=u%s!k#s843mFb+v3kTC@8w5xLY12<<sawvE;klO&%L<as zpA)b<s!kTj38(vLIuK~me>PVdiqvo6h6EB|VCSL27H-&^@LwjBOq#CctdMv~k6*)c zzXc#49qCthZ(Rqwnf=j?!yQr@1(Fh1U~7mHSMYRZ8Fw-~XEu&>wYAvM`&Ehi^u^Q7 z!iV-XV#=?Wl-bqI<BZ4NCXTCp)(p!czfQi$Ump^1pbb0(_<I__4Rk?w#>h;DDL=}P z`erHL_)mXUZrEaL)19e>4PVq=ODeJ!ju=EG*}taozv1`T3Oo3zeZPi&Zg#Q<KS0cB zH1+@nO9uiN66yVzonVg1>IVAOCIbcXn7Vayc+bJ#Kqe<LhjSKsa`~&y)K|BmRJM&V z43Uv8B-;iB)wmFxm#R_B+Kn<>0DA;bGGi=fXaH$GLeYj&t~>^np-(*mJXD*;X79>S zyI(JpiEw9b=N%*p$(RpV2)|jn9}xk4ZqBK$!F}oG{`yH<l;niYJT;hvpb4HBo*H6L z*@`1q5VmIsbl3f;7K5ccq~)Vbc_!jW%c05R;AMUW*570}_GICaQg>P$z{pkUeb-?1 zegJ=NR8u6$F68V@JiP$m-(TH>1bn}&FK%39{4}pYrb!NEXu(V={Z;~g8e-M`<gbzT z_Z7fBrq1bvPMCrZBMHL=I#OeTHB%XzR@0u66IfD0k=kE!GLkQPZ^v<OPNXSMWQlBm zr{_*aQC{R`ZFAyct`|YO@>NUq0zN7hs*Z0G=g#3bQMK#T!>N#+f|)u4ZLU1groB+J z!qMt@uzUL${2~J1X&92tLZ81!XO=x;o|H4h_5+eVd954ez*BNbs+Sl7Io~J;exZC^ zAzLt`HZXxq!m*dB<#6tsvwRv#zD(B1iloAW8$Wi6DrLGGRjEfvX385r)u7aaN3{>J zcpc1X050b&I%Ic*^{c{T4=8NxOp4;2L1PJwf!!v!rn^QdDKazxZtsp>9sDsJ$j>F| z@ng0v2zK=mlBI+zsl+$M{#_%ZMRDFLCG12;f~Xp1B?w1%gO{VpYly8@W48Y(Cl5Ic zlSVSUNQ$B4se|#+YZVSPRQT=sv>>9EvnUG~c+2%*am{s2i23VaG!zjZP1ROeCyTO^ z8_~@!qD0<Nk~92nM=tNWtaWUyz_MNS%xw|qHVM~Z(wvE%q>{ZLoTv>?_^Kjca3(ic zGYWL<X5b?$T)K%6^{{iG=ohz;1<|(dY7k+rY+V0}#2M1Q(=`m|Ff34+$KWZ%AQr?R zkcqUAUqd+BZMc;@8@>DjoBSe+%KX6CE)=@6)s!Kwdt-?f<s=SFbj55_dq1@cEanwY z^^gNvv)FX3)Zi*08*P0>d#!C}jr*_RVWeL7>#`Y=aRcB}N`zLg)62Km9!yygg+I!A z>PkEO>STgf!&fb#%_kpS9`0|C?NxCDw~wvykHOFJ`a{DS%ArzX3|Vie88Kz;V`?P+ z^T7LOZu2Dxkt9Ef5UAK|0XUmLOrZ%5ctvZKb%wW7Wri0y(LQxswd5!*S4eu!MPiVh ztdvVfp)M+}5w6{Am#*N-S!L;?Avzr>*+q2dB9ZliBqZWD>qW_rgD{ALSWdu_Iq(OA z3>hcX8X`9vpfVe9l=pdPL}uznW$I==P(QX%%MUTqb9Qt(+vS#7CV$r<vOM(_9(ZYK zOJ^noKGLZ)O7AJZocwVFRa*v7>U+Du`r+J|BV6QIn9iO6(VO)(J8aRry9YOhX!^vv zbRFFB><kVyNlcMFmBs?47k6?Z${PkqV>y95i<GGn7{MFXpTae3!jKWl<56jZHu6Sq z&qC87(C)AXz2dRPI~?cF4GXDz2d)JSe)yxO{xCSEGun2AIHJip^9I>-SL(Z{!HUvc z2^N~v&u)M(XZKg0cdCvh+|KwY;C!<;cHtFiLZ;L@vNsqd$&+%@(AT`We<;d5$hrUV zOMBkld6ZD3xL(cH_>G$6BUEToUXF3rW!=KYz(n8U-YTa5@cmGo-_v->z<$lk`|Gyx zZK`ReQ@g46^W$ktSG|3W`47Y}WE_6%it(i;i7mD(T;w>Kvsx0ZJ{sgGFn#rzGHj6u zMKV1CUzOlaY>iPu!@j&zEc0XIy(FTf<#-h?r4^nm#g$`BC6Swn!MOp+jemg0CK<zJ zs1ij6O!V8q`AUKHllXU6orYx!i#DlcqnH|o_ArLxlACYPObn=8@StrpEI%j<%krH6 z<oXh}{+Z$>kc~rfQW$&4$=kDMQN<h+#(kn>$5b~sm1l8P!SvjAu@?-g(oDA0GL&3! z6In6<4|wFrd8#dU-6WzoX$J1n?v&f?`1%}ufMoF=^Ly`2UODasJ}Tj)aTvwq;j=-- zD$W~Ykd~zo3_xrAIaQ7Zh1OjyK@msx1NQ6{U*2v%mgo}neQXU{PYXED<Q~OAEnL-0 znP1Y-&co466jsg7!q9BBD7auA^2fq6nNo1kVyG#z=ut!eLGuub6GQ<kI|TYE=a2;E z+bw7z6$h?jKJ*FXs0L}@&OJtBgo`>E0~~@oV_pw^)~w15lH4-@urEpj&zJwkMKgQe z30|Ah@oxT5YaX+WI(>hB9ys%FO$WF-lzE1=METP%!zV;_US?I|h8jEAQAQw9w888? 
zZ&}R=ZW}i!f!_fxVh^a76mp~lk%vH;l3BO?^B(X=sc(qGE4T_SoVfx#k%f_%^hVb{ zFhTsNV|vO4<i8`7!@d)$L0r+99D4&FLH|U6I*M!ND>(Mjnz#U&lwG|29-<L(S25#K zuHZb1&H@M7IzW5YU3myNG*EGpVTG=N8b1f?t!Vznok0WTwttX1(-;b%0jqeGcJ1IP z5xshIQrmZ#)dl`z1Bu>L^1+Q?v{>4XqrdnF{{6Xu8?mhYuB0t<{t=BSJh>U*^$mP3 z1%AhVW<6$T2a;QjU^TB(NhyH3`EF8+E=>gP{1JQ)5_As|d>^oltTOPcZ(CPRVDL6R zKsqhWunMgZ<w(AAPp5K6f!Z{|M~4lnAJvJO+rq{RL)N}t4)h6UsU>7M;B$X;rr<Vi zInRGNuczTOe6_Rirm$UBr}O36W8Y<k);IC)Ybl2Bv*qSmWN6>n&szkYaSK*Weof12 z1c@hiWWHE#f>3^$Jxq2jQZs3=P?)FWbTCFA>NHgR_fQ~+yeq|`LuEHZKF~DD>!x^2 zjrpc?4UBgX9uvRcr(mE$nNbQLt*5U5-EY~xJHPM2LjL2Grye+CNKEhpu?gY+2S{dq zXNAf!p+>H)0}C7?N(sc47!uZiV{Kz>_#coaI)7I-Wi-&V8lu_?D2}o4mWZIs1_<+X z#4;_K2J=y(!wOf)KfjPlO$-(jGOL>rp>z(hWV$7(be*zl8Xafm8VKXg@-1WbhA;R* z<R>Jrw;Kz$I}1;bVoQ=aWMZk-C`!<HV+UANAd0o&!$v(WkwrkAXFTN~&=EthH{Q(y zShg3g1x!f1!FHT9OsP2eWqM0nt@QcYK{m+);jnhcM}e;k@;m7TED}@RE~FCkHQ({+ z{OQeco`9S{!!pF$wu7RmKP`>+*hxGLg-R9%h2=LwX{ZEN#3aI?F?A8jl|eIhod6iO zE0DEq$>!%GzlzJ@UjN^_J;Q(Sx(a@12!qdXA)aB-nST2bg801$Qc6Qqk7DG~o~6~f zFLLS1n(Fc5*0<=-*Js~g;8A{L^P1?=7#EsC`Jj-a2k_CSc?#(4T3~&99_;J)v5?ua zAw~<b+jP?$E+Eiia<kM4TT$wI7otK3=1L1@#$qb^!U5u<Rl*z~S*gVum?Xw~j>z1* zG6lE9;q_74b&G~RjZ4#eFC%I|AjkUeFkI->)?Wd(?eAYBUvyi4G&^5foZUD&JizF$ zaO8~-HA~LYa(811Y>{cqztI9hrki{xw>tKn8P-+XI?9MX!nAhvV@~<Q6G|P<At$>` zYS5;M0oNrp|DGP1++JwSfr1lLHb;BRNMAs1A+9g~(LLM91u(W{Q5m+sZz*qfdVX!6 zj2%S1?ZGakdzju?yc#;JaU1Yw8=Y_sQGFLQx&e+C?KF6W`=%{Ri2DS)dVhzpbcexa z4jNLH%9n*B1wW1;Q!-3zZF`J?pZ2HC{%NO4WsCMp{Vfn*;G1_0RzG<9<0X&5Rr8j^ zI_;WqyH(0Xop#e0;Gy}aK$r3b-}k!g0%L4r<mlvJtZ)7I&ep&JhJl%tfS%y*9TOWH zJ>!2?voQRtnwy(|?x(ShsgoH20}CUdQh|U@$=%MFfKE-;z{1$jiGWVg*}&<a8VPHC zQ$T~NxzXR=Y%BzH;>PBtW==2+tn37ILbg`64vKdAhJY@@#(=zb#$pco?gVraHcrM4 zcD7dfPR0aI4$gp9L31ZZIb#PQTWdR88)F+M0>Efdb1OhM7&_6v{e+DTZH<fxczI#| zMUC6)MTWLGiad(nRGaZCWh+H;xmr*mMK}uhLyd|iQjk${aCy~_<RY{+Lpl*GQCSgd zagoADLt&L^p+qIbZ#|O6Y8c|iMFG&3ReUcATv3n9<2Fo=m*X-SS2OLCUKu=3>y9SE zKmh~|#Op--7+nPSj*-yaBrQm#@iw_JZ|T%Ppk%@v{0FkR!C12*GnF~V@w5lDA}LN3 zt`wN!DNdz~a({oFBvRu2^Q77m+Y{jmq}m!508QrQ@_(By6#$KZLL4nA=t(@SHA@6w zc%D@7uXuk4z_qPFN|WZ;=}f^+O<#o+v*ve&3Tc~}){L^#jPr`9YncjQ11&V??8R1< z0-=yTGxmMK8obA>wQiZllwbE7+;p6cZ9oK1puuW#V6+oo5#V1tdaVV)*x<GGR)x{x z6)h%z^GD)f6;CLy)!L+*%3MDvQ|ILwvq>M~Pzp9z)kU+BOa1D3ZRZ|W-bE-zNIvP& z#>AJ*B)g8Z8maa7$#j&KVI6sG8@*y+y@%B!9TZ-=1ooCAI9Q1Z^o4^Kt?5|m7$xl( zDs8TfaoW;0oTWMY^}OE$`VLMBIxx^Tio?j!G<+~d!Oil=8r48e^o!QAeRI~4{_aPq zt3Kfn+}f8U-4w}9l@fjx>8m4h%~*2IWEBQf<ny7U3lpf^WG!Pr-^Q)3#^ZF|J;ADv zLFoam8c#N?8s}CtpA#J9HOq*}Mr5L-AIN%^e4uMZb|P&f2HuX8zQXx29i2tNfc-Vk ztZ`KMJLr|Van9H%t{kWa)hv&9k?A}oO>9YDWHm&!wq7mqbnJ@)+$$%8;84;=^~Vn{ z_rcQIW^GII9-C2yK1>sw0Y;w0FvViF=pT<BnO?m891KC;s?y@b16X{n9L#tAffG>e zaMmCZw{sJ}#<|S`j@KfrC&K9JLi?|nPPC_diRRB;Vkm1mFkr&CA6?5k^L_Ckl{ONr zJz-r5CVxMAHaDA^@}XMRuV@&pq}e;?vP|P<HVH%Fea{Amo0-$_ynY_+f_=~QSiejY zFU44X&N5-*d|w?9T~ZqwY|fD9{C$c!9Ib+>URb>mXv3v2YS+Fp<4mQd5*qt1prme_ z8fVQhhAr5DOQ)~n1B>KbzMPE~!gEPca5O<mSe9XHY8M@9kQ8k=2M)K<N6CsrmpF=m zR<?4bme?t7U$T}xP<baaoRV3>?6-=-ov4i*(Qxc{q!VAN373WxSC^~$de7xR0LGku z-#O2l3)Mg}yAyjqTng6kV{&$S=El}HhNWQ&O2B{az<~#bor|3yti}jZ4;u+g;Ge&n ztsCqm@G0W|g^T^enf~Et^z<zM$;6mB{x>E@|9`>6m>K^Y4+Gr)pLp2+nPUMs*MD&= znE%1CdKiF#fPi{)9%QXy_;LWZfS+wktVAH-|Bb@G)BZ~#BgcQy$M(O_$ISkJK_4Rv z>wgjW|1WuOU%p15Ku|zm{~I;`O!A-4>%WN{=KqBn!2JFZIsd`_|Hoqf5B5KyChfQ; z3fFN}%}<AG`a{RD%hl5|vnfG_N!rlVV<fo9EInMDlE`@Jt5YqWs5A*f%0;x%Dux%% zz#npZM_zZw_8CTSk1pyux91uv@t0m|FP~iZiu(XTsUHiU@~(A~`7d=t&sD61Gi%v# z`B21IN%?%wn5#MCc-m8C?oi{9CGo)$WQCqmm2X&-5;E9R8_qC%QDAI(skz^d(Bg3> zHxC1P(!a}os6ZGE17-deS05wo+^02=Ve%?$SXeKiWjV1*Pn*U7+S?rAWV*!=^~~4~ zAr6z9EJn{@REI`*QuF7#zG6~yTgvkA0gmJQ`4&p(hJhSH1&)4n4F5&gQ9X>^b8_vX 
zB@C7@G7Mx2WsiUpxvfjZ7#b-yjeE5Dt{1UW`oR^1DASr3%|Z52+NBjx69vyps0AWS zvh&^YEj{Pkp}RXg*(Iu$PlzsEGcln)NxHZ5BNv2=;tmVkSsibvMkJ;jb>ObrZ>T#N zhu!pWzRjPWDQRA?kjwso7JGeUhV+tidEI8h8Yr|_C=gVG*RN2XW%WES49s%NM!jUS zm|C4g=@2q<l?&UzAc))!^H`IfflnR4=I{lDgDt$93C71g^pw`jIB~%ci*nXc1eSqB z^hjEAJ?lt9-?5nCdX|sFCYZqow;O6ZdWf$5GG&t-0|jsjC`cDI>If&?j|I%BE0Yq$ zC9T*C7#E9Qe$dPPR4~TO29t!|7qMK2I-Oun`VQ5I8}L&I5e-{Z*aI@&IxS88G&P^C z1QbyNBVqy1k5ULUkipJ$Sc%W9m;!qU2@)bivkWdJAA@y{d3IZrK|ig1S$}CezJuMP ztA|uG&@w-2<l-3?FDe%2j|f8EvV73wdBinyiD11k{G3r)b|q3GHq|R-Mks`+Jf;MP zMbZQ(=lXNpbYLj=^)|DE6UlsZt@RozSu!O<UbV8gUtDaWIn&A%b#JP?4MDcsShqo8 z|GhB1lR#nCKHea^P&7*dav-%$O>dtXx<E2qE8<YHv;yoMbrd#!Q3?1Hzy?WglxmG> zCUn3Y&UDT{3Wpt~6<fHSVe#V!N1A(l^gJz2J9Ti}u+AL4#QL_CaEvB{WDmvdkrPFs zN+K19w&cF8mWV+7+wJJw1LAFEa}OEb4%%MR9A91bWEUDB`Ym6_NV2&?%2<oc)Mi1X zoBENteBV^P`r%hh{E@Z2HtpSV*BYG89&_Gl|9e3SSNz^&J|N?={bv6>77Kd9=dO%f zqyUw<KchfEFN+0fnZ#F`JnK%D*8>Fnag@B9^O3HPslG2#Vg2MVpX1k%$h?ZkgE9x* zotLta1gI;xs=0S|5xCa-%S|6wSS02js=e|)&*QfU$vM`Pn~6fX>s%kR*>0Nkkx_gE zjci408H6xldaj86dq1CwS!3{rJ`je?Df}D_?F01<{?s86uY}&C+p+*a?ix1u7P*vz zKOP)NZ`%FmoD6GJPqJEN`T5j^*x*b#@1aBV;jm#x%Hi6u^&u)Af6kmjS_E6q`Lmt< z*u1?-B;o4l$tMD-(?+{O%9Y_CthM?K)HjXm8v-y!K^!Fx%gba^ilG_z8Kw06D;v9+ z>)Y>xqewR0{=~+e6JzSx6jfYAT5x;Yi#b9&<YZ6Dl8IfxnN+S~Ixgg}F*=dU{9-h+ z_Hsa78o6Mi{~=q>DzqigU63298ah;$Pdb9UxN(^*EL3MB;btQhUmRa*E5QsmW1(^T zqssQ9t~=T4QAmDLVHo0JuJPn^3;SXtQGc4mO!ChjW}b_;;%2C{Ic|gtM}b?s_sI6M z&^`{Vyv^=JfSWv+`X7<)f|zE0kB;aB6jMGOeU^w-sa&Tf+x*yK!_EcrAn2w6Q2fno zRXek__#@Fkwt{I9p~0C%;G9tm$_xifL-tmw6uG34lm=dpwcX+x*xrt9;;Bd@P~P3? zgDUY=_o_Kx%i*~)L2U&d!SsCz-AO+CXsm;wm7I4yjhDD1#0P*yiJ$4l8jekcm6DX( zMLD-qXt95AOtAz{j_JS<&s$%EVs{31M$(%kSEcNx2`AWGIR*BsL2w~luPfwFhVfD= zdPz%_WLL*!J>>=SqnakJH{`Qd#_}l|+|Bcwq9j8<V+T(mQzF&8Z)T-#me>%yrw6*t z5TvwvGgYiz@R_)xaqFdkrtNhK%f4u5Eanv`G-d<&nM-1NI9>I}^b!S0v;B_P3vDD} z$KBR=W5@-%FnNOA;8)7dqJ?M6oN*Sav|hF54L8-*jj;w9N)=v?;Os4I^ID^vhy8cg zTQKyi*g;>aPAaVbP2~R@nlZ33G5n{{XJPtpV)8$bnA|_GOHSY6FIHk;{FeftVC-n? 
z>|ki@2mo#@{~<+%Yynuy#_4a(Uty~NLkBpNZ>}$B>qemYSHEE6U<CN18MXh4)_()U zf5!e5)qjzooP({QqOlW!Cg4_B6o3Sc-2edaAKWMSudC3%t`Y>=|3jhpJ6y<-0p>rZ z!ploQC*tNLrs(t+_Wf-Wb7UZ3_-9stcLxZ1z*PUK7iFWRC-{eG0P+-N02KbA4}b*O z0rwpL(&j{&00oAB;Zso-K!NGs1y))Hf`21dQI5Z3{$ks|FUCwu55xFZhf?_GE&mN@ z|IS9jnu&>kf%RWK0VQTY>F>$|#sNxzsj~dXz`rF%7zUQVs^&j0^WT>4pW**rx_^Uc z*8jgi^uIpS|1W%2l-75&1Wfl|9sb|CDa?PO>Ho7jb>S^tjjQT{+pT#t|3;#|;QPZj zcQZkQMxIC{E`|k4q7T&cJHgzqUq8JOC`vU+G~eTg+gxX180qz#x%}50^jwLBFrnet za47QC(h3T@F`Z7&GU}gJ-~J#Ntb;(UM@EJ|UAAvEo@I0_=~!NDeb~6JXuG6r82{HL zxzp>Z>uc)kIs5DE>+SV{Z3}+M_APts%Q4He#piDAW~KAW_vMIpf$#J2%(g3gYq@z# zT9!+z>mzIZ^W*)n3)=NLd0qcgYpXbWEA}Y0daIV~XEh+^dYSFZ$I4m`;{FwwZ)%oZ z%fZ^#=iFn!&cReyg{+tN?qK$P%F9%l{q<H^7sGq&*Jb^#im6TIn`MjUm+oUM<vL#V zo}&lA26?*oDG{N0)>j@=HutH_?M~qR@bdU65klpe^<s_R@u9eeAj`s||Ap|D-Skzz zv217f<;e2qZnA*OYW#icWN?RYj%QD3*b%|C{w5`PgGOhMN$oahzm)THAJ?11W9qrd zmhV0N0;13SISZQ<WB7^ttDGp*6E5VXFB%a)Mm{2$1EUNDNCvi*p{j~Ei}w}>E-F7a zFGv_vK4tTIXDvqMoB%=BVa4Vcm@-5U!-sCoZ97D+b_WCXogC#{SG%QXI~UR(M&!Hp z-Cg(9&ByfGiLW`o3|})gT(&Q!3>$ve+B45k_KjB6Y*e4n<EA$jpFC;|7i<WHAh$Za zm-FskDK6sf4hGzZ7%x^}ZZw^=%=>8&FSKw<Bz`Vph}-I{ExXs$30TAa)F+gNkIHQ$ zDY)N(6j&ACoMNN-!*8A8TQ+);eCRHe-BD00pIa`(FORjgelJe8{YdM>lHZ^?_BGqG zq2^Z`rZ+rusDU{BCnF~jJM;CQ+oyXc#Q56YYK0}~ph;kUk)c^OI5(?X4(v9_A=zfE z^;(*>uow<bDi=;iv=u<w+@1IPjb0~6Yokv0T(94ggLSncLv&w7s3H7z@b@R1nxVhL z-76KIfvS^p^RPV)X5{`3B*bgs$S|N@c-Nb-1dj=2z!045R-t@tqcAAHN6>$=7qjI{ zOa6FK%eZ05fb?rc1!TI_Qn^{3^*bq2+^nNto~|64)SL=A|3PhHU?wfgXrrM03!Yd3 zWlS+~Z4D8k4Ph!AGZKt0dcX@e5b!N+ghI8SS9m3KdDl<>&3ik80vDCi?l<m0MWdM6 z6rR}H*ypMfdshnuI7R&#s+a{PA*O1Jj1Yu6BE-F0b6XEqr`;DopZUgWO@u3YvhwaD z4%3F2-P#T~ah5|0DvPSnbU1<#-}KoL%O9d=SNDDA0jf0<vad%?`gbf-*aqP_xb!Gz zuP7RFy1O=3pa2IL_{fe>bczZ@n}B8(5|)now5ewJYVI}#=>*!SCUzF969a_GDmE*3 z?#3(Y%}dH|%XNp3&Mc35_FQTU-yz&wvzTRQ;>-YQQLZI;Vg!YMgW{y!D9myM9lKF` z+*ew2xZR-h&e-`e!)`dK`uGS1G#9M*bBnrF4p75;%aBDt1xaSII6ut_>bXdJi`~Oj zW!J%Fa$qiuyIb%P<36I}g?b$JTN2@=_X70!w$3|dkAH(!zZiU>aeFj03CE54%J5h! 
zhIw?z*-j!;1GV9=HxMys&T=5Ala}koynNRh>(w8lgnTFBcwyxJg@`BvAn!KjZJx=J zO7$Ol;$oCWP}ET*p5CKOeF7ArdcU_QMf`US1SzF&bwj@MgRBU!_?L<!yJ8}SDgKJ0 z@F$UYgCpR-fg<%<p(N;zFbxSNm+7O!9#aXdLSj#%CQ{2=o@D<`N@p#M<?b2|Hk%MX zq0IPUsg{c0oCIz9dy`Cxi2x2mGWL<cgS;4QHa<f@FtUby8ZH51IgvjEbaf^(r4r7X z9BHtP7_TTmPY#6!&h+d@)y{{rAA@3ePaSV6-*3C@y3FX$45maJ2nH|1h)A_BLD!}j zewFur^b=0aDRXbt5DSA*OuY@XaQ`LBiI>7+usS|m1VCj3ze@!nTMh@DpS+kVUedc4 zHJIG58vXRqpL2CQz2rzp-O@*uUoH^vM*3?r#yz(cfyo>u3}ClMx`WP|hp|A6N+(%& z0KcWTWI42gACkEL4q~UI7=0Du9cyib9djs;dqiXn$^4&%<@B6LFK$kB_8<JR-8&wG zXpY^n=H^IW)S+bOaDs~%f_XhN1LHLWq%<O2@ajd!eYSLQOf6w|)8`DAlMa?ypN*~; z<^oO7tu>BXVS20ZeI178nB~wk-wwD$0@s9pMf6d{41#-|@)iUMxMUKQm<d-3u%eLO zg?i1_O=J@81Rtwmezxmd4*b#xQ(1cFZH5)|{UkMV`o>no1GVTt1ex;qqn~rTR>c@s z7BTkl^EXq0Xwu?%-oOvrlp6FjJBUV5cHnq$NJs*6=0D|)vsgcqg(N`3=Ia6h@e&nh zK-0e?YQ?GiY14}Pvgu0GQ}-5e3DT&zMx|pI)cd{1fec5f@f#$cC}-||0*qZl*L3;K z$|<N|>j#N~QNv~Y3;{^~eG?F-Ovj`ka3*Kt8A&`^*dRjLFF`4JGzuE+%8bd|`=2bI zPGdc>Oiudg;`&0K62PoZ`v@`y--xsI)$n3W**W9z#lMfOBB6S&opd~`1)U4`Cv;3> z{rvDJe;K7qA4UmQS)1U#CQ4o~Cm8%C`q++f0&w@Q3WzTsNJ@DdS25`8jqLD3heXi5 z)mp==8xIiq&j$jVvK!ekN0EY9fw0c9uLkhpAd8orP&c8gV>2we$|0hS_87;az=tHM zksqHn><*KK&$2*PaGGTdME}+<v6N7GhCnlFeJBppfdH%XokAUh^FBtI)=4@No0wtB z?4F6CTm(B(9)||{t>e`54#hc8&qx~j4W<UGT(}ZS+@t%^21_P+-FhGrfhupHiCV^F zT!3KKPd>%*pzb4ShndB1xNdv|6n?Lv|JKfcGk7zokEKAL0Ej1ma?V^M2=QnW%!$N% zLQ;NlFQkPB$1x-fSH8@T^qb&yiQ1JdoWzE^DR2UeU&VxYdHx0cY|Z6%mI^hmir&l( z0u_j<b>_N5gsw>v41X`jzFD7Ba~M;so*tz3yr0hA(!_r92|pNE{B;pl7`ZO=Br1G* zpC8k2ch@3tEY@n8T+-uEH7l69*KH9?G1dU-p7z24noJrAa|a6q^W;;FGIC@Zc#x4E zCP6`D=-Q1$rs+puKvpPP(sx=(sn1y=5{$86v0DMZ`~#QR?M^f)A1VI;7hHGh?of&b z5-`qfQQzU~2CBGhD5Q!6J7cc_>peJBxp1{B{#=H+IlCCb*ap%aVIX!I3gf~9m_T4Z z3Ju;C9Na1dC?m`?SgL5cZ!&wYAX)Xez_DEHnn!zr-6g06@IjTr@j}6iY8IptKgr0! z1Xvr5C8dI|%92o-F~gKzitPgxxxP8b9&x`o9d(q)(8;Z83Y9_6Zsv7_FZTqo;j|~q zWvW+BBRd7i6b?z9&sfkP_T|G${zk24&%u_dy$pviU&(J1X@c(6Fp`Gh;mOb0(jjO@ zDoOy2|D&cByYLgLmKlSHM^uTFgsu_{*Z~G&tkojq4zZX;;DV>HPasA~oSxP$En+8A z5|2;&lr=s(SkDj*5LOIXN(x|QPdVg24N^A4nd%6qz!;QFr6=o16z5NfYhVHOLG?f$ z%9T%A(XxXLY;(5I*Nj@4&$b~oM+OBV|3)^Z3MauEx71P@>*J(alc)e(1S3;VA)Y_y zAou#{-~};`NF_nNv%T{<9vxjsI)=&qMIuj56=KD{&5zf4S0nhNi6~kR39RdGW<4F~ zq(20YZc)+<RRUHTo5+~+lbQ+6D@nK~#xrFx0WIw})480*{zYLK^lFd-2Z<Vbzs%z1 zZ>Hs(^b&R4)7D!N|8(*?C!_;sM|!u;diLHUbRN0MY!0HJWeR_`<=~7|2s_##JbxI% zUYoq6WWaa84r0`Ehh{Pg6~hhFT^%E%Z{w0Ww<My%kiE3=k|{jaKK-{wm9`heClZ;w z*({OlZrZnKgpP1U$a8OzgQXhdP{wyesO<lTv2zR(Er_=B*tTukwr$(CZQHhO+cxgl zGk0v>OubYkd3i}HKhCM@zg=~Duf0!yYYifh?r=VT`f$u~C4RX#3PR>i3DvS|cgB;n zC9OCK<!K_B`3a`g7;$c41AiEKl?nPxP-i%W-7DKzjnMXHQ9L|yJ>zE<U8bPZgE$0~ znbT%l$6|-H3-ud_p5SH201EHccvJ;Ii$+fiC2Gayrko5t7(M4v+8Vl>_T4k`9_)~Y zw8IQmqj)tNOSmS;{S7w&;Lr-j@~JKS(-?*6jf{<a;#$P3d+%DQ6q56J6~`egE}-j3 z)}}Z%gq03jH=nx&7#=sK0+tqnVHvs4ZdId3YxS{0L0WPXcRGNq`ZK3QWdrqM=9L~n zqKpVzEYV08wjL$xH63Bo&rF<9qA2=+Oa>4lv*hgJGKLAIM3UsouWFTcbpeyUrxh!z ziKN&+n~-lxpPf{|@gkZukA|#B1_iy&4edlg9rEEjMGEoUD0iG@-uM7zP5}zJ=?DT# zKY<Td(Q|{M07~9QJKv1e#`iRcunA43kf`{+Vb`*SpeU{>Eq2qpSh6e{Jxe2;2HnM- z;X{P=>_Tuuhep`00u??|9fqx7BS(ZaZDF5;OpSj9a^N|H$(2p=o;L<?Rb7-7+=Nvo zwn~f&lCaIPkP1j#BnFGaz6-+kH|sDYv}~0r=#AvqkB~{b#8`8%pu1OLZodOH4Ba95 zEan8$9D-`7)u7DNpkbs~Z0=-*oj=1BIr|6$2G%;8Nefpw`ZGi|jvhp7+B1{a7(6}a z0h_YwFhkXxrKyFKqi~VJSuEMG>V9&QCJ8aUtwGcU8Rn&4bR2;Ng^@90RpcKZ@<Hcs zbc+curPk+gHIKQ4g)VV2EGuLr<Z-c3fY5!k?uj9Y@FlcYT)fY^CcAFi!t*(ctX;Sf zj#`=<6C0gL7vbHHJv~Xiu_l))JyIXb3(l+AmNxtFA{5bq;JQBLp_lmnIjpmtC5wyD zqAY{JQN$x=$C8_{>dQgRUyjX~<%18SgBofO{<ego3ud)26d`g`CUz~q1!$rwroao9 z9sfcQC62p*jK-ne#%d4fY<l8?g{M@4K_}xh!|aB~sk?@ytew`7W-Pi@NO~5~U=g@g zQH&@!V!y34(ig+%J%O+SC|fm<Dk2CabLJ4+3PGAd{J&1IRP&`NO{N!Cmf6c46|FH3 
zT^+u_Y!s8%sOwXazmlA)%FF~(CKQhm<2cI(8A>*O%O$@Od{L27nI|PwK>%f=xC8?Q z&vY905TFi5jvmTBY6&~--eoE$EbihSQ5R?ZWpcR^m+eq%4~1w=mJn!x@68J=w+L!` zbtY?A(2L*^B<m4IcRK9=OlD{-=|xTmss&!vNKT>_h|=3rSg;&uR82U>QHrNI3_Du0 zsk!AM)S$Ghdxb4g6I{o&NQ+#w>I(rT2AEPt-C$2%_aTh4Jz*ClL&!iqTH*(BD+_7D z(3bxSYA~9PMd>Lt2D4bvfqW#|#A|>pMWL#K$)^d{HU@&hz<>^dYf?<9X77StgL1@b zC`N|7wNS<fJA)3%VeT6N1~G1pz!56)Eeg_OnaFvd8gkOHX~qEd1*ogmU1)wu5>bb1 z5VDJvzehHPTU8ShpqLP?SeT%d0_rmzIzSFqVigCypLvvpm+RAbt|3d51&`*4Dv?x4 zCE0z<*TxY1p!O+{WW#LO6CKtz>7!CnwP3fFx&spIPTuBjio&A;pmeQ`#v=wyTzQC@ z_b}~f37uu55J8gy^3)E~Nm>+%*>VnG=h`DI$*M^C8c<bPVHG_NWVpCs=p~7RT!*jW z6jz{}Iv%3XMcHCErb_Y5=T9hi1+Hr=!D3c(s2i;dW@NI#$ea=ZTCI1_#=uBJNfref z1+6?JwyXrX)#xm1<?c%`l(*;vErKGeJ>%Vgv`BhID-RI_2|KueAI-W&iFt7>6?N7k z=z8Z!s=pN<)?5w>Y~`GVgfUc@+5;6gD?^Dr#$PN0m5z+HB$Lnmw(x5zA)_Z{28b(u zaJax$3%Zr8Ja>OR%piCY;ZZ|NC)BI~h9NzI5g>|{$|Npf2e9N)20|r8BvTx$K}6$8 zAw<hMRrPR{94q&T#U@{WuyQBON()e_ArG~e(vzrn=5qz-(v8}EyC(dN#S!qO#qZLk zugg%vl6dOi6rK$jCk>N*$|-}yn|tb!h^`0>4c!9V$;O5qhNWVb2GX0vE7Bmrty+q2 zGIG%8MZ&g>YRJ|ul`O0c)ZcxKS7sono;|5R%(B$TXZ})Q1)np3#w8u8W0?l6mdqhz zpHRpsv0IO_M_A<2aG@FmmyvpCY|&|jZYXXJy3%S;>SSW??9J4WU9fX1eiE>bCC5KF zM}fh@kb?#<M3bi;wm2$|_oHA6lV%a7cr<xaN){H2D?RCnP%yTGatxZjqB^b%tK?gk zDOjW<FA~g@Iid=koARLi{6mM~vrlcMP`|K~dGbbxx|pLRE|u;G2-HACS}GVCOLRqH zNFfTYd8t`5Wce2c7&4KVoCOgIIcY0=&QF<E3sV`{dl*+X9gA@w<`c!aJdCNray<zw zc-@DyVvAHz1HgAY^?(CRm>9OKA*;f3LGJcof2uf@Ae^Lm){U0Hw3J;$Tw26xSt1X| z$Z`}Qgzjv~m{kncR@c!;FMW_5NzTa|vDS1H5M&@%!%-aV{hPtQC?}{;@X!dB?06zp zCggW?0am9D1%?FxW4c6|b6<-$6I&I#81|XL?1q&!W08R)J8DvrW!eq)2fJ#HD7hVD z7QJo)g{eLLE>0~KQO}k&X~oV$MRZ){20^;Q0`_gSx+H}WBTTBF7_PAkqywp!3a``v zy{B>LfziYZn;5bDj(pwD8NR}8IG2TG-kff(R1Y?-otP4U>7|)XjgZa7jI-N-JPb;b zTcOef6JcU?0=mftDHf%_L&~s?VZW%J%*<2Q!q7Op>)!Qnj#9YhFpGJ%Lg|~42A|zH z@?Or2Y9TC|o^AgG8_qF{z__K5jcln@Y1@moOpBz=*3;UUHOFxCfLH<YL+gyKV<Vd) zsc1?PMw#v?$168Oh0z>huz+YIu0;X?u5>WrBR-HyfvPShI|n+vg07p0(O?m@VgkN4 zSvji2bQ!8ueMZM+)UCy?Fg5Tog({9@9f&?_EwI;G{z!cPsDeewG#G5CWw7P<g9xuw zJV8y*8%umgKey)f1>s7gP&zc<b_gic`ilrmob?hT#Rl1!3>m-{D1lJes+5lH!m!HL z>UE{OXDSA>GQ}W816*pQBv+L9fH}tlNX4gVQUt{vgHgeH<#7Qw^{C2aRM2eHsdtYi z?1*<CGe@d2)8X_59)J~-VN@_j-Pj*5YlyC5XpTdG+boF)G&4!aRtGck1dnyN)HAv| zlg#7Eg<EPMz=+PKimi%#x%Yj<@B1>nH*)3t@af_^W$%yYH$>kVe)jYH_3s_^K=J!o z|7&|5nil}+!NIW50AMb_1cPfXA%-Zce39u2$39I81{LXHx1^OvhStWt4=_Tcs@&x; zKL2O+R<CuM^<QYpdTo-Q&rLi{=@1$#x56GaReJX0%sw!t?g?(hJ*j)5Il)E0;jV~y z(BQfh;Ss1XUM!&gjse4#2aRySglXCOB%m9|eIcO4f_Rk=VcEJPw?a;QWf$<ZGU9CI zikl~+{=D_NIvd7~Z(_ESQ6}X%QnE$HU0WdS@G2&B-1kbWPV~Lo3VYrtc7B$Wxr(c! 
zm@4cQWbt@+4`Z&kXnSP!4?VQ$4{Lt;Cf(`$eo0cJQFj~iAVD=`*+j4<*$Sf8)X9d9 zv|yV_8m`f6VBdQ+CX7DHJ0;sRXM4_6Cz4Mc>3tAbcWT8Dn`50RaIka;bM}O~bzKG5 zektixnVf?}6p3)(5_Ry2EOo*$4a<5G{e#P&;2hM9I0FU4Y?g+Z;r*l-HMqqK$GVd- zr!wUSUn?Eb({%{tCUp*TkMJ6pqZ-wmT-!7)OePpoPCcoiMJ5c0ZoWjNmDPZQWN@X> zS!6J_xXjBQte9$bc8roj<-yf-_qIN!H>vY%CL@Y3XD!Sj4I=T@wA4egOApl{iw%>v ziD4)bkF5RZ;y*3uFI8Z{3XX0oG%Isrhtr;1t5Y0Q83N&U_KuFgnR*sP6<-=`oQ`=> z7^hlE26t6)*<=-e;by6Oey>^vXSi=-o>K6+(*E=M90VZ#_4`qbY;?Tu2pK$&1g0cU z$pvOvbX#qatx>WMVzfHO!Ep^ct;U8~zwrem;W|W0AVZ@)$E7vL`IK^ojDtMGYyL@o zX@KJi1S}|fij16BC|Oz>mY5Z>w=TIyDc<Wzq8`S9!6wc*ujnx%46aA6$K_CcJink2 zCaQl-3P&IZtpI`C!r#NMd~o#2O>4bmUU^gQyhIfzSOtt`Y}Xl~+kh75i~Kj-1v}UP z<{_9U%c|F)p?u_&-#KMnD(;5|g#pl4G^lV+j96*n-Do?suEk!swh`|$VJSnPYiEYZ z9%2ZGeIE+fQ3Q3pBd4;*|0k?$@89z`|G)Zw-`VO{ULUu9hW0*N`m?d<-}AlL^i$aR za_Zbaf3W@f-iAYSv{9T<nQ~BcbWXXSIEO7dJ0T%T);%7S%*9(7COb<bfTsl#Ry~*q z6ANK6V7%0$q232Qrp`qD>%u%2gnVNj(V3)JT(jbFW`)(~p5`Y14B+SNezN2LjMLg9 z%VBUU(;zOhas4Hp9kN>oa!$jgW3Vuqp7`M?o@%-uchbw7&irA#3yfc>Oaa`>2%O=` z6l1R|M7zpN)Rszs*poJ8F`*^vsUErQ5c*ln|JqcNAy=ma=3`JVC+tNxQwcRla3*3v zVluJu$2gM>u@YLI^YBz6Z)tYi(xDBtoPWi&WKX$mWA7p+<yA&0p8{Lg6TEWGJd75V z_W#8Ho;=$oZ*MqPr5X&A^(5dNZa%M}^TM2Y#8g|1${ZRMTMZ*gn8fQj)Q?d3Nd78O zWT?~H@@0`pE%unEm?yCCu!zarMn6UysJ(l@4*v`ERqfrc5jDR*kEyE0*uDNPA&WBs zNwmxfpLr0bbnDNt4LZIXG95pl98P#xgM&92H!EgRe2;T1?Nk?N6{0~1KBF3BLYJn- zTGUjm_m=!Q?4JL15d5#_+m>h?BM>wFP4#a$n%iJY+LDdh3oq&wKe3EpXew_+(+T=b zXko}ym;2TNPNSEFlU(f(=td5d+~9`VLkRD@n}$$!TK)Qsy#Dh}s2B3eJTjQf`N+iw ziChoq(015oID11ik#^Y9t__yPspudyG_)MrLI<7P(LEPj(PhgW6f{M;W7{}vXcLXf zGgmu`0dj{^9mMKI@3*!5tW%zxme6j86#Jb^iQ+TM2SMbH;N-G}AmIM+!76!Luv`@{ z#IqH^BU&iUVnwh`iNL&d+8aQ-P;yG!q)#b*p<YwR$uS&cyVJIh|9k6a_2?T+COzP> zP3z%K`V3c;wl$+g61g|*m9ZN6IReI+*4%NL7^*WR+mHcy1*`}_Go>k>eyyf9bcyth z>{-KWZh)PSSw(oTPe$LLlXd=$?suZ1W$8q1oiYAD_cD>W79%Jwy2CnJj0*`KwG+pK z_#wL@Dk_j4r6SF(G0|O0>~X;`dE8=MJ!{%pV3eZ2c~_DkVQ^T3U(hRiJU$`P_5J<s zN65lF=V_KIC+7>Oujggn;xJ)Baz&N~Zk!zeo`D3j|DdS;ZgOW;*${+O2-E0m8KS_E z>Rb>rzd6-nj60_P4nBGnpuG-Uk647U>B~Gk7;A^0yMuApLr^a?Xe%lnvR32*Tx||% zS7j-{ZZpCVKj8xQjaVILz|$zvii6MrnPUw2>JJTO+%YeF=)lP-Gkk^xv|3FnL?CM> zB~!K@m*Dv+?0uw4bE}&zCoYWY9;C809N$We!UqCd8e|>N$tqR=R%1AV4}uq|N0HK| z7;(tQy->c`Abw%L{F3h+`$`NkNK-B$3MwzkT&i~`@v$d`obo-~zux&8hOT`Aa{}R! z5+NbB$gwE9e9K*FdbrXc+oS}#i3r?a!B5D`z7y28C|PWiJJz(2rWrsPVXB8SwIQ*P z5`39Nm5QHyOf5^}7w+K?r_y}*v{(}~3=J^toN=YelS8q=dWJSCnUBGOBt<~L^4lPm zDv>p<iZi03q|vjug5~Pj7FEGHGvR_9e_8XDHp>+fK<HF*hKu)b!_-{63+oUaX^x19 z!7Lo2cJ<v@3~R`^rmura2jeQr0{L)zi^q`<#>&&>Zx<(4T)LA)z(aXnG^}efTz&`Z zgVkb`{D4I@sl76}?^h9+h{PGXcvj)^s>a*rS4}Hy2Z2Irgdc>8-F)UASmi-#<X+1R zTtAU(49WZ;&2fgjf77LQsT0=(q~ygfAXqe5sb`nTpg}7l61F}Q%+$`dASjR?_>lf? 
zP6K_DbIbW}@o{5GPDHGi+L0YW%9IsYW*cNnui?YAPj`Ue{yQfp`<t4s@2%g(uR5gz zqc)}JQ=@cMFedq8U#4HMZ7Pk~K>$@HS9K=vp?rO3Sc6-p)nwCJ2ppY~rkYkBc#QV8 zA}Eraqut=SLjeZOl$>LSFQI<~zSsW}Voha8>WD=-7l{P#H7Hli<p91SrpBeWCfLeG z-*?#ahl|->@ih6HiQ@+Dk8qPJO(EI@W)(7yHl+az{HTi43Dj{~k*+na{A^H%BWBnJ zK=Vj*f&Z!198Q4UtXZ-u$Ba;>zVeADmQfg!oOZ!V4M4mHBh1$Qq;Hjd>RIXdmsa}N zPGHRSZovAVC}opdI+n<=XFWu9dx<S!x%SF>E?N7M0W6r<q+$xHR3^suGqdiWA9sK& zitfC4nTInKaMu28rpt=SCpv8K;7pG4(UVI0E=*7aclzF+K;K(roUF{ufS!S=h3~Iv z{G6)OYPq>lu*q!%nDsmq5SOVtvs5LxErE-Q+}-l2o8DJG%6k6(x3TzeRihp~zD&RS z<@srHzOQM0KG}blw`ezz>hk|^@NXX7mH(mY+jXdwKVhEg(^=P#>Obw4uHS4k%4d`t zwpj-QE=)?YYl{+e>wjIt>pgOs^CO}P*|;9sEQGId=p$ibVLhF-V2y;o^j<D!^ilJz zd&u@Lov%rQ4@7?P$TsTVvv%$S-xyM;<}fr7-Q<R16l&icSxl&K7yl8*RT&98eWG`N zU1V#IB*eHeHKpHpHT=ANGS8f#H2^=JGb{zl8fQWIrZ0bu+x21?qL147;H-;e2oTF< zmH*?!1{Tkk6<!|^PtERrUBhP)u-O`z-?O;F$ob5Reo%+*5aD}&=C9rBRzEk1gK>Or z#fVweFPuTzH8z_|;?dd%B$BNUI*-5n_x1DAJ44#zF|_l&sOR13_H_E;H2OlG`wx@O z?AP7_oM}feX?w`OIrgo0u7A@><_!m`A4KgC^+rSVX*|PE8@=~)PbY`ks(S*MIA1jm z(I+>0xW~zke;3u-BjtaV{kxxr;X`cW>JiqaH(ASr4wta`(|RQaqt?GktV^N)jt(HI zQ%*r=SSLt{#uis7`4`5YvLfXs*L5x#VkfqYF@k3+`#W4W60V)7$p}OQJ5C!=`2}W3 z#}k9KVZhA$y=Nb>^DEtZT8=71YCS-d`*Zl_^j^(djkbUrnsc5(P;-<JdDKiuQ0ti9 z5&ih%wMvHORk%nCaukx+Ed*k5IF8X@KEqGF@-d$C^dvt1J^wIxC;3yWhAjUt2u#?H zt;-IM^Vh7)m&JjB%XtmA|5@P<mS+Lo+B7aE{c$410NM8+lXI&HA~<I}&fhDl66TAk zda3dS^I6*^tZkSHCC5k8!DK^;z#1lZ>@i|HbaRiiph5EnCRxhK*Pvp@p}zw9Jr5BH zZV9A9h1ipZ)8~;HTwx=H+~TPO9G6ovVuKb%MSkUpLaeT;#Z@pCl6-me#`H_ax!-RQ zfMy#_Msj@F+`qr~t)9ugn@}vWz~^ybd}m#cmibPSS&j8v3e2Ii28k&pGP4^GrE5x> zKwcGM=oD3|A?dKn;Z6Z!F(7Wr!e<`ow_-!mX|b)B6q$W6gqs1to8ZHu?fLd~Ov%Y) zJp%I%XiuKcmj9#?w!D#G%{Ixqu@NFUF->j&JU5ZeQW#~>MY`t~rrl#jD$>R4IoBc7 zEZ(i+_P#69beMc&fa4_JgKJbTRd#IaVS=r&--A1}^kUEvKNBBCgtz0#C{J8#mUGAn zkFCBrn_GCvu0{DQMlsY~`FyP?$-ymQSQ0KBleu}CHdW=EOErxj;roJ2_u(jl?}Jsc zE)@)qs)aS0`t(m-C|JO-I2Fr-6QN0g+y6u1N=i*BGI?*;I|z)Z&FRWoj~WDeiqQ!r zB}^F*@NlrfNE1U}?`X}ieokayJ&|mNG^ArvY-BbG=|Xjmp1N*t_*`nj2oChusQO5> ztCJcp>^~c2_rnS6u8ptf0YOD-Ff!@ZM6|PBF+IR`@DrJgk}sX`sx>Y>&DT3xkfOBf z=jLHBNm9(wGv}0mJ6e={Zy@ZA48<|}Tyi2H>Hn6%)yc>dqJzvQw!Uh8eXZlq=iA@y zo|g!qq!psNL@5+P*DmJ*yR+-yPZuakT0qc7wySWQ1~>#!Fiec+<rU87&}0aK9~L>L za2pQCP9qWi9e?-Cw>gJJx=p~!f4!&BLXwNA8jw`{)%91Sj3HLBR?EWb62k4x3t@0+ zQ7kK^7(gfImrx=_^<6Q-$iMSPD9-uZDU1&RR63s!vDy8!SROcmHuB1n+=04;<7yXb z>t%we_kJAx)n5i>1sVmAZ;gYGrf6clkB&OaFZKRQ^ysa3A?YEcuBz!_gsYLipt6Tl z-<%aKD*B(CbRkzFXoPe|CAsw@c6<%HKjdX~_B8orS`mR>p=^!5fKJmbvM^&mw>)6) zkt1iWS&qTD;fg}s<TJw`4qp1y?~bu2c?Wu##j&~3IT}j4gJ-Oxbj71!W~^){tk}Zq z>WN_sX+?a!B(ZZcpH0CfMC=20)v;CP#tbm#_p9WocI&6iL@4B=#E(02x43<e9I%mn zKRx}TR7WIwwTECJ$PPT|07<kCJF3c*D*nX;EsjZ}f{@+}K*42A<HjT;c%mCvWvc)l zh}5>nq=ekI24hA6d0n+&!!5Ehw+Dw2=&D)Zy0wyu9$WboLCd&PnPbd8QQued7k-7U z(70Ph5nq4+)UI1jGapqv+_|~WFqtk+slpgI5O@b^hO+y!;#}}M5r}7OvWoH|7SrW~ z)0^F9pO7Xo^PDHkus1$qXv34Gd?qXK=Tz<JYv6{f6**o_j~YjQDYBdrGF52WPHt?E z{is?(0vDZoKW}VwNjrjYD`m4~13Q;8X~?M@TBg#A<+Dt`@ta1vP?jZR5-GOg>B5NB znrv@;v*fe0>EHRwCS4MYr2L*-RT@lEopniniUgrD{S_tic&J@fT}c?t4u=1fYsc;S zDCD`CFgb~n1C8LFbUe6&VoGt4CL{`)BahqbCdio{zsVk1qCtdVB){~^Glg%cJLFBd zA+_Bp=<$B@bbQ?3$MYQ?AyNhoRy#@L(h>PYrji8Cty(Ea%Xw)-iDMzaaIG4B;C=8I z=4I~I1twr7jY06cQQKzGW@qs>P;bWA45vrf+W(%<hqstSUN24hyQP5*7((H++3y)_ zQA-XJ<n?@n0#fGT>hBRv${>J~(D-srYHUk%@`c3>6PCwMysDp(m5A`1ZK1H3N3-by zF6ry2mh2N3rN@oBg$M^_64LL-*R;cnDTCTFR%Hoa<~#t;mdaFRDE;r#<gZf!@^R)P z2uSIEZ-)N3+a`{M0KgQIwen_#F`+{UNc0}Ec+LO^j<8NtR0|cNpPz5F;bi*~)Iqq5 ztbz8JG|m(pzUXAN>6N*#iYq&sAm$MJ8werj=4{0xz$)R$UPfN34A`zF;P(DFgi~*l zpCX5E;{1t8^EveF=Guzg1_t<yN8(~U76sN9R<dwnxMmgkWB1wogFj5=ooMX};DimK z6UG<&+Y}SKfB~y*$P<314Lu=^!7eAuQVLQ4E=4j(Ob!cIYf~UNeZQ4Cu8xPL@&k|h 
zzBwIyJ^!}%vxc5a%*a;FRTWx00BTlsM50D4^^PMqU|k}Qt=Jr)CG#4{s=E*ZR$NQ2 z4q|+j1j!G)0!3SO>!8bOeR>}T2Tvcq+8r2(TGn@s0uBT9Qg89aAu7jD@EpFk_uhyn zo=qj5pfKZ)K^#$SFYfAkf)ig9E*Ex(SR=Q9C{&}ii{Mcj#;$VQe5ciVPJY05<r^D~ zK3%}OnJ3JMIkOGV#|L~{JTpqqF6QpbEc&?p7lwGSq#i<+oB*&EqB&sOvb|)uI-V>O zuU`Roo~&yr0qj&bgc@ON-Q4Aja2gY;&a%l?D^=hmj(EaAMZDDV_%Q+1@D+r@!bYt+ za!rfL#@j4<I+#$gQB4;1KSfA$>A`OVoD#?HP9Lw^!wea`zVC^=XV&NGzB<Zq0E`D2 zXSRPVkMjrU+<!Q9WV`xu@VcR1aq<Fp&Q4tLKe}=K`e;t`AFr2<XYXuuNop5=j2)%d z0<atY7#5c)U2bRsvS6wxxjZ>2e6L^AzLLS@S7|1BaO7}ORq<!1hkze6U5&Dss55G^ zz+#7%!Of_0>d<ozH{64tU&=Gdm^wrjxfr;H@Dqr2EDN4d=Fh1V9%@!!T+|f31v?R& zA`<U6Y_>&+Rr@P<;PpNW$Ym|#T6R>13V0kV4CN>csd5#sQp*;Bd6HFfA(%B__G{8x zNDKH#k)5e+Pn+Z;ItA-UYl+A$y;N<7Z}MHwGVoG@d<i(@8dOkO2MQ&a#O6Ug=Wehv z34>r=K2fxli&@(S^Yf49Vws5o>A3|hOi(TJEv91nCA>0i((G`2j5vkDOKwWsIZs<w zddMY}oJgmng^EZiH}MWD7{P>>SzGwUzj0Z=<=k&`hJz_nvUuBFY*OHiK6+C_80E<Q zfe|IszcU6wK;GeVYO;<Z5fPG>yc5&Irv>Di<q*y$zMSV5PbSS@?Dx3oFPX8gYPn=0 zS6eb+l`G!$<Wv$ty-*SIf;<foIF0eGb5vTwg5te4F&9Kq+p3#TnF?dTmi3YV2kl#( zO0xwQmIT7iF@eBaoxKu1EC?!#y!~qd{nt82;|x(|jqFFl3$<Lp!|>shWg91jFOt%! zD3J9@8L!zUiUs0)U8s;mFW+r36;#J-;Bs)tZ}AsWg%E7o^s*;XK@MZ=;?_KX>$vaH zmw21A?(@&P(rLk@j;)GHLhkM%a#gW0G>CzFz_|L0!d&37vkkvH8X5*EAd83{jk%@7 zaJwX;3M!AHt{Gs2jAPUuCGhhtMVZKjjALznsnsBSaa5>w$}>a(zD1MZ#0kCwp2d4g zK0Uxdtbftum6m8F#HpsUqSUxOle63Mw_W+sVt1A`pt_hAQUY`uO)VhK1~yn4uXw5* zTPrf<51`q0R8_h0dXiNK!F%M_^_%9$w-G>fW0r#v>3mvIlDT$1x3Yj8<CX|jeP^2& z-@RZqfmnu9y8O*GtcC@+1wd#$Y!WP6f<|4?MU;c-1xXWZ3kC5*JJI>yvW<>r-<&}* zLWW|8M?5w4q-z_My<&Va5Me#Wa%d{hp9E=|+=4-GfJ&=bZ=6kQSdWdD)?sAJ4-<)g zNP*y{Tvp}8z78aVCe;G_kzUUujl}Ep!S25|YtCVDY~nQ<m^Rb9+L&5*;`Y2XApb;i z5vDRAW0k-6LMk|??dwxD8~c_-npn4fyxBIggo(ay61tcxf2a6|UttG}MjzwxPUN^_ zQIB0Jxb}J?cD7DIS_15ihht0))+2nnHkOEx8y|<*_-Y>r;Q8)$iN%IZ$dFga8MB0u zbTKy+sc)@!2x996%b(%vc_q7pEeiRRvlhv7StJ$d;Ot3y9-^FM8q-Njsb&bUy;ZA9 z%vEJpL1r}0`RItKVsGp&+CA??35GhUnFR3W-<+DJu7@kBg-Si1zGiiG^{$Bp4O<b9 zs=_21Em<R>J0>16Ncuqf<3XxK5nO0l<Ofn0jc(8c3DpH#4V!Q@FN-v=Z`t%yuX)ZX zOtNSdDE4lsACH}hDXo2=6~tyeY#A`(<%=bb|8mDqGKSGT@k9%XU7&y%N?`+)1u^HS z1282qK+67@jRJ~kn9!I@qeY*S?Z-641P*Jdr#(BcGgeXnD|k8*G<5DU?`g(}*Pxht zS19F{WvG<_s2G8mhwk1I0?FUXViu307Bpw68uyz8EwWEgO$U&oRlaFy)0!7l3`tpQ zp@NLz^qpyz^a~VAIhN6Sc){QYCOT}vCcMV&c@<<{1n!BBK^8hLNl75xF*ytLU;w8$ zYhkbst>?|)zr@VGo&ReodbbdL70hDCQVd#*pwiYAfpo#De&-t%ZU&hfz;Q(OG#+(G zeTuCOnZHhPob|T&XS(~=f<xl^3eIesV{|c!qA~+R&6uo}6%08SyjA<MWpRXQds@b& zJGhe^l}Wa)_z|^uDKeSS<ms9^%d_y{nj!Td>lLoOuBDaMdT3<^nyD{cWY*X@Q}9yH zcf%fcDT1k(C7#_&70&5TBokYsdvT4Ns};UeI6?+=D52(pM=p%|dkwON#GwT7QpFAV zmF<?vEZ`z-d%L_>Gj<I+Q2|M^645*SfC9ucBGp{Cp8%ffR+&L>*mEt3oR|vK^ud)@ z^c$$M?0s@XWfImMfl;0}9fb~gL4R7!>V!*GKfjVV<vU9VQ+tOj@9TlR0b-#TCvjB> z=NoAHAnwKUOi!=dOeAhm@AG^%p04-hr*wS}-@AZ=MVS$3l|;^F);?iE#ZuwLL~U=p zq=X=;)Lt1v{T<pCC`6W^4r5DiMWWfLZ-A?P6a{EJf&_f~@S+nyRWHQC+`~B6u5zDE z0AEfqA|>n|mR+qr{MN_E=j6b__o}chm0%JLj3n}4hoUy<`sjgS>@bpt*Yc8ko38bq zWe>xonoMiUN{zo%Uc8_ELpIGvB5=c&J9!feif^vq&%@Lm{J*#DdB5l9-)D8cEWiKf zqgns8-~U<j-}Iexmz;Y>{ZYRD_};Srr~X9$A@A___>}|xi^}rz-oIJ?uW<SwyYBV= zkM1|TG5Oc$r;D-IuT}GYDE#B~3s`@)zU>R_0)3&k@2mIM`%S+W>yKO?R6ZupA&>Ak zp1;;#R6Y3@Sb1swivLd7_kR8>MDm}JlK<CAuX6eyC^IKZ@&AFt`VV5~KVW5M2G;+{ zEB^-$^k3+n|4uOd|5;=({bEV~!y@CC$ogOL^8YK{^S|Rf|3Bs!e|teybT+$wqYCch z9p^e;b9}qFgSbK5A?zS;Zf@X0(SF}IgR~w2AppOz$RnT;*5tW2pQ@@t$qK3qy`N7v zyTP2N6)II`wOXxf%zN*Cds)u+)AZygB{?a{PD*l9lAM(PY0=-~_vmMO^>hB@G~b%} zLXAw@dvqcz%&Dqw^uUc2<&`AutxWyFY+ZM<qBm`iL1~L=f1Z1dt=-+}=KuUMcRfCj z&+q$ve6rns%kTH`^f)=lL+QWS)Zj(kS}^*D_C2T$KN%=UXem)<DN$r9Qe-Ps22HT? 
zT(tahx(ao|9>sBxNpFo$bdywgy?lYDV~MY7j<%7MpqaYJfuQlw%kA64bX~`GPiZH0 zbb8o2czOF#JN`AV|L0=4T;J~<Q&)aU@;yEEGyWxNYMTb9=*{F>;{6$tb0v~v1+v3c z?jAE2skJQ06?Fkpp)r<S@RYEKKJ>(;Oa<<QT~@_)epOF1Xv@X6_4$KqCGE=k4o4Xq zERC_X&i_Jb&&YLNNcEM6#SUK&JFmal%lvk;+MQhdr{rtW)bCl%MXIz3FDXkuK0+}q z<A#W_m<F(&thl5$q%FS7HpbY((&s+)VVaqC_^Or|J670Ql@~hHmU$EQXi|0kXsT{B z1!g1-ZY)hN^z2Sb>^k;O*<{g6W>npK*Uw2=VQbsL^GhPEN5~dW3z#lILpYtnayP{M zXIUG%{WXu@%g1v4p?}do@{@S_`1~G;8S6CGJ91<;LL*f&vjkET)iP9;l(naZ`#CAf z3*c0{zCOLh<_rzi!`v-1G>x*q8B~UN!}bVsrN}d7m=iU412u>f6=(w$m*WMO<0Yr_ zHFy(Mm?K4a1C_Wl)!0-f7=)IX<)>MOfAymx?4hP_rK>S%uQF=!bpj6n-onM-(DwVw z+zb2sE4^)u?H(7W*>B}C_$mEuG&lYB5i|tIubjfv;}eu0VdCMEOJehEU2Nt|0qRX% zn9Du4$a{sindN6_!gQ(ca;oTSo9K(R2-4+33KWYvM9Wfi1B0wXiHej~o8fe=`&YP3 zU$I2jr8LWvqTAjN(J;Tl?s2pXXD$WLRKCttzcpBaMP-Xw|1@0_SqndYG+~ofd7}sV zV1N!Fvg+w}g5X_TPTdXl_M+R%Pw-RzGuib1sj73H0m>964u8{|$oVP)?k|gv6c_U_ z)Al^Gk$X%Lcd7;>QeGj6QsxF($(E&78K4mS!z4Xesx#UvhN)P&uR>#-PH&k@YnVxA zmPTZiG*XToSNX+t;KOA&BzA9PHi>jviC)W8di`*m8>Nbns?`_E^)rjr7aJ8(6}`r} zV9nu5WFWG?0EfsBlh78&{zMBkb2(FkU4NbhTn#)04-Ck2k>A(qWPUq(^-6wrQ}Sho z{rK0_N3lIa5u=hONMT7*g`b0<^g@Y#%BFlRZc}6>GJAZ9f{^m0cY`H}l%_dlr)st5 zGDT;KUd!z6B7~XRdwHTmNt%0cva_ke!^!#s;v6gj6HJ!{^4CHty)`0*5kCCvFl<n$ z3-AvX-$IxipdjhBh}&YE%AcsipkHYPlHS$dAz2jU9NDIytfx?2W>4%B9V5)>0w}f$ zKqYGLc5T2Ju2S8Qbr%#kv}U>R6*h>vAspcEaJqI>UN1LyB*|%Jd7hitE?uH3*i`XZ z#Yr-T_Tm;}o9a3jTm@`QlLd(4ga<?Am&Dd-wfnp;Z}RuSqeb`vmB-RVM<az-1Qw`O zCrOuxddPXZ`Km6pWw+uor{*@d?l#-9y9+>mYI~gCRo*p*rd?Ot0xu(Yy_$;*3ag$x z?FFQSO$!W#Vq*mA{9}2tJ4vsB5b4)p-(bA#nAR+*;8?5fWEtYi`jdT{6D@x|x=it@ zP)*3`Dzu8IsYhu`@O|*C*u>ue^YZQCoBftACuhO`_TlCGb8xY<=3#E-a2nGzcN2AI zGnLnK)%RMR%sj0vuh%E`teOAc*Yv<~n6OD}fn!O7V~DPXspK}wLf$e9q}G%<f*j$k zVD&MvO;Sxrh0<dUDzl`K!d*$S`;me;m4;}ih$;$Oe2d<%_f?)0_0@e;9fPB^Dg$)- z0zA?hEXo4p{^|qz0yGi}1S$grDtl~Fv#f%P#F~H!#wY4ln7Stki^#F-$x1wnPUcni zMh8<T;0|H4*#dE!)+Sw^>}v8?5pHA$>)m|eA+d2b@J8{-Ub)~{VT3$?ni`^H{Ux<! 
zt_7kNZk~T>$2XY;Uq~0h&$?Ngem|!p_rI`{*{k+I#7>|r2`WyI$`<O-9PH8_?%1St zPE?ZOEc$FSfCMsQRO`Q~sHw|aPNtV@!8^hD{&!EVMw<aa%2^qnHKGcds;@TdBW_~X z8NYVG1&%14Io!QT^(C@-Y5K`M#)Rc!0IfCD<z4M{+FCY_@}Cn@XV0M{J!rh$7J8)W zOpevJAv1mGET3{44>2LQktM{SZ#bay&Qle<hO2jHYp^Ga(A@SIgeFM7gG_R3e42}# zy2HHUd*urx4KrkiN$O708hZA6%Wo$Kt&^?c&fDC&#nTK>8E?lB7vM?~oCpya%Mx5r zSK>kw0CV_MFzi8Lko&C_D7_)F#xcNC!OmU;=K!<dC-pZyoXtMitJU&T{nDMjXMq(t zvlovaA?Uc1rMeWy2mm~(LEx=P<P7sAH{dhQ=OB(3V9r;ecjw_y7~&HfWE!BT<tfiG zbGA=%HeKXw)r3tlz*5WBVcq0yk6Gn5#8(1$lPA0tC%uDX72d0n*`*7Uox!5%OwkY$ z)>im>mc2|T_?jg3*OH_rA_J9j1C@?p;<w;_#0B3iA-stt#GNtFr8q%afP)3cQ#F5w z3QuPWFGs2mM#>K$QuUWJwRbYrkq1i;=BiI->(L2}aH!0&%dawx5p^!G_Ha^maMjsV z{;&ZLUL5B3&Wg{ApW2_T=`%minyNn%p|lbpH5w>BB?avWvsr<+Rd}rqGKdsMfD8j4 zYlrGaQFf|r@`4|yx6H1%$@O|YpN6FQ2S5BBr8cyb-oO4U8m^3v>(^o7FEw<Ul5s+* z7_Cfn${&%Di`Gn%(m<H%RFL3Ys(Np<4tdN9fy@+%#1@Ul7@gSmTfk=(+$IG^*i~HS zg4nu7*nqABV4JlF_DODaev6iP{XSh0`Z<oK-V&SIA`=dZG7*uz_*kvr2#NSejl@8u zXVm<8O$F(@C^i)*xtgjvp{=>4FgdETIW4xjuXji!^3b2v*{Ru`t>NXZo#C-zV7$U1 z>Qg!h<=^m2ZgN{|aaLw;P-AXO-%sd|IinA~q%y(FxJI-e(A`PfNmb(5U~2WV8=x=h z*E~2G+c6XuD=zXz%5LOnO(kirNKA5yUu#>CR-qI^!%WHg7%5jNjj?P{bx|}}6_;7{ zR~i(SUu?U+-`Cy9^=Ef<k~S?u?*rdR?H{e(V{gELCLF8Keu7|Ff?`^NW?q79W`b^J zf+*m#{u4C_j#c^FQzS`^WeN|6OR(sSap`r^F>X?-?oum{vr8}Z8_ZC~D$sF`6l#tX z#|p58i7rY_vGZkpJl#(0yj{wY1Nq5n*fj#<6jDR=_kg{R+EAR@qNc{W>gv$u!s6`E z?DX9B_$dDXF$)YP<~2StK1Mo5P6kp|4q93gVj>hPvXzGw*%!JVjGP2qg!Dsf+-qFK z3q-5~ER5sRbMx~Ple6IYHm9Z51|{aU&Wdx7!FjC>zB#fMqCS_#dOu5#gZ=ejvo<&V zc-n2)S&J?MBr#Gu8CoMpJ!ZwBS}E>AQJhVOL8@%=zAWJ_r5z?bA3h5{j-TU|iq~f9 zb$7h{#QP_jBd0YGTk37d?3_a-@}jh&WA$_e*LL?9Nx8b(y6Y=!%=7bJ$I3#ovcfHX zlBFFwS^dorg^9#S6@+eZfNrS3#9K}gcD-LjiEUAma~2hm@G!6TUcJt2ZG!Ho^cW9O zTLm`JR6qw59;i8nEQDq%B*%(;E_;aHaLpx=VZ=g{s76;O_uE@Xc*uvyD9~`CW#&ES zJ7p-T=1J=NYI>B_)o+3e6_$3DmR7aac9r&amDUzjwl;N^))ZFOzOF^Gu3oB|PFjjS zN_HyrHxUQnC^^*}G0PGu0ftvXC-u<4n%?M$meRc1)`s*pD`4gyr46nf=0?`W7leuY zT@Q?;O(H7<5dt$wDl^&&T&S%2i|vw^(X-|1cgjwa4^tN*5A8)(?A+~DH`7ayynai> zM9|aJ`KD?FCW-0`FFTu^(LF!!S5wzp9|5Q0M&%(;Q*$akS|Bpb4WPn;MNUz7#AXW9 zbr{=v9o<_U-#sA{D`~qOg!(9Mz;rag^ixYc-@8kbra2-n2wv}y*zj^|fdY3C6auW= z%w#nksHZe`wspN5i|ebaJ~!L_vDj+lqxJG~F|#uJKRKO_kEgMlo7m0lit=`IF}pdK zd3jh_85li|#jhXFae;xSd3tX)n#Rh4vbJWHqDp>*Hf$V>sF;}15OD5y_w7!P3{CC| z%#B?Y=Je*6M#xHeYP?HqJ$zj*_!Id$z4(dSbQS0xQWGgcf3(Kfz#G<|ELEIpO;q8D zl3mjqqyj2%j6yyFPbzxyVwP@@{eJRhwgW5)jS|#W1omwsvLad=dX;y&HCCGy_&b<7 z9Qu82rm0GzRA!=L<nu$6VcUT3k|i~uutKW|n4W#2pT479eW6Tk4=+q|<Rdp1C^{vu zy0N$kaT56Q3={V?egm9;b1|eZv97qdxWVh;;$i`Jjh&m19-Wh+A3x(aE@@67Sq4;A z{LfAc=mqG{vGMG#u7H0E!B5oINYGKl&5s8lf?^^ZAz@mdA6T8;7FpWU7TePpMYTZD zOxI&|^ZUD>&)<vB?Ca)iHbc{vrikaDupX^ID!f(#<^!qB(!{8a#_Eu3u5~docAPJ! zl$16Nye#J>`Oa>~dtz8Mu(bHDM^j~aX8C-+IQo2kbv__#<m;u?=Y*KqP6u66g5Yet zG`Z4n`F)QXLuCSFat0=8Iu>9xQDp*SI+xNMq2_%322;-%)mIW9)A$ng3M&Hz0T~+^ zF?DeZPymQ2SY#g`D`bwpn7bXEuK9SJ`naO&P4z#&3(5uGy0~~@^{U%*AomnyMbJ}n zCfaF!Hbg`u!|*5Y`C@ZhYIB29gE)~9l{Ou1J^#DEySvrDF0`p_aZy-dKJ+F+gm%t@ zJgT^x6*oH$gIwWgsjK&Ct}@PX_H-7RaWhwtl9oOVk53>a{fdg~k3?mO4)-zR%cswu z_~2%Cs;_hcPwT(k7qPd4U44TkOKDJgf?`pTGh}uaemU0?ccq=hPh71%TVntrVL2CJ z@|Px@YgTldkyv3DpXgd*D_i2T+~O)>rYdP{dN}#`*r^@fK2GiS#$ev(5Zg!p<DO7l zDRBX-zducY`T%jDrq$oepg6EoemZV=o_KeWm}LKk)^79;U(esxF&iL0qv<q|lmH*Z z-{hywTZT0{>P(YRna#AM#~87qH%c6~-VUX-hdkad!%@J~Zdpa@#MQ)SCw4OyIqipz zl&1_JMpjswzYa`Zzt<L9uiFKb^|1$&H+6@vDo>I)KnoD^sISHsFEk~%Mpfmi_5G@O zn~RRJB?=Fv$!<@QFyq*klf)%r6D0z}BmxxX?Kw!`v6lGChUilFD67dDyiRs*rfx=_ z)+hF9{X7m=`|ds#R9^?-DZOwqavB&I0W|mwa1&mBHpF6`lVD|PM|plrPH<0Wa)_Lz z+t=r0aHlstUa#jGOINWeo}bi6j>-sdquO(|+G|a6^Hg!dGf(+>^_4o3wioQ6A<^7! 
zJarN|=LgNy(KI+uzrFn**qXb0UauXl7U*pkJWr0P6)8!$S;TK|(jW*ETNAA;^1kvh zJW@m__-%kC3Xg=z4h0%gl31PE9j-1CB5qNWhM5$mj_ifD=&4KRYEL__)13i!&cTOp zum=AWFF#K+FF*4SEzIAQwf{)D!teLE{Fd@pwZ;~iMYs$jE%y!+=Y9jxBKqS>V|xs1 zE5n1#CBD{%-`~+29c7ECn7Yz5H!@^aB<4s}0XdV7qC>5@qOhE&y5scQphj(*&wNK! zW!2{+JhxM%y&R~6iNW{(ynKfTL%|<who~h_dqQB0T!FPYRJ|)sa&(TX(eHT%MbwzA zLzyVt^HE=qRYWnkI5@sMLPto;%uv_Z*kNYop5yl(oFht)*9(h~L$-m}^hr8&O8X=$ z>m=K2c{@HX#AY`yOK<efa`2J=yK)G)XEyEtj}G<)S;m;?|FfsR`3FjkpNyKFfNqkO zWN2-Beq>Q|bVFyFuc5-z=j0}NHhdnR#~WFBL6Y#uCu@GcN30;qyf(?Zm8j?;CkY;d z$246g!fLwvboN_{?37(T;^Qj)Vly>7-mQOf$>-zg;_`2<Z)p96b_+~xq4GUyyTm%A zg_)Wd6J@9Pu`Zr|XZ>yJY%RJ>{h_lM7Gc4)(P_{fOmvLQGzerU0_veMfQ^@@<I&H{ z^CV^b016tSFp-y~5uY#QvO{)+^v$I0jb!b?J8vzpv2wKYa<uc&znIu}u9EMM+RplX zpSBO+;^J7{ozV2lB<QGNWu}D0#<*Wt)-Es0ERCv6&Jdxxo=2nq9}37GcjK?_`3CnB z#&hLQ@DgpO5*0y>)&fW^N+&YNU(HqHH9F60bv;{zRPpZ)0{dYDdt1A@Lg>l@Nhu^! zI}%}yx3a=i*Q4Ewnm!tKZjbR*MAw@{?WsgjKv%l%>G3T-Qi_(QZ`H-a!QO@^WOTK1 z^n3hy8Trc57eOmd;Cu6N;2!!AY+H1lXo2g0id(4)oL(mrxGVf|+d7NG!D>*udoN}; z=K=#~41zzfRq$bg=t~W6?~BciN({A@kQ$%(IK1ZlO)Kb6mK{Dr=_f3v5t^)(+HD1z zI9q`#PIygdlLkyq)^L+WPi%BOI~Ka$t0-uit!Od9hk8S631@$y=PW;@HcTzO(Um5? z@>(Qm>9a3uGuK+@Rv(~9lGzOwp52^)JV3+wReoR(yPO`+W3$;AAqNKvE>4bq*EeJa z7pLhY6vm<xMFK<y4ucdUgk`(@)xWxzyWX$D+XKl+TgJni0Z!BwDtC8t-u&kSbq}QH z5uE31UHA6!Dg*u}izzEu4hmRVQrB2Z(Ne^!Ox&Y|muX;SQ+0Udx012+|7^vUdSAik zjiS0cTYl!Fv6v+|D+{PhcBKpx$J?QV_LRryIvJffi1_7~+8<nlcat-G>t^I@`2LR5 z0vcITcOJL&-c_J>nO;D#c7OMPmn9eJS9PYg{811zfx;F~n(CCq<l6M|dJ`E9@~*eC z1a=}rM{ijAfqlPvZ7V1{R)dnzL|~dsgu<fQA|*)D0&4aacJg|}4i77{Gh%#y{Fm}R zm`N-I&F8Y3S6G<UHr<)Zx|qr7#|TLmSeRy~r**0Z_JU`rx}F7&9&TnYdh7~;1(J`- zW~}%GQkm{b6)G-xWRlYEJ{v{dkC)-7InVlizZ;F5_wzA8Z*yy47qRH!T6*%4RcMcx zN~APbDKgs}c35--dX#Vx8%~)TytV5Y^g&o&qFV}soBQQ62xF3}+V=Fa+rb$u1&K!t zvC~c@y?r&^zSbONwK*CyaFz_6UBy}ccJ>r<tQ$#-TXTyWo@OzaKb+OY$<+A1?Zkb4 z%<rP@@aCzSj^qRtgse1hw&C${zP*9XK{V|2dqaKf%G(~Sggt*9nrpOtp5~V9R*CRh z114zR90Pt=Z<dz4(`<#ia&(6I6Qr`=W7G4c46v7`&e7rIAy(F*{E4dXcj$xAl9%j! 
zw)y}lovh1lgQG!ahvh1^rMbJYxQh+xN7mldWo!W8iwWV(XmP_Kc=AI^Zqe{-V&~*I zH_05A1<pE{A-FqAR6<l_!SFjrK3Y<kpFn~5IVOB?Bz=TGweoA;FOFSUI5zP$cQ=Fu zX%iicq{J&sWYg>GQe*Ra(d(m(#bg!sj3ss}G(}j-<EtMpy`ewTB0NzbIZ+uZL4!!_ zQxx!YI||9PcNFnDf>ih4d{{2e_a<X|JzAUTVk8`fP+Ug&E>L4b#aBa}pNhM`!M1Kg zOdpvonx7y*na#mTm>B3N8IpG8T@QovKHaP88sHJ>_%1f?I{hKBkpjuNd|+$z)8thg zRX>udS9OblmEYqzzx$U?XK-zQeL3Qx>@M$UD=a1GD4|4U0Iyh^*IFBwo35?nD`P0{ zVHaNH3sf4z4A>u7AlTBPTv_4^nBXtB881w}LDj=X)2~0pgC*94-1Sq0UT$~2_C#wG zK!2OnZGbmjhFOHZ0YoH5dzGW?ZtHPSC;wr}cr>xBG#6zyC)ejk5P;AK7qc@kgU=Bc zh383Y>E&s7^y8*IACQ#?K6MP8ZPjHOh7LQHsw-cOd4rdir<IfY<#gfpH=E&w9m}KU z)5l?V?rz28GnKj&Hnvif)Nu0RlT3UW%@S4VCVlM?N7r?cDC#K)masptLNlsF_#Skd zr(z{aLT?x%d_%0cKnk@wBe{F@2Xwl32i;)*YSGd9s8!WAUdJq!Kz2_Iuvfy$a{Ml* z?ckj|Q4}~Uf(DAy92MoRZ$Lm$mR7_E9F`~T34#qf-s@}X-;A$umHB!a`|Tc7++^V0 z3r#mwaZPcgzoW{eg445w==fV|V`gRO{P9t~{T=M~-i#i%LE!1we*^eH2fwj(5X?li z_Wpsg#^$u*Qt0FEXp^IFYp-GLY2=gb6kTKDm&d0UbXLTiOxRq^GE>4fAD9<5Pp4M& z5jBf(h;0%uibRAIpD^~9DzD%E*gUj~S>_KSZyr?6tRAT0(<tkZPbKXXmeYE3^KnYm zupzR5T+UU~%F#6-Fd{Xxtf2{&qB#_?ej8fi4>0HF@F@Sv?Txj&TdA1D4vQj-f-5q; zBdKaMwQ8igYhrlj%ca}-FZCD>YWx_lfBhUaKe_Ab)yn1;P<seoJi?-Ey#0*q98@h3 zFb0_h6sozTuxt395j3W`U=PfTT4jk@XA7F9fZ9viB$|cRuxbY8Rh&$={|n`6uAweJ zEjA3Uw2y2A<#aQt2go~@YWX(O${@_VGuIwGD{h~0jjbb=braKdboLJjOUWpztq0(z zP?iSYe3)Z0hNV-?qcVpR^K<YOYj-wRZf_z((y4hhhy)BZ5b4pSAow?O8pgUtW@lFx zrWWTX_NUO#&*7ScaKxa1Z&v~Pn;nB!QQp1~g*{sBeGKg!<V}%^jwzaMX;PLkv<mKL z`3?E>BE_t8ge@~ftulG^V`!CqR1jG*4r%PFJ{#NjPH6ipl$ATXnB-*3&{}!tEGlj_ zPuB}>0~^n%h!~h$Zy#Pyth~;m5x}KpZ|&w5oRE@NbqQl>#(#S<@E~Jj*oFfD3{VG# zr_gnJabbFCab|h>-rj?r!RsLSL`?FG@~)n#Z5egrnYH7sS7)HX?8@Tw62QLzAKB;s z4-9ExgJ5zRyHQgVlJX0K;*$}<;a2Wm+7{N5hTbwZ3G(&{{CdH}VwU8h7NVA!!WQX* zW@*CaX$)!s>^i~9u33y4LAnm{C%pa3%d^+-8&Nl`TY|^7kqtwiLSDTnoq!fe%QTV; zHi2o46KfAa<R#5x7?m8%oLxP`qq55@I|c`TtB@XN=D_5)oG+p4^39dyxz&}MD=X`F zZ$S-?%3&5ZIK!hYX&xC?IFMC8QQAH^IJvmEu{yiD0w2WG2{io2$tI0|9l9P!)PK8& zFtpWobQM%zip$Omh)r}32sLv-Xqs7z>UxQqMv0q7FerPTKCj0p=LR}2V44i|yiq*4 zoCmjln7CCUsjPcear+5%|JpJ*y^Xo#3a%!-W|&huM8)%xrcVQvl(U*c!ouynuJJ8X zzhW{OCmkC{L{La<R&Hxw->=vG=jO(aH?0?^1)86onEy=sXI#;c1L1S4%X4ch3+ro( zn7Xk(zrMb-wYl?r@8<G~j;S93ivr~Z8}GEsIZe}fO;c@G=ipP&Y;J88wlK4_1Y4V0 zT%7o_Eamt~)X!HLzKw`cj{~Elm)b6)=3!>$_(a9oA^lC9TvUuKc{Duv^+UO}{D~!O z2+r%B*N+C^`HT|;Op};3f=HwhyoQksn!!R^{y%;Ecmm$Ps%$-YiUNLcrRnM_JgPZo zNLb~7$g`>VaH)HPs~KB;h)iy#Qgl}|aR3`1nU)E090YXxf#+A#V1DB~_A&IQCiZDx zUIs}A&SzIw=hs#NT}%bYF?D%+Yh`;IDm>q~dw1{E)5`iz1uZuKOjJK8IJY;iX}aw4 zbpQC`^4121e2{(ECWic}-(q-h*?Y{#d#_)wYH7<VuZ&91!d$B|vUgO}H|JDwWmWg3 zk#{>SU_^bvj^8xtyipvFVJxp<EVaBBi41~8JA_ag;Tn*8!rs56eE9fNLqF;-)PTr$ zj%_(cUgAc(8gbJkVhQV{(w^mePfIV)aA*Zc=vdo(dj}__7Ng@<tcjYV_59E)DEZy@ zFsq>IEAwk>py~@~Ne8f3wzg0eIKQ>MdTV>__HC%^w{Lx}cWys=wzv24X-ISq4WAC7 zpplMOPHOFVUh`CA|J=;V>dMvzYzj?2M)t9gf3Wi&=Q`K;msdImuU0g*q!*WAW+qs9 zc%xPq>YC8YInpV(lU}qt!>hxh;sdmE>qT-w)r}yz=tL;xLL%>VP8RXt;m!$xe}1|B z@Y()EG6-FTjicpV$~BPn<QME!?P3A=?s4b{wOncr7Ot+|5iuDh<#3|ENyQ(Q&GF-@ z=mc8ZKYR7HmCwSyxq(6*E#^>Bgx^8cjXQS^*3G+j;c@FOy576{^4-hXl~pAz1R<+3 zyP8)>eqUkh&GL?!5!mRh&E+rT&+ThIdSpRgAUKHq5vl%PpUi*0R@K_uTU1*QX3sAs z!O=h1%++1b+FD4}j6~9jT*~nW0V6UY6CV9&PTdF&op27FFjkGAGh()<C0+g@X6GAG zbVA^to}T>FKD2_l>Ix!n9a<%9nxY%nf?De*<J8bMJ+b~Mw{c9+AXwGR9&CJcW)7Ix z8#izMlgSVi?epkBb#W2xWYDyK5q1pltG7@xzEAkpy?glU#=U#@cXyt>c@Yww3xZE3 zX5kXwnBRJ{sBNZWWNCf_wz-9h1ps@D&qtF#f!F68*`49};0=_q2Pr_$I*zjS_A_>L zQ8Tt6le8revpy|i0Z}%Sk{73LIGa`ot7b4f(kUUo6S4k>h&7Y6)9#b!Cm8;jW%c$> zS;s8q3RHH(n1ErVvd1Mgj~X%wJBN^>TRShChgS8x@<8NmJ-vbxQwwYA2GJ7?e&4L! 
z@dNTc^+n;qa?GLSeV=x;uz!pC_Jaq<tRVQCckXB#`w(!biJOEcR*!<<S9Q;hEv~QM z-d@_+fPbOFuyP#ij}UzHW9Q%?_`T$U;^2g2L{ONOySJf@ow%0OccL~w2$`HduLn}k zt{uvv8N{p}z@#1kk3@2w-wT?bfq*u(_JqSfv$PK_SNAS94z7U6>wD*mn5XFkqMSUv zyvx;@&5@=1sZ~R4nm!ulcAnu;sf8uzTn!G3iw&Fcua{|mzUR#10?7O4Saof6e*IT7 zvrjuV)E{m?e02NK5%m@{d+^}llPAyKymWAjA>vS_l5%iQZYk-Uuj-z^KD)kt=ho`> zCR*^<FoKVcZ5MC_wnt|;QFAXRF12^$R#w9}h>VJJ^bIt1LTH+rF(^BKFJO9_SC^1i zhe^ekSv`<Z)t^Dtmq7(dsp|h<a^7co^!RjxPdNNDMZO6Ar)1hkHlhmq1dZd>yzAAy z>!_q$oI;BqJb&9ezM+Xo7t^-2^Fl_YXIHnjkIYQ}*38V;mtyDk<sF@Y{4DREoebKs zqs{qOs*ZvA&ZEar@9pe-Q|}*me&_LHRDJXq)&BA0w;$iPbPlTOd65bl3TXR<7xq_l zEwo-+oLJsiy|o43dmx{QviApO<{VmVbyNkZ#{{!s$?2#$38)!pcJc-mr{z6Q@f#7a zDpFsxXHfH}S3%M#dDAL-(#pG^Rtx%1)u8{Q77|s^c|zl#Q!aJSR`o2PjLq=6vO}tr zeW7*$%E_OTaU5N|v-$9ObYZW6mYcDIlUI0jUS-X|=;&9W>$ht+=8iHppMk%*yfnYM zx_Cg|F|<QWdQjW<QAdNldw=h9eTb<~o_wx*yN{mizq^O7yZ3f>?>^pv&+ff^@$u7- z1!YYPLi!}artV42#h2&GJLj%UuAu_tEsWr!QaUqBOPCx!mi?c-d8E`sFx%YITX?B1 zA-5nfF3H(H#LU@E*TRBN%jrLqLrx2tl8alPQ}Llu@T8V^r<QZ0mT@7M_xKNk`2TAV z&t;wb?Aggesy`uJ^E9_{qUqXd+wgi?&9InxvO2Os!?%(0qNB2P)PrYl24}W4T+=1> zY#e<2K)dQXI>+YbzP?g<+@*QrU#DnL^1i;lw7K~O_+QP;LE7)_JO&Nl=ls#rCr|-$ zczpci$<EU!yU(8OJbSu>dL8rlWWNoDeE9fXwCunB@OEiqQ(Dyt!bvsPw8Hk;^3M6T zp{2!57+<!&<nUKv-!RyZV`F|C?`o`ZRJwkss-+cz*~qj^ka}wmq@k^Yl7Z=I^}zqD z5&6AZFb$e^Dp^-b85c?^2XZOLGlsEbR_Vm%Da9xE!};GYEratVmr>Dc`{<^zUx}oB zfu?VhtX&?dgiU(omHSU$Csz!L>U$g5IfBTi6_@r64}TT9{w9Zm30g5J8q^ilRR|;x zD*Qh1x6#0V)^@aRKZJ_bbD$bUIHo>-1{Ih;STX#g;D53Qz2LzfhWtI0=Xvya_r<f_ z=TA*-LkM}axYUs$`F((WegDEF+UH=@9_8(qmoTaO<NmZSLjYRpr<%HZK<X273;bgf zQLf&_O~=ffU)$mT(Tx5NohS-<cPeQo3ZPxmo&sIzv_p6hMSO@ttC+f9e*W}t#@Nf3 zFJ@<^n(C{Y>Z;+%(<giX%AI-hE~owm0N*^cR?;>tW|pAhS*z{e%&O!qr0u)*;Mw@{ zeM4lvl%9>fw-1PXd1EswKRI`JJ{XouIpf&oWubvz`aFdI?cp(5cZ{Femv=PnUp&l{ z&#DeE1JX~>ki+AX7cX$tz30z&pFQ920w~!>nV&tBHF~@QgT>E3e<-SGq2bd$D`03G zP*Tz{5AJSgb{%E!(P}@3o~VObx;{6DYY7Q1+XLA53=LH_w`P`Bgr{V<g+y3;BK2+U zl?_Y?bVL5rIGIQ*lvLW4L<T`B>rO7`Nha?_qZ*(P)L@j<XO}(z;nlwf>i6#6wy`un z&&y3sO+`jVN=`vWK|xMVPDV*d!Ntk0sjgaIQ}O5T)!6(N#@JlGu^E=rb-_9dT#c%G zHKC}bdsO-E>-SZiH)YHMbgk^%Ln4w3id*~oen)<C{~iKUiwh7W&aSS^eFgkyOSApp z{z2d|b{(`D!z~8jgJ3^>_3FvXS5IHOgo^nus$$-K`Rdn}&p(sDZ~M_cf9KVYZ)X-) zWK^99IMk#pql+%jUFuux8eLh~+*;Y%0JX<BoT;y8<{W1^?f5+EPJj*Q#j|nQdC16E z2cG~FM^`NqGa&=SDXSb-qYxSnF)|hba#kS<R$&S@VM;b({);M7$~v;DdU5ede|OsF zXJ=%jF0!#QD=Emjxj3dI#x>X1z>CX^^KH%bnW;%gPd7t79bRrOE>4cZ+^j$ILR9tT ziE4;mudM?1IvxcIh)NK7KJ5@HN&BvmCD3_fQoWp^os9=7Ctq6Mcy(eNS1R&IS2Km4 zSAe?Ay1ur64q8`l-P(^>Q40TXKTzGd|6~`f?O&t*5)JvQSI<A!*DqdwTj52_yQm)V z;>DW}AKw1->CK0aP+$G{;nn*eU%vbC`MY<|-n@PE^x5u<=g;4~j!r6KzhF)$?ckQw zRN1{y-@iCGvxfHeE0_c(rkfvU;^_3s%1y9FYpc_%EB!adE?w@-Dz6GnN%shkw(>yg zS=%WYnhUuX3v1a?QjpnLm{^z^nwuD4s+p0VwS_4U7Y92R5A5^b7kHo=RGo>5p{=Rz zi0LjLel#-BJ$?FgNTA=Jvu`_3UZj*?#YC?uRYSt2iKrB%R~@;yjjUPtoxN9sGq?3z z<8-X-5W(RIxdqKVy_j3U<9D-Iy8|X}y}7b7zq$&p2Ib*VGi3G^{sAWCj&bi`LeN3z z(}Ti}k@0T`fBE|DtGB;acm*EeMfAIGp1pk8(a~02Syo+HT2)b6Szc0E0kyQethlVK zsQ>D~!za(c?Y#f#{mkMrzoa<?wi<|x{MH#DzhiV|etmsk?QzYT`=+y(mX)Msq-A8L zLrqCd35$;M^7pq#AWZG-%$;3LoZZxnObmRIRZJ0_?5tQ%ikq`NAtil5-IZT{`CDTD z>eWj+I$FN-JizlYKjMLKK6Ca=Zg%>guzN$3E4lS!z<k&EmQ!S{xK%bpuhQ08r-e*? 
z5^8o|y(?{-QnK(gb3yn<$LCa3^&d!4j{g>&$A~;;YQf_A`Zq!AzJ~|kA7X$9KZDN3 z{0jVwmxopTAnk|i>v!*W_V$<<8Hk99z(#&mDJjmJIU^@4{p!O{(8caNdbIQ6*}dHz z)EaYEC2mch_{x#$-o>_|<(n&O^J^;*&ti-{o}USwdT9IxBO?O|3Gv~tJA3wwkf^8) z(qGfe(!|x*)ZU8&2lF1T4yOpo)l85tUP93E_gg0}c~OFsgYCC|N=sdhoPvCDY5sqE z_ez^aD!b<)fQ72&nxo)arQz4arRqx|Y1hy@z4_>QXjZeDnJsux^qk>V)SThpeggs2 z(hqPo%gd;#l540$&C1pm#?}0qrTL=pU}+wqY2OF_=>ZROm}<-;rtZIjsc(P$_;`1h zhKAbT$LraXy|tC4{rdLJYZ>W_!oq_0_x85$-@Er1HN@P1viI`co0#NcDqbxzF$<^I z`l_DAx+{y<XIJOeKF>_Rw`^*0p?7eAn~SrztNr%&=C5D3y8QO-8+lnNE?ypMFF!pi zTWxD687)gTR;In3N84NLP`5VLFcp4*t(DmsA~I$n)v22kKYjX}Nqt}UWdee;SpE5@ zpMJ{EPWN(m-QLD>m*(fso`N`7n3??ezxk7&zj~W>X{7zeX4mAc^x9D|ixgF4gQ`y> z`2|NIy`ZhVS2u6(xkMD`THAYsMkeK>qF1n~OSf;&uCIe@o!{D8*xm+`58Zik;pX}p z#?xXdv|rfTTE2CA<<8y3%?(ia+dI3DU%k2i<SEoAZ-3l-{q8<0o%q?(pu$yf*&jZF zr+xp^ryoE4{P95j>F1x{efS7$!xK<JtQ8u(dIv2&?LB)6%qPV~AO88O%8LAay!}Ii zH<#vb-G2bzj=mP~@ZHCEeb+`g#f;CK*EbKTsOVmRc&Be-Wp-t00fO$^x92xEr<Ruv z`y6-?nCIo;26PX9orbD13kSQAy|b>Bt-6`@d09;&A|g}@16@%6qANThA|fOqr(zIL z=<e;s+$r>LR1EcXn3)){zGZ7=0b?rxApyt&*54&BE6vW%wz#tJ?nf+tY<FnzinqI~ znzG`>3*vC%pu!VPb=4q$pULsj4<9~Y>1CgO9vbMYEG;ZA&aWsbs46Q=Pfd66iSi3e zj7-Qf_ec=8DNwRY<rI=7r(tAdVi6M+QP<SdFmttV^9qPf%&)G!HZ@&TT&S+D#?N;i z&a8;2u%V$reN#P9y$=8Y-*u%YJtHkPJ{D?BTuf|SOngFId}4fJVnRVd?!v|fC_KEP zs2~r!4O^0tk@EBNS>4=x{{F{D=%hLT|7dr&vHns)UN*Ys6z1>O?COfr_03ISzO}ib zs2~?=aX}vD399Djl$R84Y^=Zk_!D%NohN&^m{(U(f+L)nn|<Z#m5n>MA3i~4?@`0e z?jCfO(aFj4B5DMz3Mx*?In7g-`j;zPuPUo3tEeb>c)G!_o?Kd--P}Al#-W+-=jGwT z$~>GMT6%gV9X(A`3oSEC5mf^c9_jB0DgWQ^&-~~Aocf<L#NU0cB>y-~L_k4*j$bjm zu=*P|_I7tl3v<m)jAW%H#YBbqczGnmMP;Neni(6G6y`pD^bjj-@Tund8aV0S)Uu+y zy6SQ;o6sgLIbK0dMpzKeEsx|yaTiCshYx<w;&gU)R=^(XTlDmFT-=-rit^B@+gq^Q z$t@u^5+dK?lES<9zxq3sxmjt<%#5&QH5H}c0N;$%B>1s#9AKaQyge1=Wm#F6X=$ll z9PGZ?;JbHk85rn!xw)V>$w*7W$d0a(@JpGQ=&8>!F>&(K($ZR+8<!VlU#cj|PER({ z*P*4Q;o{+m&CbuSs^;eAq^Cb;YGhEDn^|5`ke-sDuBuE$MJX;JHoLZlIrX6YR#ujz zWTbL3QeVCTRpA^-NlH*tQz<IRLx97>%?V>%DSFQX{g9#}La-elr01&-A9tTWd-3+& z<n$yJ6_vP{h@1>&S0ynOcAk=w!o$rKV1}(h1KF<|D9A~(vN9uF95K29-*xKL59!GX zSRwrWhaaeEs5v;;uiqHnefjeK&d!rJ@22NwB_%J=&{AKJ5OHwyr8uukE^g@%U02<= z6rEmaXXgMe1x^7R%#hIF#ap*9SC&VnCWM3pV3(j`rA13a?Yr;4Cm|(eU}OZT7rrP( zNku6mEhTmF0vKR;#8g=sY4G{PWK`@I^*ur|4;lk~9W6Ms!a@R;rY4DTQ8^hYz%4w+ z$3)mznu`bv($UcXv0Iz#-+XmuY7#Lqv9#m`^sz!!S#*VC1sfFLKMy#;2mQP~3iGnc ziu01=qg9m@D5)rXyxe~)^8hxS(8I4kd-?>9jHsxHfsr2Ik(HKwxJ_q!Gbt%aK%npR z0aJ(ca65JC6j1v7*)v>U`smR^M>`w%tcj7q!Pnosc>{`<7#nr?Bifqk|NW1DFfr0k zjgNfu*3|eY6(xnRhzJui6Dupzom<=AGyv}k$IaQr3Dh3N{H<FCh6b!GOozYa!To!% zSKojCz3JCGhA9rV)`Uca-MwAU-@beK?)}J(5$JA93$uq?@}K9iFgLw9e-l>&2OBF> z6C==S7G@@J6wpI&V>6J$*9M?pLu25fqoeiVlcz5~e5|f22X{e#j;^Zg^9qi>{!vyT z{j=O!+Fm&&T?_4FTlZhQefH!r3=?oJjEoHCHa1{%yFNY!ol#put+uKRYrbN52=#T& z_BP;Wv@NU!#Bm0Co(M+*5=uTLo8XkHk3ancuYiC<M*vg+ay+~Z_V)n5;EJ0YE**Rw zItZM45WT}6kq{U8umApU5QoEWfuf#1eL6WI_V~bK7)ywWJp9)B+6rj6oV*Oe-320g zteah3UW9{V?_kqE(DUy7JJjft6c1EDICh+G?`m%*A|^66)c<vVUcUzIf}el5F^rYu z<fH?A*zYd{!`}}-{J_XS571(50H^XC{khhzj?tMZ(0M~612$IX!_9Vg9<j5rxH{S6 z+Gps?5)xt%NIrl2_Qs77km$vQxx>xiv}$Uo)zx2u220o)K$llnQyCc_0l^0iJYwEO zV=X{lSX{VzYrC_%6P|+rzI^%BEU(?YXR6xnM660;Mj_yH$~)&qmhQvr{yv_-VNq%E z^vd$Ju~8T@lH#NBe_d-+9h`9`U41@LF)kd;ySq4?AtvXRvxv@ZdiMdvJdA_q&e6?G zjURd5VK{*OCm|sjywZEXGem(t0FZ~<gIf?3IFD;<DvI)S=V*7I9QU62j~+gN@j4${ z$`QppIhmTeN^E>I6*X0ILJU?yU_T9w^jq7T!P4KqcNe+|E~kXGq$I?gK7A@TEB#33 z;S@i@VFf=ckDF^pE6!W#Zf&fCfIGW7cMtYMxN2aikCl1U1u(qRNFdOlrK1I@2UkBl zJcNsRLH_gLJ)&YFY3XQ|7qO(aoa}8Ox+y3w1P6jubH0MGu@(-jf1qFA;1zIOV3u&T zL<VLOvMBIr`z2M6lw6)|A6`Stg`u#p(9q=M!0-@^-N#`bB34luIWB$yE>3nFxk|?$ z2uK)2wR{uGpT7Aq$j=-0aAR%d_@`b-kRMF!Phaw!c$mjuuJ-p36BDJUCPDOcoc1eA 
z^DxSQjNs6DGHq>*oZKw5IACG^?b|mD4CkOtZT%(KcQ+SD*xK`F-~O512lwv6@B{-N z1Sy9%azR`aRPhyhPy^=ShhvTApjca&vM@8485`jGIpCC-m>4Wfjj_H9=2l)ouDrH( zY<|wb04MVWxtTDapXcRn#ddiP1{%=7grxYz^_79ES8y^fzz3N5`gz08Sy@^*+`!qv z4y<)zasmS*y|sne)q%d<T`YgpW^HAur?V|QBoI!kos)e`Y!vj|uFI|8G#eeh2FDPN z@TH3Uyy6zXo>ta5Ag8;$bG~JGy{f8;oPs>3u%NTQ54?@7rNz|5jZZ&)#QJsM10%zN ziAjigg+<tTd2uoCiE#SvG$AFA93rA{a^*gp7l@k<H;a#ngyXECrt%5xe-4(#`B|VP zHYO5-J;a9KKd~|op@-xJaTtHA%Zsr>2p=>vGg{qP{qYzVm>?X6?`^8D#>Kp@u2x}j z0WRiWy?O-!qlSiBMOFFR_wRYQIF%IS4mSe`&k_*O(9wzt3m$&AsjixWf}*ar3YETo z{R&qGBQh)qqPf$A<S<fiZ>?h+GGb4kIdhJI{><4kO?6m07cUPNAOHD;^tAqw>-q+I zxR_^QVS*Tsn1lodYiu(#rs0UWAe{QIUg_@Xz{5Nsjr8%v#XQ9C0)qT;39%6F!7zIE z?Ae^m)WgjTbal?0IRnH(ENNqJV`FQHb+=WO6$uClDXFQzUHts>&q?W(6x<qwf<_i0 z<wck0N?OLbxVWwDtg0L8dAPZt&%#;5nm;kr(>Zne6bT729As_*AvPW!T+Dm9Iseb8 zvm_jnuHl6V=|%97w&sR!N6b$@LC^tv4re3<n>Q8*AO!fg?`(h3d1??nthSbrh=_)k z2BIXaI|KeuMn<Z)uWRq=-XFlco_29bA#jM*+dvnCqo<~(Qd&_uH9ZcS%FDuHlcXdi zn3x%5WTkLj)C0^c%#D{;7x$h#fgg@_@YPaRIZZ%JM8`);PTA2`f4G63wkEJ4D=!DQ zdbzt|J;E$(EdF7kE!|yudb(KQ0Sg2h5tk6>;9$qPw`(iQ@H0)!jA|R}+S{9O*>x22 z!h*iOUU+m~P>>&dV|_zydPWM^NUTR#Q(YB$gq(t$l8Pe2(_L9bNlsP@>(?<bF-l5H z!f?>tff@yF-+7>><3Y%(ET9({S2<GBIuVmmoR*VQT3zWA=*Py!a%+1FOQJwSjf9Mh zg@c2Mot;lajDz<)9_Ih?!&xF$3FojPZGBTlMtWQ!p|p%7VDEe=M+YUy-v`E>y84<o zKfcH4JP>ubnV6_BGYb=J2L|zv*zPL@PLiMRe0f#b_U+AMGT&H-i+NHE^VBrhO(4eG zsHrKJRhBlO<eGf^#xPdqSy)+ARF#;S7{7_UFolnoM^;9vr?2z&o!c}xnAg+RI73K6 z$|OQTNqxDc7AqbUl$6S9D&!OtE{^tC4;>Q=v!}mbZELHpt~OTY0cnWJ0UjP+9;_(~ zI4tlp4Gi^4E6bak>hLfxd_3k`+gmED$|zA;0_@C#zG>-b8Jii0goi?mps%BSxEXX$ zHg+~86(w*!2c4?9Z731F46~wJYTbBM&thT2KuJw?L~IlX2iq-dX(DY+b#h7yZhn5K z;<9o)0)n`h_jU*7&k!(++65O}kdy&$j`dhUxGgU)3z~^FEl^Qf2s<1R6$Vlt7af7i z+lYw@v9hu#D=RQEGGOHg8gOxQ=H}-tt<3*1I$v4@aV~UZ7=<D3I9S0$5)u+<Xs817 zx!LKk&sej*QWquI+1ZSY^jVm(GY^{*5fLh{DqmchL+Ly=Z=<87ewK)onn#hGlD4!k z8*6Tvl8VaE%oLoMD-PxvnVAqsud2od9URP~P7?*WimI}xs0db_$28E@*C{M1YHX~< z&%Do3%*VyY^j_(@a;1xk3cIQR^K|FvtnI7=f&;2*Yc5`taImq$#ylGrH<zKQ5uCE- z`mb_;F$qQFTx!fph`^klvX1$(_VKjb0(N%R?X3-5$52#KQbtKx)7VT%0(Y*`&H202 z1ZSB<EPb=jpXXPUm%)_=(bQA}$AwjeTAS*Dd3O(&8xtc@5n;GuSU{SSlO1#hbpG|L zmxmjG3+3YCOwUNYIXiXa<e#IMC)Lr_DlIPos{mpSSaNc4u(5%puwour*;#pcxy{T? 
zz}qAz#zDWp>c@b2_#k*u@Gw}JhX@7sIXf?VV)6zq=5@8y35dzh32GCQQ>P|kX&@tY z@f`g*TNh_qI$9S;I~>e2x_Ek2)YrrBg7F;}(%QP(Qc@T3F|VgxP+Zv1aOnu0_eavw z;bC4NJ~4Lm#?Z>zG7XLtB1C`m4D{Z<-o<6b^J^=hnReEeSaqI{pU=j@j)at?z2#e9 zU0v5r%%(zi!Nwt`zU1;;QR|eFvg-QkB39<9XlNAF)D0}Htvrwtva)!X|Nac&83tif z?{sb+UNsd(T+AC7=)zCH>TO_1f*p2nwC(HfjtC3E#XKO*&CO+PV}XZx04zB*X=-}x zn9SqxHrhIxpyBh&bB)dQUOrw%CWbnC+MsfutDd&Lp`N3QV`WWcN>V%{13lJV0_I`p zad5C<#l<{y3QmsH^pug2tGLd-j+PoB2?d>?4k0O3MoJtOq(y|l(>i&$gD_$}!XOvG zyt9XUMO`g0&yI_Ea*%Odh>3794@V3RLVi&}LnDg$<=@XdKQKQzHTLlFBRtGAFwzHw z2DY>{KYjgLL0<Me54V?_D_8}DlLJ&Y7e^m2cRD&6Fk>L~u$>prv0Mt2l~X@UBSb7@ zY7tgd&^BArHgWsG4z{34jhc>DU02V_72zBhCaIu+n|UX&^Jf@@j671gczD%uFc01a z#&-rB%(tSMx3acq?`#VX3&z7d4-dDUy){1OIXM!O6UHY;kDTy!1oPx%Iy#!=m8G)_ z(>=YNNht}w{yyGF4=+sh@q|aDuUA}rEcDZiv?K-`%u8Mr=LY5-ZCP+L&%w!_kQ9IQ zY9H8FJj~PZsh=gL$V`pL8lnR8E?%CvnMZjWXVCe&Iu!G`ybU>-o`IeW`tmxi1{j@h zY&=Tm{k)H49;9h^cL#O#8QWC}4D-QZ!R?*xyU(8o1^U6sWn^N2_SDo=00|hmvjha+ zeg7SpOrXB&atqcIEiNiZ&&Wi_uX%=BTi!k~xn`{N^3D0J$A_3#rJ<wMG%&Dn_dtY1 zNGU4fz7Fejnt%j!Uf(T+iyJ5N;I)oq-qOmfrL8F}BoH6-yxgFf5D(&E9{3p>A3i4Y zc>FvtUr|+l|IvfTdpom>H-|^A4h;<qT^|@4>PJ0ZAGkg|2w(JQcQ-FP1L9d+I*(x< zm(D{24i0t*==ukGXz?&lN<qo3e1?cTGc5rt^C060r1v=#^R~Ded}dw;x3@tvFDom3 zw6{41^Uwglx0#w5-`Uy0m7zs5&wye6&VzgRcOKU^HF&~+Yig{ZD9_5qLW*ALV{c>G z+tvON_r0=FsSj2rS8vaY$~J^d@|<ek2~{IG4HH);HV&Er(0Lj<I!yyZ8+T7cXrzpi z5+^$wu4__f2uV-V3+cHg;b9)7^TtOpZ)w)t))e|jFh4#q{Lkt<hWW~}?yk$7ms{Ig z8X@RF*E*;z=*O;(HjpW3UtEw4%)i3pZSd+mI|o~IY}A$hZv4!Xb1IxBB*V=-lba9n zSj>Z;XJz>vI*(55kR6}-BmMm3^cXOYD~kD=`TKVt-tX$`_6zU@28c<B;dp|{d9wHG zW%tLA9v;3XxF5?Pke-(sFBw_+6R;}s>iCBj_Gi?Nw_RI)_v42H%+qQa7}~mfx}lg? z#?L%>8w~S!{Jh~&I&X1+d91lg4D<F5*35XlO+r$_@tJR`!*@-pth@yJ7sS^vm{L(u z9;}p<6eJ|Xm{s8z=JD%13tpW+ig|Tlo|IkgG!b^@#g1ZL>}%%n`1vEe%@>_-{DYVW z(ZKbqzheI3{g?0FXJw|rF=u6EE-n1_Zt(rRUF__vjkQ&H`g=)L<-kx6D?2X{y%deC zGcx^hTJ3m6=giZWZ@+jOIxRy(TMsYyuqYJsxaQlqI-VsWJIg4l=bDU%dGg~izwd4E zFb_I^q@N#~7&)5xWfb#Rrv!Y-&!h4-Kzo3%_ulT#%JRZ~-P~B4nz#WcJwGSoNaneY zV*WTfPee*V!X|wb^FHTr_<8YTGEaeKUiOb+{zz|wW_}m1pa0DKgWbp5x3)oQRF$y+ z*4Eqr67<8VAFxh3+VAhhl@(3xZRgK(S=jgyG09O$*?Xk6r_|i2>bi-AdF3OS7tu%H z)p@ex>U>y8&_A#9$6+2b-v&4Hs6o7>tQc2T0d@d5j|>lf`r<jp&%gR7n2(8z2Igt- zdK+LKub*df|E%+PnE#x&Vf+0t>=)*b$W;m*sq+bmaZ_k-gV)dh8pGyhr^7cMo}~TY z{ylh|jGP>XW2~$A_w%&Hm6eU{tq|=#et0J?xty3qfmGbmCb9;w@1DG+qNGSgL#<_K zWOtO#d+amsmW)4d^M!fa<LEq|ybT`a#eR#<Z=f@8wN+*Km@h5D#XROZtd*5n|6p%M zD$cykAENWRXm4{Q^KQP#bM)tqqw~KzhNbvJz0EP0N9lZ9N89taZ=D_O;8-1QW@%<Z zL`)<uAr1&Zz=dlN{LFk?D|($2YR*z|MH>~j#@X|_ddU3rx+%|)Q~@DjRc$T1<LbPq zk!Kp7{QQAjrR`BVf1DWh5AZfGUOcC!p-#p&f9$|@Se)~1?%djfp%&Uy)>NXdN&Q~t zg@yRfpXWg{-vv64b$>9-6aN7^f7CT8^5c7(TD<cr1@X_TJjgt**`x=&O&geba5ZK| zhFE@@m<aTAIeA%99L$UF>wMKd^PkrSZr^zzBxz2-p{DJfms&R!Tik1A=WONTf}i=% zI?p6(=ACf_^Z4>Mb)T7UZ43{^>21W0%G;oIp8c52zkmN8z9=-vA6t04M2eYjgVuSh zYnSHdW*{)v)6*`iEK5v?J<`wP%2fg({QT#mG2RC6H7R0NDKzu({}l7!ZFKN>n`6bW z`Nahoo&P78=Z{Y~F7s_2t-!pJqP(4z#o=Z=+?=dztQuMx5Q4R0n;HJApRcNHXl>!) 
z{%Sq!&fZgLCHu2nn(_`w@fD--rGwrPNv=VmN5-%O%wkr4SzO#aYQH;INkV+=yv-3Y z>@od36yVSt+l2GSj~+ryq^_=-SC9)&^02KdX=$h>CMGs9HLR#AFU-%TqNd!$wz3$L zw{di`V_{}`@%%Z~QwD^@#>Wf}_Tck2Xy$RnuwT5*v0_-=qy79**I|nbj+}eo=Z}P+ zv%Gjj4EuQZ2yYV<`dR0d73FO#%?~$&c$}Mu+r-=ycD@Ba^OY4HS9;I$^32apA8c^% z!D9{)141@sS%;*Uvg-*IBYugwQn;_fqL?QpBV>`V56<To5R#F)@H?2#&PX99A-1uz zZfZR`KaXbq54a}v3wnN+jI`w8*FXOB6YPV6qFiQn2B^p3SsYMO5@UgR8(XW&nu><H zY7oGYp+T(EX?S?JU~B?A@#A|ei&>bN8KtBp0f1}Q`f%y|v6)96GjD^3d3_@TIXM~J z$xJfcqnH=qKQAHzF{~H(c^EPAFh4N`I{z5YeGeF!K<7i+yV@~2Z-Y(e1qIHtbFkYv zIl!=sb?v}@3|mxD-Zy+*K!9&@?Av9<H|JJZQGc>oN6sNBs`y%5`E@x}bygNeJUUOr zdch^4_=1!yAGVq8tILZ(4oD&}?__6lxPc!MwQkYT*`fK!TqP=o{bT(6FQ|Kc=z{!W zed~gR7#u?w2kGhQUcbchU3%J@;OAZ4U8*lt-JG4GprlAk#^O4Z<YnpU&*|uB!ln+t zJ2^fIokdGaBPlsyczA#o@7x1Yc3Jc_srcW)JZ8R)3tn#n#~kpJla~_^;OFMzINV@n zdIHrc40Q903hL`?&`Z3rFfS~~&&9<Fr~4coEmk{sV7|?bM~@!j%2gWb=~7WsA$`3Y zn;V|Kexs-$XJcu0xS5RfMOr#qLt_JoWCyQw<I#B(^Vf$2_^~h##mmFv6dB7|ZXHpx z$jFlG9uWn~%Bpy-NfEJ1c*d0J8ClTL(Hve01y%w0k(HIEq@wgdI3L~!3|kPsAv_SR z?afCrfAoAC(D~yrUtUs3OiXm8r{nNj%Zu~Uk{7^uT&gTR{O+6AuYqK4ZZ0n$&xXdj zS8v}!OF0=VD;kROvf!LT?{Kom(%G#o%qS@-t!%8)(o<k;!sBfSNXW@K6;OUY6+iRm z@XWVy!8_k(zavvnP+&bx1AX1Vys@cKUQvEqTNC`Q_GT<2pqHB~Y=IiO54I&980OLI zq+;MFzx(k$6C;Bk(&KQmnu=n8nURqJ*ocdXvM@Krx&$vjCj)cEP)LC9Fnjw42gCfp z$S~+U))BeAxds%2oJsU7m-YqosL+C|iRD+f?(bonZ{v8Hkd%a7(l?<zG%g#4`-V$c zZdk3RstnjOGSVMjSONP93_~2}iS+2|ZVwL)#_w(Hj@0>MGym?x2k>mb_AkG&z`-XZ zB-GMU_Yd&t>T3V^>F213PyzyifxfQ8jUPR{zp}h=_;d{Pc7Zer2?;tnJLVVW&dg5X z@iu3P$*Fi%2}mfg&#OeqIKCKG>=?Pqh56Zt(4ePJpI}{5flBV>XTxx2WowmJm_If- z2B*o#(+z9O?X3;0bJP#&JTQM_{Q6Hn|IE$FVTofI?~CWpZf$Qd(4VuhwhRvo=EJc* z4n%crWf|8k#l#Qj{Pn2`QBmRS^kl3ppFiEXy}imSq)5oDC~O+(m)R9waAjiY&SB<% zI7>vqB^{hnQQAJu!pbf#ie)J!oN?Hzz1<ybLzoUJ87cwd<L7;Cs2|`t($61tO)7qT za^%x7nE(0bU&>1h0X%1W+v5Z8;$V07>{)SfF%vVBsF;Y+(Q6+*eth!$Da5J}`eBut z!{x#Kdoc8aB?f!!?&VfnU$efkMvLRV2Po!=$Y}Vrh{<WvlVkDaZRqjn{1M(}X<?R( zj1146#xWO(99^9XiVIh^*1`Vaxz%S+XS<=k4%RgV7@d!ej~=}-1gF5<)EH~{uwS5u z!aiqarGrDBz_u9e7xdi};UHwDCgJjR&Ytdt73HHhXN-*vMT7+Kd>uCzI~fBXsJ*aB zxPN9>P<Hn>*rVpppC+U^FBg$nUD7rc5Ef5LN^<;LRr`2*oIZV8R7}L&(kwD6e0E_b zHYyC)I;rC`f6N#b+T4Hq(A?DMhf_Z&%F8}``sB!um>L_NI(-U4Fi=Yfy7LNhSJ#(O zX87H^#>QF-3UV$^_Hi8N<p5oC{CqrMR#jD%Ow5cUBEtr*_C9>{059{Tw1PU6w2WBS zjEIYge4cwie-7(PG<1V6xk?-{EGRQ{e-aYnl0&muP!=LPosfu7Qu3mUn`>ER+3Mz6 zRb?5R>nv=VxwW|t=ZJvd?7h3U55J3Wa)6Kl!#vo+<<(`>%3>9z_wTXaumXC7w6tVq zUJfG@BLIwbWrU8F#{c(U|K;xDgzL9sV%VXHv4Y|vLP7#11v#w!5egS48!IO#6_3&x z9^H#J@jmGtA-TOv+rKUw_x5o4pHpX<g;k<*>hhb$ItC|9jrC8RI&~c74Dj^^-~nSe z3-*q7x%oNwA3cait%s$-dQT_#y`SeEFf+b-^$M$+eP(`Q^q4n{efs=q@0Bi7Gb0#l zz!(JjA^Ur~zWHN1uU@^peziY1z!wfaoOkG<np*0{CWe8*0bs`V(2YUrS2tFpqQgPD zNk~bwG}Hi+?vA#_`Pr@Y)%CR%2;}>^J2Fy}WMxpd^a0?NRTSZeJ0qNmON-YwR-Zk8 zMoCE_bx{Jmw2Py?n~Nh<PlR(wfDb<(Hz5f*ji3(oIVL4}$p9ZuFE?jzcNb3tRA)b= zCzwMf7L?9|0R~%(^gy7NR5&|g)>a@r-I*BanOK-zj>_A>4nWL>TJxx?+|${%wXwdj zz6uA#+{6e1A7&ONEp1J(pbgCpYv5k)-8F&8k%-XF+7jBp$DuuR=RjX17;FMULQnuF zds~Dvj0_I@6?O)-&{9YsCN6qt<SORxkHkbrfc1qguOKI5X=VaH9Mc$f-pk#Ul7d`V zL?}8wHaIj0ed7W;_{pQg*Pv&??lsrfNL~;Jt9j<k83}O_A1`;zuf`k!q^BGDx1(a2 z!iuuN(P0Q<|Nr*h0xYhqTN8fn%s+GQ?eFW3A_)+PK@uWF(BSSAMd1`sKmmmoRRx6< z?ry=|-95Ow1qqsjxLZd$>2%MmQ->sUe`&kj-~4muvRO|(=N$Ig$KLhsckMd0mnG2x zo)NU8qNHGsHwI5(-fah}vjgl7r@?oU|E!1@+aaMnf`&3y;gp#A;4DxeflhYT-X5;9 z(&C_P&dK7!(#kWcMg}M5`};dF7>#}V_nj3uLvwK|EzBJs9fG@o-jkWBiK?<9I0Wz^ zunOoptPTdkBEJCN=9c<*Kl~6C9t`{dw-Na2<BdWV;5mY>6TEQV-oXa?IBQG7*8egq zxIhJgvwvXR1|62Xy1Y<WQV7wgxWq-c^Kj-65CJOAojn5r666Z_Ag~|w`=`&GQd3hw z0B>djO01})aBY3%`;TJg@zW<AU2ULtw6rvi96bUY2!`b7L8r8ZCD_8r0aOeJ(!yu} z_H0Pj35l_T!+qbrdiDPOI}i^bkd7ZecI3z*&I1QHI1lV$W#6%TFALj#4j#TkB3Rxt 
z7a@9r8@^*V%Z^>U_w3!n!ph3V&JGSMC?o`rRY_U#^yyPan6pd(JU$t$iinEXQ^>#m zz73#MOyZ*0MKP!kp&2?8FWeALE-pdA3u@}B#(1NU@Q{w~4(K0m-Mib`-fCc^54Q`c z!@N@T8X>xulb7Y=<Aq3;X*`<-!RA38B)nBo3JweI8|;1d62W}$K(`0OLs~|P4~bZz z2G3T57#6N8FeE6yq|iUeA0i;QUN|CpW5F5WY{kSco;rOJDjz`n_Dk39LPjt@G6=9w z%grG<I|_>mA3Aav{>ydX09+tEEoc*N=Dz*=kDoeyR!HbD@3H+SWcQxele7$TyVPc6 z<H5e4Yc~r!D?0}Z$3bxwqp<X<^s3?28+UKqSOo|HRh>O207u-<yb0(e#+&zqV?Q@H zH=LoevLfKe$i%?SomO00w6ccG1l80~13Yl%%xONpqY%@5GPqI5wJXVcc({s*ikv=u z5^m`~R*;vMjf@We{g_8+{MEM;Q=@qWIbPnLX6B}9YRbSuX9P~e86H1z93UhpbU{{5 zMpH{2&K3XzEaB_tU0hl?J2(AiOMe7MynbW7x~3v1BoJheuAYvHsxmxBNht}`<Q3$A z+o2JhF4oXk9~eG1F1oYp@}ox&(eC>Bw=ZiOYG`g$6TG3MoYGlQ*?ou4e8YZ_h5Hoy zQK7@a7(N-xT?d7BaPq^(9c)~jJUoYZc~6`^eO6FVPD#m*>=c`v6cQa}LNHTMR+N#K zlU3L>IR$x5ZH&E(b5Yf=y?X$d7nhXK(bd7}=_)8Hh+Y&G5fhP=k_3nX;DO{_XfAn0 z1=I7h$aLIKpH9q7B&8;k9PHH9RV5@Pz?UT@CDhbaFxr|}EXKf4U*Awq2l_m1t*xew z)iSp*2h1~h{>7`e-z_dL<mBhLd%1(e)deMo#b{wPHMBGk(^S{cQpcO&L&HOA8|!n6 z3OsxmhInIXS!qDBu!yh(+*m~g1!W~w4K;O54J8$2xY$n@E{oiR&1=stD(oE@8kwG~ zsILo)jwU+TY3t&Y)l{YBWJSa;o);E6eeN9GOg?^oeu1;X(y}7bGKbHIvhypm3hFD_ z#d@Z7<A{D7e8Ox8&+g$qsjOoWkx`XZJ^cLTt2b|7gR}sl3-<@aDqKFeyp*&g^4ujQ z00VFXVIw?uK!t@B!NZH5Uzj&LKlAGKD>SZLyS|!uDc;)F8Y;jYgPNZV@D>pnUR6^G zWDX>`wF0_eX+?;NiI|ue{^5_z3=hy<yS~=gR2Lf??a6R=aIz=bSiy5KGsgp^gTaD@ zr8x*NFK>^uj1=JWo3}RpapovcIJfWK>gsM!Nlo$#@S)IL>>NnY6r48G2$t5CBs=6K zCg={zDoQ3MM?qYD5~yfT0q5`O=}NG+Rn;*&E~31fM}UK0isypbaT&AIYUI7V5<7X$ z?mTd6C)bIi{HIQxzaSzZDXXlaYi#W9=bMq66Q7a{@tUb60Z%YDA(-Po8qtpA8yr~E z+^qkr^ByQDDUh5T+&t*cR2PDkC7{c|*wDzt2+rKY%OkfiZ*+3(=KcF<ss*6>?%uBC zv}8|jPm%+PU`a3|n1Wf8tf_7kA3tw~j~6UBk{wZV`nUk}l9`h^IWzJ6+i%}|_x{QA zXOpv2Rn-+yF;UPInMQVip4-tLyvf#qWb0t(=HZr}ojEWv)ITy*P+AO}PjYfJC7A0O z8sPMG^^6S6tq69`pi#({BpXu;!qyAN6U@+SFR8BT9~&9FG7HGBZmf?>PNMrTY@Hm8 z%uF?~SVeVpDFsDQDd`Jh7X`&7#TAu>W#mtYt7#fo^9o^+ud5v5=t*tnZb?T3)j3Z~ zTR8j0=C|b64x@Q9U%q-hzj&pzyf`u{+{4QquGEHPV`Xg#4;?@b1~lmCY!CQJO-lwz zyncP{(=6`5mLM2FHuwkl0yZc#GGeYS@JXe)`1$+f7v{EiwB!}!1_b(jsz61n(a0eA z3X2PtR~G+h(i1rDqbCmmgpJMhIeFO$iE-f(p%7idQG$Z}VL34|4*G)Dwx-3Uxfd^A z{8IC9+!x=zggRZ_m#eDFb8@o)|Cf^Dp+RtDScXPw>#7EZdhgu3^Rv0@VHIvgcW-A) zYkgK;fxTy}3ZA}S5X&cHaYmhd7UM2#7|11v+jCrY_bH{_Cl$_0D9EX4V2$wvdj|@` zJ0U%zwxy}Ot~MnrGddw5Dj_~H9<k_z_^9}J@P*ux;-1kFLqmN~?7CPCP8$Opj4WU= z`Zz3vb*yZxK&?K3{@^<~1^F@YaY13h0l|Tx5n-27ld5W~$EPPZ?%oBG-^>a4;>FFo zw?`&MYU*p!vod1iW1^xX;^Jdrxw5*V^>Rx~TT^vyRY`eiX+<e~elqCJfaGu7-gxnP zGxNx!XV2DdtPhP0G_^LBS63F679q){3iH9satiW_D@s97Ev&9A-?%Y8Gu6=AoRO0i z5fkMb=+E%?4G0a6NsLd=&B-jtPfSaVxs-rhemrv3aP45|wYPS)4^K~`bCM5?k2QC; zmsC}zWM>6LM!I-0Y+NX&wsv}E1WkQI6|Al@7AK~xDWdHh7@H9ilg4viXOAczLrJDz zYBeM$NaM)C$z`dPgY})$TN&@xom-$WT3Q=Gw`63cB_$^&UW$W9e<?XJBP$&?UcTIX zb!p-0vnM~(A<tespP8M6|JK!4*D|e+X|T}N)-*Xiwtj6D)X3%bmb#Ds`ebdFo2F*Q zAODUKm@kjmzx(ddlSklmQ!^7Fv%7mc0J`uwIMfTx!f~PH_rLtp%dMCH;>C-*_wGPb z;QpY3W@aa5u1wBfodZ<gfB4|l>(^iO8aM&C{HM<zuV23rp4CENWb!CF@N3Y;Ou|J> zA}$i*k10Ct70}u(Vz^t#@Pewb4&KzlnL_gq3cHkCP*u}CGzcWt*3;G0artM~+}Y95 z-#0cl8xt2JEiHBG^r^!~k01)0DGSk0&~kbPdchIlbuCSU<0BotT`ldcjcqM$otJ@N z=av=$@MvaCG_ei}`@0Vx09=8nheikbhx&#`2WRGHR<Ez!V*a4-`pp|_H?A>l{p064 zY`pW}J_N8^6Y+qyzyIjbjoUYu)>ju+uFfqj%q-5&EL@qHpS^N*VQGC0&GNH$^XA<0 z5}aOJXM0UkLv=%4U2`Mgxoe;wF8?xe?d`}Vx3_(~_IBpA_YO`@Y)v~dGCc*zZ|vyE zDJzdkN%M`2q51{dx_X#7IO|(lYZ;q}Y8YwQMc1~Ejm)h|U@7dp@|<$Ey7Ww^s9H*7 zWq!kWcHP9l<jT(&@6C5_0VE&;fOzNT;ki$Ofs9#Rxw>)d=F?|SzyH!~nIKDEy?G73 z@Ziw{Fp#^bJ$U@^@zci;?g2316Iy0gK>z#T(c=dX9zTMO@UK6a<>!YVegx2h-~qV> zY99=iLCO3FnuVr5eE0yZy@MF_{d;KP_xC>qO_S8vhU40vyqca8R!O4f@fQgR=XL#A z&g<?J#qSg`JFaf8ORyl)+}%SXqtmmBYU_GNM*-ENvooW!)5w$k#|5Cc$?^R1GFvA{ z4UCq&l7fPgqP&tK=wU^sBnD9w9uox+T3B0Mx_+IRTMEgT33vlZ^C|uDX71xp=>lK8 
zeEj_RgC|I)d)N-EFJ8a?Np7jPZ+|73+Lkn$Nan#u%yhBLyoAgw$&Vl1dVu5yM^m}M znXKKq1*bH>x-zvmKRG}5$u{%H!IgfpD>KY%pZRh=!P)7y{{F0eWmH^Uvu5K2cXxuj zy9EgD?(V@Yc;gnFKyY_=_u%gC?(R1Ielu(Cy|cdiV`fdm>ON<mhVH6UyY?=7YNs_M zzrA-VUo~b*5|olnM;ot1Xz~ktKyw-Tx1N^Ylt9fum52aIgauDQPsLvTWu_Gf{332s zV~w{Cglog(a}P0@`n<N;;~W|+ba(VQnly2(R<$>JhzR~uRhLOyAB`mjoh1^JC2E7o zxLsn(Fu9r~jaF?m$WWwE8RFYFrUDRjN&Vd&S<CI86qiOy)o*NtIk;RegM)A*Bf1^@ z<KamI=7j?XY0z<1DZfGSLIxi44(Dw5>y?+4!TM@^A-T7A@g;JrIp2`mEJ_ux`;ZjT z^YK+Ca3VJeuc$0jDc=YJ*zp(yDj+yf&)aw!9X@`TjJ^)co`IHDroOHiD>>V&nVmWR zXY<>Be$Ch?lB_(ti_4zI;@7=BsD7&Hz`k<<ANnD72r?<XO-ao~3i_hcg#`OL8|aoj z$(se5A`HSMdFtC@rC%LRwxYB%w4ocFK23;oUDf3qo*1#y6t=j-C0pQ@*ql*Vy%IgW zM!0CBTJ+r?n~BG)p6WmOU|>SB>|F?A)y$mf?zYlmye>u-${(|SSX$@SygOODsjGVH zYPzUu%Cc*68?#hHHG<-hzhvc@(a8)9-mOl$$AgAsmxSJ|9=c<OHsfKQ@6(tHN8kRC zJ={}yS`@->{@9zoJ)~Yq`HIZgnDQO&Y$bz_dP~FJ7y9k&X69}O9PY_L8u(gQo}Cph z>xzKXO9)XJCOsO2FL{sLSm2ey6Yt6p=A;RMivGECFf6gzc^_W2MrB*^ow0YS3@JRN zHd9%sWR&@Ctl+!arVq$DTI^?})#@9Ap<HqvKaz9gG{LM2OYG6ZO&}}07;~PE)cx_P z^1z)V$5~wu?y-x7K@=CCsiU>N5E}MiFcKeL8DK!NIdS*c_~_d7_(+oFvtGR-z&*vq z$;-*d=Hu$<p8}JfLg8kkz2^lKP{ad>vm-w-6}0=kjCynnZKu;j2wp<PLu*5{P3F5p zULg|$lh>QQF6&B{d#}d$x5k2oZf~=#ec7g;6)qtX_W1ZjP$&<vgTGP+Ry&$L=_sxJ zvL$r6GCyR8O`RwS)UaFgdC*S7=%KL<bgY<jtHNT3J^0ORB2eK0erR@-vWuqWdfGE< zTmGT8--j%Or=;R)i3_7zOjRxTjS}@R>f0nPWEK}3AGNrtK*4pDHG<o(P|i$YheGaG zNXWmGg&0Y~>Bu+tk$^w#t=w(3tbXTsIP}?HD1_hukkECbqh*g5RU0ktW@ct~X1G0X z4K~1<2EQR_-&+t!eAt8s!j@Q_h23x7&#+Iw`YhE|j4Oppze0Baf9^J_l=I~Wl@tV0 z7{Hehj6WXC+sX<~($~n><znQr`&l8WEhLmqN85RXZnw_)-2`11LiC&;=Rh9AC%2+7 z=UPdQl_%HJX!rVV<-Jb39^INfU8ADbBomnFXJ`(SRsFGX+lr=+3QfeLAAWzik!N(= zsQ(EcVWU|>&ytF~J6y3tYf0d{Kq4tD1S_au(!o<;`zbeiX^&$-0pW}uFAzQf0{)5^ zAEPNRIVB{1H4b<UIS9$m7yASVRqrL{H&#Y3-#3&LP}GCdCnhCZT9@jeGEiY-SC&@> zI6eI#ey$r@E_MD>y{Y;HtD7b8D*HOw74(VC)RgO^GQj-4kKU1o_-zY#l2U;XBaO$M z73><c-6LA0sOG%iq_SbZ4x1%9t0E^LfYMQj`CF{eoVhn|SkQlg-Mzgdfw4zge(>o} zrGk^z=0IMuS;ej{>w<FOH23!m9Kr7kWbGbrtrlV|uTl*@wVc-S;jMRL`7;~;h(JMw z{Xu%>rB)z7oyS7B4d}aNwGLF_7Gn0*`}7-xeuL)}Lpd}wG_pGWmiGiBN}Qpux-z%^ z&Zfpz!z{$jGBN3AJeeubmJ%EcQge|i+S;`(6_|1)e6Fra!dqjDo8BEJgGl%~HY9I! 
zbTYz4)T?ab69bd1fqrv5APwN_@;PjBFf~qjxM_iR0zPTb@o~|I49d~i28(?j^b>gF z7VJ}Hjk)v@<CDG=g4gEa;7{02VI@-HP=UWa(nRE@Pepifw@$m*-Y!)qc`p6C=G|;1 zd3UmJv;B>&zGg5rHPSMLHf~2JB#Efg`xGa(&0s{kuRt?Rzdz=_0g=|0K>M`aX>?BT z*Le_a6R+Gl%``AmYf+#Baj`4E{qriY-D5~AuzQ^qsa<oE%@d3Y?3`DSs=niyMc>O6 z{6n0{xT_O+;P2Sn#_-1MARif}AYD^=PKlqlH{E>RABlvT+Uhf?vCFj<^rY>Cn3$%V z7St(LRyLobD^kGQqJdvURDbz~7oZvZy}G&;PEr*1;pILnjG!<lD29YM%2p`)B)f6= zZ8D3gl2JlJJj?*2p{CLE{T<n|s5<;%a_!@a_}J%tp<*%~n*HqsPi25<oZgnQO=7Tw zwv29UOb(Q2x$4!rW$YJxo^x|`XX@<EC4Dn}tkpSvjnPHI?m1sqJI%^|u;}6dTam8W z+2-`PEG{rfE^V4_5ml<vqb2YeG_xI&(@;Cn?|;l8Lc=enHt5>XFf{zLD!pZ1XEp3y z29AOIvzRkbZo?xh^_3=_Noe7<nRE9~5>-|qUsL0w#*-!1t)>ULjgRSq3zxRAq@=O5 zo0hnV#p2M|SWzWG|C`cA9?;Fu6I7kx>N@}a`Ur6jaV9Pv>`w*uz>X)rHAW*Pb=^9^ z3*cXRdwJPuupzk=nYZWnhRiYq3Ip~8x^G7{+%x&bmEhpAzG(jic4u|@yxzROQdQ{- zcwX;p*Bx3w$zQHD+}itn@~P{b4ibN21BlelkU_hXk9E-7Hia#(zMM$f#1O4}U!hPu zw<%=lwK-!eX-zExvDZ4<oL)I4nJBj`w(BEhBDq_*yOysJVPGUCr@9x~532!X&Wn|{ z>q{K$n)5Tz(t@N^On1s~VpT}`zDXDw<YD^b^rkff60Y7eWzhBl&zwzev7)KeMrb92 z#poR23oeqy7dn*}dbIXgUXLH5qB6vbxZ`@cPtM=L@FtSe`$h!EeOvTCWRHnb`10is zm2<uGwt9=sePl-iHD-5<q2I$=K}H^JmQ@hYITL|Kb$00Q--+psK73UHNV8y$`*VnQ z*n>mGu}Q>VF{FPs>%0~#^&TGFVm3OP>ZrixFV`cdd@VI$fff$N<zCfY_NF+r5PprW z^&CwN_3rNU>+SO!v$&|J(AwZYn5{0~M}2j5-n03&8OWi{<`|@kHHd6MkK2op`7)m6 z6vrB+3R{LIqP0t6JRlL|v98<s+?-;Y+v`E^2<y~q9oMGogNtaLRmt#a)8(4q2U`Ez zS+BQ(pS^<L=1PUmY$UuxVr^}FRA>xU<(Rp-1tjECPg3CmW!T?1$W0Y=BcmM4!~VWL zJ$3z+1rHAgYqY54qU2;{<%A9&oMT)o2Y(eA4Hzn_6Fy!z>l0<TNOI*_pS&Gm^p$20 zvaj}5QL*j3O8$x5wGOuzw_Gdvu#8st^c~U(+MWn~S2AsD-0k`+f<nyBh*6q>y==?R z7@GOzVzPm11ZZfHt&OO!$rzY*!dM=MRT#S5HJ$Bvb`o>zNF_5!CCWcBGUT_~)qLm> zzF9LLd?n}^iP(W&cd=Bq_H@)zlQ{)AmlMr%0!;KNCm{|hY{N#~FB<Ec90|9;OG6n7 z3!m;oV}gSitLy4OsjVlcr~dqc&IcIy_@~?Cp@9Jq+vrJ5?v0zi54pw7S8K~`UJqn^ z7psGIX5zxzaBm3k=H@@UiJE6B*V~W?dH-Z4d80@nHM=t{ZFHQyF6H>3qm#!zdbaYu zK3oP`Pu-{U_8X|`I$SP0TlVy=M@2;;ewf)S@Vd(b>XV|Q-6SPZ>IZsz!8N*NubK=t zh3(HjAfTXlu+04*w6wGy*fv?97yoqmcKCU#%FCNG-STgL?(7(6bO;ETm}I}#j?fB2 zLibbUh$bd6Rt|iHDNhL|n}}6gk0R1>-rXL3sM!yMdkEk*5V+*!NxO@D+6q{pYj~$K zBZTL=@bPUCms?GVGKf;ipf$tIIgeixYN(A`5;8!m#dDKd_NfrwFhg<D{Ou_j+?ywR zpyVW&z2zXeye!yA)9E=Nz^XEfQoJ!d5x7l=i=Uj5preG?L(gmXh1HM_#CB{hI{*x* zSO<*+=B})?5d2T5S}ZTgF$c$dm5`@rNDG{zP`3{N3n2gAC+d3Na73#@c`lL>wzGqv z3Dw)w*ch-d6=mReiztRtP#_L)Ee^{@Lus?%q9)MOBUDjIAmrn+>HTgQn-<!ZS9E%P z+C-YuQ;muz<N$=H?S96=!~K(%;(OoJMf!WX76#Rd#%fqu8Ew=1c3j_l*Ma2j==iv# z`vg>JTYY)paUrr27mHz!k26wLDmB3bryzeD{;L2wC{~feKAieF;bg(vQ^c$ST|vL= z_7{ywa<!`a2n-98C64t)hJjGC_H@G%e|zgaLn0k~nFOR`TQI`2OFPC7lQG<7<jl?` z=<_W4D*Z0Og744LE77C;A)!*2{BY4Qd*>k2;5EM@%8~}v3_YmW+?CBs51bVUF^P&p zPg7N0-&tex6Q}s2F7q?YBfF>N-WAm-M{f)q&v$>r2h4xA7~YUM73krDxfS#c-_p&+ z{AEX+1><T&8g64lpT3;Fnh&x!yIV#&0`Rk#95$UNYI=r5TT2)Pj|dA~6*(=3!-^)n ze9Q&9FF^~Pz`uWBKw4D{r{`fJ<HfUea?=ME5ivyV!0|CVtbbQdxSo!B=ryqV3!)&M z1GHm;9^ZC7$b-(n@TL2z8VYA)J&=e14RTS2Y&rXo29!#!4f_Fw*-i5%Dq2fdYRpy# zVT^v<vmK2fbVLyK<IH~#(_z4Xu5D+X&Adt==tvFn&{sJSNm{?3CB`5vPLX$xxmkOm z#ldy=u;oCY9evpYQ`4+G_`2t2VOrmDel`33w!cTfs;PRJ=M}@(M@Sww4Z<lO!pRNb z#VjbtW#tLh<(x^<gB@7vsX(lFPomByGitNKh%6heK(=CjM|~ML`KQYff|-eu@&@%5 ztt-G|Nd`aim>B=~>{O%$`>ULn{p9RrCrrI4;Ss#{s!l^ayP<)mfZqkZ<*UVIXDByz z{>Q+3s3IaE{Y?LH_x{oGjv=l!xaX$#6Dc=03Wc{Kw|iEHFw11(QJbk*{we+`%)s|- zjn?P0{g=E!nWl^8*6BaGU8j4)=%Bs(A?999ZA~xl&xP*4IK`~YsHmj*_B%_PGs{T6 z`Q_zdnQ5pSUn}wn_K3;(SNgf87RE=%hM^Mq2VfK15wQnAX3gXFXw#QqLWiv1=dAY~ zr;v!>_UXw<JS$GLN>2?uZr3LbetAFatg>N8X+qW`8?O~G^zGZ`rg>R3io8;d{w`aL zmR=p=rNoCxB3Az<No3?jaTrL;NaBig1amQ+{fW5CP-&58V+W>sd4_+4j)|R?lAxis zzO)?t*E3;P=bqot*Ud*y@(Mh{0~eQOY9Nal`AE<24pPg^`U}d#7RhMS<zK45Ip3fx z(M^9vMzS$&Dyf=_iHqm9K`;bzw><8w?&EW*+uP~3`FQ!7;R>+V|LQmH^!XIgltj@E 
zQFG<MbciZ1|D_-%(7}wX#87G=j2AUFMz1Sjx-_*18Ey*SG1emr6KBxH8o01K7&Bf@ zZ_#~0%(6MXD$r05`R!t8n%sevfZ+8Dq;qX#UGtOpoSX~d9SC}z)FpUhYN8MS0`uF= zPc|OHgmqeWwwPZv1$-Lzf^qD3f`!tvx20;XBSO*FPBHdkq0|-&7k<~T3yHtr780!K zSvjd(aM6O8(I0<##bny<J}4v|&1r{*rS?{qrRIZTJTO3BvD(miy*L&EMBZ1PwGHQ1 zdRkRXQ8p7DKC<zQ9b$xacVl^95myH4^qw!~2(MBg+zSp5*XzIGyVMvgI1JfS6@R)O zu7-=$NQOZ%6tFX&cW*^#=&eiPpwDQ%u`sk1m!h`ZZgUe0ItDuZE@zOxgLi3UVBl<q z%+nI%7T}~P*p|Z3ce2ZM>FivWT?B*!OmCNjJ5V0v$T>(E{&3Lbt`7;pb864v%f{n+ z1p6z2GIk4gz$Rpzc}SvQVgGnGA}y){!OSnVoHdF51tm86y9^9|eByUxIOnUIhTPWe z^K=Ku+}2F6<kDG>yseXo6p-=Xp}N*$FeAMzh#xd=Rh|XTQk#2f?Rtr3ndW}!=g5d{ zD?odSJLao4VkJP#QirlQRBJo+F?<6uM20yM9>SA<uQ?%?v=FK1fvtFOtUuTDKh<o& z{QW|2!Q2KFC0~jrGB2uVemLQP91fJ{FQ{%Vy?635*i;{@TI<kI(}_ZwCf6yiSRmqm z`}NGQ0{xpelFQz=mmdJC8Y0vdJwKWzmxv~c4nsKP3xUNEHc@vwI5}U1w<#NvdBW$S zJgoE{Xy-h*c85KfPBOahDZ8w77BnI9M&IL#@L*w?Zn0S`OiBg?0kpJGRUuYPjPq1x z<8uV-fj8cu9KThp%uohIhA4t`0tc%pa%KhOn!Ygu@6$9KFNyvHABebXDNy?mGO1Na zrhuBKtPvM!O%~?2dl;s~afs*nb>VsHC36B*=R7KQ&fKqEA6ptkic%i%bX_MzR%9_) zdQ04j@hBTBLTyp2LR!k!gV8HrFR3V<PoQqBroheznf0maaj6`Ay-V4XoO#=Vq8=hr zXis(!!x|wiO7oJdKuA@l*5B>STs=(C`9T@Tjg1SBo9$f8bD{ekLwjH7aZ`@F_W&Tc z3H~mqTRzkkn6b(7%D+?t<h##aZh@ut=azw)#-^TdKBG2n9?x@9A9|9vZS)KbAmpl! zpXVze*Y=@?##Yy0wC#xpz546^D>7)a5>~(5AfvAS(sO!=#zc{AfG+$8ffYJH5#u&D zw|C1D#f~o_Ly-Z}sDFKN)9jGUd9FK2&Nd;X@fekVZ9Nd8uB=Q{C1J}<5$vxr$YN2m z(J_}=E?5hIH7%!v_7o<8glEN6jf!pwuC1I#GAk{1G`!u~3B<OKm*y4WE#*Bp{;5hl zf2CR*#bB8N6%XU}e9&g#!Mo}E8u6Jsz2VIJs-_!lA^`S)rJn*?SQ5-U&nGsc$h(wX zmP^F;`<dkpI}{9k<WHV5d!I9=;rH4t8)cS$4vNbNYN6YrtB~XKk^R2d!QR6sO#Y)2 ztETN66z(C6E~y*Rgw4l?4<K@YJ;BR{Qe4ip_3gc-J^<U6L>Tn}SeZ`R#ZqLagE_fI zxq{sCQ;xtl-~v#Ohld9u5PYSL4-{3ZDyKK-sBt{FqcPRZ;K^j;WwSLhLK+WP+S=a6 z%}lSZ|C${O-GVfT2z&L4dq9|RHr)*2y*Dw+1p-pI>mcpsPZu?kBE8}vz!t;KxLD!h z<@bBXJBuXxCaThrUC5YMPe#p|d9W)1nngI95#Xf*N{a!oIj100i%2*Dk`^Ky-;V)u zD-&YH8)(6MG;_yx?M&a}^C1Vh!QP>HzOLtk{P19|SC)bfR!$gp-N~cpA)OF|wtGYT zCcj$Cw*2aJwlxORT7{7VjETqkN}(m(4xn=gsrLw@*W|8dLD<<$<Dsgo!vUm<7nKed zleQr>b^6np?$KJ@HE&YK>qAXm9}u9_?cggyWN9dTsXk^uP_4r(<OVf8@8ra~dG-Vy z{I0Y?PNk43++`Ag(wyH8sr|jn1l%5+77G;>yqjsg6@v6@m^IeH75Nk1_;0use8M0h z?>!VMDww*!rQ_Yxj*qK21K=grSd~9pfWyl8ujm{(lxLFRDH3dEp_b|?F5Z9xMbX|< z0WbNCozPLRE@21<Zd_O|g{v%JhkPcl6e2={+dbgCNTD9eTycu!<$HMUISL<l)AiB3 zdu9zL!3<|963@zAX`l6lrnR=z>zhRU=XsXcKE_-fp~?!4ajn8{C$UJz>`VMq?JmR> zA+3R`awi$BPrY9A&>+Ona)@j~pEew3zYXd{ne=L%zd<^~;X2b&l6~M7(Xd|ee0}8f zwA74op`rLb4L`7N`Vm<RNoi<6Mdo3h(Xd$n!|vOEJRXIWp6}NxruS*%YKnqKk1amr zqM*3;>%)2AcQ<DExwyEv)Gjwfeln6N<l;1`VZN@90~-_^;4qzy4XzgMmLp$^C-g~Z z5VV2xH>X_EsP4^)3<3gzmlsbJ4Q!Eah?{A1D5y!%G!T}ribfSJ4NbNW*pJXFFnJ=Z z*q-5QmLpgaA~1uH&%WnAvxk@%B-zO=x$9z!b1^sqGng=9vUx;RVzS-qgOS#dSGDwK z7jL>1%1a@?3>7{Xv1Qcfo!?4caHn)XaAFmCb%m<6S8BNH9bO;#GeGbt>AzXc1S*D0 zx4H|y3sj!dSfIL{KXA@}E*oFq+&47egK@kN;d<FoL}WWYdE`~Y4{U!coVK`s(}F_e z-anJKY$2R?K<`Jnr!OZ_1}$fSKNrFbD=W%O41YixUR}9V*%qgCIht5|+`fANsQOlC zd*cBkFp!+hfFokld2lVLFDwt&nB&j*_-6YBpeiDDF&WrU(&`HPQsOkxjbo^=Mpsuw zD~zY)-@nBj9Hyvm_uWV~k{&o=B=|tCxiCt+RQ&8+%gbZ%96KvB5wd0VMRf+<E*4IA zGbhKK+CsDksCRT60zd$qz|r0gS$bVCRL}HuXI<Cg<R%Pm&{sZ0`Cn`>ZVNMPGk|B3 z1mN~63=C+Ot5L5XEhV$a!9F#$pbkpI88ntFw}mw=1%XsHrUa*E_K><B`l)2Md`|>y z6{%lF9D5NeyM#=sP4M1l^R830RvKAWl)hTqe%_Q_`d*Hjn2QOy2?h%&s}s;-5V<az zXjVO~CH=Er*ZRIcu&BW$<GugkG|6gD0P1I>2r3VKRfZ*|uqN_Towll?&eX_o(~oh) zJ0%Sy$J5jb^ma9UVj(lhW38Q)p_5M2Sq+s4v9K6C@V#kd*dpLYCy9ECHFYz0v$b{H z9oQLw{`(sg7t^u%237opTD)s={pE!*8^bEdO2@!bqA9N}`yTaC#3taBrAFXoxnirC z%UnaFu@eFT=B4=+ydHrHT1W8R5V9Wk1<q%4Pt{dNN5oanQ_1R^yuPM%h=Oc@jzdtM z0!iF~-8ZK>mZmv&X8t0xTc#U!4r9&Tt(%IXsxyciZnGcr1kN>gBYh_qC(+?NQEVUs zy5Rg)rR>r7b2eOx+H_ZCjUi&QNLDa=r?+h1?j*GxG?xu(BefrCV#Tj=s9f4xF!aP^ 
z^OVrpV$eB}EYLZ3VjBcPZ{xdR;7(qL@MC}3O35z0f{>CzzEb7|J~Sk+#<hriY)kBP zJ<T4s&~-frwi!IXA0J(oJ=ei?HBsI+<o1-5BxP<y94x$@tkmbmH;<)H{WYDerZ&wE zdA>*An2=iR)t>E^)Lnh+qMH&!^k%h@IOHN4JZN&JTZ&8*!+YUOpGDjnWbM6q5uQ(U zW#o)0@A$leIrwSy<J@9bW0k?QyY#5pU^zu$^gR*Okap>f)AG}s#qAeQ^sO36Ke1vC zVIT8f1~l>UU9quUiZB{)=?O1LM5K~d$y8`sZ3*eL7n_}M%CBB@wCl@MfM6ddUnM&; zjw_?btE;Nd_zU;ez9lAkZioHD^Rf5uHeEZ$c?Q;S?@CMEdty2U2I4Bt`f764N>V?} z<t2?H+#y+6TX+;rF7qwSARNmO7WNE&>ml1rk>B!4RBoJSbvVk0P859uQzN(a2p5g} z(K}RiBC@k^GtnOv>US1gQ%1MgGGTq$l0BHa)m?eyBAGFeCw(7Dv-15ci;2r_9ew6r zy;dQyF=B=3(nVfKa*Y-@mdPjuGArHKoF01|(jl`npIUB^+k=gmp&j$~RjW0DtM?&C zswBJxiU(8e_c%lH>q|<)irzE$kRLDp%+lpY@cog2g^7mms)Xm@U}|d{U8;@~U8xFO zgfVMvAbLD5D$5)Y#S7UU&`tyXQ-^*7VP`>rc1{nDaqg-%b2*KR*w1EsXe+*9w(xf; z@4h2-ECuYgqvg>`C-bkMO24z@bs0LYW=Lk>^ZLz0`3(|@eX|<1b`a3^?*~OesiW{1 zb@y7=U9JaC^{hvrBF(9)<EW`sT79H>&8vvAv{nSp%SrQBys6oSv)=l#vtK_Xr}`l- zMI`ZY+-W%3_&S=)s|zbT^B@cZsUd?5{YG-L4u_wg_ef2;@C&l)$BEP8b{8#5jeH*z zei>X`hK4mXSCW>MgA?$OQ3#N5qQ0f>U_-_a^ah(7ABMGyoBz@nz;!C)y-;z4>UNj6 zr(gG#j-=+v-^)LV&6A=zP%jMs1NmCmu!vX==xb8Yef@Yei+T<}8>=$mt852tx02iY z^gX=fDT*MZ_t@|ls|-4pzn>W>e?y~Xo)~U4n#cYH&V!NO|0R58;aC}j|I_w^WtSO? zR8*dak0%o46zHAnhZ;>#WM*z6Twg<?LfG!T4&L`>yhE<S7JmchPgJI@!}NGh8YJ|H zp8h$b%HtPS;KxGnBmjOOb3bj~Ch;AQuT6Lo2$UdKKkD+%+c_ddHO&IV^Fa`-{n-QH z5q*t9n=tIc+ou!P@Kp5}oEsQfxA2v61-W<_@q&*%=804t1;TF~!z-)KI<;q;)&x7Q zcgFqpLVZ1dBD?87>+J-sJGpt8o^xXoYMDxgVw`9F=u-W}{*BGlIq&@Ix^>PNQ_~IZ z5aPN#Q7xj}Hl$vzwOzClFkljs{XvR}?V}y9BTGS1i>9E^i*k0y5RPS<n!y)%LW~f> zb*C)-&5beERLaLh)PjooE$6tzKxB7`I7Qa{Y%=bdDYy?J0k%VwAN(#;Pd`Oh5z+@s z7giUEff?g2Eh>%5UzUTEXA4%0fgD{towSYj-fyVoC}EdLK^G<@X2?z;|ICGGbn3qR zK+RpK_`-bKKzWK4zLc`aj*Pm|boU@b=!H9ehIZ*$?GeuMGPZG<!lX?m=`W?lr~8-4 zKIw<a%&xVNQv)^65OZ1T;bhtY1c6&5tLEF{=8<Ky(RkK<$&`rbXR1WK2Uy&U#oNZ4 zQN#($YZ<HB>(L%<?jv;Dv2^N27p7W!k|BWVq3{t*kk<0r%G!FAWX2}C%*6X(!PBJ8 z$4T>4kq37VF<)p19w?UEai}p5=%6~-lPECnrT|6tM7l+>&CgGaF2q=Bp+cyvaq&h? 
zq8>`xKFSER23Z=PZr<is71(T`V@g!yty}S5IKyy497_~kEsu{mb^%WVajsoUa8p#T z@+G6iak^_JbT)h!LT~3Om36nLEl~9+M$Xf3_}%>I4}AGyo4V{mrIwF(!w+TserDmL z<#l%>QB3&rWJLsuIbV*Tsd@JyZuZZe5(#c&hL-E3G{e%KW^+dpMql%8TGK^u8f3$P zFl-X6-F<P}3dl&0F>-(mXPPDeY*%q35r)%XH-Ojge$r6rp)GLPfS@eB?Z(UPnZ1gh zml4u3R1o$Jwcs)Rh89+1x7MjQURfmH|4=zalwFK=eQmmbXaEU^$i$Q^kYl;x;GwG4 zRN(Mw?j$_(IBM->yN>tH#Jcij!<98D`C*igW|b=B;9=pRiln#D{rGs4D~iF#voJFq zT>BMiw3_($VJ9$Qwwini8ZyXy4}+84fQTUFkB&~)^0I)o(yYj%e4=s#REPy%POZw{ zCpxLJs)F)PuzDG79||L|KiH}@uZ~{oJ6)|$-?3L=lEko)OSr#YB)DUBKhyDHQZF4+ z^X`o-7dq>*TO`ycvLYhL+S9c0C+Ks?ei`pcUA0U@CF!ioZ(i~GsRAL2gUZtLWtat5 ziv-hYgbvhs(VD3#FzASwm^dY6BWP2CI{IVwY!-te%M%y!oSb|d6!CRQoZ}L{^Pl1i zDx>a@_}dH}%=-$t!~(^?u`%488pKgI#m=5M&GPC_vDg8}_`(=|P0<&4+ho&bF=>1} zIKL)5BdPVZc9<t(NA!)2x6CuCw6!Y9m9c!o<Hf4{;Nbk#F*(BZnu!0<74Y_oB;7-` z$|ByVazX(92jZ~itcYXzNp0yFV=0=ZCVgRrCgM|i=EfcktJsGEY&41f&>^!^Y8B<( zBC)&K$y&@z_PQE@#j`8m)=^G@B9uRti&j3^X`zIFrqQc^yk8FdO$DEiYJryTptnx@ z*SRjoud5d^9TIPpZs8|85u***n}poiI-q!6O1i=F%)-+AItM#9puUTz(b?<{dv54J zcCm{m{uch#H;C7nak*r8qejVrH;h%-5p!W7&_#iP-d_{*FG6#XyJ@+OVsoSc%C}2) z_W-sdTrcy#)`f*OKlRh@xF}X===(-MKrAjle{Q?Ejb}IKJZcdV3w+NAQ+g7FvOt|x zTx^EEV_+qD22Hk3Zf=j)i@=~nWj?3o`qFd%TV9|8i61BM<qPHldMQ2C{Dbb)>r3u) z7wKQSa6CgIUBgQrLb}jcWMbc?fkUB^Sf9EhmIlF+bJ(-}jA$#*lGsS#Rh+`$TOe!x zx^dy*9nSW$ZMS*HESJ;iAgn1drT{sivrKyjTkO~H2|zC-$+OtNH%Xj?xQs-NKT68b zBQ3wAUTuARgj-O)*_nN85{Z8*F9c2+XpfKQ28vsf_su3ufq>l+fO%?7jXjR=r8Xjt z1sGU^7KZ@<cq!S3s0e4|sQf3GKu;L^2H~UiXYvkwn(bdm*0G;y{O9$Jp+1`8y4T%z zD}h7ljp(VWf*h&-?&}`9fVTDoNYY-O(#pLXZ@En;tHIvhsL(D8_K%f&of6#NKXW%d zaGfXWR=S_Fw|n51qRXl?RV@T6f|qc|^ufkzA}LF7F;}bKijv$fH(te(LqOeH;#V<# z>^3J-hpOsg65gt&!>bMVm&#|K6S{rD&+7NU;)q)JdGIuU7dahDwGazw%}GfL$2Ek8 z-HbSaCi6c-KO#Z{n*s-dl~9@<iUg&pD5<IHXl%^sI=6I`6cw2`RJ;E`So-<X{DoZH zZ{!l_oAJW4-d8k-oTt;-BrX~lmy*&`LetGN<Z5_dcwrYtxkW8}<{);}Y+7g+BPj(R zp%9?*E#FHR_D&ilFpxRWc_ZJwy`{CXlRBwc^mnJIKbZ-Z>9$6qijZK3TNaUuqvr5+ zIVZ#+D8s<0dYDg2M_kDSKVr2Kuq84hOP|Ly{{}Yy4klIaS<&FbT$H`R#Jx_%3{?X( z4KYwQ{h@Cw`q(G2I_vV8Ma^X|MB<rWq!Hs)qzk>|Oxto*+*}$LIX@(RRU%Kbt&wwv z#)-;$CC_l#c6*13^6~fNJ?xjX8icc&fGF&bC`lx9c}5aU;5UECV^Y-aXFy|EqD!_G zkQj(E=W8TK$jls+fJjKlAkYDd5(Aeuu34sp0ESoK!Z+BR^BEcbNZ@}6a!mj{lGw4b z)Ds|FC6P+ewCqABIvY@rQ2!-#=Nl6?=U$bWZOAS5ZbU1vA;tWRP*C+nOG4zAVX*p? 
z=#+w_xSEK{ZAXT~&&sWV0$DY3@w69=d^IFQhPAIh3vQ{GBJm*cpBJ(g5dl&VK=<$x zCc89~PVq)&xBCfBrw^56|7jkXrn&qZR6{R{S4cY!4SiR4wLx!0=`Y>-k?M{_qe~3N zPP(%k)D_n+f!OGDkDeCxl*ul5D>I1>(5cy147OKp(O9OmL(sy4%Umcfg5><XB%Cbl zG#s=H=-tSvFyK-sOC(-^0+cItc8);!xkh*3o88GpKrFg9SpC#~r?0RpzNUeJjlI3I zySs^nhNuJ;HAS((^qTU>U!Ho13bVw-A}lO}5r(kQF%h$0em}s2aDZ;5epgiW=tHXP z>isD?MzXT=_lN~9V?6UEej52-3<c&0cIe+Dt=p<x6e`QKLLEm}Wm3PN{#3oS@$k=& zYl?CWuVng>A`Z66;_(hj^nA<v+NJklPj{=ac;3N=E*%P9=gFP|@#l3v-IP9orSkU) zV}EMBlaZbf7OIInPkf5R%ND^6gZJQE)JmtBwq-~vs*;ohF%a_2%J>l0pEORo$@$Uk z1_>YzwiGyo>HupAc@Q%`77~Vbg)9v90W*(i{k5tT8sn#tD<Mfsb7f9*XHAgRkdq#S z3(E6+1Mq4s!WzJH<)N+g#f3N=czdQBmeHp&BPc4sKEpwck*n$;fVMsg_uLQDKew?w zHGmYQ#&L&;>F`ru?&ed~WwlBca}cJKXW*5BD*!lIf}b?-O048I?Lsp{g6%NEcd6nM zNAaC4ennadaZ_-U)9sx2uxy#}Q))C&LP3;KP5kiwZz6&TjTOj}eIz&e1BNF%X49fZ zi=q-u%`KuzSA2?@GTZ9=$%EoMdgN)nAC|C5;o9E8Isu5Svamw5UY-LBcd*qJI1}LP zxx^y4z{NX8$2<f9XQanMz9BOL^Z=5mM^M5E01bl?I!I7rnjfoXa1_A)3M2Lig}jfk zn*8tR6zmoO-W@_DxCm4z@T0$H6gdFiMgy6f_sGp8J`ijgOM=8Y?T<Ac&hQp&d#Zd} zV(}TRi?1!Rk~a!Xb#bLdrHS`^L|jkVi4)ObWPt`8eo$}2t#Lbhx(eXx_qs+IDq<J6 z=lw&~*m{lLhjU2%<qW(IdF?8?w8YrEG-GP$xr)P%QtpUW5%{*v46X1XV7tzM{V=wn z=3&^5SP#5}joGiw>L){u=~Oyij<IQ8u}QX(<u8t=xu#mEp%mVDFkdk|LQU-EMUXMf zF2ul`h{TA31vQ4iQfMH{U!2|DEJM-@HY~i7Mb8aJ|K$S{B@Hm2&3cEYFB==^4W16F zVgc4X-uXXaL>q%Cx;ug8m{~@l@GZ=J8y$jFV|~+(28B~=sHcB&fE=-&WlP^BOdFyY z*1t&Eqs>3^=8GlOwdS8|`)bZd$xUnrIL3y%8<~m-0Xj=9!g(BFc==`<GoSsc=jsvq zG4J*`$K&%(MK*{R?hKP%W7#8T-O6UNldNXT=F)FGFf-gDb|#OSVG7ylfADK}ydtRD zxlSs(_{OBzq{r9f@h>RKO)9R=5d`QePNVRE-v$;p<~I8uP_?j~33wCW`fh;(v{5#I z`itF)K$fRoA-o?jT>iYE?5J7=fD1wQ;Zz`C!ha@54weEqfBh8(<X{(|!xyXH9rs@% zmHs_d!&@%(oEE=I<Z%PCeDvjBJbVnC9H#e1)(^HR7?i&jyMx(67rR5HvCDf<5xH{l zq-n0kj@+$C-K8y@Y!uwLt&-Dz#w5?IY#gOO^fvNb?s2Bu6aqViB7eP0Awbj%kPz=` zgVi$y-I}4ZnIuz|qV&zsz6zhnR;=6O>oJ{bh7Fz9TuG~N77QCl{dRmw6@De|u@`d! zYNB0zd1`WTgxW{WZZPu%gG>W}9Er?8b{DuKs@nd()rmlnT()k5Y{4_ASMZ7PjG^rL zo*d!IQ>~RPSoJM4920bubLrNBZ<>5au8UO|mTjgP&7d3sjEoqYX$DZ6k5H_^<V(*1 zwJ#^00iGI-qcgXqnB6ZINo0i6H*-!>1yt<&9Hgi=9*1OrkBwQ4RfJE2oL{nxg}+^} z*(V2nBk4dThNpx$(zXyN=7;T?0B*Y<G-L4826cVaXivqR8F_RBoV=}0A^$d-j6Pa> zCfA-61+el}_}te2c-u!jGp#O^<`#AJA)lHD>F>|(0TQ8Kw&&Mw@<5#{@Tso{@{Euc zw7}&6SrfckS>b9qe?JkzBl_&lFA0-~VPC@t0f(Aw@RsVMQ<{2{iXw}u7&3c{E~ZL_ zZ*2q~O^FO;i5!vjh|K&+7-|droUJZ=>~-Q;mwuWl-e8ACqE+#YlKk1i<{A#2h6Lpb zlW>!#5KR@wdW`3qKL~U??j2s=2R_N5w(AEKGO_3Gu-O$3*(VfD@PPn@<CoeI9wJq@ z-*r-h;AiV)V9&G<%T)80^v32ezX=@WYj|AyW^En^lP;3<Gb8DU5XO#NqDt6qN7U<( zlGt*GPPAza5iuP@tX~GQ>%q!+3lF%R;enMz+yMm*oxh_5r?EH!W8k+)u9&VciVIE5 z!P>f`R|Y7j0)_dcJt=2egWy*lU^t5Tei!>CJHBbBH>=G3NDe#Jmm;fzTMi;HSQq4J z^d97@(VqGHn4OCrhRYxwUMRy-{Mf<z;Jt!?87lUAb&?-Z!&{gM2Da(Gzp(1QD$4%3 zKN5;ejHO|(rk_dM$&dWgA);8xS(-Qec3Uy`s<dTvHQ%O9<MihO;N~}>(}Wcp_ty&U z{BW{XBC*TzfPv|pV4Wz4(d+*A%y79zQp|(S{=#4UqSj<v1}j0NISO%qm!(co_FS&u zc!%HtrJe&P&;xkT0|0Y5QDA*I)=l^iB`#*Ac`l^|4)wYI(H&T9*B_H6gdPpWy0jAn zaM9&6jctGF-QaOA1*!&}u$m;MVr2$aK;3BT%G+w}JF7i@4Gp36dR&!Ge;qRhJE28t z@jkhDjqSlz5La$jGt^Su7S^Krb~$v}9NKjJ-sS1>`p{Mx*MIZS68{kV6mK**q^%Yz zBf*mSikTi$+BT~B)qf6b@AT(faYE!*Su!L#?i!GtO(LP(hyb>%yTUmw*r`4(h@R+} zvZYacn3^LhH|r)fz)e-cr>|Ta71w~!X1T+Zf9a~e_+B5K21s)IK6swUdHyvd;t%I} zG39<3^nNTah-FT~!Qg{t2{k6@O~#n)#yn+xp4yQa22mLX8Ta%L&GbrxtjxR}ovsc! zr8Y@u`eZgIeq#O4&8=zdq+o~o6{cxDWfv2ahp;uJpiO;m7vx`rpX?ECiX3dGk0A7B zBb^R=?C$P?^+AR{$u0vY4<ZNS1D&rH=w7O$fpQDm*%4*+{S>jh0Ixzdx&&6J`nAV! 
zo$4@jq_TKS2GRB0k(<-dG-#|_yaDfcys-}Fxigaj`riI45o1b!>=a7lBPP==cj!Zg z>{B1`T@Tg1^J=^(o#kNB38Sof#4>JwwK<onSkkTZ_k7-0M>97;@kVr7-9ty?k*~Qj zF4{&qmv{GtIs4gnl)v=m99>69#f$1R?af~4zrKfxPADm|&bX~v+Zmf1dEWhq`Frqo zpe5{OwrK3Q>h1G+)9^akINhn&*!%JRxM`r(w#wcPJp>a+9J_3Gq4U)q-yI=x49ivX zH=_|2%m^5B)u|eMp*T$vGm=o{x9!+!(}emxC6`$ChlV?8WNDkRN<!KnL<%&Q&M_5a zKh2HLjK5y{2Y7B!v0Q|z(xk&hzwVzc=i5F?qPgqWFVR@H$}E}2RI{{&u@n_wqkdsy z!Q?}PXl3A_q%A1Tb#2e_BW-D)6eLlILvc|az0c0wb>z^%9Tg*dq~*rdGCon_a979m z+H!OJ7F4N|WTR^$J^xdD(HJV=fhYU1rp$eVjOMrrtV^#`aiin&W8@x&!)Fxu)|<3^ z)C+c4%*)_3f-4|ohly8|JIW%bK=UmCt3h_M3=0;kyGn{Cj*1fg^o3Z-VK0{K0^)6S z^^2h{Sg!dUhLdKvhPRrqw26bKv*mYqO$Tce%ay|5{5crP`6nvX;KGGa3-<4a^?&!9 z1~FWq^EtVpu}^phrEp(wAPVSs2vzdF9K(!gQ}k`$;WR|JX;HBtAbGIn_AqD8s86FP zK7kDO1v#*}vOk1amQUNktCRXZP4AjbqxMlJZ%<GCr~WNzpiqaJz>w~D;k1kJ@$dT2 zGwN}J4ISJV!!Q_nkPaU=oK_?^4ePXEZ=evdC+u@FHA;f`eV}~t%+I~E9;gQyROF#$ zLS;AJ91(%Yf=FCu(<@)NAYsf=Lp5XSKM`tSXr!9ZmrUkIK46DmWRYMG<C=u>kGyrq z&%q}Y7H-f&v?FfIr`^hwT}QAv5MWyR8Be<_4nRc%<;Uqiu(dJcXW@NRtX}!k8DM|z z?WasPgo0?mD_UV(-G5BPuG*N;^IPI{LwsLHVYZOI|0(>vNX~)hZ_y#*+fzM1a%tOb zacjoh0~T9&QWGfZ8~9WL_J;q&dBoBNP+W=Nw5nB4&WF85Gq1sxBZG2%555Brx&sfs z3)n(e@Bh`eWuPcBa1$ROmzru)iB*7cs8q45U$L!BZ;{}u&jtGz(}kVi+RhtC!Ld#e z;*nsnIb<l{V{c?S|0ZrJ*MBLur~V{-rL*9=piM!)^ZChh&uy8}FY)$sF-GX4`T9zH zaL*O!BaY3w39qTNs{3OYMIdK*u1Im5RB4GjOkp)r=l4K?m_YH#K#URWNvPzX!9Z{& zcbWyKif)!Xz|_~54at~ls}0v`IG-RQHeuk$w?O4m(_~OvPd^c+KMH-fK(t^{|FMb_ zPl8btHpKqegmC|T6ieV~fm%$cse4QRJde0)0=W&2lr5;NZIp}n9mZV$AI>I^1X@%< zSN#CQu@>Brk#yMsa8HJ9QllAgAEY{d5UTp;70_u)z~jPXcC#Xt%pw<0HUBPIqph66 zCYZhg;N050rq5gp1>cLy!U+2O<ly(<5a>~9{%r-5SfV?E5j57&0T~sDVQX@~UPnk~ z9Z>5PPdflOWC`{my1s|Z@+P!~3yC-0ij#vYk)*yzYi_NPJ6qk)`g%_~q}TDD@8^d8 zMsW_0!j`)Oqso3IbaXOza($F5q9{_o1iiZDr1IULkwI_tI3A8hJrjq<29;C}Hh~lQ zH)+tQmN@P5fF-wn0Nl?@@YOBprl&$+`NdGL|DT<np?0FKd`fLGsA)drQyeA>RLT%! 
zv>q54?eB&(!xxSmEv|i$i<h=^59c?2h3%goeFK4qd67-4-xo*uu$0RNM4dgs4nM4t zA!b$s>)LYRU(iNFW=aQHt*LI(EVB6k5Jl9MDdYCv>FeCc3Y^$0tl3$M=$H%o$qQFV zvjGY+3)OJHS?_ovb8btOe<lvCjWDiR*Y{~(SloG=(u05;>(Jo%u&Zo81MJ)0K8HV< zHrsVNpPOBO@^pAYGG7uXnH}g9pQh&Q#FE&fGgzTAf*{k)(WrkM`ArWQXl@>+M<3$a zx&zsh+XX`D!+8`HmMDx{b#M^6Wai$|B9qz*Y<VyUVoGNjk64-WsjVfAl-|2%8u&oY zwoE#cwzo~SjZUx6t>e-CsMlTi#WYWgTk97Sr&WGq;Vjc*zCk*)Z>HB^@!va*Ul36B z6i5jl;aBc%aTafJxU3*TN>hX~31kq*kmQSp7;Wv3aEMdeTOA*_8`U=%Keb*1iA7L- zV(|V#r9E5-7+*GRI;}CTnzj8%KCjhl90g69f1NNTe<Jq1EV;p%*_k@KIGGvQ{&Qq+ zYz@c4$xgyd^3M?)7Z)?@{~qSx`d`ER{3J}WW_A`XmLx13+@L{a5++p-2Qv~TO$B3X zGZPmQCKXp>mw$~&*&11ZE@)Vpg5J%_!AZg-X=Y_%=>o?>!X#?{!`?~7!N|mn1k|nh zpZ}sFXXI={!ovB_CxK4CS-ChXnmLKu+dA0Wnc2CJaFZ~7xBB5?<^+2EVdP>aW@ch< zY6d4L2=~88__@;GD}e+gK>>7se$GnUpP(|c;F!68p`t{uJema4dq1-50kaGHC`r>( zHBHf}ryduoI_2sseU;xZ-PMM>3)t^&p?gjLF3Y*M()n*#^DlD$VGS48fAEFp-|&T# z`Tv41?5zL67EaLdf6thIKI;FGG3P!00MMh4<UVI}3sMd2pOgRNF$p-~+LHb^WdDnr zf5>KM``=`9{yVa{{y&k;BxVNca%d*uWaRPxCI;s{EdNm0n|-ff3n!HQKMCEk!Ak^y z{TurJ^|k+@kCo>?==(<|{~7zw;?BwWe{ow`xH$gPeFe$xf4Z;#g+M7gkVre&|0B^P zE>5nX`SRaOKHPtne3Jhx`RD0+k{C)Dz{ys#71|b>q%zH*0-A6PsQYSl9h4x`q~Nkj z%A`W9RTCz0yzdI)wvyrn4<=&jQ=*Bg$f!NiW|}yXW`zM?Y$}DG6ZoPYmd5PZoG-@Y z(=VsnCcM)H9@m`B#Q*^$_2g@0!Z=+dcg~Sty1zD~l*HTR#Jr}_10blxc!c*Aa)R+@ z#HTB=kK!5k8O4)bXxwRVC6irBSQY>Iagj=n_s^ASO>9d<$d_rYp9fvCDpUIB(uFeU zBIqQJkpkj4p3#;g0`%v(GQq#%{hdIst@$!K3`Z`f$_|=F>J+%uf6CP<S}k>_)m)}s zm%qE0s)O>yzwpkSZ&oUkiW;%w-{r3&dd}Dylxj}`yI&Ee;_U1LA_M}BSCRswU4#lj zPi@#W)+D0?SGJqwrVE#hxWY~Ei361ap@KhFCp6V(|3a{JUYxR<_aP4^<MUOXH<`NC zt(?_#?(*fHhvJ0fQ5>$1f67mA>&t0U*rH9OVKfivE9u!87J(VwuN>-Q2r?ybHy<Lv zOO0dC?>FmC#nQ*9>c!CM@vV<Bmb4Nq&N{B;{^>Vza!JsK`*N)^gdWWx1ZNuDq;#ZR z1!!bmuvO@rwT<-mI80gb4Tt8}yZG8olhjxt1uRcn8CGn<Q*@;&H>RVW3muss$K<DK z9tEI|{kR;9Gw|?&tU7{V21O>lxbUi7Td;hO3D8$<A|@Kp$$nF!8`=m#tQI<mw+<Wo zIMe!x<;8S#76yaT$(}gl=!mzmD-7aX@iE+aFpaA?9&RJk1d1EEet%L`lhxRJH^(z^ zFNp9j9}hsm%9++3-9O(2%jsFRE-HC$M49-qjq?VWdXd8wN!Vk*KX_(%3;yL{3G&g9 zlO*rQ6LRNazx59shiyZ!1&_Fy9sf1PZy9j38euyg##9^n_mb^cZ_<x!?#wNQwz>lc zE{y-dy{t3Oj|fI}J;Bxs-koIP&x2P}lZAy4rcK?lw&`-JqiYVw6k$fA7%UN578Jtt zthU$H(?A#eTZZS_MXF>8&eBt+IUDcWO8@so&B1}DbS2(DC%8k=>bP13RqKIve99vZ zZOhZHbeig+v2P-(TJ|Y%wmhTw-|7jOjP!lsQC!QGvamt~E@<)($0<ll)9o!BqC<^; zN1Mz-A*}b&a-uLLjv!%`E?;UUc1k)HuV(dE+{zCnXB4voS1|Y!_0S{gkAR2z@g+J4 zsVH%^IT|l_d`={g?0I*cbAmar^<QVUWABDaAnPe7W~QdE?fqgn>L+1E{Ac%_1mL*& zxJkmQO<@f2Q6NSB^?SW}jlT#rN&atC*uON>KkABwnVI81l-R#pvpN6&tl9rXh5h#m z>%U*BUqAg!Apo#|&wr<q|MeyRMbCfiK)6`{jY{VFXSV`c6aUS|;J>KH4xM2qf<e@d zMUA^*FrI+%73@v=87EDVm3XO8##Uj7*nps5@NS2v%lF!a%u>5CAf!~px5u&5v(oZ4 zef8~_<H6$?DXljy=eL@K1aH<<g{H$<h1vVNxBiSjZte+TewS`R9E<_te+6X>AM6sp z_7YrK%nXc8sD}%sZ(d9Yt@-hHq}wrF{lYRvl1@8L;UEysTdt$#5wf_V(U_6bZ%&ap zE?S?zdn48uk!7;KS_n1oUO^!F>$ux7*PQ4dX$V#M>t3BVp{OuWK}7`%0wq2uF{MV7 zmv;v-rz3y{-O_Vihg0L5{AWL$cZ^?`oUdob#>>w5-($0+5AKlUFZ=l{vuW6|Ax!MU zJCLcF4XgWP-%`Fgb4h_=8pxqV3NMD}$=f4Yy7N3fVXK^NYF}n|9;dK7qmWv9g4)Vc z?UO?b<v;%XvN)?#e(r94@aJ&VeK!2b-1Bu64ZArNDw*Cy;V0zV`8lfSI#>Il-{8>Z zrko$Me=r@>pN>IiwuNEIa)MCf+J<GS#zr}DIT`VqeCmXOP~r)rM8*;+RgR8uZA)s> zEB?Vq)`vZX&1xT@KPGLeBy{^Z1tf=#O=?gHxF)0|k@;nQIgpt5i}LGoi{hY{(<(&z zpN7K_%x-byE4#*x658!n2<j4Zif{ORm1%_$q+(%Kr<iaiunW&{Pg8Q}l`Lr}cw6-e z*&GYnk(}eBM27T93pSkAtPq9{VXn5ZOng98Pfq=ze)@+ZD-7@=cO#Yg0qWwFCXr0{ zS#~1myk<R1CxZY?-@++nutx#m7TmJ)Q+9h#U?0vN#DigAM!yq6(K>s^U7e#tNd*h{ zd90JJkX!`P*^;o^lH)Q<G#_I0>GM$wJ$|3k^=UC`ZJy{TcW*VZj%EbIBrnAv`&m#% z<JmGDhs<HxYUk0yG#{Hy<tpz(A(|qZgIQWo*@SMO_^*SJ@@Cs4Aid2TnYYi`F205* z)=-L0<5zC)0mz2qrvR_=L~TzoY!g!B1vj<_*#E)UTfkPbGu@)$WHR9iGcz+YGcz;O zBok&%Cd|yt%rs%#VP>8%(+=F6?>oQl|D^Y%r`>XQ*=@@%OTArd*;Pc4##K6M;4(@Z 
zMj->{Z$H9)9Chm3Q3DHDrNLUzwBt@7$Dy0~B&{~9Qsvj>P|!KQQ>YWmKv7az&@4!s z8e#c=1)lh2+W*W}uw2p%6Ag!8)==F?+>~&~?28~%UY%Y%yDk&)9Ie`fq7ieW{Yy2V z=_o6_HMk#fEPjZzLGoT+kL9zMf5Tuy_w(>@=;Gz_<LL4J<LzbQ4zaZLN#l6=Y2E-Z zO@HE*^yM$a*X^g5tF)WPH7)$s9~`9=W8aK%Ji7QhbF$VmSFDjFK_FtIE={{SjaCS= zkOGv|f=yDU3!QmA9lpQi2l9eI_91YfS@V0^dAT)HSGJLyDAcTi)MwEim3c-7b*=vU z-QxZH)WuaQN2$g9+1Kys*h<LWtyh1;q?~^%M*_!7_5fojRIyXKLeG^pLxm`o8Qoh( z9$<L(hg>?J#p2`{?1O(4Q4@fOR7a(awFfO-?&6bPTl}p}KF#I*ERd@XV1Zx*$<*-O z=vpLidML_A(uS}z!~L7+9HTxu<3#cr%5J=yA`rug%Z4@h0A*HjA~d`{lny!ETs1*` zP>n%Evh6z}{Ec&fA##A_{u|hh9fL>$km&Rm^HNoW-4e3=kU-n*v<=y)fcImpsaZUc zlpG|jw3z{1G&4n-z&Kmd@THX9R1nI5ipnuaNhyTGQ|e!QkK~$f84QI(^$g=`nYTeP zZ-49hogCFLacOoEHNb@v0qjl`pMQ}9opEYme%)Sy?lRi<#@)QO9SriXLVHEm^_fKC zvfkcFajfL^jlh+82_0J-5{>uw2RFmddRf85SJ0Yz<1SJw1!$`yyMh0y<ccG}7<|uD zKfuM86{^d+ONAj;i!$0k015tqEGi<aS10IEVo|In60RT0iW?$f8J^qg!CI<y1TThf z=VI#u14!J~W6GAUdW{fgL`qus=?=itzMXaXI%r{jFyRtKT0|;Th#L%(evrhG4BRyB zUyZiS)$DcW-9nw^=`ed3O8vXeTt-~_yDfJfL1=d~*XmLed;)n#vzu7&T<(;5QyU)H z6}_#L+K8xg<4V)R>xC4dnm1=y>%n!fZq;}tRx6To2-QPJBMuM414y4InjOuBDQ~nq z;S#+f=CP?A$#6qG<2CQ9m?7-k!A>uL=9p<ZyNK{N$UMk~9B&Id{sk6rb;~x^O}o5w zT&m2{@Y^PpCMqXDU}m+-gnk)8mNa@kfFs@(y>^^)ztjme*w$nnZx-GxGXy(?_uC(= zkwc@GoVzfYW(8eQOAPR(a@9oD(36#Mxn=TXYh;s9<%_hzF@6#`MIW-#I>{gH1J|Bq z4T|q7GIS;i9~>Zofe31lT@lUq4a0#tS~k=>Bl-R)3&`_m*QY0CYn|)A@(1vBsUT&( zQ(wkEpExJU1egBFSTbecP|=XU;^1-A3_6t=PAg`VGHxCfVISJNC+iU$8s4X%65i>i zOexE)C=cu;K4PrAmx9xR*$?Nq|JGmwH}o<Kj_%jRTmnAM2<Pgg4l$`B-B}Xe78UsC z2a&3ApEd&;C_9G5fFv@isiCoFIiy6iPXqxqRh_jEDkREqyzK%NvoyExcZp009@GFb zCrVATc6BD|8moZW7EIdEH5O*8{u9>BDo9>SR;xUtTifvwRk9aLghM4Yy7F*9TCEL& z)91GbTa{q6%%eDo$g(9GutPWYFy8&hRgK_lez^vIYTYvK$vu`M$tcc9(|WXo<W=6q zx<+~2GZ<z-%(9)ST*O3>Y{Z|#`DW$-Gvi`@DgijwKt5stQR4@YL<_un>vOkE%`sQw zA?$kUa64l%L=>7w)~V<XBW=rIC`x>>Dp4|ZAXPmwjZ>UvM2u#Aj3(P4dp*k8U?iKd z8dBz<G%lff6>&)bJKC4%oTwQwd4)cBdbjsJyGRA5OAbo35E_Pfkz*Ik8-LqATo5K| z5FCw#bW=Q79T-o~oP^RUNJ7=0982{LN`c7X#<Cyv=8P!T$60TICWJ1zz}RV=<omb7 z*ONw=^Se&0LALYYj9gcFj(hVgL6;IAnflA`(~9dI<Jj<<DirV`9`THr9U}#C9B6$r zg4a!MU|($He17p5<cso4k3(yC-e2C3buh)L8jE99$PKt=&S4k|V@SqFS*qI=_RzC> zqDWD?)2AmBCI_SNZ1hKv!SE3MSumQ#p6q$ys#rm7F0U;R3qHYF{tRT>pD?hxF~%04 zx(FbHI1CvD46j<$IOcm0kc{`y&9{y7zC`(%#U&TQovNk`0n$eyIiXsZ(p5_hH)xQA z5nc8)bA$-UKw5vdcUn25MyDQ@Qik5d78eIqRdbiCE3b)G#nNWvPV1LC?m-99P1^H= zR3;FAm4Jvec%Sc_OSYo#A1;m#0o)5|ux<EY!DJ=Nq*?GEbO*Ng;39eB{lpJx)s*5P zK@Fklbd`a_Oy3O1vJgR@Yg;S8`0vGqT<|~Faf?s`$8Y78Wacp(4M)~l-GahYgmxCd zLEsW>TAEiD1k;lpWNw^(gE%UXq1!C-a7vOH?>3c7PW3aHstla$pm6C`%KeO6Q!*%` z8v^zV7%6HuTg4y?&XcF>VTn9A1_LjowB>P3FJ9c<@6`LX1FyPHog`dn0s66ddT)OH z#d#2CSP#O?$&t`+IUXMeIEB|2GwhYWCmi7H1}c2jAEn9_s7a<?A0`OubWina>}T)0 zIZX2;f2?guU44txCRL|@lz%M{F?lVc@?Ph8IlL*&Qa_%+pS2l;Tjbo%<(4Liud7mO z2poBV)t220u+ENUgv@oyVE2UGd(wxBq@??8AKt@1qZ?QUHO4-YFJwh__!)_Lj{s&t zR_kEIh375TEkm!t^>(IzC2z`2&0$_BTHhGhcKKi*m2EecmngJGxryW>S~Q=IAX^Fc zdM-v5KeOhd^N}fSrXU<)on{n*QrwcaW+6(#5`1>y1}PNMxkX7fpIoz32p{aD{8EgT z3$}Ni<5L%)YsIZD?!syk)-i}=uvkV7yVKRksG7b&3WGM&NHYN+UrC;?5ULIpM{}VG zdVeHVn%i_FC!8CXjoNru61`hBks9OPiDLc}4&kBUzC!9mhM-olczlNRJ~`Sg9ET{U zLKhlB6XrxM+l;^+3{K3+qSD`~SC105;5%C#f}ycUA=u^en)r?ejkTc}&(>Gyb&!Eo zyasQEHChHJkTm)Ro$RB<8DtdZ+e*1_%ClNh?P-u_UeesRl|Gq#6@`O2MNak}e1#+@ ztz2Exo#&dLO|wmC9K4oTp{2K5pnFiT7}$*7{w)xc_U>r0MsvtzLn;2M0jJc(VjH>z zDL&Q(G<qDVMb2%Isrg|m8taPnh8OM6e|RME3N~!IYhhayLki@1vu)U%%Oi)(vC5-- z1%yhf+bX_AmdPq3_moBqrNC)|;E@*7QA{C189c7Oo5J=?i*giPI7IeP3c2Ry^!C1( zad-1)US0wOa9P(V2++hvo0a4Y%DHytm~^mVoy@o|*4?0|r>_(k=&$hg6#AD|@VRC| z#S?sKNIgo(9R7$MP(V-5*DG)$`NK-Lel)1o*=@c~+~mlgAD<xIL;($Yt&iChBX9@v z-s`D^is{v2L266PrF77ACDoC^_e--xdS4r(1qE}HmmvGa$K{XV>I&qB7u{$2rtZze 
zx~>#{ODENfBGBwTRz3|XHtWDzL!-rVW@H#c=xgwz*eQ2yt5z~e-K@iKN~v~jtGq3f zF}u{PeCuw18rFu^1^l&*-#UrIlIX3OL(+>FBDH&x#$!h=Vm-|~3B}}|`~$T%Yl`N? zu|}MmtuxA5cl-d^Cc82HRE$lv4)ngu5?S>ty?ow!<S*nG<*JH%yrD7o^U8&J`*`dT zKJ0ay4p5sUCiqPA#R1jj_6f0}uri^w2_~Pa{-s{Jtu@*@-1nRcm}Q{zb|Z^<%??4M zYg8x$k0AF%W#l>;(aCY?X)jIF{PGV?Ny9QSbW<=%|Gng#DkWBF{yb4$S;&JmiXpAw z_!GRG^zQ2a1qAqyE&Pws%)-I-e?b5arvGI&{}-rN{0}OxXyp74s?NmxFCd_7_S3=D z*~IK8ArlA7f30c}hc9f>-sPX1e~fQsScWg3O;$$24(^0H|6siwT+CnF6`A$^F}44P zKlo4G|3OX_ogGY6%v=a{2pJSb#lMIGGxslk;6Jp0@V|GFfA3O+djAiM^dI0=<R=sC zf5`!UenJK@cNYm2mw#Z=f663&G7~a!{!1N5{A40z`WFh2_{s8B@L#C*pK9V?gZK}O zE6zgC_%+=B4o#f(OJM$&Y7l4o68=?#INR4V%m2K^{7;pCG^qT=GyGqQ<DU*l*|D&F zDdyiEd`awI(!ULUNgQ9&zkvOh#QD{w|2D%QZuiw!&i`uotIhwVy8mSUkLv!1W8wU_ z$^S2oMcl#J?w>(A+x_Q_FJ?vfYkpw=Ge1nsTwmQ_VrKbL?f;ry&i|jF$?tBNxeLyk z;?-qJc^(|4y-eF>7;8AIlxoiCCLX`WJutG}fyXi(Ec;#gw>2FdM3g}jqg}tx7r&3U zD}m4D&v%1Ez~@`7K<oRR!ROCUno<G3&m!Iq@lXE3q|1hpR>bp_&sPG!OTT8GYG1$^ z&davTAE!^P&1rXy=no%pAg@X6lM=z|?B|0(&6V*|g`c_hXUCNehM93s?DLgGmD0Hq z?{+tSy{W)j%Sxc8)7QS7y}gyadzC4H#$8pLD7wfywE2&#+*OBBtBFItRT4K`fX>m= zL@pP~fe%UH+bG`gdMh(}%#~L<)@@JvD&f^x_H%brts9wu!P;^wb<EY77Wl1iHOD1G zGu{5H@-}k~!xY`!(^77-qaT+8R&VM=%8i#t?q@p0uO8l(-`f*FZ!AFLc|`IJ?>N6f z|5dV2kMg^>7kf?Mb|nx9xm^o<rOvU~+^%)caosk+^X+Nr-M;o-`*;t%YJK;{?%95P z;CS=h?p;pFt-adb_dW$aabNls(d~PSOduTNyz)+SJRNiO9`OqtFK^@$aBi<Hvs`;W zX=|ChzC6Aer1=3u-n3YRwsJ?jH~dzdL%M38(sNVCfKR2L`g3n-;I}<n5!>@XgAeb7 zEsmp~b3nakpKgQ6E7i)c{XkScJI)iiw#PPd*q|d8Ne>_BHMLKLsJ2}(V9!s<bCdx0 zPoMij7}gw);X_IY9Jk-gN)Xt!&M$2QA%^=8(RcjWR8U{J$4W@pSFW`;X8VD?Zoe<? z=7+hAbeS#z&(iH1A5Y3LxAiyIPTfqy7LVM(VLAgm8~)~3i>9_8MOX3b^lY~k8PZ-3 zui`(gGNqTdtj;4I+%i6W4)GUk2}s5ht-I~oA=oy8GLm@<yDsxefa3zr123e2yy1~- z--B0@Pj7yq>Z?{plv;&b*!4eo+M5$D2grvgtr1F&{A0=M!duQ8;^Ns#z6*N~wlqQO zCtK>`03#hNRU4~qg2l8kr*$lr5IYOO>J5)=z#e;3t=~f(-4xDKXKS&?zHj5v23||~ z*l}kns4d2Ni}^5c)7mD~&e~xB5>R<=*EVPeM?RkNboFzcDqB3#&Zf~xCu^3roLS}C zi_OP8JiB%rE@FM_;I)h-15d|pI^t}7eJ9JEz%G}-T4#Ol;BDL5mEJz;t8k6nAq`Zy zQ5R9q{9DhqS*W8<@~YE_9gES2)p`*YvpB0rXix0fc7?}bt{tlgfowXNz&hn}V$1CB zmm0gK4JVu-&qli@N1V-N1L+O%@2!j7bZGX4hb%wOy_pMPTdETRR}&{?3%Y6%&(>R} zBF|V@D8E`CWzKZ$e*cJndQjt-P$tOrZW%?LtYKY?<t^ZK^|f7Z^u6Wc_dP%}>#iki zb?-sz&AqxOSte+IALaY<fKE62yUgulH5U7+ZP|v}i~79u{Yha;!-^TH#lCW2d$DKn zvX=OYV$tWW%F9Tt)erdLD3_bFMtu^i|M0l{2wuJxotxWegSj^Hl=%$&eZOy!w57yh zQESjb%r>_@&LMqOHsx^dGwnMp{y1T=P~&!wN4GsY*E9PbbNej5&5`iF)@DoKS+igp z%~D|j!6UEUq!XQ>n=ZvR>2NBIy&bi{>cU37zRpqq^&jitl%Q(_aPE()8v}N~t+unW z#p@xDdb+WhNS~-U%TNRGj$}jHsJl7aM{2we5q?&7)$S-B<wg^BRC?*`wRF36BODVN zI2IQVn1@Vb8BI*A5xuLf`R^_t`VwKsHDwzE7cV0=wKDfF1d6&5TNZMR!Aq8Nyi8uq zMjwL(<_fawTk`|#8UQwiWS5Z4JCzq!taTI$>4Ls4y<MOZ&WkpJ{TJ`X3s-K%_Mrk< zv^LXjQM4xG9GO{HHtvLE7I^QzlH$PYYzBt`<D#p_J74vIQn{LU?4GHk?uh~FDK9F- zShR!cdBgdmYU~b&7Y|z!KQXpL8``VG+4uTmK7f(^u^PY4#-K;PCd0bTDZkhRyN}Un zDk`hZYN+wkH(lrMj`ei1v$9+d9iwhwdMoiM|GLbg(r120&80&xD_%AIY3KGuYE9us zrw^pldF5@oO3U=$DFzdTqRSSS*~Y*!i$l<`0*gaWiz_a_K+eE6SHCAJ?N%juCqO0P zQr+yytPpL-@4Y=3V>t~ZFV2qW3Bo1HX~IWb`9;@?^F3Gs{F@5#t_rML>viw0LRX3U z(6C1Ip<fvMk0o5+n9O<#Q^b*e9(}j-&~*n^pBM6TWZ+i;veaFh+BiACW#Zc97F5Ge zAD29w<&S?QHu=#$3mVcmUivTG){TzIY*SKhwV64|wKKj~6n~|gw=e#jUj98=yUGZ@ zw0gQ2?Vr8W@@?{Kl^k=yC=Pscdm6y9cX^=BUGuYCS9h#`!f6YIX6|I|<w4S+v7Hs@ z+Kk%0{L`~c_nLmZba4sy%gT3qt~I8}ej7G9!_Q!c;Qaf(zE`H>JZXsJSe(YWikSnL z7dSP&wZLnQhryq%t~bm0u#4@*##_#<6&ja5Qfueh_X_Rq5(kL&*=?N5C-j!wUbo}( zy%wyu*^Bo?qfZ~PWfuu6hn97?Yn2OVp3NmZujMp+S4^*dd4pb)X*=J(--$Bk%U!~R zzEwLQh*)CyOSm?A;S~TLh}lx(SN^FZJnuy+jlR|e55RZxwHB2VOZgB5qn&gd7$7rC z8sB^i@n&GgE%LjStk@+^3^ikPVUW`ioqpv=A-IGEqbJ&aE>~n*g==;4A0>l?b?d@S zx*Mj^+}*cOp!uf{-@r%K7#PoX-K(SbK!W#*H)pMzWy}yu6<|}VZy&E^J!~lK!h;c3 
zU-znRd)vV4(a0yKZRifrxuQRRF>u?(C`Ml6u)+^r`A#6peTYZi+0y;%)7o7t?_~SR zAh+!1jGoL-bqwEzp1(^0;@LIl@w$OBd&oPtHGhVlxDDU3#^79g*DH9W)zjQ%Q}oHR z@r|~1@D8>d+{_axK@9>Nxi8_bM={DgF{}e&l=5q;3zO5Q{heX3G89|ecQ`i9z)=Q~ z0AvVpo6kM&Ulmw?gr(8@<&<V1?4>J-%!?;yFNj3R@Z-YIs32w@;WEQHWJ>pDFQlHh z?YI|zEgxSOdNpR)FH$%CQQQsxrcl0e6i~Zb?sPaf6SgjYESe<Zp|O3^pj`SAC1h?j z^}8@=U08h!#dj<x;%<1BtQweYtuV3MNPk7eugiCw(C&7K!pr;9pI0b33T*5;>|XC# z<o5H8`81Cwt(hjqLEj7uwPzr$^&pndVJaZp?Xur3>F$1b#I@obYFGLQx!|u>Ov!rA zZE+~gN0-5>r2!=a!zzX^);cSWYhQCgyOMp#%cjoS#QPMOE`=)Vn=3|pevbx5$c|4j zDi%pkGTK+-cwp5ZLgy6{1Gil979gQ>YRW7kRKH7gFbAbVU{-1N+$n~*)Vj^gJk`NG zd<=i4>X~2<p2Bs4o0@FJm_qi`d1f9vhqBg<k-hSk0za;td1HgQIs<@uoQm6lzdUZ? zwF!tcrn~=RlPzS+{W=sem3k|wc)@*oX_zaNbay~q7BiNlo(vsZ$m0I#I#g(LJt?PO z?(pLxMnWaV_>e904#El0@#O1Tftw>f5e=v~ygt#EQmQg=7b#6BxK0vHENBbb57Ah% zj@o<EZ<Qw#a~hzd$-<fXLyfe|5kE;Mv&}kRevrD+Z-s;ZYJF4!L4a`AE*0&LQonle zWRt^@DxS#|8GR;+u-!TCLQRFGab&l?rkP+D60Vv!?WDA|dtGMU`yIn8Rp6jPu#A*4 z5`R8JuX55Y%if9kX&_1E9?4|eeEJ$SidV+6F=x5-%%y^O@wk*a<nL@vx$>^%mTHAw zX83+=2Ur{(B*%hY2ff`<Mb8%y;$AFW=*tu1QQT9Srg6%yyv)Aasg`SGbH;KWq~|ch zF;!y+=G&s5&QO_8SPyv2FRN3S{Yn5GM1%1(2YeANSpJ1kb+!4t)eb*MptF`0Qx*w; zippp`&fjj1jHni_pq|!&B3E_h+Cqg;I8v7kv}N6X_BUQZ0}1)Qs_hCKHetp5n>5Xn z$3?YWmkuq6hg04^;?KHHxHUqb!|FubUB)9k5j<g}R2^Mr9h29iX$R`waJsJF+7nW_ z4k5VkW*L;*xpNaG&hK|X6|qU&Q$)&Bm;yO}Y`(MUpgL8EIT%x7?5_`U3TCc{Er#vm zl;~Jw`~_rEa}Qh&JisgX{)KxtT~$z@9<xn*uYHIZ;vMiU6{xi_o6b3G<(Ige`p|p2 zx$2l4{vgsaU5}lf{5wUcz%3O%D0>=`7H1|eVPf7ir@e7o#N8lUPQpV!k;JSNPp6+O zVvd@@G*&~AK$<ywlI{T-XZV5z^9_5`UsCaT%+|;B2vXAX37seRcX9BQj0#0jdQ;`V z**G(s!%_BEz~X!SRwsve|96`s^f%zKSr>vQn}cp)`+Iu#Gr<Th)!W%s&~FoIgZNy+ zSb}yj$v#BW9Smhhnh-ofXo0w3ET4HDEeT=l;m9~>I$dGnq950W@a1V~u(r88XGk#y z;Sy1jQo_92?*YL@3J&<A<FNEsblH=l#e}rMWs)I3=FH+SgbkBcZIOvZB!9Dq_xKXB zlL$L{Mk8h8$CrU}Rb7OSDw5HG7g6}FFFptdhl{FBCrYXXp-%ZHWe3|{gfMa$z!k9i z63US2BX0Lz3QQ5CFSqU;LvroC$X?#O9EEpfD?&W>NXK3^eso>=e}ys=Jq@B{o9yM@ zzgwioi)*LqAs7o*Y{re>Tw975T<noIbvr{|t&YZ5!kGH>KWE1Rh#=z=ccQT$zp0N} z#MEtVJvc6{hsV)C287?MqlP23-2~k)ZF!mS19BgDbb>X%`Sd;XN{8EHK~P7R)y^R% zaBeGYAEG`OeF&j<8eD&mU+dts!|UpXe!l`VdeA>d`Waw_2dgm@#ZEgF#LDFeI0NH< zuHf>_&XIp<{B6`NiN{DX`fnG>PZk3r5#T7*xIZ%N(!#g(1X<05F|zUW-d4OLILuVj z9u+eo&_<S*4=&$SLtfUWC4!e@PLe_LV~`1J_a2E*C!+RG+^`t-cI1}i!z_t*_AG^4 zfD<;yx|W^C`zYHutGaAWzq|F*YyAFA$5O1W6aSbSFF!Cgs{l7A_W&2LY{9$bS5x}= zrVr#IC>W9Wc=3M!y14w_+qW9_Ud93M$tXE)bYEZW=Xe`;g0CIQ2Ti^QA0(e|__l;o zv7P`%gVRml1Kb0Yt=a?RX$|#vu8+I-_CIg*jh~}rRk6%h?%tFCEWlqRdiGy@mvsIL zY+wCz^=^yLKttcThr0G?7cIQP6Rdlm72KNk+NK%6tKW9@>}p6U-IglLX!D=f8Z(sh zqG5Wu_x32(V@I+*wI^gXvjOjXfSK9%zfU>1bLZJM+2`0WDs9_pjp?_wzl=7f_WPgB z0<MU~-H^6Jsmat!AYXn~z<-z`1P)s)O><tdv;TPT!;<uunCnIW%z;o0=x!Q*>& zfY{b8&YG@ZfS-%w%+<}%jNW!^n&Zt9bzXS0b5)WKV-2-kxpr9Ufm5x=GnxZSMXn^A z-Stp*I_A+is;Mbg{kPV%zAGu}g~eP=l{>F3GF!q^OOLdFJ!}C{of)*X;*9StUD!>1 z)hpar5~HN&*bRf8b%PlNnQ_gPBiOZdXIGyPm4YCh*nIN{)pV$q67>?yO)B4W?CZ3H zum2*SBWTW{e-UpT_}4s!?h**<Dx)xI8acVBFD)=?ogKN?PpvA13sHm->NrgX1}(u| zFKz!Ua-JQ12GSX_`$OLWJR9F%aM%gxCwBP|Sz-(2AAgSf@U?pT4EQwoqU!a~{jePH z;d(zQsj9`}Yf09<JQTY>&LLp@Oa{J0RI51i9B)dKwf@}dCj95ohNx9Vx#cq<wgJss zpcSLt@9=S<^#YNX+I?H7=Ggf@58ti|TCJCxW6_rnxrD=)c_ewl9r?ALl^})VHR<lw z$Zr<N<tO;@R(96zaHqCRZS=Kb>fL>}#R4{WCI7@eax23-fU9kKtvr1AbmL4#d<Spz z9@r(oyn?P_eS_N+n0bfW1k5bCaRF>rQ|Z0?erA^YaOCD-)aA;1>i7A2mwTe^0v9?# z^K&uc@EJ#|G(3L|SDVcN%MxJ+dS3^<@fUh(lK>X24rvIYgTg6RUHM|wLl!7S$K{Cq zV#7L%@i}WlHP_~zVUIJVVr@D7IpNDfF5iOQ&S3!w?igo{EoZGQBN_!gmuzTXguzy4 z#f6pMCvS4nT>y0#WTJ(px4XbV$n(51=c*i8#LO6Z3uOu=*m}0GTy^pWdmwqZX*`S> zMJ={hSVUXJ_)Mo-ecu~<J#tL_4z6~@_Zn-cl?hl#frcJM1=B;u%cld(j3l(*l0}Q- zAhGnkxYs9cWrHY9J|0l|G#)QYDUkj@fm}NEp0I?-AT#r(AWW5geLjU%lL?u_6;5LJ 
zVn_+!^1_U51b<Pga)6XORJ{PuDeqON5TNQp|HL8iC!l0sB1_Fr!-`57O)7BXu%T!n z7w&PC`71*8nKFNrJg~4_KY@m%w?d_j)k;fBE3AHF5x$SEOW+VB=4kX2!&++wlN?M< zKDROa8JSS?y$p%S&A*pH*oj+`6x9QLpa{$;w}@XCw`v1v=VR*Esp@#fGmvc~4A{Re zW?a$)Qx}C2*c}6MS1zsSC0km-4qkBSCTbd%*#I`xTR_C)$w|EurO;@7O&rb_{-?c- zhJ1%7HU?ev(Dz1U{6$bTa-<N%?i0C{&;`oAt{7$LfiDZ%F~EW>7&6C{22{Wd6E-rD zp_pw%Uk{Pi804>F`V{-lz;8+DTpYP{N4FsOHAYZ7Oat<boo^A1(|zB6%sf6Eju|w8 z?feCkr6b(Lu;}dn5vPj5+aJWGr!a>h-wJC6vJD-T5iF60Uv;65<jSL+j~qa!`38}q z2RqJIr+7=*ilJ=;7L#AbbB!TA4&pe=tLI=$22#fX{nH?{&H}zE555oOQNQe4CsGU= z!Q)PkJQ-5^$W4b!b{N{L<>ZAMI4?rrLusv0KXDxTX(O{VLlQ)StdIQg0b~Zqsx$^J z$|4L6#0KzmV*;YQPrj5H<UB}N(=ZjcrPv((n?I?UCOI-0M9#}{c~dbMYd#%XUWDqX z12dece&Gq0J9vE9EqEGzpfjyMdGMQfK!3}zmbw)$VN_cz<V^5*CTIpHqbZ;=cTFCr z#M4LQ)8-Lygf@ROrO6fptmkilWnrC=IuWNY;)A@+QQQ+o*(tDRX@p_cwUy~0*l%lk zOndkvL>?t`xL9jq{xl5|QB{smDNf02pyJA)KM2uE@*c4)6rQy`yI7QDKq&c6MZ!+f zONilY>?e61`}PB<qWVoR0N=zFUA}T;x^~F^{iZPZ!h7R`!*O`K<?IyrvT)On9iv&s zS>a%=OtZ*Pd=-s!LsVV_8?IXYn_Q}M5lU?Z%j=X3BCA-O#$U&)W&t}gx&mWWhD3vS z5XA)G!w?GVvZ~LOywdmvSy0=^q*}1X4Q{<zFb7Jg@vR2OK=zeevlBLJYc7n%$rH`L z&+b^Q53)3@vNi~$s;9ikj#)hqUE-r4xMg2ne!hjm3ACtj8;%KfFTBmw2G(SI<Jrd? zJEehhIk+l#k6M#+GO)VaWte+ie>rWT`h4OV*LFRsenzughqh{frp$f)v?veX9_QDP zlbmod@$IJa-ky~<Y}tT{V9P*(auSe7EqZpV!oN<;vI1-b9aYh4?;E^EHSKo*Osp5! zfvXPBI_FaP@Vn@^4S|AxyS>cc;K@q;$Pk!Q_LL|{6IEM}z<Jl)Plzg;Y(PehCmK3l zoSk-+uPFo4yc2YUci>)0Bit@&oikH)6UM+J{E}<cmT@wP8bcrMO$R`G5>Z3XO8=cH z<;y)=A{P<BGNu&wYk6EkF1EbHVZBY63*ftHVtJjm&~7w`99?^O)Apj#`H2TmJM`?C z&2>+iS)m9m29CCb*_wS&q!w1+xq@kS++}T-#{Ya5KV<q1kZe;C`za5v-9k}*+n5!d zZzPcT<5f9XtHp7W{eDZSqj%Pg)kosajdl6#ZH;{?XFEHxHVdV~2l2{MySt6@+bzfG zRoP!}PxbWxALE}YLIWy3i>~~YzfLc3swuKKBwoY^B4@<R>v9)aP62nBk*iw%U#^ct zAzKwbf2ZUG@ZGrf?hYhF08YjIXR5PA4xpdzVp6uu(%CZ<FrEKSt#s;|@$Qy1fBFcg z+SF#<O{ZQis{`iV)q%}?Hu5_^sVL1FO?{kZi=XS`wP=y)v14qfI8r(uE2hLf^HCLB zlB-+*5xtQqj5u7OH=%qtT4GhUB{^|RKbFwcnVu+Kn77+%Z|N0SHK{O$!0~lwesHBE zCj#9)ZnjP`?B7L;LfSp{7Nj$}W^IPTCA@^_ULd$aG{#JNNPl^3F(3o9l2aZP33vNb zOf&aWHaKNof~Pb*R4_f76Qj%2LyZTj6>;g5b3kh5G!!)50Dh2|IYojVf$>tFCkJmu z1G$*y>eIEoMb3xce53{V>W`W6UDH&-)lWsV>(LVH6EMHYm!BQ>dY~j0^aOOsHw?pq zemTw%<*6_TS^eE3+;X{$@B>YrXdvhQ?KoCH)-D+=uMug#0F@N$%oKTJ&>hwOH3cVR z$Z%{S?N0u4GX5C7%)#0`#&<aD&DVm+f<Yxb4OUBf5QK6)oU{0kC)z5z)uc3BSVQb6 zyVSaxp=Hqob%V^DNzr`#Q6N=DQ8Sphu}#%@R=chE(L(DZY})rkCqAzhwXRx{We~;c zs&v{v{?||1gTZOMg}<NNOgFBU;-aqr2l!^j<g)exw~@#DKxU7jZ3ltnb>A3p36FzZ zAQp1p`f%iJ)via&*clBmA<R`FvI&uJ$ThFZXvwM169in`0q(KP@muQTF-0U8zrYw; z+P6hgWJS9!Y`3)D$&zvGlR??Cz49*xW`}Y0mXIx|nnD@Rx*K`Egjf6cuJN`Kdx@d^ zdvlLOUme^M4Hg@EIV?P34l%hY#P8_k&#u=nf5*QRstDSTZgJRh{twK7JtVIApd0B^ z>bu{WoWTYYODxZpXUqIFc5ak}Z52Oh79!7k?>!(6K0zM+K;Mx~?13X79VE?2t8(Qb zFn2}((&4(0w8{(PB4t%{C!|74wwg9SC9o+T#i4i<sde(eaw;t!y33JqhZ`U<A?)wk z{qMKEaCPBjYv}x=AT|pzqKLl*cSuDH<3QQD9`>W;1$7r^;7r7HPf6+3-)4%ef8hOv z*R3Oy2(P??8^R`GSS<@ChgnFV9zjqINlyltE6FKiJl>FeDymRBX85zH>#~XA`X=%0 zcSDB}9Nh1A3Fx|h<j26ZT}{H=^iM%U9lwVBuEiLUZ;CEs1NWqaCTGOzf}zI2wo16s zdN31MC<fK2ac)8>HvmT~oztqxMbvRO#imGYD_F>@=#EgrII`jXzSFSh5#}`YNBgzd zW6#qfdTcQ7E#j}*hE4EZ0GVwo#tm&$4AATzU(Dyp3v|4QM4%lt`RT&L>esV<BOFAo zF5LOC%qob6nM}Lf$OCO<`Q3VJBo79i%_+oHips+TWrU&Ei}PE}IM|H{S3R?d)t{XT z<tXF45Uu!Gc?1e~mDKuP9<K=1pvt{=VaPt#P%H|i*^O(}m8_s(78NXw<v;TcNYiZv zxFKgd<ztM!V~oW@yu|EmSh}DJa!{=jhgpf^!xeBnBmBj<f?EZxR~@d}5iz;#A(4Sp zLpzLYL-3=Kje6Fm0wt0hyq;x!1I}bTL)*K}Fe*jch6~7OFrrS`TX}l4rbQfou-qJe zuO7vaa&cg}^8W&+`i{tnkQ_Vd9&}k0y<_cKZQRUaISBA?^z6l)p{a02wi_9f_ac>? 
zAV=j^IC2pntBR#dABEFJw&qfQ;jRTUKaI**0?+JBWXJO~PB+-Z53bSIPeh5%;B+?P za%AHV$KWLw!;L?2Gv3^a5{*GGc8++9UcL3<r+zKa*|a1MQ_nMEvaP_2pnce@V;Z1i zimIuFuPw}34wNZkk;fwc^zdJ|>{NW9299Vvg-<SzkH)viP#q#m*BSxK88ed%+(1Y$ z?^XnToxO(+mQ_Hv;%g?GjHBn_T>lPpF{g|A*vQijWe7)f&Ze50+Zg$|EjFr~+H|LU zaC_|Z7dd@*eI{0;s(0`4zqa$`wM{X#&NaAM+O?SJFBbf(4xZbgG{zO!W@FD-s1N3r zbpnzWZBS&Z=#2k{Aiarr^zvPcqJv3Y_NY>EYax2;PvU_yuhJZYz|UWE1aTn>`s6F1 zljZl7(uuCJe2^7_haG)1D=LP6&O<LBNZXy{WBNXpVUd40hRTUPP7hAfVmnSq*ebzk z!kKEtYW^vYbDmctn~^2m6mKJ8gdPw0MNHB8&O-`@V~7WwCF0J9URQTNw<F#!yDj~r z5h@ve8&4T}$~7o=|A?bp;bjk!Sd%{QIT}<TFWp#U#H9l<;@}8afdY%-%f&%I&$)m> zw4!T5nIW61{|4M-el#Us%A;;*hbn#6Ya_*3-a+r5S$Q-oLLd6GxH*R>+KQFrYfl=P zlvYkjeRqc<fr_^<FMJKS-5_kW&DT8(f6gCn2#XW?JSAwQNDLwB`Ap80mOT7GQ~B$f zk1)49`>=GlnA_2vQGk02F)-(OfO`h^sl>73dyvaLs3^fHJb5uH;2Qn(7H?CP*kz(; zW&&sEU>?mBtR|eDZ!GznDg*bFTg_th?&2@2a$csz&`A8x@Ud3G@tI!CzYk--wVG=Y zZlZ5EdUCO7R8bqPZ5bG^koMIO@^4-$iOGgA(M?7J-8hb5#1YWgBCQr@$SgwATBxBh z!4<r6YZ5O&q{GX<+bCl-p2pwmvvf5Wo9fs*;O#vO_NDz$><D!YqxLt1R7MF71~jr0 z^TF*X%bYW--oY_3stcB5f+N9CFfRrJ)ewj_iJ*o%1c&<kt2wP`f6<|!8`YzC{9vj- z6#gAN)F*wi#~R`RkH%}Z^9rGe8Sh1WPz8s>D(v-r>81Qnnag04s@BMwXL4t~PekPY znuN^F!pr6z`)MnMrTXc+x(c7GFKjg57>`&#f_6Wce(BNlT(ZJgZbI5uqMWJVt5|Fd z-C*ccRr;zxR0VEOr`=eJq)0UnZpLUIMseki)3@tOqNQMb%WgQ}u2K(uVVGY<5kd85 z%2}FZVg1OTuxhTR%c$xNgjjy>DXF~7<w`Dw8lz_zr-3`}n+$R7BYBEOx4hzMk(OF? z+Om`&&6wvO`KQEKJ?Tk6Vi&za$FK5fiyTt7-do;?tG5GJG#c))t;LKRgdCW!t<CTn zHHv^YalLfoD70h>3)}Li`W(!-Q8<O?V>~mGC=E>uzV>4>#$lF<WhptGaX(FIzU~KI z*oUsdH`B)D_Y`Bg;w8GPZhNw6NI9mmfa0OjKFqK#*TYg+n+nn~<-f4YU4{3iYw#<M zX$B)b)G~vB8)13#D}@~2M-C4GHFH-?Qyo7Ls>W_KoygJy^R8y;2^@UGB@8UaiSHPS zE(d}z=DtRdDhel9hou!ZmsFoIr{JQB!TbI{;8f@M^Tv#NlvdYRP7&*bYJm7!)zjXF zh{XE75`RI(;3^?;V#9n1f9zLc<3OczZt{*l#9Tf(cwz&}ybG@^@epZEED`uf-t9g7 zi!5TIXwN6TxK>t?;tMwbub7_JE#{(dzm}3X`=^E1k2|MNfBew&;QesRre=0TudTVF zwb#4Tx{0LbB`N;6^1wT)2?|-rCRofbb-ItpQS{(Wo|L$wJXONSuXsHvV;WJDclaUH zKL_OBQu6WF+^2C^!l6a9q~!Cb>o$1c;}j|{#7CRp?xUXy2TxRFJn!onG<}OX3d&N{ zy`bFS8>n&a<56~n?n*o^OfO9`_AnBcaH_nJWm1eex&tkg<FlB*hQ-Cd9%Aha$yNZo z)UnAFuj8WZoSdUZTHzap24-83r0?6MvOwl>o{1>55{*{qjf$9gPWMPr3F>SpWP9#e z9uuqP@W{fxgB>tc3*+p)4xaIG@3|2z)n))^`B5*J(F;dc(g1W-jzQrHXOC1Zh}-Vj zbxb=kkEB#kQGyv!1>~RCn5N$$Pad(US}37nK>s#tcnkOLp5TcZc_L9xkwt=Dl+Q54 zmY@$ki%{t=c3Tos5l`sU1$xC+R$rgk>b;r}&^VIAb<H#m7Op%$%25g8o@H_^=daP2 zZdJ05%WeuT03m-FTrC%l7D5+5#iN7GvubNnDNlu{M(*pVFDuPyy3ExxzN2FCMX3nQ z2HE2==g9x0?4Ef<{h<qeToHb(^$7cmpQJmdaaFQ7+iuTSO`M}A{B1$kcF}lu@|F@G z%?@`ZwID@8=s^p=c+o5<gIHyCRO#m??F>+qq_z;jiy$^qwKV)T@6AAMZ3@9ofp-UQ zGxk<Zcc0QUL?kBRQX8$L4NZY-TzBQcO%-3QR&q+|6Lz<&@Wb{;<V|+9NPU5uivUiP zwoTTQIC2$u&KcAi10sx%gMxz6{A!t~o_h;Zd`l0BUDSm5%j)x*H*ybk2{-xGV`*)p z6|A{_6<DVgnBUHA<5%d%Gusj3MxW^o{Yht=JC7i14d*TEvv-eqIqpgJF!n7iVpL!{ z*JI}e$mSXNw#}Ndj!*5Xxv@?JOA8jFMBlne3&;Eg{x4<)s>GTWLfINSbfNoj6pbCE zj_%AJ;Rhkb_DvT(m@6Rz*ZIV7VvSe7$Q3kJ!}5r&f&zQOXeGjGS%?AZ`d9WwrGXNv zSePbubBF=YGQC-DFrZxQ1LS4}BKJ=iLZVsiYO44ebWz^TNJ4CxYU;fkU~<5s1;~th z_8Rr`Ep-W4my>4kuwf6R8Q$rLvXBPN2{E&|Fm*zAOC3etq}mQ)F+`=CA?|Rx@|brN zNa*BRq&VfFdE0(?&H96$rD1k^h^cBw#j=@UCgb&iX$9;g?JtR{XJu52&~*s~v|Y~l z2kD>%wYgg~NW(Fqc!gx_Jvq<sQ$HF-=-?c@pSFyTacpUuGAou0Bda`$#FJg=mNwa@ zvxF&&$x-&_C#+0sSia%hB37cPmwRi|<R`=#IS(PmpC~>fF}X$|Ak?5jAST<{jFwTN zhU;b#2iQ5m6Dv|d{e|)=t~1D;915_LYv-I<D>F`9jnJ2JE?m$JN?7#-&zsr;N6cb& zolGxtK!c^yF}Z#U7cwAnsd{fMMVh#pL@^yMjy+lZ#@2pId05B)J-P(j(SN|*g@R>k zp!J)F<!I`-oCv!bJ~m;1^HjX28U`V$xdp5bndv=o6pu;&%Ve~nP`efQug+u(;1%_% z<a%c#uTC8N4wTt&A+MJ>>TXp&wm~v!*+|ZkOZ`kH2wFQMRS-yu>|36<08%ml9}@!A z4-20%n?zN20$g}b9~750BF9)-{~CQ_#KSim25ANq7G7>gOMOR_k#Yu}QjrACs0Kut zF)sd$CW!E5xU}pmB-&X{)jd0FBH+<X_YC3357lWmDR6NQ{WU|C92c!6Fr0T$Xrmz5 
zVWp<PpS`1VV`!aKf4m{TPD-Ko&|o4D9xeV|1{WN-iVQ}>pbCVGjG7rWiyp<rFz=J{ zKN5FW2TRBcr~q+uKPOS#z|lmkm35@+n!&=3qkiPR!u;B8e*nW)2bKsDZlC{@MWz5H z#Ssz#@+}`U?g}4tT$cX#3>g4q+GogA*+5plyi}Z)^IK7Q;}1<k|Fdb+wXMllQD^JS z^>|jc#(+|?<Bv}%6%$aIydNsJb)Rt=p~6Q}5Pzi1nXxRK5kMX;L6Sd3d1+)1p+R0M zu|1rVY)U|RiLc2)E;Fn^a;lIGsWPnc6qXmjp7m6|%h5n^LVk#;SqP3)9Zvbwh;YCd zyP2Ut{Y}1_;TWAmJc!URH{L0yvJy(`y2T1|nJBN<$nW?3me@N4oJLJxW2p-0!Oq;9 zLy-RUcWEDhcJ=PWhgIRoqb;iI!qv3GhU%_qI@MW-o8%cO+kYw|z0jdZI4P+OzJkm3 z;Eag%M_%<ID?d_<(tX^zcd7zQZJ5njIC39J4Q0Hq8AWM>E^kGnjG8${%oJv5Ne|r1 zlLEWn*;dG&N>zypPXk(UXM86HOYxMg&g7V&J>+-2<QsKTBqt^f7n;O-MblQWafmXC zbno<N4C2*bj^9q2Zp0}3gd_7n^~)b>1~Ww(mKL{b2U-#w<|AiXk>OTkb>mDBM$Xs` zk>M_-@&{C8=!RaAWMd%h``OqE2leVj2Zwp^)T0I3ZzN=vrhLo9OvS1>=3vlD!2|cI zHYzrCrf~(Ws)`8NIbqIwuR*z}aO6ZALD~Dkid<Y|N*0OW@k!)<W^k#Ic&+3jx%hBU zo{u{pVrx=pQHd8MuI&;XKVJqyQDKXHiOZW;tz=RzLS<i0PPv?$7pUNgcf0XzASSXA z3cKss)4PdF@XL<)z{6y$p{nLxJ@%2#u_w~*j|!Kn$kX8$Pg|gonp*iArW%R5T70%X zJ#2^B+khmC;0a7P%B6&GQr>D3k@O0bSU5_HRDA_Cv1gN?Q%_k%q?+AYE%kJ13uTpa zB7r1C{D$asDe99*Wq~jUB#W{ZWGOYZsw*rL%G&s-?#>5ISZY=qO-p&Z#kGI*9lPz5 zbBh<^ChQJGrZ2(y5>a7fRzF`|O7{ClDbE1RCYG^UR)-MWr2^Yb?CQrz1?NR-ZI~Ay zXF~t`NW~HHF;nHiC5q<AZx#>PjoK}SCxUK+O96GVu=m-D17+6UI)cskDB0+EVYF;d za(HKIokiS3+=}x_UEP2NZ1@3DOZa206iWXmb+N-;-hA~PnJ6*P%lTLiv!zB7pk=rL zVx`JDFt|eL(r;?n{4<sNtRj1<WfM~RrNGKBdc6Vt8UuEGuA1v$D$n_O?M_B2sfa5| z>NKAMt#sjK-3qx$L2nyDs4xL_+^X=m>i8UvJOkQTP6F9P(y-)G#>$R`D#8bkK$W66 z_MQAh?^4rk+(x7=<El@^J8=W}E)K(LL(Z`}=2P$KlW5rX@kCwoRsGV#V~_O`aC}Zs ztvalav24o}{yaE2`gcZ0Y`d3^nizCTk4Gb?coCmv!o2zkJq30C->GHEzNaD+Fg^p= z1E1kTx3Hez1JE}jc-?qqMn(<le$ijTB)Io2zs0B9g!jc5v8Qk)Q6wmzElK7Ws8Rfn zs{}zYqpQ+)V57-zNQX-QVnUs1{e2X!6!Osl${^uNy*!A<ke@?VLjY#VsC8^tBJn%P zu5YlOZtS?0ejT6v?Szu%xSNiqe%5-pov!|=^wIV6haPYN(~-G;q>wD~rW58ZX{O{k zJVK)Uhn#Iy*x)&fQvt8DvoIdA1%4LE1X$~{Bgx;g2tLu<x*J&EFpW}f*{;fB?aR<O zqp|~~Q6%=E@oe(2y{J0H6f8<#wWa7E+ub6DA{%KiUPbuEqz!r-spvvT19WSv$}o9< zu7A`by?G}GAuvqyOQ<|EiZte{RKC;y@$I12V3)yRCJ(ZYGmbHi+A|a}^!m=}8AdKP zPrLg^kPS|!Cge;2y770SmsK(5<cJb?$uJtaKpAHc3>KwPu{R!}e}1Yw1>C^G79#Z> z2o>rJw<5%@Fe33}hdw5Ru#QLh{)OJJ9^<I@p|`a<AHwxpL5H)T@0{-IYg>-W<<NID z*Djp`Cw4zi=!nMF9xeN};+HIN1poPThUtE2gN~T!@onHXn-hYQ%XNcLppas;dk<xR zsy2*(&7K57S$8u;zUpPf3!5Xi(V@-|UG+v2)%Y4(^Zl9FqepDsw{vm28&DI#w4Ej3 zyBfgU??J~!97x!$xbxPz<U^GWt@MpsEr^~t98f#1#oJ1k8#a7svaHw0$>E9@=xW)r ztYE<Di&{p{BJQ?|3$o1#g~wNTv<IA?2{<6L<P*GXCz?$3Man%EWi1uV1-P^;_fM|G z%{kkC<7yc<7ObW@$;6U@w4fzzD#|G3pe;Z02w0VFqyA}mvpYoDc<t?|K{qm77!9j7 zBS=aLfqU>i4mR>`%`?IlJB@JG;nVh#0xf3d2CKLGndpqtavj|{9*XY9f`oAs0Dq>$ zzq*LQq+?{Lv`=ZtP=RTI0IK`YCpMH6x18NPTj3zRORw@xAXwQQ88iP#pUsc*y_Se^ z1o&Qmh**7Rv4qQ%bm)P7b2_Vx3<1NRQgJWI9<6;0O?Xf;R?N-Y64|n0KUz1yTHmn7 zlwRe=@pejwl&2Fye+`w<C`e*C4OU6$^$_K#ucv~UG;{kD5O@-BaJ&GID2%be+8%uI zU2P~4LS)GW$7dkf32IWr_;G~a7^@!6If*gD{Rd-^7?<pnHwR}G@+Bd&#ex56Kx9MY z9q!p|Vs1t7&hk|S6I&3}QUrTkN(A&$jnUK@#=4G5)6qlX;j_>=5(AN{hYKnD=^_}M zgcqs?XL45&Po6b09BUcWr6puBPnE|z2bSpESXd!1N*bKHdjc2~biB}zjdtrRYO~&p z14p{N>w&{33N#ejFDaqyiIUQlLQbw)xX}QWMeL!rAKEL8Lwv_x>J|VgCxBD8g~u7z zx#}ej2Yc(;IW=+xs8c98Ah18IkR?mnL*cDPpR&d}<lvn3vEF~G&qQMvDd(VJYnh7^ zu^3VToIbkvy>~0WK}r0=v^@Wc)BzMi;MQ*jY1+B?FHdr@wAXd##uNty{8Dj5VjF@5 zuw$}0jjYYjg@+|5?ufgFCvr#ujXj>E#ve1^VFTP$*lYD7wi^OAj^t=(4&=dz{uR#n z(ZY<FG71HVl?F31H~72@pz4+t_0;PpLy|~eDa0gVnxY7bpu^9O=b{p;|FF<6wi#ky zdL(7SqRmQp5uR;y525E|ukBMVlnNisyrJH`i27x-=&z#fOU=e8D66rFAUIICBuZ6J z43!;Hx<>VKRQz??PZ%kgaV_h6Nr=Aao|>=?gizPglQ4r7nhO_7Es7_h6iM}3dF>39 z(3<TFj;a%iCxg^OK1*U`T8f29h@S`!#SL}!rFK)rgm9^_J|zrFJj%5%@SQlxdQxgY z%|Jbm&6e15%urRzFdan=MbNWAI>T4(6o-j~V-7A(?0%#{-oXF5JaZ`Zt-HyJerC6f z3CbrBWvB=O^vNEB;o^M4?r(KRlCC2Yq>_<EBpx3HJr<^|5R)<zFtVTrr@~6N%k=TO 
zj2wLEc~iY0?N7P&+vZv!W^Mbh4f@!UTW{)b!7bkiK*fg8Bn$7#<gs`5FT8%oV#^;^ zA>_m?is&!vy;Xc;xcRALzoQk%>o(HotJXzd%{wuFGg4H4!VXR5*ad2uw0t-2=GSxk zrV?G8PtJr&=U2aCXx=}9g%x#~0Q7I6ycMl8Jlr?rnh?<D+ey?zOFItvpmCl@2O6IY zaNr4+5u*L5J}RACcL)H1qQa~nqHUA$1hO@e-=Z_8!m8!5i&-^-m?`LGB8>&0cTw1! ze#p(*^KD~HSfF!xapO!ptO)je0Xa`yRsRQ?KxMy!4yc0?yVrMXw{tUJgZ-#kZ=P=r zssaeybrR8b?ua4RGtuf|kq7QNVvk4Thmra*v4<b}X3nLH#72L_kb#;#+%G#MiTDkZ zA(F=FNlr`#QQ1|{EDdtVDK9qRJExmQQ8zNqK$QfpPE|aC>QfWRjj_u*VBvM=4nxk$ z-`KXQ1ctfO0n6hIC18Cmi8-5cw3Pv-FP*9AifCJ_H>??c*VofT3Ugjr$eLJ!!Mhfr zT?v&<XybRpK&1wnJ?SOT>{$aU6fDd|+_m+iV%<}sVQlTGz+z?M%@QggoV&5!;=3)x zi%d+cw?by?h+cBC0+gVykmx$%9YioBtwpN5Bi48{M?$+5#U4de`h+fbN4&YwAUA5v zd8oscqlY@&dorVDoO~$DO@ZIpf;<!})2tT8LHU(s<6;;Gg|`ahAVew8-Fu|TR2V1< zljn|-v6dJCQ-$w9xtWr`71hBvTvxMphR=43+n|t5aT|(G(RV$88XTP^oJ!$b{E#x^ zwXm=nT!0pPslmv^4O;Al%$JMsWmSLUSy{Zl6^3L1cCt_mY<s;j9vuWiPfL*^-k+lw zzgtES>m5#2Q#@Lt*upi}kc+kC0B*3Ac(g@vgomT39Uh$pOT7@rqN6j!ZG0kG#SM>! zu!e<ka8<?x*+KzM3%(mt(N>SsW6TF}a>I{JdWEctnv-a-IajD`(lm3THH#O`;Ef^% zV&S6|IDml?nb36)^8-tYG-FXiv;rOJsRQ~c3yM_4h=G_GznkKBndJ76$a7WLNsY#s z5wOCLh@KTfBF0->@YoZ{wDm&4maVrx<J^E<F|CPXWtM5eWmb6(T;^UG^e`G@yl!z? zgW96s3~Lz4F)G)@cn!B#EU@rooAV`KF9q_=fM*~`&b0lGIJpMboterZ*|1!>baOc` z=K3;%9>$=uGSX-<%26_`XiHAP#x}lW(E7@lFMfk^<?2n<#~6rQIi$VBms}%XvR5)$ z5TFJsVz!o)joni4`dd)-3pLAf^U;S=emzPr<$qD^QS=XAhX;d4`-ozVVh&<k;Z!Yt zx5e+a_}v;WQc)eB?b@t)gL>VqaAyzV2+vAy9~-^iJ|1idXgYJ`@fOT?o1tfCMNxK( z%|Z2kLuvfNW-W)S#15wDmMEu*_0B~BV%9q^8oeX<eLw&{Em{|C;F|$J^S<F1jK+my z&bXtyl#fGb(R_2{Ocm7u=Rgna;7T~I8~nkGUZHijg$-;>>WzLqyh3Y~3-q9gojFrz zBx|zEjB22Yw+}mi&Jp7e^%X9IjWv645>-tJY76TOFqwX3&s}RHP?=Fhg90T<-*ET! zH0Xs38TSPhp|kZbJ+H@Et4j@8(<Nl>+nF^z7+X1O6Pv}~<A32&O&q};bG{n$zer_0 z*&o1MD)8oNbHwvR2Y~A=fk~gDvbC_>1i14GeBnKc;$V6`wN~OJ&-V<XN_RmD9s~6w zdsN}!e?X8y>H^&ZMyXL8oO?s*5_IiVu7O8u6ytZ>9mN_>rA-+}dpxssEvR%cD5B%R zbzF>aw8uMYr0qBEc?NPD^@E@qSkxAaMsh)WCA$ZVu<c5h7QliO+PXvR9b2~4a*izy zV3H|1=YG9y_k<J;#<I*dh=v4uRO`djw=l*0u}-Z609;pvH9+)~LvswSvMXXy%;_s1 zuKbAp808d{JEZa*BVit54!b#_3nNrNkC`IEry!MosgnRP1G4i$45Gqf%mmd9!+2Af zuZOsm!U$lY#rmabO|uen_nnCqRWwf~>-z|ZupqV|*84*xJ=q_g%HTjZGe>;}w~k2A zrfVL|Sb2%+HLYS57WE=l%JT2vH?S8PX%+w>=pkutnSD4E3KtLt5HS+{#tXg76-6pg zaS62VP}JfM9Y_dul}CJgN=_{r2)mkm`}Wu=rtslB!>J69+XBf3JCNLjml%)(%Tuw1 z<j9Q_;Rgyt0QI5R5uC3SAG-h~9xb<+MrnkZ0jG4t2&XCKzYl?4Aq1-JOf;W=Es}U> zVHI&aRI$<q2V~ieT=sr;+CwwDr|@-XImP;PXgOtk9a>JA2k)pq1f5EZyrc9`b;<-$ z7xiPg9QIpTIPd6wn8U=V_KxZ|O7BQCv(Knq?<l{26N&0G>fh-5jr>yWDp>toU#d^@ zQiZplzun&*-7o2GH^$!kiN>Gj*F-Zr>r?h|2yxq-*P*v<>X2%*wQZJ%j<y-$Y<-G7 z4t=a@X$~1|v%a(Csr@)at;VQU+smzXnDFx3FSuJLV!P)2_VTr7aCq(1CAg&bSsivf z(WD;kJR@Mu?-Q@>A<sV~ulA1W%YXNdtbeX?{N(p<{rg=1J~zIzeDtw>e7`YYBH2Dg zAKRZm`8#@h^mZew9Z!V%9euR5&wTc+>r%Cn^(P*CmMevF2!fUJc%OcM`VPOJvu~?N zM~xx>S`kt1<vNsm&+F0aK1C|e=j=;%*8S(dM%Q-UUpHFc@-&LD&d=BTxr2b+uhlTs zf8U|OrEO^M<QaA!mT!1q*k?HWug!;sn;-oY5WYj>5`2c|v%>)L*E_t|_t!&uJ?3ew z_6^TPDtGOWdK10me1?Rx3XqW{KBCtfS^^dyE?>s=dPRIVq*zTvX|o^$sC*WF#A#nu zV1%q`ig5^0nZTJ=8vx`*MP#x(YM#vH^*d5J8Wq2*(E}Dq3Cu%rl)S0VeJW9qEZ;P; zB{RK7N`V&NaI#@-mW%!Co@J*B7OuK`PLEKk`6p03Ifdw#T8B>0#;7wd91Arjg^!{> zrtp5Ga?mU-qobQnRT4c>tvI-zYUqu2cew^{QcHh&>AVQ6k%83WLdsEO+Q_zvmm9%O z5qnP$v*jLgjG`7tjxk$fN587r_c2@9sz0k4V!e3<z;(8;vDd)ep}CZpHQ!oZ6{5-t z@Rg!M2$UWc>lfwUwL0PC;$Wz|7y+37u2qVC&q{S5h?*Cp`0`U(380_K3IzRB2Icls zSyNNLD(iXbXC7WJs8h)ya4)1-16IFZYc4fioS}ShWwxFwR)+pvM!i!M8#5}^aztMF zY9;aCWT+`5SgI<vD25bDwYoWojn(zl5_jJXR!i(a96^-LOEI2G7+X)URAFo>hKjs8 zQWQNobDgSO{?Lq9HRDyScvZ{UfpjW+?0);od-$_zcZy<-V&`LT@uf7Q6@nP@X)T4< z-O^&YV#|TRujN2t&;*;M80J;87dpI#wQ&+Qy-=UAtp>7#CN~Auv3j9Sz1&)0K4@0- z;Fr;JsLJgj&1Dt6z6RR}No-kvEdL#$8(dTJ&66Z5q#hneJaxt>vkvqKr$!k?$$=c9 
zt(!`w-hup2jJ_zp6^%2pPbv;Z+DFa=qMKD_0Mw>3Djp1|vKc&jt;X2PADt#J%n$9M zR!DYUwGlG>+pVqtGmyysrcvSlwSwr=B%@jLi!&Opt*?d-);ObC8;rL|`mz6I1CD9a zYTLfn-a_;3@dRY0n^oHK=M$v$G@)q6{IpSe&CgqnAMA7Y+eo6_Z_@~^zSAnA%HDlO z{TtP9BpbEaQ=iXSsv197vx++3rWSf9=L)2JPSc1gTfe1mbdIL$TtV$`Q;YgF#5kk; zjSeTldYW_CXnaS9U6qaId%pSonmuv5mU({TZG9qbczm?l=i}AYaX%hmRh^|VQcEwE zM?;RM@k3ROrtw<W`LbT?X&k<4J)x40=1WgKqMqa_nXRe~O{Zr4{&SeR+x;?n>m8jM z$YxC$@_Consg`W%<qvPxl%YRw|1geAqn)zLmfwCj%~~;}A1~*9OMlfQ?eW|7ozuFd z%FAi-Z659nXSB%T%$V0+2@m_jX*AY7A^7*mp}DycQnt_tYu=!pl0_MswO0bEjFx7y z5s}O&(o|({I{P-#TBM0ms5CSlOH4FbqbQ_1w_Jh=FD}MKS{-EoDfgTCQKK1(vM1DD znyp1z9>bWoKzkJB{v)q=zWMX{*_oiV=M`jgQF`D~T%j@C49CsW2y*$6+mGCNKAS`R zV+;A`NOS0hU#K}Gs-H!wnbEIBY8s{VNL|wj2|v_$^dv?71HiG>H7`=TZB@BQ&C>{c zKQ+`VQ2jXi_j;&+7O8`j%OzEw;`5{;QZyfPSe4I<)O||dv$<C8C=WQ)Ka0(^gujK$ zsv`GAW=277$=oOac8!(poI|&zd2^utdHvYyX`sGBgY|$uFz0`<p}RS+gUpR^ZPyCv zTQLP8eXI8C@N`w)4o~-p&Wo@@Rf%|n)(?*m3XsGjgYqOHIG0m*$hUL&`x?OyUP@=8 z4$87qD9QH(%-e5$#(wJ){nq1|t;Kc;+_;XUH0InDiin`>R*koBEw)1;5T8{Ea1+%9 z@i~`(K2cQ|wCKtqIH7tozF#G}P(!7hSqYVFi^Lv-U=(p?VCpTyA5_wBVB%U$Q45GD zOna*-YMpI$Ru6zMK<BM$2<RedRrK#sqzaYc(oZ1Z9wFMKObJ%MU`&Wq2}{S4n;vQs zrfH@gs)j&yZFM6k@NcXev6>=~SxadVij?1|oN_3}=K2k-HPjid@2@HQuzhDRGAjL) z5p0}`c%z70k=gjAyEMh|?;rhcdxF;5{1~@pNE4gnRY!O1lg<E>Nj*s6EF*l+6|70& zlb9C)<1l}6r35XEbxlH33HUZC9LwQY4#ymrXHw=dv@r-=LL+boNn*v+#s1-oj+;q0 zC4h}rB(M));zc$=NdW#VCYDCvMVztX`M$_safj629a4EW*&Zh|vwy@?3AXq^CT$Xs zZ^|@zijTC=#+L~M-32yH<K6_9ePDi&gaoV#Ykd8(E3PCY{0I3&XXuA330S|y-qY*8 zz+<`s%c)7I?PQN!>yXP${i9Dktt$hYs&d^FXY8(fhf`U*Ytol1`2~wJ@UZ64IXj6| z1MYwl<39Zl2eN;+lt77GdSGBJ>my*5xc9<d+~}!#G5%qt&EZxkZ4N74Eh7-dSGx&N zVJeFbrq!MU(W-cz!L!@P*X-QF3HR3u-I6DokpA6K1LvwJQsL_uqw39sNcZL&#y1P2 zQj=|jJVl2Ix&q=rnTQVV%QcBOFuHS(?I@cN#KHBZ^g(x9IBRb3Xb)n(qZmJI?dreB zu5=`j4oVONu|{!3afMT9Zs2H*-;Iz8xxu_DwFJQpw(^go{O(Ou$T@Rno}y1rF4$#M zCN+TFIJbZdNGESjE>6jB*%K+q5}xEj<|VM;C@cn~gK|2cDbouD!ovJ{=8`h0GSUi! 
zM9HW!2J>Iw2~x5QdqFZ@vcilW`E+el@inN>C0q;zHjzp*XoG*(2wEnWB#7pW7`TSz zOrG2xX>wa+$wk?OX@*dcVHCm83|5fg->snoj0iplG((Maw??4o-?i2&P>d~b*{Y@l z%UKmCU`#Yd2Pou_9x~cs_`(W_FMsR`m@jrh2pq7QNTg&QOletU=O*DXiyJI1#D?yP z#SJM1(#2#wPa3#?N${Xv)`%yIn0S&%zmNe`wCpp(*G|j^YL|rJrZNe*DuX7T3gEoz zQN?oLdI)6>0+6D{WreDS!K~-(Oo_@}MFoqXq%=@y`sQF32ej}^7qE~s{lnBIgRRn# zpHcMJurO$pQSSS~&ZoF&_Zw4^GKlkQs$p5l6vcX>#fE~T6tzz@<Sa9(&lLlObymu) zx@Z@}Uam%u-QSEz?s`Rnv(pYJO{em#q|kKkjFx`WhA2H4f<^E-w~5QxH&~FQylIY< zXf~F0lPkWcze!=vQq|fn%E&~LGLwi*NlJ>cw+hmb`vxg?no!kDD2TFAJy|Ft31Z); zqS(Vxv87R5LDcO-T>0YfYSy`^>>#dLSq|Kwg5r1@EpI#CD_@$iw0RP%1f#IkL`r^T zYw!kJ+o$r^t+X8GhVpQjNJ_k6)(I+v8QW&&Y*+FOD@AO;o$V$I7oB=+V&Sxv=%^^u z7Jf(<Il~WCWcUUraGRY|v{?fKd^o(pX6+C<lZCL<w|JqenjLiyMzA%o;T<tC4GI5Y zi^4yJCwZ4BCW;M}_$KnZ7n;hQqjwNz{4N-qP0YM1^tRJAw@AX=kV!R>4~Lyv^sLZg zPw2h|BIT)$dpF+#x_UJ0^oQ@Zt8rd+r6KwDTkV`1=piWdVZ&tDYY_bhn>8y8%mZj9 zBX}D-8(JGgkT%q-<xUQVhH4$PZbSpVq<)KqgaGK(xSiq)O=|XxQwGh&%oZs^I98O{ z=qOkUbG|A{Y+`T9{X^_Fqsu_^8eJsXT<%-A&;(PA2NjeZTR&Hn*oaOZ*?qN%0Tf`> z3(Yt6Qfd#&Ifz|}P)>#*?yG`07Q1|EJ_k|X9aLWw`@DkK;?WYtJBV#Wv4&G^e~DrZ ztw@z64`L2CSXQ{f5}(HQi1@730u>K7HaTrskKNyb`j%o1s!=IcsNOzBeb{f|-p>lF zTWx+o%AvPE0uH8ns90Hd%&WS}g%(T7zhe>#zJP{F)%q0yopXQfkPd18h3Q?b8B!w* z2y3BQD<y2@jg49NbGopsl!-^*qN+Adu5xP&eOlF201LL(t&lw-Q@0D$Jj_&UV%yfb zeI7KV5`wC^hfqtonmeM~u=%>UK42_#ag`N>F0NH-#5b>PGHre3-<3vk^;J{(n!1{( zKCONkQf@G}?yZn%xrH9D!ofht7AaNOldDlb7b)Z#OvF|L=nXAjWly5zYq4FU!}#3W zgnw6?7twX0<*T50*sY2+buEDLUD)^p@@oJWs^^$bGqv7sz$w<r9<Qag3q4-VGWV&X z=HCcGihCNdP4Q2|gfJH%Rbm`rjR-vfTgAMIT!XKX(e7i=7B$DDWI`=zsGIGL@7}>9 z*qVXv!ySDMg<SD|@95(w2G@WV-KX+>|F~&AilhiuWx;-)fBzU9KA&jtLGbT09T2-b z;rx)Pw>^4NI8PaXdW~djZ!gFFfYDPDEWJLSe?Ezxy_WJpj_)J+0LN8EafrHAR&aQ_ zRc>J^B}FT;@xlw6AvCguJFa|^MS@3JZc6czY;YX4T1p1d5n3ZzxrHJz$%;zSo`cDX zEEKy*(-7}_i#lyh)D|ik@JdmkG*7rFTX6M3dyNm_KJ0T_lO}^(rM;sjgIftQo(yiK zs8up}jD9m2bV=8d3~r@sO9pXLWGor%N%xSpV|jvS$;uBDQ%wdkQ+!SLd;+V<h}ZMI zEaIbJTzY!KD6pE!6H~pCfq(S0lkuo4q?hb1*eK2SXgc2=_7+d9FQxL}ypsX?^#)RU zBfTlz?=_z{qx-nOdwWLt8-1Uf<9bKcA~YvrolqX&6$<@*3umM^bANB^?$-&+y??vk zQS0BRexvVb9pC6XTGRE6%6zY-v~QH(QC{Duzju_&H?q4taGUpUdXlFKLT~07rFWz^ z_>9`~OciQ6&v`GwXYls)(F3*DvXT$Z!ex==X}b2R?!VTQ?PBu5nzXf_rg))ATne2{ zkNi~xo+fdj>0AzZk|X~%xm!PS|L>dP<)6?0qsiV+Q@npP?fYrc$0(xi=l<zr`;Vr9 zY}$9ze|+SKwrXtJXiW+4C{J{vjZWkv=h*ZxG|6j1<wCCCZhO5=6^V9>{n#J5#wLxS z$)lP?;`;ic|7e|qTjS>k%?fL(2dm>WjU-x3O(g>wF4LqrO(_eT?9I@WR4cl%iDHwn z#m~!9!{=o}aDIp@pH2K~{>;GBRCUQ-N6@*`y-dn5HqMtkX^|%FX^k=L9PHU7{aQJH z<QuSw=hi8q$>&|ZCZMM&-U>}at*K~`Q*1Ii=a1aKrli-?B#_U`n!2*-Vb%L(Q`j2c zmkc-{+q7$hrl0+1J>?l?9q0PC9BqwFBi-~gbl^DD4%R_zW>t}-qwD5rn)mk|_CK!z zF7?yJr`GKZ?{D}RF7vB(J465MX)bbx{oo!oe6t$X4euR3`@;R<S<W!~@P3BNyT!{c zB8JiUQgaaT9&-+WMB)3K-Qb9cDH?E(9uWY{7O;i}9rPD!;$Zrnz(NymW{L<a6e<y@ zLSvcn50?}w;YtoL*%B+^iUSCr?sY(jX^|45YY5)PR2blbvY0^MkZNH0Oh%$~Gv@|A z7ikaR1GP9N-k?G6klee8(@Vz5(ug2H(nafNqS>)(&hS!NVCdGO0pW;rD(Fa3ZzL;8 z#)?&$>IAE}5~0XLY5M?4rFNSM9H+RtkVKs^6{Zvu>Jg2VvNC!5=G?A>F96+`4C0C2 zyCO-!TToi<9;{`2ncuHtgaYT3tqtWQ++eUgG2|yLmF?;<TP2*kNn-J1wPFSFz12Yj zCzY)*&abM3(l=$}k)F*pP&(!sYTYstzn1n7)oel#AZeUis7O<-1?s4>C955b5WARU z<6(3!abzaCvlV;!ses|K^2E#(dY++^fyxB{@ltg=*gaH8fmZ8zg-%0O%b0tpWrX{! 
z>MqaLmK1szl@yxtmK+)$S!tusIxB7Ttb>Xn(6GH&-=)I5Ab<>|h8`zY>6k)^(hGvP zPFw@PwPF+%h>46K_JF5Q)^K?JgEc!um_v1t6bmtN#af<Z<`e<kP|yd29mSYX5FrYn z4^>Hm{K7JhUPVz>V9HR@B=93u2|0?LP(9U9zT)+yCvj9@F@y`1t%NAB7{&UDI#ooq zROR*{mK?=Ka+7M@AKzrC;#k2^#hxg}?<z0Ii02(n)k-`XzuRUId;Hy=!>N=}=F#}O z#?Rv7h}S%7IMs;Xl^;YgK1n4JDYj3%PRtrFMRiL-96_|#4e|b#)mzGm%-=nu*rUik zL#KokU0_#>M}IyX2wALjm0}C0lIp|Jv7#7%Xv7cq$s4TJblfA$J!q90scot?QbII* zu@jsoA@o(Pq2Y}(_h3QN<R3Bo4b@joPc|+(=t%{*p+&2`6s&H?a6OuV3V9Vmx=Ms} znMQ&r?$_+9Z^V7UID-&(4#YR51^kz3VD~GE^;++tY`p|T2pETpHBK*K6Y+v7tDXmZ zB!wDimHnak*tWJ9heexP)f}Sg*S1s`2{V-H=2)AXhu7uXMjx7utL_qvv?3A#{96D< z@N0_%Ww=7l*6A$~W(Zi8@h$-FCsqb99Eh{Qjcr*_pplh*1*BCrZudJdNfcOa&XE2D zA%Uh>EErJxd<Q_4qLV;WDMSg@LV;TW?^m!AM6rrkPEvymGQ%p)J7*%T2#tJ0Pc1?e z=^%{2|0rV{+^>r4(Dgu1>&I>fE2R|$0yx<N_p6l<!ZKI28)9rd7H3IBeI?2huj6{K z5=`YM2oIX*r&5_5+{5O*C>|e&Br7enrBR@VC2=ZU839(g92k4VLEnnyeX(825hum; zxoFqxvY+agFNN@U`e#^u=pV@bfn2|n9S$A)>xnj;Vez5=TJadfOB!_lYr9`rE!)45 z|INzDg|T{fh`&b!G{!{a(#w}3yIwKzDAH?3EJ2(ris4iQzqHDz<q>h_7QiniW<OQ` z(~u9}_u1HEd@_D1j1|I{LKZZ%4Oegx{0xdc0vM&xB(N}wTml%SK&KNGw;r)DLs*#1 zhV#V2jEIF%OxL?N7!;^Puuq-=-IH5@^W@Cd?#;bu;j<9UH2hAggnA*CY4{z?jjiUI zv&|TO2e2vSmf65WVar8y52BVw^vNK^%1QyiGDGu|%Fgzks03lzs&8Y4XF)%c8Q>3s zKGhLmVt`JF-K;v(sv%a`%6>$#M)3|}^ABHVK8K?@h=QmP%#{f;5?ckIm1YiJiUAG6 zWe^>(Lh!r^4bg)sNg&v^T99d%TNPG!Wj<W2VfP{Ds+Sd1_m7?HwglWeE^j5^9;Yv= zsm{=e#nULTzgy(=vSMi#YyYU{TO8;t)~?=@>Uz3GhvMxTL!;^&jb34<OcxtmD|m=t zsTEVRcw2Ew7H=!aiLK|h_$i8~vlnl_&BH_Sc8VLDiX%}BYE7saVpSx0b)4^xG1prK z^DO2z()1aNt6Pi3)Xt7$#nh;QU9m!Th3b5@T1QmYs;9j(#}YLH0Pe=n?HvU~BtU5V z3`M@ygwN}gtqC97ouf08RbIo|S>kj-Cj*hVxOZkEs+dijte$2SbEE=anu`whx8Fs# z&KB5WtXiD?8iTd7mWx)1UD(s>l+>e-V(hebCfy1R98az7^{vwfxKX@z;qQ^qP?BX1 zsy{%1cE8x}VKDr<LQ$!eI3NXf7Y1Xkc;d!t>Y=1V{yWoK!O=*@Lx$u}|D2R3T@%Vs z0`ZvDoy4>S%U|+hs}C!1>9HCHD@L_4$a&YmQ)Oj0$Y9nUih<NL1$hd^W?@S!Hj6xk z7|xKqlv9S>rIFq?TIsBIGQ!*x3YZci^DUHBjSx1mN-4ZS3-onX{tdiAg=&%GSb_A` zD*slj{9B!o`A)*55GIn_g_zPgb7cwu6=`YRtXEU|phM{kCIB681-?DhL84+wk**2H zD%2F}IkVDH5jMIZGZb8E+r+$5WRDeEov<pQbO6M+)Fx8r0;p9~vKE#HSYay?F#@Q; zDHA{q^=dhSD0&DlTe&T-YLYBlYmiUUdW0kWR~b|}V&?CnuesHJAw8r8Tae$p8C0=S zFaiNWCc;!fCFxG6qO%walklWc6L^M%LU-s8uLS#WQ%?GdYDDyYm<-~X8#E=D01_T< zzzMxWjOB=fT9loS7ZsuP{Q0^g#tuMI&FFgEL&KjCdocxHsQi=fuO)Oij@v|G`I}5Z zu{_<=*qOQ|)E;2o-5zE70Pafr0C%@7?~_1%0C)8R+zDv4UUiw~eBI+YUSQs=FEG#8 zr4BIfev+FH^sax<yR8e;$WPp3|NlDikq3Gwd$so$qSrr&UVk-5YU7FINMApoG-Dy# z*Lx-71vc<nEsx4OFuSytC6^yyUTF`LYAWRg!<DxLcCXm$f#_K{;QTFrpt<#)<%AIQ z8^)Ajzx#~JH@dH9q-#Dv$yvV9{f=a{KcnxcpmNVhXYS9&^Zc!S=WE}u2|W3XzH6!J z9hL7&OM9=Xd`I*5UBTj>(RXHnAI>O!pEIc8IM|@~Z)4e+jyg&YJMprsctCjPZT0%X zDD}7A-g$Lkna?afQ+oe%|7&Xru}B>bsf7%OTpsOCH2Xz|fXJSZFP)2jg{tqZW&&mZ zmTM@ezpWY{+QN$UGdZh5@%}vm6~$ImLc)x|Sw*4%xm~^Ed@Uenwj4Jnxm}%ZLR}xy z?{%|)FE%%eNI!8@5$Si^eun2)9O-vGZWWNwX5kX)C%1}7AO+Mq%W%{B2r!Pe?WDjk zw~9m&a;u2^Qv{E!6SAhe$3HCC7Z@#+rKBahw+1-SW`Q@lt%B~uV9}o&-B4lqXD6ps zn<a{g&I4doa?*hM6XO*i_D4)~;SfMOqHhEy)_640-)n6}u}2X+T$NZr6Kni#^duWS zNr`tjRT<%EsZoqS1okL}P&~&VkB+1tHgA6raxgnhH-M}2Ajw*~zNn1gWLA~3BC3!U zR_zf5SPM{pTTAZkBcT3@B4jLWZ;TNc%8;l%S%Y-~)tVJUD*<(gAX<{?g+ic|tU9d* zjzomUQi=s>2ZacLk)aT&TZBRcir7$y4Qoj#M5G0z8!L6m-n=Sxi6VF8lT+xfVy!E7 zN8O^}U5R{swTz(qDs{~^F)C1n_jcrKi6N#a>+`t&s?Srm%}>6bkWwv?pD)OH{mA1- zuAh88A*D)PsQR)TbzPKGJm0<I`BY&TNDJDJT;lzz2<;y-b=%MNnLOh8a{G~U(495& z`kIo=flshfmv7x%Q;qi<bbC>qRoELUb-ijgf%>-R9pn{dt-l+b59Q~4Wkp%@Bgpkf z?(zEeURx9Q=JX@iA6d_X^Z)+t|K0n;^XUCY{&{{{Pkg?r_6u|w`aRk*cYHr8B}~!w zsU*};A!4TOr1y{Yr5pfJ$I>UcM}0$7x6R)>>Z_6hXDrvR={u}{nVZxPrXOLO$V8=n zk?~sToJ#+a1GUtk&*T);1{8h`6|S})RC=hI4?Uu+Q6ZXS-Xh;%sCFbejDxna3P44a ztP;@tVHJVNHE`6{IRc71>^JRaKQg)g$RzZt5#`=OGRdwtpyo}U5w(A_P1rx}b$-V7 
zWZQ$+&mP?Q%&)j7r#~nqefHq>vj_7XGjFr^aOnuxb2dj<xnb4goI`hfC#VpAd5kNO z@|weZ$C~3{@Hu-j&L<t(aXu>>$C|df@e=pwPCF>aJzSLnPFH`{c>B=-tOOW6?(w6< zJ-)9`zFwU~4%z-=1N0QunCU#)1e(M3XAiGGdpJ30wTIJYP;Gd<qYe-LiT1>wJ%Bca zbO2@B!$}j=KKa|jOY~oYP4c6o1S_Z|CJ2JfVtaatdt{0JN|eiw?0fq4ANTwpo97=k z!;G@&VD7mk+90X_$Sc|m(z!t%pM0L*kIkd!ALO6!mmiy`{G6};e82wKF!ksA^~Xl5 zKQ>kSvALv!`Fz^X^J_CcU(NUOvxk?TJ-kHD>L6<m=X}xnE8N3-oPV;hDJw6{`96Q< zzw`mu$NI6sSB>jUdph^b`wr{Z_Vh}974?BqMPX0X{AcB%^kt(DGlS1zdwQGECt_ur z=2!pO)BDe!-dP#dqyCk?7;!%VyTaMyssF1pC|vKJ&;3L5Yo*{*hCNpEjXvX+8!Hjb zC*>Y1x&7>^|9OvnQiof^me(ryYc$ivvnFf%P!xNPVvizDB_I3_M@Rgwnk69gp^ga< z)i%MS>Po?eqdkf<h-#%ULy)0(;zv=G;#QGDE)Y#YmKmL-DC@VV@a|83oscNEA9<)T zQ3!CAK@#^W#B114qCC~-2ntT$k0kMB&?%kb7z+xo^d6j&LIYt>pwgp4D>MkULMyGl z{&i`OTFTbf$%f4+8y@9VP8?mnda3PIVWrmV8{)6y`)o@-tl!DVR<GY_&`iCGW+GS? zg$baoX~T{8J3bgqd_FkcC{y*@S`9CvuJ~eWixa78Vzl@oegG(inW3VI6L-2GZ_QO0 zS=7V`Sb;2Db?|lzBa@nVD_R<5!BW}YbP18oCD?Yb1fsmJcdfwQUvI?n<|jT8#)<@% zTtSL=5Vh&tiKvipG=6ucAZmld?<$6btC9`^5q=N%X!;8|H_ae_V&da3jzBt!a*>`N zulH!x7Da{|-{So1XpD5S4^k#i)xY%2JAB*4oQHlgaf(x(p=mTr%9*M&OvqYlHd9{Z zX!tDCnjduLw5*YyRpFH$&D+FujdGnPrqbZ2-OcK66VqjFaq9CxPJ>q6-f3MbV9jZo zv#iPLZJ?L8;p?&{M)T=8aN6vwy~DnvVrX~q8oW)B)Fa?D=2<ykrxlV~0i5<TE46Qw zpH@EWo=<xu@%+D|+X~kXyZ>pKH20OCHb7HtK%Y^!?tB{MKc}^-So_~m`$p*<^>LlX zOX`yRj_!BV`Zv<iYJ{BY8?|qw^BJAKTF#!oeJ!1)wREzPwRCDvSMYcA{aX5trt&m~ zRp;Mlbbt4De|LVrt&qlfE;g#qvl?Q7nR<i6n)ja0*z<YfW4;Y(huZksdmDOc#;I~f z`91oK+Vzg+`!_X<dPe0NwQuy!E4$KXWM?)${`URt`&{}ym&!M4&pcJ9^=G7>tItSR zRi56>�tt)SlU{Zt2gc=5N&BQL`4sPkV*;Z|hkzHaDKpcvh6nq4mwHb!OfDjq1~0 z%4X*o*%LaW`L5dgZ9b)W|BO<8*SU25(`zHYT0r~A8I}BMi}JRs62$DZtJ111#(^R@ z+@opQyox1LO*fiOQWKM-sFZ>!hoEFkBRP5uxAf_uR3GJEECp@GR~m0H_;k2BJI|@K ze|6)3js`8;GTUdm&P3D2ZXOvh?br3_fJIBf9z&VWqAy0dV~^FK`dZAz^8Qn&&+l?A zB|ptA&ZTOSJx907I@dh+Wu4m_<(u%<^;i#N)Nk~4gFYi$N>XUgNDD~0@sZwc)g!-Z zd&pk2*FN&weQe}c<qsJVwhatDt7p`%^RjhS=Vj|!&MO|Zexvcc;?dr1X|(r!4DDz} z1T^%7>=h493VX$)zn#m5oPhHT#`}hcO^@}ciPOVu%zizitrCiHE=VPuXDUT-dYJ8J zx<^NOwWo)f`k6m(OY^0ea?sl^XIUp}P-;uv7{Sg`f8M#heLG9(R?n;VdmFEqM)gur z?`au-OjRxQW2tJXA5(XlDSSUm{TQ#)3|6}WnHkNLgO-R9J<So!**WwPZ~9;}ak0%z zVH28l!PK4dcfbZMBLIC$R;S%-lfR=&M83F4WaJIB##ySj^A(k85^MK`E3(p74g>9c z?$5C?vC!`^Ed(#D+`(nruaMRg*7*;$k?f7{N9Db?@S|qKJCIjz2}kv+D8}z<3PT)s zIMs;X9X*Qihi3fn9N#rR-KY56^!k|P|MB<k9NiLoV<q~d0cT!6x8@DN1-D1<g}^wi zjNSt-KDbfZDi}a6EBcGlBrPVv3V0~Y-()~p>#DUk|Mj_+S||C>=WyQcaN^pegXj{r z_D|n6fMHVfb?b`vYQ8X@MJ-$n=U(}2r7?mpp7~;r%NCG=CAx??%z8`kM!7Mb^28Qe z2)+iF@*Y3(kBe!s4P2jG2y_^;$Veyx2x5++yQkMPr{N7rVa?DpjiU5FPBj`Y>x7L) z3@Z70keg32M?Y)Q>HcxMg0lGkFews1!+*_&=!j^Un5h5(29tV3KNKlXbXrjpvhI{R zOFWJpA1)t`>!@WyikHzk`tD%-`kfui*o0^W$d;@|5Jsw>8TiQOa_-6N7@O=%Gy^OA zoPn%;8XbFM^Rw>h-U$x2XGa5>_nZNTH6sDr5fl5Ufo|=HHHvo-(F@JQ5>ADl2PU>C zrXcqCyN<Ox;>~LYO6rJ(w+~p99!~S>@gw4Qg>;&H&cHz(@#af;aZq-uAvSxyL6nkB zBlhaF9KeP<tr&3myef<(Pb&t@v?~yqo>u+WXl7K8RxJgGu;P^kz`8VG(PDEuAiJh9 zy!s7}z*iC$Y_tvJX@5lvf6an({*mj5{wN|_hdevB4igL#9ra9XeyT#XU>A%zW}-hC zbq`>FjvJ)~*AaU>iuN6gYQaSIBAcLhy^dJoQU6_Qa519j7ilog?5FDFYdj*p+t>l% zh$)EKZ?HtAVWK}eb>Toqvl7~V9T8U*%AAkb!<o?-&cvId<2bJ))_632cn;ovLpKx? zlb>p^%=w6wZ-E*{M1#;U^#-T%b;Qh>16Zc!6%~%qhG;sdve1fo)mgc$*-nKc6a`+W zl1zA^${7{ELBTfApH3)`j1M=KDk}m83T<_!^it(ZZWBtC<!e(N^~_w=RAnpPZztOF zQecJJNrl^Z-GU*YFQBrfc-<p_Jk*#IMCS6;pu46}QEqX_aRbO^8lU=y`J7gpJgD}O zJ(MN~t-CZi)u5u_-IueG`e$z30B%gfadd9_wFQ^Zv#OUgIY1Xj3=Shwyr$So)bqu! 
zYK46C5$~`IV5&rb*AcxJ((Q1u$L0ar?1(jrp@(bz=Eh~)4LbPB?p<N`VO(F4r<G}^ zxvhFxSPiCYx}&Hw-@zlssbtTiq9@185OnAcE^tLrPYK{ZHRY`deWL{3bL+8IBJY-x z%sDgz2F^}_Fn<e|;`Dea=P1rR_y$)E5Ld<X9o=xQ3bnq5kL@gO1zcBQH%v2y{2eH8 z*~0;hTxooO%!R6P6o5hv0NN=(;7j9@(T;_)tStkA7=@raz0ev1AP&d6G&g{_lMN5R z^|5L&kwUiwo1#9ij=l6_O|`IoX<TLUFqjp0`!+E{#{)A6Q#2-_V`u22Vn%Fs-7Z!N zD06XEuoY=z*MeyT7mZrf=`XHn<QRn-xIyPc24U0QE$a5}`l^R&g(J?eFJMdUm1Pl% z$9d7jb;KEtqUo85@w;#(Oq}6VD}EPyiiz>NYf<cX=Bd!N;E02F)Z;$bxl7VUx{f&b ztUB!f9?dNiD<8WTO<Xhf<}wj)Uo>+t5=`uTzh0Yq>Fv|&RPk*M+cD_HW#OYu!L)9t zJ{d*}Cb0^p*kfDuSBWjR+1FP*-otNDBARN;MaA2f1B;D578{!{ynNxz=swKoT+G0Q zrzbh!SkX`U_oY79M*i47&qXETv~3EDvF*J;b?C`ESF}B98Z#x_g`QqCX6VS+6F_jF zGrlpc^ew0?K57-tG5T?{3$2AaM%Z&HrWC%bFv|$qzB|)~wgyIBL@hnSF2jgT8T1yl zVRa{dUD4@VEtIt{<LkT`TL#IdB2U8$$rNfHY^od>{S<jdp}r!|;LclC^Y70}ZAsl@ zQh03SMN}2Y1r(PCAL{~n>vh7^1Jzh#vc7s*>+VWm1^5w!85lhvvIh3esT%3b_#Y~$ zg**U>gMx*icu;1LMbY!8DteL0SK@@#Bwn^V#L__-e8d{X5W-{uw28Q?(0*OlJn`7o zR};l(YKaNaD3UWD`vifA=#}}(QV{!!V*kYJgg?X46wWM$Xo~T7MK+DA5*0O$*7#kq zT2pM{2E_*Gr9ew;fH+!46dM`+OvIDahdq8JqY-R?c4nHgH>m@)e#u{=tMURCS;Z5J zPPcFm8p#vu*XRxxZRX(wO-D?8R-i07Vvk~pqCYE%NCuf0zl(LG--S6U5mlVJ<{odM z2QOf>5yir%G1|>%1^!AsN$@fp@#bTvk}=VzsL^N!&jtJ@dfni07WxN-Cd=5CT_KmK zCylPO&|?Cps$2#TnkeBo03uW2Fw{tD+%zq8zbd^viuWK%u7&Pb+}g198^6imwuoJ0 zMw%7b2_Wg1Gjz?KMmQTR`9Vv*uxs`-I@%CX2f>F-9c&`n=<!l!?3yjBY0%D}B>0et z1)9`pdrFF;n;*1f{&UJ@h@~fgVOoI~u)hi4Fq|Q3h+V|*W28U>K2n%X!zgOMD;-87 z{Hcc_mJhe1#~s+Y`w9%(eKne39dGrLcyn^~Xzq1075!97o479Nv(N_VMq=ZwD~-g) z`&^DdPtauxqy40eJ8$USWpukP={;D=U5<h~Rb4GOTzPcy26&38w?PZNx6n$tx6qTU zi&LQHAbJVGJ`Ej(BjzBYC#Z=nT$Ni!NA!mymTY`DHWlfzn(;&3Te8`+3S2t9hnBbY zBt1gbqx;f__h7r8m(pr@Wepz=A^a|%!|SzdZg`Tg%Z+eDxSPf8r!p=IKb5kAK68dr zh(51|(XIPzPr<eo#t2%<mX+^~LmWOg)?i>38aXH}Th6Yuh8iMZm6b}0>0xyGe$CoC z(g>%@ELP|uNXTEK@gX(^w;hhyxS|n?)&oH(dWg9JKP_ShXXMu7LT&$0zeYR|g@xQ$ z(8EOVM$j$fzNCc^Kh~p0{CFrFHSt~sLrO#CEsiJ{R!tb*3yM>u8xcP~Vy7alp1nMx z|E}uR+KOU$#no9tj_5b24|@&Iurk^nE~6GsErk7s)ra{kweq(fo}}+Q_Sg$vXlJdK zUU&7$pyhxds1!Jg&%5H~9v6|uMzFch!AfYUXvCho)j<a(j56=8+=81SQXTb%o>9=Z zPzxajER_W;dv5_lFRfd=(TwzJ!7d|ki%-HXT#Xwky@tqiuO7G7sRV36&tu_QJm1v~ ze&s(@LRn82V93jBdJgq6>ZV-SAUOb|dg{bFb>UmQc}kB^rmny|+9wO#(ii(B1H02! 
zfc7*4!8P*B0th6ET;PO6pbLmo1NiPbOs+uciMY44(82w{nM6E$&`ixhf90VQKH$LC z+jXFW%>btT;g>xSL-Dc}gNBM#_COJv0TAzZWROMC*Oq%-hq?BE9Mwk}7u0JBqQI$m zh{D>yCaCM;t6eF)tx-AQMh@s98B9o647Ezeu3&gMS11O7t5J0QG6R}UxN%G=0m^}r z8Z*pJLzF_>>h)mT8~X;Ksr9zpZX3$<`SQ~>S2qENEo{F9HM_?BMzV!rTnzq0JtVUP zo`v=qtzFPfLv{V`A`MH>V2>Qk*9<FI^{cA+qE0M<azxuL#T<%#hiXglL%KoykRA0~ zF!GfYZXcRjdadBY`>LSy^)xDOaI!{psASfyD?yIAyR`d4VrW<=@H;#WPb&=Nry%NS zxC=Z=zk*oe(G<lyh{n~>!)(X&E}^PZdpy!N!@5knrlw6JE9d$L6_E^#k68WhBPQ;W zKpe>4XdWLO){8HPYk`rF2D>=R-T>l2n>G4CYQ-Z<Bl80gCvEgd>BtOdv<fO)Kx*m% zk06Tzcm#Vwm_BN+2&+Jx)*f%rn`xZm0X?L*ij39~gE`uqKy_HN@Qw+>Hi&}Dqm{U3 z6HP>cHQciwMl^w$n3>SCi3vTV{4KOpbs`QsRc9j6rXKX4RsbwUBMlirdH9w(TC^CY zY5ELH4U?fo6S298cs<J$lflFi)-=|oEphyiCws3~A3zT!CW^Q6AmxH-R%)6+kYJ}O zEht*d9IMXn;o;!S#{cBTb`7QQ_68xEW`!6(Ead^S)>tA&FvUv=<}}4X5ir0a^-NJJ zc;hn|;Uc*?)UcD-*5M1M1;EAGcJA=@xn*@C4(tsgmZDdNl_K?@;^qKbKsyN*&bYsA zR;Lme=gx5xzPUxCMDuLm*v}_RG{*>Lt=R%tM}|Tk;mk7{EO6pRv?KoC#;)W_vfKvV zzZGLaKkkbe@?*;Xs~7;6h#E;xmu{jm^DiWbjX<F3eVbZFFzKdF-3u1k9;6q<qL6Ee z6?_3R$Q<$}Z1vx9hp8-@$t(a@#z;(xpJHy>OZGI^l0Jpw$z+wsFl4PJKHc+i%q=Cm z$zZmW+v)X_)a!!V^Av&16N`eSS$M88sacd7&<d8$x!_jBL`t)YLE2FAW5BH4O26i= zr=-&QK3;U;`&)JLe~WC=iTRI7fyUO?xbdmaC!!@;%f@;u@xeV7p7JCyP1Nri=0}T# zMO7ocMftRQ)*AC)yNL>)s}t&M4H-c>50DX*7zt@h=@SqN1Yg;kGzG>MqO^@}EvRWX z7BNJSmqNM+Nh}nPKt?oK9z#-1y7tRJDOr8tNYs;&ZGoaPPH_oSa;>BEnFfO#+v4*| zT<S7CU;t4-9wrVYX26Ky)Yg1cAHSV(8iQnw(z$?{8eK9!Ih~-gA!4bXlssSnB9U`D zDUyX`PR={3ufxj@=5yL4RPW53h{7375c-rJ1Ci0*3s$krhk7FR+a&3@g{uQZo(@C6 zi0n&JewXnTIW<Wdpzsd;G<*EWfgcMTx<o*L(MRxh&nVx~{`5oqX#Wk)>K>h|Af|st z^^WeR?S9(IcSiC24KV^MT(t0yk5I<6N7Xu%VieB^1zWjUl&9?SM+@P2v;gsdTx^tg z!K4Lg_NeYc*M>yL?17f$LR+z=MH;LCvth2RD6?UPtld?$lFfReEmp>9ffkdp=vkCg zuz1QT3q{i-FB+}=)CyY^#VUy_X2^<A(S^yYkUjp<ymQYkYDK%JeA_~Kk!&@5g1#dw zUTTZTir07}p5j&O6&3jdb5&8}ZD<2EpW64D53+!btVX^XPJqoMVrX6(do{@N<q&sl zuQi90iKCK@;*Lm~dHx5OjPl(rz)w0E=Z<747@0X5N^QL?`2C6>@Q9S1))mFrV&Np^ z5euh~3S!}}_yIv#7W$?Z!~gV6i#Vg#H~nB%P-34uqm|=LtP;%Klyk!8n||hBR<Wt6 zt(Uosq#@ukk~0e7d!5k_+LWEqBHfsvO-W(EX{brU6HoelutC`StVK>FEIGc3`c#|x zsf5qery9|u15AAeQJ;KXVa=#+8XUrHB-T-X&A|)gj+0eoeY~FT0iGOH!XQDG63C*U zAr1pSB#o%a@rmJyt-6D=@N*J%0#lcCPZHQH=-TIAf6&C7{hw6lL5q_{M7)G@F=M28 zwijf@jD19w7D74XB%xx89IP3I8LTv`bI_B+7*+$8c*&US`XrwG;0Dz~PEr+%r@vye z!z$#9OD68(tIF;1x^>Iz!jmnq7g$?nN9WmaU$>TiL2O4dr?D5hINu!8DqLgpAzs2F z2%C<)F+^h2OK9v;R7$S>x|InHF|7G~GOz@8qg>)rjaJaC2M&32MyyDFvn_npZkt9U z$pJ9`qN%%4gaPPDh8|`PK|go0h<!}&2$J;3(4$TxpPMtpd^Dz|xkZf%dAc}8BX7YZ z$#chn9r$*%k+h~bkt6RM8Stc5=EK$dwovL*WK^f$N`pWrBRc@)ov}OFMZPuJ(Oh*C zwCW}VMoTqe`1X#IiDd<a#Eg8~Z~(}aDndi$kk~MoMEb47lZJtERwbXQ9pnrE2swp) zm?)+;BsQfD8Km}emJkM@@=@6IRMQ&;pL#?IKMV~Lm=Gqn1iFCIq+pD)#IEf38^X#v zy)-b7A)Zy{^1R4?zu|pQ0RU{`7atvjp0|tl=umMO)<NNU-jE=$gdJN3r;^pti82En zfEluK4xK=P>j;$t9Gf5VGxP`tBBq|DSz*3xD|&rHQyC-~hJ;LDhLh?ag7MZBcnvCv z19h+>NRR@DjbeUwNG=MCqlqv`5=Ga#oIK$ij*gnT9#f$_k5yroP-~a6B0IEW-4`Ve zjnHF_O1}7L8<GS8l5}4aBvFfT(h9c9*xgQQe3ax>Q)$+uj>t(0wI&gp=7a}{ozJYK z1SSgwcTjIh>kDm8al5ML8&yv~jHFnyoM|~J5l~iMMylCBfi)usA3LyJWMmc-nv8Rh zC2|0mT83DE(|<Jlk?c(ayyXUT!IJl~0Ae?WT^zp}iy*&?`;uD5-V}gdO3PYwkfwD& zyyUKR*3gGBL6GD&6kxb@?z5V;0a1|(7{g;}j0a;C=2IK^{K9`}jF|!xsA?b7#&|BM z&R{_Bs15D_Xe|wc&~T?{gB@uQgfuqp`tqWr8_boWr09V|=GrvxT6)otZ3jS;7Bjao zrCzwG1|TyL)d1vdqhz5`6#|T;DPSmMkD~gaazTT5*G3XFf{~T6FDc55vIByzaI!0@ zH|jwy)$~TKDk%uNWat}NsUvFR_^Z7yrIs5RgrwARSdc}EKd6WHwAif_a8Ps>OC~LV zUM2eHxN;Qgb216Z#UGgkmUR<~NjeeH>KmzF<{A_Uni{t0Yk%paJO;VbPBta8l0h!j z1(AoVQH?B0Mg~PDU~5BMB^WRxvyxJgQ@9dn+-$(j?L}9=RN)-bxQ6A*&2tO4_%V`7 zgom8`8qzCw7S?LxAxQMoER4ngU0ht=<!}=fIb^KN{!TMDAW`AsEji7cmcmDHMS%~$ zfI!4hwwALe#UEUWUbGUuXstPnQG!V_{NN@ihE&;RMsQZRwaDev1Y&K4>BLu-q}K&U 
zkzwA7g1{xlA9KXG_+Rnlia;^~%dd_91WorjEP(*mB~IZRmZVp>Hi9wm#>?pYykvm^ z^Sa=I89ZN;!v`UgTnY$N^l8=m8hW<CYzUd}D|{Pfklx6gjV7K66}BQ|Pjh83ZR5E> zk_<U)K?A#9?A64fWVvaUpF4!)Yz#}nEPNow<gZVS8Qu$8KBu^g#8*ReL5F6F4rS3n zS*F*i6L1%C`6h1-oAzgN8ImJm`BXVX#*tU%LoLawHQtz=i)#r^dUWxZ3>qW|<Q1Ll z5A&uP7uOc91d?7<2Jb;(N6WR$3Wia+K1q->hJZt;hi`BQi9sAWiQ14AsFo{9HxUDA zAc1U&D?<P+sP+`O3RF^tGd(IrMaCSd3Cq-zI=@0K2azLD75zf%-WcFPyDAwN1{p<F zGU{iQmU$?VWPpdNuvJFBxw5lUsvfQ!3v+TZS-E<+UTmB1kV8JDLbod6JSV%5Daxgi zK;UuLM01~ll8Ili=E};8+cSbJLCM5ZM?q&yY%PPQots7)DF`K%*LO-gN{~+=J6ZM9 z!OLB>fRZg_BP&;V)~>%D`rDzuCFDUPhg@pvZ>N-m)|`EFm@UndX6v?X+qP}n=53p| zZQHhO+qP|6x82*{%<TMTcIS_Mwx07;W@Tk%RlM<@I(4ElA}D4yTkE+ueV<O0R$6-+ z0L~27Gpcz#_-|pAj3(V7)dy7be4iR2Df)zp7-@I8TT*&CU>XQ&1w{0Xlp?bA%Na8B zR5(<cMX*{Zqb~Dg2T)>JO9DCdzw~YAZB^dxg7rS#6+lU@c#GRN?Bu{)MM~$<ap1G| zw+r}aI=;>Nr}qLMdojAVjB4#uezmS_ssM(dD7~X0tY@`dl)&C?z!&kD%}7#eG5^g8 zH-rIFIrFq^>)!fR;5gHt+&aA6kCf#B??Q1?kL%B4;d3y}1E!En>7?++_iUmQyuZJ6 z7sR(ECO+G1@Je(2UFVKh{xo9$5~}zi(hFc2-Yrl!fe*7}-!Fp>%sx*F0;KQc8lE=% zh~VzGl&L6*?7j>))_tv0UsLwt5r@~5H}cAsuwJOEtEZ$E8_Fm<G#+D83Qc(>9O6bw zmM{v99yR^-PQu4GeIl14LUxGEc$ShPGLj;x!?(0U3oJL9Z|p&LdgAi|NkXBKc<Fm_ zY&0rBaV_N?O=(^o5TChXgP!QRd}{*Pygv~}G#3ysfik9+*+zL7XfQtFQ6RtAt?eLo z6_n{vmUdWHyIB*DHlh<iPb4JIcz3ufFUf1|>cGqQoxA31Nq^dEd4=gt*@_aNZ`LPH zQddHM>PZE}Ab5Y7Dy<;Vs~JnRc%-;nK{&z};2YYjCZbKB?ozmQM+M;N@38F4Do_K* z4IyfZ4RHQP7EHa-9(7xPQ!G|#G&`~KvIqJ;gC8+8E>1&2fCI<LA=s4E6bu{#It<d` z{(Tysyo4!~iEGS9L(9Pd=s=j-*Hn=f<vOZcNcEE$k|Q|&?Z{O&@*UQJGRi7bl><4$ z@pNwoA8kY2B#D$hCxF`qoZKNl%CkE=PKfsiNisJ6gFV={>K@YHujHPoYWKd)?N^F? z)?p6jt2*;V{$Xy<ueGF~`(pRvERu#d5fM=cqZ{kIR^m<Un+a^dX?~##+ZHb?DT@KI z1k*b+rG`lPZPd8V&P}WaUzMjjjcwwS*Fsj*O)TXm`4xqMA<~Gnn<X`rfxLHhoCqHb zp@;-BdKx=BldcbNLNE5}SpW8K&{F!6HviI+HpcqX5}ig+8CWOCI!NYe{3xZvDE$g~ z;t_EPnFJ}3IB0oKWg|`z#$5n!r=`5aF=nP7eA!J2_YBV8^;8;((kUMw+L?SoQ@zHC zBW6Nlp0499GAXpC)q<!b1+gaMMY$~PiyGi*fs^h#n}>=V7bODXTbJVd$clRKS_M$b zKr<?fWx&8p7$)lhrACK6@m6A#G!meW%$f?0oP4W*o8<NoED#vYlQ|wtvQ;NJ?pqmf z!Ryp$!0^}{5Gn>+Dx-n?XbrnWe6_pO@yiTL%b6nOg~jV-F(0v>3>0xL_QVMjK&bVU zX`vszxgX|BbZTAqh5`f;+$fh2?gg8Pi(?wn!%8W3WhuC=hd0Kpke%K5kTR4q+MY~h z*>-nRlQ)VNI})Kx5w^c!BexkB^hMLc=mS7y&27%vk|XDb#fB+hfE6cKF__F0z3zUd zs|kPs6NgeW?JE7mgF3Y$1jsRjuSNLCd8J(fn$YXLi|zg8{!79|nKm}i`TnT+U;;!~ z-bARG^c+l|C%`XiHS#@OVr;lZ?hW*X%NQY_HvrFpxjvh(*Z&r4s?gYA0cq{6P;aUR z*A_0FRS`i*V=1q}P-xJ=OnOhF7thT>+7CWGF+OeZepqR$5R0rU(u1B4W#PHWzO=N* zP>V!1GqdQ6uMR`$-n8SZl}=q{j7ek29{S9v2jfAU3$H=i11r3}belHw^)>FnG)EER z8hypSQ`cEO9#PM}Y&gx~((z#8(h&>-iyuPS{rJNk#E^9v;&&>2T4LpS?q4}<r>9Zt z^#pa@q$Ie*r=ZvO*UZFu3Ie2n5SIYdtIW9-y&gdFB-sj3{f=}uLCzrsfNIqt)Qc;D z3)+GjwlSNlLn<Qnk&v{o0Au!z%fK&aPj<#h1n+};kUs0cGnvxp+6AsWKH%c$kLks4 ziQmssoUf;4<4S;X5zxi}Xf9)sB;evNV1XSLGn=||Alsp`C3kOwG^yCK5-6Q&tLwH= zy!5y=_^3gDv#P(8w*e{*%Cz+b&Eki#M9wYJKsxcSxQrUeY9k>?x)LOgO%Lo|TG5am zM*8`(xciT!kqk+>Z3#kRecLJ#Vb7fH$b;O_?a@dPTuN+wLfaPv7v0CQ4EZ|-@R8G` zBM;2g=Mz<;Up@kkfzX{yjjh*Q`InDF29?=F916ivmy0O;iSGzd>!g^7w}aK~!+ikS z>Y;Ku^e}9NJ;0R2-f<j7LS8$&aesX`bfTH%vrw+o%6fooDUYST)ImoZ{qgj%t+)S1 zj3)23AV~HF6rO-&-gcFa`it4ljQx}Ar@u9ge=$kfKaNZ8Yt-L}w|>_N?rMUCd;rfP zx;)WfP(F!Kyj(F@Mo48`RFwoz3LWE6X<B2{vvl7$`CSo#4g{Qy8$1%wy0lD4FV<Jy z?~e>Y-Oy41*DSbqBaKFKVx|BIv<kO}QAuhtaBb$qkZi5(TW$}HOL$&ho@2eS;p4>h zc5`hBTjdVUe#c&B;FiHF6^pG~2Xq*du;t<Ll>OXm3kzN|9m`Eh>rar)WjcoZBF72d zL6l2+ntTigC|>QB(uEKsn5%Y%A-X>gL#<AV<=(#@1**M+Na&RAa`NO{e^$iJFEg`q z8@FT2c03w)9eE?p&o}v#tnWhkOb*W%v<NAc`s|`k6SCfQB<qUz5l#0bH-OkH5oA<Q zx&u!Qr)UF}Vm;`b6kt88hC#?5h7mA|w`l!9fuCE?YMI_!O(pw}7uBE0B|C03QrKUo zQWisdu3Fzi2!C%Qwtj-V(7W_(DB9PRuz9*-#o&drwM_0!YK*>4YSg~Ml*|~9Z$n`? 
zkMbt4uWvD7Sa~mS^ve8c8M)Mx{KVcUKHL%Maj`7v2)>%EVX%fg0cX3Ko=9!jifOBf z7;;ZzJx;jlHr+~fVJj8eaMJVjwOPu`D%eE+z&O^N+GP`!x`BuN9zJ7g<K?!j^F};5 zK6gnW+3m<kI&f7bH`MS1BrN8{1C1fF0{y<AR_!;m2Gv8e%&1ry@-+2ZIoKAmO#f%4 z(Obo;4Xw1X1~{jcG~D#t{^u26cdP5fI$GU}>q_~^{)&zi`o!0D`?ao-orDM7VFtnO z*!fYG*@XXO*uzLu=Ny+M9qye6!5NEY6L+C?BMoHWl^MF<tjQPK=z`hLytLJ*Bj%>) zJ!aKtimSOzEwjzJkKa66CUiULFX;PQ6doG}M1fsNxBA`0tu2lVnzMAAF+)StU*QN; zxLGV-tvwktlfGPr_vX5Ch%Mq8{tPw(4Q^I5RWL1{nlqR!i}icmK&p&Ac{~IAv$jkC zsm_j}FgnKXwVivdYUpP$8@S#fu2DxjTfg}Rh;nAvcoNwzU0X|Rqv^8FF4XAP?Y8$& z^I13129|AYTXS3?hBK$7i?w&`HusXxSR+koqGv<m*AqCC2Uj5t7b{rt8J?d{XQ_@L zPr8zFnf`)Wex6q}XItI$f^H>OSFdUz$HHEnC3S`5Sx?`V&!{gaG}*HrEZ3}tnu7az z1V5FUD0yqemb?Z_O=TJIa^gaJ1fR}(2Oq)YUAF||+JZ0O;~}($6tB+I_89yb_`7x2 z9r26(-^z^<tYx-By3G)6$bFB2Pt;6f!z3|tox2e%CbAu%QaQ;_ioES+#5Zix0mjfT z`@K*2yU+n6Ab|%fBr{*AbFZ1?>S%MVj7H1lKsyYY#R+fvNLHnOddg{d2AXQ;=&+|! zGp&JpRTo>o#Oq<-w!}|UsUut{vXG9PUTzNUWXTN&%#~G&y;BpW{N_QbEAj*N;kn*f zI@1NA9a{z8U#md*ymfQ?D5RDZ*hVEZXRSjn8EO?S=v)t%qeL~?ONMHzsImc+TUwU7 zNM=(n43W#M+<X-?4w@Uc+Gv!v+)u4J1T*i(mif`*lQ%Sq9l_d^E1d(Hmnkt1>pu2% zqN%Po*1!ziXHXBCv3qMt^f@ih5O?D9016o{nwnk73!cR&&)dt(^nUXPi;YzTm3N)b z%H13eBksT(CtVoBEH~>(L(L)0pK2#qv>1seP2bIH>U1T+^D(usGlF9GyDZ#yzk2L- zJszMBKB+Q&nW$}3A8XEtIkK@zNLZUSjJR^MD~BJ_;hQ`p^*7pO1y7!sdTOm(I5bn7 zv0a7SDvzzfeW2yY{ceAoYGpJ7&u4X8Thk&mR>`dPWgOv0u@V^qHm3*Uwa85BYZYc{ z4R)J)d2S329$;gA*YVub8a!UQa;<n-#Wbz{wi&%JwD|x<&+axF*Z?eT+vLLU#PMBR zFme{szM=?wT?OlN>&h-4H(C{%K@X5qM`1lH)pyhg)YdjNU$n1UTC2*=Y$>^WSbb9N zjx5dMEEu>tUGo+_icYRMBdSc-`&H2{_IF%grw7Za>28GjZ`m$M(c7QCgd3qn&Z`nR z(+pW@S<WLVTJ5=W`<_4S+^l)NTaC?8J1)*c7)dlV@Uge%B6-6YM^gy{+=aJ)Hiy-K zuHbc$3BXUFf@YH+sN7q#6jrG?r9<D%u568dgAjY3A%Hnni(1`9t{)=|GR#l;IwXsI zAU2nbaAf^O0N^R84WjX1n)U&gejnyvTw6mz_|2v<f>=G>6-qT=_?PLI>9su%Gf2G7 zG!fCjUxa!(Pmt08GjuKXm)k4<KuFhZJI37(2nOR7YWAAvDNt>s9ly~OC+o!&$D3q! 
zM1*He!34b5)n;?UzRY%3klD5V9D}}02_G%dDQs{q<_7cK{j_r?n`R7=HhX3vhvQaP z#`x1M74+ZtDO>qAWdk#6n>GHlobtihg(y>r7%MD;1R))T$6imh3cTF0^r*IfoccbR zvRz$fx*4yRH#C~O)#i1vz)(B_O|KnIqh2EkZKq5zJP(-QbnpM77uQ|TS~;>r1DxqD zw7(ccP7QcmZ5%-(m6+cIP6?oTQE>UUalpVHSao2`AB(?uk%B6Kw4UIKZe2B;f5gm_ z?%<SK+~Y7!fs}fnaHASH^toh=h3`_O-jv>Cg>w?^beY8*ak(cWnWaBn{?&+q<{xR| z>5pdBV4%@gaiL>N>1`Ofi5&mjSakq>LNHCdDZNC{+8Xj~c>V-{F|Co2A%j%BBOU(x z?h$e&6^&sg5=ixB3Q)|PY^%e9(f|0XEC5X8mneNv*(PP)$6t{fgG)T=BNiPF@L~6K zpop9Vd=Fz}@a{|2Ms9p3Eqy3O0pn`_;|xDLGf{8Q<M6X+WA+KQ)`^-Ys4WC4J3wde z<p8ISNZn@J)<No|aJ_W@t$4u02cV5x>ry}gL5@eLVX^>)Zqtl0e9>)bD(xQXiJiB} zlS-qUwh_8@OTd>ig6ZGBd7Wn7!&|zYyc!KVFWHZu&x73;=nz#@s5_4)573cs5ncld zq;^8=D~~UAU4$2KEmV=WD)PIpiWpG0TR`?IFTb*#lczIpo*kg{Hy6?azO``x(oH=m zO1nJFIra5{KBYCs8f{eMN6~G?7~6@R{L>nX>+YA-V^{QSJV3OaXn|5LKJ`MBvkwGh z=3+O8W#;_jHO52_O*vcwOZ<@yhB1=8Ba$aarby3uxazqp`oyS`S;(!aUoOGX>9u2x z4!K5$`Y;E^jCr~0%Oml5=s=B-4|30r1`-FWu>3lAARdZG72<s`0}H8fw8QJ3j&c$I z9yw$Yo;>?jUV!{IFC?y=`(xfB8=Zti%as>R;#FSwfL(-A=D$2Yw-aMZODZez7I@jY ztSVm!lW>|jD=x{A@rkvmNjExAgp^`g7hK6QuZXV2?H$lJc$7KInII!^aX2I*2<FOi zp;tmoHL)8Dijo*KM*=nQ4(kHC`PE)rH&4;Xpn?Q_CA%xvquS6|qvT>9r`k5`_lW)n za>9fI`liiWTPL}Jz9BB{0r&mm$)w%kt96v-qOo8SAH0yzR_;*;rjw3WSMPGUbZn5} ztYrSpAES)=xA?qKz^~}0=p-~_gP?Z*!PRNJ37*5XgjiuxuX!f}(x(Z7@gjfh+8UUb z8XsVHgC+w=6B}bkCkGP)>wjvth8B<v>}>e-`2W<Hn3>pE|JBU=AI;p{_;m6Pwnj=O zPWW0sRbf$l9eg?=TPs@!B|8Hn6a0TV2stw0GqAJ&9HNNN%lq^B&vDpT{>wPhCN`!{ zX7~(jtpC=NPWg|W2|k^=oS}t@krO_hlCz=HzqLqM8<_s=P%}6F>CMQ(iccqQVs2{Y z1j+E<&iv2Nia&?R7&uz~oSx&~HwDd|9OX?Mglw(tY;8<zobcK4=|s(~oJ<^kN-F~= z6JZl0TVoT*pNsqtDBRYE`o&;<@!<h_zrPnGZO;+t8PH8!A;?IPs!wMCbv{qc`T%W0 zzsfRnl#SE08p)^lYA)CY%HL(TjSn;-AN`Md+o;|%MWtB}*Sr4*i2qI8e;{UK`xh?% z8}@(T`%hf{9l`%MT$maE1q<s>{lBQ>zXQX;@c$3YRi7Wg&oY4jl((}Bq5}BO#{aR* z0GzY!O8!q`|0B&mVly-SOYDD?^iSJ=6`P&@-!Jh0i4F`ujQUSHu>R=w|CU?A=119f zw*TlF-^s!Ghe`iI5e}yR841Y$nIix2_&+If<t0;tqvnFst97#YPOP!yL+X>aouEmh zKqwLy!vZcb0BnkgzYrNI?FCCwrbVpv5kK1QIuFT6ukXy|x8b1gN+g5<0nLU@QJ|h) zSk#N*bbgW9@OS;a6V`C^CmL)-M9AOkj-959%&rw(%d4GF8`m`*my|7&e-+8yo_~A3 zXTD!@zAwJt-=5fZU{-A3b9TNRvrSvQA2#mRx^H}5Pk5L3zMd~^dvbPGTXtmRxU_pd zvp2s!KaP7KTwjtm4ZgH@N^*8$Ptt03>d2&P?seSDZC^jvHge$(Z$Ny~vh7-rHg>)i zp8fZZW_l{+JiYdZa~@M(XUgqwcglMhKH9#o8}?OAZK~caTeZIRo_}m;xHSik?yYyX z=LcUB;aV316*1)tUn<;x@V%a1pT8u6sXVe@t?{}(l{R4ISa=M+Vc&C_zZ<qz?To%1 zSvnu43%RT&KX%TB_Xrkv4unP>VO<;UQj)i5bPt%+?*k9ZIKK{Yyg1xvUYc$BJ~FOA z2h3lxu}IKI|8jp<5Qca_2fq$P!{Nm!geP;Lmm>qnLbWkeSMz4`-eW^Y73Ady3Ii*o zY~Su}#He24!|FM#*_;AW2J54H(`~rz2Fus&p`(0|BVX$2v=;B?f!RZfAnH6k^xoWk z&TgFfnDfi>wO~Q#_+ZGg;q`31@Qmc#X;;rj4G2ANdtve^pu})N1ycxeYruFq?;n)m z!0qp$Lw}0#Vg}?z)5*wwoCoqk2&Y8g<-rBJt<T%Cd(NDJG#<`;fopoJ-Zzo_@;#CQ zspeZyYBGOH)g8TOqZcWF=t15e2e$IQ=Ysoo-`E)R<YYUHus$wL1<tjv-IWV5zu7Xq z<5@rnz#cpsI}6`iZ1~zeKR6@8)A3R-D$M{+0`ZLq$+p41Ti<bDw?PWdF=K7e)~bU- zcW_d@ayp@{1kmB`emrdQJcHjDcY5S{Lrf0R(~bz%dlR7s_1(ieoNjJ`K!ko&F1i5L zAm`>``#YSOml{BT+sct?NWJu-KV|tVCWHZ9aJpBO@~xf1u;LNc;IF-yEnj-_=c{_= z9ZM#dZyU<YzLU1<-TJ)mS+UY~J^kuz)yTBgOz<Ts^>0Hn897E9MV&|(B1Pm0rNoU5 zIM8<3nJ|n95W46gPn>`smNtQ-*e@!+5x9I9WTf)m&mlucrF8gCI#AIlWi^K-wlxj7 z>c&3Qf$w;}e1#}wgGh*}*&@LPqKpY~@7LZpfYj>@_|s>-vsx44h@P!|c#A`}W8}1T zbUf+jk^sx0C^jFD!Nxa#b;a@r>)X{M?mdCGWq}R!smr9sGKFptT>g?7=j<0nMauZ2 zgBc*e0rG2XPbfM?6|`MID;pkDS7X*xD{MV)mx5#pbzBQ8o7IT{RCOJTl{;_ijrHy| zWxw^d%UgGzM<Zt;t(ET>dZ9(kG9+<s2&FjB@>gOwg<qr6wB0!5YB(LcaYx*DdP|ty zu=C!;<tf8{7>UN@7zG3ul-EnEhE*;=<45a=g?}Y+R<bxh%^J$3NJp#P(@j;+(RFe_ z9^@alpcBSJIL9lEIIQ<1f@`lOh|68w4~#y)M(sf{m?D#oXb57CJB_u`i8OTc=-`XJ zM5ab+qsVt4F$m5I0PwTc+ot>i*IMg!(s2U5GjZHda=#)t<RPFB8}oLLWJ%?QPknJQ zN@H;9C}Iz<ai#$QiV*$O9ZC_ueM3P?nR~rpM1G((0T#b9aU@p^xKO3YC<;GfiFasx 
z{yT6I&oxT?-f+|4AadCOI;;uRfNFU5Bx*wS{MBjpR1!LCVaz|SVIcDf@lz^{pO)%r zcr8f~rm5RxQcU>J=#sI|`0nH-AoKB=0)i2>?6c4bpsR`e!NBWtSt(V}*5vTR?L@f6 z{`&IBG|;9Oq}6+$&b|yvVSV+yX?&@6IrUl5Uztpa*q{uaM&S|a--52qG5o3@gJ@@* zS~KQeYQYwU;~4r|s9}C9lvA%oB_Q>DIIusB;ruR@1Z=q+(7p;{YPd-sp41@nk+lXH z<I)TDJpJVG@Vzo8Ro^b4FvbQOb0&TFl>x~dzZpR8PxOYJwT@!}7?sbmA3p!?cu7`3 zDEh*S`|ZJXON!A~!#%Lph1)TQ@capns3l(PTw2Y@jqv2=L}MrAm+RegA4YZTjWsui z|E3NhyMz{8Mi<QQn;V*}#V4T=;eyd9J{_>7i(_gHeVDyuxSn>f%>HU}y)qYQhG?sG z)DG2OhZ*QHGRLTZpg}m|5((H4jtn25iW&apdCprHDBzMsSZXF*CBTYI@(|)VUq6*a zuorZyj`7uDU^x`28LGPS!P^2Q=JQ2j?1aEp%mcpcKnRxdOghN9Tc>J*BL^3I{FTb| zlj~`DGJlBFHl-FV-43(~m>n?w7Z@16Idf-4(>$hhvXBID=wf|<lx~;O9B>9AoOYaQ zXS;UXw@pvFzJ`~GOQ2@uEeajOuzu<R2NE=;W-3qtVeZ1?6bQSfp6Tkll~Z8h4k@vs zapQIT96nINV>1ATY}d3PU>0Z71#vuT=rC+~q@a`nDg}*BRp#{lqcqEx(?nk^laoP) zxPg#|1R$%^A*`$+0#S~EI&O?9J7*l8IO4=QJc`H0S=ZA>;HB_j!q4=1=}$lM*KxXx zQRE=ijVbP1!sI1${NYH^=MMC<JRzBN0rAx%NhvRrY6b)Su{~ah;BdP4I%^mWlOaOC z#Q;E4c4Ir{C=wtmAl7;Ib$>o=B=OQS>Si<zEQV!Qc{tSZK9g8vnBXLJ^3(Ii{ZX>8 zc^0rrPP5FR=v19jO9|B%P*mf#r;-3&P>_0`8I%cVuT$h%-J}z-sX3;s-nkgcWsnn< zNeF;c-Jk!<;G9DZj3gllkhPc<!d2kn?!C`8n6k;6)<Y4nRQW^A)Uv-P1@Py66;d3J z>OYhAm|1*B>nF#6VGbGx@9hjZgSG<)SPBjB0eJi=7tA#S;ZC+eoQS=qBo&qqf?Ij8 z9fL!06v}-`5Cm^a)o*N}CAR*U0wzHER!)gm6kO5I*Iw^tt5Wl->d)Q5QURG-XKgx! z>-|oG<nQM=G#hYg31y1a*9X&C^wm9BnL129;|BqWzb(cLCD((PMuEu~@MTK<<68U+ zleLB>kK{B&-3qe)ZCAunjMZPJucK&)CW}VG+`$6YJo#L+oE(V;25790Nl*|8qHZgZ zY4#aV#7~))<bzgH>T8~m7=0p0>|VgP;K(I*w;NT;TguPh1?LZSZwN&rF$m|bsL$wa zBUM}uIDBP-or&j=^#L@Be3<$Te;&iaf?W(jY$M5@FaSFZg-Ov7WB{Nqg(hz+HcmAx zxG_dL6jd}Gg6zQ?P<8_jU@RBA*2#fjZz)P4OkkC8yim}xx&?`ZG#NRF0Be(pq*TyN zc@hdUMyT>@iG6?)7lMP_3HPhhNmoS-o&36%P&vf>c79jbYF{85c1N;&mPXYql9Rt| z(TLRLoCOWsKmn9wDoPD|E|zTFbr`7mT0y%=Gep0pu?!>+PeJaEE`AGqaRP9Br@DIV zk~DZ7GddxUs4@vLT@?tR10?7~n?>*gTnUT76;IKCK#a0DJ*{1O_+FMIE}zahYkW?S zz7cBst|5|?6egc*$}#_Wpo$sxOjj5M`mkggJy}<xIDbN1BMY!MiaXLso<h=^wjESJ zyR(IXR@BO3jtz-95-<Qc0@;Kbv;=S5N^4cDx070Jq9R~1q-+C)c)@~${M)mGC+H*` zl?3(P?%vmAbaWBP1P1#zu>v_&uoe3*KW_I!tsrSLVYEIxNYBIEW(L67U@$J-vZNV` z1e6RGp$X>~H50UFl5k&)N6K;nYI-WurM$)ARZ%&_dY~c)u{zqI?DBRh(`s%;sRqt@ z+r5Zi26?>`{E@RGy<2w!d;bX<kNk8F2VvkUg&*5$P-Ysa9qkCNA0$D)O@31H4+rc( z$36CFrlU~M-7x-Wpl1$jT~p_khF2P~mo;58g~i%uq-s{{c!GYxlPQ?Z6UyzUBSgb? 
zg(-nudWjsZ)S84ae!zj}^ud#CaXh_yvQKlyeYn;MKxB;zR<UWd#Sk~gFFFWjYassi z7D%o%<XFc7elv707VsRUN_7anQ?jxcr0Gg0zklF-!b{6PPeh{wvGXl9rOB|2!U}8= z?9mrF!p(>T6xyzIEBA#GiI@<K(~QbWJnFwUe9ES<HgGlRzGL9o+ad{Uf$pn7c5g5e zcaD|44ABQ**9^e)tjgQ03&-$4!a_Q7&gap&b1qX1%zU_rW)~9U*RdyURv7NbO!-wk zleG>Q5<RR8njDC39=6M7QK?E}@wP}#Qg|JG+>500HLXZ#1^N8jJtdS_2?3@+ypA?_ zC0xdRBGjsfi72*ELF68Z6d+J~-qFcv7z0v~INpm-#UlCQ3_4{;GfG4QQK4rlHphf6 zBR-e?StNc26-j{<5^9AD$^oA$@Xc$S1pKK^_AuGB?jF*V{1?QUJqQflC?0HS=QXnY zFS2HuAJ5um-p4-pH7HW~IEB|$o5mk64KWQV(d(|c!Ud6tDQcl)sCKSYPeRNmC;V$# zRDu>|$dJK`U@UnnSwhSSGu!wd$D#|6y+8dJoEgNg*~0)A75V7_^_az?OGGFju^Y^@ zNq|K8qA=KO+aRo$>HC2}#Y>F-FT{u51dLjRMjCy&9bNL%yR9g}Xm$xFkw+M&;FSGM z`o(|rYX=KNr;ird_)?vbG7iw8VJtHkHF1<8-UC&l=|D6m{-p63fu&^LV^Ngvr>dAT z*Ef=|=gpBjiY6G8-;J$N$0MXP*NQkHK|j}sjKDJ^GcbfM34ik<?X@95q8am0Xuc0r zaGRQ$=@2Erut0=C92N-r3f@KN92tNJokO`t$9S%2u<10<KAkel*n}8jt0ucJveFv2 z6Wo5;(h=7fX>h90A$BuAWB>iVuEjPm2T8amu%bt??=H4`3gc*F&g>*OCqvJF5c+`8 zI`1N+^1N4h$-Wl3u=i%TS4joJ*BqOF#-tjIEJ$X;$foH%3q@GY=y%4v<&y`Z$bRda zTGzi_SK$VgK}VE3`<R5^=U|j<kWu?Eep@@AzSR`ifJwUqLC5?nKp1vO1U(#<$a_7R z<k=u%hd($M(pnXyoDf{mlqt}<RDik<@6sWPa;7M;-sH@}JY%7?v?=nwz10htm3-_9 zWo10<=aHPU{I@ZLbPz5B`eC{i5~NJbx>HUW*qj1|5_e*dJU{YkK_NOauE_+<K0r0J zEFGk6_&iqfozr+`aP-+7f)4h|^VmWe4(q<=4l?1g3<1zA?~6NVRzBp$(q#I8fII#J zNcsb`&P4JasC55u{IjeeWFwr4p{#fn5QWDdA%PO0AyuJBdr9twV5|s@`i91{ApN5D zjz!iu4KQuzd`&Wu@(+0ENMH(SHT@kqo%>*pme_5ORKcIXU*cZ~>*<K22G)ERkbM!f z%!-dekr)LEc4UJQ#_qkWiSp&8jGpx{){)@!`ueosoMWPjl{;s2>J)<({gKjSO?lFu zSgEv#cGI8m&<N4X`1X)tFX0ew3xtln6%b?g^^^KA&p_=}u7Wf35(wIyeGu&|e4R3p zTq+t600ji71w!~u<dE+vP`<K1xyiB7dYFcoc{tyVrfbthm~p8ODC0;Jl@nZty{rtt z_NpHJh*y6P{6T}Uj(;mtP|4k{qH2Wzy_K`N9Vd4y1t?l>rgn=26;tYG;@M9=m`7t? z&4bq<hd8#uaF7x~WHO%y*t&8Hj<?8Hx&l;Dl3zlL1{o;G?SGDEC)4JwJ;vcLp^5>| zb5b(fjx1L=@!SmJD#dYb!k<fT2y&ru!U#*yADmXiN3HVcT<smKElejzC8v>tz>*Os zvlyCUDcgPyfb<X<rGZyqv1PdJl@d-VZQ>>bCuRfV_oiMkD>N;LqNGZn`?b<GnB-%@ zi#eT%3{y63CT;{7ta?w$#X?_bi++izuiTot9B=%-+Z=L5DQNhpL=XOxMGe-k$&7X} zJ=@hs7sDTpSZK(=+yN!Mm%d*Ye-Ma#u`Hfb$PP52h@L=E0nr2-vk$>&Oc24mT172H zIn%;5bgtgZ2eiaNqs;7=WWSs0bJ0=wE7PgGW6^5Wu1!7O>f9jM{M={J{Kt6^L17G4 zKqB`lw1c|wF2%Th-1QyRU}$?Ny1Gs-&RAXT7X5qyb1lhr+y#lhz<L$=Cn?#lr#XV= z)C!2EcI9-;Rg@nbW0dIeE2fT05z@`obC^DqS-__Cp>RkBtC=T$RY+u#vW?287TT<Y z+rrOrsyk8kfk{i=*EMQ4LDd#C_+Mz&Dz-7QwRNSb%gow16g=`<Mv>v|ogzbHqRT>o z<)O+^4VWDi#CVf421_vukv|x}C?*JrMi(7*hRPe+K-&9HTu>fX2bb}#NaxPclI07e zNgq%KO^>@#yl>K?d+t(M$k)tnr5(Kxpv+||ib<x}0|M0&k`xI9MG;<*8<2~DX`E|R z_M2Zq14G0Skuf7cBE_$VOnWQQXksWKc?{slq+l}iM!q9EmIO1Fny<uz2CR5;l&+Hq zr~`P7B<-<-3K78+*QS^LnU%fS+Z``R!Ve*Cm~x@PHz{Hh7LyXTSP;*~HZ&gs2&6rk zH)0WmvDUFS)J^GQLzH#!K&Ubq0tD&JQnwdFef^}j&Cm4D6WBL|A^l0@AszT*x&W<K zg9OC{fHs*Y$-Jw=os24vnhSnUWpcqxpEOIwmKicGOgHHO-NY)NCQN8SpF*o1MP_VC zxs6s$LeRBliC?raQx+LfzJ`}7HG_FsswhlkKo6GeA%d-I2Wds@qQouIN9(NXzh^LZ z$09-~xg}e%afB;%8OUO0o-w7JF4BcbZXu!oV0><1RV83``pwayPZkU*!6jd2i~&Eo zGzwL3g&2k0(<*7uOuw67L;CxVj+uc`Nc)}h{xpS9<$gNTRH@=812rC-QP`dAZ_3%= z2s+l?BP>|^bbO=6JXX^ABE=1Nnqp1jX6rweMl6{IYkNfU5O10%tgWjV<nj6A63|Mt z2bu0!smctd;C;D-tI>_(@UTUFv2QVcl=76-kr|m#A*Hk(gbey~ze-2psuGmK3r!Xv zo75(?orYW*ZSs=*9ug^|iC2K=(wF_ZEaeWwb`MIK6-@#_`y2Zjf4&eQWeP_qDcQqu zuV|;1JYFE2$>fUrrW<y?d7769z(ncKk&>(sbqNr@tbXF~c}<EbSWfgytWEA0N;@W^ z(2L{rlGMOO7K*a@arYS0+<=t48pipMT#@LdEEjHPu;ULZoQAm#hHbics6zI*cahU1 z%9E`QA7H+ik*S8c(^PdmF){{d$_A#`_&5y`2tbo#1gzE2gO6~S`}3VcOOpxQe>ic9 z^!XXkSXHo8kj{5L5BR*ECw2xe9Pi(qyvA*PaJ~EK+Comgo<6=`QT7x*pY%R9W}tWg z5bx~_^7H|weT~sMr(+`t(@W+UFR*QsC81Fe@3#w^xTR^VT)P27h09Bv_G5CsmTq*L zCRo0M5?89?y*;mEsEhhhS-9kPuqab99wv8zF?5b_Lhndi<4p0-dJMLO#r*qLBnb|F z3E@To>TT)Mue(tT<&K&ZuZ#h@u;1kYiqDFbdlD3{*mKEe#+0=KUnwC>l`XpbVbGhg 
zTv201zxImDa4<}xI7Lh_OTBIOqZwGjfQtTHY|@Unb6I4|9>U5=mo!y&wii`_xqv7b z>F8w0@(^hWtN5aWGWlZ3DOsaEp4lyouQTjuM(V?_fG8dfFehC^&>TNn)s_-yHcrMd z{Oi~KT7dzrhx|&xI>FJBIo^ioSxs{1589Db+RtiVZ2}x1708q^s%BYT%DG!aGF~ie zCmv2L)ICoXa3n(&dr1A`+C_Kov>A|zk{YcqZ;-)UJ2|i$AE^pEcV=IGH0)5Ubnj)M zO>(>fu2iqaZt50N34Kthl9^?ltck%0O~Roo*+0jK4&K2Vr?|M}yO#i_7&L_h${L+^ z-iaAmp~i+@n5WdYl;Yal&G;gDn!#vD{^6*J(XUP@)|8yIPkQdA(r>nE{4zQKN$i%s z8&R;?i1u6#8X#})vPiu+ExJGPhjVG1oidd_#KzX%9ym?cjIi`Wot48rI~@I3GhY9; zJUWB4^b&T8s*~}NBOujv4da-c*O}(q{k;!>2uS0XLRg*sU29<9X&5jCSz;D2^PJ04 zqfDiutv`dsAvU&i@Noqe^vbmtATj4YVk{{t%_$CzDfYXhBSbXB32wtz!gDPww;y0` z@ncxnjC|qz{DAnBu&rg`9df}=XFSyaHZ&Gd=4olCAwfV5QVkBf%ERdyxe#H^LwpE4 z*{@O%h;_Ujys~?H_pIcmbEd@?rM7bvG5jUK2!<BzLE2R)G2XE6fp(a_T2MEEI2jh* zT6LuZhn%)?%OWvvTu5|)?)*OaQzC>iV~;xP{uNEOyyevx&rx$}d>xzL7;J$Cuvm9N zu&w!!S6i~mJA9kL#XH|mpM2jn-=7(37w&I2-Uhaw>v~gBXrD7(Sajo9IkIY8Uz=Fo z-7f<{nOew>D2$oN+S<olj~oMLZEX<Xg)46Nil$;s^kZ#Bp}-UTu}f}@1aWyV=+N$J z5s<HaZsR8+KGng0W(B<>AJ7;jnVr*Pu&0F7rXQ!rHhb|hx8K?DzD8(lkz~=il&BFF zSUE3=ruuDGfE<%?Xz9%iCq}>63&!j3MjUjrC(^zcZvA2w%Mt;1QvD{m(?r>-^H49+ z;<O}V!FQxgn2l*jJ1YinS_Qx6az54+rODJNfO+ZFN(j2pek%v*$2t<yBQhFW`JkW3 z1X>6#OuPM2Bx`K2U)QDyGN1X2W6qX%)6CXRM8czlTrv*2qAPIWoVFh!BIWar_c?a5 zLDo`xszTWpEb|ASW1!)*lGYt#@&QA2E<CM&NOUQfICc!Tb6+o1{w?97P=UT$bKQ$s zI;p^If_w(w%*`w^Z3FEPwYTc_9xLP$=%d1;M?HLIcLqa6m7!zhRa^#p6oPPp11@bZ zSn<Y(edE{2cHl%zuTluXekC^USoD;ralsw-p_D_lpGBZL0obI9zcFpHDocKSf$mGf z`+#fC$6mm9=chH{2)Z9e%8SZn2&zk8WAeO}>N5|@1s{>LKu{7-X#Ek|bx>a5c)RQR zEOwo{nS*Rq|F6|dNZGzs)%!r685ecIjO3b?YdO8Atsr-##Tg_}>C?fpH)7dNlKze0 z_Yk()3PP>m`E4sq^<$AfC@3gd)Y(>A*@HVy*!=UxTSzGK6#M28n4o%U<tNS-WPPMo zhidSpv#w7|xhaQiSxv$1R!O#7r$U7%<~RJXErGEGGXcQefxRWN<N(=n9`Gj%fCtnd z>bcSYt3v)6%j6e;7QuwX<}uGAx;)+b)}upMh!%$pPoLMO_lltx=rlUOL#w9!t&~a5 za4ky)vv@KOm<uCSvQv2U6V2(vWKk4H3f6vovQijffCdT^T)ip{EvQ1NYnhYUzgfOE zp1(^&0z6Z@H;-2MRy$q^^B1Jzw6ur$Ht(dvvdjjNopc7YH5q1O-Ks_p`|tv{g_V^d z-im}9nj$0G71^Q#ptHF|+dG%FG(jmuDzNt@K!Ra0`#zu+cep(RrD}S5Tn~_hxKESK zm5)wmQU0D5dx${?`^)B=>$`BY0{rpApZbQRyj<f-FSo)EEES^G(=<SaCDA@3VtR3? zLLaeD`7wNS$wRsCIUg_!VbK-4xiM4?Jaq)%tOTN*sne8}-ls3h`Z`<f(=5r5gWjYD zBfP`<?i#Y#Pl6>=pceR}0WwAE^VaO^Pr729dD4QBQKWhf@M|_1mkL8vj!7i0Kg`4N zk=uGo7G+g5n2(+rR^LlzsN2638iw@xHP*`5p^=s@0xU(c2kZsRQ4Jv`k27GCjkqI! 
zu!7?Noa7XKX4;mai$a)i3X)U0Q)E%TI*1KF%4e4B;9Pp-s2ezU^H1}Kgoy_RS|ddv zZ}Tp+C+p%!fozcA>%_rxfd)JxEqIMmStF;jj%``eK$xTgr3S0)PgVs+L5TCF5tb>u z^D;KhkDR%N+#gHv;?ZD^Qq$K$w{S!kC5-h)1?U=BDW^RI2oUE30n2TGm@9`>HYrSs zh>%1~;RuwdWtf!*WKM<%uwSy|D6W;r$AZu*XATtX-~_8WcjQ$g*i#=65`mi8g>UP* zFdI~oa!y?N7xhJ#7W?tybQO#s?G2Zt$lc71E;@C@3xfr5KdW0-raJu?*83_%Dfj^M zE0emUvtBPkF%XDSb#N_0<W!6{PA}>gS@-+|RSDh*6gqfK-7ri1Rmt2J=sCZ_mgy7t zKpLVAcrH^Ux2fWm`6cDV&cK<~S*WHKe*E)FLu1$9V}Gljtb>yy-t!_}u1x@al5xrU ztn+eVN{oiC6xom-Ku8zonrG-|NG;<*w@kEx;C!EwkzOXH=y~Y1^Qnw$L#vJ}cvdQ2 z<PJ+b+ZOBPZkR}6wBkdS%a)(;zbRea8dTyGYt~ye<pD<|CaWZu_#L9YED8uGWNOv^ z*&+vpVob=i!4uaz0Nd$#4z#2+AhE}!m<~gPbMKQa;Iso<6jkNaT^49!rR(1B+~j1k zRX9%gWMsdF-4v=<q0U1c#VAL@)}qj7h8t3LID$M(&eyTTk(=^wwZ{lv1!x#-$n`n4 zn8x<Cnfjfe!agaOrl<6iQ`s<>QC2H=u@WH0jRAW7Zp^FPHtD2j<U=!Mc*`&HYTI{Z zGhE5|hL$-j_(>N*%~pI}NVcV{hEvA2uon|LD!zc6GKrC)<>YtA*Ox261$jqyjP(79 zG8juw2IG0@*dr|#SU?(k$<R?5T|34vcvrfvFF&svB<%FGG~dqN@!8Lh3B1hm;|kg7 zA<(f6c<7aEWDuwE+uuowu<QJ1rCHl0<JVn(c`0i6dR~TOLR1Vpb$Qdg?-phz$ap^{ z^mt{y&u>t#A=Ko)vGJ}ST$R32^lVyHOCB+f^=K_?hV&k{i&m~T801n*44N$i0B6S} z*tA6OJM=!T;B+52O!*K{1g)I+t!6`(+4T@HFfkub8Zie$p1aN$QoE^mSKMTJ=1-TU zz<R?zxMk|}?pWG(fv*k7RWj-839qw)(DSrz56niDxe7MLaFhpwk00q=|IV?tgb|=$ zo0!n8{xx{Nc{EKMrO^jFoHi%|N*`fH{-i7U8@=sL-%l65^~O;hM(-<{#Ui)qzzQ0} zn;udV8bihAdR575=DXJ9m(w}7$iVT$gSJ<VW*6#pcjBYf<x(>}hK+uBYQca}-XoMs z(mp(uMeNqp4J4eQ_iF}k;qveMxksv$+e1*>Ykude!_D!;{c*&Z9M>kJ_SDDD9;``g z07*;W_cYu3E9dt_ylL&8${S$|c#UB{T{8E;<7(I4^yAUKmdXx42KGm#UBuC~F3w?s z{pFl$OPJi(f=|ce09>GTbPfFS#2QP9|NcA{UvigtU--%=v1Jj|<xnqz8pSwNs%5OC zNK`?oqEBATF$-c=LUr4`0aje&FaubIl8@b09l`REsx*ISfc=C%rMF+IR16VlGdlE) z_iM%h8=vCcAM+t42+e!&5+8Q2%&v<Wi=jp^15=I@a4PmfLbu9MaVl+-Tf#3N+$M>j z>{2Hw0roty>RCTbcKczv^C!6Rzq|~m+?{a`m#1&~uf&^Gsz`F*{-8oOtnD_i99~93 zUd(p%oQ}&lJx}ttFx<0fmL}1WDG#H8`bb{ijE+sl2w)sB*q0ZSg-mDTHIgN>rc>7Q zm>bZeiuMmCeF+BQewB=_Si?j%Xr^w-{(Yvkj4~8sfBg&W`!D(RI`2bcU1CWD^RUJY zCQidrIfDoDxWtn1+0VzNMf=S1^Sw*r1X-L_3d*6)BzUvwjOgYMvp!!!0e`R78_M!# zaQ(dAw|FA`tVcFW2b;l$_L_1&Sl~TMU@_8l$~A>b?<1lRPs^y=m#QqR2l*=>Nvoh- z0YQsd0(%S)g${mQ95VSpw;mOkLW5;FufXJqF4O=3Ru2~(VavO#Z9+yW<K~yWM|1Ra zvap#<(D*`(IaM#`!b*VXz&N%F@KjGaMQ)f%8|IpmmwbmAra&8`>sSq6Id{8+)Ag!A z-D>=a4u+j@2c}*zU)H*zivhaGb_ZtH*o96@^h9(J8q$I*tu%V2QNk`OG`#fUXlmvz zvmEX@7fxS&;rX$sC<{A}ZcZ?NNb2&(q`o}!RI+~L0M83#q8nQQY!|eQWj=RcNHw_5 z#ItAoOx_Hd*`YuVj1W~4%=R0ZGd?LX-}tpfch4`hDziO(CA<&lF;Y8-gdnk(-_6bn zJy{fOrL`&5@+pp<<w&9d!hn`hp^nKoupPxQV*IM5_I<t{J;2XLz5FfCrdo0&ujgcl z%^N$oqbjC`8w3Th*3h^^1Hr~}(PR(H&RcjaT&`%;y~?QQI7jzjR+7S|hl`uuI9?%B z*OWsX_FzupwU(eOEC}21ecl0|xaU(GM>{o5kQO3`$nv7;@84>^9NyjSju~-Z3K~Jm zbL2chG_4X&&|8~Uz7+oa_*r-@B%4zE34nbNd4sqZ9v-0_b`APKxB=lqa+iS+tYl)L zAM<zTOtWKfn9C@P+{bGo4Fs8}iav4arH+p(MI@23rD{56yC6<ib|Ae|qe5{Jc`q6n zpSU6kir1ncdd{s6d_m^>R$fdXpyKJMu+{dv*+TCTl%adN#1`Z^EN6>ga~C6YjmN{# z&-0glajtqU#7kY@g9)l=*Mq(G!gGz!JRMroZD2|uiL**dFu_uo;W)9IWcRcM4GP+3 zW{RLQAryQHgQD!p0UMsW%_doKwJmi{v1X{Bdk|}#7ofvLqYU)$*9|x5YuMn4bGm&1 zPKbga7un>%o1MEJ)vJBfQTCp0T0vA+M5empcHarh5N*K_=x-L*BNi;76}7nF+2nlQ zF5;+Z>G%48LPEA)n~JD%QzLq4)4L_IB%76ECPHMgA)<#Z*&Ceh2X>gS?yt@s5y}H% z-Ku?15F|V96o7adyDb$Z3T2-H{6_ouApr;vdZ2*f`Vk{yVqB3`%;F^gHv}r{LlOcm zYyDwE-|Y4((1Au7>6^X%P&AcvFr6w11-JDaa{mRK@w8zk&+yNSnKSRaCMcYB!_W^v z04nEohsn2cZmz7XC+IXMha@3%Y;fGYWCNMqDKSpCtx$v$Rv863VY7)6f{C?`-|rB{ zkuw}e3ozH7!>9veMZCs~aHo_lXv<&*OQo6a^$+R?-ib0C;?m`)S`IF(4?QTFg8XM~ zJ6|uXwDDU4u#3f0#l2hS(#c3k?3yN0b0t%ZMI<DKI*{gtq~eLzVktrh6&kEByi;UT zQz@T03&!o@3?zJiILp-;CEKdwH}eG`(|i;pvbm|8Rh)?#e(w!zmS{z9d&*}!8#6kH zkpT_j9<|=P{=$%CCy9*{Fhv@%)rplgIed{hFh>OsMo)O|l4A^6RkO<;cR_5vmDlC@ z<ZgYtyNlu7KR}@H>#J}O&!Q#t3`-*Rn_jYzmy&hYf)vAqhvr-|e8YX?HpoufuJ((? 
zh#v;$bD^?Mr^!g?sij(rv>HeWwYL4aKkwgQ5V}9t>uneL)uIcA&}6)(vWCywjgr;y z67Wl!hN!)U)+>PkjzQtcI;gVF)5_%))Q*}TKJuu%hm|3~aWn_PU>r=P@H?ffpqR6b zo)sO|>Eyxh6^l!~9$wK5%q8||Nm~@hewcCtJeey|79;n(PLTZ__mzt_9fU_r@qRJz z!Pzjj&jSD^m#C8aofjF@506OaCWGq;uxAhBKuI}UD)RpRQWZkFD^BH)Gsog*i$U#3 z&hCXqT9s0k1*5RIr2%3JzPkz^h-S)KAPlS=g5+-Ku0oIHYz$`WgH160BJnP~|02d0 z7eA9p$7ZUf(4nu7$8aDf%59cwd1fI4D~j{GG-voOgRk$4v7`;PMIMZxHfYr7Y<Giv zbQ{ojsTpb1+oZNLur9#qXhBk562K{68j;a%_F{P)1iSm^mpQtco4M=_m+Gz|1#Bhf zrt7_uj#JdoTGd$vO3N2=N@Y;IQZ(t7J<E4RJe#$^6uvR-3do`(4;)5JQ??p>WQiEj z8>|#rOJ#kp-C|{87aAK^53a%$7>G*7YndDt9pzki?%6Ip(_7#auB+?LkUNG|IR-y3 zbyGi@u&N7ZX(iTyHyno(t5vj)i(dq?PRmK)AQ^pIDSD>OVkI-j_p|JUm0FM1_x1N5 z=s{B^EAF>9xaJrp<j!r3?dK`95!(-Ru>c8O_;gu*U`+&5z~%*8i4Zkh8AcxOT&`>x z=OTQV@epuTf~e~0^GTs(MilJ@<Mk#=zcFmF*xu3@$%T<ae9D0faQWHQDmA3aM&s3& zDYO(&!2-j|bgXX$2ve!PPk8J?`_DE{_nZAxY25D5(d;Ldr-|-riVy(wdud13Z{~-Y zJv6ROc5RvV?o8Yc$iLXxep@F;PIzw}INsgV$2kvI3r177RyxF0b6-aGQp>(rwVMV7 z#fs;v8i33g%8E{pcJiMqS2TY~p>xVL;@#LYIVj8dGExG;_UbQ&n2psKG?-zqf{I}$ zRXDWiI0kC(z)sKQ7^O|@!t$N;odfysMOzmHPAGDwmGkyBD$dR-^IrlSh)fWOc5By~ zLq#imlv;7Sp7>?c7jVp5%Yyjb4(0nZ<@;4Q3zn#4@<IQQmT|)WuEprlpfi)=_mm_( z`N>vooP*#Hpe>~-EIa>Pz7ev<do@MRLjm%^Z<nQCN@?jQ7;7Ar4f&L{%EBlFf_eT( z-drMTX&u1FH<X3>TLehg#ea5`a)Ea)3Bx<~FXI~Z7W><vLlB(An)t2bgn5~pY<%I7 zRAO?Fu%uEw&w#ukbV#wKnRm<!r{zoL-3CVps1gOUht1g<Irh+l2Nk$srtBuP2&vw! z5ePie7Oz9SWjL|0pp?X|s4gB2Am<djPzKTY44+s6NzPo4+jURjq-}ZQIU||syfKSx z!S){xMPZaPWkGj{<3Rr7NUv&p#d%Cfo-1Qh0YugH>QUwKV0tVWcX6;^-Am)i)?h*s zKv<c^;J8au7ef2F{>5Q8mu66xmYM1&2-3@>U*hg4CH!s%_s7f|*onMh6b|`*ERRaK z4OZdI;HN8sdBnOoE^|r0wB38p`})1-Z1hXO1?tz_Z3&f;f*IPmH1=Ry?>hCwU#2a) zeX=jK8!@P&%ERN4IywoRm8}f)BVq5+FD{Y)7iH%ZBv}}3>9W~nblJAiW!tuG+qP}n zwrzD)mf2-{>dwQ&y>VwIW+E~(;yh(Uo|FGx8{b~c`R?0Wh&v-8q0oP1ka420HWeAJ z7ll>8<xtf${0&iX4cj9Be0@q%CUBwPTA7_|)(f2-6sR8a3{pUD(!@J*LT*E5@*a~< z^)nFboi%!<CRhq`s%kGQHf&92?==5wQ@S_bnPK&>Dx!rJ2cJS$^^diN50b(wnry?- zj7a_qY`PU$S!T4BXxUEi8u4-YqVe`=2vpUOX>Uk6ms*%;rj^I7^v9NQQ<$o*qt%n| zRv?Q&G~F>x?&=a=-5k;!D5MTP5uPnxy*BVH(%$5Zq!GTEg7~hD==5LddV7;k_5c|n zLy`SGo~l~nrL{7k2%ij0NSCn;mJ0kUUWz6sf4~c<!g9t7cf$(aef_Cr2*u*fSiBGV zPf%kHt5QO5`!D@Qm3+J59*+a{gv+#nu76jnPNA`EV%6$c)>AuLSeiFtcD&VK-@-Xa zlj+bgN}qt>a?UBcx)hCu-bLX?*3B<3w)IRQqR*@NPUecAD*paQ=-z_i+gO|<`R~!l z`%dNGcDlm0HjaUsf7l!LN15uahWT`?Es&wt-}Z6vRo{>x^W1C`iVPZ2pf6C;X9y$c zqOT}YURrLDMA!0{-ow^%i+2W^<?|?K%#&s_Ny^h8*%Ni$ML0**rxF)aOp)MwDpwSl zD@!c{O=+C+Fp$wiU)Y_sx?hRn4YX4-2oTLaIW<gN_Lox%6uUiqOlxcFToUr@H^c8$ zgh<qzGlxUAjoqVv>4E5t1*#B*bD?LF?@69DxWW>|SLJWkZy?Y-EzrQfWYJGP=Q<@b z$)J~`+PR{=-FGA;xAcOS6PtFkrNfDpEfhO^=Zqbt4`F!ViR2eK!+<c9zy~M^V9iqd zV@Y6wmHssy0Txv^rZJO3k2)pWjjoRl7}8WreY9t1te^mu_i!Mn?*K3ZG@`|-QO&%{ z6?00{RZD@C4Z+MpcCHCQ<gR6~ibl}#n=)05`b+~C*vF}+{7KO(UNp66&GO3!C9O2k zz(#R<Pc(}A{uD_%l+wC;!r=!b*l)teKgR+*^E1u@03xH%1rCdn;=gW~oc?rUf+jm@ zVzLgd<xb;2MbEsPem57snu|OOWU^x`1TOs8aO(&|J7ZV9@{I^Jfz9^gI-qzMjo7C= z#MA`OT_!usc$t5j?7TGNlDIrWGTUSuo{gX?Ps33&CTV5{LC*$l);w*RA7I%WmvZS0 zY$ru#kgX}aMJ}8RPoy_`xTMVR%-^}BOWw(NhH0&7YNodATbhDr=t&itHgrtqKh^PF zvB#bZV<}{cW%W>narzNS$5iW_U83Y@hHV!NlY#7ut2*P63!(jtK~|I47bBl5yP`a^ zT{D^bpQUbXmGx-EtimS9|B|Rc_6pmh05b_sG1KWIK%}}>V$dD(SWP4+rUEy)bD<Ua z1gR)}ofuZ0fOkV;l;cf9r9+w5n^Ltr;!@Ges~}GP%oN1Z+9u2WxMQz}nlHjlSP{hi z1ew~4eeyWb)$KABj$P1wKb?uA>v{SrSp(pE<#VtoF@mg+$Xd_X#g8jn$UhmY0>+Aq z2@*@}l%UjJVQoNyWe939H+7fAn+$vVx!OihLB_&KAh-6<I)GGkgU!v{jdE-&cG>>m z%PNE?hu*@otJZ~GdwY8y?b-WW6tt!gOrS%MMC@%-)C680+%b&qNAU1koO5r{wcN4* zFpaCoG&e0(`Ag))`q<xOQoY3k)@`_xHn3s%X7m5vP2M1WziiF<K0f}uSLRCd`o7+p z^q%|t9yPwFZk#%0-O}q0^7O_4OMdTq<9++QL!V<8_V`cAOOJr>8UBwj`Y+qAwZ6Bm z7rarq=f{V$(dUm9vpyL7!?iPbKepbjGn{-q!I#gA*XP>}-zTfL9B(u}CXYe)uos?x 
zRv%Q|d1u(UseTIIN9=&F{|=G-SES_scBNY}^$pI<$x`&cL3{ouvGX4o4<j=JE7Sj? zdpQ1sH2QCJ57Yk#-NVHCpQt8Qmj5@p=l@K6!Te|9>%ZVU|9ZfcwKqCjy}obb9A?{} zvwb?b1G&N6plqS9uC5S5(0@L!258;=gMkKuxQD^Rt;ln(-c?itljK$Ad)^<ec7iyM z%atolYcyL{nE|i9fJ~>`DSGmw;_T!kM@8952~NuYa?#iAd+%#<@pby(IM<T#M2$k* zb8sXh#HpfYc*p%K(lb%YONsi8*{b$vS$E0~lhOvu?lk8TN2{yD)$j3X_Ht|vpWo;6 z@Mx>;n&0>B;eKL(hthAOvEGxqC4b~E?Q38yeiBHa;9`RGVuJ8wgz#pFG`c{=sYu!R zR3+ND9je0slkO^?$Ofs<TG>2J`yyZEENufRK@)YMJwe01r|YM?$(pteKyf=|WNOGN zXld(SEABbB@9S)-OwacfOGj=};x#ShJ?<%Ta*GDH@WuF2{Phu<b2)-z8M@s?_7*GW zS4(N43)(!id_xSoz%gMVeaMk>sWQS5yNt5S+={M7;HI-p%i|l@a_WWE4XzRnL@HxT zt>2j<z|dt*Q01A2#TH)|C%3Q3)9iYq%8gv?tN3HW#P?CvS+b-NFELXuE?gls{fdaO zhz7Wgtf;sqxHYcRCfdl{!uvMmZi<<9=%R)gCq~Fhg%>u&hIs@2U_xc>V6t{38E!Zg zVKh}Y<m5(D^fKme=|tgEMr7@4=hsnbK}+l2<5L2>d+;VtGlULMeHfkmQWw<Rduc0% z-6fCj)7w(rzF*-t`B5BwTwb^Mv{fqW4LJ%Mp`i+yX*{X1N+}vk^6EqV?W`o_8EA@a zZ?En`Q@T3qe$J*Tx_arqbSeY9Av>hm5|rsutnq5R{%Yj$a`gW4^RfK%vEt*oYP|7E ztl>hu{)*qzRX9|}n1mKsWyhHY|MVim?O>*^rK+%KFVbu9wFCBmUc$s)(0BVv-3ogB zD!i<XZ0{GQ*so>N`6>OZH8y;Ak<|Z?UpR)U#l<V#!^I&a702Y-I9tz}0M(f|GnaX6 zlJ^L0G0RQUgz8Y=WLMJHG}0Go5~Rrn=PMMpi<Bno1O!@z5EUw}G$H6%^(}LmJY$Qj zNotfOMzy}~qho!9-r{N+OrHy!D1V%)e5$j8ipUhP{%yP@vJ$#~Ys4X|^uh@6#su$2 zX4Tc{0K+>wpS&6D>A|p*8|SC|Hs0|1s;qUI2FVa24tvoZ&;BR`?JJFo5EFGb)$%y8 zmc36FbF6|OQd%a7RN@9(&XS>4?xzs=%Oo{WqCL_hiltDot4w2*MsJZrYmh-_no4Au zI9!GkTk*lQ=gnm>D0*vXI`Qkc0;7hh<nrz?CsG9|MYA`C>uUzPHzqQ?GHR7`-ipJ8 zNMCq&9s!vlI=(fQ{ec!{`h2<?r|vWpv<h?*5fqr`EU&l4(d>HU;+g#9s`$ec=l-9I zw?bRG0%iqGp!}kQGCv1F$(bVkq;=V9?1u1iMAq0M1tH~8&pJyWDNR%IcI8U<d9wB- zy{75SSuiuT*V1^qq7?VSL`P$}yQ9?`)G0(b7K9E9bP^k4T1$8`BVyRee&~Q;C-7fv zzWGpDU;)xgVb_IN<-d{pfj`m;EUmMzU7|41DWX*`NmstA)Q;FYDq4uq8CY}$h)TrH z^-`ZROu4E)^CmE0aMfb|BXj_5UC7_h{&@AEtWI|JK!VfM;xs3rO{!Q$pt1bDl9OZ< z{mC`jCdFkos1nqOCKDLd5f6^aH<7Kwa_4bZ&iLP*d$Z6P8jppswt6zJFg!?$cA^du z^`O&M(?xAc^G^ABcJ*~m?RAz#S0|9%<klFyi=0a|O`DFEIbM3uS``-=40auP>JwNo zn<fM*#riPJ>D$soSE6n`;jgCRcKxx^Lt4|s{6o#!qa~;ht9SM(PV~IBs8WT80#!lB zi;zm9#%{$;f!BePB4a;&tn-(<PxfoR?Cg2J>$|7V_ko3u>bu$H{V6PyoDH<?jTByw z6`xBrGPBgy+#c_klP3P}kE#B{P$A=%e23zChhQCb6NxR<`P?NIXw6A8Bw501fvQ7d z>%{8da>e^<G-e4y`J3V-w*z@GDs_<#VHH%ixMp3g@{3$a>WkaRS_TIx6$aR}dBk6< z@Tl|9yDN7X^RT~IpwJkg(AZ-Vn`Gpj#aI6rW4@zphN^jxum~Tz94*JOXlGnxt+z9E z0B;jEna&fpYHrZw%B&=P6#kBAXT6y#*e5p10$nc}29ybm7KF?3r>Y`L)SXjX<d`FC z{?7F)Y5ycM=L_y6_*ye<)$8MQ;QkIhnz?8TK<)s^6sO_@t7xVU$;K(^=8j2R<3uAV z%B0US1&Sv#LbLjpf|jzh>1cAk8nhjR?|1XyVz}WSsFa!RQ7s~`q4I3KHtZ^TnQmep zlkb4qk<HzcSXV5Qo2r-8ZA4fm3e-|fUDnxFtEFk}Aoo5ldGZ)C+>OrLWv)xA#^g|S z9X#EO!SXJ<eit45JEE8v`~??$&S|oo*I?!5WEK8s0hZeilhF8=&j6F`Dxbyzr_K<s z7@+LO`gEFXKT*w5N?q4Zcj@J5uVtbo%xQ~Tr)Y`+CjI3Q>I_m*oD(TLeMy`P=0Z$x z9B3Aw3XVN66neL%9JMD{#wZ$eGRVo3;1p;E@~G~ro3qIqXQfJRvQMhR=Omy!d*<x^ zEg0kXM2QZ?ArcTzN+4uQB00lc@fGB>(<zw48HCeC$jxaO45rxlI+;2+T3PaA^sLQ; ztaT?jTNPoWG^pf~RcIG^>wRXKHSxutn~5Xd@}r)C(Q>bqh^&(NiH;zVG^QvhaVtxF zU5g&3BYX{#x=RUCW8wY^+5QTLP_b)BU*i1F=3w50V&aZy*b>}8P2hq2!^!G@g9XRa z1?R(6d&6aWP$_y#8Cu&JYA6FGd$UzXGj$jQhPYH_IAs?ZM#$P{*Z|z*?cZu_%758F z2hR3$dS=At#E$JwR`r-4XH3-YiBMYzel_STJR}Bg3$a;3ww8OY3@`{6g@X-&9%_Z? zL{fIBZty}LrZvwjyUO->J{||B`UTzn8=*F^klMZcClaQF@!PlE+)r}wI63`@QXxu- z=$Jn|JqNvsB)Oh2#W6qLsYK=0a1Hv96$*tZ;ul*KI%8Bq>rVimk$0W=Gt92+JR8W? 
zIm`xf=?~weNw7<Dt^E_U#OikG3NcP`HFOu*R2P_VQI&{@?8HWE1cph(hO5Q<D?B3S zPOHmFUqx`JILTF2)Cg_NEQH9>oXlu()O@^xBTxqauFOo%Y;O)NZEg>Z4uRqo3{oG{ zK`D`AfO3=DSc$PRdx0BqTll=gzReoG=_Zy6oX0kxe}V5z*i5Jp#{^NUom_!@P(SA) zNZX8}I$Ls)H&Aw=L}@5WaYbN}Q*^0afVBuF6B=Yl)J03WNUD!!fvbt2!z(+_s6Er5 zvixA%b$vc=hAv+_BNMc#;ks}5hN|DRw)Z{$dm0F=g1hkoq45f-@fx}DG8yqY8Sx^( zk9rT(V7QiLFAotU)fUM-9L_-^(?%tiiHE-vQ*;(vc$}PjU|-+{(^o(avnA28B{`Ns z&5d<XtBah@>*DCPW9Dp=7wyT9Rzfe47$<+#SAF)|d8-b_sxD}#uc@pIZp<&t49-l= zZjFub_Y<?gVPRe3qu^tvVP>cQ%FISjO+-$BVMVcYw<P<((1nu~M+ld?i-~=X4S#}) zv4@9qczA4jd|+}CINjp3&|Ig)+R|Qj>efH4vBoz;(L~ncQeW$1>9)7K9B9(wrXNea z4n1ksVSpz7)kcQiz)^=)zOP#Hd%iH%x?Mj-rf64&@S4&V3y}|>1s~Vf;X>JSBjvIy zPHz159o>P`3WP1?XCce1eFe&bl!8OmR5{mH*C<Jun(CU%Gko;pBcOeGK1oUb8b8s( z7K5zrYLLQMe7F)yC&*tX<j?p^b|FrkZ+NjyVWLwe6_L;ouNI(Ad!{B{XGCh0hp4q2 zhiKBj9R?5F3{wV5BL$jcSuTe?Sa+!UoX8-2zEMQIvxEEPr9CY8O?U)kD8VA*mh+V| zgjC}w<#i=3((*zICzA?Wt5Q?5N^`4HYpYUolPXJ#I#Y8JJ9AgZJV{44MO8aBSr0WU z1@@DOgK&hLYL=K~k(2<_Grog*uzyu|WLQ&iPIYr#YKs*(<FDd6*EVwlYr_-Lc;1dX zX5t2sC6X|KsRWfNZTWAQ%(}C!;-`_5rK(rT4&yfyXF+$Z1y-D#trb_3bFkb#3*-dw z<CM9^DkLWHsxwbp>+KPMuh+AQ%eD6($D#(ML2wf@DqUI-GL3bRg8T(e5jW%}3X?TB zn>uZsYi%EZps}Tt?KV<fBsXvxI&j*tg|5%dxpCtxktY<dS8z;Nnbn_sH(^u)?3|1w zRc)AuRCcyC-7E9Ui;G@Yo88fvDwKn@vNBQAQoFy|9S--$F&i5=P3#JCwzJVY*;u)` z*qP~=-3~?1Z;!D70mr$zFV-4HN&+%ArWPWKzJ%6n9E@mKSW!?2Za24Wj`s|WZt~0x zo#kfqW>|(Oin*%1i)`I|ozD2<dD=br30rjK819ne$%211N7*3j*B&gC9czqL5Q&mp z(&{Ds%W(~Z-vW-xyK|!#uTXrya;LZaEeH+cRhR$l+Jt9@x77D2ZFi}!G|BU~Gqv0I z`B+a;6-O#hM@Gx#1uMa~g5D%asKa1~R1q*edPhBcM!NXGnAq%}8E4ByY|K-1h+}tQ za}(mm^W_>O?5Z0!-7`r|Nu6U~aB*=%)*-~k0Ph$%H67eLCBfdm$E}~!97D78D=+(< z9Ou*jp+CjJv%R<g{VISwQe7p%Kov7P?1u`B4tIctYk9n5b#h%`X-i#bOJx+%1V=Yn zi_yvJ>v}wWEjqEQoweQwNnM;Io`u1_w*)KqT=t&}pfXJrr8*d`MXA2j!A#$FI-68f z+|c*5n3Lc;xgP6|W>v@5<hvY6k>Q!)^ZwxI_5RWMz^oC^=a%o|qNdyJbdB)>GjUSn zibG|$-Kq=~@z6=>Sfpv#z*R&Q@r-F)inD|ob9L)X-J?_=iF{0Bi`2`k3={-pY-GgL zMa>}oU?va|y?m_D*?yvKwg@_AW3g&u3NBYw-@a#*^FFn)aYAa9*QX$E$w~^~$K*`3 zQ~Yem$iEE29w6t6%xtL5^h@+(g@=_}wY7BpZvO4;RCUSernJUJVuyOu8w(QJIt}os z{N5<P+O{9y3PVp>xlMJEc8ay5GtY>fzJQjn@UFjq04wfOP*}SsDowDzjUHP%etgG= zG__TEru*}-_LF@Pd)eF8)myNX1g6C+6c#!`XJz7-aV>IJ*qVRE*4VK%_!APAaS<kc zXdt*`MztD>7IgB7tR}RuB|OS5tPp0XkjA8ik&lfY+v4rw)@-d0<h~EGz4hG#1Y=8x z^I84;X#Casi32n&|D6ZMLLBqcaU=4?xd}%n`PH{{VYK^r{G^UqKym4f#{r}S_+Wm< zU#(u!tWl9C8iY!0CdJ)G$mKneV(@jgs3qOxalYvee>`lLlqHW`jJ>yGHeygxzvxJL zN`azfgrxXuA>{OWY;bhDoWWV|yRmpvw)rY^C5ZhsfuRq2t9|f7l7p&MlrLIdFPgTv z=qQ^b@lYFW0TP7ihc=uf&fy!V;h4tZ;BYUGfq(96h%c;(&UFql8?7K~WoD;qr{!q9 zV;<H{V}EPi+{S?GX(K(P6--1-fr27|2b}<KAj-`Io3C*aEKhDL&27pG0JJ9t$yvI5 zypIO9d(z@`yDzbI6dL3BNeyMG41w3HJXWeaS0y%%6~;Ysl^$1~sUv86Aol7L%xuR} zCQ!1!&`lgnf^zlR+P)!H-Q03}Y=3KlUw0z%WSdx$l7yLt|Kuk10&p=uPDR46%Wp%& zg>(X+`q(0fzp&V0z=Mkus#3bb)P#e@%&Swe5<}HcJkb|Cbm&~{Xy>%M(jiVc_>lHi z5g+2@=4j^RroYic{ajf44wTA$e~!yfNk0`U9N`(H^H9<<uTU}W7cfntzb-U3hwwJi zJSbdZtF8Eb?LASEHpmJo%Z;<cgQkU|4pimP(`l&M)Edk3OSvlBjz0-%<d)g=XJln& zT^`b78%65Vo*INGV&9K9XIKyn;-OZsYSNSk6z1>+M5}$}tHK0F$CxTTz#}lc+IS7d zSpJre`f{{9lEK;D;prYWTuOSHy4u<nD<kI=zi00hS!%3KNSGYD6|%Zl!oEYwJ4s19 z$xhSD;eI|Qt7%DUy=R7lkNo?>-v5@_s2wsY$Omi*bG+{_KyTwOj4D4FH9G;_1TD$n z>ek%wg2u?Y_7q=zxrg`BRnkn@96pa1iqgCU;emJN+-|pMexzAVqE`!1;azqjA|{VX znsm73RM+v$rzF`iyIlC&McUa$N?4q0-^8N#+r!!UzZ{>Cx-+e2xS9f`TeLRuHE45F zRZ%9&4zWWWJiU&(>y()q4C%UkCsAy|{7b{*z#F)zXz3|1=ny2deI+1kPY;KKucyZe z%D8@1bYvkSPYXjnALu3ftZ=ET37ad4n!Q)v8qlAG;7&_XP6yx7F|S<3pKmoCb$Q;c zZ=gj*F*@5JY3GTskwZ#M@d*vFKd`KAZl-A}RjI52QdJ#~dfzuH7=XLsNB4X}`U>H> z@W*?Kv{8u&qeW=~r4*(S>F2HFsPY<~=C-(;EI=#!bp=9vu|d47UR)q`W`ZRb5UCyr zv&LCkVyWrU0wSjlhMd}>eH1WsC(ybpP~|ZcFT1;aiVhT`r0838@NjXq5D6JwEFFCB 
zzn+Favh{?~i{klSyzRLM-vnA0T*jLb`W|AJD+8w1$o||Ee7SC(#NuMttKPg8v72#0 zKr#j*?%BwDGePyHgthg?<U}TfScyxHjlb<*^7ivl^(Dy+ouKv+7SRY!)JX2MfQ+9k z!xY6o$G1uWCnc%7%3vfkxSSjcUINPV8)wR!jqzb#(3`{9pXfQu_Nfh0O0IOIh%Y=B zNSb@?N?Xk|*SOXCDH5f3f`n!^#-aDnaetH_#N7_3`{U?LR(kN>p1iZ8gYV@Pnf}>v z8VQAw$avu&B7OS-iebXi9sa5x-OF9)Tkhrl%TP<&-HQQ1#0Dm3XJgLn>kSP6)_o7j z^RcFLeSeXT_?5|&86*n>swAOfq^V#bY*{Mi-ptF?zr3L`wEPpvSo*#;V@kX(5OYV+ z+?*`l^UzsL<DHcLC{J`I4-v=NqK0&rMe8^k9@z`~=9Sp(T|#z|Gkof#=c)Vrl+*kh zSW>p{H+5f?VRo3Fz_E9Jegj^XoL@hxGo|^jynrzbj#%PkhXfYahNtI?@JOInouvh+ zBN+xp{o)t=>&0_xe(9kqjJO68(?kLcHq|C6L82x|lb4XA=RHnXXsN9s<Lmu*^6Nkb zu^=p;^Ga?(L1ybzM+)mg2B$A0G+jVJs;#c(u`0w9o`uR<CL~6fsh!B73luh39vYjG z!UI@EnhRBkn81N?a+}*sBy}HNx`W0X>*wuG6iV*bTR*+^wZ3img1bw}(OYJL9dZhh z;%tTROjGE7;Q{zT{8>yGWlGTIjz{1dVOg<G3Ec0T5AOju;}n(Fho_x(&LBx>JYuNr zHX^C5i>bENrbx?;k?8)DB-pG<&a#)2hu}k<2-@G(*T2J3%?I*^GCMh$8eX>?xla%I zowe*=JXF$tIl=^@DE6PMyT6@ot>bVI4LSZysE=NF*+CSu=dHnVjg-yNT$5cZ5?-pq z1<sjaBJSwU(2{qUE^}9mOf$cORrI-Ucs!K?_t4Zj*dN`+$k>-XQ1$*4eUO@SlblXg z?trC|bl9zN)yZtJT|_rEcGeemaG-t3+8R5J^nrY^pqv=ZuQ&vbzDUW<>m99(935sS zm}4^`SqIVuc1DPbi3-gbe(J~vi}G{hs8C;rgm(_4_lU=qzD>JDG4u0>#y)0l2Jm35 zA_EbWcm)Y;y1kvMY@ScLy_7LntU?|!#I6M<NQ=3Abz>!0^e38xNAe^`%7ew|Pzk*X ze>_|df-~$Kggp<S)%-T@mdf(H$k?6_R;N1|35TE*mQX+QRoT$+Rng`qV{fl;tXh%N zhGz=r#tBeovT@_b``e2LrCfN|LJ_=<0hOKoJi_gtMMj;+UnJI&VA<z)Yz@8|yb2>~ z2a<IvuF>#v0G`vE?=(97OS{YSVRt1rIR_gdNdX6OMJjzng__)!n%JB)Eo~oZ137ow zutFb@l4xe&zJPpz=4PdeVjtjmKiQ2qA@X&qZZ?`ey-6Ny(MFWcuR@G6+soAlT0?*O z>&z~Fys1*GLX33~B2n6lY$Z1v_q|%VHxtH#@g>FCNYh!_US9%#qy~iO?fz+ej@U>% z4_XUP4}*g*SFJh!%v{LHL)a|KPLog!_|X&{xhkwHyxd&P?3@qBGuMAv3{ULX?$z(! z_B*pT%f|01)FtpS6(Xbt6KC&aVoT^2Xo^>9tNXY*E(=7F4}tK6eF5bfk;Ou{;9ET9 z%aP)`Lr`JsqD}ddXjSP+T_a!MQ$5=l`nwkk4ps*(DmHQ2rZEIE08!u`aZ8J_o9x!T zSMCH6(9Cce7)~=Z)SKS^KLRqe!rqYZJgE;*Y&dbAACuqHK1P*hYpLwl0GQZ`fSV_p zE~=vHq6j|+<q7#8vqX`xmz0K#ijcX(gFL$%_$|P+F1LQb@o0k!(zI~P(^o*_=yCJD z;_1Pac~X&`Rh_}5#bMGVs*cfq+E^Kev+8iR^t$#mlhPm?emVx5T$$<vGHaAWxXmwy zNa0}AJcg$<sye{eI!|+?#xWw19iH-RHD3AN62(Ql9O)%$>XN6^y@TXRJz=B2_N>a% z<igMTJR;Vn>H_GLLQd2MUmVSSM23@-8;bAA%b1oft%~$vgean@Kx(EQ;tETa7R%yJ zD?|6=sgkW%`bSIz%MZ*tzfW9N{*H_DmC0jtc`qnkp20%v?$79dxXQFh3=kO*sFfYX z^;R#r$SkED`iRp}nkEq%KXb69{+m*pB$*+r^_CFFD%sMl->^rPwFPwiv}|y-&2iRX zN6yR@kmKBGmalA!lLXV<%xic&)V6sJF*U?Lb}~yY&QFL?QWF%FHITby@~IF2?{p)k z&=RHTBBn5ud#4*TD>pVZTRS45#R?DBa4<Mlq_c$*f){p2?9on9Q!_R6QuGt@r?UCE zxxLI_`{8_lJ2l|<tlvRu3Ol_>A-tNGKL*^mhcXF~v6Pv%7K*8i%`jJHukf!AL?hKs z!L-c6HB254M&>C#%19ba2p0b;`Ea(LWzFBIlT*06(hrTN<~2P|XsB#i<<?+rL2>5F z!!gsf?eKNws=H?kk^4(b?Jkeev*Q!hT%e6CM*X~)ZU~Hw)<Z#o;N&4g%jR7?9Sl4z z985i4cXonu2QCO+gr)DI?`m@^Y_MDoGFlF5)Xee+{!Q*b{Oa!m!rwUgMuf942-0`N z?ov=fO}v8*+D}3r*i*gI-9%qa4BcU3gT8x${_q?aHcUM%L@!Lu7(_n{G_@E!wt%w) z{>L>iS*sC^EzpT@OWyrCHPy6sjibO`tra-dngB19C#c&)k>igpF~yPy%NAe}gJOj! za7a31gqeanovp(=RMa>{%?$zRR~3(EcHDp1mQ7!9aW|84)UlG1MNNI-W+wn2J86K< z#xYbxYk`NQ$`P-^udvC-$<fi?-oVw>34%Yo49)T}HbEBS_q!8MeA%xnjG=kO%^6Ku zFLG)YHeALT0gBa0z&ukU>fAFTQ#2w=l!3zQKMqlf69_JP{Y<#WZUvr>Y{)j>^mY}Z zifS)ahj(a~?@cC-o|uk9m|C(P({v?n;gS2qQaR9+<fcra!NJR(+FM$sK0_uf^4~aO z!t&zQs<u{ofV#EMljG4?_8N>b@XWlN^l#~RGwp?V&^@QBDJLsx*bA$u^v(vpzTV#E zmX3Sx>*Kq~*b02$D3Bv?oa^Q(PMIYS86_LFlU=2GYg-$%R)(f#Fs&?2?d+FEnxhwq z<vBI1-k5NOxF5xswn<lXY+!o)BLp@GKV`DDm7{4qSa@C^g*sY3B1o;FW-oHq;vjZk zqfx-5lhRoO#fy=<LlfXK2Lv_0_<cUyL3a8ot$E!eL455?&d#ZLDml%>)sOtgn<`%H zE4+fLu(W)_rEN<Uoz++zfN=0LQlTAi!JOX%PSlq4eGhn^7$3^Wo-FiqAtWGY&D7LR z)>MIYq`}7-T|B(3OuTTYcmVBQZr3YIlW(%%wCq6A%KQ`@ZR~Xxo&WBfzhCz6Rv|*Y z<9*i2LO&`1hVJ$oUejY<b$UONW{T>k_&7JHUB%Ssro7C4qU>Oz@?4VO#!ag)>@yGx zU^(knz>PDYsWYHy>BfkF3l!lFmdD=1j`tee=_~erc~KJLKc`>zA9stj;S|-~7wy7! 
zSRyfmz9?SIJ@S&1qMx%j2Nyq8-}3T;BricPI#8{S$SivNn5U+A-F{}M%0H*Rx3bDV zhc%agtXFGol}F>~Yw7W{^16JoDZgm5^Qm>S<@aoDd%m*}cSeBELnz2kh*gudymZ&S zS2p_oJ3V<mxe8$oBf}^9)DZ7ie~)*<wf2&x#syiY%F<Gdk!D~@rtaZhq>6E#IX}g@ zNZFpG+sr*wHB`6{+U#^8;wZ2ZL(p+5OmQSgbnPOGe!IIj3mxF++wpm`@g>Xzr4tfU zbfh!M8vl5R@lWN_5cuWF;S{Px+Y$l2wsr@Z5sUQ5zOYYX1V44=_MqYvOmmCh>#-$e zZm_o_K=3#1vAjYs;O8s2-4Ir1y4?00F6cUUy58@$r#4blNNK?ctC+en)x>@E3@&Ew zVl$A}Pitn5=nv;Et@u2R1@s6w!atEKzVz~@{d!ebHn+5twGd6`=*b2a_}8abbmlfS z#aE(*7bCPMZy|w+nFvwvKFwvfp%8NH5poz*WAd2|Tgymwz6ecEa1@*cck^;{KbHBL z13S)Pi0poAl3HgL;;aX1UXULw&$IJ3d{MMHNY4|fuT0}vdpnyVq_5+!+kGWXAfaB~ zpZ;-iK)y7dZ}vlWg}bS0lV|Fcds*%Adbt?w@p}G`4BP|xR}O5T9GU6fFF-oLv#H#s z)X3Pv7qqn_BoZ9RD|${Wi9cZN1a~86?{ybGxdD>$&m325<Kq*vtFr=gD?w5-DN++3 zts$@iTT<eWG~ving{m@T;Q6s0=|gGw3JYgVYF@fZIw8mW$}o9+lGPZaIfhYo9ZL5% zC~h1hWsSpTg60m3FZ?MTXN9RgcHjLuwBhXuL{WmZI)_+5f%64aZ{LF6@jZ&{4LD90 z9QowAHrM+_y*&j%j#U%9WVv0L<Gdr;mq%m;ldHxB?y9-W#nRT+0d{mZ*Zutp2J0Ph z|HUDBKNy|t61?;r2JY_$8w<;)r*yRy#N@Q6uDBJl@k3=~^VLBr&ei^&EW{fAX{{II z0mTVB<^o_d^8)<YTAqT3_?wy$Y1N+T*br*{$1l82qSDLGzzVZ@db+1pGG-D=-YZmu ztTXwrk6m;36e>bUPqpC@qN3olZtK!4{CZEO_d&+rPt;v(Tzo&(n49Fd*_QEb`MsUr z7my#(S-fs`_marxcDkMIqX_rqOA;OJ<!mX>vg^)s`}n$geNR@;pU5<C)))fgVr?+& z!CS@5I24@d$?00$4pmi$IKrbNogUy>Tm;QCGZ`tk{pj^QGu6`K)xso3(lmJE6;|>D zR?-B(B`VIp^&EH1JRV-0gXB0qm_>S_KFf?Z#n=jA-oauHR=O`%dM?U~wX+4I@Ok{U zxO)pC#4(myTkZu#OU%^TTH~grzgMTH*U}76o{oO!1bon3=jFg{HWbags#V4Yyv`Pl z$zNao^H#dXclSNHHB$C@z+CydJ!r!Rv^gVGo_qYHTrlSEAsA0;&a^+h(QNe{^_~`R zfEPhtMB_80=`pURwTm)%#TZ!AkH#$Lgf7Q2cfEr^FFWFuyTw7Sr&82wHj}?EQxjOE z^jM<uT%7F6Q-yt#Tgm?}LjR>Q?;&`Qr$py6r}{{fA3bx^*(g+*>{H+hG&zx%<bbQw z-Sm3n;cQUnpPJPWl%(eFtZOpUe|oA`aM5PPejLlhmKV_1(aqA=+3Egghp|~m+Eae( zSwSqQ$II{9$tS!*F4w!(t1B+|Utd>PZP&A_>owm_z}!`j_wDUc9Jq-LoZuVDVCj{_ z%t+eFB=<?JbZr61A#L<-ks=qflvFw{U+QOGk4N#c5X08rB`x;~I>I&r2D*t2{^98j znYF<{xv4Y$WFcAD%)}_IJf+(Lg`473x3W~HqErXMwC5MZpqFneq&btEkAE8zf|dM) zI`=GX4uxl%mRmQTnltt39*ta$0z+*HcZEfbF3Uk?b`WN*tlCSYkJh-}AAh>exwyuT zk9&I+J^q$>9O#vq%is0>1lf*P@E5(AX@Sk_OCAV#z{$tL61hCEqr3OV;l%6`dDmR- zC68BUkmv9F7vnzo(4W|$)BLaR>&Df>NJ(=AdQgIOt$FZi<8Jn~wV-4~22-!K_wYu| z0Kl*wXJq7ydu>_mpxo(OURi<`xG~-=9q(y{SX@vOlo)Vk@!0=eh5Xv1$pD|}N~NJD ziG?9a3F$~V#vIyUta>(lOgTdSJoE~nrwzI%O;cE~dOs~~(2FSc2`IHos@B1)2$u0Y zN_m&lBUgiqJomy`=L{S#LrZ&tNAyGM{2~hrg4SfD#9l^QQGp^sr262yX<O*`Tm9KL z!XF&D>XeTy!cofS2Z5H(ug>^UwCmRh*|R%<7y7}QeB$xo{yfsOeIh4gBLwBq+`gPZ z`0C^D<MTa8vU#GvjCXXImc~%=e(=>yF9f7{UY6PE_2(SL3HHMXMWJ?QL}qL{TW@Rc zdm;J@F^2-i!;Q*<94239F&9MVtXaq%NyIx`uqwS%&-d%WZghPrjo#F(H1oVF>%4Mr zVu{@Adosu4p04Z*0tmp<th&fXYnqz>GwXf{n65meExzn}+TD1$^AR%cP0o$!X+sS6 z_Ap|m;^%a8=fg~u;yhSCHYo_ud*cZKy4+*baEs2{NONPk)Z>{>nyW-$09}-uj~_Zn z!l1Wb={cEm%i{%HgHUXUW#sWDvwC)e293<|`%zSy7wj$ZQR|!I$}281B3{{=yZ$!0 zUa;g3%PkMm?Brt)vx0`E+X7vGhJ#mOYcow%4K6co4-r=PB6_sBOo5k%au)hp;>ZxE zPFCJb5rz<;x{#$iks!MkDZ<W2TGmfqHcR6J+FR*^Jb(UiD{5fjpPQ1Akr5FUm6R0_ z6%de>l@*nc7#NwTt*F)1Q@TC5URnP@A6?h(ZNN%hav9)))sCt3Vl5~y@lt-hx_DN} zVK<ouajyCISBU7RLa6or<}2pw_>Bjm6a|Y!GFw|a?F0T{Ce`%0`z5f;==!@Edx#cd zH%On~=ka`EN=;6$J2>){zS3~V=k<Iob&fx&&+dJJ&+lX<&dv;7%~&}HietH$rYMST zPUl?fAGVIh`d`hBO`w)Bj_is$vtwK{&dV2`b6J1*(~%~J4Uh40b;vv*6freg6lOxu zVah68)gmNp1V$!wU`JI#7*zupHUmjEgRdwm#YwKlN>7Jd2|u^Z6Eid8V!Fl#CThy@ z&JK=Jl2O*Rwf?B7sC%24$BYUIaXEG&-ru^mbab+@gMX{}?*)|~k0LC?b?a(Cp1AP9 zVZtSmJoqk=s3hKN6G-Phj1_yb3vyy0smS#-0q!n_xN1~);%dwxxoZBM)1AGo{s@|C zcH5hA>IyL5_WbhO9pA<nHJ5rtU%%jAtJLQB*VgED{j~N(rn}1VZudvB5b<}u9?oXM z;^g%{U-nks@%4J&Z(qCIegc=z!^6f^)X5y6JNBm$(5Ej;ez}{vth~`959s6IXbOFL zqD}`RE6u_;+dqAnoRp~-wW?aGqW)wbrPH~ZrmBOHk$8rRYrXq(uCYb=vp=%~1J-Io zK{0+qJzYH=J2MMYzcM2sw<UxU5d)GN1M%NI0K+{WTRA5pqM)YYD*TKN-<O4Hp4aEi 
zSv((**JQ^nr(2w?HzHg|sUkF38c~i~qV~I^J*HaYC8tL%h`H@i`@p&SAJV`cbO-nh zZ%?|`z=2zndKV~}ZBIudQ(q@cEyU5D0<-ZG+7~d{QLMwYzTmvT=~B>OY{IP_l;7*y zWmQ|BsyjH|p2$O&-Os$x#=?TCj>gYRQ$;~jLswA)xTU$fy1lx({QJCrh<vX9vitk& zb9$etub3086+1loV-<^_&&b)t*T+Zi(qSg^y|gJUHKo|3*u=P0NJc^iIx6<=?zv5j zK>G6EbdHWp_V#kLG@N&$iZpWeziM^4$gCz_LQ)@r?UbLHN-e|h^A#yBF3sM1p8k7A z{`~-ZKBp&XTi3U|dM}|Q>W)XFe($xHR3+n@b`>_^y2VwEvJh#XUzV<VJ=8W?ij6lc zP698YV<*bWkNDyWvES%9W8plemIM25Z|njWy<w2M_=f(F{6Vv$A21(LX<;g#1eakK z7jD;XPfzKXSmEIj{i41ql$5jCoW$hRpXWnz^CMS0p3V3B8$H{<m$98xNo{5FUM3UH zcSc$Zj<Tq&?yabi<_x2GbMv5cJk38Kh>1a=p>Ox<xV@gD<IBt87xk8=7l&8YmUp5+ zA@A{O5z3$@R;1(Ubp_{dpS8&8JQM<xf-c*9C8cy?vID&xEWZ4_o+YzHsk%-G{X;5` zGmCaLOZL`=OU|VTE!O6_dtOgH%vGB*Gicov9<ujTm$UZ(zM#O%i-`8DQ&Z&?iB{#p zGn1?6qbnboC9j1y`WAQv768bvGhjrgbjxYt(yP9}3Om1#7k9U`(9li5*u_RtPfJS4 z=iAea71mkQ;9%hM^Hb|Jpz9fvb@;De<MZ8iR{EjgPY0ff5w>jotul9{lR9e6OzmWq zZ%(?u&DzqTz|G!2E2^oj>htO8xK6LF^W*6f$3ugXP8ZLLo}ZUm%f^Zvi&lzFwX`Qk zY)k7xzKv~_0nIm{CS?PQh{=?2r6LQ3-{&n$!bk7f1SGIY{qwaf&v$Z8_Sw;GduP-! z?f7IaH;_X;J>%o0?=>l}&oj%#qWE*vbnUR_VN-!I3RT58s8Wvj6<mz4C%>jgjot3L zLm2f`bIU7Ol<cUeP_4c~s?Bb$wl2sj2e7HRz5|hnxb5M%cl?Gux@1Vzthc_Y+3VtU zG`l@A126xw#oZOB*VFNRNV!aI=l9p^^NNou=VfCte^AAK1D`Lj%;$Tl44?n=<Mz+z z*-0M2=LO1o2t{6?8V$SK6OxwilGh7nntVhA|Le7KGV;T_`%@^0J?+?T+Ybm3+lzj4 zvpweKvnI+p5<Qo_#*SD~eyZ#5US?-#;%Z_69C)?WcEZkHmYRm}j_Zz`esZ)2;f&~) z6I#gx3)z*>(^Q*M)0y$;p&>&=M5GJ@sw4khNX<h?L_|VZW{4zzdUZMSm-4|*8Sc>~ zJp;3$*Tl*M*oYdK5O^G96+lW)Ju^Go+TM6`B}U_C?Ph)x+16E+*|<LfyKldrKZqjZ zvO?gOH*vdwh|i~gW|HTNhkaT_LrX(HTSq}zPDL#(C*yV-aR)0EEj`ZkERNJhDc>k{ zgqlcNW?*b=h>U`!u(rI!;M8J&4-+mkU02(~#;T*NU0z!|I_HKrTOD3rUr-?6qr@xU zg9ro!a48|@C8lNLL${RHmbTWG_7>FkHuN?$6ciA+8XSe7;OQ<bCqV3i*Gf!CxVyjC z*#7JN^))7TTFeFXZSL0A@TDlA%hq{}dVIb1?>utxaYjbJx22<?oB+epK>+U<y7KrW zIStj;y1MWC1;sfvhuo_@T}@3PkT^Rd%jf7kC5F2VpHIN#T48AB3eP!pVPfK*$P%m? 
zhDvsVb4DrcEsa{Mj;e~Hs+_{Zy>p;0S6efC`!5g2XhFvH&utl;<jMZ6>FG)On^#5& zmX=uxSq1bL-q|%2<u~AZ?&IV;`z?=o7~oS&^35S5{71kqR*$i({tbui$<<8~)@ep2 z+PH*tL^$Nt-GgLU#H5%m6ALF5^_b810g2lBrsC(}<TwK?Hx_nxo;xyR&6Ck`>I%q; zsw2(P#vO_{V2E{vA^QF}3b?we_Tyb>Jv=yT|ILPpyqmAs(yISEK}*Q?iwLlvIN0^v z_r{&lp<yvS9bIq2idM1<1c+WHd1<i6)zACv=0!U)(a=b@u&B_%{nxv(+vR45g?Z}H z(UH$SAucsI9$TAuBbAYHS~17S=y12RxTu+hlZIiUS4lA`E629DR!~y1x4#!LGdovT zd3=0aT*hx^X)K?f^%Feea})DxE2Tw5DCr2<x>`dyAM-y{Ra1+NiyfI1^?E^6_HZR6 zB;}QqD5)ld{D+2SppCRCJ?B6_;|qxd>wn`HpHv~@yXEnF-9Bv}K9HcHrL?DqXTzDQ zkusvW<|ZZ%4$YwHht>#yZq(sWj7u~%G7)RzGIjz3T+Wvf&nBSN2|vEV@XIaAIXTwr z0GL0b=<aKFHi`YZ^u&~ulzx6*Q7<0J!TzCyqfUy>_6$i!ZMzLMN7usa1c(k?78rLr zetvgEtBpY|RjiaOI2cSnE#I(2L`vwg>$;Dekkmxq7_0)LmqJ8Bgp!z+)a4cgQ{R19 zSp&=B=5^Ru`5(q3uOJg5HmJ{UT8^ecL6Fo^vIqwSPAtlPTAtA{;dS-QC@9#)g$1An zt*i@xw~MLeFGj|O=0os@20;}K=MW=s9MoNYM2-Q-pSuKe-#8h0pLkHDl7dQ6LE+u~ z*N^%hh%I}5sE*(34J<agygWQ00dF@UCDprK#>|VEl2YPN{`CW}QrLq%x63)I{M*L{ zv$o#b>4K0LH_l6SM8d$p8lc+)G%^hn$K{21GxPlWGtQ5JaZxTN&ij6K`Go?FG^9Qp z3WXVkntCZmYYV^vbPY2$voQj_j5qR8qXiKPQB5Nq)^{}U=~b@=@cZsLTjLI;L}*<N z77pI-(U}AEdc9tKj>jOM+DgGTg=yX8ois48_i*~5sDVP_P%EJj&s(CKlG&rq3)vYl z#}BPR<m(9;?G_XjJ?<ry(ery(*HS^c{rqogEC1cY#P)tEs1klQ{ngym<s{XCy)@E# z*6a3pI%I@Hv8RiUhC60s(|7LT<Acai&=RU?t%S4cHFSr1beVl(+cj&Ztp*=<fbh(P z5(D`q$w!*Cn_L;k{|Q#-?=K_<qzvT9(+&Uh4Aj3HnT135+xs^cRJJEl&k%kB62@!Y z_m>?~*pOX7VJ@E!7boG<OTQ7ORCol>$LiZAjCp%@H{9ShIFV;{mX3~YaFEfp{u2NE z{naP;@@Wz(qVgXO*KE6MOFJ?mGA6d=Zx=t^K8VY}Jim6v=#<05Vo;y;YkIID03!cK z|M(xoc|#kZvOC<*j@8=Og$&Ah4<TYuHr3;y8CN&FhNgygu8o6Dv(u?xzYLMY`TX9W zMq`lUPe(#JhGuq~EiKDeR`dcPq}H+jq^YSXVPW9|5B&6<40l9~X#eP{Yqxt^bv1RN z<OTutdcUsF)NS{eRc>AhS4$d-67D+vB>SSNS3?(He7XGw3Kq8ylXoo*3YbiZ`$e~Y zI#x=*vmHoveE1X?>2RmJx;U~4$xc$6!e?zfpMghsqJvIm7iOem@I7`6IQ&1!2??>f z7rpwKg#-A2;={Irw2)9a;;gKwBJZ3}%-!;EpL1X1@c8_1x$w0nkw<tqGcr3Rt-V>$ zSXCSa;Zz0n`Qc+8w6?Ma76E;37@g2`Wx*}QB*WQ!K2+?~pW?@RsQiNl;QZ8aSyp1y zvs!1iHq}A?++AF5h)+W8mipy!DIUw<4BboO1oQbNMc@^XYWzGr!BHLpU-!Gbg@!~H zXXdFG4aKe5|7-|5DJkf{AOlsLdIRel&=IQr^7R0_X!hFrsrD_x5T?HxDQ+y@Nmb&K zwR3Iob%mxv3hL{BTDXV(H%QUjV+MFcstQ<~biTlJvaYxtl4JN#;+VoM-YAoKy|0Yp zb2v}-Mm9$;WxC?=_|pI6^zO3V)5r8`pPrFWP)f?k1Rk8TpQxqo87BRscjrBrY;0n( z|I##b=o@@pZn49`o#e~de3Guav15p>Z{C{$zB0BUpRWdQz(DcAD8twt1+-C>Z_zyE zL67&sjAD-S)wR+a8JN?taCYA~cazi8F=?b@8hT-mRdoewyIWnYeP{cD$nU{BG8ppn zPL6CxTs;sVu;=@Oi(f__Qv#~g?3w8MA=6*qJci~pG&DK>_HPUzRJ#~Df`AhDw1jM| z1oUp}(D*Hz+L%<7HPP~rAjwwcB>6~NqdxqtovHz_;Qa1|7(<G^DT(~(-be4xRbAyI zDC_EW>NRoisjQ654&*7T&u{8O?2f4^8Cf2Vk>mFneskFcB_0ZlkO=n(5n1=}aA3TS zuz$N^Ata5%B`UOcJmSm72+u-bkBW;3KBz7zy@S5=Lr5KY)yOAhBl{J^I~=;p-@t@e z5OxUefazB?71baJh0nswthufJd5ejWPJkPFZBtn{I?`QRt7_l?On0o;;|UI26dI~2 zqVl@+eC^TBk%D@NzzzyE_X}x$ejySD7SFe>qBb)yu&}ljCZ*T+6Gjb!n283euwyCs zkU`PTuEq$5$xtqj(<wIKYkGrO#zmbAgB}qB|NAxgq4_J};tj&HJ>AT}AP_SyJs1qa z%9@#_bT7PvAp5OO_w4Gde~5=5=HYgDglG5~B=Exlx&i5GfWh<W-$wTI^3V*Nv&&Bo z0y->6V_{@xWMmcL(x#Gv1`P#GSy9oi^YL*%Cvld7`WHJmP!yf;NF?n<QgigDOICAP zU5Rt<(cY0iH>fd`$<MgF#Od;e3JuMOYitWl$cBffJ?+)O`8`+BM1q@GyB7j|CUTR| zdK_veHZ8|5Fc0j7U`7I-#f}b10QS;$ZSA+<VTr_B-I^L79v+$sQAkqF4f-W7Dq7iA zXLWtHgWuns>*^puXdk|^^#>!UsI0V9QB#47l@}pva%#{pshET`Jsr!$xWry(8FZRv zIwnmO<2jcn06*5{W<^nrQ|6Df@R*pS?8btsUwc4qD_K1uH9cgzXGfPI?;wpe&6hy| z?Y54mXQ%oeFf14javIXfuKrkOn-%rc4YSO2v?b2P=2n*dh70QbVBimE_EQR#kH_~m z@)5G}6opwAuNG5R(n0eK&JN5@j3gRd($Nr+kX41bSYu>hl$4Yjw4dJ&zy`Mkc;uHJ z2#i<K<YQo}aVjoyAtxzhrzR<B>YSmRj*X5wT6w`r75)*53672q2?~nH$&nL|4tnOn zJN~{M(jZcWKB*_N@lg^L_0%RTXpSb)zxJjOgCrdXK#t~gzGbrW_l}+NAi1<>q?a== z!=hM2Lfpo!Ur~e0Cgxpxd#G$YUCtOW0o22yJQU&Xms!?j2u8QcD=5=cQrYqkGrd1X z8B~uo)HRfqQ+^bEUqm-UNL-qjs%T|d-ThO}77IuE+R`eAkhtI^iJ*XlPe%zlH6Chm 
z@}wozAu1|e?2|g;Zxq<3vOYZ<tE$@CpI8EH5k3iwho_5IO$p0EbHfY|_b5p{m6dgV z!$9xNbF8(cMO8IPp`ZcsZ#t;==b*&O!U_ux6O7KUEAnZ_a+YIrWg|r+wdaG(sqC<! zK<<HSD%7&vc2}7*G#L6LA>$%!jS305MQd$AWFzC^C<$Zh>s!9x=*R;S*q;9E3VA$- z#1OR!j-ef&3~U$ss4mb>OHU7*8No^`ub>IVjf;#1p@@(1J78)nJOZYnv5|tBnt_Q) z{|FM>-ofek_=Kir`kRqUPZP|6&e<3<gz$<(eL(2fuV3co<skPR8)x-%>P@|h7^LeP z8`04Z8tDzph*`gok*Vsa^t3aD&i)4gAwk~0JT`Bmqosb9h?JT~k(`pYv@jcMZkdvb z%FxUdoR}*P<{6op5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN*(AC!|EGcSiti{i~&r!_B z#mDqs>AG^Ii;4=nssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY8y7d1p{Ws^vgZ1)a)2=j zMdVy+%u0yBoSw3d`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_Oh^KEuF}o<yVC?`nM5po zv(KOBSCp5*l?Kt&R0GF_RfSrc>VbK850@JgBT*4yxMEm9nv;_qbOv<(^{bbM8-NSt z;^IutNWD2bb>!rqqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv9$DF0d3m|b%uT@CBqzo} zzrgCpfO+^Jcv0{$Seb_i1@<{RFMDG01}^4xwbTiS$<GOD6O&V?CSqwIBX#i{{W)6~ zXIeU17e_lB%rm-pdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7*wAq42%Yyw($e8!ULZa( zcJ#*3%Gxpwjuaw9fAkFW-oD<&WySMrE1;Qn)|Oaxo{yi;#=(w+l%&1oTVGvW*G<f( zLUzH%A*R0M@?25tl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8c$okG4B;6DVN>sPZXRAW z6-8Xk8yM)qPr&MJU`T=;c5t-q>+g;T3&F)aAkEFqWo=`Dhj{=jIW=i&dhD3Y<MB4y zI+~#2^UHIM&GlYBUPdN{I(pima-XZ7w!WdBql;r@O=U__JR<`=)?EVTVd!yiuwli; zJah_9j@0y&k&&yo&c2S88X*Y<ouCdODOE;F92TTSguv4}dANfxVm-nj7r?x;hkHd` zEiliHi+OU8ab1Xsa4`=@3=TqmQ9(l^iuvW=&pbacKRGq_@bM!&%rh|32ZaW<v^76{ z{aQg@_B;=_mzyhC1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU=3Y3*oKT9J-EM#gCR#nh8 zThcah`@s&jph=CIj#gb)&&n0y92h34pn#isC$RHp7=(;GQn`3|)p0Nn-Uh~Z1{}<{ zqM5g{wrKBc3l9s%!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A<YYQJn&p+HvkTKby`4!Z z3BLY5-bfEGO!e`EN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQWa5K-r$)1oDfAwk~*jGHv z)9|UEC8o$sjmH|I0`o3jp17Guc^hZY`MNq3^SHbXIhme;o(%f(I<5v7oo{SBO6UE& zk7ORCX?J%Ab@dtBRS699!C}Geo$b5Np9cl{!O3N0Vu1G4)KmZo7`d|q1mAuC9hgj@ zzUy)e))Or*DoD@BM8~grhFe?SJ~6pwtn~8D`K`x?m{+Btqt!GpuyOZ5ghWUwD&oEl z>vWoc1aw~CErp94C-dO7j%41_%B-cWDJ&!qAM?E2pqUU4;$a^685<uyCi8gwJTPBT zRet}`gU5S2vx_%}N3ISH4Gdi$7#iwFJzgKUK0F9t^k{cCFFOO`SzJ1gVIG&xLjw*D zb_nSD2YP7nFi%QB$*p{bh&(eb0W0$$;|QepITZ7@xEg$BUI(|gK{GEaD}A)LIR^94 z0Kd1Hni=2O*};{eMKjNUVgAm8d-r!9*EThH!hmaPte_~*%Em&9Ug=|RW7*r){t@@R zvQeoIRwh?(&x^`7giP|BYTgM|BRLHdS0*+NngP&x8ag^n14A2kPef>>jFJ*3I~%TR zQfCNBPtyzOxhCOZ9;NffM=@_{*4)+<`bRK7J~8~y>O6+|%ChdR%bk~7+glnT=s?#x zs4eKnu8ua4DQI6@kPXbg!sBi5>O4CKTXbyHmHux0%#(8}oF*j0%{-Hv5As;dgP&(* z`5iisPVA5!pZO#G{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62lNr>Tig2{QZ_v>Z%$B!N! zz9zUI%OH@Rml`h_S@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&hY8e>Xx_i2zm{-QnJa`)n z^LYHc;ZZtoae#TOxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC1o{`m*D#n;QBfYOl#~=C zB*d6i;TY!e>pTlyoj;0sbzq*9UG6jycIL&7VqWZP=JELXBfQNQop1bum<Q3o^{c;P z{^9+X@7`x+rol01Wo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>*P!B6RFA=>IjjS^={c>9E zctz*T)0c0*cpEw`Lql5+FZZx06!W;|+qgQOB_cb^D5~e0jE8yh<1xSQZSXJ;I)9{} zADb9Cn)ziE^H`?@e96zF@-{$wfUo!7?#{~c!hYS{Seu%-0Vh2_C*w%wxsGD~I66;6 zN<qRVeH8OP=WzIW@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah$J@8IL26W$u>jWA+yD~v z!>J#zPCDA}@5PlBP3>*x&vRMW_z^M5QAyc*q_(Hj+^FihiG_LPBbgV`N8r_Yvg7J} zSV+)6uk*)Y9y8wtH}j}LyrircS5^Ub0632f4}SXMImXYw`X`u=iHip2Y4Ca*U>>iZ zXLA3n^LUv5oVQ{7{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3=4PkEHy)m({owvRc%6)# z9EM}8tN8cxw8fQ`jqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m*yrrU~NJT@fWoTq~l+JtX zGw+s+KX3DedE4XYJf6G_9_Gb<i_UMLGjFw3W%!scEyBe-<~ppEm0AB_Z$>K4yv-k? 
z^SWqnb0qU_zQ}X*=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE9qr&)9d2f6W<o?vBrYKi z2tmMwYY_a*d|NAeofK-$QgKBa6}QIO^SXM-{Pemh&yZ9BAz@W*ExY6Dyr_|98lL?8 zfn25SQ96H|81@hFHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L+Emt5qOM8(Ugm{`_|Ko` zK{MY4I*)aKFw7JG0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK2fR%in0atDW=4isewvsF z^mI9SSyCL#i|^}v)jso|*9LCic_1WdPQan2?VXoeHx*mlYi8$c<>G>$`OiAfBx>fJ zaRl@D@-}s!nQv_j55?(i#E#0_pmm=8n9RR_{~o?5G{_%Yc)LW3nQw#Ed8})f=I3T0 zFxS)5E~_j{Oo%<w&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_^WbfC@OYbJ#jyFs1sI+G zCz$7tPdF~~Z5^$^ypp24ot4GmW<1=StZb|rS{e|7wPKqY{;Qv_s%&U&;o<&jJ?zfj zQ)wmpvs{|;4oUG9qw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB{X7)l z&>Y)@^T&@KLQJHtu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbgbh2Y% zW_t1bIo4AKgv7?j3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp#cti~Q zc=rfz6BPPc=am)ZZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm4iN)F zHf33dq?oen2^AxLiMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS!Jvu*+ zX8sSjCiM$?ewU22<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAHz>%Rr ztkY?Dc(`C}0z2{Jdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTCGToz? z7vMiHA_6h27x;M?G4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PFu#0u= zz<vx{R8ihHd|g0*Z*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn`C8GF( zlq?^%neD5~i$D%YA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9Uf`k|x zLl_6?>FHj-#PVHw+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7&J_?;h zOG_guIbnEsfEMrE15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky()hP^g z^NR}V>ub<Uys<DZEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibwp1yvg zs32!!X?D1ojPylXI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U%lItE3 z1<J~*c&<qiu}XNxl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUaM>2o( zd>hdD<1t@eQb<fpbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K;-$F|{ z87wOrit@7HoI>w#vd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr)w{gKc z-)6rfQ&3P~Jxv3B-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ;3vQP z@jVkGgCEl4aI>0<Vt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC=seaD zxxKjt6oZ^e^emV51@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7Lt){9B z*fTQHA6{4i`w0v~9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO})(o**i z@agJm|M=<WsEAMk0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3XNk$F zcvT5VD6r3~M9Db57*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T$;Z<T zYs>Ae4Xkt259&NHe`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t&b!}xC z*Db}w59s{$sR>b0;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U#M8PE; zoKjKRKFz|)E-s2?DJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG|(=nL; z`R8BCOA7%!XM5Y@1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl!TozM z^n)b^d+hGzR$E`QzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H&z;6G z7l|BQoeGKzSGLx{{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXfXJ(~? 
zL!Q9480{DI-4x*<WTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~UoW>-*l z_cz$1=Fgudq&Y7aky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z4A(lT z<1>HE7#7;xfBewg)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W;-Mhxd zS_%qsE>8Aw9OvZ#U32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h*i-~-m zdq95<>q<0qgD<&C95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{8Jz1Z zY?`^Xxen)ufZ*)CySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*CtBNHP4 zjCEy%j+Vy%_h0|z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAzi#G8- z=^Y`ty-VA_E*tmuaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=;j&`~E zIrkquh)1o5rNMenC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^71o|QS zd%M2*V>+*1y}W+4KRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsuNwhT7 z0Fv&Gw#E6`t@YLQwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_?Ng;Jn z0=%?~qrIDpBUDd>b4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^LnaoK z&VvC4TZ{BSpq5lPJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv2gKaO z2m&8w7A7riO|YO1%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA}gfol` z4*L~$2DQ*qNFXLIdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XXIzBcy zGzfj;0y_A~qr=yrXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8<vcb_| z2xI^M_TB<4uB=-Ve(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{?!QI_G zxVr@jnuNGpM>^?r&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?kw4<V= zV2(EiPhs9|2dc9J><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhNpl!~{ z;=<C(Gpa@gC+7S6J24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y=sK(p z2Ero00N>`8`gcG45EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=11%b1F zVB7{Bmb|*WP*_q3(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4W&%pA zsHAXhedYU)V&?JFCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$XNY)97 zv4g{X-@bbF{{1@;4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{=dV(9i zV>io=UAy<}-NVAl%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`rpj1ra zqS!?-s1KnTIukG45Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4rQuG=j zx|fre<>TXpNS0|ln+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{bR)ZK8 zt}8GkD8Ho8Kgb^<Ah=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd)Avrq= ziwYk)av1*0b>IM8AUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{RzMpG1 z3p*=22MfnRaTTMm^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw&p|Y|f z;K#_sz|EajTw1iUhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y=|5JG zmzRx<4*&g_M`--jw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A!OC8P@ z00S)H>*rluS~xp5{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u5u7g8 z&{!WBJ~l48v+MGsM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3!@?Lo z8OvP<g?4cA!^RzKT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI*fco> zc};DMy^C{E)vvvK0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@&^Rviw z+)tlQ%uFPuCX*cO)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ayw=f6H zGkN~StGC}RE-&Qd=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW*d>Dp! 
zV`*7wK(eriums##MFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^3yY2> zI@oFJ;*`}?rR8Kr#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?*rgh_p zejI$lYzNQo;XbLXV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@a06i@ zJa<5ag%!cWi=JPYH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}aB)PQ$ zx?pKVh>D4rm>B-ykIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y2rw^i zkF<;w;PaceHvVzuC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP4$3M@ zCMHKgTzwL#Xiov>@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-UaB3&l ziKG0dPMyCXA|WZOtfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B|Eu#J zC@3kAoE+Rd=+0CZf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2Pj63> z1BqZsFe8|PS(B`(ZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EEvr|>o z6;Ux!&=i?Qc7UGS(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0^mX-& z49u+vcFv$t$d)7<Qwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9NHLzGk zb#*BPMNui~3t|@q#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k`^DzB z<kt?Pc{5+WdOg2*rL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;&+@_(@Gm21&ereeKgM z?!cBH7(h1o2lxUuC^Rx+t}gINrMdX|`{Wnqws*AT732m4`hBWEMXS-sAo&W53zk<F z|7g+^IPRk-4*`UY&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GMg4VXC#ih9yFJJsp^Kjf3 z-@b%8UEP<fs>^e7vjG2>lH#F3aAa79Mr!M-28Md?+`IF$x$9vSZbf%*XG?2+R$hU< zXRHdIzF!c_Cu4C&oqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiAscB%1@dSGZ3d1`gJ)^dz zsl2W>B`Y&JAt5RuJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPWSPV`Z100MjU@`hQEQEEe zY^*@7K7sz=J2?gUG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi1(M&)3Haj0&AYcpCPr%N zYtpkaV&h|?q9fwsV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1=+1!TZ`|H^@p?1!$fIY^ z)^4m1jSMukHkDUb7M2zv$)yVO!OL<A@`@`;K~OEMt}NfUF+MZZ(Au1llNAvY<s0bF z@b?V}4US2SPtVQCEXYqxOO3gdfLwk&a@BC{VCc2CcC`;rPoi^@4~&mBcea;QRi<QT z1w=-=crk2TD5kb{dS(PoeM1$jt}+%UrmQKV?Hm}J5fYQeb6#hUC>}#ereA6`BqvDY z$ic~Fsg;BEozq(x@7A4LpfOrn8$h>YWThn~CnjEsgGYZUIWZ$E9X4LR+<bLu;pwv{ zKhq)4UOb<forM3^)mPUtt&VB1(AL&8IX$+1Z57nW<@T1kkN^5)ZI_#-X2u`?juDtI zkJ!Ka?$MJ+;B-?n6Cks@dpiKS@Hsft3(dlDq2>3#{L{;=m;d6$i@W#kKvUrUpn_&* zCuXip&R?AaRNsI2;MMEbU-TL{0l569&mON|zY(6*LSSU_C_3<K(8Wx`MNA?t65@|3 zI_?$F+AU(ZTgdQ&s<95<)WVrU^A8HUlw43%(>*i@B-Yl`)zoqMXV%==(b3;GHa8m+ z7b7h#b?Wq~!$*%G3Y#el(NEBFdIoyI5#e<$O@rej9lc#G?X8V%Ep45bfnevB76I^R zW=u4(4hs9b4<7(rfvAT@2l|KlhDQfy=4Mu}uiaw)pzr$48*4YNF>U?h=Q?b>^WZ)N zuv-)HfVRK?=+TYaH<#8|7gny$EiKF}&d)4dnVFxxa&=*8eGSd>vv%|5-0~8fUR!5- zO;ba4LtR~SBjCAfpdT*(GIH(h$R)S8eZ2N|=C$_@PEKr1J2Emo1;}sg=*TH6k4j1N zjf|oC1=_lLm^nD>TUu)wn}}){Y1l>AwvUa>tx8}i?7Z@va<;nkOsA+?N@Qhz!+3Vx z#K7dr&lvB`cW(hCAOnDS=jP$LPlADrSzfuiaqH&OXHUQX(rlR^OJ2Qs4ZiT;(E~7$ zyQn>Q{P6M9#}MuTFyIqfW>!G|`{2>z2M-=Uf{pO6Kbhs{haY|f(1PFrxdmz;43<I3 z{0EwarapZ50Ij`)81?;oXyNzwKLt&b)YyjO+Mc|co)T6`qUP}z2?^(Q{aMcI?i9uE z6frxlZm&zQAky63LnEWpvx;i#dPYY9)uXdBqqEb<ll{j9pt#BL{PHqeCr1s8mb{XJ zf|8=Vk|O9~MW!SMQ4}5%1rS<TTV1+-otaw-$(RXv14;8K{qbh*<4@@VU%Y(${P}|? 
zNTz$(4y!L-zy3*Xskd)`C7IfmG@3}}!AHz=vCO=L%q+=|AKiL@<OfGnxxtyN-MR&* zG{3qswKzXHKljNt^T)xJezGew%xj<daz4S?>9+p<;<|>Uyu#3=RFBX|CvSf%Dw0th zV~iKZSlI@ZU0Hw7J+a2GK|LUfXE~+8t>mQdo@pOe6_MXp+&Z&<^ZqY=^#jkE`P|>Z zbBBdL^yvS~md^22ujI?;4FY&nTrXN)5;2bxF^d&3jovR}yhqSrm#{ILDpkvbU_qu* z142SCrDT;=wDj~Mx$~Hrbhai`LNlhJ88`aJ#|p~IT)jNaiI&D@rpD%*29d9sm4%BN zEjc5-bD($e+WOkZw6GgLP5k{u*duAweund(CR2O$hM7a{&D$>m9(G52jh|#meu^9j z&5sNxfC3)D{LPyy8ykpAURwv`Us+k6UR*>nBQw)wZe~3Hxcud$vCRJtOina(bmUjp zB;@1;#V65&!|gqNEXXtiB1v7}NLbe;ET`+%lW((Y2D#)&^42M*6dl-3DD$XMjJ<Mg zLn_iLhu*$@w++<4u&myC6p+y+Z5hd_-~wnDHjNg-N5R5j84}A`EWrNXl<6{hR(LxH z2Zo<-bR3XXWpgX^;G2(o?#kAz_n&NNYNE0Ia%y%~RD4`mbW~VOG*~!dF|Y-;*EThb zOieHoB41zqY3jhugvd<TKm2L-z>mn^k_Y+qn=huz{Foy4^(T5~rVHO}<k9ozDB77M z4=%QuMhfiOojWMtS8sj>{Ot15^x^^v_+LtZ`?IW|pOQ><3=YB_1Fncl%Y++9^$)hC zdzm?sbuEZ;SOXCg|KjG!N8i5niLcxzZLLNw!qBS@iJ7vVQa`E7F!ao&MpfT^^n4qr ze_>g=evcGVsqK==q3CkXFjNR1dBHdwOhDI<?VJw4{{K_<IH658u(Ses;}#Sik&>QU zS>4u$i1uH~l8LT@Um)Mk^!K*+b+`BTAfJ8Ru-ZL1&^I<ZHajzYb#Z~22b`HDc57PX zdw@Mq8URH!5BM*oynp)3dB8v6oGFSv?*)<uS_8Y!q;~}H4<CS+-MD-2v#Qy`@XS2m z%!J7E^Pp;eGtv1^?*o(MJ3B!TCg&H0BqoEfab);gQQVAe9dz(!=QIeK&Z&b}ZrpwL zTALKXDQ$yst<-j}7BmRvl`vyHt$70HW$2mX5?OWY!P9N5{#E6}4?h}FQ*~$=2UMsh zus%X25$BCU&lv=tHwrr_MPxmt@vmwOc8rHMo<MM>IQs+y#3rT`mOz{j^7V@Vn7q0; zyRtHW?HU*?pa~^E3IGHKv&+k?_wKLWxp!q{Wo~5|!ThyrfU&iI$OeA%!Gk-9&Up;r zM6+jZl{){3|9`ZfRNEBrPw#)GcK|%1;Sou5@BSxA0}hA)ejNckk^y{aX$nYwVSaS( z%J0DWPnNrehbvoJbIL2@vvd7p6I=sB?K~LfP85Bjt)`*zQB5*AqIzND@$}mLvs$zh zYBZczt+HdDfS!L~W(S|J{@(Mt5>^og9ywl#_3z%JVd=JB{|kz1Sg9;Anh)d2t>P+Z z9DddyNI*XbK2Kx4Ii+p?P0R0Z7+)m=a}$z1iO%p0i;B(6DXgjM9UuE*g8PQ%hNPrK z{{TO*<dmegu8!IDwW*~gv;r)YmX~O1s#_5)Cl?p4tgeD`_?RF0v&h-1bC|H-RQL#7 zpFR6b`_I3G{}(T}qJ2}wF#Y8dy`v36;DG9xtqhM!8aSVYb%f!;L#I$xbLF>F-f!Iq zpF`7AO+7sjy`~kFM5bmi!lTJPfz~v4yuFhy!AefY_?WIwa#`QTqnBl^<6JV<Ql`;3 zMx7WwM%E&{dHlL(L^1nGwSBTSYEFr|^vtBv-fgV@Ri&+O=A54IS%UypIcGq-fPUa< z-GEa#KQMk>|9{5@{0lZj5a(cEVQJ|?qxc6yADv!Q+SJ|sGgULOFh4jk&d+~h&z?QI znT3Uwg^P<*T3WKXtrblmJ9TwYK;X>R-+aAe#||4?Yask9D@#D9DBCkd&c@F&gMZXH zOxQnubpOd?=FrV1=ig@<>H^PSz5edQ58r+N;Q^5O$A3NiI6lKvGLP?lL_13FKMPtn zH9V8ynKB+xHB$@o=vWOnESe1ZcWRsN;o+)QAo;4q+<drcNc8IEYvDpOw6VvUnu}<e zDv{z_hL#?_eCrsHyI;y$!=+f8Ud4~~wGAq599rwVa?8{{jO~Ofw>nMTDbawQS=T+i z4cEV<y#Mfnym`o3gTUP~4!l_3GkO6h0qr_IC$zo!F<yIh0{%rWV!yr*#>C9j-pSt6 z$2%e>J}bAZp{aj-d@I$Xm`9Vbvasw0%<tT}6PDqVkB_&Zxe?GZGBbVX@F4*F*I$3F zjm50qdjPls{jjjQ3P?i;_WJdoh#aP&gpVK}jcq@U>Y%pC{Lh~+t}aJMMTlP%wXwE( z{rwMUr?83k&)oj-AL)Jl)~(Ny1~mNo8dJtE%`9yuyGO^VDyu8Z%uQuvrQ3VD;9`G; z?V<AU^kj2)H%ju+=~=$f@f5!xG<wyuve7a$=GSy^2(6r0zPobw*+o6iBT8hPXN{^; z!C5{3gwo-Lp|!Hk#ewNHK`A^$RmXL`waCeMubjEnTib~JOA0MG|EM;DP2PE*n)@l7 zFF)4%xHjXMmM5R4JGZv?SNLca^H_N!Ykg$abT?W+NJwHzc3DMZXV=Kg%%`p##r#p; zqkwi`dMPPMb#>MK`}YIw1MEdbg@Gpl_B0wr;LIsWN%6YIIsiP_{QCOhwQC^nLDg?O zd<36hfItA`J;3wJS9hK~MNtY0p9xyTfWV)>ef#qL`zNnnJ$mut;j`yw1y1kWwexR( z|2tM&>&GAe7XThr><?eOeE!}0C$HZ?Xz&T@NNbNC-F)y6CHcEgpIy6i7g1Tz*MJw? zy}NSzHb^ff;Ai2qi%W|*xj4Vs@eKzDN6*l}*1YGi0IPG?u5I1eH8fb&(gsRCAtx{N zQVK+`4qkqi6jvjNUd=3IvBpPreA251Z$0@ow|0<2nkZossmG`jHHp@ANT}{xuI^nb zZ=ZYj{zF4cAJ-YpU84ANMgbbmmz=}OKFxl)UH(kz9+^9&PUljhe52sPr|o@A%Y#?b z<ERGxh`Q@x6)K-;gn)CN3@KF8$i&3X(cZ%wR6PV`g*El<1N{i*=TOYg4UCT=nBTK| z*REam_I8inzPGa{0oCu_i_m^(SO|b;aAK@`u)l9)7%BsQwso{Mx3#o)cP!n!)!x$? 
z5fKJj0)PkLzkc^#R#rOf)!fnq$lth+4z1h+pgwu??#k7xIoX+Uu~DsUEw>)r?;jd~ z9t7!JcJ0*EP`h^f=EBM{(EGDD-*xtML$hhA$?Lc8+<*QO#r*8Td{2LGPk-Oqty|NJ z3viUOvXYI5k0-9oW@cvu1o*+0rq(8i#E}HwQxjofp=jsD!^2%xUfR}qd2nhHjoGJ{ zmr_!by?q$z8EJ5VQ%hF|CnxGVItps)QVNSBQqz5-;y@GHxOpNZZ)uH?ygE^VlsK|* z^X~IE+N20BX&WrPN|Rc848!nCyWBXuUfMoCFmnx!ScAed+4&S$q^v|tgH;?75{tUF z5&M@E6|+DVdFQVbTsX15yc%vt)ZGrL(hjLocodyZ;Jj6RD-1$vFy<s2!ODu_2HkXM zQfhi}SyfBx0CTJ!)iwZQUM9~2=B;gr%XjX!w6}2`-~y_LEvBaU^@opzgf6hNv$3+W z>g(%)JU(;g6dN1s;UkBGf&<|bmM?d<-+uO@sJQUl*)x0gB0KKe$8KdsK;a7f3=3vv zc)%XG(yraR4jnp}o|b$NNe;MoFWBC_?EBcEeG?NSkoS7JI{WtTgKE(AoI1r{R9tZH z*>h;Y!`+RIjSc=~Z%;aRP5`u)j*fOnND#Ef!omVqz)bMBU*AA)?fwG=1vwUGf%aKh zS@tvEF$UL|zka=cd{j<OW-s$R;s4^|Vx7G`J!4~_{j$m`Km-QGC!s@>mM*TwBnPaS zg(A+7Ro#sqTXW;#%l5J5g9`S7IA1;QIysy4i}>Jz)+@FBD`g!EH|~9YxkXLenT=nK zQ=KYh8KLcx+&{Iljo3e560#b0N!fj^K;C5#a#)pmNQJ_qOy*H?KA_-mNZHvwwkf`P z#yv7q(-5fM$<Bk}9T^*!nUh;l4Z$orZV57AE6)Sw7dLJI2cABC5+DcktE#2~n1@x6 zIw0vaHPx>_dU}Sr3Q#}5eE=2!PW{6}b@jFADpMxyL7j-=;tW^?!nd}zg4WQDUw`ur zL=dH=MF4fEh@cV>yJt5j99>-<Q&W>2J2z{>Mre?mhr6Sv>+$QiA)&#r5gI>mfD0<X z`4}1*n46h?^|!wvmC>>X5osxD$x>1h-+c4UUS{KPAoS?z>8Zf#!oq^E@8)^#LE4d! z6mJ_C%CD+P&Myj2PV<S3r3QqMP$h4PDEX7>)_i(FWvvrWUcd8<DPxzgRCme;F)wHs zYVMt1Ke$@awb(qoy7euTD+{Yf1aZ5C4Ec1t6l`P68QBk>zSsuspDeFlzdof#`AXW3 z%RGjMN$)%gjtAuJx#URuq^*r;DG_B8gs5h9GkaYEGJpfm5g05fuduYfv1@o3sQ$B_ z|A={@!MXKy5n-WS%mNyI<=%Y(fzzl9U@)41d2|)vghCr=c64U?;zcnO@XAUG-rim; ztSqoQ1j3-eOG=6Wc&H-?oH>or(jZz{KoA-p76Mcco1rynDT%DibOytdSrZ(A#m2@; zR74oT_RgL9db-cweFzN=hE?Wq_n?Ty<4r)cD=5hOgRlt=J}Ynrag$v;;k0WTYAr1Z z!0@mI7~b839vl(`*GnSVpyw$sFAG`-{sk4lDf~l2vMZ`$GqVF?6Y0U>jtoEO9F1)3 zf#el%#$PMD;@vZ5uRVn5RRrgLQiBdkUd1twUz?FoI@B<@TGqL6b>ru`<~_U<*?ARM zWvqqq!SdG8UUAjip#Ag3!98iWu-Q5L6n=d_P8pj6a-;)tb^v=YK20}idI7(4o*=;& zgQ|K@A5irmRx?Y>YuYXkOpZ??+Gg}9?~%`#zi|UFFD54P5%Utif_d0W8;j{58|m!p zLAe(sO?z+8)9=0qv_l=}3tHP-wX`%~UsyeV?ks5gXYam;I2WY;#{Gv-4IM1f#p%H1 z=;`8M8EOK0-+cHXI5-d`d{8)dpS?ga&y?u9WM!o58|w$gMxa-^`RwKTqsL3P?zp<q zm|VZ><jE7O_wMHv<gv1`!a8`?z{D7+GB6PlA*65O*x%CG4t}JptcaeQ5#A)Nq#WcQ zBRty0H_+D2%go6Ilsv`=l)McW#xJG3_u<R8356YSpOx)0^}Xu^4MOECBOrRM>RGDm zUwQN0&tHsu_Tt+!7mZj1v=3t3r7gqdZK9idXSPB6C(D(q*N#ft$T}t)2bLdJLP#EA zdnM<CDlQ;(4(JE&(V+hg<1Madqib#fRPXE?=o1weo0*eWRo&Rxg~WQ3<6HGSV19Oe zT}bc(ig_6sDbU8BFc0WmfAj=dWoii!_TV7Px9`L!#Gyh3XcJ_Ni;FWa-%(U%L`6lS zy8|tQIJx)Y+vOWKR&Q=xyM23pWf?s*w2Z}KzJ32eSV$0BhE;Vn6>yELtc*`YJ-sjw zQ7x)pKpKtA%q(0-WZ2N;1c_t|z0AQw2a#XDGK1FR$B)gdEN8&(A2Hw2+dZ|k1VZk} z(Iap&931-z1aq>Bv$*(0)J=2^^kTE~A#{b_(9Xjf?iE7vcr!U2lieybGmnh1r8`$| zK9Mx^KBh|3^Qu*K%0G_ra*uCBhbU*(?tk(7)|8A2){|;1GBzi484`pr6ZedJkDhJg z_D`1R)LMs#n!>KDnxtqhc?W<xM6!p~U6C;y4LbWd+}?A#EGkqLLo;JrJ0i`U;vWp% zcVbR{L3J%CoWTiZ4EssX-@G+>b@9ZB<0up{7%iaCt(d>|@X4oDAjzvYZvpIm{d`ac z2H@{Ng#V6R%!m@&bf>$bz-D7-D=jNtzjJr-+B(?E#>VW@B6?^P^8j*CKD(JM!|LNV z?{jmrpr?Qe0MX&8iEt+7p-pf}U~F($IJAa_8gjgyJD?gj_W{I55X|!d<}*KHp64J> zXK(lH%1T31128;9*`Lo72@sHuMoR}9l9Wad3Ul!CwW7M=?VNA~qPm^|m#Wiwqu}b! 
znMdEgy;R(}U(!n1J{yG1X{?_DF|wlTYIW}tXq$KMe{n7sZ9OXM8O(lF%4t0xF|!cY z$g*wF{>k$0!w<<dllF<1mE3X<skt6jqXF7^HQo8NJovHR2gC_1XE3`3jZUf(LDfT0 zX6Mdu4~c+IJf*O>s`YYvU*A^DuiU*?Ra=b?(IZ=|t=1kq{EYc6+6I+6i#Kn6!aM+p zot+(8L+$9%BS(%Nj))BBJBA1tKx0;R=IZU+3u~*$OZXc%Cg-kv#=JJ>@plO3(UxKL z!K?4Gb3S7pd=un1ig__H(Yb5ark0m;a<f@k5mW=(V`8II(~_Y+w1Dz_W_AYK7M_PK z4K0lvoE&gz&?7KU53$3{UwrGE7<)&>lYIk`@ku8a10qS=1TTWI_@ByjN?0*4{PhPf zl+6SA)!jhVDA;BkRCA*zG&Z6nzy9D?9#Bbn3oF0s9%&+v7G2mlP?8Yd+CRIE+do-S zYR8V+B=09CbLs{1Y0v@f$1q+ev>7LHetfDlmUBA0gpKx!nJVhy4T!b`XR4!@uUB|< zWNJokWp#OD6Fhws^R~9u_usrXFw}?Ty~yMVEE$>Uz~lh(-J3kW1@k)*+6T<ftggf- z#v|kTdv=4WZ|!J<o?>Kr3Vq9Kc6s^KN&a0(^Sdl9E$+R1y?E`~)$7;iS6AS_$Bu34 zc`R1@<@*of;$n!j*|kenR{Fu~?-DO1pkEU}m<%!of>0FmqN2j!Xb^5Ylbuj41YyP9 z*WYF5B6HNhmQ8JwnYQT>Jlx%b1GB5ELsOIBWl#acQb~1nRcl*jN!g{`f(T@+CI)&y zAOs6C6;zFmxrMT>(GF#*sBvKP;NpW9Z<0znK{~2B=IMA;AJy<wvW+chnFW#ud->+O zUwJy;e*235qS4-SI{Q^zj$=Im_7+~5+o1iEWqR$--;E=`Qgqp=MCHXWPGG%HV0}*N z_@2V~ozV;AylA#d*m%c9OFmf>Z4*-?8#^ME?&25Z8y%OJTaZ;+euRm6Kodw@hPM|x zI~xEVa0u!epaoFbqHTV{^C;#4;YbY2eE?}>&u)+<pqv46b&Yi$y<O08fc${gfL%E` zIRk?N+Pgby>#DD;EML8`apv?XWL%v&o-;Z#ZDC;!&4SqA;o%vcoUpVcppXW+4#y1$ z@cS9&9i8maxC4CT&dWFM9(44*A;3JGLt#lF@Hre8L>Haz+BZ5pfBhQliylHlQ@x<3 zE~B&}C^nwv<3|sPfEeG30;<MQ*MbP4>sd`AiwZp;xgHpP{>CF|V;@lR20rx?W^ue) zo_^`=$Z$=^!pPM-zxn2*iG?@&F{QmSHU~6Z&*=HcTZUh|^<W#ff3j%1q_CVb+%06X zPs-*L&iAygKiC<)fOCc+e9A6+&*NC7i0sm~=TuB^<^)rFCtEi!T3{$BocQcq=-iR< zI+nfr_OYYGfB;dzaDG7^Xkb9^R?Kh4uv;)cb_LPZa&j`??Dz(S{ZZZ{Cr_S0zhi<B zxTL%UT1LNP0&fB;77-Bwz%Sjp10f`cY8K}3BlsH4l?vJ$9fUu8<Paw(2eX6N>Er9Y z{_v4sfbSN}uYogyaN5m`j-gEu`5?*AAQvz{xv<bTI)dg<L&EDldrqC=hsy!KKm%|% z83=n%@SlLaPM$hR4Ggt*^#JXsPqYPQP|z{@Mv0>B5IZ=x@#xiep0O1i(l)9NIokB9 z!)oqWYFbJA{6~`i?Vm*D6x6dFQ`#wFvQL$AM2jw<?;Dz0zYW?yS;nuda!XnNUBYIE zgbjzR1E3v!yBIKk-YAqyoUmKOgj>mpThU2K!vbfH2z!z{!y_yzC@ztQhZ}vhY!~vy z4Kk)7DJfo9T!_Ao2%i98!1LE%f32pjdj0WJWc8cPRZu?Y_qlx|L&_>j`}XaFMs_d@ z)RB>qn!k1pJR%^_52%|-$veOP=4%ibfXORsYZ+M?;Bh;ddIsu;ga%(qLUN)Zjlkc| zog5q-`uciPR~JFR1M<;k1qIJTn-lYMQ&+DlDJh_R0c?lx0Ztwkkip6EG3W>&9sys5 z|HJviW%iGc4ouJBP4TE_?L-P3We12KBGnC4A0p%N=E}N;`&FFzbbKq?ChtFgol`q> zMA;Gf;+0W*3hOIu7?4mpTtBc{+OasaaQpW>oev*AoRu<VJ*~;A;J~FuJ+AF3XAySo z_BPf3*<$V(^|iS50X6y_8GCLeilA}$d84ok#^FMy(P#DiSp^L^<?K#qxgFJT6U7kq zEUX|>bMW$mc-DwuuC1$MU~B|n@bGl6ZK#>Qabs$E^Zj`MC&Sy*!jb^W2Lv{_0O%iN z)dE=s2{HEZ70vk@8)fCC4i0u!L`w*VA+W7)svny}-suI1wsmz-X%zUEm6b(saL_38 z<#r(6mi9IWM>{JkONb^Knj4_wXz%F)p>AnqK_b~!*H$fDzcDs<WpZ($vZfOD1^p8k z8~}m}J?P}(BAl~@1;NqLzO%pA&))|bgd{r+PK+}LcGl7}(g67&lYrsd2M24~E?2Z( zj!8?`H8jxB)>hTjP{(3TtZfKns-caYwwZ;Rp1~n?=YLb6xki)#;HOsais^eDQgnvU zRn9u~pgP?px~^ept*ZNKZU6GC?|$#gV`fA@c0PqYQdS%)6drZgV_2{F+?H+7{>id_ z{q`|c3YQxFm^Nddypw=lpb$P%$Rtt-A0=uYe^k+FuPB~ZopxT2aT@C(gCiPPS%Z9a zWcV>6W1}*%(@QH#n_Jt5M}}u-$L4<Wwk_ZsI1?a^{E%dJW_E3DapT6~#s<WdqqEan zYeEI62`ELs&)xd^E}#@?glSt}Xh*lByP)+E_d#od>oKuC`q{1ExHo23SGK+b3<vtS z`vSBKCk9(S{a^&N8Ttx+-{e$vTU$YOU3yVzLQY;-N`_BNf?IHeqfa1_=59iA)G@cz zG%!4;W%DocG&N#a<KXhGr?2b-@;GH|)f{uN^vc8P?&^+-IgK;5{VS#I^HZz$e*e>n zNhoCFQ`{|z-=|DIpyI--<!<VlybanvU$To@_bHG?@S!Jkytx!zL`<Vm6Elw!HV$VK zHsX-AJ%{s>zy}KI`^X!RjBQCw*!z2gMusFNql5Ofm)nuKLH^-K;=lOQ>A5Sv`GdP( z`TgnW@2mN<3ct4|^R=SEsi~Hp-iqed{Oa1YqLR4GoY2H%&#)+DpoaPN6W-2A*TP!U zz(`!%{J$&EL=Aj0Ye(ld9>?alb1T@3n?xG=Hk{K9;Kwq;^7=s4lyxpPkF34_@CW92 zX>9Fh<5$})WWcKExL?_sPs{z3wtN5B{5EL+d@**6IgayHAjKb0p&i38#4Qp<&ErJO z<0XhGd`ixH1Pu<Ux=5M^Dq2TMoBC_wi6$g_D=OXDCjf<ga(+>6Wi{|&_wewq|B~)^ zy2IEnPB?Jo=k3h>Mr*&~VxwPb3jI;oz~n?*UtdL2b6!<VYEemSW==?AigyGuGPyZW zWACJAWdm`m4A$gdm0b^O&?ECYN3P!OpS^KTht8)$0aYVyd5J@jY{JN|>Otbx^3KJL zhkxvav8yZBxz1|uxuD0Q;J~ivbV!}Xqe}IQDcVNtpD#CV-4oLFR<ci&wu;&>?{rQ- 
zNZcwJkS}4G1fQH@W-J%Y1aO|Jq$o|tIC(Q4ExZ-;`&qh^H^TOzNvVms1vwQ}mCY@e z`}&!a+Fkhq$bUK;(%A3*h4tuPXJc1>+edoG$C|pkE1Fw!E2>ipiefUdgA<Y&5i!(& z5PJ`A0K6#zcv~#q4C2;3D%4#HF4XX{p4po>AAf7j$l#K-)25ZGI^}UFkR=G=acJn; zzA%i0u7B*8-AgMsxCAhO`Q7sNtnv;ZZ@5(`T7-aY#Qr6vs&PQfD8Ssm^rCqfu+w>i zP$}zFam!?Bo3x8&vHL+%N?D2+`s%nO8qm|V?83Cn?2K$k(8UAqK}Z`IcPTbAJH5E9 zu%@oQqXS@rC<KIHf6Aht=gIl`<RN3f8>OGL%^#bQ<JV`ZVSe^cj9>2WhlsVft|6<e z0vtIaB|RWE5&W9s7X*^V!i8pH=ZL&TW{g+H8FH&S?o)9Ez;{k>EZu%eizqpy<fQ75 zqe&^|R;G!V1pB77*A1)y!?#ad`~LT3jrob}%&%iRuDnawc$d7xUU_>?C8q;Q<TDyB zuV1~{M(kfwLJ}%4&dIi6)fbEcITW1535n9y=~Bd0S-Wg8<1iK>Lryt6Ig1eMfPA8V z5sn<EWoBbQw6P#l9lU(q0>k`b;v-WtlJW|H9;;egFZcB!gg^Haod42PIe&<ye=!{R zJp$%S_0eX#M@AYuyFkx?fJY-%=)ZiU;^-j}WS;<%I|CftnB)K&9wdzt&S1Y9<ts&3 zdQ^GG)W*u)XMRa_2juN88inb3)gDxFJ&E&iiLS5gzFO0_+&;bW;>~~Lhkkl^4|ZPp zUE+k@a&~)U?Kl-3S!76u6z!jEzf}Jhl@~9*)pLyTy3}G7Sau5M1M#esRf?=_hO|w( zyhE-4#$%75{vj0?eOi)ROeH0<5>HRU5bX3Vi56s<ojZf-9}J2&^ioPfb}o=)Nqu8O zXD2B9q3Nk#LF3pTnM`Kv&yo2rg~J1r2)#o*0`#6;UYT4_1br4<2wV;=FckEgjhiQd zOf?}n;0Q!5BNIiOA-g*Df5^G$ljAQ>TnC1a$ZkEP>~tQ>(Dkh5Rd+wA?q=a%QqVFB zswSsyA}F?G<K~?|`Z$u(D%kl|_X=q5kha++W4lY1#3pC|cL|HbO7_p5zuboFUsZaB zW=tr_mny~#J@XD}xbvvE%GqVhk}~B;nTn42CzM_G2pOH%^S1KK4bEusOKGz4%fVVW z=o4+soG7+*FS2hS$N*I5K<EdAS=rp$+}({xo!=8n&i%)w_|I$p>bTISc{srzJBNlE zJ3B$W=U3GLy(i}6N2O-KB{0IHLBK;7X6@=>?nE)N1%@YT85+xDjrOU#{;M2W$1%3} z^8D4?Pcv%<kEv5mYPuWxHi#I8aj8&sXzA%S;{f=^k#)b=3eYF}PN~bNkzCwkD{DLM zKm6^PuCw#&xdpTkVRO;^8^Amh_B*7A-&{05s%ZE0$@Z(oe`Sfwth4mWjV~M3BBvZs zp$X^)C_3c>@|?W`d?in}F1%ygwr$(i#I|kQwr7$|oQdsZV%wgHF>xl&-SdBW?s?C> z-~D#-+uc>w)m^K0*Q)OIJhj%rg-aU#ZQ|%!ql{Km?&Ctyc<RK<*PD(!v72W3=pZOI zDb~J$<UJ)M#NUYufP7b)RseKwc?%)B%Sa!9H}HPONa&$6bj=XIDzEd#*W-!3mY$ap z+$vHK`W2<*cis&xwC2wSm+vX6BE`W+s@Y=f;<Q_vb3>!Uh}eXtW~8AUYt@GjwGCzh zM~{oA(VtJ^H=p+!dGAbZYM!^<Su-*pCirMJs3Hy@mLF<~zn6NRoJ?@VGx+<Ie$E5c zc|n}0BT7H&2})h4BU^<853|@u=VUh|#Lv#s)$Ltd6VOpv5c#c`rrHb<VaZq6pf>nO zCsS2h@~sD?NlwR~!WbkA>r?%Uv#-XF-i}8}tPQ9PaV(??Zt`ELo|t`4bbJ`pt4Gwl z`{Qe+u6pd2sZD9D2=MZbG@bmZ1|0G*Q~fy`R=Fs|J&na}>%LxUU}D%PEd4NJEI8W4 z7%t;<z{ra>Os%0|$3#R#+1cA+J5tmM?+fP(=oDXkaKJCf$fkgZuUpz8m+)QiEMHg^ zb+^=D8)P`|3-~Gv1poH-SYJ*UN8>Czd)ge!izmfOH!R~bWArt}K<I6&ZKvgo$;rsl zrtrM9HhIGsPs|UoS2o@%pNz_m+6;HbZ`<C_*53{fFJ7E8W8AL^`H$QIuP=zQ{Ztz) z5-n<{_@G%}NA>4r9BYs2t54{w2{iS2%j+~TAF}f|j;NUB{uCe+8T3a^pL=9Bkl!rR z`r2G<#LeZe>)=^@dP8oV6_h9<`IES46~kSYEBNPIe21on6hL3q@c5{fY55MnH|oS) z=yAkdJ&WrS`yuxUKRSpQZ$sat7A-UeNi=5X8LrJQuP$wIuyX^NdU;x0&F`=lM-Sy! 
zdU;ZA;a&p5cwHISD#o_!Rh)REScRQ2mX|}_lo;rPwJ-+Z+sZu6zUeBr#Tp`uUb1_J zupQ(0S`6BhmfCt5<lb>ntkcjBi~}99xOsW)d3a0}v=#o=CLj`!%#Tue6ojxuSx{bS zgS=y4C4K@9wjOS7@0VYpVQH#-E^SSf7s0o@K~BVhoI%ep7|Upt^i)d^db2OjMNhpX zgAUQSMud7smplY?kx58I0jtADLKR8=jmIp_f)y9g=Lh)-);<+Uu|ZeKN+YkKti@X< zrAK$zdu#T67TpV6E@vaqW<(eQWCX5qUEOR+<k8cB@8HBw;=`iS*s00+X_{Fos*vOD zaWXIV{{F)4$f6GB?>mGNAF9itGln`-Q$?W?R%8PUsk1=G?(jih)mvNoo#85NMVw2} zF-I(qLI7|w@(=Mbu1N94k5HjLP>#*Q$6HTi-FP&6afmiaAG!P&O)ZiBS`vEKeRu1j zqj@c8IiCbMa)Ld#y!8N`U8&$C-+3x)_HX<Yc3iASzJHI8?6qWnU%%I_z)ANi+VRG5 zoo-z3dn(xLhg(gks{8!OQlL6~6=%`_WU@Y%vH}NVqwcjV!}D_cMLaVC7_B8`1KsXt zTN-ubCp`>;+fR9L>SIHdiUk)09x!-chP*L3;<x-Cd@SBX&PH=A#UnZjvooS`j3A+R zVoo7RgU`{92~k02L4!alD9w&U!gAGA)IaHJZZGP&ws%*Qm6<x#dD_9RdU;tqlSu@Q zUj_x_Kl5x2lr18C)$M7O5DQJt&hD?E>Eju7H@YwV<q$==OD%lvB!1OqR_c%_Edv*$ z6rv_t>?;g?CyN{!${gytUF_M_-ci#-ozW(i-Xj)FYKm#Lr<tZEB-riog;32|YwY?P zC)g2?VPIT0#wVjIp<;>`vrz-s6`7Z(|H`!V3bOPDBGcqk-R#d?R<O;)y+y?gQ6FR$ zVW?_mXJ9Y(dq8Sq!R_M<HJ76hu}^WCW}<JI9^|SkZTnSuTV-<W(x}8$g(A(KX5l$1 zCkpG8BEx0p?Hva4`{3zYRGh3jn5(*g7_?oyG@^wfBQZvhXt4AN2}<7+pd~8Jtw1}7 zC<u8mz*vERnK>*Kfq;NPpc{x12d!>jv&;$sjIKb1Z?Jln^7DfcL9_6SOaVO7SV{8K z(?GvUA~j;U1*I-@w!j#n!K-MlH>PZ^-__>#z<1gE5UfE)m5cKufvYdtQ)6MqK$@}> zvP&|O>tky6oEeTjYIcW9<kiU}a-Y$Q)e#XGHp#t8ZmCyeal!GPmcOhZ0A#>|Jfka^ z9CA&2BwCm~?x#6j-qkXL=Xm5=7mIIDjC?6xz#TX=4ctA|NBj_EVfqYWHJk}2RvAoO z^cFa%tFNC!vC!y#`&c?sX1d|7&!;&-<`i5p*k5@hV4Bg6LJAA6aUr`2lJWBrbF#40 zaL_WK^&#azfyyAS68i#5kgqh@IYQwUTReka9Zr9RB%%3%G|e9L1PHt1X&D;YIy$;~ zdYW2lib+vXQ<NLdZK{qB@-%@}o2R9fVPYDNGek{HikQa*+JS~)2YFNmUQsonjecU+ z7|PZ)mX}|;M<{Wd<XNf+)GUTE5?I9lLH`zO(^>1LR8yrL={&I^mm_)R_375uJGeNx zHQqhChRH5l0%V89`wd9+eEmw^tN-pucdNN_(anY?8wuLz!=4S6^>UDBMxV-3lYYuL zl+)y5tS^L#VyeiKk}dVTi+{u5H?kPN-eay~6_JCYA|pi<1paDmas(Yrk}TWm`rG3M z5g-Aw8aj&N1Z@R=m^d{V5ruk%Bn<HmwS-_pURw!??q%#wK-}I|Q`pv1A7(x3q7UYV z{It{@bhQ~{1K_#x)=`0RBZ>~XJ=crMA5fbY6cb>d=b%O}`s5^lx-|j&Gz2xYxV<(z zj2N%Zafg86<Yl06^YO`TqgEbc1ger}_=SQiBxt4rFJt(HNX28$jb@%0%W0hNQq3)y zLXs_IT~-BQM{tMJ<AUg@YK`$jW+GHdNsLil;^=-b4c?T-8u%vro6Ix|1Xq62wq2bT zSuKH@TjUd6`58v`LPx*{4|2f7v5(C`RO$|edsjE>G$5(g(i*{LZ4o5e$zD(3T!6Fx z5)=Oy4(<sW#u3nQMqU#5D-t6>A0Ul#3?ZBf&@>#U0|ye*0$H`f;{lFW=t;-OWCM(K zWP=m4(7X7!ckr>GA`p>5??e3)NFi{xnn>Kd#~!9Bp&)aZQp7g7SvI)XW4q8@If|WW z<>$0+0rp5Le#kU+<u#TyrhZE?$^BKQE`&$1C7Q5!Vc(l?O}g0g)BsoMTbflU2)*1s z_Ybv`TlM<yt`SX_^KiOk4I5~(Qj>48j5(1PYEC~?ipI6eK=*9tX@!pfdyR(dM@h}~ z4`U8Q`k)nT%z>TOUJUhSvw66MCT3sDO$&^z;y79tTN@xovw2fM0>p6%w6NM%fG@-B zLJZ7lh>XaX5R>pMrH1nS<pq6ha>U<3#)Q|u&~t;(!@Of4=K_`rSnqHR<dZ`EK=XjB zSc2+*-vvLSCzyaJd%A!Wnp?#q^DQrmPK<)9v%cyi0O8bn>NzhCpdr?$YE@FotT~=x z3r5Bfb?G;6v3N#fNAZPDfYwsH!t`E<b5gXYv6+Yvpr_I@n#UQ6mv5ov^HX5mViWuj z#@#;WRLa*gk!_;oJEKhZB=*<^kE;2C4C{re#k?DDjC_xnA2Y{oP^IkjcKkZsFYuon z+-FqX0ur-r^HS=+@-HhZ%qVXx;)m!d&mr@G-iDU96tx8(Qnj<43;5yV1nhzWbda}$ z3|9J-gIFHF3*p*9aRu`N*-;Hj05|-;quEfvbntwk0yIU?(lt30(7-N0hbP|jb1Hb1 zL^gf4p7)!~Q*O!zq4y2&&Bs9D+1uaH#c6JTeCu$Jf<cwM+!MqevfL9QmtE1DiqM^l zCs%7DY5Z<o<}P>nbi3rfbAycbBQbM+efv1?;d=|u<vwSgZD~-CP;A_r3_Mtq05Q=| z9grrbuv>F9Hq%VXO5}lg+85zd`RXl4Jbk7!t*Ft9`YTyA&XO^c_;lyz9N`zDen)Ya zAT87@n8#K(XNUu&f@X6c5b#_8(2&R+#O?-rOw}+nurVDfQpDD0SRi-~@d7$Ml|Ndr z)L$rEbEdt%3$3whj%|vDd?DL0EULwa=)O{mZq;d)-v;Cepywyr&M^SPd_-c7WM29V ztHYf7g!pK-Ow8X_V)Q-#${;0}yIFLRDWPIN;2=S<^*$m6yl*dPZoq#S7RJdlmZrO5 zvCj;rBkDq=MQ2Ai)3)O)7f1ax1>E*MXeHvRkLU%c)1FDVGV<sOxcFJ0fe*HrPW-m< z$!s_+3t{D}_P=eidp$rnH>)d^<rZ`IC!1XY>hF)fVPc^;`-^K2#UR})&{^_BMMiK- z+Mu-%c~jg5dEq*R;6M?AWBP&~nA91BDDo%*z>yXkoR!AJtd{<avdGFObh-T%H#3#e z*G_!z)-;ByG>+IN1ZMsWboFI^&JH&|_C^WJ%RsGcKais`vD%atY5oFX3r(jUBm8el zGq5vfV6D|B`ivJ^cKCYT_f9Ww!ylv&drc!spRpG2u-KK3*r%0E@qz$KC(jMzJcOS- 
z(i>$)K+m_TK%QtH)~J@O=uIr3((#=Y>v`M<7HofyWc<o7_#8_|2tRr37GJ@3JFd}) zn88*wda6Tfgn;23Ve>p(&;(M&TYAXt3KvvE$Q@GB+>;(JIETp*nh3W`e8qHyUS4Wy z0n*u*us%#V8!F5v>q9x;5eB#Z0L4+xmtG#2>HMmb*QT~;ml<_pAVXRUyB3CTxFyKb z;y1!ouQNaRyPyay3Wq^9x>Sy({C79&gWozlMx^-5)oF1|J#Xn}5Re_ugXIm+4KenQ zgYiftB1}z3b%W2eJ^V;sPBG;wuClz*w|mM(SC#D(8^yMrnrB|ifSYszmuYJ@Zt`mG z;%L$iLh;LQA;WWp;kxl)6W2p;pQ9C8NH7k2hDzi3#cW7-4cEg+3YC%v*JRF+_uZ~w zc}L*@l|I9#ki)o;!vG5fv7n}C%$sOC6)t9#B`%d^4vod3i678d?shY#1m4Z%dbHE{ zunFJhTRI2nJ>YOI1!_lJFk7W&ljMfifzfCizjf9-_SAXDjgBIJ_r9u}BcC(@Ii*Ex z_dEUdlGKl*B%#`+ZlwKbPgt8u^m6pFEwc6Gt=Gr<<)O1CdFbY$J>?<%F~xXfR7X8h zPKxF83r1dIW#@zzdGI30{+ZWed1@@VJSid_cRlbun^;P<1s-HgZ=G{aut#G~5G~C) zdsnmkD5p?NVZlRYn47AC&p@>yKDimb)9MFP@uj=Q%3D)HE+E50a^xb7^MX7gCX4f; zobn(F@*s&9*kvxl;h-btsr9C4ttJ@kCOlOGK02}ahVl7^`S<h>ZS*Q5tjxUKJ?>70 zm9`nE2Bfy9f#O3?Z5_GnBp^oy)n>W<RllYwkD%)-fe-cHd%*|cz1U+slsVYWegnNX z8|!vEV)gY6Z;dbvNcS4LcoRCA9O{x=qWONB2vu0#D~PFT8lp(z1^AY#)1|UPG;RKl z)~$;|L##@{U=Z6X8oxP<%!S0f#U1ub!JX`OT|74}q5nQ~C1OGujFnAka?E75>j`<p zP;llC`qTT<z(pNyyzW}K*tGGNCWI>PVD&|}+9Z<QytiWB7iV)1L5UVLTD>D@lX3DQ zIX4|+-OIcC(!zs+JIXlyMQ8VM5{a@#El2Ygdh)kOv1t`$)_IRj8wV2$W1l;_#KFVY zLv3MS^A!{44L|>no9369mbo7NmhbOxzjq9^J2%+7AV;B+iIUdLf9aAt;(5ZyPGY)i zr8631LXCqk*Pf}vl}gZLFe3`pi0&oTnWZ-EtGFexKQ!OTBFWlL))3Iz5h~GKx+YeW zdRds9n~+}zhxlw$vHXhsM3V=b@N#gzR&4)U8r9RFX_dyNLvGb9v5uuPilwal8U==p z1%nR(tb>7rlD4F>$i1sDkfgn9Mvz!38QD#B;=Z8hr!$8p&V)F@Z(42~ZIe?K4o?jn z-(3%9(Xblb3|l=@*(EQD6%&Y%2cCl8^;MqRq%<e3AierM%G=!^@8kDS9R3qQuirD) zj=zH(mGd&VjN=FhIbh(H6-}@xDA9<9U^dIoRAEA6_SMSJBvVnsojnt&IPE8q{sMcQ z*nlz811Yk&LwC`N*7Q>smNj+qakY|!({i#gwOTI?FJ6SATzaJX6kfU<X~`~m)HHa| zI)d&7S<J}|iFL|5B7^gC16D%EL!kB*<^*b7hhkvw4!b$VLz{{P9?_e%sGs@syv7`| z@+0tteOUoyu_}uI)9P_Ad}GGIzx7S4b;2?J^!4#^=q$KB7YKEz3ykVX3g`Zco{}_p zn%77kY3}AmAA>^I2X}hE;j|{cY2Km*c?CkmKG08@)X1q4_o0gA^IrSs{SXgwC`hAg z1gajqg(3p6C9ycnW>*2QVZs;_M(QTiSuyHSs3cmDmrNGN{vby%@`%t!$*n@g$9{TK z7ogKh%QvVIIx)B3<~*uY-N&&w;Gx=w7|;5u4}psYzMbURVd-F`EWr7zS-<e-F+h9m zA7sxpM*?NQEn8>YIQX50Rl7Z{AGpftf$+A4%xo!p?<Fi*rr^XgSayW)`q;#eRM~l3 z-jToffXNn}(F#O;Lmw+ZUhy6|k6Aha%Ih(l)(sk&#n9KN7WG&Pq!8|J;dh{6cc9^S zA-iZALvaJUhRPztHz^?sIXR{^m?h{(D%C#?s`pgsEmH#wxS$6y+}QbT9Q?4AoEw$F ze&esSMT~~L?~l(F-z2XV1+NzMH=RbW_mo_hbSfG2JU#mCd#o`Arrmz5BnrK^U0+Fz z?7Ii~OJK3?z-g&$=-G`S3lxqmmMKq>sH}2FDQ(2+rVp2h3zVM@CmKVaMoMRmgo3Jg z(k#1F_py8h%#uHEODEP@Z@bsQ`iBv+2?xE4hN@PYWdg%`28u9cDGl5Pp@xeEPgbA$ z;7=g4Aq*v@Mh71tTLqn!s3%66dA1KN@ko41C9}nru?H?|o8TgPgR(IA)3cf5p_a9f zwRV7H){+}i;$8;;&gqzKP67kYgG{#_!KdJ_C3IR+a5zw(`&bbx7Ldwk+tMpGX=`S& z@aL`o*tfPXdGpso;r9~qP=fwm9Q@uK0{v=j>DEwb6?)_7VUx|>;PIj8_NMn+jRd4N zAq~DMw8MZSmT-T<>wEAoegrnK5h*6S$qKL)($v3l+dAqM&NmLekl&Mx>UX~t2YR5r zQe40xvlab-`ow-EbbPvadVO3ZqAb$13c0cCq9z&4$e=%Qk^)Pk@fn-O7KKCsI+YVC zog{2RTY`3N*oxaA1lH>kbYoYx^|3VQ+pp2@!C620N4p4niz#))A?Emy&ajy*Q79vj zQ2U|ebR>;v#(p_-w7U<)u3XyFJzU%bmUex-4-AJMeT{A1kX)JI!&I#r7IXChIeNFw z1e@OoZR{+9dq$mzn6DgRwV}GnwJhKRfR#~OWluRu(l>gLmbkE2+px2i(J_|{k(I8K z6abXumg`{CS?_pa3vVk`z0yWE#~C+mng(=!S>E}X(E}YhHlo7vVb$7yggAD-evExE z?R4q(Jhi!d@pSuuGhgDXm>=qvpXC((NFsJbW3WbH1iDPOK&7@j4xAe`)Y>`9OE|)@ z_Y7js>=KBikLFQUTBR`Q(8Y%Dm0Ntxjm_vRvFAa@Ppn*EJZ5DsrnZqbR(b22Z{`CU z+dk8ocE0YYZ}<3q?4C><#J~K6TgmmYytR2Yb=lxI5&mL!!Z$*PDr$BOk|Nn-@(hon zuS7!d8*csX7JKCuo68z3qB2|PGrk<cB%)&ZD5JgOF*Z?7SBLZOy%vof#t-e6P$Cf& z|3utDh}?%?0w$NOJ1(1y8|IyMnHLTEEfc^&^H+i?^8;bvdDR2f+`-J%&Bff<{?DGH zi480ZH!CqS@t-|5E-q%)e`fQr{5_kWpO{JB+`-b#ikOAx&#|h+OrN}+%!!$_luT^Q zP2Gr@)Z9(n{>qWDH?{;8Xj+>Ak7j2jW|B6ywzP7CW#I(wi#ggkx~MrBn*xszHwT8b zGnaBP_9kW$wRUq=Hg^$ow0ClJFn4ex<|by6w6=3IcL8qgjNQz|%}pK6%wYuuVgKHA zul0+577zdc@V($($sSgy0JsbM?Aqd{0YLtPh<`QvkBF>1|B{dGKgq}b{~#X=7stN| 
z2yFEKB%q7~P!=c0KcW!3xwr$x|DQ#M{dbXx|BJ|Z`qJns=s`0b=IgZWG#OP|VI?%t z=n(gH8oJ0~W*OmCHIx~pm>Z@{61b8|686#(B@d?J8na?)pO8@cWzDs)rOiu2U~FrI zo>KYZA66$F*j#^2D&}3zbx!-`3H;u4wGan{5I2!+k_uz@65qMT!t{~1Ay=e06ehmp z(gVP##Ce1dlnTRf=OyN<3yxD54;Upf-Do^%aHKQcDp-~O{JF_wrUVzsb)<Es!57PQ zG%W#(tgBT16#Y^K76KE=j1*ueDU9|UF~I6Ya^Z0)!7jjUN3onP!?D|$s*{$n1_e%C z);A4`4lBJmb+<Y9HA&A(4In`nhIjsAr$&`T%$ObTu6P5%XWrhhQfDTp?*)D~*}*X+ zMj+H=JtH*1O{f(3)rnPaLp(8jWxw;yZ26KAN4WJZZMa4tQqXQ=T1#VL5R9$o*BQIT z0MbY%9$(ExtC>gR`gwiNPrjmyNbHEO6h~WAABxl51`1jf_Nddj=xw70D*6saWgtfP z>qiFYf=sF0ZAb8MGE-Pf2W@(@N%V=I^b_gy`L-q*D?0F37MwSWvWARZ+)@o-VXoCi z(GnPhV9ml?RgQIP0WHkS_DTZ__OZd<N7?HE(UAQ5zsUP&GFqx-g1+Uhk14m}D!WsC zGohnij2vH@!r-TBn*g9p+Fed28+!YK*B*m01EDBiF5EizcFceie6$VQnCWIT(sW8R zBU>S`jZ!CxjxiH|SK2`FuZi6~rQtyS&?9Fu9nl_EwPCV59=azFhDj~Q!)<J?KzR#S z`Uh1VX}zOgTM85RvIzg$$uI=8f?4D7{nK5zg1%M9ii*#6ylDX26mN)`FBxo^lq1&L zgHOJn;2;l6n7^iiG}#cYkS7oOZE)xmbSJz$Xw1#RRNN%LRmky1jQvy;Q$yt7CEJPq zOd#pvxkn;xT{kvt6#s)~RnONzLa0w$srJ5bp2X8x55BFfmX<;owvB5#W@|al?u8t) z1o<uE(1fU8AmHZ~bbPNKhkM~(^L;jd<w#dxuReaZVB>vVACg?r8X0cQQ{l}z#TiY| zz|k(L-3oQ!Qyq8eT$^*J)6$4cdKLMk?U<cx&ohB1+C;!)Y!Cp4>|V9{1v5h67ftce z6a`6To};BxLZnH0g6RST{MG<1Co)soI3i}{+ND-nkF<07#+RY$TgB1L{Bri7b#(qT zeYBXS<DeshlnPz=9OUGNLe1wpJ{Mwe_OEw6i-LvFP2}@?Nq3_a;7yd%^Rsi;j)93B zO*7CU!3zg20<heC+{96JW>7|W$lxM>{a^1~<E=o<ko_kW_D3`QrLUQpIsU1{xOx7Q z5@Y^<P-3hcoc~f{|Fa7FpB2{stfOB(0?ohx(14HssFMFQ@-KP*HRW(~{F^j?n_?uq z+@#dp{wPi+Q)6HdS2hmjKc7E}@t^Y-=l{JH=YOlk^Y7RC=k)gHRKV#?k(iB`N#xHA z<nY&}|2A8xN{Rm8Cno<CC7_nZAL(tR5d8*WPN{mO*aD>qd0|8lw{`D0-tx`16uM!$ zXN@2~KYx4Dfr3ypicw^lr9*2@hBpEVi2xJ{CB#IZ^q3k4JR~#ef{U&5SJZka*Cku; zNAHfWus*eYj!zJX_W<VDJ9Q_YVE-wXawH_U2wk<71$i>IN4A}_*@b`jeqQe0&-)L3 zgb-9vRP~_YZzC)CpC7#X_Fh`7(Uf?<?a$+wq_!y(r?C9c9JFi{{QTe?p$mJ>daW>o zj!u<CN=QwkO?8M^Z)e6Hx}O={$(oTuMY+?bW(lMLKOoQgLvY`>Q|8tM9eDPYEa%WK z!t3FKY4F-MY!&knRakGS^6}{<qsM`|3ymLaX%xIe)O~d6y}xwmZ3l|+^nek$mr%>5 z<H*co^_>mq$G>gvimGL~8p5b3bL78B`gE<It;9;*Q&hR3`m#A|V%3TNX7+%@_JQwu zh0*gz(t8Au;|49dgA*2Z*uVEpq0wa_g=b8yeG|*z(^}=>)ebX&wyYQ3reD{q6A`m^ zG_}^Ztf?3RZ*8N#mgl?isb9b3Sr>7LOPH0pg)QGat*&EFmH9opRgmvRHsI@%?<vw% zZsQ+19X|VB9MhwcSaO>qkafd73v>{<2RlGL=;+H*Z@NBb#?hmWds01aIp2!v){^u( zY-c=LZ%w*0tH;Q3Sf7?NG4un&V2Inz$%^^dHi#rTKpZ6kCK&Y7yp>sDa#|;6$m;j= z`V3xFL5t!Y$}jdSM}#r^7hYQ;><Qs5q0sX0lph(b`xH;rueS9i*p=3pgK4GTgkHp) z^Cy?uFzibxnR-OM$%>bfDXiZMs3tTkYI&Em>?$N&T&}3D1q?GcGil<ix#qv+R_veB zKTG<pDdrHa)lAiLuDICe8)R?7*Gv)RsP9Q@z7|vsd~opf3VlFL)qz<3C)M+h0mjA5 z`L9vO{r6F)OUxuACMIg^YHkL6KKwJPREhQfc^J<>4*R#KNLJwZ{cD*0hljlX^29Tx zr=N-;<>Fs!I2Yht5LKd{eSdK^q0>Nc7PNK$J;Lw~bN~>V4Kf9<dc1Qv*7MtAsrs}F zo(NY%2cmBOljXq?kNVo%?OpG}Ols`oj@`9V9_j{-7oYwYr4KHBg`E#4v4yoM@rhn8 zOH#afQu)~Pf&4meo*Hp;#z)H=l1Z8xy7&9n=kwRMi%3hyvNO2XII5i3TVo&W&~T%x z!@Y)^_ao4E!!vG$Y@o}OTQ?6L|Bo^`%~GPBy^1mqqf^8>=>`(Zherl1ISSMOER%%1 z?*X<w`&-L;VW5KzqsEqprkouEXtMrc&V{279cAU8cTY6Eb_1_=a%q$jJ(b?}#>@_k zRo-PFk;pQ+!%oJKv=9RK%o*rJ7Y3e3mZO;|y>lrkvmRYcxJ3epCO1yTq`qzZ@_)k8 zJ^#}24s)1G=#mAO{^OwH<J!%|JGq}JkB!je?GC-?jfqS5{Ptwx1jL_9z<*&J<IJML z?xDdq_`}Z!N<9(m^5rAqu-2jU3qrqhB-A9&&m)UR8fJb=<1GCq<NR7Y`^v)7Wn~M? 
z^1212-ePcug=zVwoRcw=a9WA5FRfm)1`qwc0hS36phdG|k`JItFBmbortt3J))>sO zfDP88!7kX>X5C=_&uX+)JxDda!QzcGoLQlL5===8_^vKzu$uWnrd)iq>XD5OxehcW z;;9eNPz18_-WXmx_GlF>k*bw_jSdi(d$vmMxsJZU$b0MOQ&wZ$08qr{8rmmQD&)}_ z5;U$(?rjSv>O3Bdr9~3%EM85DnNItNxFBav&J2D764lQoP2WIUyMyG|y7E`-d(I$G z-FDHgk>&C#miDw;rv|+dVQ5NDlKey`WZpPSc(Xb&8-Oc;+vR>Jiv6(Qv#42v8puCg z$!BNxWMudnanRP1rEl-OisNlP5dEWg$koH)FwJ)TefEi^jM?|Bis(g|CJ20asN=gi zRt-gwq%Iccyhkr+hn<tc)AQ^K*V(K5akdV9vcz=QkHtZ%+&!8V3vo@;`oZ(BI=)d^ z%BBmYIFmIoR9<c6_le3cNiD@4A;q#_sMys_1#@V-p$qBzSr$8#_N_AFyYkVTv=at> zEHlZQlIASFAn4Y3cjKUM0+pG#4EuBa#`0e=FmDo$S^^Mj?pP-AyMD<oIyM!>cerGJ z<M0HP2GZz#=vW`a89K1P#?r5ytW1Ao{%*y(ta-l}i9Ne*-0)nPahU1-xPE<WJlmMK zt=c$AWHQhj-2uCKJvlw-Je#n*DeJzmDDxBN1kErBG}ukByz5%4PgwDu1A?Tl0~U$t z<t8P^0JK;1S;<DI&9t2b`R5yCt?z*-r-44bf`Rz4OCi=hn3w|#2!5izBv0uFLLMHP zYy+q1LcN^=z1iFP3W<boPjh!8XN@aZtyeRVXScbgKLg_v%mitQl`|zEdy#oj(+Bwu z=6tDGi(Y&EeB{=l9d2QAn8)pw$g3^<x~x+2*Cu$<{FPX0zQ5=wYu;cMuz{x6pw1*v z#MQ|*&}c><ZQ=HVW<nlS4KB(tgAxj|R1In~p@2N2rq)=K)5P}C`oc8U{)z`@_KB{^ zIqaV>AibqmD3YZ=*!UD}c7fj>MtfbUOZLQ_iH@rv#E#nV2^7Q0F1Jk2)7FfgRXBv! zX&X)&E%;|;zkwTSxZY9Q_A!Kt)+iTb=P|{u&P@B7ouleLeQ?@c4M#P&pKjaQ`9&RM zR6NA00fm7lB$@7P9ry7AvGOmTl7L2C9!tSoXGe@$jqqcx)L&HW=ZPAC+o5%QAmU{E zH7!U`FFM>&Wes-tO*KSIm0n*JzZztJxLxL(Anq6li?{=-A1-;A&LQO?lP`>TUtBMc zz2J}pc!7UUBLp$xJDQhBd7A(6di^z914=+ykBItkqIr$?!>XC4t72-mKw6Xv?-nVV z{y0SUCn91@!O&bd+5xG|z}Sg6c5#|zOF$<X!vScuKVn&>hN8!`JxbuR9Qq)PFRM*9 zlno$%Or=()j}^i!q^iGE1axxx(1NZP5%m}YU(9lT*hrq)hrBzJS$geUC;H>^d`Q&B zPPb_1!Iqhdd^!3T97yD#tRx4`*Mq5ZEhR+{tD8d7XoS#qy~BOQ@;wSe)pNg{^@*<| zFKYhDU{)5=RG@KSvu;3xR*fLkUiM7YCBk>G2>nKnz_wYaLNPaPq93&U<e{l)^t+H% zg9+aVO#9#mG@{>W;2<YUw10Mp<(S-)j@(xbzJP$qUPI&A;wM=Ed|O>0>t*lase=R{ zcCrrpr-D$ESIYY&x8%~g5~>9VA5g<Bpt8J@n8nsX-=}(%7daHlg_lD}YEhXwgr}%m z6^)8ZsgGgtvG82w_46#+IA2sDJ|iI`d$m4F&IsDV#V~x&jJHt%@DF|ysMl;j<jAjT zcj&pt(H~he`Icw@MQSIiu{x*t7j4m=o$v&S9LNTokk_K6n>Mm%C2F81&xXhkM}JBc zVa^uR#7hDdC}ToAOb|ub--zX3i4zdaBS3#9yK1YR8nvA)%uMzo8%u(m;dMNW<?oth z;tX9CoUJL3UM&c688b_x?{muJugk=h^AC`6u2bR*-Jw#KZx2%zztrgOvyv-+l|mAf zYjUmcIh<9&VU?qy+nvnrD8tWWZQrg;AXNFHP@dJ2HQiY)E^^r9i!)9F!=Y5KV>cUG zhx0>nAv07wgD+Tefe;I|arZkeG#E2^gsc9~j0r+1zJ$IX3e-r<YasW@rdtfcWuge( zb5na`Gf_njbW*6v7B}x<{rkcTN316f$#mVv?QGVz%<T(uB`haSqsR=KMbPw+v4W!U zI?a;rIB+v-9G?l+tz8ILN%EtZWyY};!LdVZA#wPOneGyKR$ab3rZlt6M+3mNbM14N z7o>}3EksXzrA_Nn`WICf{gGnAx*R&U`snOHi(}*aKko!yQa<=pu^NixD$#t%Z*Ns- zvUJ5IdHQ?{#w~4sEYKvikZP<xv6X}ZiJ3I0n&g97oJ3XCteQme7I#@MmFCAUFEsj< z)XHXSY>%jy)3!EomMnnTp1WW=1y@vBYK&;dmoyu>iUl+Eaagrmn*X5?OD^_Y{z5>j z5LXXdJLNx=Lrp#%Jtn%&nVmT?da%l9M-+-pbl@cNEU)a<YovgMR{|9T5QI$HYt*KA zMvF-@#?LUoND~_;79NuO40#bC928@eUy%0%eSOQi-za-GqG_E?A05y0{Ie=7t#QJ5 zBBLL3IU)ussdp~}g9#)|>xODm)vSYDrxFq`N?-hRWA>P(h$I^q`V#^tDxXmtGM<2> zOO}<z%ErL!t&<SeyLQ`7<(Pikj)B*4X8H>OIb)>X9W;}O8dUTx2@y!T3yr!-Tdk|Z z^e9E6Y@U7o;Ogcviy1w8Gkvo;m2t$kL>CG|SN$IrKRc#*4eeV)S)^9nU6_|@oO_%< zq0Q=74!Q^GXYL!tsMU`66aF?Q0kaRvdPsT~6ytcQ!>O$8RMzLtcZ}miFEH<kH%)&m ziCqk@3)gcyVhCf<t(7`;X@|B_A-i7*mc5YlImNAof@z1Es*ybHQ(m3V<2jo{<fT;4 z@9f4$X~9nRcp5a|W?I0j>RS?C-Sr<=VL}4$NOY?0XJ<v9@t&?=S$*=FfQ;C{EX=*m zVEE;*h6sswvg$CdhJ+-Fm%pVfJc&XeR?TO813h^>R<(?b-WnHIr5=Z>Q{ZTNw}y!$ z7d_<+!<#bt-AHj!w~mE5>r?f;Nms4Q?#-UKmD%k1;&OWrYDirA!L%qsJSiC_i-VBa z%yI@=8tt`3*Nto^9n%GkU1+B9uubg|7>%|heIB8~E>~Bl<>L2P8dB2@yO)!GVp6*T z4(UMRK6<Jvh4ziMJDr5x`{jPt$)gQsBF82xE{uu|v29!Gnq!#gj_$nryt=GRk?qo@ zZA8#MYqKpF34!P4E|_}RuFDGw9)!sXDmXdC%oZAW$XLK^eV!FwlyU)Z?IKcUI*LD8 z6GdQJlBXo<3C-Bpi+04UW^DHxZN5h^$QMvpZ?kJhALS@qh~;d}JrwjJ$pd03U3{+4 zNv|~3VhfBc$j_M;t$Hzp<j*gBU&0E%mN&TgVQR_txKU}0x?X|8jEnTaM<S3Era|63 zYh=C%L=$N!qGF~TZN$UFBaj@MlFb@^lv;uC$~Go{<~2_f34*La1q%nG2PwbpXm^2O 
zRnpIVJn2*tl6icUQMh$JHS^~?eMC9E^VLUX&~C6%ZQN(l<q-~{diXxT(V8llzyISm z@wE>fjC9Aj$d9hx!wgA+k~Jdon$`%CgQYrklhfLEb03Nb$;2sRX{jl-;h)HLzC-{y zL{zYLorDX!#>`-SiqF9g$HXii@zf(88qczhiM;S*^X%n~kvNRsLu;fY{bQqVT82ER zC<({*mAn`&n_9JUI3RgR(HMhDmP~r#sVH}0v>ET4w^AtTwof+@Ht96?>;dsov{7ob z@wP8j%vTu1keZc)vCi12{3MYVWm#JgYM=G3mmke{ac?jU;ejNWgNs5FH-T9R&rwY{ zQ5Shv+uG|8no@$BB~pTTq=QH~EQ0F(E`4afbT1)hT;#6RL$gFV=sQP`3g=D07xMs$ z`h~`fBsnAY`2JxyO1Ij5#eGmG2I4k*7nd#}(ct>)w9*@mOscXY@)C3pEa^%@*8G(G z&namH%C@$5Ai=5dEL5K3Ce)0mrII_banw|SMcW}wK@iKh=A4IjUQ$)fQw{hgkQ-qm zH5j>Hc?1TvW~(~%P+0giTj-M3k~88$d`Xva-r&%y^O68jxv9#!Qznza$xPa<ywb6H zVUPlJYfoDeR`w+W<X^zHcWXWo#_#nv&zX0})RUWAzs-+eOULGI4x;&hcUs0&@_+_u zxhbp>jezXEErLb{-$T~%Yff?SlYxRu>>Tqr@$-z)hj(DxU8L`0L?<bMXBE3UWO%LR z%IVl!H~J`lGLUd%plo}m^s0#)iPc=!Glkh$Svb=IZm{*YjT^0)s`>_ur7NR$tTC`D zF{|OlUG)oR(3peP<Tx`5g8B!l(_2K%vZL<Zu^jnkecBpnVVBaaC7o_)XLmR$a8xk` zi>ry5U|v?SUDmlp5~T;+GaWfC2q56_Uu<{@-N9W9KHj}sWwPBS7qd<d7YwmpJ9dz` zEkwusK~bK&As5M=H6(^LdrIHaYrD^r+2mh_S44I#upgQ`TSE{*Tk)tsqRveaK?jUO z@HKdkRxlt#eqws%XaozMizRtx>Ma=;Z~?~3fpIsP<+h>O5frz?<6OkS2R)IwcX%m{ zj+XTe9G!?{87m^h&5<`%suv{$#^q2R{gAPx6#$g(>H~zzW=1Jl`tJ9AR{VepCW$T6 zWF$%SB+{u=BYsTZ)#buHwwjHD?jA?e4RnDwgXPPQ4uqyPRf~P6ekQ8bC63Zw7o8rJ zdRTrU!}E}X{0-^k0bl(-?;z!>zA7c5AM9Ur8xi-Xi1bVaA1D{|f9^LL?zq>x)v)9; ziMD??@>#ke%LnJKA0wa|LLr0TPL4&dc1w~+QBg0eRg3`r26EE{3xCc}9MG^3%f4y_ z+NCII2%^rbwQh&b#~1^e>EAq{PE^E_c@QH}O&IOfZp;JSbuNYubZSiEPM}KZ%_Y%F z{+zT~dyGL321QA<M2j#@p$X!SmHEaQD@AZrw9?n$H{Z{-b?%ibebA-%MhLfQN!g$e zwjSpQvVBG$hmd>F(feQ&i^%YmK*z4muL&5c1p_q<uS@V#o(o%pbpbS@4S)^oOXp&X z(87%1jSKB3SF^Scv1%*UHwhv<#TwR=FhoYn-PIg2rQr!7q!xqLC#$aY#tD=$k=;wM zeu(|p3fR}`9Sd}9<jTIE$-Cd*qB%Y4cPtuiILLrb7`>kY84=dopC}jE6Sxp8*R{;= za5+lWP{#xBU^xWzS|oaT+-$)^px|~f(+Aevk8g;tWV74Z=e`iO>uF-Q+k!BGlf(w# z6^c_eHnh}SnVi|e-LSgdZHSO8=qj$-f137u*lc3EN4Xcr68Fh2&&w$$#rj!t%DgE! 
zde)5pgVt$zz|AWag}JKlEIv2<I7z^n`+?)d^BkYIUhSf(%lN2G(3o4Ym6bQ`wPRv0 zY11M*bRr-2_lpSVi0&HO)<R}rLc#qHg#~L?VscpsWLZyonftPHq}ceQC)Kx5HPcvp zZt`@mb4*uondIZoR1AJ&!D7Rdemi%<a}zLDsbN1SI9S!zsd=uTab5xG?5B+2vE%4< zm1aq)O{LrVchIz>bZ-fZA<`<p#MRcNll|Q!cs9&hlTGOs)4twT4gl6u^U_qN3U3+6 zqg4y(vcdX-5aLq=l*5=zi1o)v2VxZX_3k(5Dd^S0bVSVL%A*wcy9jp5L<Tpy9Bx~$ z8_47sv^5_GxW7;|CR8}Lo;dQp>Z#79i|c~0ReL;ELxGXkK4EQXPqH2#=dXVN*7w`^ zv&#)#po1&v>^7bIv=#d9q?DC|liq-EQDMN~aP9!70CBu+(D$bB&Dn~3dmr%XgPdH= zpAgd78{2H#4ngl>!?_>Zj%=e#-W3~7U`?sHu9^rOzc<omkVkM4?-T?xkcaoI@BOn1 z<io4`7{cu7>v|B-(o%n6WQNz}@11ori)7E}JFkiCBH?`{;-P%f``$7^Hd<Pus_7-@ zM>Qc9xvLqBw8eQqrWI|c4I!xqDGBsOWi0GL=#Zod7Rs95Ew7v|xoh$$u920>ux7<e zkEY?0K#*4NfK!)Zv%)<AKl1fBuBLv#i$6zhyCIY(Ae^%N=gBk^Q6mpDKVjLzT*j&u zNb{sm8ukiA!~j%^sMMxcr;v1-C{{`h8LF3ti4T{%V#v<{S|@>kR~X*)av_nL*n?Zg zQ_B68RW6GTV5}|a9-kkr0hv9;;gVInqh2Oa-ln;SS8JsQ)5aKHJcL<eqm$A0Ar+Iy z_LA-`%%o5JDW~=%{3X{Cf!lTR4%`X)4m)&R)17&2Uv8Ho6vp2rYQ4NW<W4e`QYUY& zah5!z49j)Mt}OT=<XKCX?;?zmA*$PG-fAP&gpn=8gVqB8KXJCms=7f&<pgQ<<vt=s z0Djym3aPESYS)`-U9o$@a2N<nx>MrBOih`pp4T?qvkd_cj8&x;R6#|jj{$?~yAzC+ z94HV7dvj)E8`lCmUGi<^op(3ZVWs`~@lqDrJvXb1xel2V%v~#BF=Ctk4#ZY$FV0Wv z^}pnpVR$xXFUZw?EwX?4{l=I{@;hbGWuRj9z|H^L`5`lNN@goo0^Tm;zz0Y>l5yOB zHw6F9Zu<wb;ox9q`<IEA>+hD}KPKM4p*G5Yu>h2fUH+i?EUbTQz(7<*)!fz5-Nn@0 z6=)~^gIfW!#2kUpxP#lDoWBl&Wdiz$v^ExX^di>%V<hGRBKMputor|s_xx4&Pb>ah zPua!MRL$It7-)r77MBEG-`oqxNBE0|00e9P+KT;sD?_aRU+}Fz)x}&{VE<;a78E3A z`WM*rr%cL~g_z}Ue1JcNlE9`C|H+qRV`K&j^)~?|*?|exKRuudMDDqO364L!0-$J; zEWpIyC70v??z8=)3MV5AF~{E$e@^@l7R8@)%h<Cs!?H5}{S?3y3o$G6-x>%kW8(p) z{?Iajrw5j?0#koo8Q44j)<J)({*Mm&JG8~h{Qtsi_&+c={@xfh1!GrR;PwChJO2NH z2QT`A|ByBYi0=z`cEQ#E&^iA1&kW#{YghI^-HHD|L)rg%AO5YM{+rl;NA&(b5mx?B zgpPmw2>*@B@%Nr!`TOtkzxM>}e<rj17o6k5U!j&j(}SQ-_h|8jTze^yGO%zvRfj=^ zR3bT%16pPP%o3G&AudkdACaa)mt6NXWwgU{9(bP^x$^~YxEOhoiQ&M&bK%o`)ygR@ z>BDh5J<DtSz5ddLXtD_ky%`%D`TJMrPV-q__lkk-#m<|9=bFAp=9c-tHW@vBzk5Gs zJ|4e(oPE4J-*fFCtT?`W+4*q&Y}poYyK%kNa~b$_B)BB>{&42l`(<afbw@#oPp|jw z^XB{8>tQd9=VQjE@w?tm*_WN9qwLz9dMf!^V0^<$$EUZojRK_oOYp$#&rWRz8$0g{ z4<UO8GrixG{QQ3mf4R$inyGZY+Ntbid2Rpr)%Zih(xLjrwoUiL@F9tIld$%us}GRx zclz^PCPw#cpbAK{dROQ7CicI7dU%(Kp!5CwWKY!nrnZ5o#35k(f%x*J<)d+H)yee3 zm80u+x|q*y@^$BAc#mX3;HTKAE23xPb!NsEgTYTWt(&m@3f}j90)HN#na37Kq1W6C z$N}re&v+Erqrdq-sz@V!;Uk^~5|D@zRbn!Duq)94ig4{LH8p~t1#j@-<G&Ubg^7cy zWNu&WZ6s=(6C)bBtT`Nm&_)<x2QY1T?M5iq?_p!SQlp<6>bI5d7D740N}%fB-u7Ky zzs+u(1X>F#3bo?Fe+k4<<Ra?bcoZ1<a;;Z0A3q@Wu<eg4q=J#i2Nyvj%CC*!=l<ho z1p(5JJ#6?lDM8%O!UQG-#n;m?K^XDOSfWCt2(R^dM{d8F6Y!?}nRjTN0F9ew3W%Ts zIq({x1+`}D`z(Xe8!l#vuQ0vnKgPlA0&e(_K72Md2K{)s_G9f2%d@}=oa=U#BCRjC zEUyI?FhcPMPsUDS_7)r8cTazwkP+$oYn7Def~A88#YTR1z`tJKap87Ai}+&2*{G*m z4~OmIrg7nR#P|)M&);*m-|TmSyfN-}$M=ky5pJj#8)5h?K@S<UN3=iP(h7qLfA^{6 z3{0DvUx4fPa9&|nC<$R3Po4?=(yP&wEkt4@3%2NVp9bx72aQS99is7XXDLUaoQ$_8 zt-NcFJgA^{jF}&9dK%a3^Fb%2YTFIWtFzT3)4DSe=alpoCRPectPZOBaR_9p=o4yb z8yiTF9f&j0II-YN2}6Dap}@Pe85+ZRQT3U`<JCAfOYmk69X>v@GicI<jzKNIB|5FW zdBD>k>9!sk=&|rUQtdOijFhG$8e$m6m>B<$x|>Gu2K|8$=KL2<dr|_)leO0XY1j^& zFYTQ`?@k32U`iOOEr(->DJ}2aNx~6EP7SDg_t5S6Py_v13Ry{PQClSE5DMeG-z728 za=rC&Lq&MNA;$K^5;8R)J4AFpBjXxq&syq6uNUspP)uQt>*9UpbYp?kSjXe!FWh?O zynf33(RS4xU@$MB{beD$P3RDQp;gK@GHq@MqqNW#A}xj{xJhl=X&iPnhKbv(Gx;N@ zHQH&|eQ)CYnB_+_h4$na4GbTg|6`lBT>+r!wQa;E<QsW@hO{ul8pgRqXPeXgWp(et zuZ++_SZ}ZJBi4N+*9+}ryq9#6U;axl=eq{4IQ_v*dV^93CFY$8FyuVf+H0c|+1S<z z5odd8Y)$m0aW9}!FuYX&=##dq=Ay5jb@uC&<0L{S(u7ge!6iuOL!hq?)*ZeXvY#5? 
zjHIP#&7kSy$$kCD*#<;tB8{?kXeEMwn26FU+!#in3WKhRa0FLKqj};WMXANb(*%>t zyucF+Uqe&)t<e(q#aKp!Q!5TI;Z10S)*y4I)01iytxj`iQ83wy<9d5WgU_d?OsTWJ z*=l7IwWh;ZW^Gf+u@S>#%O*V#`%ssG&!^;xh{o1&&%&oduBHh`fUVEvXI8`8QzH*| zkP()K7%8JOz+0YC*6h8x2eGI{_csV;3uQTdX~<7_&tpr&hh*_HjfvIz5cO<H6xMhh z#5&>Cow4@UjIc2o$1&Q%j1FF*oq8%M18)!_Km=yS2zz`Z;VR&P4^olRBuszxqX$=x zt253WmtSZQ_)d+C+^29<{ow(LU}n5AXWoDFEi{A2f(88M$Z*(Q_b>^-`sw8JEpUy1 zzibtZY7nw?@E%f+tQ2z%(k*9wj1zmLfOkx69r<F{(rRu&tRFuw7B{7^Qs17>Fs5r? zlC?GR2Yn>fIlSmHwrElR+|XnlF$IGJAA)x2@qi;!GFw~J?d&<ruW1+C&+pBi7uF&z zFzt1&dQnE}2m{@w);Lu#3@8VD5}_O7aWMmQiNg?nr-H>{A|Cmq<yPX=BAn<Hw~>DH z4O96fd*R1gIPaatwnK3`Q5q|+f~{~;f$tP%ZYW%(0?^AYq)?d;l!Lsx^%~{`N=QkE z?^$fclIhEnMMIR1nRQq>PLR!D+#o3sP*B9y>|IsO^SJUEVlrS+iw&W`*d%ImV7aJB zddV7H9eT+h4!t=>+WrzAVLIQgFql||jk137puyAXWPyGqEm*jl0_WB-v|N3$a|;7f zEyz{Pntr9s5rclcYXRUWc2A3f<nuP4k*8or4I@^@iOQ*9(lF>(=S|<-$#cBBP4p+R zxf$n58;kkMfN;9)BPyDpkbN=MB22X8=1nG&Mx9tk#_-)Z>Av3xI~N~J?ViPze+#C5 z8fVHKMGx25nBu=8%~-M~9*&cI=)^t&F0EM?kzPHJmGd{RVKFuu+Y^L|h+%rEw@1)6 zA0iE23<a^|HgjT+rvSAB<(%hU4-vvglP*7@Z^6>WV_Ei8M#3ELH%~%Gh)CC>K0a;w zF-jFZ&jIy~*D7x)Axpp9Rz~9y64R{xzAV%L61*XB24e!={}_GNApJ;cYK|?xZ!VE` z8T?3n5(bcE;MV^N%{$b{N)d?yTZdaEUJWhn)A!(jtC+EAKNO2dS2WZ@uV^tTLOdU& zlIeQT@Rq*E&Ji@)FgXT>@Uv;~#>s>?d^>D_qu7`jAP_>kV678|bhHibM(#f)tFrtv zqD=tbH6n^Yr80;DMf9p%>(UWkX3N_WBo#L3+mv+G*9+$Px?j7WHRuI3jOMNp=|C;* z^EX{$3@y@Og}?LcTMf9iMzJLs8A0hU1{wTZncB}d5e5fOxhlntqBew?#z4p&2x80f z_AG_K<*a2Wq&SY$vV(1S-j%SG;tWyf?<^T&$Y+qTcCkUU&N$Vnq();v03GXR6BR{+ zsozRtn|%NQMqp>8cx9B8d!HvI$DRn6x)BNbdf<_?+k+_=AQv3sLEuf_7fI7Z4$iwP z88~{?M3?*p8u?qQleyoJ{ZDud<!G%-;X;;$1*b%kq$Y|zaR4_1jd{rdY$!+&jgDX& zK0ys4v>8qg99;qvisH{_(9ewoAW3}Ox<@}n`^qtj5yGm)Q^dlTwQMM4<f*8^ML3(y zW#z&zE7LL9aiTswl{ts1@u9dV9q~W89d%bFGAXa?idDkQZx?k(ul9#=;df>z=WAEb zqPc}AmW;@q&)F~_4Sa=@&BCbVF2GZ){}l~sz4o<3q6Ox=j+p|ifWX&+9RuQ4<kD2I zlrAl;q$PRidUkA50m)Ak<V@AzATF?w6YVw;w@76iA{PQB10so^q?s9=a$@%KWeJ7! zPdQV*gd3S+0$uo{$;km3pP7fkr(x<=_%q$nG}yzk+00bkY0|=}$xR$!0T@1LBZVsI zYkE#_p&jlv#=7w<i(ecltkJ*#)F?p461<FH@=9BEQh=LgU79LLDXd~6jr7+A7v<*% z7eB~JBsv-Tz1_X{$%KRwiU}O<4{{Z1x(GY&U17qW+d5Io7SaSGWboeGxy@X_$zTK_ z)BmFEoq|M(f;HW?ZQHhO+qP}nwr$(CZS1zYciY^zCk7Gco;edU4^<KCu`;UG%FN2| z|JS4}P$i*dv58E%erT9sd{RXQ<Gs_@lF_m<nExtR9bcDLL2iX8agu1DkI1d<W-xCQ zWLIe7UUoi+24+(<xgealIx~3mHFFG|q4O%t<#Q5+Y)}TWZ-nM#f;rHQ;{`$y4%ro@ zrv1JH4q#K>2efmss2CoYo|+gr!#g)Lg%wdX#vGNcH_VX<j@cPnb-F%aKM3TC7E46( zhgpbmaJ`Yr;D3EZPu3eu!<oL|A@T<iNOw4&KYcjnxDvnI8wDYAr-W+RwL9ZU+I}l@ zp*&3_Ge5z!8Y9jvY~T+guQEZO3F-`|uzO`2s}b7XEQ*Imu4nwrqRSL?dJu<zGIQE& z>saiNcA<U)(G$Gv7(n6O8jq>~Xwm3tp+v3N+?11{2czdaN?SvB)4qE~-h&;|kan2C zY80<#V+q#;xxe8C032GuSU$Cde;T7Oy^*nzPh5+5b?;p(l|phJui`j_#RYU7$=Vdh zhOp8>>*jN}0K?<PRKU_gFf1eY*{y2SXste0C`e0g;!X#URe$D`sBEBK%)HVgNR$y_ zizOQA!q%f?y{02<`k9FnN)$yOkjVf-WR{#=T*fe=lt_|%`Bkmbt}bBG_q1X~HIWqi zXA|;G>9dmxI9^1P=FyN9$)KRuxuKm1s6#${r$`~58|9AE%o`t|%qc)2HyuG>=_l~v zDtc~E6hO(_Xy==;+W4LZ5jLU86cQERH|$!r5ER8VrNwS~7fY5!qi1P^)1bS!Gkl1! 
zo?QrT=+Fq;RiMI0s>84qY~+ZrrY-D~kg4&nKn^^IFuAfx-t)!)uBwZ&f}60)#8!z> zK@zrE7E%F;i^O1Y*mptL{$?G9gqE!`1-+3R`w=o}ml$gf7IgP2%<XrehM_wopT(SD znnO?xwHlOp8Z?X)i_M*^u=8iQB4;0Az`$B(Gil)}M}LN>#?galO?zhY8iS|jJYZ8+ z9cHMSvoy7kauhC7IEy74R^3l-(j+0Kw>5~mAj7=Wi;g3(pfEB<tcv{OLq6#IwMjPN zrPTTyuI4efu+Sw=hGm6}ggh=53J|)F);%!<5x#`>ii`JI*JRgiTX;Tak+lmq!cj|e zV`8H-=_0)Qv8N}gH`e4*rAO*xdBJ%#+tOwqUW6h#5M0-%JoFOZKZkX;vt)4*T9joF zIEr}0>{xOWR((0B`OC2xvwZMjbWlSL!rzuqbiu3^h9X36%EYeaw*XC4#T0nKvg2O} zqQr3*kkL4_+gR-ZolQ?%u<(>hFz95QW|-aZICa;sl(o|u(u_s73Q5lb8Y}|0DvA*W zN9?ziM*3nHy(bV>0A;HNQbh!zWX>F7TOmj@i2v6qmTJB<rOEWd$})SoqoOtDp{v6e zn2loc8g+dt@;4=?sxmWylnKRS#5m5fL57lz-*U;X1YcC7ROU$uRS-bgC@#T3!84tP zJp`zOk)wyQk6OY`yLXw&35&b9N7Thxf0<ma#AQ3w+Cw2)lO+UN;Cu4|%PoT1UY*Gr z7W5)`1j%}Y(Vb2^0FxOSOL~zLf@*<RHIkF41)}u!6c#K88dVdHag^d|4#SStY-(<~ z2sJ3}>Rw?>)CAXYEz%+vt@=WMi2<gRQ8(C=*L?`%Y){w)$q+J7kCymB+{!|lFtp{r zf*OpbV^MkvjlnEdbRZvzHt`x@OHrt*VDf2#wT*#bFfgEl;F=Uus@c1s*PtA+8j6u2 zZ!MJZ!OoyVa+v!@fI*B~BXER@e2aqgSSE5FsD_+$Y??8EeF5sKbr+gnl0?+u8ied( z<?oS=;a1gz1Slp%D;6ebrGWZOhYpZ~l~~0=?`Ixm;pO@?o@>YwWx=C4qDmxHQb~3n z^R+PqKd5~QB-t<<_C$xZP5P)*R4v%8rS5<PyOX!Mo1*Zj04QB+qw$CV6IUK$<~>Y1 zT0&>pC`8btfIPLsbdnZDVz!(E*tzxyOR_3bz6MlPR#-)k0~szZ7<x(KAlKn*IK>qx zr;djxbWyh0jj2*R^Z66XU4iS`O0byK9O_2vf*F}?Ffyk^fL80>voSEzP?AM~MnNkN zi7hKZZZ$f~TDkiY4CO64L5rZsYR`B#AT5$!(aJ*vLBb9$;77A=QDR;kOGTZv2)f=m zlIm~8hc%aj0$Vv}Az=&^ruIO^&B{<>kMS4FK&2yNEy?6_zb*WlO33I*nE~R89~>^Q z)q-v%E6?3u4>Jg!M0nKD(g`(dfMH0FU<8O_r80?2*a0lLlz~u55y=z>YY@?RQV7wq zPE|czCCAD=VzJ5BAFSL-v(f@oYRE(FrSv4~o%vkBxpbp;->wONV{rt0Y4N*s>FY9- zuq2*3IE7~e#!16upK{6|@#da-B%&(<LqoR!ce1fzhheFhrGfM&@rpD^aI2Q$n~WUv zd6BR!qZ+ccOC<|y1NAqK@yZMY)w3rRh*_2z`OIG`tl)D7(72={bu81M)si`6>=OzZ zC3fpk_6Uny8ZK0W;4)GVjV(H@&<(}SL04K0N}WvXoxPbFvI}-j#ZLm(vE=v%=O{2( z7;@0yg=q5B!xl%y@qQFcVbUzZ6ptovO3A`vaiu3c5emk3P>w;<S5(J!VU>LAG6jot z<VAv+GDlRQb5kCapMU5ueD<lW6zUgtGEd$JQ5SQR#HG?50f8EbNJ|AnV~MUP3@Jpx zH7_-5hAjWW07E7cld~W~At!Bx&-p3SYGEoPdk^EvreiS<#C)PSmxnP`Sgt351+V*X zR&0?9Y5@3-ryg*C2@}JXHDpzIF38;;>`xV^5`>d9&$`hPn3l4Oh)auDElcF#7+H=2 zgwUNW8MBJP+UhzQ>7@^{Bgr{=Bi5RZ0)h<WYB-9cy?-;<7v%&M3LYB4k{wUP%7py3 zF2L&4p}?>JU`&@tbM9;LW@4*i7sEa?nBB0lW-KyrWJgU(vP`?d{$N+l5hb@{%%ay# zpfI(k-^Hn=BI?<)Cau_6sECfM+#pC-SirumR+pqOVuVTc6T>xjfpj4CQsI>vp!YNm zJusShVG|>k-;uA|Im1`D4gXG}&YRQCmFmH!wG&eUFugRhsS&cdm~nO+kcUA@aw}At zU?NPcPCz%=AjP8ecSsqwG3*!BlbLzyS{NFKcip=l&QS{29A+`kRw#Wl(%`cjN8Zbs zQ7wc;)3fcLV8c0P5g4}=vXL#7Ds6kwmT8f+*?L+Vv*s9X9uO-)erTPsb!=o)Bo$3b z!YI=n<#^?0s4$vC3>FY=#I;Buz?BXre8dM*DNxnLWamJKSI~76F&Zp_R!qRxCM!pk zm@Y%Ls?X@SjJmbh6{ZG0rclL^tOL<!tp)a4%O8pFA62j@nFfOmwG6iWeh}f6iYKV) zd1Hz1=;zkFz93v_6iSEY+YSMRT7MCNiL+i}q}U)ElOY4x0woX%Tb0tWT^Lr`TD`86 z_e{lLR;C!FXn;$tl;nyMA28>50IB#iO^TqnV=yXMuRJc`rXE$fj0&2KI`!_+gdOqj zW9CRzW;&d{zyq*iGK>o5s2ls^Wew3)49#%}aGNC&fo3KN+3H|Mp5U<#mwHB5XOek5 zxo}Gj1Q^lTRIyc&FZaHW_<did_eQRqA3j}tr|kXl{D$Z|!_R&P%>TWk9w>f4>wj&} zL-PV4JvbN^8UV}%m|$?tCBzVAl`k?~;n=51!Jr~N?3T3h$k5uj_W?$TRF%6N#^?X6 z-s-hZv;GTBS+7m<^SOzqDIG#%<yP3krb^F#oY@D))IGtCxF>Z_G$**|H{2Bw4;oyT zB0K^W#)}2i-!Wj=@}Lnem@q9{p9FN{xGw~hSP-xBAuL;W<W|Uuuj~T8Rz{qyTygVc z)StIrS7*bx@lDKjGRmYpM@qKHxN8ff9bUzRj{9C|)rr1$TVc-|#m>)?GFNeR6jO!0 zf-D~I?qSUJ7HyBL{-K99{b9{7-=sU8-!DmOH0o|c9wexSESm_nBwIn$nmXCgkrr$- zNy9aI4eWcb#)Q#Fd8cHX=4{WI>O}IXBfSp->rSm0VsorB1rC-DVa}dVx2~(;+Ak%Y zDwA`Nh$0d0TcQp=k)=*JreRr6qJMDt6P$yZ5oe%an9b5KGrXS^qXxHl;aGPv=2WKq z;A^Erdb$pw+@#K7?h#%Cb5x_6lWUu%g~<d%%Bd$cw8(@3(ao2rw6Yp-kPNOAI*Sa( z7MFS1gB4S)&W=%1s64ou?%vkN^d@zl&16LJ<*bD{q(LO!nwEM<cIlxyWU*oLHZcrE z;*qr<UHqp7{iO;lSi#Y4g=S?=>~PwXYjui)DnlUL&fd`xI8)DpsNze5jngqN3gc8O z$>6RkE}N|4FWfA3&o9;-oZ-HSc}l_OO8d|2a}a>|S6f>#veEIrBV_PA5}1-aB^Q`w 
z(QUOwwnoW5h|%g82gfz+v>F>`{l*uNgzFF~feelI9GBJ{=TpiVG7j<#ulXnWr2&p7 z5U`-^DKc_ip=4=kSYlSh-n!%-rFgF=iFz0Z2AeqNyrRd5Ft{GM9+yM)@%(~9n5h0S zDI9?uv;qWj3x5y4^1;z7H?8%OdF4&H^Ac5@U==W$v0Z0`ZUb7JFY@1T7wljIn1^7Z zEUR9FhVqe9e&>{Rskk2=6b3+F(V)UPF=C~Occbmlx)yuk+D5$3gry9DuALbsdx#+% z_I)T^M-kNZj-1LK|DUk3y?@W&{Qv6zeP^p*d41ga8QS}7>CeWZf6w<~(@$aN%c*n! z{K59?dm9eT(MEAbWy(R((K+RQ;vBZ<?1Y3US@(EQG8b=UnCvW#0G<{|SoL5cOe}=O zfbmj~hI$|Lm^u^nuM6{B5b}+AL}!v>am|XynH5%_dzzd4Gk~A7`^k>~Gfr!dEQi6Z zOoO=0#`TwYcF1lW$T<y{j={ocdg6zpc&h1s+(|EQI`fC|E--$jG6iriBXEW%Q;fZ? z5bY{6QClhjVo%zX#e|lur+VbJL+EEQ|7%l8hFqNzn2$ldoUj+&OeNGH!I_8wiOIyq zALC3m#7by+&cjoQyrtQ3ONTbpa{d+9l0D_NjlGMQlvf$0d<tw`Pw>h$^DtUe+W!;( zd-80XyuIOEm1;0d){}s9xcR(>&I@zq5mRk3DsyO5Y&DD|VG^(BP(MQ9Bl)XDk)cj& z%a=tawb)~tVxGXl!y+bg8~qq<p!V(oJNz%uSG9M)M%4WNJf^A|WB2;Ige=YkB+)V_ zeC9!z(yc$oHt6_n$aMUGaya2(4G!L9+^m>M@jcG5v{PN6Rfq;5_>5|h30;~RYf)3N z-dpnLuzUX3LGZtxZ(E{qj6lrvH`Tx4Xl{coX-hV0FTAK%{KPVXp{cwPO(*C#p@kt+ zUG7^8IE`KwPI9$Fpc^?*a)TRc4<WqsZW=<_Y4z(j^7_v^p<c)<^T=Q_=OY&%Byv5Z zL)&4W;p`37MA~6XyEa%Fr=o+<(9m*d3mtTFNB3NCMVBpiP|y_Vj&0+xp-nU@&s^;& z2FM*wbr7o;z2DaIvrc((T0*-WQtWpwC5q219|Vy*f|JV@f`I$O2dm_1!E#l+5YJWs zk7%JZixt5(B?9x-X>S1SLdhv@lRl;Ng?ddLC&zG*?M~Z1{_m}y)uV4Pne>3iHm!#{ z=`&nW+SZH~N#x$JSH^1O=Li^QT64!~VyMoPY(oa*6|f=z&6K8i`n8(c&?VA0vS$si zxdC=QW)<PVJ{f&~PS*K1y5EV4mZcN5b;kJr+{;AfT8yB$=nm^>F)k!{)J_}^;)m>t zsHi}Gl!`RB#zc21vBw3&<Z+92^{i=Yfl-S7ddo|Kgu!7AenGG7@%V&D*Z23kA0Z3# zoTpi;oSZM9zMhwPi^GHk$rV`|xN&v>cm@*8{)3|WyUCqZWkV2BAxxvMWrzYts&hfi z{N_}PG47cD+xX~Jfc83YJz^2YrZ4mGV5}W}?heLX4?(@qpslEQ$XbyLaJ4z4U6rK( zyUhqg{DceGH)3_10Z*euD-J>jWR5Z5t3NcDamT#yp#vwU%<vf&&}ucQ5P__jluX%r zT!QDPu=kNF&8=>>oVYNmdyvZ3aC|E<3Lgk;X^?e5C#zTiSdHNbJ_uf<9z{x<V#FaI z_d@w%gZLHJ$}joOv9H7sgEZw5qM-7k%%ysF5+8d~$SL2${p+2tVd&Z?FeeZmDG?H4 ziyVux%eUN>riUvHvQ0{$n~1;-7W{;~>^nhii;~4QxnoTWX_^6)5vF=LQyUTsDZ!UX zRH^vM$JDYke&HVea4OA*Pm47{!_WZJ&KXylJUJ8_tY>JWlKB`cNKym@EWZt6sS;V! zsyHJmN*X<jD_E|cZBZ4RGZQYz@s~AUX|r4*0fbH^XSjF|H%!g7yRZ(?k>-eq7|g;U zYFFQl#ju8qYx+8<bTF=>ERYYkw|E@+V5~e{{&sO<#ict*1U!`IMZ>x#!{xWJK3FYA z$q!gm^E-Hx`+gOHiAbEGi)R%suWGz~e$}+Xb`U6}M)*Of*v)6|fmI%)M((xD!1WWk z#*oYp(i~^V`!`)`mpXAxKuTWx0)j<@m3nrW3>vf|B4O(@!A$LJ3xWdafe-2L<}}bZ zIk%ku79Tg3<V3`JsU6u7q)b_XWwt@K^cp@)`*a5g?!R+#vcIY6`ri6o{HjwrFltkZ zJ~c{L1!Iyg_GS77+osZ(9RyHSa#d#nAIjHvhBdflT1_^sg}~7%X{u@EfyZcXD}o}) zIob`LI}~8hOvyQR_!9a@;CuZqA=Xreq>fmWbCF2!UW0PQTn^wXVrpD^Yl5w8^nHgt zf4G?K6;G4DnK*9X{s=dz(iEaiU{)dHXj2-nz>lgpoj@I@73o^z%FhOMIAVrv05p#@ z7x<rA&EW*t&6*{va?A*2>MNgkVi|=o$!Qm?)BwbLFv4uzPx@Bbr=FFLe`%$U?F7bL z?*^>@iBdMXrDKT<d)7l#x0l!wmTUjzGG*;c2C!galZq**QkfXr&&;}ie%t}BD7y3F zWggB{z*+mVnJz0PpXji`gEKkGM^7s0yD&i!-06FN0)20hak4Tq19}Fg7QVlx@pGz9 ztL5fK!6vs6VAk_cKwPHo%u<!$wgfIJa(Bz8ZhBw&DC_zA-^Sv@RgHS|_%i+Om*=O+ z`M#$0`DFiH-lE+=s>}bw!M}NQSN?~pZ`Yw#{)Bm|PiI{}s{gcGx_-0GD4$Vo*k&CJ zxG*Wnt}RN?t^aimulLAl&X0&HWaD~hvk<<<p^t=#h4pmSf;AHU(tEj_(MQd<?jhU1 zbiO7HJ`nlEBipEd&)T^Ud}BzVn#0gUbdwv3QK)@)WHF(_UHnHJS7jvZ^oid6b&;(- zk`Uv@)RcbX)$sH7$vks{)&Tr?&af0HYn%n;o4))tZr6)ph(2oPgR?G@AwVpbRsN3? 
z8(2JFR(O3xJT<%fbq$|Iz-DV;e$V0xBj+<O`avDKLxk`BnZI_gTm9T54#x4h6(eR< zzi<X=*Vt?>iAQT6kVv*Z=sf=N-`CGe?+j^=$I#CAqMmoB+tcZX)94F%?mtXAvtN4$ zaHbu>r0pU9=GeF1x&BQjnKvA$eh{@o)Ef=ar|}FwZS>yHJ)InCtL_P4;(XOOM4#N~ z;T|VD{#{gSkCgve_V0cgh7Ylgt4COy-efHgI$Xl$PwSN!j9ULDu`Y%FJ34@<PB{gg zVVxi)8e3eU<X;$n%8HbmT-Ui|h@IFn#t5FR?C)^hNVs;QCL<6L>^N;e<rkPC9Zw9_ zh5<A0_nv*k&aZUuX*sG4sr3L+?$6<y(|a{<HQEAhXwG>CLCsM@<WVyrL9JtYNA%;5 z*D4vBSK%Tp$WcgMw-AWM;W$Qr`3yhx%Ex%l)06o4_x!`)o#aog8nXPqATVJ&wk|t3 z&R=1CUls=jF6TAe{%3_dSe^xRYty)x^v8)117zQSOwO$)h~S*@IDfCGN|-OE>ZQsT z%x7(vu(n|)lpG&T2a^pY0&AGuvB!w*(9J#4f(FeSm}Ds@UxSJrhyDua_dG--xFwJZ z6=F{sPM=3+aD|N&a*L-Da9mEwhz(j475SAX3bDGX7FWSoNb=>;8`Ccx=YGFM0Ge$y z8OiZwbN~L{w|XY~ZbGrh0-wi$@tt)&TIM@RW;NDxDKLl58YHHa$joj$l&&dh0(n)4 zp;J_;hNQzPhdTv`#elde3!izU---=Mr^U8jQe^hQ5N-wlZ-Nhtw&&Z|F(oIH^$5&6 zpgnm$TmF+q*z!h#HQOZb#zu(b#5B19@Z3Z;OJS5j7wMi~n0Aj9sYn;E=Uj(Svv{|P z+xxCa(_!+B0gjV=53W(YRN1kuhY7aAeh==@(u+Yy{7ifl5#Ej`qdak~S<WFRJhuAg zY;NHtyB6iM7{yR`<@2?oBnP*IVM(}jOy=fk+EkTuF4Z)CgzpP7-G`$Hz7JN(x>PVc zsutF0>eD}Up<n^S;#4dTPJ|`}ZvPL3D=9Ul$mG3U?;tRuHm56VJ!%l>DMlxhlrUvL zz{9}?BTWo_y`we5`Z<w-^+d86(vXfxv60y%qzlzKdg{8p;d7}8BRJ4sqv|8ku1;#a zu>Wk7-47?MyEeX_2Lu(V!N{ar6Vc9k#q<E%!B1o|O1^Z$tJb*mG+*y%L5k9@pPPrl zBuOzx&zw^N?r2f+y@9YdG8D(?bIFN-r2kt2S0^J=hz>HJ*!rsV^|g*apKpJ+dtM@d zl2(Z75~WZGUAvqM?9Q%(KV6_GX#qhS*{;HI8sHE_!7wqNmsdESLz5u{epuv~!fiMl zJB>v6xBcBS-{u?^={5l?|Mi|i3rQ}fYCux)SJz*SGKN^iS}hB!O9;0&FNDFRMX{`u zVgQ|-UqXo#)px}RBmd4Hp*ZJrr!YPQQ0aU^#Af%?VtL>M+Q=(QatG=Xj;md$t(OU= z-urR%H~%syE6^x_d}|zhG({8ZeRR}WeyR6gqDODN3rP<lbyZCdBV3LA6$SK=>YKBo zMMeLUlP=^+1dWi+s3f<3#E!3F_lLZ!&YmW}Oe-SLE0nF#7tm?CMHXi4=avWTJ#ysC zHOny=H(XJOn|x;Y!@*0R`rR@1B=0~kvp6<4I!8lkckqmLl&*Lb%#4-ogcVzOT|F^u zA+3n7mn3#h=Cdidgou5>t~$2L+?WBz{C<@@)o%TinFxh^l=yK+?iRQ2kpnie@296< zl<J5?ul5iO1lfTn9UzI;VMkS&QpLZRpv5t1R1nge0VuevY228E1W$AWt85j(1CiSH zn3Ry))?myiAg`+yY`8^M=Jwz)0$nu=T(?$I(PJy0B4`<RDszn4C+hoZ{=%=Y6&iQT zDB=qcfZBD-Y38GfhdVd-879-kDODH)2LkUP%}{oKR-6ldCj#+|O;%A}#A3RfaC)=b z>=V)?W}fq88TQ6!3~hL_l+R=Z{+y~EeGS}jwIau>=~3gzFGZG9LZ%8$+sTdXu^&}S zNZ_J#@8^w;E@?*)Zl!FtY+&b7CJi~2L(5cpv3!=Pl$6v+7s|4POd`crJY5*ET9fUK zZ<c&^HvKz)*`!N?k(A$)t4f1Os<STXPmv&0roW<O9uKvvsw)Yj*}?Fia_zWXAB8+u z6DB8da-b2sla2>>P)sQf(u71obL4S*-2^$)<2TtOOEicujO3SId8Y6Ub%(quH>9>Z z1wGzxo{o?E`*^;?BSgx;!D=UoTsk73$W)TRxm7C#X*n-#C~+(V7_L>L54;Z^!@SJh zy1)d?q%jD7H)`7~+UzXe2I|cio8j~bTl?Sr`S2E#$m^v^f44NS0YfO9Hv2t;Eo#YO zg1nxOP(aE&T>U+wNf`ui5*lC5NsVoZPQI|XVZ!qGiC6VAvJw%Vvn>=B^Jq3*z$JYh z)slVUqV%{?w-Di=OhWqo_?mWjF=bF&#;Ppg%bW+`*;1LR45j~ln*4PtKt9fV1OX}C z@6FI3ciY6V5CE7$vR2-#FeY>e0g2v27S9>rz!BDoifW-k^z-wrHk@o<f;tFyku}gB zlg62X!xx>bHoY<zR&ixV6T}>1e*+-|-JGph1Xv{;*~`dFl>yt;1l-;qhj8jm@>AsS zO`Jb5X+DRZ-CSF-+rR*y@km^Z$D+Xc!b%oS4A-n8f9yV+fAEK?yc4Zm0i3WQbi(*z zf16@r7cgM84SB-Pw4o=YG1%o~SxP|)z@<nAiOFH%YHbPxr|<V<j;rHgsr<mBzHd$k zU(dho{j8zq5;L+@b5(`b4uG0f9g(OJOTFXB4Oo}RV=FdCXvw?=vg$5`fECx0tAiL{ zB|-87uRzgO-8$&9TA$vB!NJpquXYCpqL%etqkzLez0_NLafr(C6Fi6S?Y%eRiDy%Z zCn(JLV-QDF+l#xpp5Vk6h0BHAA=bz(APUu}?IL)ThOw(0H{WTso|7N&UHQgFqfZy` zZsrLyV$N*C^YH=S7SD{*vx~X=GK)TL|AiqQEUAZ(B_{x^g=h}gwrnpMu8t?m#OqhU zohR#BN&q_*4xvUETQ_$(Bb>&Bs<Uje)k+mOi6fpcP!TV+Jbp|-HGBo3u&`09j$G4X zvhg;Ho(?8dY*dql{ZA3nTzc>u0jI?AyVJ+(_Ao;RukU*z@0s;^y04Bh9021%#+mIO z%j5h3I`<zA9oeqF9K3F*SDd`SowE}c{Eu#2zdoAN{KxBGkoC?+m!x*_$JkMNEdaaW zk703{(&dIGAPc67lFO5W!uR?$?JF5fewAjD2S*MkRTY1BdI<PI)72=8i8`Yu3oLeM z8QhF2rw%>maKk<L`K3IQjHyFpk&A(A2tR>X$FkrVW&WH>;h|>r#YIigTd)(cDI)QH z!)9BAShc@$2VU>9fLzuxu4PAMsDQ_@!cdOFkSbU4Dz$78m?v2!7lK&>X1^xAg|vW= z6xo^D_OwYpqEoPrw3dk6(o5BL_$J@=ECVkk$d`aau0aKrb)Zm!No*d}bM6K!lQ0O@ z<r76)xtO(WFhBojE|!@nke*x6!UWYa-(o7JU&1TXCe04V$B0uXyyT|Do%6J1rH5Qn 
z$%%AITBwMWaue^cf)Px3nYD#q{2Q0`Th9G9XE>NLC5yM+#U=&L=%Y6^gi(&%9~e<G z{X1h21mqn)rzYzt5)mP3$vZJUd|E)RSq|ZB;>&q{@nq8c#eR>Q{*oE{s+LP8a<wHB zR=MI`PfjHf)C(0MFUZpnfzue@I!C1?EGXV<6LUc%wXM1dm8mcWY*{Y}aL~ThsWe+~ zVM!qD91{q<)!8fI!-Al)$lJdb(0{FSG|muZ*2sP&yim&pJPaRBS+;Rf_#!EtiUL`m zl<}HvqF5l#*M$m6^zz*nQ$cmS1}+DO{1$%&(;x(!HofeLRFK0MySOzE;5zPm^d;Wr zto!`)u5?;3sbj06l90Q5h+I`{3=Lx79x$%{qA(YD>}<pDj)sOo3dka2M`La&G2AYR zsDjF)sA~ooA>$aeM+y9VOHn3rA>&w^Uurc7UmO*xo$?G(fN#+xIB|mSfM@ZZl1~pX z5bIwwd8H*<3300FtSB{Z&*bd3{B2i$wAh_x4X7@rg_HoDMpFxjvw;nk#w(s`$JUBW z`2%RS9aU9syq;v$LGT{=b^WIK@ofZ9-I(QIL^_{Vlw_`*&#f$A$G9azRo~g>#dj~5 zO(2%xlrDdB4Xa@RZUGQl51RzbmY`7=bP?rXdO^|z+d@J7&`xyzw``-M**9m9jF6$& z;So<wJ?Yv;Wv>{Y3`AIuu^gHT^d~`@CbwYF8=%r^)*ENj8rEaurF9tD^20=;A5tK= zDVJ3_v9ANkph>mBex%p)NF(t&eX#rQ&6;yq9GiHJ2ByvQt~RFDowz-34ah%{T!g6% z$XMm?y^spdY5V$A&BneZktWuyA8)peEMcPWn}jar%HJ;j;aAwfqS42Ayc0R@Skz;e z3a-7Lh@Gudkd^>@<KY-ngY^iXu8k!k<i^J#Hon>i0(ic=U1G6e6Efr#a>guSBwfr6 zMe1AY9fH_;!SZMLdS1!yV2eUN<*Y^WToy@1Iyifho`)#sn8tL{QmPpOY;V=75_46V zRgf8tb3QsEs@NO5i+0aDQG%gPY9;}^`8TJgsq5iNYN1k(r>|LEUA=2!LBm$WqpC28 zMoZR6=#Gg;43a*O{&<ioQ3MxS7WskHMWY)uK|*!GR>LM7&C4PU>{~Yd)N7t|3X?2a z1&X~J>c?YeVoGZtXa%uZ4_gL|c==+9<G<YTlZ;_>Pdw3rVizbNhEmu-WkJk2>Hthh z43M%vW}|>&8YVR6(rD4=Wcx7<F@eKc>S@mo?2MHZzzUv@1Pz^g%zK(K;x#Dd-W5u@ zWf^K^04hcx=ApZ{gh2AQvY5rAs0Gbgs>c0hL5u7YRMP>ZXq9hT+O*~c6+=?iTBsmn zIDKcDCH(@$QjTS`9$ql`fr$=VunDhmdtL>Z7lC`CV~~Z8OHvX@cTCO#Js7|#&RQ6( zL+g1n_%AWDZ|DD7iry_mUj?(+u@r+ABdD}>MIc?Us^9rWg_}X<25=maJ&i{lQlDaL zL*}nj9A~{P{+aH+wcwDrzJfE`<``X!qNvQkP%|cLWd%dd1#i{9Y*`#(+Mbqi=??BB zM`e<&D}F>RUW!a+G<mwF&hjigxMoN_$a;lquWM<gwH{iTfoAGU7nwD7&J?`V^WCt= zU5a2TW{GF_QiXH+6UoHZ=w4hS=W2!T6poMq9ZIOV;E@ZX{*FP`kT{eeUaGhuzp~vj znFU;=ZEu(NYR0ZXCn_LGRw8<bA5egpMx>hS_7lKU-6}Ka4STL7krPvanm)MFihcuC zmc37os7%7TBQVPIrlZgyFX&IJS)FjH>gQJyr+jA#VQTM?<$XP{H$W^D<0P&M;d}#4 zAH=<Qp6Tgzn~B6N>V2Ni#?$q_{FJWm;d>WwuqZPEt&+&u%-SbRs8}kzn5gZImy{4B zmD(#qsJ}zo0)@yD)M0Gttw=N*^$l>fkD>sLN05MTA6|3<sOp7Sn0pxK+Ewne3E;~q zMx=z@!?LT@hu`}6_?#R#_+AyZr4me{fssTW>`>GOT^~I#j2%Yu@LFDSZ_~Bjv+QA* zRFi3KS*h`t%8U22f5@i!NCa-!awl(MLGjHM{CSwVga7xoJ@5DY{Clm=m*w~Wd^GF7 z_WM6;{+qsY?viuQs6WcrAKzQ{|J0x8Kja<$9=~$He^FU}-upMp{}oREW7oaj|Iz)1 zHzxo3{B$w)`n77_4~2icegW&x*0+6uU7#=Y_I>sKdcW!SV*QcpgUZL`Iph)k#`D+u zi>fF80xK`gU-91w``*ughDiP+Qu2RV=~Yhu17+r9DgNJ~J^xc}jFFjv^?zvt{&()_ zKhZt^FLJd1wbJ2#WSSWN2YvPbCwcyFWNAl-#Nh%65C8`L{aco@zeHqU#4vS-Bqu|z zJD&&C{XVxC1hk9zsm#_@G0D_!p_t{bzhWP*`jp!>Insi93OX6+r2fnilVLmF>ige? 
z=D$7ee~SS-`~Rfx{yPEopW^aAxU>H&>WS%>Mf+bmy{s(%fsy+!si*%sIRCYP?cZKd z6`jqlU%S-%c*nVp*Bsw2?jUXucL+Pko0}WBP_*Cs%^<BuKnTEKF!u;(gf)5Y&8MoW zP_lyRLht9(&2BK~X@yFaS*=#<8uQ+}-(HsU{WLxKNl8vhvXheBlq4tRe^~VQ_&xfW zUj3XuInB3bzEC66_8y(c3UjKe8$EC%MR_Gjdn;3aFk9E1tmsYKV^G>++MnlMV{3PJ zy7@o9%w3Pq<MaD|AD?Wu-}3u?JUvbh@=*G3HZ^!rw-${4p?wdk!%qeZ5?V@>SxOX{ ziWJ!jl|d7%JQpp$oUTHhut#wmWYSyX6Wt^gUN2vu=~&{cnxkzbC1|EDav*3t^m6<5 zFkRQN-Ba309i1Mw4qo1V)Q*46>;Jh}F4y;a$JCXdl6?OaOo@Mqn%btpDS9)xmUw@L z<Xnm5Sb^+tmAl8xMQSZeaz$N$RA`K47d#~_q7OZBDN}(vVV6~LonO_{4BB$BZGHaW zT1mUIzQa+*21{dXt@FQ7+B0&U7gBxYVX?#4!_Mn(_A<ZStac|C|0(&JH1&H{bCD`- z!b{51kB?AH%eWz8ET#c$Co3+g4QY$-vW+peu=Kf4eVAsZ9lok1#*P)XR^^2bwPoIf zJ(^TqKboo=O@SFngBwfJ3q8A&61$H5Q#M)jk{MO^-t}`*R@mBh@cfbp>k+cW(*mXo z&=5|iu-pwX|5?_CZhy_=_wun^f9PNIkNhN_K0d!kV#Yd+^^P2wjnGJy%q)S_M70c+ zC1vfY;eJkv@&Y*3uCGsTu{lG7^)PqK3{9i#Zw8ei-mpEwTq*KQ8RkR{-arlFL<QPF z#pQUx<#@^Id=1`273N40-asYpOf@!@2?n7hX8CEB;a~lz2z#jMTj^>{+N+FOe4W4p zfVXh*H?;l!GWWth|4MHgW4p)2Y4%&W41P*~8_i9>eFO~w@++q>_4owkN0@lH<dWDt zTNj%-Q-FF?7v^%$E%ILBZD#oynlN4JyPPWe+9vv9ErN8pkOIY`4$-m{-M}F0P@*EG z)n+(d>;4rk(^o9fbt%pAr0BNyLp02<uzMUW!<kFLGnKD%)o%?}U{Tp();~?xMApKO zA5GX~Ro>`<J{X__h^%_LogjD@ms58`y}ju6@)P`&|4cT$f2!)7XMi$=iNoLYCUU-t zfcwkhBgMr$%(OkvY~&tO#GR_ah?G}IqLjHoR<dQORR$;o|1e1pmg<c5ieV~N?yJxk zr_)>J(i&#cnWYg~C5@D0$5noD9r$n=4vF0xnN1>{R-)H3m0mv_=SHa_q-yoWa{bI= z^~FX-R7J0GE?9H85*di>FTf!(#3Z!Eu|Ls5&0NmZVAr2#0apW0!2<*GT;%t)I+@>& zUcHi^-IRQpVL$$L^-*lkP{gRD2~t>+RN?0!D7{dkpRy@mi`x`giOe2fq9CL^>D^!n zBBg0g*{NFXxlGZSqSrFJy9i;X_FkUoP?F|eoa}6>@NlyJfH((>zy#A}f&5kdN^gxw zVT2DqI}95X>H_?O#kUY92PjB-E#kHqr}8K2Fz7es1WE7e?~p7Ca*k}%Pu5eYF0&{0 ziH;FwbO97w1)vhOce^&=3|Fac$hr#(99pwn_zD|D-4G7&cQ{=;`W2R#JCfuyvpmmD zY?m%k6>O^btl}gYLwj+Hu}yWI3$6k-rpW?Cal(V4@=IdtwAy{%mpA$Q;L##{fy!fP zqN9<*D*_ADs*|KkL_OrZ-F#J-+Ok`5nNxF{TX&mn+1&*oKeath?<(&aL({ISZGo2& zyk5;k28C5mp7sJ#!lng=La{Ldb^ftD*`1`<K#26~@@X($c1&xQRB)_Scd`ueW&O!M z&54%39$lvRRH!E8bQM}f)YPN2CHOvgR&3&LfO+}$@Xdb9my@&LfBW$A{W-YUS@STr zayX4?n!Aa*vzf~4x$1kZPG+9gme=bOd)CbV?`wMCI84~2wZO5Y!7)Tv!&Gt`Wg%~w z1yXCu96^roR<Qb**e0naq(bSj29;UTNa3y|+5JdCoJvEqQ$!VoExtwXSARcGiu&q4 zs*b@?T9pAheE}Y64Hjhqa)0#!eE}MY1p<`;0+l^BsaaORMPg0B1mhERD@@&!ghk}o z^<*WUMJMwrd!vJ?6L5#H*=&KhO>2`bPj)r=s|Yu;gY|B{@Q~Oz8+fC5<X3`WtS~~J zKTQo$vi_3VGS>o83pdZdwBwu1f-j_t;Ah>eO~0Shk^5iR$?R2oAYvy_mIM_iNM#Fk zXbyI14|i<RIwvYgaTa~H89)M=F{<_7RMgbvEhp2<wcwp#eE+*ASEJ2<AmywK&l*t$ zP1RSM^$|C*>x^GZvI0kx&K&OEr1}!syfppf9%I6CF@V+@>hiAkI&Cc*NBPeQsk7(M zksdVOZVNq9btcE^+mM+)be2!Kjfa>J+{hAQ&^H{=dFQDLUc=S9vo+Y0MQCn&3_=qm z-$5q1H9pNnPTgT%@xAf|l7<<w!z6VlX$?Jlz2&!)gVxE`aOZ7q-QsBmsEoH`hzoEf z2~LEFjAaQfs4H=y34l3#Dj4>lFv$JZ3Y6XuS>qVssbFU>f^&db@RRzR9?oVT?A2=d zseb8B-?PAqoY{-Vj}Ua+$x>a4V*~)6)FANIByxuNk{j?D=W`Ip3oz%a(7W?+C=Bt5 z4KfW-)bf<)m^s@gIh!tWwravA8DObp>#%O}w#Te;8{#X0yU7#Yij&^Ku?p|i$n4UE z$<AQWbf#zs32Q5SJ<DFE6MRjQ`fEv26On;Rxq(W@F!5V(KjMP#mJr^=65`Gn=u(^@ zEx^Ho<EfgzLxra^g_k4M2P5SN5UKjhnc6#<>d1qo2XoaYv-Ri%MmSXF*yUH5#)vu> zSbI1rJGkm>Du3932QLnDduPSx#ZT?e*7TX5XHC@~iBMVzkQxmXpOS)hgxRdX+bX<P z2N^_)BS40MkF`T}qbNJoHhIC1(_3a&+~j(_o=-#4{DU9<j#3+1O7CC)6%AKL$Mx&5 z@Ru4oP02W+RE$<8I^~bZ$VF==NogQVbt*`3E>*oZT8BJlg+OMCL}H6ZV~kF0`#r#C z72GBTM%Yzc=7QL|M%aL^17MrA2=+;Cb$$;m@%nwbBJ^_{O}!;HwM8Zz6lEeJd-1Va z!4VSiks67CO3$eI^O_3McTsFAPI5I>bwXQnOJQ<UXLDL?bzkq0NaUeEtFu$HJ6prc zTRX#J!@zijL)52q5X$7}z})1v*5a(p-k`?ZmcF0RA9F?@dP!x1mvN0~KcKslwv(#F zvBA{pXE#7!)USDPGPYwVE>>LRjg;NU(V9xqT#=aM6u<UbAgw|vgoc@t^)XVeQW|5~ zpz5M%uqrOI>aR2?EWbE=eZQ}}k?YUy=p=1ggx&|fk=j36yT{&u15G$qq5TBGumr`l z1kJnz*~|pp%mh)uXZ<H?5FD%Wx2H&w8p{+O4wqoj8ROFHq+{HqRNbXk9%q+c=r@?5 zj8&lH94XWsDUKCj3lm+Gnque6`gppX*m=8@B?t18)v#*>#wnzR>hA%2AGM)4wM9*h 
zb=B3O&4tC;q1oxV?eS6m0b&*yOw4P1WPFTtjGPRltQ@qoB*a80R%9y=E3z+iJs3F& zxCrTo*tplYh!==h2Ur-#r|0J9CnjgX^KDK`tqn@dZJib89)t5*8+>zQEku1TjrD$( z9tZpD!Del4`th{eu(K9j21sJ0b~3a^j(W_BL$y-eg`zl{4ue$L;(b}dTS_}jcs_g< zd>lW=D;2NJ)a&ke`HA;WG)GQrAhy)okl8thO5{apMaSyt3a;($F_Ln1wRP85*qG<% zy^fWIWMze0{3J^|bh7%JAqo?Tktzt?-~ioFfr+=ABJ6s<h!WePB<CzDBH>|P?Y(-P z+1do%QRy)rqP7ZbqN#umC_GSe3|R=xR7j2$`CRr8z2TZmBEyJ<CQ*&9PVTq2j_{BV zkx`)GM9a*3&UeaCQq7ap_to?$tE*oZX(}x3DlM&Qt?er9?JBJ;s%&lQEUhW5tbJXJ zWL>>fHJ!8+eU$7}=x-tp!clUnIbxP2QUVOGgih+Afi=C+5iO;8wXF^5ZC1d{KS~>1 zJIsx&jV}lj`MVw%Nt;Ag2qFY#l2m526}V7Y^%vVEFQaG6)$f#@CLg9QLLS<Stk}8R zt8S*3AbI_kh>4)5sq;<M2uu>y7hZNYJEMDk-mj*vw>|<+#f{2Cpr+<jdbB`fnj1ic z1&f@b?ugA4rt2`a^*Xw@I=*{CCRWmRI|%hr+<@t5fa#}}dcJp;CQWlhUJ$(AA+h1* z)&d3YA}9n{xtYmoI#5q(>}>0LHx}1dSAA}_`(v@y$Vcnt<zi-K_J49Z9Uo6)H#f1H z*%jsO=3;hpF!S=TvNAAw9E)E+p5p=oPxJKNY&4CP1!Zl`EJc<42yNIn7*R1Xqaonj z@9x{39vPb46_^{lD$ME4F^!Ow^3-^j*n0T7T<|CIb$am=x9KX-J)|a5g#Kuav4J<N zKUu0c)tacn6D7N*H%J9k;24E`1fEp%<i#xAAp8B~&1?r)5*j6_tqAPfMr1{_HuNg* zbZe|OEAV$PbvX3<+DucGM5)X~#mMJ}D8se^-z7_GKw*Ve6EHpdL_d8;x%xtx+8$n* z<j6;EE>LtzV0B}06XGQB<ryaKYZx^@{(AFDUt(QxadCs!!^On{?ixEcA3ZuJLqC4T zZ(P!xLb42~toWau7SIdOpJU_MU0nhH6oQ|qt&yOkh?^e|Km^4^I6}g-K0mNJyDhS` zr!BUpF^Xz|qM5G8>gM-%KcBxBpV`;V*=&ZUElm;6L18^wfmC>{1k49gnWc$Q9gWo? z*IesjWb8O!OeraC8hBaGOY)uFj`zf{YG7&cU5}>9^33x2d~x*o{O0+9tdXymR-Y4M zW;-2pO$mat@zUf<!{zrqY7CVLkjWXCr0H0I)kKvEjOkoTbA+1n^&3n*V^m*Bd`#m@ z)GMqE6a-{!WW?0PEkFSvreKkMe5{Z;{$lQSaJuH>aq8oWt~b^H{4OXLeCy)kh1IKW z&w<=ilodfw$(d-U`PmQ=kqpD1z~_t2ZK=%-N)6&fMpW8#wDtV&{_gHp|2ko%w#7wZ zh568%2oc&j5AvwuZdTmvI1F-yqouCir@6{F$Jx_aWX8>0K}uTsG(0|ml=Lequ0Il$ zB|6;4j4z))f8v9i*{QzL2|TU;R$s*44tDhomMo>eUIU6nMb41fS@`8#OWc)q7C&*d z_H2y-goNc>gvno;aIRU=ZAM~+U3{WziLGpj&vJ{agqf<OvFYLD<720Gc>6fD+Z%&< zpF?aP{f~P>aizostp5Hq0qO(9ftpr-FN5O1PWkD$;d$cSMPicu8(O>3JA6HVOUG=0 z_>890KvDvH5Py@OHg6f$=%_PILS;77k{)Bkiry%3*m^sZ(jM}7zYIqKPrGFmsS{Td zpPksvSmd-HI#Ql8fEZa}Y5qDedHr5nY`tz5P}avDOy1NTzN$P);s7l`$fLd*U%b$i z;2KqxtJe3c=4~!I%9bcRlqS19Ny3a{TTT*}h)t9T43h{@n78L3fyY|nD;uIq-J`4~ zYw$YRxtY2dd0L;?r}gtVT<yF2SWtZ(gs1ev$;fG7U<A<MGr&!F`PmSQbxwknsU79{ zEjhtGoyj3`mTq64lfj+d^mx6VYb;&Grg(l*BRMJ~z>R9p)oQOb$<0&63C}#`=hav0 zNZMYogN8(NyYbXX<eVQgQ%BR_JpK0ee_(6w?s>g-xLTmMUGO|PrdFgR;bsxPwMl~@ zOl(cGvdH_&$M8rIo#3|tmMA<DCOZ^pNJ(OKYInH0NQk&aO&VrWm^!i-+M=f}ovS_V zyiRup*f|Fu!oeE+Q@s2<&Aj}~KeRA^SJwU`<qE&w?ebg7U)360WESBvjI`W4Oq}}- zM2qN;D~;_jtgQ?WGMD&T8-9OBZ*-I`qGIYw)7;3AS&^6{RR!csI*Jap=8D2{p6ZU% zZ-E-MZ9elIRh3nrkMP`1k@j++4kiZQ|MT)49t;J4tR143JnadAF>(dg=1}#nILXmD zu13G-85B`tvJPdUaL-45JysFL;Nsx;@(3LvEi*%1V`GPznR|}kdvJ~@Jzg&?LJrvm zUehP(&?)VctgMr4ujTFdxDcD&yez%ZJIldG{_n~m;GWsI13Ws|7i1Y@qW{mH{^lPj zHGVQ`b^^LdT9To)?fH>K&Cv~=X}*REPoI;U<k|3fd>(IP<poK?BcH7K{T{J`DD&DR z?^dFshnysM3?9>TnFy=t?$g<CDY8>``G}9J^oz~Z@OZcW$t9nUr;E$KxxS(G7uqc_ zwS~&}sO=K#kQQcYVoa2s;>Wsp`knQ+sk61{GWCbfVpxO)*G8v7cQDa0GSeWCp$Mpl z$^bTAo{mR9FVB;d@dGGmh{8l(mPUNOkjoC)5z;r4wl|Ws2k*SKz{bka&dbrxNB?4C z-?>V@KWaPc^L^SrfQyS`b$3G3FO#67hLxET5*y=wA!56{EVDGKGC4zp>UtiH{(mSS zd)$q`dCxbvpD>;)e}b22JC&#iYP1$WYEe3oLH=s48n4lLUaRZbBBY9ccM#YQ8`#_0 z)fGZl7D!4Vk=l_6YrK^ern(;OUexr_uycEiuOhnMBx+A3iUPXQbx)6P@sU!rG<~Zs z9uD?4JRzg2m80L|&&$YHj=l(5aRT3)j|2D6hhW>H>qHA&|5MybRp9hGnZRA)kK5K+ z91d24+TD9GyEzvaIAaj}fvtiM6GUHXcza)LZd78ZwS?68#K++^@2_KOf3ocG8A?B4 zF^$k<t<-KS(8SpaRB^&<LYp*Ta<Yb-EP7(2>)El;^<G6m(`-eH2|m;tT1z<l3q5D~ zA+=#@>5Z;5@s-yiNlTx7S(~}mI=A`&MUu>Ju<-2W1mpo4&Tr-i_OQ$8@jN!0oe^?y zpy1-<=y!cXW^i$uPC{WUI#DD*WZ*DJF+y0j%U}JQ_j1?!Rd{<K8EMOScr(C>+Ct^- zZqA$ke4y@u^gM#|e68!=K3-+O|70;`1<OGJD@*DcYbjcaSe1!;wD2+wtZb?dul$~5 zEd4)Qv8CQu@Oh)C?#`B<`DiR=3C_v_DwAC)!^H7+D4{*&F}hAhCk`Th`K9&;*Wlgc 
z4BxsL`5L~z?X-YKmeifcExmUYs9mNPP^{hG81S;>BK>BasV#pL1Wll@#gnEwB{8`+ zy}aH;MuWWTtt^3^$k5RnmVRL0uU^{<%8u2bBs3A2CKI8ssJ2K6lC*%Dy@j2;9<js2 z%Iu67-yi>_ybop)3qkX_tmYLKX0=Usrm`+(a{4hs(ghZ#+39JWs)4=WS*osQfuo0; z*^3^#LSTX9qp}$*K7mxGyHbUU3m%!Iw7btnQTO9zIBL$be&6p#Bj^2m4A9%$8rVfF zdbpOJd}I~cBc>85%~gubHisP+9f2MtT*QV`rUq~AdIo(EmY3+3!r<n9`3%CCq^h<( zz3g^y21`NW5ku^>6G?AhO}DQ#M_Fx-#tfV#LuXfUmcN}ng&gZf(&E<K;)bVL4CW7K zb#XE^zHd8mpC9wPXgj=ls-`13K?NZz4V-Owe4KA@U~>=+JN-`Xk6n4&gO#x7uS0W< zme14NlHDp1UTeSv&6{Ju@9NFcl6RV|a957bFn@wn_Iqr4zLWv>($qORoIJ$JI+Q<A z_5C(|5L)t*ozGSu0Hu?4*==w%$n3CO#kMqeHx_rXA^phOo4Sk*0DLhaoEa@{I0R3A zNXac4oUDzV9OovP<Fde62QvhBM~O;^iYyp@`^ZO23iA^v5I@I+500dd@TXRO&HKf% z3k%04zUJ<Rupn)sgOQYYg^6r>eO+p7UN3rml(Cqs!k)3jZiS`@OL=_t<E1zBXIg|O z3M3~gLnUYsiG7L!o^D4Wnf8t%UPqAX{+kcW<@w%ZY_CUaGhK{?!w`zgDBlHYY^eBZ zsPj{C_cz$qZHVb3vqke01SqpPI0+L29VJ83uDt7EaNehTRb2x-A|2nw#$BgBBsNkY zIhPM?jeeTEilgdBQuV5CF|hJ`Jm+`+(&-GY?XNFKJe1w#9c_iB1RW)ms0`p0Yx7!b z<8srrb$n$E<vr}ei+q7fW0(Q^0}BLOT9hkGd;t^u<u>Dm$v3Ea*l7Cor+Bc$nvlDG ziqOmLuGgMujRNRzv$_rNrpqvk&^LgH#AvT_l-+GT4(jATOc{?RmX+qB%;x0!{0IUN z8vj24=s*|04vgKv%pW+8iXM3_X?l5K<NkJA&w!eq2g>9Nn`*jch8GRSm0eG(8SWmQ zo?4up{Br4b?3*z5HyU`X`+RWT&hXS^`;{xD4UMTqC1J_w`zA-z7HJhy?w8i$99?zM zB8v2a4I#gQs9B1nL-7UMJgC%)NM6GThuB85ph^|1n3KE3{sqgu-4|fs>(Oy*<F!?7 zk6dPrAWh#!0li3SX~*!Kw&D3ZkwpWVh%5>@XB8CizQ901O)W%x2Ms)y^awT^k9z#d z|IB@3s%O`hH}BsHj>&=W_AI+9qpYKee{odFP;&KXMaTG+v6-ob&*bBuZ}1J<-^^Y+ zV1eewv2_s4M78$*fwIQtwBl0e<L+paqi<`kVeM(;lkF5;W8#;`rx$cq#GFjnT+A|4 z!Zsh67dB6)R`d}yi*bl;5-^HHgcP4J_LnNJ-~QM<w2E2g4<c_KRL-m(sNvHn>yS?+ z?G%>NdUNw}O4YC-vVdI9RnyARH6Sn|HM6Xt36-Kb6tR9ATH+5d=jZS!|H|!+wYyuX zn8Xf?B8!46GQA_IYBaTKq`GTjc;?Hc+xai`7!GRu7_Wc*95p|=>+03Y<`z(U2wptG zqHMhVjO-j#Ef6pUnFbW9xumdb_?{6ornz7b%!^uOiCJe0nx=r-OWGuwh1RfY2If_q zOt$|E<!Y{>E<Y_c46d|~Yy{<WGpPs2JC|ztHqy!<%)B$#9y}{<pK*<?BbId&({*(A z4+u-iD5|Xo;HOZQ2H$*`V={)NQ_Q0>hZFO2@D*!!Hdk(MB16)tc{PXx3^fqx(WM~x zH*y-rx<+PaR~Dug=O^~3(9h4|nuKt~pnz{z0sEUBgI7`Bz7T~yTJ3!d?HuGyk&2Ef znr>-QmNB#n?q~T8`Sc>itaF4dGeoU2dGupwm3>qYSuzf3?5aK++xJdr`zw@{JG+?V zWXsT6dFL!DZZ%KW3vL4&&!~tPm|SlkUQevN&Y}^(rDkvK<`$fgl2>&JV`;{Jdou7K zV`JEc0{{$A2ZpE6b$W4OdTDWHdHLSngPy_bAoxT~@{IDXo~dmab>o?}<E>X`puz0Q z;`9>0zW^WE=l>53X<>t4avHl)QxuZ&3xnd55y9bB?q1p!){=(aGByeF_6huY!Ng*g z<f0a$mYKp9>4Iiy!sclVY60vz!OE^#j2c0@4)G_v{maX<*Y6uqH>_KN$F`9TL!Uxk zy(pc47D>xAk_$G0X^j(W4?*N5&0`pq9L$_uJ;S52%PTtu2Y;)O9%tsj<hPtJq3iO^ zmF2nBm76Oo>vwNK4UWoT7B)D;qb+G38CE!uRX<VMJ~=qKxVW)8ySf4&#MB8i{Kv^A zjei}w9!S)GyN58e)pv9iR9}kA&I^c5bPfnLb3tgDS&Qm=iJC@<n?^7wd!9b8$0+9p zIxk?F4E4NGJh_|)w|<zoRU)aZdscD#33dP4GB~}Bx#SA2CcS2uQ#(Y(^OB}d1C^As znnS|E?Y*w?EmOZ@G8rcw8%IP?NNiSaYhT~5*Zt?_#*R0w7pDc9pPiWhO#5eC(U1e- zbF0g9Yby)uYm1n=u|B`PzO=Qu^L+2-@`{eB9|4O3<pmq>w97e7(|Ju(ZCB^uQ_yT~ zZ56gKv$O<Tn_66)__8eJ_({~yR~f#Ih*6INqobGFE~DmQX6E=r#n~bKO`Kd*j4XLH zJo)uQxwZU>C2R=J>z&t+2H^RO69i0?m^Fe(q!GM^kqnx_LR$Vmef)R=-oL7BJ$Q-& zesHDf>MA^{IcG>%<$%busrYcIdxNVPTYZR3Zl+RnS2S?|8y}gL32__*bo+tlSJPmA z<2?2;^rt5FX<uFjNe9knS6AoPRsmg11;{aVd3$SRdmAb|-?)2s@72@F`c4HcHvmjj zKPWi2H?L{B?DBN~_~P=`28Mi)eb^?3{Hfn!cyQT!%*T7LU$1Iu%POyoO3%Vvt1_~8 zRMa=;RB>fh_ob0{J1t;DeZh|3H0iui9FJivuVE~;ycdZKf<-%oP#WPHkbA=3zodNl z_)|ka>MzuQ$ajuyIYwUMM!Onu(<EXE>!i}2<$F&{FVApj1xV;v+k1NlC#4pn<5sMR znxpmn&?_kU-S;r7pz160YipqD3us9PuvfOWP!%}8wY_?4d+qjZsOz_HeXe(IKYF&e z_ws2-bPf%l4xyltj#o}<?RZ}ERAc|#%*yJ@)&^_}O+H5Uv5<eT^B(6q*Z7xLItQ;- zG_|A`mtkfmSb2D(Ru}4;(91c}DY%nfv^&GA!=mB?v~%l4azWLNAi3y7DCI&T?{!WV z@!;Xk34wopx&83j{zNheU4@OK<z31(koDvj>{abz0r>85=m@o3Y7Q2zuHF$b871X# zqQ6PSAC}GW<EiKbTH8N+^|h7H!oInILLDvUP*H^6LDh{rcMjIgyLaJn>n^(9yZiFp z%h{DxB`pLYt1`QqS4e(eVe8HEj+qhI=&jA=FXYedYd(5pL0=#^i2V_%{$HQWf4)}L 
z+S^-HTMuT>FDAj!KiJIGUC-KDNY#u)(urKk@dp7TG9eQl{b)|z2o9Za4xKPoji57P zwx=as{vl@P8&PyZ;GdqJ{M0_Qg1PDnB5xg9C2X3a8`y$c>nG#X&^JA?{wTL`Owb@$ z)yy7jd~{|GnAjUPZ~l|X5ESk6=s<OG5$$Bqw0{wH4DhSBP%^$x_}0C9`0K{Kd-r#D zp1pYy5}gZzPbOyJ65p8Ldb6l)rekDjegn3-g^C3LdyLOVlRtsi=N#Fc;rZYVl(7dX zK+igkvi0^ec63oQwjh(VB@nYdEn)#tHj|PUr*1f#RtT$RFg(&JA-@x`{)dP)leE+B zljkQG{+VU<_D)&HEanPScEgx}VWhIhB{h#4G6_3}kfK{VFPn!}^}O;x<ZV5@f)i5< zYw8Bk6AXUetljYg@;>!N;lXmuq2+y_cC@g6i~9D12gj@+_?vg`XdC+waHxr!geO*y zg5Xzm&yFpwuixHY+Sq`9p~A3o9PEz}eDq`I;2`+D<bvYhgk(fen3cP?p^crmmeqHn zHa`fNoIbAyQqQg(%Ay&>tRBFm9srL-a-QD{nxBDyHnsMI!#}gM4=q>sE;bIXfXM56 z=Zl!9=>(#jJiWZj)tSwarTeK>Lu{Hp8s>JM;ZdoDCFooY4vUKooAIxgX@9=w%;Eyb z`{!77ZFPSAS2MFuJ2uoGZa;i<`_U2g7BqYC;Ng=e&)&RraEl@0P^FS`a8GV2>71|X zp1(f3zJBM{>h>mD@YgVckB)5@a0RwUXE;%FFDEXwcjQ)9!#IeHigWZ0G<8B~nwl{v zJAW@=dYV_4kXMIE#g|z<kWtm2LDiQ*1xcyu|6g+6XL<Dabc0Vg{4+(q2>qvI+DA5` z3i||&<JG+D)xGPeq+Fariyu6H+dICYiAWdIwzcy@Mx|$0x3-VWO#jx*%-5G<=l10t zoq_x;@1LCv+Oea}`B$orf%(p($58L>?0i%2A9#M}@nckd^cdCt@#D82-?wxQs_J=> z3K|M%`-K<wS9C43UR#`4-dMf01>buhpNX>f2WI9RT5NSx1*yjbvth~Us5uF!8EAI$ z1{SB~Jx}o)5wI#!U$kdX^QTuq(kXe<Dtgk&yPsAI`cKuM|DzTXRnU1t<DXM5b<bAy zETD|d@Vc@?s+4`9b^yxBpObMMUA(jT@OgA$uYi`Dv4fLWcywN6&A{mBSE1{-Yd7YO zGB%%qzq!0Lzq-13K;AL5Lri*5+xJmNgS~ry?{j^KsZXAKu6w(Wp6tK7hpxN#c6RSR z-ht2Vy?pWU(~kvZO$<W%B*LcdNzKKV=gK?hu1v0>0^}`>;G<GHGfPXD96px)pS^jc z)I%`a+|yflsV*V6ATTb;*+0b0*-h8Nf=|ooKa@jG3!0LPTc1<$p;7RpmUpL?bEB4V zA(!{~4}<vsYY@+6o&4<C$wI0>Azkw{w{fEB+G^YIdRon}n0c}~vO&YQk@BLWvUSvh zXKw~)wl!SSCG~6^eEdMW>N+~d=H|Y>QhD5^dE;NFXi)OLzP_}%`33l2&CEgC@9jJW z4d3Vd(bFeT0djbJ{N%~b(<i&np6xt)x`TQh^Y~=H4TgO9_+7N@zy9!cX=77b)d|8$ zHP^Jl_Sy2z`L>~@#Z4Grw!Y->S7F~U*pFjlejM*=tZ`JjeyFOY6@uBwv`mnCYY(KM zt%H(*>1p-A|Em%Cy;?90nszE#S4tTdN+}0&DaSL0v1C^1#O5i*C-=ko-!CnL^Cg#2 z(QEtYrm<g%q<w*=Z<DND9;t*)dgYb-PhTfj42tS|8`wF5$fp&T_6-kz6}tW=hl2@P zF)13<71dP;Bo8Y5KJd5Ez<<_uv~E9yiq><W8bvs!K7R%km_JxC{G;H1vIo83!5)VE zJ(TBp^mzBhv)$)UO>IL6d9=9Hks<kgfPH=c!X(<~VALMv?U$D@sr%#pv@b&dTI#2o zx_dzC6LSmvV-r!X-o;JF%$#4_;s4Q${tulf3VC-bX(tMxUDBQcUFoz#co0Q=h(fEF zx?g_&^l!%4%a<=^XQrC!tDEYo;mOk{d;iLvdGju({ssWwJhWEQHZ5kBpyFAp?cdC* z<SnG_yY}GO`0{;2WWJQ1jlH)Ih<tftGb%qhcX&P+mP<L~*yd%SfnWMOg#hj0F<Ezv zpV^mpH0@tJ%#+Wm4lo1KPtcIV<C7OJaMiu%&v&0a-|qq_*+-e5J(M+iyaR*9&p&@C zs%W9%(>^O;XdF;d(lHP2ZfJHLW$)2yKZl;EgIc;iH-~Ep2`<|M*!K(#RW`R~mR5wP zWVnSySbHM%ZS9o}ObB#C{?j;_NGp_7+Lc5GK`QG`F6T)m??s~;pb*qxl+<UJJ^<m> zzX$5~?%uYsG(XSFO-)TjMn+0bK}JDAPEJlnNlC%Q$*!rcT3=J~=kL|n{1(R8T)wdx zmeX~?ItyHls(Uq|sHJ;U`R?oYRh>6w%mQ?+?A${lk_(Dk`}%%IescdF0#l0%5G2m7 zuFQP}{AWwE{owvV;4yX`v>L-L2H=BWKYjJ;$;($yU%Z5h`7f$s-hKJ%*Ot#elfQ5K z(LR6Y)sJsy7FT3cod`J8q%5P0F3(-+TkINLS=iiK+1dcL$2gp+uV>~QXF2WoJnBw> z4d}(QaoKsu$XExT024=7EfX^#1H>t-99E+c8V)fs76EcrAqrMu3N~R%HevpYDpJZi zvZ{J<@kxJo+UI9yWTY;#u`(+u$hx^WrX<ES*Vn*{%Zu}E&Gnh7Nk~sOLp>c{ZZ0lP zj>6omKl4IV_2r3bh+eO)0`@u{1qz5t5P3fB5GqOgu8}3sd1O+(oS~hK2P!9DTHkne zVjNd0@<>-Rg`QV{y3D%1wtx;=S8v_gk62L(|8PH0-MRl{7p?7Iqy7>N`KwpYKiAhU zUVmHRMa;XX9`WMEn-3q}{`Be1hmTNS{rKV4`yXGv`|<g^chBCuef0F%?u+No-@J}a zDq_E2PABc)mef?)y-?r3I5@M0_Vz271SY1NA7|p|^vcRjutsaE)2l1}H^weq?#wE$ z3QbA(2#>b%K<ZiBDH)mzxfcs-*-=uE*;trZm>Qa!7+|WIk)E}MDGwJ1I~Nb^^WPVE zpc+)2iHV`DsqTpBE+2k0GSEGJ`gBO3-=DK@J5OGulwZX}uPIeS!lsF+6s1=kxwwt2 zS@@m3SA#RR^<3k0tn3iM;R(3~%{{%CTfyUZvsk+WCT_jCvNFHA3a$p_;ZZYW_7(mC zCgqNC?_fgELFm(i!j6&gZwP<+`t7T?zgBn!9^pmwyKkPoeA&^_R$W<EU0GUHQCeAE zQdt4Dw7jgitgNX2>cGP%&%o`x|LOhA;xfOaIRv&Eh>ZN!86dx7bY*^heP8Wy&6@kB zvzL~Yq-CUKWTrz+Nlgihj`H&Nw?`mM?d;5*T}_<b)Qn6Fe3Dg65uEI-SWk+Zvppdt zeL>xoUw-*pV*l#ZOFBAQzVkf5^D#f-fp9)^_DpVe`k%0SLz64H^<%(%*Z7uGWUaVW zHbk$|)>)^8Onnk+c3-_KZJScE@HBHl_(sR)R8;jJNKuae7M;h4JZ5Ua;`;hGLF>MU 
z2jCxKfCoQ=&c^%-{EL@|RsA6ChwJNi?|1h0m>3y|h>5^PepM+c&YU?TCoBEx!%xt~ z?mT+5^WxdP-5t~#b5<p8O`rJ6k?P*XwxQ*lD{J#>D-h3Oj6I&837vXq{01W<0|^Q7 z;jcS;_Kc9Is14Fz)6CMu)z{SCivtJq9<C0j2+7q<kS|_B(DC<MCoXwWf|G;ow|+`X zU5%WAd~s?1e|z^zn?@?T=OKWFs^*%b;98~O*TkjjOCf33&^x{P=y_;XvznPLcv1A6 z;a1d~;op7(0o2kDa5c-zsHu``s6@@m))vOq{F<ftqVQm89-(R92ma{+4|AAm%p<1m zzk;c6fBg7(cbA5S+TX|P*^|Asm8Jdq_RVV<>5Ia`g7^3Kw(sA&_ZT(A+<&t7^4*)5 z<YFpbEiy3+r`Y<cp2fN=i`QpY=hi;YOu)BnYH^`=aDbbOv$w1L_V(tlU$?sa_U#*a zSt%}F9&0Z@Ju6#nYbO~kOEy-fy`4wfTkBA_Hr6l|eu1r(*%=}-W+Bz7n-f2M`kP68 zU-xAKg0oot`KO<L%Fj;sa(CU{#&VbD=g*#kI9QmO{P@55lb^qOn{{cV{l;e3<gN7D zQ89}YRb+#zPb2vSM<KnSt-V(_Z|}K86zE#pdxS<N<)flku&GP8Z_lo;gKM4N+FID& z29gimd2-?A`WnX5Vk)#>*xFjYb$jK`-NnrfQ25(ByN_SJx&P!T)F*F$+<X1*J}RB~ z+0vlGRdCrKK7yxx|I?=*KmGjiK>g|GpWl7>2yDX>P(iE}8oYW3Ek5l%dkV}a#YG?f z`Kro_{CvFqLxVS$=5O790N;+j7Vz-h$9H|#Mmfcd&z#pc52>i=UVwO~Z(?P3WoZF| z?%TKLH#eu2mk#?JcoCTA<>3Z&4}YD8sxk`)yOF)Ku9dC2ne};DO(G&9R0;!KQ2(MU zJRu??Bq66_5K!pu?Zw<F^lwxQ^>vt;7_q)(Yh?jrD*+(^$O6{iB`+(@&d#>DvheOl zEPrfwXz+@+yQ`YA;>8Q%aN?lC6HRs1Ab+39@zD<-K49r(pMD-1=&LL(EHBQlC@H8a zD@;#Kckzkx3rmbl$TIgx5Vt8%vP<O@k|n2MWMpCy6BSX{)YC9?wQ%zah)v9|uDv!j zT~u7CuCB(<cOK5Hh^Vlkp+S99Jy5+5007^0r6)ZjEjB(DYD`>AY+OuyLR@@ed}3lk zK|$`q#s(-nyrQTe54#Opl97?}^YdBV+<gB2$4BU-IspG@cek<rQbArey5<z-@7L_= ziqiGXO<=yYxuK{a7iw`q9_9(E=I4}`6mD#+zyJ6Xbe5ebd$^caS5bl^oSB<_<?5A< zJGUP`L1phz!_Dp<be7S{$@3y=1gr`wPRTjVQ<wUeD_gHBt0=3eD0z6g!LOcNTAbb7 zJUGUoneXT2;lj#1oE=(vdL<n_O;ZakGfNRw0}>wT?+7XX-|x@-=l`7gpEJbYeXb<` zI88)AL4S^4F}tw(8#eZKcS;L$%}tDCr6t8gh52}SB*aB!q%N8n8<rI2K7RBND{Szo z=K2~q>EG0{qP)85axk0FCM`K$K~6?k5Y8=+<VA58N4tj)e$V1`c6L_49_w55^mJU@ zoC=Ea(5c&7u-wTlAvO{s-{O+OyZ68PJC(UvY0S)wuw^wBrQiVHjMOCfv2Yw<pZ&Z& z73F1FS(s^Qsa+iGzS-crcW)UO=y|!hpf|}#OTx&Gu9EOenVIOR&oMD^^3&4NTALe} z7iC|nD9TPxHqzIjrKREG;fc-8&#$WH=H{fQKWA!WP?(!pUQ&>rlAx}tOhrX0E+IC% zwuU+Np!`-=mZW5)axzk1z5-R@97#z^P*YPWD#=5D!^6!9V_YeE&jbCCq9Q`D9Ur9U zs}CP{pFex?_TA+4Bo!5vxR{8X3}#m)F%@>6l9Iy1%@tsVtw96XuNx@HNwcytBU~IY zx&hyH>eLVE$q85?{Qid@sA;G<IM}b>7~XyP^8U`wlQ-|C=Vm1(FVN6ZUyu-SaP*}( zuS+g&=@4C4-M18-UTJ6N04@bi0UXSb(BQ>ew=h?hN2Vr(galxhpkk#(OGE9u@4qJ@ zC1qe_1gRIkC`Cy{DI+Z<b@2iiV0gq-Ss7{Y`NU*Y>=*StLNX5;1AQGWIJ3e+0+yyG ziE&Xm87aUmJjTaF*jSp22n*8D(Ezbqo9o|vb!KW3F)^{U<OTGxLRDFGg<}O96yQG( zIKc<~ygdr@vdW6{lH;RQl@utcD15x!e=G9<Hk{DIuRnYG1dfcTsEC1)9^jFcmVCHP zXL~a#DM>(}@ALsvhxBkeb?OvQ`uy25TwnU=(L+Z&8~Ch=k-@>&-@JJPikBE0b@(IN zn(F`kkAE;R(oc<#eDl`S_$U=6g|LVS6EhPlE7P4@+ut+*?+VAw*~JOe9>x5vTLy*( ztSn52zvaRGd$3pEfB(Jd*E@zO4z|{WM1<YFUC-aXd-?AD$c+)`Zc7WZhg<TW=dmz1 zy*YmqR|5wdD^n9A&}tTDCU6wcLvLd<ki*vopkG5{;Gv_V_2HAJFF$;&t||w2L4S^} zs_gR$j=ug;Rw4bf+*;aRIVD{S?PFW_U%Y+x<S`5ra4w9D4CXdAV061aJ_empTSKk3 zstjwsVt5Gkb<XxS;AgZgtOdkz26~<dM*<Q`J|&yrl&X(E{RFRofI~+BQ~+{3ybbpE z0Knjin;R}2d>uLnoO%$w!yl0l7x}OM{%;V6!*7A2o<4m#IU)A=z+)Ioh>1M>*818C zXt<ob48q+7B6_TwU0q&;gJbVt(?8Jj?)^K|=#vx=R6sa(oNw=HZzdupGB(uzb$?#J z2JM2Mf4DJ>mE`241AW-<F9gHi4?q0C$UqO!Vr>AY@*MrS)~=4xnJLhDLn8w=R_4Ra zc6T1Jv$423+2h)0=*tokVh~6^fBW{vjS-OO#f7=U&ET|ZYN*xKUxEfp*cw2WS65RR z86N?`2Ms)8-bG_AKwel}xO!{5v%3?Xg8;sK`PD41-Mwe3+U`WGN@7MK;B(45=SG(9 z!|VP&p1@&IY4P;R^0l#17&4OLqw#-TYf~MZaV1@SK2b3)9L&4BIGiCS=a#dG&TV@4 z0mVFwgXhlC%}k9SdEQ|-fc+;SAsM{Vd%!b9fj$6`huec&5EM9%YilZs@^t5Dcb^>h zp81a+K7jE$A6v>1#XLEgnz~ACd^8m`RdPZMRzhGu4UP0$+nd4C-@kVkx(P0)gteq3 z#GF2TDmN?rNao=bKf+-JKP!)$Yey^2Tj_3Xtb>3%yE=Cd_CvU8V5pCkdDI0kywgY^ z(4eKG1*r#DKRi5yi+Mr*^WZ(AVj^kjXqFeTq_&*wZ6UfTC@usCf>m?Ag0Qg`4y=Em zU*F&ra9m)PaJ57RW)iX}@M-%cRgaWho^2mqL(7Gsu&~h3<mABc5RBc&VICq@Q5iWd zegQ5{b{x4%#~%nt7(}&v6Uv{y`7y}P8}@KxZRPl<UPzE1Ozclz@|<{>$6v1Y_Ye~k zrKcuA^mLr|D@*e*%7BdE(0MX#ZH=7VEVMXaVgBvgHw+BtpiOQ4CD?a27f0CI^Jm}w 
zncWBX?!xc{10Mt_hc|LTTohFC6?#wu=HZ89jpm?OTbQyiGng40;QBe>l$e+pEKH5D zz6<77UO}$BwsvfO&cFaC^98w?Frc63<!;4xc@72|(7=SG_{H^=fvZ<=GB3afnECp7 z!_Qe+S~%Rm*})F1bz*V?10%h)h1u1CzTI6cf7E7eWvQpLEj%O;POF`heN1c=^xUq? zt=}{o9li#~5RUMriu}Cd7QmiX);S=jyS#J0Wq7@+s*0R~Jg2arv%e3#jjg4{)WnTX zKYhgdb>IUd!-9!Ph<SxY*m-$zG4F|R`tCF#C662;qHuEMKAabbn+`XNkBNlitf8jz z3GIIlmc{v5pd~gY5`#U&hTuQ3G7q7L<OOjUf2+%ju|fzRG&3_=-B|tc7#Elz9ER_0 zs;|byysoZRVQ~R2=3l*f1p%XmhFV2c`P=vJdAK;06yy#!0|?I&5YW)kiV6!Jez&Qv znu3C&uC@x5zJC1*R|X?8EC`~x(}d(OQg3gqV;eGJPoFt+j)DHn*)vUbSUMLk4;LT* z`GoYe{*mkY270)dXJKK27>}5Q1O{ttGc=~*h`At~`mbK;?&-k8JRptq@x;YE#P0%v z{Ba4f5bnV+diLzuoXph2%?xyP&YU>|#6m1-V{c<)Yl(HYRh1P92ni{vslZ+Q{PWLA z>6H}R8iayI79r(Dm*+}a#<{q-t?jI;8|rzuxuDO&S;Lw?G1SvJb@~(u2{9aGZUG@S z9v)oGd$~FP&#AK{9Fnf#g$e0J@R7FWhHppAPd`D>0ecQ-Bn6u{76%{%__yzDf6;kr z5IwB6mXL^uhL#4RB&<6F{!m6ns<*Fe@9Ewjz`UMzaY-R?h}GLb7lWgxrlwL_Q93m} z4x7r$!eW!8Bqf-b8D(Upa9z{`%q+}}msS_|o;-mcj&<<WQdc=mKuko(M@mlF(N=%B zfu6P|upuii2e^8<yJ9`UENm?PVWBPEU3z-DSm6N+1RD{T5a-}v$GW#`E6eaRP0WmH z8|&KJn{e566!XG@zP?^~bY4)9AADndLv4CS3fM@jM_5x`6?%l6f}E0yBEr*MSw%@s zRtoFaF)%SoN=w3U(A|L=1#aJYpr+$N$f_)$7Z_JLQqnpRlTn<OlT%t<=@aP3#>R4M zdkagVKtqj$jEseYgNdD;PehD^_dFiv|MA0FB323Kup(`JQ$|L5Tp^*fj3i+1d?-f; zCCJ|g#+|zQnm0eb$LKr|b-0<Bs4z1N6Kn?t@sHT<D+NxHpYMEmRoV9K&0{j(Sci*w zQVjFdG}uib#@nc=DVJ52HlXC1eEh~RR_0k)SyWV&n3)*AiM%j{kC#VQMyjW;^Y)$F zG&q>o)7CgcNJ7daLP1G=xuq5>9u$<6%4#a)6cjFw_E--c6AQDazh7-@tFEp#R^|a| zh{^#T9$p@-DGN9(@G}hz^-3$to15zJFfV*O=3CoaDyzyUQCR})%!9sZ>1Y|78Ha?2 zLX4oVqkXs;bWb*RHYF7$a6Si}s<~|_5xoqvqFZX+cva70VZ%U4O?5<U6bA>}Eo^Bb zZB2D@N(yd%eyHNIay$ZpxS02L2j<TZFpJs+7hI5(0dJ1=SV6cgFE0z4i8U=yQCbK) z91#@;QXdx`fy>*7i3+i@vM4JnFfuY=<p&yYadYP8=Pa$v|1mmWS_E+}bYvKXA?`R> z!9x-f5@=|s0`s}q>9EgOv%XRnCD_^7jEwYIn6Wbtn-UQbDz7SET$)4aJT`Bmqosb9 zh?JT~k(`pYv@jcMZkdvb%FxUdoR}*P<{6op5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN* z(AC!|EGcSiti{i~&r!_B#mDqs>AG^Ii;4=nssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY z8y7d1p{Ws^vgZ1)a)2=jMdVy+%u0yBoSw3d`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_ zOh^KEuF}o<yVC?`nM5pov(KOBSCp5*l?Kt&R0GF_RfSrc>VbK850@JgBT*4yxMEm9 znv;_qbOv<(^{bbM8-NSt;^IutNWD2bb>!rqqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv z9$DF0d3m|b%uT@CBqzo}zrgCpfO+^Jcv0{$Seb_i1@<{RFMDG01}^4xwbTiS$<GOD z6O&V?CSqwIBX#i{{W)6~XIeU17e_lB%rm-pdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7 z*wAq42%Yyw($e8!ULZa(cJ#*3%Gxpwjuaw9fAkFW-oD<&WySMrE1;Qn)|Oaxo{yi; z#=(w+l%&1oTVGvW*G<f(LUzH%A*R0M@?25tl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8 zc$okG4B;6DVN>sPZXRAW6-8Xk8yM)qPr&MJU`T=;c5t-q>+g;T3&F)aAkEFqWo=`D zhj{=jIW=i&dhD3Y<MB4yI+~#2^UHIM&GlYBUPdN{I(pima-XZ7w!WdBql;r@O=U__ zJR<`=)?EVTVd!yiuwli;Jah_9j@0y&k&&yo&c2S88X*Y<ouCdODOE;F92TTSguv4} zdANfxVm-nj7r?x;hkHd`EiliHi+OU8ab1Xsa4`=@3=TqmQ9(l^iuvW=&pbacKRGq_ z@bM!&%rh|32ZaW<v^76{{aQg@_B;=_mzyhC1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU= z3Y3*oKT9J-EM#gCR#nh8Thcah`@s&jph=CIj#gb)&&n0y92h34pn#isC$RHp7=(;G zQn`3|)p0Nn-Uh~Z1{}<{qM5g{wrKBc3l9s%!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A z<YYQJn&p+HvkTKby`4!Z3BLY5-bfEGO!e`EN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQW za5K-r$)1oDfAwk~*jGHv)9|UEC8o$sjmH|I0`o3jp17Guc^hZY`MNq3^SHbXIhme; zo(%f(I<5v7oo{SBO6UE&k7ORCX?J%Ab@dtBRS699!C}Geo$b5Np9cl{!O3N0Vu1G4 z)KmZo7`d|q1mAuC9hgj@zUy)e))Or*DoD@BM8~grhFe?SJ~6pwtn~8D`K`x?m{+Bt zqt!GpuyOZ5ghWUwD&oEl>vWoc1aw~CErp94C-dO7j%41_%B-cWDJ&!qAM?E2pqUU4 z;$a^685<uyCi8gwJTPBTRet}`gU5S2vx_%}N3ISH4Gdi$7#iwFJzgKUK0F9t^k{cC zFFOO`SzJ1gVIG&xLjw*Db_nSD2YP7nFi%QB$*p{bh&(eb0W0$$;|QepITZ7@xEg$B zUI(|gK{GEaD}A)LIR^940Kd1Hni=2O*};{eMKjNUVgAm8d-r!9*EThH!hmaPte_~* z%Em&9Ug=|RW7*r){t@@RvQeoIRwh?(&x^`7giP|BYTgM|BRLHdS0*+NngP&x8ag^n z14A2kPef>>jFJ*3I~%TRQfCNBPtyzOxhCOZ9;NffM=@_{*4)+<`bRK7J~8~y>O6+| z%ChdR%bk~7+glnT=s?#xs4eKnu8ua4DQI6@kPXbg!sBi5>O4CKTXbyHmHux0%#(8} 
zoF*j0%{-Hv5As;dgP&(*`5iisPVA5!pZO#G{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62l zNr>Tig2{QZ_v>Z%$B!N!z9zUI%OH@Rml`h_S@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&h zY8e>Xx_i2zm{-QnJa`)n^LYHc;ZZtoae#TOxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC z1o{`m*D#n;QBfYOl#~=CB*d6i;TY!e>pTlyoj;0sbzq*9UG6jycIL&7VqWZP=JELX zBfQNQop1bum<Q3o^{c;P{^9+X@7`x+rol01Wo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>* zP!B6RFA=>IjjS^={c>9Ectz*T)0c0*cpEw`Lql5+FZZx06!W;|+qgQOB_cb^D5~e0 zjE8yh<1xSQZSXJ;I)9{}ADb9Cn)ziE^H`?@e96zF@-{$wfUo!7?#{~c!hYS{Seu%- z0Vh2_C*w%wxsGD~I66;6N<qRVeH8OP=WzIW@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah z$J@8IL26W$u>jWA+yD~v!>J#zPCDA}@5PlBP3>*x&vRMW_z^M5QAyc*q_(Hj+^Fih ziG_LPBbgV`N8r_Yvg7J}SV+)6uk*)Y9y8wtH}j}LyrircS5^Ub0632f4}SXMImXYw z`X`u=iHip2Y4Ca*U>>iZXLA3n^LUv5oVQ{7{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3 z=4PkEHy)m({owvRc%6)#9EM}8tN8cxw8fQ`jqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m* zyrrU~NJT@fWoTq~l+JtXGw+s+KX3DedE4XYJf6G_9_Gb<i_UMLGjFw3W%!scEyBe- z<~ppEm0AB_Z$>K4yv-k?^SWqnb0qU_zQ}X*=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE z9qr&)9d2f6W<o?vBrYKi2tmMwYY_a*d|NAeofK-$QgKBa6}QIO^SXM-{Pemh&yZ9B zAz@W*ExY6Dyr_|98lL?8fn25SQ96H|81@hFHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L z+Emt5qOM8(Ugm{`_|Ko`K{MY4I*)aKFw7JG0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK z2fR%in0atDW=4isewvsF^mI9SSyCL#i|^}v)jso|*9LCic_1WdPQan2?VXoeHx*ml zYi8$c<>G>$`OiAfBx>fJaRl@D@-}s!nQv_j55?(i#E#0_pmm=8n9RR_{~o?5G{_%Y zc)LW3nQw#Ed8})f=I3T0FxS)5E~_j{Oo%<w&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_ z^WbfC@OYbJ#jyFs1sI+GCz$7tPdF~~Z5^$^ypp24ot4GmW<1=StZb|rS{e|7wPKqY z{;Qv_s%&U&;o<&jJ?zfjQ)wmpvs{|;4oUG9qw%GK-VsTzL7_*+umsFvR(@Gr+&pT( zJ6B0UeC)i<5i#sB{X7)l&>Y)@^T&@KLQJHtu9{bn3s3T}tt)A1s3j&QHZe7<s46ea z&!(cL+{CuB7?Zbgbh2Y%W_t1bIo4AKgv7?j3=a0-^EPPaamBD-yv?y<Sly%j{886o ziwlmNd*J7fgrBp#cti~Qc=rfz6BPPc=am)ZZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4 z&-3!k&rTm~aPPrm4iN)FHf33dq?oen2^AxLiMdj^ufw94Cnh6gk+2WW=NAx?k-G3Z zn9t5gAtoWVv9oS!Jvu*+X8sSjCiM$?ewU22<l)yp{`3><gMy-5W_AXs$KhEVP*M_O zfq5HStIC>+hPrAHz>%RrtkY?Dc(`C}0z2{Jdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8 zgNJ#2BLg`(8QjTCGToz?7vMiHA_6h27x;M?G4U`zF$FsR7|(qV7@0ulL)yFAF*<L9 zP3Hv#&a-o{+c`PFu#0u=z<vx{R8ihHd|g0*Z*uJ0WyLq=R#;JgvROyYAt|c(T3q>c zIaPI57DhZePsDn`C8GF(lq?^%neD5~i$D%YA~5e{XLGoL9}=~0(b3tV`N&))Du(@I z{QNJddwuAF{9=9Uf`k|xLl_6?>FHj-#PVHw+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP z>FLku=xD;G4!=7&J_?;hOG_guIbnEsfEMrE15$Qb^fjsY-@!a)zKsiBZv)31@RO65 z6A<9%=HfWqU}ky()hP^g^NR}V>ub<Uys<DZEXdEr#R;eT933rIJ9l8d&5cKo9^%SX z8tUm%QBxs(y&Ibwp1yvgs32!!X?D1ojPylXI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc z!{Zbg%UNz6QM1U%lItE31<J~*c&<qiu}XNxl<65+(9zKxUI_(O0r-)Xm8PVk^guWt z-Utj^5WXQi5UuUaM>2o(d>hdD<1t@eQb<fpbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_ zWNvOQFCWi_#=2K;-$F|{87wOrit@7HoI>w#vd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn z$vG8Jem)gH^XKr)w{gKc-)6rfQ&3P~Jxv3B-N3xDsZm~0ep_1;{I2$9EF++on=5RA z8oCd*B_0^&(d(pQ;3vQP@jVkGgCEl4aI>0<Vt|>Ekpb9<i;1!@H^sUHFFz*(bHz|d zfbTGS`v(WZ{J_XC=seaDxxKjt6oZ^e^emV51@oxTf~$$;SGVr(VViH`c$$!ugk91% zp*%D$8;1LaOIU7Lt){9B*fTQHA6{4i`w0v~9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0 zY{2#}zp=o<CnO})(o**i@agJm|M=<WsEAMk0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_ zIyyV%7v|2)PT}!3XNk$FcvT5VD6r3~M9Db57*^~Uxypt4*@)1fr%#_?T~dKc?&W90 zaAsv|l~<TQHaP~T$;Z<TYs>Ae4Xkt259&NHe`EamPe1?6&B<YjV;S#@=g)3!Z!yrH zv$3`e4-4kQu|5t&b!}xC*Db}w59s{$sR>b0;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW z6<%;<V(HFd=6^U#M8PE;oKjKRKFz|)E-s2?DJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8 z;5pLIA9YPCetdG|(=nL;`R8BCOA7%!XM5Y@1MlKsclPXAad9ydGn1&8h|$q&A3lD3 z^86{psu22Nm72rl!TozM^n)b^d+hGzR$E`QzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk z@#y>!-ezfGmW+%H&z;6G7l|BQoeGKzSGLx{{^7aRXHRFlp}r2*H3b-*kByHWy)gu* zz}(arYxuBVpohXfXJ(~?L!Q9480{DI-4x*<WTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(v zHy1k@10Sfput~UoW>-*l_cz$1=Fgudq&Y7aky%~RHWd&SPfAL1{99G~czc{aeOgpZ z#N5&>GAev_VJ0>z4A(lT<1>HE7#7;xfBewg)aZv(KPbw}K70D)$d8yB8=g9S3PLbY 
zO9;C23UXK1mr-W;-MhxdS_%qsE>8Aw9OvZ#U32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY> z^Q5$bI+V1GSl5h*i-~-mdq95<>q<0qgD<&C95E~?Gjx9v65^6Wvsq9UB0HUsh)`1U zqKlhrS!LPk=2}%{8Jz1ZY?`^Xxen)ufZ*)CySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@ zvEQ%)dW5vJWM*CtBNHP4jCEy%j+Vy%_h0|z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R z7bhDlCnpt;(itAzi#G8-=^Y`ty-VA_E*tmuaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k- z9OVq~^#<SpV>k=;j&`~EIrkquh)1o5rNMenC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy z{Aur%E>klj7;C^71o|QSd%M2*V>+*1y}W+4KRCb_4nCZB=%Jcg>c%F9fx!V_#`e&S zLF!jGR->ZBLApsuNwhT70Fv&Gw#E6`t@YLQwG{~D`?@<aQj=t5P`C5};FVPr;fFgT zoQg|}*EUw4J%2_?Ng;Jn0=%?~qrIDpBUDd>b4Y*>KOZ+C2|10R4)r-EC3(pJA5Skg zXK!~GPXttFKcpv^LnaoK&VvC4TZ{BSpq5lPJ7U&WAU)lg80nc<m|c#_+rSP$%!OL> zsH)u4*|xQ@zOlXv2gKaO2m&8w7A7riO|YO1%?)edUhdsBfyj}F(9YTt+Q7%5J#^<l zUnCf80zyJi04IA}gfol`4*L~$2DQ*qNFXLIdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn z7<S&v-IbDpTv$XXIzBcyGzfj;0y_A~qr=yrXTk0@*Vjm15C^My=FAxhaS<Occg(NG z908=K8~V4SVwu8<vcb_|2xI^M_TB<4uB=-Ve(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag z6i`3`g%(u>g%s{?!QI_GxVr@jnuNGpM>^?r&#Y62By@jiyWQXXbLX;IPd(=x_SwhY z_3n4=I<=Q2(E^?kw4<V=V2(EiPhs9|2dc9J><*{Fcas0Ch#1=;p*@0zGFIV~nEK!> zP#}R$cGlh=uCmhNpl!~{;=<C(Gpa@gC+7S6J24oIef#&F6*xn4aVjm$9UmQnyMf-5 znW>4YvLZMH@FB1Y=sK(p2Ero00N>`8`gcG45EUK_`~bHR`0C@0LKfgTg02(1aNgd* z2KqQ_OTyOwGAp=11%b1FVB7{Bmb|*WP*_q3(Wtn@MY!{D<`578D$bof0|FA{3iu$f zAN2dD&zw?IQ$YZ4W&%pAsHAXhedYU)V&?JFCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u z!pQ+t3<uJ}XaM$XNY)97v4g{X-@bbF{{1@;4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI z+kOrnzC$8d-ZK{=dV(9iV>io=UAy<}-NVAl%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS z8LWzkir7=gzy7`rpj1raqS!?-s1KnTIukG45Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e z+uGi0V5ASX3#r4rQuG=jx|fre<>TXpNS0|ln+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF z2g5^JMv4!KSfK{bR)ZK8t}8GkD8Ho8Kgb^<Ah=#QB6?%N8R2Zj#4nyYeG)1kK>YSg z*X}|_Fh4Q~uusd)Avrq=iwYk)av1*0b>IM8AUrK-6K>|d{riufI(=41=rHfG{U>Di zp4XGK40F5GW@O{RzMpG13p*=22MfnRaTTMm^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT z=p@FQ_k?3VH#aw&p|Y|f;K#_sz|EajTw1iUhRg)j)KCLFaOTWuKE9(6(|t0yQOLC` z$$NOXiiwJxK7A5y=|5JGmzRx<4*&g_M`--jw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`< zKXDu&Bq(%2R!&A!OC8P@00S)H>*rluS~xp5{boyl1V_AnW4*ejA}AyfWRI?%j*6-> zJV!|>3Do2j<bd0u5u7g8&{!WBJ~l48v+MGsM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2 zhtGV&evpOx6#G%3!@?Lo8OvP<g?4cA!^RzKT%0^Shj@8UoIZV4P*6@u$&Tz4o17F9 z9c4l=Q&3ivk(ZNI*fco>c};DMy^C{E)vvvK0GJn-l+e-D!RhHLC@P3v6crH@k(81I zhyvh&<Xvbkc|`@&^Rviw+)tlQ%uFPuCX*cO)YVlbBqhL?B_$=))KxIrnpiBxz))Y` zP)`T?JZ-J5rj6Ayw=f6HGkN~StGC}RE-&Qd=eT>hgT&PZC5OdmVKg<gG!WBN*U(bO zo8d#lLuwoAbBhW*d>Dp!V`*7wK(eriums##MFj<AB~=YIbxjQ=6=k^CPZutW+=R_* z&n_zL9T^&#o~)>^3yY2>I@oFJ;*`}?rR8Kr#4nx~7CL?I9NbJketv#|v%=D{BGNL4 z&xo?~E3*pfE7`?*rgh_pejI$lYzNQo;XbLXV-b;2l~q0b{N<}RZ(oD70HF)_2gE8| zKDfM;v?TJ}B_#j@a06i@Ja<5ag%!cWi=JPYH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{ zpA7I85gA@pQwd}aB)PQ$x?pKVh>D4rm>B-ykIW1Y&|SN}*4R`R8yoG(aCdOBC)!xS zb1^f=1Eqt(f`z3y2rw^ikF<;w;PaceHvVzuC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6V zHq!`})|MnY<RvEP4$3M@CMHKgTzwL#Xiov>@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hz zv(swiy}S}TdCu-UaB3&liKG0dPMyCXA|WZOtfFgd?C$5Ak((2rk__>hsU-nVFgGEX z<3AeFj^rC0Skv6B|Eu#JC@3kAoE+Rd=+0CZf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{) z`)H~Kp!)9KuH>|2Pj63>1BqZsFe8|PS(B`(ZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv z@%-Cw-+cG}$@6EEvr|>o6;Ux!&=i?Qc7UGS(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n z44hALax^8F>lqs0^mX-&49u+vcFv$t$d)7<Qwze@3&#`8&}%QLuIe8f8M`tI$ggg! 
zk4sLX`!H;s9E{9NHLzGkb#*BPMNui~3t|@q#U;fRm4#*GPl&5&8d&oRVUe$^9OCFn zZRT!CM+DV5PfJ@k`^DzB<kt?Pc{5+WdOg2*rL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p z=;&+@_(@Gm21&ereeKgM?!cBH7(h1o2lxUuC^Rx+t}gINrMdX|`{Wnqws*AT732m4 z`hBWEMXS-sAo&W53zk<F|7g+^IPRk-4*`UY&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GM zg4VXC#ih9yFJJsp^Kjf3-@b%8UEP<fs>^e7vjG2>lH#F3aAa79Mr!M-28Md?+`IF$ zx$9vSZbf%*XG?2+R$hU<XRHdIzF!c_Cu4C&oqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiA zscB%1@dSGZ3d1`gJ)^dzsl2W>B`Y&JAt5RuJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPW zSPV`Z100MjU@`hQEQEEeY^*@7K7sz=J2?gUG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi z1(M&)3Haj0&AYcpCPr%NYtpkaV&h|?q9fwsV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1 z=+1!TZ`|H^@p?1!$fIY^)^4m1jSMukHkDUb7M2zv$)yVO!OL<A@`@`;K~OEMt}NfU zF+MZZ(Au1llNAvY<s0bF@b?V}4US2SPtVQCEXYqxOO3gdfLwk&a@BC{VCc2CcC`;r zPoi^@4~&mBcea;QRi<QT1w=-=crk2TD5kb{dS(PoeM1$jt}+%UrmQKV?Hm}J5fYQe zb6#hUC>}#ereA6`BqvDY$ic~Fsg;BEozq(x@7A4LpfOrn8$h>YWThn~CnjEsgGYZU zIWZ$E9X4LR+<bLu;pwv{Khq)4UOb<forM3^)mPUtt&VB1(AL&8IX$+1Z57nW<@T1k zkN^5)ZI_#-X2u`?juDtIkJ!Ka?$MJ+;B-?n6Cks@dpiKS@Hsft3(dlDq2>3#{L{;= zm;d6$i@W#kKvUrUpn_&*CuXip&R?AaRNsI2;MMEbU-TL{0l569&mON|zY(6*LSSU_ zC_3<K(8Wx`MNA?t65@|3I_?$F+AU(ZTgdQ&s<95<)WVrU^A8HUlw43%(>*i@B-Yl` z)zoqMXV%==(b3;GHa8m+7b7h#b?Wq~!$*%G3Y#el(NEBFdIoyI5#e<$O@rej9lc#G z?X8V%Ep45bfnevB76I^RW=u4(4hs9b4<7(rfvAT@2l|KlhDQfy=4Mu}uiaw)pzr$4 z8*4YNF>U?h=Q?b>^WZ)Nuv-)HfVRK?=+TYaH<#8|7gny$EiKF}&d)4dnVFxxa&=*8 zeGSd>vv%|5-0~8fUR!5-O;ba4LtR~SBjCAfpdT*(GIH(h$R)S8eZ2N|=C$_@PEKr1 zJ2Emo1;}sg=*TH6k4j1Njf|oC1=_lLm^nD>TUu)wn}}){Y1l>AwvUa>tx8}i?7Z@v za<;nkOsA+?N@Qhz!+3Vx#K7dr&lvB`cW(hCAOnDS=jP$LPlADrSzfuiaqH&OXHUQX z(rlR^OJ2Qs4ZiT;(E~7$yQn>Q{P6M9#}MuTFyIqfW>!G|lzjtuWYLyx#p&2~I<~D& z(y?v3V|8qItd4El?%1|%Td(@wnL9JzyYqeTRn?b+ed_GBcIB+Km;U{_HN6XkNFd-J zc~vElvVRyt=nX#QzXj3)VMz#=BsBa6l^RsY@A(9%=m@~NfA=uoetZ;wkcppL3f^7G zY){QZQI*I&xW@eQ<<R+EW8(Azt^X2@iKo)0N~)S@W^G-VG;bD&x^e3MT?}MCuZ|_J zZgwdCeGnqMdHDM6hFOb34E|SAdz`;N(Dxcga%3P{_!UwCjQ|w_NmE@*r`0<nMGI#5 z_m`hw61P<E*QOV{a>t;0?VfLM&jA$Tg&s=?WemDq9}+etO}0K{DJw~=sp1DcVq;D< zW9@-sw8PPa>+ZgLP-03gJt^hutq?`PVm0OlP5~}~$6@x{!Jb6#S{jxytE^`S{+{LI z#*dGk*5Lg1-l=@$=qYhvN;VC3oC3blFU$e;WynMw4WB8#nt>`IKB90lu7aM5z5L5e z3n2JK?55fpcO4MdhQaF|Y%=wEZL`NQBuL=y=yNn_;##F@Z}boz^rx~mgQhMDQ#2}5 z1UggX27_U{*pz;9HB%b3%4ndzP@y8&w{HvuK<JYCyE~GW+utcpjpWMTSPFBnIbH?_ zVMa!@J9x*#k_Jo*2M*F8W2;gUf$>5*F47LiY}f0RmzBZ#YFq)Cw|CJcQj0n7;M**6 zC6D{y6yfvnRR&N37cq~>EMp1pa6IVoXgCUAP=cPf@iZD-+)yc9EvP*`4UJ4)Z4qWt zmRS=UQ{K<!xBdK@u}=hPSym^fJ+;NJdwUT56w?8H=X_qYL#$vV5;~jW>WgHwMW+k# zwslsJEqmfO3sgmD_)D^sx5Wy-IvlKpsb{D|HafkU;ODxk%QxK7Vy4Ngv4=~xK+Q4P zBQbi#dU_48QARaryFWG)j+xz6fAT;<1!vhh;l-$!IMUp0rA2#Oj4YHtX8ka?%&mEM zuyj#X_S9B)QdXB{Rp&Hhs)lF;#vy-6%P^vn=;^&%oOF)|4oNQwyjeVSM-OerK|SB6 zF%*uz{U&+1r*Jnbgx&nHH+_3Zxsvh~k-jnIJIvWi1`p+ynyokF+u6;`-3}<slb)pS zYh8JER-Cjm96~QXSY@c>XdtfmJz`^lM+#S*GhL{IIv6tQ=gz^f*k<Q_Sk)SZb;Wo3 z-l;N#u$0<NMS+r0#=EhC?<$*KK<8+&kAX(3cQl$}$$8vJ&XL0eqcSw1TMs9mwBTa& zc{)P($EVT*XO0Yebv>xtE(RJwY+R<6#`;1?=!4!!Tv(;Q9?|B+-DBgUbJOD^QI^+w z^$H*76bCyuI}eMOv%OymRC)@Ti<Rb{hcBNz4nUL@@rj|J-REW0ty^F_ohqFF5+V*# z6Rd4A-xd4{k<d4Jz1icku5`KgYK(VlEO6-dHrvvhW%^m}5-fg?hersB{17wfD|tY* zz44Ql!rCuue5WhZLssb2iIM;{n>DWo%`~(gD(e9IiaD1m3|8oaL`EaN3OCS0lcSVf z6b<Loo>}Yi50(8sL;)NHC1-PNDCJ^`YW{EJ$cK^NCb7Y@IAC}vMUD9iuB$BJTz-YH zXA0UCa=wCt{~<3#OA<^+yt$76{BCdMY^!DVImf}G&Hh3r00RJrtQ#FId%URHXmK?$ zF|jeh?s;pl@~vs`8G`h_1%kwfPPoBsiPV|dByxX-e)`mBDX*ek$zA#sx&nA}w~-|r zFFz<Gzz{+K-uNKAaUh-+7BJ%82Hs8=BbVLJa!GB$Av9W=jw3X?b&l^wsM=t{=e$@4 zvS?np6@@w13Np-Gx$XwL*LN%Lb(;04maOS&6}3j0zL`Gyrchbc9~-x=D5}Vi1YEjd z_m>-a2FH!MpRnOp>Ls*HsffG76+6`Cc-{*{;(`Lu{BlMeTm{yjGNYHaSbAh&j;L{b zVG}^$uki6Ps`8Rke8N{l|JUGy;QV}%Pk=!6UP69jWz_P0LrDQyJt%ELQnI;asTMLF 
z1tw-?d8NO@({IA(x}oJ#$KTbP%1_YRS$wb3uajMYpO_3yxn4>GjPLuX9eMEIwtP=g zD&V3eaX7ProCCLegp1@=9QT`)Hf-0SvxH}rWcc`yIttMfMG8$Bd-I0*{T5hV+dJav zdn9ECpMF=$IcRJS<RzO_>}oSFC>Bn0e$T+-|Gq%d?)KJdCc^Y8(co3fZYdkqdN-Co zv+?&|prFF`AU*R^BLJYvWhU6>>%C>M?yJNp!04^>=`#rV2Fosjbf~XyU~&8{?+Hqn zFhf^)Wp4eQMTMn?QGk<aV$#QOGLx?@B`63e<|33dHEUZc&}E2toShW}x5gGXJv)pB z5pcDvh~8*uqy&p7S6M_S1}2#UeCD`-62RN(bJ*x$YMlIV(+uYXbW*S5<Dw4{n4_@` z67xLh!}rF?->1wRed#5_BY7zRtI5H}8^4{xOrXG~1bcg=4$n!O3isk_nRc<gU8+j- zT>59tv)MxY?quI;`x{Gr&0uP3gn0^e?2cA&5<#cuDOOCI-iT&jfqI&5fAoC=Jhe5R z=4rdb=p6sA^FZn*ZkcteY2QqZMZOOB#jgDJ&#QoTw;_#y?saB_cJ)mbcMuAYb8dc$ z`i^HNT@Po_4^algu1>^(KVx$n!yB`MJS61&G)?6>B|e^>H1m1C#o}vftIr_DF4tO6 zleXidqnmPCkf)fLS-g&}hyibldOj7A{pA}TfM(G5>grY)abf6(m;0<xyuzHoXd=Q$ zYk{bf?8f1@$t;FSdNDE4P(8GUnnw5ccSQ4|>ad5&wT~;pW3Tsxipe-g*0&cNr2&R< zT5Iw)vB47RGMce58DOI2sz>XVp-<3x&dt@Gv7;-8<jwT4M(6Z3S{D(k`+QyPG&AeL zqLUqTMY?)to5SO>DBmQhq;a}gWT{e*2H$7k%yw{2L+wPr-!YpI6|acOpmRsV(D2Wy z^p<(8)zEh-7&^|+V)g);4Y#b+SE_UdfrZy*_TAq}6j_BlO^uIgPv#i6>TaY~Ud9Vf z9GZgS;)aqg8lpyKi$h~$MU{B{ZwecEzApOi!0H5N=lS>7N3e6SGf~kXKMIfsRvgi- zF)9g(>(&8o0PoV<%gauK715>8ye+RMc$U7eAYhNL`*u{#HIr9V0R|@Pi{>BS?yN4a z*PHiOiYi?`_v@YQx<fMv*~_(tTU(z`9#yT=LBdZ=0D;OGB5-!{u@1a!Q_%eC%Za#E zG{L&(6%yHVn_QMon<J)z#?+!O=2}Oa!z;Tu1NoNOc723Y1ZN9p*YY(y6qMNHRQE#r zVKt!4ak0{ReTj`#eSQW~lAoA@;Z6}oqzXaTI|)sVG*ow-*0^Rs%-M6M4APeGnZ3y+ zMmUwy0HtKG7?mw-!AZRMLaXvZhuSvF<MBgSSc-5Fdt4{?$?-b~&O~y0-w6M>cZ<%4 z^f5sSPrmG-Vy<W2R&UX{m-J|W+U#yI<a=l<(8#0CvIz8b%!H#-o*nx0XJUGz4_BEF z+$4zY{v7Nb`ruH0Y!d!gH1Y4vI*-LloredP=#9>%Itq~a%k_vUZ*z5MUo$(ya*ygR zTVpJ0FrP-}dbXy9dRJH4_4fIVS!`rvNKH^*sI4yVM_pA_?z8!|8StUa=4gb9HLz@c zx7&-6`7*BM6#E*53TwJ1g0)LR9A5(PV{Mo7xjFeZm)C>d5$37aI*v`}2Pfe=i<05f zrpq;-50w75vtCa*A6q$}&6NtR*$7y>gxcD;$dG7?$}v+@GjQ;yo}|JB^3XrA;G0UQ z1_n9ihy8thI;y%W3vO<9mMD?SMajvEit!y@SjX5Fc794yYETpuCp_FRmM4lZ5u}Q< zUU@r$s4LBGBwuYUB4gUQ75ox7Ywd0?ZaG%+q3JDfX*(q2HQnL*uB6)5INNns_yrgp z;Um=pdRdmA(bV(HMWh2%@K8`9S{spHlhM%Y1TowWtI)JLYdYI;Y{cf)5lUtdN)&&h zWyo%|t9a4CeY0dd_=?vv621eu?qsfL>29x~B6SLIEGL*}2N>y+Pl6p(Sci_fUNqJ> z+2e14mWI$37CzmFL<a@ZSJ%}6Q(I3?PyKlL9S_iOaZk5NLjwH4wo#K9TpKsNA99PE zuhy1XJRV4RE>;I^Ohg5@Vcy_iO-+Ax6Ex3MuD2oJbN|jv@<ftAXm({-+UPiYUCQx7 zMJ0`SbZ_N;eYgy;oVriv?$=Y+w!2(*H1FwKkBp3j|1hzY<93w=)F(wnxrmD+)erRc zf~s{(Up47%3fi83fI&cTVVL?oXlQ6Wuxv6xF8=QF?(p$cmX$SSxaHmc+}SbA=-}fs zGRl6h9ibKkhwP`w5l%>=uN?RaRh|+=G7+P)9!a3(xVt_2P_rKZ^We{^$9Kuim39~L zwB^4*)9_AXf)C4a;pN>TDzlmrsTZk~L2ZJaa~`)Q&`=w>B%p^<i{m1(>{TJSVS?nK zp6D(f)SD-Lpy0rty=5o9yv*N7)#*OK$E-AqRJ<`f5wMMqjhmbjucZLrL(6URg;}2l zIP91lHUKDKu?`9m)LmI=A?WWAl^AZKV>Y(=Dgk%*;1(Epfo?AV20-?`PuTgs;fPv^ z{9Gs_bY}-n9iq3XvC)5FDpJqq7G4CYpg<JhTpXH>g4AZlL5ZiMgRi6%kI%zl)%)E% zCM~2bujuspw23&Ury3bYz|I$zy89Um2m5zgiuZj}7jfcrEflf^mBp~4BFd)c?YOS# zt{u_c(eZIf_X)7lw)*nG?LufJHU`ZW7i*-dRAPbwMo#uN>{kI~V2nJOZ5ZWq{K<l+ zyO2o*s+?}u?Jp{$<Z5Nt5hx}Gb1ch?3_XEn&FO|E-uBjex&#{7GBI%bwjj7?r*^a- zMq}8^h?$*Bkmp&nRk~gL1>c_~SE5FFgF_@Pd10cU_RfK(!E1g+q&XF`32I=msWXd* z4k)uP*d#I*Emc)@eP@l;PpsmPy3Ef|x9pylduL>W9Gx*x9Pj-NFA%@kVpx5~R9`nI z^sT^exaKZSrY}39OlVglk}w+^y0qoA)jZ(6+1*l-;eemTq|oVHk<&9oni_&gI0P7= z%7|$>Y!*~u<zr5eeeoKocz*o@1Cq)jSUnFD887awlbc@9@bJMZ2lkKIq5Zoug7q|% zL$3kVU*P#^>>%yqb$GVxfgW@^x-Z>V)eu-4>j4CKDBz1yB+J={RKQenP3R8@^lqv* zVc}YuQbU$HFhkVip6w_&fg`-gA7_4h=yn5oG;KTUET&a_fk!Ifhu(?-2$H(}OwoF2 zvGUw=jLn)0Eq2bkhb;$u?WoIc=;|irLDxMu3)8yx^Q+nKxBWeQ7ERU5T(4-}UIMb% zX<!cdU=A(-4@Q0&4hwgXF2_uw9?XDJcR51&dm>d9sZpyHdPM0cIg%CAJIc%0$=_Y} zV2ljp<TuE-C|&+;OH#NI$Aq}YXQx6fm|tZ)Y$s<gJE7`@@sD6NSG5}IS@reQ`Ft*D z&0ozfJ3}}z^FIdOL*(J{X=nP6yZ4Wdcl5C>LESe!pNKg*k;pvdIbE|l1eqoij@pb( z@=tM3p$5KZtF=C#?Z4y=N;O?Hw@&}o?mFEYMg`8@57GB(YHNDAe=c<Y!765EL`Ejg 
zx7}IVoLNTj&Mz+y%}hhy_*#*Vw?|0IyVB1wwJ<(9HVl!#I{=-~4v#qiG;1EWN1MI` z;oD^;o-^NfoPxuD+ovTZa<4eiC_UA4yIr5u`{nttv&w=QsSaL`Xt-8D*SBw-o91EG zDD+A(npn0NCAm7pO^yqdM5y{roWQ_?>@a|up2!*D2<l=w`xAbbuF@>e$_7OF@(lL~ z6&*7zC0<QseQ7!9k9+*C);+Jjw~LpK_!Vfl8#Xr8)Ib&^;*pNe9k_;x<rk!fEuzt; z%Rdx<a=t;BqZ<E;h+tvZR8Te*5f#mC1EUMzY<b*S-N)rnwYAZ1^YZXE!RBMF|J85U z>Gdh3E{>!btm4duZWmcz{!31TuY(a$fv!|f5GQhMj8<FBcxh@6Jj@ujW2{FQDps$H zIbdOTFnYY4)~x%2kZE&zm9L>5{M*IQG^rgk9^UI0aL3w+y5=X*IT<JTJ0SEru~X2- z)I=Zd1^Ty}pDbMX@$1yAEYZJe3V76P`D0n{_zNXxZ%b92M+Bm-9inYTLMY7^E_|+E z7ZQHKEW}&VGP6@SVWR{xqCWoeh|aX$eUM8!n$rvkP3^5LOU(zyc%Xc_MQTIlbz+(D z;JIJ9);1hlX{nXbg;|WWcu2-Gb_n6tT@7Wug`DXq(|f*{!o5lWaWB~1oUi|c?NXvK zVbNtzRs8OHxEd}}AsPn8P(aSS-aX|ZA-66C13#m5#zIk6oQm3VyG@PEXy|BkyBvZ3 z4(_Fqfq}Cb5_fa7TY!T+e_IM&-^nh=rK4kAc9Aa(V0ybG%#QpZN5)P<|A(DAXMJ!m zjzfC}Pc{z6Bgh{yq_JC&0~P_p%tInMGuy|r5lLYsFh*XH<*Z56FGw*_-=&~%;}X6j z!Z=>tG~~8!pQqb_=eA~oB$v**<!zlzqyUZo4&}8Ly&1`6e%!!ui}EZ`rrO+7OXo`z z^EB5>AA5RCYd-2z>@jbZ5eq&-raGj>p<3&ykKr4jAu`Ms{}7g(xaNRZ(n6q`2eRVE zw*Fkt`&6?5_2&z%8Dkqnq--gQ(7dp`>EVPOVwkTiZ$Wi)>Ai!8-lpnU)mn#|idH1T zG^tj3#R38E+plN3705*H2o78CUS0sOY6xFb`21*^R4j@hDirRJCm0$_&`8zg;N*N2 z)~aks>Is*F{IJq<pq>5T+7<d>I?3R^r|h!Uk>7~G6LpUx+>ME8y2Wa-Few=r1klhx zRt8%!GR#w&jmzP$_r381=J>5*WQNee(?#N?<JnnEkuu65*7S|(d7h?Wd5HDLdx6DX zO8}P-A(2=GXYj9i${KN!RA*v*yN6;(7zcZvUl*LGTr$N|cFdz-WzYTE^|7T!ATQwt zOVf2iU_la%p|iv(ABVKDBG4AODxjfgIT*F_^^$_z@dV<=VhZGZkWrVS9-G46+q0B4 z$&tG)F!CWhh5BR%KC}_stTZpV3W!u?Z28^B#M#XVl^2+S+}ODAxY^FZI2W?tF|_xE z7CYs*dk+AFo8aw&xaC1!ff}1Eulz$XK)U<v;Sx}4du|?(X=v;Y<27pK;`Tfz@u4Gr z+eS-A2Sl#w_<6npbZsB1scm%)Mp>V@(W<`gzaj!BE1~tv4N|JAFFmK1C=6uTdZ>cG z;g}))<<V|)b9=YUk!*P4Gvw*O4f@v?H%$&n9Ot@|WUS*;8jq2A*VY5T>dMN5mEyNd z<U#%@0xcF5D=kxr<$^VTXybBnNOwUZa9Cz^<;bX(pxVl51e4NYd;Qz3od8VRI7x0H z?o#f9<Dbfe^H<8Xk#y!M5OGi*&j)RKZrq#Rui>A$(;JT5uPWM6MtmR-7`iDy#S(Ag zem=1oN!q38v|J*x-_Izk-yx^(C42Ie+54O^4ZGKF-YB*7b5K-@Ujx+|Rf!arha~Z0 z2XhY>Kl!&-jEbgrV3?aAs<?J=6DAK2E`Y!e`UEE%LVh{d+Pn9b@&II89B$MLU|~FI z6GN7r4&vY(=?rwsPucsv`7ZeCaB*>g1%R%!@c^SrRpqpL9X0j`cT~pO8C;nx+$`1x z1_<N+OIzF9*qQ0o^<T4NAX^Xy;i0cyu@CSw&Ze8eJohF>Ie<V4cOAstyy?OQ62w<r zc$gxX85b)Y+`K;TIA;+A--MMqvJ2_+>PaZsGY@vffKL(DW;kf+fWo3bbj~Rd)gl6h zkEn$J%ll)1)WV2R{sxl&9>vtYT{F}B_<YDtX0Uf?o~P^iAU`aK<CUqPgP9$QReSR2 zc}Oc*ukBtRx5=m0ye+>von?*AxK?iD0Bz#2zEWTbyTjKpnAmfK-eYoCy&&{#rtwf& z+U@{c$%8_RgF#cDk~012O#5gp_L@7X<Mp8?ug@2t&~4`}L}0Ejc&R#OJ5a5~DBuDy zJ@4Saym|Hn8T78OK}sQ)DcEJ?kJOys4zBsV%LvpKlo|sW8MK>Wy%mi3Yp5m0!4>Hf z&Uhls3NC)2fae|(1qD=Hz|!&VX~)M^te)>B##ogfi@)8<_^+rO8Kh^T;VB|aMuC>< zDGqM`19{=zQ$7#bjGd5CkS;+mJ5Fq954o!>-wxSKZV7m}2A6xld68T_gsJ=#)64g; z+;b!z&Zg_5dDqMubi5h%QUtD*yV5?(3w29PiPtx=xX<$}k$tqeI((%SD#Kd2L<f-w z`s_>GRLw5<6#<Qbs&WS@jZd9k(~v;;kaDnWe6KbvMxPDJ1gZ3Dtv`WU!(m#}65@TJ zW>L@{aXfvbwA7UJu^}P2UJXAmZ~Ebx3yG<yfJNq^ol($P0R8UUzaEdmO856`CFA?F zaTR&~qsJC6Qej|R`}N^G;JXVW>|AVYY-*PaJTD2+6k>6j#4u0S$AJ|RmhUi)l@+!I z_Le<QiaX><NFb!1<Tr<0;>hmJi3~hEyq6buB{fW;Zm^qaQwWGj;WQwYuaa67H5FC1 z7s!v0D-c-%jF_I`Yo;S;A_5S-;LpD2K9h&&XawoWEt%_LvvUy`JQJu;LXvrSWkQnO z>w}Tj;8&IOXD3gZ74k~~pA01)Cy{03=bc0a513P$A6PN++}Z-wnkzM&^>(k1ycs}v zl=MVq6TXV!(yi`-?|hZ#RA$I7=MU`jpUZ|9SoaOh_aJO91lS%nWZ~KNPi}eDumjuQ z3a8EP-!vfLIrq<G&0Fy2?NIxX?rF=36oKk2;O9c9eq}|Ok^T>G{i`ddD(m8uE_)+O zx7&9&07c*GY;PQ31PYwJ8E`~sJP)b?@rCK(8h!j37uRIJ09Zw&Dk23PLR?*8TS}NF zym1T>+UV>oZ-MrdoR}zTXE#N8yYE7@k@Ub0CB_4E&4rTVq~d1pnqMA+=Ga)62$3wS zE~?ULcQLWMnmE|!)E1&VK)j=3;Q{<%_>T5=NYd+qAbO^!JL|d@CpV$61HbaX%l=}4 za#@&RnE^Z##rtlrLO}s%xoY*gQ4&&%Y^+mL3#!00oIyjGa%*VgQXoiWV@gnJW)HFR zp^s8_%l8DpR*~vu_^}7RqEqmc$^`d)HupMNYo&pCMd_=h_2*65rT68ik*SD)3x5!w zqADIW8iDhok$TnBTGC&AUF-e+z@!3`jPw4R-6*R)-d8spiC=N(t0FWZnI(ab^0Y-2 
zWu{t&i*B?%&M9#KDUP~EfTy$Z6BCJ19&_!i6pdt>)@q1Yu$kH5f%i=#-4-4vDpBNH zjIoQUi?y}=?!e9f<ex-fTujUA8$|IJO3|*#^_Lg=Y&45N3oSi!v8KGX?0e)#AuIn= zrW(GN<%+Fl4pTL?#!fIesF&tf(0VuuNG<+%eeinh7Z|V2J!NMtEg@$ecLj@YvbyS$ z!E)06T6TeXazwEQHs2iPn40ET8F`CLZW(S^*$mZpw{9wms?NY}I8A=c<2lycjr5&d zoJ56jMX~@5=z{ZGrLsrw&)F~uO5<InHM;Q4B5D5Yo!+v2o0HUb;Iph(8=?6~6(f3; zL*dlkf~F%Pou`1x5{=4^V1~-R6Vt#KavRqT1#|K`gd6kAT0(m16^N7+{FOW};GrRT zHMT|QV_R&e>uL75g{JE{piS@j{rKpz?70r6tBL%!A-AWbBq?(v{9xhjWTiegu6Znd z>W}ecHKlQO@bf+D#)QOTujXvGxc2H>7tNFiyeG4j*dYhO;6alk%~C{~2+j+8`Yim` zAan1{i{N~MGd+8BdB^7!)WJ`SALnMfYO8d{-K9s(ddn$tqwfj81!<Sw*v&sZnOuHx zMct|p^%KhH;P)~9p+gZJ-xV3#B@3nUl$`JYhes%B6;Fkv))bRWd$HOHBme3_L%qIC z0SNMP@K&%fVY@PTyt=CTjJt4c?OS4y<+R&BJRf`iZq>D8n5Sn6^RBSey(gljrzfi9 zsH-AlsUY#gR94(D+!dUexrIyK=rZ5T1kAn+ZedR^Q3ug#iu9ISta9T#tHWM4WTNOB zhzhB-TbOX{kKUoG6QP}jn~DC&5TCQ4nlhTjmI=$tmh8dYt?tSrC-IDdJjweAs+I3& zSqvOD>!>sLs<m<njo~W{mrk+*;%n5{F$@MN;92R0rnH#j;C7j%`IIt)oNg?HbnWQ3 zuNtlK9K8=Y5+z|R5M1agiQ{z1uP-U_D>~1hLq6QNGfS5rLH9>`W=3k>t77hhgQ=}; zG^tt+G^I)~;fBn$0jP1<$V_v-NFIo`fOaa-pIWpVa61cp)N?v8^mA9WnagP$gg#c| zLtAkTvxSMJ-23*FF=Wu&_U1<`os7Q%D}BzA*QIDUn!y=;pVx05%5M;eY@1amH3NOE z|9p@Yl-di9QFgC&-Q~JrRnNKw$Wxsv+m9Msq}4~5*1QTSN@|2-y___E#hIFIIP0w+ zJNxxRe5xPpQb-&Z%aw|qg{Pyryt=TmGY?EZfD$}V-)AH@>u~t_d5_qr3%4Mvew;8Z zc6ZUN)WG{e?w8)hWk_g4b0u+EIVc_%2^k*|JMvrV4kmcqKyQ$#;bCaIsOc}Y0c?jd z?h7S*h;CO|TiSJR$p}iW{Js2>m^=xp1J%N?-{7x>4U6#QfW9U<?bnY-lgQ_=v#}~8 zp2~LMbSt^NPuI;umaGU&a*qXvzDloS`TLok;x`m(#);uZgL%wfpj>F_{a?an7LFBx z_&;qwn06U4h=pajc(@{vPJKOdeUPK@icCz61nX;vl<?a<*FpQ<40lMCSmJJA{0Pc4 zwHO}ni30^5QPV$Xl(~FD3w)UHp7=lyr0%Cp+r+-(@U#g|`ud8IsvdQD=ItC2BO7M{ z;&{OD)_(5+a0tFeqD<&_;q23hs<|t>4bBaWtebgDI0Icg^f*DsZu11n_IzQt_F<J( zXPug}O>6ue*E{1rdm-NLKM`GYpLKQu*Bx9ujL$jI@imOaL(tB%esn2+VkTlTbj~~e zx^A5_L|1o#IRv{dPf!Ujw+^nCX>Av-1PmBOXMYf*V|r=EX-Sij)uPDB^&*|!(S>0c zr)Ka3oDjl=bKEIPesiIZF_!Q$5;mite9Jj5(G%KTB21AsJ)4YuW(ev7i-+zI<^{dW z)X`1RmIwF3(1zAVpkqY4ON&gS@RMdE=GuZ5p(8~VO($;SzV{hwIf~z9kkf_=jvlh% z%Rh4>7@fK=KTvTMD84Y=)>E8fhAky8vLT^tG~PYP5P0E?o1tEMR(XUmzl>>ICNpZ2 zO8P@?_UZa1qEGT+GP7$f_*75DJ=j#5ayXfK0FLh#!J_%LxOrsRWHgR>Upyr|>X{-z z=K&f!WAV1}W)yzH{94MQ_Ik8Olk*7GdMur?(TSnfmS_l|d?<JX6{xYiwz9S!DW0*3 zDmC#wSnxDy^>NZXRpiFmL&y^nj0242b{wkB13D-U_QVQIyUBo2J)v%4Ow;odg9{O+ zT8Ll@OKhA`qsWJnwvRGAjX|cyr<=F=RXG+b$mkL!S<6=37xqxBVEYnzXY=D@wq3x} zK&*4u63i6Et8B?=ajf>55sekkg}~c+N@d;cX$wR>l7Zv&8*Vo*>H|-H=%zNSK&koT z-S9(MzmG}SXnEb;NF)R9JV_Cre9o66NJ{Q~u$%pJhXlOa=%M903H8vlr`g<*_|ezA zo7QyUn+EAHUnmwa=I*}OZ8=1Q$7mTqh9gxI0J^KVkpRtMup7Ybb3dss@X!{ptcO>Y z-ge{R^2}OA%S{h%9>Nd(hFtKNenSndwp;7a8>c9g?{}z}BFrj6y}mZxKQw@VMPOu1 z62P`xaqv)8Ys`1}G<On~c^tX+vR%i0XJlFVvf<2}l>9KtL$yi~eDJXFP({>R=z4rS z$`MKD<zASX4yyTzFj`HRc-ZM1KU+<@1PLB!x`)Qjsz-pA@>@%*Yk8SZQ(;!<Q8q!b z0V3FpC#P2F&l8PASye%KCrG`NrWct3$Zt&Lnpb-d)t#=^r|+1nP)Q<~h$Wm~FXCM> zx}Rxy&?%P=DY^GXmJ1!VS<T|>6PV%Qq;09%c;j{1q`!>!q^_E$Arp1h<u|W*{8R!H z#zJQ5`7+FetwDtDFhT=ddC`)gDIoBOkdQDXWg~D?j56wD_G}i7EXy4m{G60@92oI+ zN|@siyz`sl2`r=R5c|^%8N~exzQhE<yRk9cof^niH^s`DFwOMpO19VmL;pe_c1_k7 zaNA_nW;SVfJUG85I3up{wRV^*d`I|=g}cl>skF5!$(g=<!|lbQ{NUjH)jm1g`I><D z&>8UdiXhoTvC1UcsC0q{`Wx)9=B$Wq`AKEz8Eq+wswRD5g)010a^}Vs1*6!D3}iHk z_Rub~Q(_hA-7KNI*}+o8MEbfKj>)~t|JGhcjx2;XhJ#u*$YG&`cc#&!f4pA?^i2tu zhhl-6=b*Pv^VhjH+pnt^5iKH5q;A0{8zF-Y=$rW5**ag*x|DRi<(Y-0`E@o{PC$JZ zSEHlJ9p>E7f%IY*SKKY^t9Kx`BmHv8@J5Y-9d{_RpgsD+LV%MT9j%`_`X9LFB3I*b zE&1jMJ*01!tgiknN7x>we=G|Nt$ymJ-EokuP|@~{0D)MXe*WBcaT(8U&Uw_pC*=E{ z5vuUS4`GHpE5Fzbc}K@g^bCC1IypJrUM~Uy6BKzIn(Ir?{cgE^?TCEXeP6zyFQAsv zQp`VSPrbh6K6erSu?fS`C(zcv<ie*3i9savUK%(QD2ef^J7Q|!FFA)k+s}xya4(68 z@V$za8+;33&R;hyJiNo&UbgNw?U?0oI30vGCPe2W#dnly?_i1f8a4sw1t)qI8TclS 
z6(5_Cp!Qoq5ptyEm&B{JmzQ7*(l;BEk4*y6PsN3RNj=T+@!SAWbJD)q_$eT;JDhKx zN>gKxJ#49!kbMCf#-Q1uKLA!j`XMsh5iv6V2`a!H%C<r9X#JVA1D9(17lLKXXBzK$ zePf81x~TSb_uWdsP<kV3sxm)Ys-Nq+n>L`WJszC6m#egL@5WPR)4^h}w>L7R%Z&A7 z<zA}<JMm}krW>~7MBPgFbM|%*>{3)&b*8czUq#Ro_LwfnSWN_Z2{!s_^;=Pr>*dC) zNOCZ6wU)S5v>&_836vqq+UWSV%IUBw!~LbQ+2{B!UvRVf-7wf9*S#LxP2Yu1hf>W% zf?IP^lESd{A)z<JPasMB&QOmCP(Y?YeL+gdjSqza)07lcl(p10=CmDKI!cO)jO?mi zf50vM{Av0^D(W+G>Fb^G!nNL4G>4d{)!8H}91xq5(o;g!%{An#e_wcE6H2~CDR^cl za@A~HXcH|i0UIvouk<b7LlF8-5-A{nF~D&n-?hD^wX%~ksaZI&Q`nEh2*Y?=EkQ|u zzr!VqK*?Tx__~}O>=2k?U{F2GBcUa#V1ygKS_#+^nvtf>W0-#fnSTe7sCTbu@M0{= z-eBNdr(lGr@ih+CQ#AgeYc2fPC$>84^qED;VJkr7o?oOE?NOu+x#URQa#h@18XGY` zBzjdMOSP?*bB4l>%zPzFciDD(hmQ2|=j1)~m!t}qqY9re^p8k!1XEdhB6Qzxe&WZ( z$lcF?#?S<(Yz<#RU!*y20~vfq#=v-Ze0(~-4q%kncWL9AX-WW~e+4RdgV{Nsk>Q5` z`Wr9T2*4$d86!<O0mM}jsuWJkE_9%=0<ID2w}k3=W5nXvt2DC>zQx)NZviqSpPvx| ztiEW8kN7eSQlAo)Qjio|6JEJ(Pj~oPxiwHAtwJiA_JWqLf&fpq_Vs7ME#*=K4mj@f zLe?TYKmyFyHLQfeCe5f*w2{%}euCZMLn+yBnoFu_F8>Bu--GNG+=fj}*V$EN&=X$r zOSf)>sy)Hz5}l!g_ADD^#q~=7CMwONyO}L{vJ=kAOoAO`YW5YK^_5E$hB5UJq#*w? z2a*#%DK9q>I}<Av8#NtjH)1Ljs07jykq4jv=}MKAEdX||(bf0O=H$XZ2GtX!ermtd zThJL-T~E)-*4EM0)yPauSd4;_tXOY)O>yK8S3OvTNkT#q28P}UUFhhTkjXEfAE1F) zzAmLcR}}TAL&~hG{V7@o($e$y@C8m|T=OM9YWZLE`R4F;Xx}3&+p3)8D$6uN>_=Cn zQoo=6RKB%x^UIHIigXUEWcZOH3bM)M_6|(+e9QaVrSoA+bE~#^-ob(@83J18&YA-D z`*lCvm^PlNGVz4IKegV$Kt})r*+`ZvE=BBR3-5-`b8s$lrPD;yJUA6uK|+ks7yQk_ z@DSRMI99UB@zLc50U!#p6flHr2W<|15IsH?9Ex&<C<yTZH4kt3wW<^n?Wch=K2b|^ zWlnQvO`ye)gASMz((`<S@6}qkC4lS7O;h2E6JeO|?U{CHMxW9Qzc3%`3>zg{uCg5; z%K9kGb3au7+{W_M079e++Z{Z*-A`San@?q@)hcQ9L8wx$fmbpPf8WUx+@yh5LIsy; zC#o4DOuG@DOC_gRvhOT$E0PNEoBW&XF6V@YWy|!R5~BfPa>Dc~qKEf?65x!eEP$5m zBdO7E5FF_-s}>b%B&8@yP9bHQ;#2gL*;emQE+p^KBX`UF(D+R<=k^Zf2|!GhnFYM% z@*GH*owYXK86SJkB?jIFHqJ3B`XLZFBRvNE4Urz80}w|(f)I=csOgQ+fCCfLe3;dP zA_2BnXfa1fq<!?&q<==IpttaF?%*Oog&;zFKl*z{5&dDU)DStjk6eu60zjrQ#E2}@ zep}*T4R1lWr^>b^6rWK$d0QhYcp_0%7gw598hOr#$M%$+I1n606sW=A2KF}G8n&~h zD*>(&*VW39;k!89?;onh)@yV=9E0mGXJEBRYgbVv#m3$x=~F|_mF#vDaz`|ZK)0=C zs09xJ+jV-ZhcOK`55qQuI-n&ij6Q7^Kj~_Wr_yn942|=Ojj|2Qf3Y>qHPu24rEtfA zc#GiRt7A4V0*zr-0XoJ61bQS4h%q>(LOp5T;_U8bDWYDGVZoIwT22t!FCXYgX@L1` z<~tl+>6idd&~#uGi*L>2o!=8$lp%<`s{=@miFqUv&%)fd(IIdZ<~PkKU^umga{4D5 z&=Koiw)9=hxFM2m{fmSx%KRgDzDQDCYyP>WxB7gf%*3|8eN33Efw7PPptIC0jLRO1 zn`gE$^Vz3*t{$!*{cewaJTC84XoGO!PCwZ>hBac=rEDfU$zrx_F8#(0J;Nn@XY!~S zs*siT2d`$wE1a^8^Q5AacXWzXdR$E&?}EI{r2Og}p1-#IG!hr+Z9s8jZnNJ3MGMOr zpC=x+_ZBEX6KTU&cd=XEm+7fj0Oto3haWdEJE~R=;DpzGI28bx@SDkzfhO~vzy1mZ zbg=W$;EL4mj{7YUOD0a$aF<Iwr^T%jxZMCPA6=OjH!nR0yXn1=^@D9PI>oQWt{~Qs z#jX%(tg>zt1kM~>Y3i#nBX=tjcWDbJ8wK}mtEAMQ(aAF_8%OC6y^UO#d+g~}g}$8v z5x?Fg;K1tnhzNHzLFyR-Z%t5HjFQPqk@{w+Uj<L3E7omsbr??7Lx;|5t|XP%3x*9N z6YXD81z!n!Y(*S=)lsg#JT*DlL+m4FH<-ACfTsa~jzmUZRwtMvirW6Z)rkP1T$XOV zZ2mKdSI~*^jG^rLo*co-Q;n4^Xw@wfEF)B;bII0$Z|Xb<&WlxO=559q&A=Q1w2Wx0 zX*%FCA0Zfn$(QZ}Dql|A{oU0XM`vzJ(Ys$Rl1T8UZ{{2%3Mg3j*@%&?+zv?q9~-l3 zt8kxsIlrXo3lp6%Stkb)5wsu@!cv0msax>m^Fw!y0Jq%_>d`nVgWBFI)Tg43^june z4xSdL;C~v8MjtKRlWR|k{F!+wyl(4%yzRrE8CMrdatb?pkxtD6<@aaz0Fl5i>+@?D zSzoOy(5bHnvh?6))V|C9(ndJ7(t_18em+9@N3_|UU*ad>L%)XN0}j<$V9ixWr_^;O z<%JfN(WLekos1O<-`eopniA;B64)Z@;Td_8&{P(9*;}1>SnEVFE`8KfJV6eNgsb8j z#d)&@P1Wo=_3_H(CSfK|!I~<Lb?DF4f8c3%+}pjr4}6k9Y}XIUWn#|VVY12{vQEew z;rasPj$dj=xCoS866+)eLC@C9K%S`|mMP}VX$?)G67lS1Yq*^IX009vlP;2UGb3mS z;Kq)eB1>3qM^x(&l2~$wPBf|Y;nD4bEnfz*>p{x63lBIQVSOtJIQ<J6Iuj%Lr!m+9 zqG7j)t{AS+iVKZQLE5^bRtCtY0t9&^-N|QK17TMlpxBCe5{rG3?cX%ho0aB%B!?dB zN|02+EC=H0t@Cp=dJb~bXwLk3%+5s(#io-CE0kg?e(Yd=@LYjI4-t92I>`^O;V#Ss z0oiojUs!cr6=wb19|=Ju#89(U(aogp<VE~x7hbI3D9IgmyDgu4RoXJTns3#ncKUMx 
zaFdAdFk!*M`L%*GKa8Z6K;*LAe_%Q%NGlR-^t%5&GfbwD82zBLzwj5YuqDZs-bx^G zj$G`YWr<UyJ*O)e?jcw}sr$eQ<Nyxj0Kim6*tb3m<0kBf0tchQJcq&po9bNu=ngcd z^N&d*e7A;TZR!a;n5goZ#<oASF0j~_d{u)E7)@eRF;W97z}0B$%G+veJFDG(4Gkgn zx?Pn{e;qRfIiW^q@jSVBjp@Ob6IE<i(brJk7Sy2lb~$v}9MW|B-sSH0`p{Mx+kf-W z688}F6lX9vq^S}jCB~Hbik==_+BT~G)o%`D@AT(faeTyAX%Yk)&KjVdO(dq+2nVvP zy}~}t->EvykD6egvZYphn3^LjGwUKTz)4ZUqpMgO8QTEYX1>Faf9b5c_+B5C21s)G zK6swMe*QH${5Sh~G5LNd<bDh{aL614gFpvO;%kghn+(xe4Y|tt+%+RI^dd9#GVW;~ znrRgVnHjk|I-TuuO0AMmbV;mEd_?-6n_JUZi9rr^D~!{6$}T3z520&Hfs^{)F7Q8a zKUu?F<k?tGAA#u223j4qnBCn2>w|QC;$3<UZUlCQ2U=gvP(74K17sGqv%|~k`^jRs z0Um`aH1W(3^=pq|TGgSb2xW2Tbi(VoBR8iZX^<GVI0K$>IAa}-b7w{cw7vaTLWbmi zm?`9jM-0YWu8@ax*{5EhyKc&T=hZloTFXJg69!rJ@MWBSDsxU%F~nQx@A=%X_9ia; zqK&B3+K2XrBVTi+oHPxzF7NIObM~|E$bad~**lLAix$<X+nT)6eti!So=}iyo^e^T zv@tX_aKHNz{paBAKts^OWYN%m)zj<qrr~w6ak^8dvG?QsaZ^vDZI!hhatJDxFlO20 zLhGw7t}9%`7>2WYBE10y)CdS;)u{?>p(s@nBZ5HXx9yl}<M{eL1*aI+hlV>zL`kc$ zN_^@c1aefDj?on)KTQqK48LCc`MYmWFkOTwQ>DX1z3!ha=UYFDqqyqUFHu>xN-Y^j zS2MMRG8GkHBY$CGLg#@8Yo%i&r!FYXb#Bk`A#Q1(<R_AgMRHOcz0c0wwP#bq9u>iV zq~^rdFg#IUb5+Im*mAM|7Fem3WTkB+Isa31(GbG_fh+s5rp$GNgzC5nq)Vq$exu{_ zW8@x+&1=;6tv6};s2Ak0n48XF1e;I51|6p;ca%v+j_R8~MuYTZ83r^)ca;QHECo62 z=?kHP-ChjI1=!o@>KA=&kX+L{GzaxCHBS{mNh3RVNAvHn>UNe!<|~Ck`EyX@^G_7Y zL4^w;W~|>2>;LRG4Wc<g=CgA`VxDjhN?^a<fECbi;Vb2RIffe1B<tJ0!)ge3(V$?0 zLvUlx?P1KEQJqGTe*zlp3o;;cWxw$;%%8S{Rws46n%>o$Mr|Wc-kzTNPyJfbfT0c* zz9H@Jf@v3F<KK0kXH;Vc8#*}AhM~}O!0kS6*e!@|8rG>n-hd%ucj)J2N~Czv`vBSE znV)-SJrEC4$cRJB_=+yvIYN991rgYc##i1jfr99x`YMK$zr$5RQHa$cFBwdayg&}W zNFzWW#x@D$A9-qzpMy@wE!?04Ylh#JPrH;UI*(wo!9lh3)1P)%8~}?3%8%23U}~br z&BA&qS-kS5(?S2-+fSKp2m$hdQ?x?Ay8oDfS+y~t<Fmx>0{^~_#AqgY|5NaLk&GSJ zpQ1zfx2Jku#L~9g;?|702Mm_5q$Xh0H{huR<PG<U{fMazAiomMZc(e6oDY4CVp@YK zLjvLa9&`s9cn2DE=f8!j+W)I>OHW>C;3m#rCN<Tl5~Bd^P@!U1w_;n7)-2vzmjn6_ zx)Up}rHv<+oPC`<*dyLzbMTP=$KJ?v{!Q#suHRB_PyI>QN@u}!L7SXz=kt^Mp35@5 zPr~izVzj_V^YxYJ;GVOumnbIlCak)`s`igzB)**Cxgz;-VudBnP`T9zt;B%>5x(M+ zfoKEhlMwOWg8`rlu2c&S72QmEfT^!98{*N`78}mhFkXQKEP}ot-vSg%jgx`Pdin@4 z{Fdvx^+gF1_8Y4>amO1)VuA0Ei4XJJM>6+4El`OLF?Ma~pXU-)jwiLklCTDrwT*HR zzC)Sn{vFxmkpQzQ$f_TJSmuHo5~3~}0QSkSRcaI+_Jc&n4}4|6yaF0^F<5M<%x-3c zl3B#!spiCzHR{SKOuXqU0M@P5Yx>N!K+wIYG!(zrPc~jRHohLE=0ppqgc9u$w7{{3 z4)DkTG;5>#^*VeKOaEGrIO+kwAybeS!Sy|OmM6X?OmLjxR;&z6i8$p&T61fS%-QOG z*4KODA)Su*d><FoH?nhBB$nJAC}q|wfuobTlk1~gA$g(tCCJq+2c_?R^mIC-$8j)J zs+m|+R>;IM(DCeuiNt}U8lu$81LmB%{xCl;L07jVo1O}N%P)p{{eJKC47C$<<&$fQ zKuq%>o?<bWA(ICqqVzyXX@1wI8osb+YjN(2SiH2Rc{soEDQy4z=o<()%!_DR{k}NL zgP~Y9AnfQ4a`<793^ubGP}i0V`+_nWJX1QzY)NsGW|qwZ04t(2PZ_uUPFv?fQsBT^ zVadu|M8jCnPg=M_oDGnZTBwFeWWM8y$hj?5{FyMcHbTE<S>LC5VRq+fObY~ZtV4m} z!K||W^tWw$`yBpc*lgG8d~SCB$<^Tw&UlHZU~-^Ue43iG6GLQ+N@syg4}?rNMWOt0 z<TE{_r@ncZ9(9Oq?dr>#+|Cz58^$Fsw?t;xs)YsDB{lb!7Ln9eV9kYw7hO6_f5gm~ zPiZM`pzz*3)4&6Cwq?>7wY_bsY;<~jZXJ*AN51aDE~dGg-CDjFIjr&;3T7D}^9<6U zd^5fViTmDZ_yUKlBS(z?2)lB3i?w)*#bE&!T$&<~i6;d=h9Fx!L~m_-ghiOz-fI82 z-Ke@r|EcjBKq!Rl6^-);BJJUV&+xKo(_xK%)uio5@_DUJ<0$Z<`Nsi6@+W-X%aRL> ziH)(NlY@zY_1`^PLrWNDMouC|qQ84A92|_y|8<({KTh-V5-~`d*qAw)6EQO}0#7Ou zF(|v)nGi9k%Nbgl7&#F!C^;KC{o{;;wSgJ1LCwM#cr`0C5repig_*e%3==zWU)c7C zt%H)Cff4Wu5fk9Db|zvD25v+Q-z=OQ<xL!fZLRHWZA@&Oh&YKDzFYioGI0QIe;7EK zh?p4J8k@lI^TYfH=|5M_dze4~06=f{y__|SKsIm}_}a3<NdSQS8y5dT_CJ{A`WJsp zO#hBQ=Kl}=m^j$}8-qZi|2GCDY=FGj+5TmP$jQMO$o~J%GR%KxndpD8oUS8|rhw)< z*=n*v-9nXArXE;86@~_JU#+Tz6lk0jR8~ozREV)^#2||ET~5?mT(sc9NJMo?I6)Z^ zxku7O9ZTG#(Ep27rNDDMPvpbWm<@~L#h7gR<#gMGXFA{Gnxm-*z@Mm|bd5w1tBdH) zG2%=2*Jh-WIGdd4*ECuH7=;Lz;J#c=5YCL~bVc@29Q{7MXtEQPD;2hQvQr7O{NJ~e zL~@*8u2gG6TLN6ZRBQb_u*srK;cwH0BCrv75KB)6b{t1<%@z*)dahK^uQ)#k;I=hi 
zN{jBu=~U59-9VKLyZU#zDp{+!_Oy!AwDa<J*HTqrg76pane)v`MPgwCR@}S%Re1Lq zYrRs<N#E{QxT#nhTmNvr0K=7}fG8({Lg1e^%o<Cg(Sa-L&2r;~OL}a<ruT$_O1=>O zAFC7Us<VH<SUN9GSxx&82a|DmD$kpYUFufOYC3m$a?eAsg7e4@*T+9)CpdLw)XA(- zCeqNFhjbNmZ1jsj^zT;=b<y}4;yIfS;b0}kG3WQ2wWnfeqm^}{X>@qj$LLF1@fK(8 z*K&XN8#p+{>%x4wRvJQ$q7#5I4r)?3(yRhBGA>xl_03vG__-aXtayh(^6FfC?WRg< ztdQ_6Pg@z5Z^DsxrYJY0p_~gDnIA{zrDz@nAdmgH9E;U+^8l|p0%HV*qC7Zos-0Uf zypQovSFOS)8c<0R$x-#K1i)4c?L=FL4ZR$xeMIu2J30%4fcZmD?6EY2+n5!4vCg<? zu3YGbRcsHp5ovtIjU0)e6xAd(ww}##44ex>yvxS}5YRHlbw~HlcR?~b=B<kg?i-Ot z-YnzX{>C1pFhydvnC}nn8J_%qxR?UH)MUg-`*8$Zxma)g0>+`+;H*KzZ)V4Tjq#fM zAFYO4kB2hUhWxo?Io6r<A(=aKiKedZz=8?oeQ+)7%=00DQeKa@_JDOIn)v<T(bQyS zCV*~Lx2$QroNDi!!#0JV(I^5<fRY6PH$AKAarHFN1^brazIKr+UV^pslxfPs{kGEo zeNlaIpebE}`}Ya<P?RdRMnTnjfDMo0h+W(Av@?ymYDmnRkg|qtN~|^4DDJm<d<Fwu zZ&)PfvZXAHV7?2g{KIiF;?i_mGrOn|!^9|~SqQlGK5BL(hJ+CWjMC*x^@L7w`{LEC z{)$`Kq2!EWR^Jsg-UJ=g@cJX)L*2L%Ex1&q*xDSmmpdK@B5>BcyUscO9O(M5GuttD zLnYw#<P$Sf)7Q2>(QNgT&_aH*`wo0CoIIRFq1DDv`nX8oLjU+&Z(idrLQInWI}!F* zGW``{OpJ_d|0Tp&{+$qG{r?bR%xvucCB*)B5%#|etp8m`zkd1{g8`rcpZ``Q|0VJt z{QOJhF#UIC{;6T)f$GGOh(X@K;jdm`W@7s%XTrA5z(XM6QWnOJL|R0DMY$po(_f9H zNW}7w4GjA~RTaly?WRb?_0NsYUy1r3ijRr;KNa6!4fp@hA5hx=XMarW|Ll?Fp9C?n z{Uu!yhKb`pDdG4>`+x85pTz&=`2Xq-SQlww@Xhuok=9?DIk=dCApvHczdy!bHi3%j zU;X~08=`+wt_br#Z~vF1|9N|111EzYwq}3F@9!}FYxq=692_ldZHO3{=vn{UfG{)t zrTRZ_@h_GC&yo9&DF71@3o|Dp<G)qV|3h2Vw}1JmJpat-J<;--;bC@>;WcVZ9-k5@ ztsajAI<0Mj8Y_eGg&D;dDpmz;Q63$Mqu#j!mR@B|($^QHp-{Ym9!*=TL3L$EGw)4P zvLYtslJ9WhPxkxAr}yNer{iJf>&AQf?Zu_rq$n0~evaxNVylUn<xw?}Z!FtGxX=cV z>1<`YuJk@u@ZBNEC{Sa1o%+XjVjqWafyQ9O+v5<J6&ZT(bsBldGtoq>t}nfAb%z8# zE}L4=6+Z&a;B=v()}$Je4Sd+nk{PsHUuvK7?I6xJ;U`(?HQCJ`)GBPf!=_3r?7U}Z zmXqlY^=2*v&7JAa1{pFtoHky2zF_u#ptW{+E|<>F^w*+39N%VYcRFm`I6A&|2_D69 zezPQqG7wo6)(Wf+{qXT=t|r6#)=nf@o6F6Luj{KlBuM_vCVKG@5nOm1Iy~u*<SwN` z60KN?(=`ZsqE{*Nyw~eJA)}qVjP?4~3o*QK2wSF$dwkHx=Gz1(D*UHFi|$gyx9Z@a zL4{DcJfd&Ullt%JHjWWDS+loJQUKWEylRqqbiM%09J8qib3%R?S8HJpd7c+Km~7-D zekZaQ>=j=TR<Niw--#em-(7*e98aE;XaZ18cW8WHt4U$CypEyhfM<p&wj=io>*iDU z{5)nS$kfpP15iM(zm}n!@j3ZR&`k6(e=Vv<=b($xa@?Aqp1&o3d;U@M3$%y(ZN3{h zP#DdD<v{*x;Sc#gqP}p)9q1nPF}_{dg)DeWD_nU$T8ZxEvOLZ&$bSblNT5rh4m=u% z4&Y9bfv3+ypWz_Bk{b?Bc_iPK-w#(sQ5G#k_oBVH4v!`YVP<|@{s8jBJ1&E#-h+0a z-LQ0^7tjY-6OQH|%^yV}RE@?!d)m?Oa3`1RUYBd49SyzI2Q|Q%W$0Jv6?6zE@vq4; zK@$cEmT-Cg9pps=(Nw7I<8bGv_+R8|Sgzq-=11kvL{8|J+h}j2-=II^Fz$~h;Ax}} zSw<e<Rw5bRI1rY(Xc6@H-SG53;SAnQG;)x8gnx=ZA(rG0=beC&H2OKZAN?BZ(9$?w zg@1zI!+#~i$t?17@)z!B{1g0J(rjq;1!yU{89jymh23}vo`lcG3-OhB1HKL4gAd?C z_;YdwnL?J3Z@7ir3ho8|OjsuKtN3e$^}>zf=eg;*{kb=D|H==_uSb)B9@ata??4Yg z%l4py=zUl|Mt{Kqc3>wgah$+Y@fEOKjc>*e<E{7!+zxL!g#UuSz<<a8#3u-XMI;fD zAZ4&5$x3o5`5C#79E9Z%`HK9T3vgvzhO6Tmxn^z|)M^8_8J1n#pZPF<kk3Ou4-)Pa z9u&3;PYL^kqoOALL`L!(Cm-pq>i$y><u>N-%<agv=l_g+fWt8KLMdv5+Ruh%G2r`7 zptx=5ZLC4xg>e-=9iIa|GYc=qEAVB|lh@&U@h@22e~F)mUiv-$2I{PnD64ZnQb*1t z6JWW3%p)tvW^y}eC-0H(IEiy`F3!hQaih5`H;-G*t>y0I+PF8kkGQ{Z$GMZR<aw1Z z<;(aqpW#RIv-nH+2l&tU&xD!6>%u3ZDlQe*iyh+sNQ0%*rHRrcDJ$J7?Uvq=TYx%W zLc7qjr%1KK+&XR~w+r1uYWNWO9r+#5^DH!%8%Kar$yU6PT!q_7xp0{{j10r$(NR7P zef=_dkQ^t&xN&$ao{Scgf%elxFaIQL8u^z{7ymr8?RW6x%R~)dO}-H|v;!m30Pp+_ zH-OJ@ucHsRkFmr*gg)d|9Kc=Vac&||!3+H9!gQ42?nA%iR^Y49E;14%_6hkWpyToQ zNstRu@F4sxmq#2K4^&jo{S{q{mXJT7F5u&h=q@~$Ux02wHTX*O8F~!3qK|N~SS9-K zt7H+siFj~3BK#B3x&~a1Il+sr!&&ZL@f-3!x&$5MRrDwBX{h}{@=I<Ue^fXZF9Z&_ z3av*g^6SuAVLJa7UVu0}4W;<QAl<Iy2Js2lUIS8PCdjniz}<U64xGV_gCjwpoO6IK zrh<gH7nZw0PVhiMi-5<^11a`9)GkgT9cY2z#2|G*x4fP^7tP2&hVIEPKo{q4M}0va zY{*{;Puz+=LARo<`1;%xXgP`j=lltuBa9*kg;DvwWD|LxOeS}pieKpM6b_;<VfiH* zg-#cqL!0>Dqsgc#e^dTlpzcbLu=k(~(b?z-wDN0s!&zJ>s>zKf+w-Hi<<Q=b(WLz2 
z`BJQ+h53un1oS-mg(RTaQpOrSb;=o5)9I%*4jbAqq`t1UX3)R^{rmN;&Q$fOtVpNI zlVyo`X{;m~35S9Kzt8J&n}*A&YYtVBB~joxf>HIz<fxW-Te_u<Pbbeht1sP8&V~cC zdmU(Li^HK&r(V+*Z(*0kPrcHDE9d{<O3S{|>bVjd@kZ3xw>myD8E-o<A{p<%GbT+3 z_~sGG=6G8dgU2y=GlO-26A8E>J~Fs)M7#~R#7DM`TC;G|$d(cCxa|&gcyjnWwQn`r zt~vm80Mr&pF5iv=r(*_?z{sK736gcFR9iSXVq{w=IfB-pjZ2N3J-2P*r0F9^L=uVS zzSV7b_?+a0Z76wWn=8XELBrW=+Qi{)5_?U25p4~+F}}UJbJI;72D-2%qs>juojr4U z8#lX|zQfGGdq=bdE<Y0deit6@9zK0T@3RqZ)5zeWINjZ}X+yki%cSYO&n9Tu+ziiv zdr4|k%cfEA>YJbw$4-vJ+sO6J)7$X%@X9!C32l|#zIn-!^hnF%c$<<uGr4fn;ub(n zcvBlXcWq)vIBf07A4cJk@l8{vClhT=k!17i5z*~lwCUWnJ404Hbn2PD)!Plz?%3^4 zR{_xU-oU(`a|~jaQ)ukDJw1zQjgn)4All+{;!u<6Nodm$TFe`QHq99Vm%*<Y!(DB2 z0V9jrl;JI#jG^>6y}wOJ8Oivje<GlOWY<@x9+_P@BBqRgA_`JE=^-^ZUxeE-nYOAb zN+Qy5z$w)6bhcmDw|Y$nNhX&Yao9ljp^4D9vzv$Zhwe`#D1vY7u+W9D)7Cm^y1gGq z7e;m<tAD1sjkM4+oy9Z0sq{>1@l4O{ElHr{c4pD~+T?W4-(~ncBNq;B!~Xw|)AQ`} zV<#uaPMR@2K5|n_p>M}dIc3j2Kcwee0c!IMpUy={0U{BOodu$t*>f@7nXa|*Dfo+w zsONS_GEgNug5#swjFz+PvRO?e{>z&>@<-`iZ1eqXg=)49&78VF?3Ddes;6z@pu(UA z$4;5CX_I=&IUqIrg<}dEpm;Q8dLll&4NV2kOu@e+-#LV?&5<?>x@<VT0O-#?RM<V` z!bkybh99NQzSW~Z&~4f@Dj6TOsb$mbj(qEd$+(f+w1?~?`!+2f*-|8&j{M#mBW<H@ zYKFdAh=&4G5OgMG<Ka`L_eKz-`sTjOy}`^t6%bgw5<0V;;3J~cLGH0UNZ^lfNR{{_ z7=>g}I6}DRLGdeK2KGb2jB&iNyK%g6tZ`g-BWeQJI0?nT0SPl<rl7!}_D;sR&Xbma zPM|p7NnNTdbCXDm@HRM5r&(%+3nRlV$%fH^Yj&asoic1KQ+m*O0pbiU&T-t+=KVL( zS7p18cNxdK;5|+73Y^7hV%F9VuCEa#SbPS?AK&r2aWkG@x3=Q6WCmw)lb*-lV&~T% zbe}lXyy?#8UdWZ^;-|cSo~2ciN`ok>fstFGHBcYqFl^h=gWLs9pqqBXAX5SOw%z4o zz>#)cXTVpMOI68Km$TGKoKL$8b<Du;2lezMky%@jhNZ?28qpx#>u@Ghc3Q>d>z<!6 z?qF^bK8*kT{GK~E&3NlX_Xl6+{+^SW>-l8vPxxB!QB*X3mkNk^O6<TBtu*Eu3Bf9E zL@ME62Z=+Zp%cJ<TL#w17C_<_$3u5RU5;gs9WlBZ4L}SnjV`0R3!CnSfdgvlYJ6T% zsu(=De)oZi=M8EY%pEwe;>Ps2(CqW!y=UMKvY0FdTBx={%gJ(%jKkyLl}SXx!g9DE z#4o=&m>F*z$r_)c{^Pm^4nQlQnRN-@8Ke*H*tN@co%ceq0qW18locejx{Y=vw;_HD zoZrH;dLGYap|85^YVAF6-~e;G^PiChz!8Tg??GJtPdmH~q$B?)EADN$i^GI_klV%) zZVkd-xEjb8Xq5XLk<S5lPr&Q>otMLt8;xUKhK+{}LcdJ*Dq}yzM<(OLH5fm!IX68d zeDxhXfuO1R&v;Yl1ez$pQ@0b!E0!9L@q#y|>j9-B|2e}y1*{My0L4TaJ%aq22Bk)i zq5gpU15g}*_RtnZwu?W0(lL0FNT2j6P!a>awn7evNFQm?BgoJ+TGC@ZkNp0b?P5G+ zM1eGSkhtSlpicd;x?y$Y4_g=V;s&zOvC;LaQ&1#Fkc{-4<2ySvJTk>I(>F78Ze)qH z#4*Qnk#9+;C9;-WDz0%{?%E*UE!}Cn8vKB~C%)(S&=u~f?kdGfBx?sK7%7HAl+C5) zDg;u=a>CI#xYh)1j=gfDP540b*%g_tLN#%A1-Q;b=nuo%-0U&jgKGx){cd0YF<DlT z_89(}L4!>roh*~YsY~A8vS!EXGZ(-8&^v2y+w;VgS3dE?)mNUKC2wOMpZ4^uow@u6 zxm@n0XYPI$-=Dkdo1@^5F8+GadP*xF0|HL~?o_nRigT83)-K_%A-9ryWd3O$D@YUw zrwCXh_%)T)UZrh9n972V{9(r70DNhg453kmP$xqu^s*JA=qlob;U}yKmhP$*iajwv zz;W>5i4bz^#f|uSWHZN#4D?IJ{=tqdBbx#Z*leKvgR+@K(iBCh4*0N!oM=De?J0Nt zrT=RFiqo$w{pIM_X3@4a0xe6>wite;Kud~g=s}N1oT^h&GED}2Z7GHUP|PdDD2WE> z1+f@C8;d&OOiZISk9CmeERCpvK)lp235l12Jm~+<0a_kF{kv#Qn`pUz5G7vH^Fqz- zChUcl;xb9`J%=ra+e4<ty!04-_6~S7Wfg~mObt*8$$IfWy^Yc{eII=rdzm$Om@rIy zPIys#PI^UtH7bqKnzbp;CE8r)<?hQpH@Kg7e-i#Aax|>H=y=vcB1Y6GF=B@JtNc+U z0rAVQRRCMzm}<zP_*yjVjYh+AGz`*04o5jXW^|B8cTO;|*@1(*XbTYQ4i_exx+?HC z^fsk+{2W<_;>f^5EX~~2L}rm?<Ql@0y`&tK;#;@d#13-hc!r7>ru3S+y0b@2H^nRz z8=U<zP7pP=tVKl*7=p4myRx}C<x8aN0po)Q*VO{)GcgQ&0RkD6m?-hm$$An<J#z0i zTkpBzC->n!o^RiL`}kRp?|XP=?3riIXq?k|_5M%hFZt<xn>+{K|MHpXPd@+X#@Pdz zyq%W+l=lOzWN>o<b&gQbq8JE95vIhN(O?JnNvgU_bH!A(k1rPGV|}7RA3dpS!4N3H zxIxK3E~P0R>E-GERQ??3PZ#8FXlepQ(ghgodf9l{-C*p`45BN=V5Ok@^^y8|ogZnQ zXRe8G=lU-)7JKLVFVWX}*Xx_SH$;A+t3sS(ltbgFyoBMsm?Fl420Vvph)l<I?V9G} zgL}!NC`1-o6;Mk7s;RqI&5AFJlX#HQa=cYqm1fc@jZxZ26R7dAXX%;r=DxuWJY+}c zZM+u`0aemzIlh-y)gAcu?M0Er@JOZBu}s#MT-`?~d4jH_7_|`!%nB$3e#I-AJ@tMn zwHWH8`W~=|MT$m=mMB@4p4MJ^$C7KdJ$zNoIbOG8Rmb|pi*E9^C%*jUWv?xnKldk_ zbDzKW>pZ?Tc+ZBmpIrHn_W^R*RdarF-F5L@uPoRxch-IVVlUj%nfvFb)ILLDkOPLW 
z7i64{)7D`3bZwz_ul9uYswSMnoumJZ=iET&NE10pP#v6vG+_AGINr-~Jf|a~>Ab`} zN1j767<*eR74bY=f?iYk4l@5)K~Sxd(pt4B4OLq?G2m;arPK~wZ|Ra%maLUp6Lr#N z7oh~@(7m;Y7$i<OLT{$`0B~eCy@%{_cHo;>Uw;J>kO{+MRKzqIpBhXSg5h|)(QKgI z+|aP0Uxo*^ak*U3;b7PAK|1JtcLRu>cPvLu16S6!f#XX`8tEgN0UdCS<<%@lgVs8+ zL9@~gT3HmfeH)m>Z3gqR4%e79zNE>SnB3WY9l8IfFTdQLtHZN?!R<bI_Ahb|0b}3M zy#z>)>i>lB7|7XaHt*~~7}}@P2H~hvjrn|0Hx(BS7thC{x)UQQ2s(pV1q@((M&%mi z6G}-yM&0{?Hz;fLaWnbrV&&NI+LBErcX}T8yrjLSeHf7yPtaKv=9B@#0LNaCWgPH_ z;Zc2VkLNX~%j@)boh}`i#PZOJSk5hAa5`O<4;L!+tc%BQQw9RbX2ogs%vr`V;~L{u zgExRpf{aaq7zK?W2^QHTxH;~A9@ilkz5|GA$PVYO|HLk(r?SiU43njHC2$FAXx4-k z1jCUHa=(lKSVT<tF}a6VfW>tR;{dmK621fn9D{sb39Nzi)E9jBT=bLnXKp(0rphO7 zA@6rTJK?(9I<dU^=3~F@#;wMt8}~na?~Vyge)2z0=hn>39e?wc+jbmgz6%-$xbuPB zD?wFwLP7GCx^O9;g*hClj9EI?bx>Flp)BUr)fh%8gQ_xS#~HDJL2(#haxcK_xIn?^ zI`GZ`<2OZIX1k32S&GZPOG0>rWcfyfM#N{hr^J_VbEUcRV)xwmYWb4r_44}Yd-6Me zQ;O40uCTd6oSI}pk3tD{LZT-s;>mb|o-k=GC+Y<18NqMQqNoJHrxfc9R_zeWjdrD0 z84Q)+tr)<k(2AqaQoF?1T&+^v#c+e=Zwkx`EDKx{-~)boF5suH3v`h3of+Fy177Uv zL1RJcF=+>FEZ~eX5S4h%SOUL=8fc<K1)Q6zsbm>4je08Xu(uadxf45s)nk@SJ7emF z<c#MRw0B>6=(<1Wj@*C4=g)l9T|eQL@hcyF_=?M)<R?294;VM#^soOorzQ8Vw>EWM zjmP3E@e{w^y6@yi*(aMj9=LnkHip+dpw!m$Y37p-0lF5DC@Dm2<he#H^59JXpCLl6 zxQFC|$Fl;O(**j72^9Db9N+<`f`fGrI2K&<fdeNW2L}VA<>)W`F#aVvMrCQYa;;z@ z3L+;H;W;t`4svA14q}OW@k9`p6D=Ql3O^O+Nm%9^nb~-Wblwcc6OCQtjpHcPKRm7r ze!;L|znm%!1RfFgVeDJZy?!#s5wdP8zISJCf9}^iZU5+$I<%5od5}gJ<Ts&p0M8lf zAXu2zL9pu}J_YV@qlm(CT);x14xeVB294voXeItb9Wbt=e}34>Iu4)AbFY(ix!F5$ z6K>p@o6q<s-wnQhGw2oxIq`zs*y%Eu5dV970r-{?Frh5dOespyXeqF=zcIjAATLx} zjE&r8<5l5hvC}weIAoz2Pa_kJg^o7kAKE|ke>fFh<8|K2IaEd9dC>i`C`lT?vZzVm z@4;&<7qh(MlIDd|gyZNjA3es!dCd!VDKSBiV<IPZkmZ(wWbF%!U_$m{2g2B4xwSZ& zCvoRa<PY*6^W0`0^Bovlj)_{Q^s&Zm*04tR4VQFKBG*W*5|MuDdhho(<%D2`e-Jt^ z95TAPP_U^n+|_iXk^0bG)PKx?T-(qu$hM5YKyz&{_V0J@-@ieyx4=GQ+Z>a}w#6pR znBLC2I9b{YhAGN_OS!5UuUwI}{edKSph+&lc@kW@LX<dC^Cp@8(No<&f9QSuKlh9( zi`EExzZ-?0&y656@SS@uz4=CpD-PWWcEA^aE0ak(&mP1BRz_1#k>^JxrzPhnS1H#i z;-c^+!g6Jm<67ZbhgjiPxL`$9%wM7?9(Sy&s!ty}SPeZ_3N8YYgK1HtT3-aMZ`Dv? 
zD7vW_6h+$iqD-GIGHi=pN(thWR9cJDJ2aJEr%}rA(W|s@bxADFI3!;1s*W=a$$)}K z^<6tdvki#0kEJrdDw`QLGuY!ZWm#y6?u_d?R&b*V_BN~_ts23IXy|V?P&>^w)2L5X zV<vjpVNR07i9xopolb&DFsS}?V#9Zm^w!r`&0lc+t>?A=`lj4Z@oDRZoIQ5bPaep9 zh?icF9zJ8}lsj(9JtORG-ZSrl$7(8`Z(XpxWgvI1>7PGt%(6Zwwn*BLC8N$=JCO47 z{QPIa8sTlg%G<l<ki{hg+kOyh-{;mW3dPYNeGYK_>XKG;UCCy2ukaN23w;mQuD_xm zLPtveQDQpXC1yzpS0z@ORnd6qXnmUZJm0j?LSaeC7493|_j30*?~QK7kCLtCyG{@C zqOjpL!aNO)?5J#DHh$mA2E&CgAMwOAE)wGvBkejHrQ;amaA_bNm$9tT+Q^~UoSD>7 z1=DvNm10m9w1LSw6EHx1cu)i@aX{pgW#!Ou?(&*Jd_YQ5flqv1Hx>VU`@Yk1FMZOL z`~A<i;o<xKh^vRaShMe^PyBV}(ofev@)tq|eskj2_~N%d!Be*%e!cIO+aJz-bK7&d zFE%}I>+c6Z{>%WHc0uobV)c)g;^DGQpQaIWAsH&I;8NzuD2%KWmBnI$><A-1CYHjb zCB}bEeE%vE-?v5Li~W%J3UKy&q8m71_*!c)7m;L97Gz%L#ZWLDB%(tFPEa|~=lA+O zeol;VfdqCtp$N*+1oo?D0zp4!GF9+fhqIJM1O9*?)F}Z9O(g~u{6WyQi3jk%pPF%X z^XgUOFTd@;^||f1;kI84969c;i^e~bdqddkD>>)F+`;{i=W<WX9`wxMfg`_o?9+c$ z#h}#>gKVJr8V+=a<r9UNEK3sNc-oPw5_2F~qV!Q>xND^;+}Uw8t`jw^^UD7P!)n9M zw@IkbY2z5nX2%^nlKCO04jcfj_9Y7I!+iP42e{11ce(3?z0c&Dp3dpdP(IoUZMYuV zprD(r3~R%!67FdRG~qt*{~Uyb9sfnWEr%VOE3jeiN7Yw{&FoeEUiCi$W`te!><{a{ zmHX)AC#0==BCYq(XS(MzUA`3fe-H3~3VW<@#2fLEmI{1<?7?oXyc{Lm0g^&7!WcA8 zYk{#Ch&j1LOjIyVSER~8vqQ@&T9_ksgtdj~;DSf>0mA{)!4Y~tS=m~FD@xLF6{{>X zrH0aT&hKI8aYpudp+!)AYT6ae*i0kaw|(0NYNP{6kKmJ$XgC^*a-x<tQoeL)T2Arl zWGbkaB#__bNx;QkPh5h%G9i_~Q3uer*MuUbBoe5cg90(5F+;<yjhW()QaczSbt&@{ z3E~e({RoH<8n^elc@QG?CU*{5dTZ{`mOtbkY~P6|e)u59x2LxyF5JEB`hAxshHSv( zwyTexPMV&^-G^7M+Ji6n!+UsD`+|<24OrefZqjuVHa@sN_igL!dTatd9tD|F#(3cm z)Q{}6!X95O&&3pVi+V^Usz8WC1|E(}l1P1AruCq|sfi8{a|T3enFpEn!%X{STfvhc zj(8z**lDTo(Eow}<pKfrk~#hY|HO412gAQbU#|1RnuB1I_k;{>8D!%z)5siV;HQRc ze<p}9C^{fYSd&mbx{rLfZ(p}4?CpMx%=m5;+1Wjg(dCO!tb=~w&>Oob{}39q+&Sbl z7QEb9Q)_Sg4zRbCee7*AWp7Jj_BI%{gPm2nQ5zRF3)=*sRxsLbMO#oC;`@WEI1zn} zjv~Pwhl86DC)m*e+7H1(|9n;KpRbGkbKEj)i;wls!~A>Ay#z3L_d8m_tjjj9SlQT} zEp`eG8&GzvF<;zAO(bYzef~3UHnWMIu#9<Rfw-DnB5u?-nxeuuuH8Wyt^<cH2Oo1O zN?KLrw1fKWv}TT?^X{-E1Osf<O^;a2iE?D)9vt^ro{63o5AVTg#KL2?Sp2ex);|{H z%2@aAVoSP=><YU@)QSWq?8-DT*T8m%2iHMEm`jx&woO_-XUyWtea*lA$*&LKEy1l< z4qtUO_xF>bj@K6diQ<ylepS#;0WGyOOn{mNB*&?dOCGme65;-9L4N#4%1``}^7}Mv zOK`ih6TUn5k+-13|M3i?tGf{rT~HT;94W?2WWXn*ciE|%OjZG+0T5_-x{^Ygo1PY2 z8mAykWX0h`vO-jc$nankknr7Zh6DrAAXK3(-xk|)vi+2S7CI`@)Y)kqI@C!Wo=k?> zEg4iCcqx?_ZHcVpSjn?eV5LksK1m_Mlmv)BDqEf3dpRmArJ@rg(=IDzK}G><an)Vx zVuhe_2s=T=%Ajj$!|Br)z#j4(nTFiZSJN!LppC>L0@z~_rfoUa9~2Q|@<zKcS-Umt z$E0f}VqJrXOZJk8%&*bbYrloA)W&FITwEVMrB^$rbLaDG^vj$ZblE`!xj`T7oIu8M zBP2^6r=RIm?<V(fcS?84Te-(2(M?=V=Kz6t1%b$#t`87o0A=l5*SXjNw@RkjuE4`i zr$O=5;%;>l_g=CU>3HA{Aue~|ftIQ%YTVMUabU+@XrvQ6;0);iXH0PcDY}*$*yteB zo{bAFLaP8`fo$DrQb8A@<CEFOAW$0fw*U_J>>SC0_XXWx^jg5JWnS2ZtC$l88{nhw zovjy;mOlYB_#Qa4?=fF%Y?}t>D&ZW}0N-wRs`O|fu=vjIM1!+B!Geps>l>Vd>KVAJ zFB~ld5Hro-t)UfJkU-7A0N5WGT#pkVZgCQucjIz={s4cd4$s2EbGd2Ta?^#qC;ooh zSrdQGo&0VT|N4nK{_u%7<CXhB7nM?*24A(^?I`LEIjH%;v!&iaf<iJlym3he*H<PI z$H@v$h$6{67Z*jLC_RN96(!ib1WLsUGlI^>9XRfo=xA{)ceFYLhYa=+Q&Kv-?7uUV z_#f-39z&^@XwGCaOf#)Gc8X?l(>N}8#5}{1&46_5_VBZsSu4k(02*!{IDi@nfQ5G1 z8r1;J@7z7ALAD0j;GhPnEW}27cZUERWP|i*(jNbHBpW2B7gi75KepQgV2KTu0O+IO zx7&RM3}$dI<^d$uV5%>%dEYA>+55`L9H49+e+{6k^+c=fi_8H#?jzwH<U|qlnl&-( z!d}DcjRYbQo;P@}BjAYePXu;5Uv_eVKrlk$C6+nCGa+Dwrwh}S^NgwHEYFO<tl+fp zd663f_Yfl#<2ZNBq4?5qFl4FLplFbaT2P`X2DI}fsz+%5OLSPT5Fz{yNP~)>aBB%J zaiu91#a`kg6fJsiwhNan%I|TfjKYE&=P^)X5Kr9=W`)%osAdo{YYDh<Xb#?p2fvO- zJ=LDu{o=vg-mSmICBOd=N7jCE+wXF}C$Hh9`2KylU;Odo+?HLx#WQ}D`&aHDuEmj^ z*zwccC$|5_cLROvD2R4g)$`0HUNY7g>pkB%-^)9+7)Tiu2-^0$J1w)Afov4yZ^53G z!|^bNe>kZBpPKbQ_DVv%m02N<up(<m5_$}K+lm6?lsRKDaArv&VFKLahmk(FkGtsh z=C5<F<~HIho_`>F&cN$(Hwb&3?s>bHK9}q6ewxEKT{H7qpKjC6L%`?HKtBXg89r$x 
z+zuyp501_#oi8sf1vi}O4^~R7ET^UkZ3!F5rGQoh97Vw0k^jq1ceoa|M|YN0)S7g^ zq@va+Y+Z#dod3hllC*sut~Ux>dfplXK+1V`^z8U#$IR%`=t|`>=UUhG>PFXH`V+1W z*XPd9Tn6a8xasnmrpt6`iaSCQVZSQ6>DaFjR1|+89Et^~IAN&@fdERBF^mR-E|*h| zrJeVQ#Uu%_h%9D6moZaXWPzh>ynK0iYdKe57W{9qEdFn0QgYbVA31LY76=^)Qm37& zjRLkaaIUeTKO3X9hiHYKY>-|*RIofPRkmCWE@P<a9!f<AUcvN+6C_YL)L?>)a>MGh zq76ow7uHf(dxTDNuMiY?`JN=#j{rd>8F8^-dEz0mY5yCSzxMXH%Bkn%kL{a!@p*j{ zWB-gFy8h1bcRi9DAncv++qL(-SCT3pe@Siy9(dhNLmX1~C0tGY+R+PH&cn?7XZ+v5 z-yA@ER>d4{4!??9&GV^>I<6r)oEsyZQ!=u2MER(S$y~EEv*f(W8$8Y=bth?mmluIl z5l9z-iXxC?sIV`#fm9Jl7l8_D!H=R)rJgP)<y=K-u&XvXA~mxAjQF(V)YL_e#rhKG zeDA#ATF2%3<*uuYOUhTJ)^nR2H|U#OHyhWNUz@sJzteT6FIKSE`X<uuNII;f`+$j! z`h?y5pn+*L5132uyEbw|ghWz)y>F}{g;RoGpwh#RMaTLov6!D@QZEC3ZPs>i=_boU z;r+YpC1UkWl{<BZkcgJVBC;g%91(G<ybO+sLM+lZY*Dhi6(muYAN6GpJJYlVj^l~A z1uw^&u?WsYo8{?CUrApLH9lKOqdvF~72-}Onc9a|O{e$u2@irc;k28oaC*vJByx98 z$lE=I@<M1}!GX_?JHkANE*Aa#J}hT|7j%Rc$7qXyjA;y-Mn0RVN3r62Dh8?VsgIGG zL4}x7c||&1S8ESY7h-olZ@|w77?FbWQJ$XptUl|vS1o&T^2C|Lau-cnwBYK$|Ll=} zuNU^Zo_V6}p@t#&{pqcjuRn4BE4hE%gMV*aeDir{t{O3NK{7BqQ~$`kWxt-g=#6#G z8*f>6{)C#EC6&W=t-0jjs?}fE;~WD(KkQ}W3pZH0Kw{A4h^2xn9c0zcxE-Z`R*YlP zpTiu$yKuq7{@ijf0U;Np!{3Vz%wLLP;$%@Q<ZMHlK2hF%Pj6oaIuvY_BiT<4mJ8QZ zOi-+&(H!E*mGGN#5kY_EneYB#*Y_c?0ch;Ni{7`?v}-y)U4B*O{gf2^U`y5V!{kx? z+436KW5VYyNkhc!AkVdniZ@M)b{gri(}+<B+#I%|%+bik{W$KQ=qD}y<^EPb=hs;f zr)U<c@xlPSEfUotwWvKJQRNHHg)I`*9+9YKebh|*UL<C+G!Rs9Ky90V>AVceVsJZc zTY!0>%o#QLmVI+`C*Jv8?z`ptMn7}ad%K0bC%1o;JNd{hSpR~XaB|0syDr?vGVV|a zA!!ZOb@)Ro-3O)3KJIj|0Sz`A+=GK-&}egvdvtI*I?tT$J}+q8E#K`T1s1O{a5$9l z)e5!R2w{Xa);C3%qMh%XE6mlF_*M(6wJUrs!AJcmw+x6UGSxCo^hc>a%LZN|R*d7p zYY`<`7F6gJMR&Sfn%CoY)7f}IUq`-ir+|WSy4Bn!-C8qzU=kyN5U`8A7^9#d%Q0Wj z>+=QOnxe#fZUEh;=5oai(`y)}ThZj8PjHzA;25eTa6!Z6QWRMxP^X~VZJJ092LfT^ z3<Xa@ail@%gVjO;p0qnoL++ta2flH;t*El$(70}Ja=OEz?%?>5^G1ByqoayW4%JU| zV5zu*IX$ko%X8`mu)4u%?B5SX<NhMly8s`^1wN1oKF|)g8U$Lh9itQ+tYVH4qAA7& zH_8b|c50Sj(R4kala<*7uJJIpr^e%kji&}&A39ux@dLRlUir8@JVeETFW;JwjQ0KX zrQF5O<zBCl0^Z!KKsQZy-SM|_?oZv}+*kj&v7P%R7}?pI;`2tIc!cqo0=Ci^pdSy} zW%U917{Y!B>ErI>8G`G%A@UGqh(6R==dSmtZcp5usCCoA30$-jwt8W!6t*(uu8RQR z=~WypF2$ERNSf~>RXVDiY4>1$s65m`pLmu$h0n?}9W$I$+zaqLezCm7vB){keF=ZL zOtsXd?n^!E`AyO$^$xy6e%Adm|Em0Z{txo|&iCA(@t@0|J3n<-i!57EGeIr*Y3Yz@ z3CiNzofIfgx<f-guMt#Dky=8ZTTTiYA|g7HRYI84M6nD4+a~HPonWe9I!T5D!RK)~ zbsZaqZn`}l2lN@y9h~M-9auDoM^QbVI8uNc6^`h7T;sf&#;J<JafEnuP(DcR@54Tz z&$wo3MC-t_o{g)U)lQXDJ8;LYS>!=Nz*X<CRI%MMCK?9~&VY+7HI71F-@Zf(kZorC zF-o=B;3uK3Y*!WlM!4BiD0hQ!3X#&>5BRxUlr9_P{k=CfUG8sYrj~7g_k=i@iRGY9 zUZ??6^C}o=aMO{kNQ1}T@X(FBBMov{qycc>xg$!Q_D-ub+Ta0GjDuBo`U8y~w?A;2 z3^r>c#{=Y`j$=PJJk#Ca&`J`gVN{Z6bf^>}6w*8aIOYk!F$xiYPO%CzxEBnzAn4Z` zjBD(O06+z%P)L1F`wXj-lLLq2inqJFN#<zo*3!g4Uv4uwNq&{vcuCX5^YHcE<4*pY zIQrI2jO8%(8_v#u&PVyvQ6;J;eXVLmSE@pKxT=p{Rn?#m_SHv*R*k93>e;Hr`l70q z0h{#oeeU)DJp6?2t1Lz;D_CAKg&qq%S-Cs(T;=}I!OFLMA63dD{5VFf5tAwlxBL5{ z`#Q=sQz=v$C=F(+t7;qghUzi=S=H0z=FEI~QD%*{L3>sExBl;pSzqhKywShBHZUmR z4bJMbtPklE?eA=I-s*hNnRf~gI=4B$adJ+rkR|_RF>n5u<)fLYPL}!P6lpH4(;4Lg z9puT~!8^RssD$W6VW!4MR;YuboTJZdV>S|*hEFBRsZK4JtzX$XwVbDVu$+#Q&`i^E z>MhZ>m(viegTAhuy{NosACz~H^DSqEMe_vX=>h3&X`z97U(_B0bN0R63>etJ!X>d} z?SO{P2C}6AHw0+i&Y({Wq=IGr%U=`^ilkI*5{c+!t`5r=5`)ag(P(8v=9Y?1=I^kP z8FAo{UTMH!%w-HP)>sx!kD_ku&V2F-)z3#V#T3Ni^($;swxK7ZgApXnBS0%sEOJau zyLz_N)mGT)Ri_hX>-l{?uRoAXbE4z~2bU^&xQJ_<yJzvX=SQzPt8U2$3vkWIjn}L# zX$xL_=!T6?PBfH2+4IrBh5MJy9JF-N!iUo(*G?Vv)b-=njrThBaCu6-xbJDrD}pO- z9Ba)!yWeF;Ph5Z65d2YP)TkWS|E!ktC!BUE^M%&~P1Bec&8Kg*evSprRW8&CBL$(U zw5^nsmX<|pqGv{zmu@Z<hk6?Qjp1|r=Y+FzR-f+5`Y#ACmM_v5x-Rx#9PTWAU;7~N zLFg}@uL56%{#tUlG+!Eu3;kXFy#s_MmnEFznkdW{J}CJo|DB;3J|`~{6o~>qs=lbx 
z5iCFCz=p$ev^ZKFygiESU{vY|76Q)4i$3?!V$|PG(Ra`kEf)Q!RM4Md)Ug_yHHf#P zWz68L;ZlTjV(<#K;5K{|^QE{6Prw|eCMhLb08UyZl)x|}7R(Gn?54zm8L?2k(N1ZM zUFxT=#z8hn$kHuvC^owO6tj-f#Y#G|2S>mm`2HX>@!-#LB3SkhT!>aAl3?S3^&cZX z10~BUI57Tu(zbBl$J<wKzi`_MEBE&oo?k+0r{1>a>0ey3=4oMX_djo)aO-QUa^K|M zyC2{A;?x@tyng8A15AHU%zw^xfy@fy8HJ3yTIV$`>~df_6to<1h<8UFQZULpu+t~W zwEYrmzofBnmqFVvF)Bas&daucvp+kCt~AMRw4&kC=y1>Qz+}(lK#Qj(@N@EW?q2;- z<I%7t>mhY9S;Q?CF430jt@>lyE@ij6OVj+?dhM@-bC%6=EpuJt;#?TSjJ0+E8=q={ zYHdba&|!2G%nz5#fxfR}6slhCl$newi$MFz9hp+F5iw04qA0Q$e$HZ82{WvWiTcV9 zO1M;Nl8EGFkvo-MBry>y4UE+8FL)t<7h3>iR~F`9vE<Sr&0Q;xWx7@tM`q22{zmo) z{Fyfgm}|xXJK3(5&7A1*+$iq2#_c8F{PKg`zgB*6!!v&@-4?oL#>OWfy>9U>`1-)J z2XP5jpT=a}wud51E_&(h_xAmS^5iJM*~fP75T0s1suEsL>9zU@U8wWcMb9Hs)N{R) zqYKDfVV*L_+Y;?8eMflL^HJy%&nMn*0)GpA!g$YLTAB${h8-KG+$;4X<$6E=P*SIl zB_s7w-Z9bh)M@$x{S)ys|9AM9)4)E?=`dWtlMcy5z?7ULSc6f@bfpaAkckb`GF!}6 z6PT0Igw3L+oAQpy6c=SyQ=}ARGL|)&b47dHbkZI-i{mUNb%oBP*fCeT%U_fZN*_yk ziKi%<AaPQRksOoNQp_erhBl^}B&LC+P^@-h?~H~O<GQ+gvmKj8)p&%tQgr>E)96@1 zqK?XA5Xd&dfa|cgSAyh_dHb(<_mai$T-$PI|DE0Or!QIai>+5&_R#tVZaVSEgP7Yi z=?vohZWM99@!GFn{@{)M6lY^Wj>dq`e1NmbR-hC`ePk+^6|%}y$2@L{uuPffkbU;7 zG}Zw}taB+;5@p$^?)Qc7yvM`*K=;tlz~~w7ap5zfliV{y=SF9{mxgCYFB32G9Vf?w z2J&N<9tceIQ?HcsM_rqZEe0_RJ`z<Ww3j?dDYhujI)NLY-woi7J3PQW0SnBqKQcE} zFD8uZ#c@$)p(zzrwQV}q!=-dMIhC%ZTk8y}YD;mczs4w+tn#YbBBtWKFcoE(vKcJO zaKy&WDW-Z$#cXC=_mT0&icIGCiXL~D4jUH|lpDKOG#0W#3yES>i>@rPm>m%JN(tr( z;{?lk6}b!cR)4+ci`+NZ`^R^&6QBHC-EsY#o4P+Ble8hzZn*LZJT34@J1zxrui?tv zpK|{;;@kEv#CNP8zVI<7Cq00~R^e?Fz`7OlD%ch39~uy{Ld!!x*Y4Aw(B-gRskenX zLwtz#NM*RRwnWxB%@tL#k7T?ao)eM!pci}d9*Ym8c*K$0F&o&~IdDiV8{o-AOKUeH z9I_~*ge)Bx1$kNAq>{x=$|&og>O#ci?}fmsw-8wUlBpgx*vck4b>zQeImPIaVCZ?g z7bVbftRnCPj`uQxvvl0&7?}QDMpsvs22~o_JeUU4PAKsjrl?4w40e*CxFg6ET@egs zbk(|bI0Kxrvc^o-)zsD2)1VXxHY(SAG*f@agAaPb*RDBdW@N~qb4MIJ$lZI>iY2w9 z&U4?dj%vB^rjzr5C(g`G;=Tl)h@mRH%xZBs1aGw?<vqtS(km(@p^|Dx+FPA$a18dI z?HJ{qCQWxNbbP1&)937$tgbjcd3wb;6`QNKR7-;sgZng9k8+GkjO;TdF{RHUX-;BJ zpO)&@>JKVDPkf#Hros&PMPCQm-d-8?NKBU)aWsHwl2+7-4k0jJ$yL@MAsTh5Bg>+i z>i5;8YSdIPcqo94fE8#7v<CQU=yEc(nhCZ56KsJV!4_bGjV5WcqhH#Bjb25QwhMwS zKn;VlSswRl7fzwF(()Hw2VEb#@-DvA)#RE0s*drDD@<|gDx*(xv3Qd`<-o<Hn=6#5 zUY(!<4&1u$1>CVN;|DUX`^a&c-G8Kz;D5vp8?6Af6rgEq%#5r6z9F`x3)B^p7(Bhi z-TZBiLBm&HwK3?#YuY|Mdhwe#KY#gS^FG}2t1s_)?5ZobK6CkHTc?L7r3THNQQvkW zZv5zOjBmQT_2lAj4_@{ZSM_G+i*LO2@=KJ5HXy{&yf!bMy$AV$@O^<=Hiw8A*(tt` z8_Dg}d3I=MAXFQW!8P}C0!FT=AbB0CmQt*m!L@k>cPiM=w2a?kiG-D`^imWk)V(lS zN+GlFlrX(cp)qcT62(hVqEHpX()ei-BHKT{o23hkX92=M?cmxr|4~0#?%(2X^XL7% zpLkRDK&%0EI7;WX#({DUBc6?26$U`QvjU7u`C`IQui@D5Y?}}f#+HPco#TC@C-xfS zV)F~xm~7@)Z^zZ1klARPgfz6oxYH>*rIb_DB3PG!FA?=`*CAkPJE_Q?$Ydr>hDOn6 zZfL){bImWuwqLSj;?0d<7XJPA?4$Q}&ms?PxMK1xS9L!JJh>4N)yVRUBy_;KKp9M1 zFhSX@Y*E^jPUU0es3IYyR9UXHDi0P89ai#+TB?ARCGmt)MDA*gL_y?LQA!Dje~{n8 zxAC3)VP5RykMacZasCkO@w{y>lc{`9zwxZ!c$L1DXOfLCN;bY28|En!sk9II@$wIQ zZ)GE!kOV!2i@`Ta@+((l*aQOTm5uG~?fl;k9z5aW(<eTldGinFCgGv1&2IFrHIf%n z!Z5x@ST6_xSr8<iC%oW6Sa%T4tMR7bkZ21Xq7*e<n?a%n0>G7eN>w*IaH*roF~Pyn z^cSn1wy2N}!<+>N^DG@P=ACG?Q4X2;C5*QnA&>W&#OU5!&A6Cm{*5<iv}Ofr8b|#C zmhjipKC`A~gCW~Ftxnl+rDa2nV8tm#kWCSEN_UM9+q2wgl$g%cS>K*pST?w{esFuu z8F!80zj*V_e_wHrbIk4h%oAJokDE*R6=;F`mge4(*;Yiftt)YwI78uF`agu@BBvD7 z&Fw)>wFoFhfMrFnLDH$*r7CfYaSw~~9o^}!pi#f0?XYzV>;MY`9<{E6BO=cWyjZV{ z22_ZB)#>V`+$HJ<++RiMF%c)lw3Lz?#34$PK0$Bho5kr;vvL)`R=7ubS$vCsPdp-i zA^l7Ix9oGPs=#qP5k*N+WY|$;IVFi+NfJ4pPYJ455L6WimX|THzY7N(i&VY?yDUZE zS&XGjrhADv^A8Mr7=JUEK#mk5DexB&ZbB1)b7+p)K*swF<H$~lW0Z{COad^60x{<V zg*5%oiP7_W<C>wGjTWsq&PIze-;aHQff{I_Su=EI9c+VaW~u~emm4`&7Gjn9SOu3V z*KtG%>NHUUybpU8o~0_)B@K#PQqm~WAY4fUY~I-sXPfPb!dyF+C$j<-Vt8V{b4P+D 
zbnNic&7XD{4Whkadm7tpcNFt%upRuf+#m5+_WI#%UT-5S=uq;GAiepk?U9DU;8wF8 z1EIrQOkUX`I0-luKlw#&F@Et+xreS1_MUtmx8>G!&n2ao=gy~ec`X$6j9>q{TVMi; zB{$X&v6CEY>+J1-f%dk{PJ*;jpl)13sqmoivA|D&;;6ut3d@C7AusSC%~itL0-ipJ z33?w`HV-1)363V|E$zSkUfP%RlJ+*PZF@{E7-PlZtbD#Wz*Uf4Xgq(a?4rVp29WH; zNX+)=kM_*9?JPyo)}dk=Y^EfBg{D6pD`vADD}ICE57sz`UYp{N@JEzC2R?}l?+VA` zBp}C=N-z>vI4&8Biau&mNLWmULxy@Ng*T_Rq(}<nm@~E6#3s*twjlG@Sg?-yY+l+( zlg&z}4K)e#+?W_MS(wf&=5UzBd^ocMXDuz5+8n_V_P9vT<09;F5t>tI(#J)ZmW(jh zJ3@JhDd&hr9~>#h|048xene`LDSQZF8U-Mwi1HuD_^;$g{Kq6N@)tDl$)b&X%<?i- zY@@<yvt)Uy17EiDhZ5M1(R3f_6}IUW@PnOh7D-<L{y!Mcpn4b+n}ObW*-p*tNqaRj zg5A2WsHh9h3!T}@hI9jL`Z!b9%#QA@xE~ty*y1&Jm0tbY15fTu&OCkj&)TQYJ!joe zK7GgdSr<;<yKQ%O1-bvCSwru5wEHfy<Fd;p-g{g3`$e1UQ=k_=zRL0loaiB2jSl0l z+-IJn+;NY{Q<2*U1hdw_cN>R-hlBYbAD6vOuip(e7Z&}xsyj7jd63yzL1uF~n6>3# z)|R8k+Hx=^aFnqtXb&@M%fYNI*#EbUP={KG$Q`$sICL<B&Vk|Y7$2le7p9h1@Mw@M z4{iyz1v`U$kRvrdKjY2g?WS2sf&GsLm-+*P%j{)v@dZxowA?>5;Km2oH!m{ZhjAN^ zF(d4hqjvHyOOgW}++&6LMN?5_RZ=<8NSmTFf?cY+fMl9Bzk*6(hUG#mw-<^xJbcMV zEe}mJ)b^?+XRUgiPv5m|<nnQYuIgSz)?d8zjN9MnF3hbNk^h{p06gj_gqQ60v6=H8 zI%>eY0?L%DC=_BR+>#p7MvG_3)5K<Zfw)MPYmK4qq5ityNMo#ftbb&1rZ7`E*T}lF z{&Ry%g{8_|W2t+oe{S$n>{CQRKcAZ-Oi|C*F5>11^VEwpH4x<`6NHAhJi@%62qOfk z=VKcZ3+NPrfyDqU0}2^%N10Pv$dY3~r{yV6)eev_k_;&>anit#K}yi$V`w-5U}rhf zoYe7Uv)B-eH=-!RFbg9T_?Af^#L~Nw1&^cxfuMn58dfNL$j8Q=u_BW_p6!(=)RSFD zV?I=IDU*fC%7wy(3Qx5Yy~M*N$fCk@+1_r^h(~Yu&4<{3#ouoHIM=mj$A<Mgc3!_> z2l3#FTh`?M+<oA0Kfy7qzw!DTZ~o@>*BA~r<QDM>z@Zz(@P*bbn$g!d%@}L&P4Tuk zDUJ8hk|l$DgG$aUSsvdUmxl(1M$QhL9ch-&*JcK0Mi$FUv_;0!z>-L3{B7??!H>dk z$BuZ9#16;vaetD}7#Uw3Khzk-pKZ)AK5_i5Bxg8GC+CmS5W47(IvvOvDnF!RL$%Zv zwN>Tg42f~8ke2YN<zSf#!D5<1(WvXmg0{oyDy6n0OIcWrJvF4pokHk8N6w1@bE8+l z+&Cp*e*A|4b2g*}Vv+^SOGnoS@hK7WV!-@|;c^x)Hye6K%RNQG>i2o+muk`#Cf5sp z8y+2c`@)Tf7GLu56*F$_XFj&(vZo$jy=r@Ik?_K%Nt15M-~CAL#Es_+?LNUhdSL(S z@4o)p?<o$?$}Qp!0}c%o#e=O|93(@kg2TvIvQ`tDd`+RTq0O-^F`?E|8)=G-@QjE| z_Dqh<@yv;|#9CwTi0`^T6~EBF3>tk%nU?W2kUDJ)8Kuo2i^%)hhrz%4zX*LAIZ0fY z*S+DWLvo7VC=ZAWIBO7%vbwP0vRo~$Ru>;*!H5{co{I$|Ts^@E7YjzXSTMrHlqm}* z_-S9e>=_TDeR&g;AFItDM_J1mFS9s=#NrT=-!^{js9j0y)Ck0Xh_H4aYy2@{&<bo8 zqTz!JA%|09tJPI^O?@Hv&9b+z{>_SqyAw}ew(7BMYc6>tw}{BY#^Zii+LF8Wv0J_y z&OLMBz)P>Z^WH0VuFv&=oR<MFCVJHx*588-9w+%)emFmwpU<!6Ma7gAS<yYFq9abm z4#pHnRVp{jST2iuu!oeH|9k(w$Kw0eGJB~TkqM(y><Zh(7kiod<K3h8|H#EZVq}l4 zq_fj#=QI>Qoq>#3H#k{Be0C+BU1$@q9aob;U0nb0>5H1qzu@#U&m4AvH^!$QT5;CU z$16rRwXE!Z$L@!w{O8<u=!XGZz`BAj^Og-&&Q?a0Pb-^OcBOKQa$Wgjo~Nq!ak>%+ z2Ll7fR=*bzB4jEd#vrT)XUa2`nd(f(Ol_vVSYE6wRu?-KYm4>vbbE!1rk|Df8C*U? zZFbB}&#hdYTwUH;{!{fn?e@yMs_z)^sQQHVNX4U-JJY{O`zwnUX;~3S7J>32P-%OK zg=;8~ECS_4po9+jyJHPA<cgH0^5J;e$2<C!glPz`EL6?L8A45=387h`ZJ~o9(G@BU zEem}d;!8ufhDhiIz>W_ngGIe9FTLELQz8s}2pm3aU^cI4r`KO=N5h?FEyn$3mRwXq zN}@iAw+EzIfa23)5aLtILs85}`#DO(I9wjGJi*#Q^!ENNZX2{q$}u7KO|ej%-WiJ1 zyF$!?46(>}sDqrpLn^O=TX#hp4preQ`WAXeRWSp%s>rJVe96AWQWa*eN>o(UwhZbV zM4AS*4kCkS1iu^wZM&BdUEJ<v5bqSA6^OG>Wzb5*%Uw(eyIA#H@q(22&WbbH!Dh`A z!i{Ag7v0;?z(SZF7`-5l=?A6^*sL617zoc~R`i<Vow3Kn*r%vguz_&u)6m36y6Ks_ z7_hA<tD<i#DR`^Xrr|a`1}B#3@d#2Xr3e=KLJ{-AZo-+2pt7W{$$iuauB=d0F~dht zsZm1hEqlfyD_EjORc76~b*Ps}qH&;X&j(B@(iQzkUG3odA0<@6Li5O3B(!OV>xL_? 
zysR$u)0gj=aK@0T+a_Q2!VI%bTeaxQ#eRSP$aOE?HEq$$R~>vGpB7!Ra^8s3lEKuV zG3&;UURzn3IqQlA!E<MxTc3=Uc+~QmGp?LD<H7TurhHnS|2wG??m+=MuTiB__0lx{ z(P^ClKx+sitm!J|kl#=;E)~=_=WrQis0{1wl!o(?JW?6iA}yC%rOgtLz<$~ywMm`Q zAxUIY5(;w@jxj2dXu>5MxV8PV0#KN(@ExNLYMD{}PeVKf1I@NArM+Y^3gW@r=l{T2 zWgjK%ZZwWiDc?o2F{zX{Yif*F?R4x^z#c@SBLrqW`+g70Pb5b8oW=_;s=n^Jox65< zGL^B19yCs$_b{1r6P7N@-F#E`Psde<nV%1`>@fY7J)W=!h3V)Mc<?0d@#MW@bY@N0 z@7w9vcG9tJyJK~1+qP}n?wD6>+qOHlot?g)=XuY5&%5`B^ZAUCIclxath&~kHIge; zzyF-$Q-N~SnnuR;w*AW9Qq9oiH#=)Vwmd8!N{n1<;nZhppo2<~b!zUd1a~Ce@7&XS z#USlL{9Xi<IJ(g$zAqG<=P{n&T)=E~JyQojOCpeXu}H`IG<vOeJX7H<D2ha#rmN2e zl7vR}f>grGgpTEKgG%a0^=Iy$r3}c&Mdeg~sbb=Bk}&fJbn!Wh@Q2h;`-y0{(33*l z1}nH6Gv~z1Sb~QHE)j~QNOPg6Dm6^X%uz<#C>N@+(_g)#h*TYqy3n~A`tO*!q@v5F zJC4;nysl2EbLLs2wsd$LP5_I_rhA@>%<}jB_RXP>It2Dsd@~|&!N(sKOA@9o6Z>Td zgq)W-;e3+weB^sQ1B4G(c5P}zG_?qck(35YnRL~<lIz>2Mm6W}Td@YIp}AbfT$0z1 z)KG#1W$Z<1RI0uo^8JCgrmE^pC1Uqib3Jc~ym&cmi3|^PlydQoS)p2X)%eiu2%%e? zJ!UJ4j>O_<3Pnm8_7zxNcALN|xsCjVs0&q7scb>W0{S#y*7;uP4*MSXuIdH#rQH2H zu0nYphxr$NTOv{UbOCZ$9<|5*)dCbhd3E>kEtjYMI23C%IfCSdkW|=AXWC%Aq^P4e z8cARphgsmL0Ok(10-O<?v4l@Lrs&;Zp*)_t7-gZHoN^aJd}XD|zs!H7S6)R}T!}p} z=mTqU)NrgWQ8|blm%VNbNgb5%n<0gr{tn|%ijAE62~<ccU}MF$=2WclN-rxLqF}}L zq>wv3LVH;Dgr$;jO;d{GeqQ%&Y+bmEGoBLGxN|k3EmhV<r%87uk;RaR2%d8Ql3+(I z7oJT=K2zi5HyULEzq5L(>J`hTUQWrL+Hdxt@n++kBTJW6V31sG3PW{E9TtvA=9;jY zBma+SmWMFpiT*$pHwBuXWtMs-voUVC^qX9|?R~$~&=_OYUc<%x5Vr;Cs&-#X_APHc zZ_yH%_8tyrp>n)-BYee$W8~9ISn^4nVb<Im>)VsjSq6wi5@R*C5$QNlLKK{_hx^=$ zN^tBFed#jd%l0DyXba*(fAl61*G*^Q2S*HW`?Lxs02_yGlTLJT2cyvI76W5N3pYRP zsx6vcb9Nz%X7!ZwB|3W5j>~KxAK?V<FC_om-)yXFt?s!}t(>?@tpzzpcw}|`*+Sk@ z;XUMY@jBoo^)CL2YuOk*C?Qmj{Fth4Qq;!ZjJR;TyD^+d!a>p^FLN@<K_WAxE^#^B zWwM|kUIhHE#E>IBq#aYjIbNU6P<(Y*#xxNzPBkI@+q{hX7m;vCR4N{+$Kw>vsg0}~ zef0D|<F{?R+IAet?<pX{N=8>oihaxigE1Qv_-xBG1{Kd`W0*c8iTlllts9sEH&Fij zAl$`{#VSi#jLkflN)Nr~%uMawRH3>f6JxXr4ks1v*0F!?zTj_e>$IFX()T~u9HolS zn@%z}{qUORpFmLQ6sqOIRt0{mkAfi6F;z1h@B5Y}_aWFqM_@X1YsXyn4)F{fG6Be$ zFH~8vLDs-N5+~|<J~L6Zx49CMK859*_nEPdCG=XOt*ElNJG5df>~1_i=$9Kp;+T9& zaA9o+Huww9%5~1Lg$>rs$p9<sA&ruE2D-NR3~rVhu#y__w;@fEcCRIE^tZ_}5uEkd z;{vX%!{tI;g+O;<T+;}3OWq1oeI6IkHLv4uJ6YzLF$Dxck+;3X<u1-`*uPjR97B7A z`aUMML|a#IXjrLI!cj=&xPg+>CzWhnEiYO+WSOs7WY64cF6uYd?9ZoozIJxw>wA0m zdq!!ldp(|l55FPf#ZtLsaSt0tK|9WT`c^Pp;X$_!ANIn<k%f!>#5J8AII~yzgdviA zFv=VT(6Tuv1udrrvoOrl_c@XI6nn<pYO54aQ#eahx1pr`v4L>-F;E;5#mPNi3%9i! 
z``R|Omr-Oq<ans{WPNWv1Sk}Gt=wZd#2y4+ZyU{c2^l0STYL9<XkK#!bt*{Zks&}I z`=O#l_`&Ys*-38i{Lx%e17n{umJ7L0n__)u7=vAQL`b{firG<tK(vs+_fZ#puXu@K zKch>re*(hh95YkBII6g0Dc7<pFTllZ5r#aV{D4`tlgu=!B|S>YuM^Esy{_}bNvVL5 zOBX2{X`sb7iguYYf_B8B@uY|^jb?(m`+APViZq60LmeyJ#-uvijnslm@^u<5*DoOH z;!=zm!}21s8YqPX3x#TRI-ztzu*9-I^tT0g$xCUmfv_g1^UkUL<J)tb_sq%s<kq2e zADj1G&-6O_XL)ToP;^1_Y_*3n7odx2RjX2YRs!&%@m^6ANbNx*DF)CtT0v8-BMDqg z{-ht(BN|1cS_6MN3(`fx<CEY};l+ov*BrqG7q3^xtW7RW=+O-I^7ZODEX3CZ&*W^5 zFz6!@bxf>(Y;_@y;M7@|xOm(=y;$PeuyJ>9JuJ>hPFOi^Oiv!rnNT;`Bq=8XVApB} z0W5;}ba6*WbWWUzx6a~0Beo~va77|!N;wR3gaI#it?>%D!|(J<w=&kzy%=~Ex6q=S z&1WQxRQ<r4FZL=i+~#P^8NuNFD%uLg^n~ita7`0?<bXuAF-yrxNh`^-DI@h!+&_~^ z7okMEk%m?-mM+#)?!(PP55ErjszLy8s~?=V6hjJ=LRBUcbn4m)GR88k9~*BrReUSP ziDoLJ8uTg!>oO5+Q}`i&hKvLz)26&%jQtHh=F)SiHBc>SY9C|w>^UWIjnO#`saQjb z(3s$YCuAP5S_X!ZzE$rQEo@TI-IJQ#QZcRyI*^z(8Jjtq!nLk2{K<_bHcN{YN31_c zQ$uJ<J^(DLa#aWxvkY`en_^Zc&#=}%yJp+qu|}D;W3u95D-*I3`HDX8CE>6Z-z;dx z*e6MD>-B}SV3E*xc8`=Lek=%htVf?mfv;sG=t_G*CC?Wd>RIebcrhbZu%UyVp1US@ z#4F{L`V3?0br^Sh!zkmKsJ&#R;D4fsTn^S8yGKO8HDi@wsTiBQFi`{G+pqO<#b7<{ zJ)Q9t<pI^j)(><a0u*#3Z90=|iBsH7uQOgANAEiC`t>BU1FPBgmL9aa9d!wE69;0m zOh`_#>Y}V3v+q^Lf_}`XSBIN6rsZ-axqp6}7iG_9sKlW<2Gx(w=iS9wuhLYqIZ*-| zVsDV1)AI-h7m7;B#s|z2%rQR`O^n686$(P7o)qCpb`cVCX=p3eoshHEkg_(IGr$!e z+b5(+7&}npxdIgiN>7P48H3b>9#pc5O}&j*9|E<dk%oEV(1l<QCO85&<`cGg4cM%s zuSBmxF0;I%d5n8+WcO@+(&-625YS(e8KQ`Rp9P{obdxRm;izF!8cu+<;g#@>Wb5@N zee>y(O4Dolj+L~v!+-7aW+d6sR=T`3Pxgwj4)L-?RIP~6)o(pfSV_j9k$)Pcl7^Z_ zF{gk6D>5wNeMVx*YiJCT<@I8&-63v&xG~sj5_aa!`^4CGS_&O^+Bs80tt}Wia0V<$ zNUNopswCrsa-MaLaFONabtuF&?1O_wytmWAl<|YXD`w+sW%x*_nsSAwww$lJkJ^ts zn6}tuA?qO4wx4y_<si#KD$aCN#BrKMwCfiS8PFr!pu2FHtuCzAbmYgfow=WNM{kf& zMt9;MK^#g$L)bTKqnAEsm8ghfH8s2qbT(g@%-RLAQ$_i?t9f8vzg7{oY&28naC+7j z)+}XS2!aKwmx`3Whxzt;H-Y)l7|Q4nBx^B2Vrq;xsWY$_r_Jr#g%RNMYqBPelbfBL zT@36#%92=Z;LNF0w{eCXjW#yy)Jf-0aG7ov|A(;*WBW55pLc#OKwtPFxXniFO-(f8 z3R5e=E_m$(*b263!Rfd)0x7Kd$mqH*Asc;)>zLv?F=23^IpGI#49BtFbxi^<DepmH zTfdgfD&2jXY07-jnna^Ih;7H2;bQITw5c;QdYm>|nkcLQvb-sAKP{X3jJ{Fh8EN^# zAdK;vbm=sNX+5K^&h7d3gZ`X%9XMpvx~d8a?X57{G7_}I+v!1i)BHBRI_<l0l%!O3 zPmQae=i{lluF%3PNpJNbNMFv(Pgo18)%A=vYq5%V3<O@HCOZvSvyaDq*`J)G*=Wb; zNY7PwLSZ6BL&WYW^D#oaVPd909o?Yq-TnT;!OyeTyMI)DuBJVmrX^Vbw#N{uTvunq zbkx0_8Gl$iJo9zEhOT~&L{Hf7<rH#<Rm<EP+v?j!1DZ<uc`(md>&?7dg!lF5&aj%b z{62qVX;o0?z^ASCiilNHiRk!y;oM&#-5}hEAJ~NUYK2OMiu{BpRC#Pe?vusv{@#Yc z=q}HM$zw~rNAm=>3z`^%$?Jz6b$|7()hDo30n8VFkW1eaGF!Oo(qNf&-UH|l9^s|X zI~Hk#tPotQbV(c5HOS!U&(7~N(Z}p@P57O{oBlQlP4YwZI2@kHbMI)kqJd7GNMrU0 zeHjeekgZ)|_Ous$p_(EngrjoB&@;hiFg5qg*G=G)NJOQ?RN|4=`8Lpa*pnvxks199 zyE@?syA_T-j`Qn=AInIMI1QU7?;TbN=PAxHZ%lu#$Nk3*qIKHPTC#_4np2EE4P@M^ z$Q~own>zSLE-6$CUgi(IK6=5StI;I~y6D`Rm@l?e!2Y09cFMId*X$;HMP!#|S3Iv& z?r3aPSI(wPz#y~g9J3C01JwP@k9rLBnE=T-_OKz|Qc=3#%|wnvt!r(-dEiYpxXpkJ zi|u4e0|)I5L+IO<Uo^T4DLaneGN75D*O$#*@xgBdf&{zsn4W+Dkf^^PflB-}=<!%= zspfn2Rs9#CdKp#WU{kfJDtuX0A<K1H>ickQ$twKdjshEM;97IBFM8j$U^@`}?rVTv zbct|etIOCHX+pvSEt`YI2ta<tDHCxbWFYp1A_gKFZkUkme$$7t%L*|Sbl$zvrwKa~ zpB!|7nYTY&tOwW-xkPW_ir;cA{dRLzQnzwbKVNLEf6Py2rYo0l5H+)kv;$N=C=Sy! 
z?{NF~(iR4GLNda+E3s1=3g8HU7F-lSMqeZOoDV1Jek1(l*Zn0tE(1xdH&y|gW)`k3 zv{rrxe`n4asEylXvKSqxJQt&f!VrXTH1zaJ^G<`4%Z>vAYR04w!VttB(i}`bJNDET z*cEdmia!URhcy$q9El+yMNmcX^Myb@=p$H(KqhTHGWU*64D|&F?G(6>(F?EW@%S~h z;Ss6lWffTyLh;mJy$pKAt##XWiS(Ebc@EV)-kb3Px<pin<?eewqAFD!B?MkRaxG{r zwBFBSLJ4$&;3v3Ua>8hO{mclAL&8Ch2v=-7L~{hJd}M-|TgY|k_-x51jrrJ&PRXra z^NLxQBRY1UjE~G)pWmPT&zSq5!&9?4WKfC_YV<ht(f*nO+EF{0clXywA?Z*?vlPrR z;q_k=IpVhzfgy@?3`ZlmP{K_e7H5yB1V|uXVIKN`4-rp0m!y9>R~Yb$yp7oiL5_sl z^vkk*#vM+Ngnp*=ku&W2yip`#T=Tt=OGG|_Kjij@es1D^W$Scdl8C(FZjcj*&!TJ7 zP$&f8J|gRg{q~rV`1el_|6?7V6t?PSXG%%M&j_AsP)f>$W#X_75&cuKGmNnNgk^6e zkHmf~aZBVdP_c1IeNf`xam(U$Fw%%btJl>Wr!4ScStPYN@v4daZ^X(H`Xs~$L;^J8 z(8%Rz4vV8JuAx~f!!{OWNenSTrZa*Si$SidvIIOKVs<IP5}o<=_A)?w!h1;*W{>%Q zzTj6>$7YePSnz#j&4KjWK%7wvN6e)5Sje$)tVFCtGG9+xK2aLYI}A;4FoYxN>n%=4 z%s>Q3SV9oV&a!}j;<~4fzKK*iO}o&gb;t6t<X8|?FZn~IO_E!I5^NW#&d&)Y56TQ- z&BodgEk(ifH7A|bkGCRNDFS4#U^swCvbX4AU)ZrHi+Hy5lWbWY^-a?+-EMW!lO9=# zIJy9G7doKoepg)A<FR22<jo(Eq-1YOC&aY?RpgWU$M6@-Xi~Yic@O#bBsw#E$a|fc z!#|Ms{*STlzd0%LzEntw1z3;w%E;Qs=5w<Dl*Q<G-$FX^86cnNxUJ2<c5vtN;nq-u z(@HBZJuj70F?7^C{!oh0Pe(QfxFoEmE~@wLN|msr2Jx#`^p7jMKFdTvyZXC;ChYT8 zN^lcCgyZhY*D6;aL4Td4M!)@;vLA~wLaRmsAQl%H!Iy(1Mt>C}=&tgiE$Fpz#>QZ= zRm+R~O&8<KqaBW3(5zrue6B@P$m4B}UB9&#@u{L>7oWOR6=Q<Z6x&>YyV|C)JNErt zoS}jC2Tc9Yz$9#kK_EA>z!v+vb3DI`O?+YMQV(|{(mo_U@$L4UTVx8JCnpk6jE5|s zcx5J0t&7kTxlXtk+WOd|lk{*Xrrza#P-irz-y$t`G^T#2Yb-gOTc8tvNu!T!_%Kq~ zT^2%mKVs2oT*nkjl<-`9w(q_M{DtF0FX}n-R2+2Mz<oObYSw+K_+Em6_auov*qszP z(Y`}IA?y?rVh%eZxjuMELZVrw5k6m&Ed5-g#Gw(zNkXzEnhVUNWm3&0^0+^vIeaOC z5>&fN(7tC(shv2b{BHeM>7o+cL^;l`2huPsu8sPTvzyG2Gp5t`&#lTXQ1I$K*f=XM z%d`oafy(z#nrpJh^~12Ci|gmXM&Y}Aws(z-6=%FXjn}=n-49z&4ZCHw#c7v>ZC<qX zUpPMkIu|cCYk4dAGEJwk)FYaLiT13roO-IyPp98HPdIJjRmmhPqR-1|TobnOp{)&G zq3XOqS2|bgSv%>ro!S8|QvS&h(UTl_#b`{6OEhjlacE6-<zXtD!GcpC?c!&#?GVZQ z#MU&{VQUD~N6FJP1ZaVWDA09)2E}{)sgK~^cJZ71ABDLt#&*yk!Oo&C0mV)p?C&wV z!P-J?X+3ky2=862Fcwi38htYaz7MWODjw}_=P#QKazD3dD#Wh;IC!H;S};0Mp0_~o zq%g?}q^cNtM~zPz&Z2%kUT<~SW=CT1aLCwFvw_TnBG8#${$56LJdIG6(RT^c`jL-F zt(yoU=12>hZS@X>R&z6{XWB%2?2Iw%#)&2pHkoGNZJ_M`38Xeo@(;BC|Ap^o_z%UH zk&%g&{y%VI#(!YQ|6%$53vMi8W9|6U$icwg%+}H7i>)u<VDOi!%t8MJHWt#e{THeq zpN)~}FP{HjWi*ToU*P^P2)~H~6w}|UuPy}tAr<4(u(Gn?(=aiz;4{)Qa^Qbqsaf&A zn8kl<$mm&pG47=Vq~wJZsRZoJ^em+n@#)0BV8mty0@fy$Mo?b>VFgDcE9EcV@E2HE z&(O@;1fS{4ue6@)zqA7w>Hk88RsTIEmM<^=^!z0YV8r+ZGX4jz{8td*KOzQ<f62*z zr>lX__=S9CWBKAJ|6h#h%h6x^Px;^afA9aMe~pEO73$0WrvJnK)8=pbA3rSY%>VTD zpY(4TD=Qn+mwkPhjQ`l*Yt}E2Guz*>edYb*=WB+4>;Cn_{&zfo>wcC0)9&xs85#fS z<DYha%f9kBn3?~d^1pTdFZN$!r>Fm~x%_)>|M+A4JBBY~`5MFD^soQ_YV+@T|NTt< zDQ9E<S08`J`|mb?^Z(la&Hw))AL{>*_y48+-@5<5>f?WZPyaq6t^d62|G>uoXK4R# zG8~FuA`}0Lc|kn~qrV~fKg03AuLe;6S}^4Qt=;hj3;!pgzV1cL>>V71O!e%)$lqVR z=>7YK@r%|@Cv5#C%<(sNzXI1l-o{GL`ae1UEjIs8N*iAj`paeiFRb|2BJ$4y^FMVz z{u@@j$P?02L22=+<I&2E?n|bW$0~u@YL=B|Lw;2-uX<WJm)fX7fQsf<ZvJc?1Sb*7 zPrqG%?A#JXQJ|TbSi(jKgAFHRoHRQZW8P<D+h0S`kpZC+37;)=6uXZTkMG`}pM1w1 zht`L*huIDn9FH9+dV#TUPDEf75mi>F{W@`Q<j*vLI&`hx4UgqymqE;%QL*&K;)k;q zUf5Y&2(#@G2y9-*ENs-z#T3nG1VXHW1<5oPHX3g<pR;%fWu35BqI2JG$6DF&KcV&k zDOzpr8xD`y*!Z8A`ySP5wI$~~OV#CwSWT!mfGR7ErI8#~FX(4JflUvswT5$e>$OLO zeL8;LVuyjUg>el{Ai+I7?)o|hBM*~G8wOyv)SR!4C-nKJc~rYI)uwo+-ettk1PB=Y zzwL4A^YMV*ZSbP7BE(t?rOKo8M8SMTAQ%}5m?U2D+-vZu>Ik^(`Bx!(g4Pj=3t$q% zxama$J;|Y;<yc1rfQCW!%thr|u^tK>S-)Ay?aqsZx)96Ea6*ls*BnI}ih1cDQQQGd z4rtGZbcM)G8IaKmB<L+{^{(a!^d0f0&fwG|XXyRJPLCipKwAx*wsMu-ed#|m#c9Rn zNq2>_k-!Oj;hM2H;vV1TQ;npYb~NQfZzz~bla(XB{`SiR`j-K9dXQBKYzO?7jV|0{ zuE$LE)fDofo+9)r$h%U3x?+YagB{SHg0*Gb^^je_5$zrK0dfQqE`JF1ypUknBXZ6+ zP{JehBZ^c72>7}LU)Hf18dElB#%h5R-aCwN16(sb(l6qbSi%gqBd&Uc`?SEFh2^01 
zddPZO|J2uECY!lmwBx0~UeROM%s1@il995WkyZS_N3g1N?{IZ0HLopHyDVWLNOV24 zJRu~WFkQn*_AW3sQrz*bSk4^26263UdYM5-#&?A7$nVHr(yYX+x#Fg*k#Cff-P$<c z!n<TM4SCe2+5<#-%r$fyEvB_mf^a#)vqgveSbcty%@i;DZ@5BfhhFyTAmge@Z5lt& zhorfvjq?KCJpiNc7c3mvIfCq8(cG~f<bLX}2UdS^v}xZEc*XPgC2wfT^=yoNbVFMB z-wfYE`tSPLy7g$8Q8z-ro}`c8%e3Ad*WHg7tas?FKE|s4u`43>aM@kDFtNH}m|1b> z_@NQHwBvED@d!%}E<dfw&H?Z7jZrJ~)xf79uWOIpuE<Z`Yt7xqZZ5DRK3^=~d~83e zAEI=*F1e2}(>XIb<e&VX2G^dRsF8J0HUp2lI=d>@m)Gbu5{2G?VO4qXf+~C}KbeeS zo_%eMcdMste7pp{gsub|jOY=45hks1BB{h<6B&9R10&Y%Xno0L%rj3`ATEVbt-*-{ zI9pkJIy;jmb8m*N5g$@ZBvuP=u1vl+JUbK}w3xwHUl*2caklRyKSW}_Z{Y>=JhA+` zbD`f$IHErDhkmt1IM52hwk66ArW;b(b#8~W>2cZHaE13q;e(~}WkCQs_(9lU$ZAG( zZpgGEXiXNf<B`}dzDb4`PmtP@b<ITZJ1}j0+@wmkDuAWJc#$c?ug--5Jgq6sC3SAx zWE|lvJ!`^nsfMlY5%9XwcXruQy54Z*mFVGzz3)AxwM}~!=yHARiRi=P<NF!W4P6jK zs+Vz9bwwvlCNuO*?{HZJ70fOG#xx?JugA7cKt9|}eN@!;3*teKRcxT^7c6p!7sIrB zYDN;fnv%vf6gKGF!jqd~w2m;gYp-vop=>_J2QtS<r_^BHqb-$l-D}!uvko$zJ5v1& zoP8vB3|VepVfr``JR^ipIDD?G-_}x|$F+Ss6i-;l6^18rRX6r@TmRtrV1}I2u3WD^ zqU@jv!uU5UL-d0350!}sThtfi7w!-GauXl8mr}g*KwK{<FaI3=Ed$zX5>qy?dO2*< zv^C!?8v(`B2H#ApZ*GqMU@gHu*tmnae}bUAxjl$D13HCr97O#JNPS6pa4&W7j6c&$ z#8CVL{Sxzl@m<EIjqn+AzNg@Xa8GDK#?$)IQh=6P`y(4Mv_jTzYK58Z_ViDLVIxsZ z+kAWU(DdyZ>cB^Q_mzv<^u5eFB5KFozhb=mybGff2{?j2>*i=i+1Cqg%U<ij-G_7N z+TGlh^j*!E`LTV}^^Hxk#$Wj8@d)~kX0dydT@DDU&yz~;V&Vu3NiVt}Y&nSYYpbF_ zjI3CtA?jTnWXU7`@U2~J6jLr)RUqJ2NU8=`xgKJ0iJvcu{}6QA9q(*=q8VNSEnsm{ z)F;FV+m=KPc8_9uPG(v?fWDY7uM4OF`J^3+pKgA~ZF;Hc$i5y!1S2Z_ieNsL{d>AO zXPJRF^xfnjW4bTk%D{IC3d!8}wl@5ginIiumq$*VGj-2)kT|y;U51VgE2K>qO+<Bw zzHS$@gqs)o;0jJq;vA>Q_^tV*)}V}%YP8R&h-5&x018_0*{!9AU#5|7?C{>uxUfx2 zCS=37p)gIlZYFohj6{y;>PzM>66)j~wsO-nmt_>}ErAquw!I4$YL%A4G|D}%WT)6F ze^a?3?Xj2!)wE^5t6njlCMklZ_N-E5AqaHSy^HNdIcsYz=5Wd~`0m{4Bb1G^@ON?P zF|N5fyjc!BLi>HRk<%u*_5GqKc9qVe!j?Mt2JWoH*NV7S)cf60Y*tVny0m0t%MD^} z$bd!Q?A`zlv2doZFrU^(%WM9)x_~K^k>lPfqClD{<Mec$SEsNBKb(yAhAxuuGq8lA zkW~PAK`;NUza$rK?>x6$V67tr3e3a$G-alDG1j`)$8IfYLhd(>;1GR~?s{v<`=CNJ z9$>n~P5(Ao(qcnOwxgg=62QZw(LU3YHj%RUVu};SkLN3CkEQN`rF*pI8yW@ZpvP(< znZ+4s>W<ZJMQSbEe_4IMuP?kew^6*4y`np9X+>=uTtF5ESYHij(i-90A+?-z@HnKu zcbZ16;hB|7@)-*70IZK3D76n|Euf&`Mt&3P!DB|22Fm!D<-hA^rORi`DehH7Bn~)= ze2jD>$XycBjTf%KTa*>bd61l_DMC(cBlq~CYQb>LBBGKtJ`5X}aty@SbJT6E-NJr{ zyMp(6o{Mi({G#Xk1enI(j0h_Syu>*&ql6zEDxe{f_Gb6u43$6M^!lH^j+puspMiec z7n0^*YaCAfQYxGhWpnlD@<$USUF89Aq&?;bU0}$E3&xR~6|n>mu`IUK|Mn<6AapOs zV}GtOuhJ+;?;hYQu#_y<VI>W>`jm9?sOz+A?6Ny}Gq&-PwFqyFa`29qTvXAnkSxkK zL(aft6E6zQkhAcksBT|QHBNVuV+H?kO)5nf#nhE(_~wC~Z+}{7h!f}8`H5JpGp*@A zpTjOxt9V7EZk{~Dke72X^g;aj_1{f*j8r;o{3wTm$2&2;Qj2e(^K*0CkF0m~5B2wC zt>`ZubzLUDXJoG)oTnP1to3|$j%{44wi+O4wJ<S2#w4bK1T1tlC)9THE=bdXeIP?_ zc{%gsW}yaqF!3O)(QyiW4tBjbeL%$iamW18v7S}#&F-z9jqa<qEM<Q6o<RBW`Z*&e zqka&5JzhOyFxtP^?|^UW0vrRM!(v*z*15J3BL$OE$xrJ^ONWi@(Ue>%!|_HpO<a4Q zn;*l@yb2r%$6m84Hgfv*O+Y-81|DBRQoCB}kB_S&VLIlHuU<QZg$I(7+`Y`=wnk7C zf45<<ZclW>q^`i|sO>*~g>I&MEq65yY>LjHMJ-y(AjrdTBO!L}{oX{lHs!KGBF_z6 zi;D3d@61vR$wv&57)5G^c!2rp6fS|P&RG4<PBH;zuj8{#$#3J;UlXw-+D27ny`)V1 z9!cqIgcfC>5GX%(NBTJ?yo%qJw{qB5$gp~8eW!p74MTsCqnL*!q7<O5#}k40b6-A! 
z0sq;VK?TA}N$TwxfHo_jI^(8Wwru{A0%YHw+O0(6>Jzf0qQcecIp=bnD;vh<x%RoE z=Bw$lJMpOgIC(F}d$ww%`cZO+w!wyz%4uT!-d`%At#P_KfNY&P|2Wh9M_K_-we9fX zGZWX%aFzy`ZUgPuD&|vCjnjBc`WX~A(psvL(fy>~|0?k*I6hlkr2<N&+!#y!M~--M zPxeTO0K-1kBhybJed5vK3XW(%x7#2Bk$p8`*yrr|b=?59-s{LD@7&%PMM^ykYz}UG zh_AhU1I(MBMhzeTsFA7;CV_IqiZ#(W-R4%C>E=O?mE=&;>mHNrUNs;1ZB{1tk58|+ z8y{{oz8Vv8ds<{Y+HlU4k#Hh0|5^Du#Z}O8?{551EX<x9wcmj6#k^l)$mu?I9*&N@ z2jyh5&tD#2jvZ$Qnf~bYCa#PtNARb<tT-9WrdWFDEqy>n$qtwddOLk2uzYsQZs)`A zrXBixD!p00h3spZU!%<mxcik$qUtaxcbAlX`;mf4r!Zb4g-aIjsspni^KflJPL*Yz zM*3L^iVEA1h*IpAI3KTRE1G#dH}Y}ecSLap5@<OfK^gb+yU0CM9tQEq2yKUVATHvS zuDBiMoh`hRqv#c0P!I4rzU45tu%?W+NFy?s^Y_Dhnr-?e*(!G#B72PEpI74<Jw|<n z!ZQw`_chwAqjePuoZ*iFQ5iVD-LaBvzJ3Qf0ni93C2Dx_BGV<tOjeN_e7~N9fBWE+ z3YV?@z`YMUW3!Ro?2e3>LebvaRPs6rNm8Go*}qbHz|lyu!n$-+j3kO~z5X~8=V%#L zJS!3YI1{O|I?F1lkdDD-dkM7sA)ZH7*?_G@Tdv3rDa#pNp!-Otf-_+C`+8oEba{c< z+hDLaoOOm{&pfHZq!=~}bcK6~@tm4iU3Zkx(pFj;7NX44dDyVZpD$JkL0$nhX)SR- z>H7QStub;!WSv)0MeA!PHG-%QTBsiAa9&665b>W%{rVQ}NKfr#MrsY^^r(a))B=UN zk=svCo9i{9YqCAj-A%e~#t+=dHs7Ys_v!NMt;uYI<_|wFpY`=uFUu-q3unwT`|Ej< z(>?e>oybq7&ObqqSA(}YI|tAMlEunNVD_WMPRtW+9uliI%2ppBk9r^;Z^vEQVVCZF z*KJQh3`tac?+aTxFZQS5J9V59a~#H$B5RX7gM33Oo_8{yTbml8E`6<zlBVnE=UQy* z_R%-O8oSIcsKb%a9o14@O6$fe#%mGIx787sjF;wJdi^*IDk<etfc=$4TZ?`Tj;L7D z<#RJ<j~^o@_Ur^HZ8>Wsog?jKf6T97mi^Qid&zoY$x8SbmQCyfmB^2QcAvvH<c7^* z!h3L7nQ7^FF-0sDE23*^@eV1~4g|v(yp2*rIt9&Ds&*YStzjRamdRZM_5J(`4jXwN zUsl9P&(guL*S>Q>V@H?=PX(*D$^?-Yh9&RwHr9V_<9yc!c|E#awQ{EAXX__cU>RA| zoeF%?&>8$h*$TJm*Q?;GX=eTqUD{jv+-0-5)AosNu4cdG=tQ0xTH1Mi&_=aVZF!D( zdc3CVTu<9S^w_bq+>7?_yv~J{P@_$u(lWL&S##3t>=@-8L93+!*eN~CT+m#+wq&VK z8jSXY7N$8w)(v8MigbS`i7J7ebD}WY;61xXeGQ#5d?2jWq(*MZvAn!Z9{+*Zx^rS? zFS*o$P@&1AXfe-(+TiGyeUr3mRTjT{ymZ+|s@ro7f<cb}^K?mqIn`i&W<yh<%6NVd z6a(88;PCl&8!irAR$iUP^PzTO>b_m{biBpm;c%Hh=n4Ni8)P5gwpk7i|3gimmH0k{ z&o12!deL0uHt7s*!kZ_Jh!&CS4%a^9Si~)ZS}dU+J1XcqxsU(2wv_Hkc*~rB1kG~( z`zVp#NE&N9X;D$|?-4DnL-|1<%azi7YrP!E16g6}R0F^q4JFyD?KJkwShuK-uH|9- z05tl<ZnU;3PhE}2RM*Fa7GCxYs>0zTg5cTS`kZcAq=KgOcUyQpKfbu&KbpBs9jWFL z+v51-I*eCD0>xu?oFQh3sft?)1IGZ~K!*|Wi(@=eAIaBFXo5{hizUR6`~^lJX?|!* z946gg6#6fzHT$?stkko8+@MKAv!VhNKif;<J}A8@YAeXa&4{y-gk$!}Kr2+J{rAi; z*J!nADoIY4hx!9%0lRX?nNJ;tYhxE*hp4N=Y$Whv;a469VhZQ>nI06bp~btDHOV%l zzP_$Pvq^A9$WvRBsWMIpA1gc^fvxx@inYR7E7z*&(6USo30FFdKx(jD!>(M$00A>o z+fFW_ysLknAItm`1s*{Hi6aX?@diw*)LjfwIza?)aaK&LNT3|v0!4_vq6iU%r(~Ep zZ-HKSM2}ER3|J8aOTL_}&|BShCid+DfyVZcY>+aDK0IY1RG*?x9V3`TU7XSgf*8`V z??_e9F`hWwf6$;i*t_DgfcIIB{ggd6o=UwJM=U}=hlgxD2`aosAb@3vt(z8S$-RM7 zw@G3EX0hx$Y~1&#U!|fUPW9MckTqGJjTYo6@#UWcDs7oJwoA4u4PylGA%ef~b58OP zzk6Dg={0IJ%jMCqb<`Op2KXZ^>o+r`v$nZ?GeaVSH|t1BLM&6=o@<DwMG0ypU(=O} z(x{P=a@jc)7=nIM^(6l|eA3zb1j0Gh<$dk++qmm<$MP+94!F1imJNGy2^TPQjY_Ds zPz40j^J~qUEH2W&j~FSG?DvJI$_p4Eg1{CK{M7yVCCwJDcO0(Ps^6ICqro%QL?t7< zR%o@rn6+w6bG!g&9;TvE0m>OsDnp?N12730kXK>^>#51DBS@-fA;-=`P-3eYb|}qs z-<$)~K?y7~hSUBaPuwp6>i@PE(T$2_6E6yugG{l+|I<ocVkpeK{DNrJUb?AKQ)`)h zsiel3^(T&jfdpod9(Jk*GZ%A5dg=j6UsrE?<+CcVh(a9cc;v$P^!miv!X4w3Y5q7g z7R^FVxGES}`u8|~5?LKZEK+SO$Ygu+m7>s2IXy;j!UiXy-{|%TW}*1(5C-T_V!tpX zM+1+4B32RKg3Ji6_cF`VapQASyajvy<RvN&x(sC%vk#)^<<u*`LfK)6H8L{Y4-io$ z@K=<{<pjxaHlQ;bG`ofmh6e&hVZ8L*>c(mNmBP(+0qo`U<6$dw-Lx{k&jC0Y2;2cP zB*p&Hq2F?J1{-FW=Fs;C6JgYOR~8zptcab6g@}oW3s?#PHr;~FF&_^-Z8rK{J-rI* zyQ^`x;*MD12M7<to6L8p6m@D_?e&n)P*}hso@TSf%Ysek(-G5>Y`eL<8=cGi!RsxP z#tWC!;tu?B%5G%=+9E5>oFqLxPS`qwY7FZ0w40Gal3m@k{f_zhLTkXTw@~$G=#G%w zE=#-N(sJQUFqlw)-1Kn%h<*(Q&5dcF>7LObOEC7n3-LLpUDoYOWaX~%VI^j}DIk=y zK(A3RN)NE3-A4zL*;BrxcITcYXR@$n#bH(l+NADI)2}a}jYooxFd5FA%O6c4P?&Q6 z^RtR;c}(<K-~(jpMuYQx`IPPfxogUuVxdj~?Olw;eY6qA&rC8Yom;Ranbw!`6Zo<T 
z!#ZB4=QkyY+x^{%c^(Lj(lKOo8qdT~B%ycL$Q_Z=JJ(2fSJ%?}Y(yoS)&{!I(|eLb zmAd@<&|_6qHUq`$mD$Bnq>hdITfp7u*TIZW&?RonYgoQvsVVDh`ae*(qEmSkVO_O4 z0hP+WvgCVUQtz0e_BvF$J@^2B0b{4Y)s~0#g*VRkll2(K%U35vvnoAn<^C?IrvCQ% zrMR|wU8})LOfD}@z=^A|doK#*m)w=B@uJjcgc{w;N>v5z=U_$D$LIKqEFWI{&Hakn z$_J&_@)kME$!Q(wOZ8&YtIhOQYJ>DvLWAt;7mO!~R3bln>U)T${fU*DV6x6EV3~I+ zkG*3D6|TLvc1Zw7RGiB`;O$sb=?9xLx;h3kq=H}O*-db%&wQZ9HQahcfh&T=6`$&W z5(yxcW*i+?hvaOzs&J+6L4*LqHtGh%;}5y2Xw(yxf<16nkdN54=ib?YkXUJ?R6!fK zGhg?#g!vEWpvlHS8IqmRX^QDv@w_Zo67nMI+z>8<ufrQZp*~oWm6JgCfxfk{85pN? z<zY-HxmUMH$OptFd;P|6{Pnv!9*4P5DTwy{r+i3-QzWy9PPP)9V`fE8>caLG8BZKZ zUSWakL=#Si`Nc2RJe{jM(G<6p!WPP3hns_CnZi~p6w9ZpZGR@)2Bl%)-wK?@f~&K; z(Jaw-@qTYO&JB(TgC?=RJfyE79R)q1Uq4>w%w4X!_l;i}Ho46&yRmU^Jm17u`&3@4 zD`qR9PEx4bpI@J!e%9j{jOu{qCV=q=gVxJmqlXPxGAt20hi~k{$!#-;+N%abh!L%x z(>eq5nAzqLUVnGhHU&;198Sj&LOv$mFa*~NQ($^iG*>ZZ#UretTA1VS4DK<hsIsZ% zN}3g4DBA(01gx6`0pBG&F+V&!IX)EK9{OP9+<Engjs&}L$i>K0n1aZ~3Pgpbn9(8! zjSaZn$hf7LH95stb-iVt1D_VZacGw>9d{$3+Ry&XS}ga;z(TR5Hm0~Bbt%s{c64bA zThcnaaDs1DV<XkI;flx#kQo$L@;i+e`qj{!K!%^`BUHwjJ9jPttg%pKy?SIn{aEMc zyCUIo@guWby6yJxv`ve>$#pM8_H(D;J{gAn+lO<z{YQhO3-3fAZU5dO-zKKn8k5UA zhdY{-jOSpq{tyu#<$%^GV^5LzA){p##fA8Bg2M;7k3Nzp7j|X}crtqmm^JMBdHYbq z6w9{v#(l8H(5-uUl*_8WXEw{Orld8=>yydzlIsTaPK;rEoR+#i<+KS^p4^Tl2^Bh_ ziHNK4Ly%?sMyx<l^|CHZ%vd+M&}68wXG)K;A<8ymAwNZj7QShftT@}k6G{_ITp#Mg zeZs<2oxcnn>hv7Mi6X=TWgr^pw4faI>Ug{_sqFZID0gcI=l4d{@8b*uy3nVZOug*u zP_OQy41>+oQ%&8`e6@R+51<dI&Vv`!rzO%Hu2cf<ZNcg#Dd-XhGNo<b{Dn8bzA64z zn!u5OQm-Hfb`cI1k&3sZ7hK2O;aG_Ph)a^se#gkI!VJhXY^_40^!%bLC{Gw0QM0zi zNZ{o--iQVO>%=*3Gw={p5@~yJSjz@}Ts_o&hzw>Se+p7Tj~5Q9|KQ@ancAq9u~MI5 z2iIYFi8u>CXf3@rs!M(|ng@OqY2-n_>^e^_Jw}z0780^NeOtqxb7((iIvpDP)TMO- zDIc1of>ktZDsSvT+Ya$iTEoZXVfGylyOjuQEbH}3%Sb)EzAbcXbc+^yY5J}+8NCoA zP35u#h<X{UcYo~Yea|ms^1Thvtbt{ashQpon=_9Rt9xaxnv}|<I7B4<2IrEU-<MUA zVLla^PCU?7RtyWP@@d7n3?m+)7i!O4QbKDxNy<vKO7eze7G@PzK$K+xq(Szxg9JYt zv)$sPUTu#0ArT8W->PR)x;AC^ddH6nj^f~IA6wmev%UUZ`Qqp*&10{4Z);$xpRbef z27koIQ{-@yEQb3AA?d!Q<^d}fl<Y;;$QZz&4@S1StankfvESU&5^*-$LE3fj#P7Xz z&GFfCa9clBdH7sES}$>ma*Lu7K$L$v4QVir+6V;>NY=1e@7*w8Kj7Bgw85sws|^E7 z65Y^)%3;yy4QAw_u`>=<GGz|B7iEYb&P`h>6oz6JhN>4b;__99@lQ6)8?`{Ery1Z? 
zEG&lXumK~-NYS`b%d9$Boo`5RKW0YZ<U+~+lKm(a9BG5}70-famev-DTwjrIEO<oe zj<ghiy9^d);<R3^X*UZ))etn<`uZB)#S+{7SmDO#_>98^m-}NIl`7o(9tlntjXNsy zJ<;ai+xe=zCqRgTS@bCsg@LTah^=POP3~;`Y?9a@wZ=M<<t(#z{`j}*Y35@@zOWHG z%^xh=2ao{7M!~v6%sU_{jser3Ra_L8Pt22%2PY9!g}mH<Fq9G$ti{y?yQyBjScxa} zWGz89VNafaN<q`dB+hm5bVeyHHQqv2g>G{qJ`sE<8X>ttCWM{X8m%_k9j}wYt3$W( zRYAbc+Ea$nIpJkGF0O~NJq+n8n$Q-pL9}ag9p55=hyya2P0KIP@un{m;cZ`w4hIhJ z-)W-;{mJ6Sp1{mR4uIYfKSEEPj$t=mXG2<Vdz;+7S{XLVe4}e9!4#HFy?S06lWNP> zW7P@-190(Mw1AktHoBU!AO!SD(WDT`xL~BDEu2=u*xaO3QPDvP1&JStspRQ{1g%Qu z=#J2zgBml%w!|!?<0h`Jr~^IQ#<7h26i$O?R584mFl372jml>W_S>fL!gv|+T0+2} z4IjwcS2xFz_>D;Ts~4`UQmL)LSe3<coF39t?{|Wq$U$P=Z+g~Q6qHKy@)8IL%5f#f zSq3i98D2T8YGVs87J`#uVHr8x)qFUcU%nDxoF#sLFI2@%;MXI6L5x$?q>RnqBGrQ) z$ip*{#}Ew`XJ?y<Ll4Px4n@!8@F9P`(%>%NOd_z}_CWzkt$N?^0oI#IU)nxXl<wPc zCYLWvSAygp?r%@hYh*hX*6Z|kyOc?fdrT|J_b@cQb4fuKOpS&J0GTZ{`|{4Vz<d3g z$25ISkx>j#Sz_cgsz^jhViWC6?$i_eJ-4}M=37;-G%HJMH6V=E#=-Sgf2LNwHb?MG zb@eRmjOsaXSk|_NU)8`0<8jg)+iFR`^HHp+iiG!OPU5K<IfmbSWRQ{r6C&}|H%?}n zd<(uGGJsLgiv=Yj)+_hy?BCwj1EmB&WgR)TYPuwEBO&y73kw>p@B#o&`FB~;BRSl} z@Htp=i+v^v+Q-9?q4Oi90Yg8vR5y6<XHf1njy7CSpY(_*J0+7e3_^;COw2ape~BW> zHne9cT<*>%RO9?)Z}5Atukr0Zx$c!qGk%s!w&`w2k6d?KQra&(UV`uRnG=lEac#hc z-s8&YV*0q(p<Q2Zy^F3KTeY!<P*!L1YDD@}^2m07n0%~3Yx8#73L1;X?Pzu1h68SS z4h#9=_H?}&qHD8Yf|t8=#5CLhEC#~4m|BP>Y>6b^gn<e^fV`_hc!;Pm0IN@jVhclu zA8o`RL2(4a37JY`OL~JtSA&GeZ&K#5GX@8ck(Q3n&5K(TGf(g@9<pU@xQ{pP9YydW z4oHAk+AXe4;PGF13c~wc<A=xl+r)Wy=1HnuHmd;DO6B)Tpzn4=3|h2lvo)8oDx4aP zyBb-<r;|)Yk`BJXGu%ul4G>{~(s^sbs^D@JU4q|Z=C=3`lCiiLO)m)2vQ^<pvtpb% zC3F^}q{c|8blyqw*OBq#_DWL88~FzPe(erit}&O&5u0-A+NY>IC<l8EF7Kxuw%2M# ztf?AJ8CT@niGf3zlU8H+=?Hd`82N0AT`@NFKVU7$>nb>hJL)a4nqRZ8AqIo{Md~{P z5fzv4Zo@1i$8w(<&h_F29NF-Lz?*H@79j?g-_3X9yMf>2w3=H6?RX%Ip6IfU=n2bx zG-uc37|NdsG>zNV+lK?5!Ataq<Lo~<3&OWD`&r{mx$XJwq$REdmkHdwvUWuw7WiA0 z%J%*-sG69pl<x8TH0mS?sD=p?M>4YTASAjd1vvB>iO+v3N!!0ScH}UVi==SH#=#j; z&DvRT%7oPQap+#n-7*CK`o+W8j|G)9Lw||S3`^O7P)1+~-X~x*-fSy(b-qm|Fo<yn z(iQ*%<sLB5^D`Ef)8SK{NB{1(=1`%bN)GTjjLd9HRr2&O5!-#4yfSF^rU<bO{?#Y~ zi-{figh|mug;s<4IV)8xJC0(GVChJ=m_3hiBRuVj+WOT|lEsgSiO`%$h)J9_OT{-w z<B6kHqoC_K%k3(%gYhgblb$4}wh$u2p1y)IGOI02vO>M7H~-*?M|d5v%_sokG86V{ zIeQ?p$ExQ9BkbCg{xVIAjDpFV=7FokE=+>%^ety48Vs~QFl(VJpl=F4(^rOn3uwVo zZDj{HS{wY>G1D%#8fTfn444zRs9L*PAx4h>cwvW0k@k!t`@4G)xv;Rl(NU&)uDnD? 
zON(j36RlReIEg%IZC%{c@mZQuf=UQ~Ma%hRJvV)y5Mz#cwdE>KgvWAsKnNM_+8ba$ zYlFIm?T2r}Ne|~nk<<%J&Z1|=LuHazmggWMAi*J=m~cum!z6;;Id0?4dV*G~>*>cc z(IeS4&xY9r^Jkj&X7w}dGtFn+0Sj)E2q0>!LQjek&-^@53{AO45wc3zr8uJ*miG?X zfzd3HEUE(0ipF_ZvU~8vwkEaAd_5kGwXc_d%~v2Bcev#z-P8%D_f-x%j?e8_+uqiL z?&aiZRoi9fY1Nu|opqn5?Av|y2d0mk!)|vUINTkt>-IOT%ZDeHGg{SZn1_Af(UQ@6 zakG9L<FY#?Gd&kvCN(~e%E|XEjY8g$&@4BZr^!kn$kzGK8ncGF8TwtH)EgKu1O8;C zKKWN2o>()Xg_@+B($9kc-O=?s&wSot5@!<gJQ&%fOnbo+v**3CCWFqi<8pBNFdYoj zP6sf{y7OofXts@3(_`i3_1^>6pP}$WQv-bFsa$UyOw*71LSC<U8_?7(paj1KwO)Zp zT+l;=fwL!Qb{1`=;#yW}#;47UNoGoJkA|~a1Ec4UjE_RAaw?Q=K!DgY3EC4QeyUNU zaS1CsTG3y!B!C{k9+-@ijaR6N0F9TNk`-b<`u80$FTiX&GVca!86G((t`UjOc7lCW zQA)?>L$ipS1F#GJFdeet#Y<2ToghCElbrdF9+0j}y1DP(v@Q<+Nr}SdDi4J{Q`c7c zm?&8dxc2GAitx)ZQZ?Xp6UW?_Zr4Gs&p7+oIM_(m>Uq5p?wQpkI5@lq4qo!gd6_z$ zGP-tqX?bye_gd9Dcvl1F%v-EBTmnrtrh&6$u&G^7cXDV{MJzeQu2;BYiiGWiR1;Ko zrvH^k^*Ga3ePijD!k%#Ca1_v5Q9<1mn^lBTF<S{(*X`i7k^WFVJ$M7Jc>IGm$$7Tq zvd4Aa#Bnj@L&&Gzz2?O_a0tJa=qS5v(3i7ZQz^A)G1|fuvQHizX&YV6@5K({-c<Sx z9eq3hyJqY$Rd7NB%zAyIwG3}uvUxlivKQ?ROyy}Yao4vO<qiw6V+3~t85T)Xh$7mn zQMX7p%Raw~nNF}R9GqIZLEHxi)n4b`R!=ozHtA}jmMTzcV{H2dY1%V$F=X>#jqR9p z4WhiFG3jb-A0e8bIzC9orAxId%szEf30dP@TISkzLi#N~VkXUpS1Y^NPLnEJ0IoX8 zGE;T5iFE>Um<=MYXF)9NMm6Qn6`3{eed$2Oc)cTB%8jTldrYYUs>(K+Y7Iu|u<H61 zQbN?)-PwAOTK>|0RaQpMpf3$n)1_Jz)evjPTJc&VBLNUEEUP>x4X_qDr}F$h*jmja zNw#tGy9<&k_d+@2O^!1rcT@>0#0>IHg(GdLv`_W*CDvljyVAn*=@yYK2tc@<-1*hB zYK}_Gm~%58OLTB%<%%l6b3?RRrPzuutcvsq<8Z$>lU_zZDZLI;Rs{x@jXJGo8c;dV zPWPV_R4P87=3=%cqI@jWr`z+FGk$c_iKI_?-_Ov~LQSM+WN7a<s7pod9d2wnoJF6L z?2)-zen?ro#8X5yZN7(;xbq#0dx~7_{_t1Wp&^-y=w6Avi8O#wfI%$St|tUH01&ou zO!%K0>-WHkpb!fj=7O*hdQ|h6o2Uc9tYUib85?Y0C_Eyl9cMUiN1fK|$0-JOSF3$< z|E^Bn0!OOLHr24G`k<a&OJA96Jq9-hIfe-xLR~jtLD5))R&1el87eS?QTa`p0gZX! zTOBdjG0R!dK23$^qIl9+>`U+uW}YIfSdpiaYJIhL(g+D9uAJ6&=I61oQ3t90K>;gz z9`=FSt3w5wirH{%Hhpw#cFY_AMdHN=dzYoL*NE5CdBP<(+7{Sd3+~;xCF?iV-hl9T z`#JeJnP@~eG9BqM>8QdP$8-phRz>9U27(duS~0>LV@TD?&@!<A=>`-_b|CrBE>)=f zgZ!}E9usEOTLt7D`B!|m;Wu>XJQ6nnM2P8A&9NA^w(&cLA+BV+%<!yiHI@YB1B1iO z7U*iu=7hUJT(Y&HNmU0ZczLpKb6df+w|ZIXXUY|tRpsQ=)1k$2Uw<L{?Dp~cmc8yc zMM3g)>u1lHGsnwV3@GaxYvxOi3I?q%HE_mWFoo0fVJ9OLH{cilFVfxtI+JHz`;Kkf zwl%SB+qP}nw(Vplnb@``wl(ntZ?ez+Z=Cm>@4RPyYbAH3sw;QZ(_L#l>+0^`Rczn~ zR{iGDuFyzHk|*`S_gpRXHRkY57!m1WNR<V0J5_?GK~&!jmxv-3B@4wN4hd;hXMfTS zCgfr-o$|aGb{3l5yZ)3)wJgR&BEC;D6d6yXqSfl3(#L$DpadhX(PBIuLwpECGN@O2 zgt|^mVp1Dnv=R6DnC0y3>b$hG)NN6y2(YZ}V*pk8Aw;|Ip%$_c=40XN*^+tK|J3$` zgX3)!ox?uTJ=Db^Z}&t$z4QwTTEaB3l;{a0g<lC5l8TgH1M)@~iDUK?=}cmEpz9?v zl_blV!Z8%=(9;@JzDY`#c(pZUuiJd2JcFVGMzYjMH4>>AI9(qZT;qs3POj$4!9Qpe zsFj~c32aAF?u|jmo-x^HvQ1>r(2iPauxfMJ<+7-;g<c!8-D!=~DxqzUn-M$rTKC!& zoAYX?pFuyTyi9%yah80<eTn56&Ox7w*pfJM4$=18#T9XmgKPo}A9x=awv;X8+z`ht z<kam9#!2!vg_E!1PhK;G1*_8VxjvKXg47QQRxRM9g}2lOfGg~lSAGjz7)Kw?;d@O? 
zJ?MCgu<+?10m`mSW)eRFr4bE-Fm-_;w*Xb>w*m>+Q%}7OdjSGVjD%qYGXyA|S%_Tm z@8JJ!m4Fr)h+)lfJ}CP}4ZM=ltg1a_(N8ygT|drwF3!|Dm`1t3pq7KsX)AAJtY{{I z??bxyy>f7ehF|#+e~IdaO8w?Jy$cG>8xN&hb#^w9IsYkxWcwtElXfx(hZAhUDOgi9 zxqI#y|A7%EcA#kG?!{@v{g{0P%a7^(-YG0#m@L5;%l9dro0D5@aVPXi+7<0u0}BqH z?Z}ZMCbttVhq1chB*q{>lskq4I~lPZ5iefPURI-Xsb?Id<i;Ayz(AP6cV4si`gnNS z{~GgNBvgC91x-t<W&#tz@!l9Nm{NJ-B*TE(CsTEh0?sVGYjQ3wC{la@r#JX;;s+Gv zNl||Vc}}XDq9+fF%?c!AO0Pl6=hhC(V-~loN_T?!6cXOjTb9D7zhdGMU`}P^BKK)y z6fk2$80~2W)G={ovH>~yoWc_vNWF<p87F3kvpkh#GP4hZN6yOuYkB9vF`<m&<sMKa zkhB>X-mCTHJEz#T<zKlUhQG^9WVk<vmS)W7AdjKz9)Rp(k1V5)@Q?R7Xq57usEH(A zr3JLrc%5BOUzdXpo!YTX?SF0>Qo%9D3UmS+_3}Wm2ua+14oVRnCntt%a`*Olli~O# z=lffVuZ<fefhzdN@~pQb0&N%rVjmU7&oG_OeWNx7_ws9D`#R$voBTxJZ@_1D;<qLz zg&qOdA?=}((fKoXr9<z~{G9-&0M(@kFXEI*@k-=J_|FQ?=^&a2tj(Af9tIg|!Ee6{ zX^yKAA`<v?+OlB59<Fe|68M7V5dk`ewgs82a-Nf2Qod965_ObvlFK3(%_-QFJos!l zzv1+Q^mmFH<Is-Z);O-;InDFUUA7u)xN4&Qvh?B*nby8zHJl+D09+m^hXBm`J%4~I zFS>wsh*EQ0XEIxisYIWrhh3?&I_oO46O6h{lgnnduQq=z0Wu5z@nB}zc3M{{!tv~U zN?*`>CJcxo9KxW>NF`uWj-#p;K&60NDVDD}OTq#wBQie&vk+`rsA0}FVTIR@!&(H9 z8CNTOtY!}P8-U5sQWciz=O1IKin;0aEi-9q6-$#t-UX0=d<te*SV01FP$%>?fB^pA zL+S<+Cjc^c5}}HiYj$F~5E}>>3($<Jzo9_Ze7~i4HUG&n5>1MHY*KgN$T&{IEs<$K zGsW$b{^o>BGtplSo(^e`*aPDaoiQk0H>GsKJXW&7pbN`VXHZQ6UulJjO$DkxJhSC9 zZ)#;2y3b){*!En~g2bHQP%aZ4jTZuFi9x7(dc!+S!Jrhfx<+n!^4dCNY<1#rh$aev z<dtq~Zt{o`$BaZ3$gjp!Cby7wS-!~Vq@UsBPlK0EEb6tWZcrN~AjY7}lLTj)9;N|D zV*$F-V?0!y+k2GZwP5NtudCm2!zhCZ>f<SNqF*x-9kxc{1TOyy=hBSp6ELQzjIHme zzz6~)ya7P@g62PQ$6pEFS^?f#`t@TvE97TCZ5@<pGgBJghUmg%H46vfMK~##wuB*X z&o5(qlnTcnI|P^<p9j8XK;d#90mj8FHj!P*=5%)gQ^Go&p<)gO0+gh;Vdj=x>z+HR zmZms!WUmxFm0k`!x0>Vy7|zxXKAho9rc@#vDj8s21AdEvi3#7gzq}7p0_PFdA@-Q| zF0B#LEz$d3@!Cxy9fO4YMIWUk?=_$=spVu}$#YchJ^Dc~GX7|EoB$s0mhq&li2y$V zP#O4IXoMb&1E3yedp7+FR*vuF3K+m?3wi$6vEk@=gS-OO&Zq?G`K5B7HFeD-A$<b6 z0+;g*V2`yweWE}z=ryQ6oPUrpt0(YwQ!!2qs5=L!9^o*BT+-19B7ze9NfhQWN+DxF zpqU{zsNAqT)3WkDr)Vyj91%Lpn+eT!;2f||fOXuB$1!};@aU;}=&&|*f|xxsFLaFO zY#0g=vj>Z+z_(SzZ-@6V@wmoeyXEqZ$$}>MO4&-<>K$=6hwjjwsGB~Uni=bWkyiA8 z0i?eOYx#A7)`0~A>VvGK8c=7~4AhZz&;>RS9xE$U!@n(}1gdwbdx^>{#%s;MK!Cic z!y5nx+d^O?Lf^z7_=4>mA=oNPsS2qB8_<@=$@UzBM1KZlMlQf2WQNb6G<d8mrUx8g z*bF2}t^XOSB4?p6YW%+f6<ChlXImwPjQ=diS12}Q0Te~n{|rfyf3;Bf7XX9;VMIp$ zt$@;}p!%$R6{3v#D8OH#3S-b;0p=@I8~)Xz+j9*Riq6nM^%eYgKw;`4KLfqKy9x{@ zgK<whppYu#-wH4ojluZEPt_7&m<<x0<nMsQAX|w+4!B36|5c|NlIY=p#2_DC6Pk!v zQv*Bz$<GEzMy48r<mUvWAO|=Ij&uZ)mjxJ$SeR3*8X)LlJ0rCApMee7g#)@I*!~Rp zk>AQ2m$4vF*r6~KdcuK3l?Oiq3PYhkq6*_@0F5q>oQ1~d^#2M}qV)QAKwqKqv$9tk zz|Z)8m2vywX*D9cULDY%;=IR=YRj)zY0vW_AHrTBMD@zKR@RqSAVbxGs8ViE3ssKr zqO@bVKf9e?)~9X-jLHDq2LwS)a2|3E6O2v(VR>o;k;g9{??@WO3dJ1U*~i=m4fGgo zuSg*33)Y?H7wQ*ql)EE$JD_z&=BW}FXer=Uc>@;S9S9HxF~DkB85(S`&Tzcy_!KIk z&Is$#JvMB2w1<9s!~Fa-<*iMfIL=UI$wgmDz~Hc6eGIh6gD+SP0Wut9+P*iE<|;?S zur?nTM=Fqu1oT82EQIhNumVy5BuE)72*HWKZK=WAY^U0&sDl@T5WF3<T_Qw~1shG; zBvAx|z_YphScw<moUKo{&4Uk(27DX`Y6jp=ho*fDIDG~IrN|7y2mS4lq{qxKS4sYD zoWz~#&;ze0Ae|g0pA-!TbURZ0npUm`ZyHo6sc=f7ceUOY%50GPcmC=`IypQ)1scrE z(ldiR+`{3GsWX(mEEM%O&P2XLOlSh?!-~QhRtj-;yx-;WIe6d!f~D|ecZ&eFrVWMn zFdmmT-jK(B;C(<|-uoA3fE_vWaYvMiN9sHMGe4G)t5VFpIPl?0^ppF{c>XxHdOYy! 
zaU6vo`A$<z<A%IpQFP%RcFN!q9j@+Qkjqh7<l4$Ln7QmwPE$>z$Yq(OmXU&w8Rb&* z3?H*7fUqZ#$GK<b_m9>`W~U!DX8XEC!scRgN0J{`+~O78iKLABQbWZlK;_TvAA|C5 zy=ME4?Kx4uG)Ro1PeFVOrVfVeQ}3lWhRhHQ)t^mgZ5+y<wIGQB)3#I<Vi7kgkTnj+ z`h})j>ThN-QE73FSz(pfyac&~P#;}nAx&(N%>&3T3XWbd6ta2Tw)*Z%GntKl@QQ4| zupW+eBpt3AiuEUCpxRudoRNw}XVx2H?;DXk7OB=B#?1NkIOOJIH1;=#H^OM!whbPC z2G>nTFOoo<)-x*{*2A09%FuQ{<d)K1DP2Ps?aYbrOY7_n9G-N<h}1b@BH}rW4RJqD zKK^K7Jw0}Q7!=`5@D*9Z>O7J0)v0y}mTx6&z_x7s%qYbGnHFRO^fq8G#y~b;Udh(z z0M6z|MF49Oo+$FaKuJ2+Vh5Wy3gA2MUBWtaTV&t_@hfe3Cj`=(0b>Ez+l2!B*T${} zC!0}iAG{VL4k~axxMzxIir9Mfmp}im=TdJz9wy_gAUi<MO0F_ud>UrUHO7{@6vvjL zYleLD^Iankw$~fYyr-Z4Fr7KAvkRNS+VFj25^MG<SoSi1%UtN@glMMIZjSD2eK^MQ z^U`td?4I%}-#_qEdSj}gyIIB+q+g!4;Js^{(?zFNkUV{}aY^XPJUv{?i*C8tRi9k^ zDE)k=Sc4h2s`Oz?d3o*>i%XRjw<<MOrD&ntXwgl+%g&OwSOEF>A_(eFxGE5yt;-)G zWNXJG_pupZBcIs&%OaXS@OU<qh|^c(LQjb1%KFy&RMmj&St{VSG}H>J;g<&9M+I*J z6;h#wQNEqs4aCBj<KN9g<#rFRrejnJB^O&)t5h!vRk-C{MR4x~-I}o1oz0}0_jY_$ z9~)+qnXFaj&1B6`1w-cV=R?QlFV1oZ`sGXQ5XmuEltvgW=kXLvkK|wlOdv@;RVVf= zsC{=fbRs}?QUsmeb^PU3_3hIs3hlcb+1uCz{`g``Ly^jU8Pyl#V+HTyr(K1&Bp@Hj zed*(Bl6BQo04aUhB+Aku@hb^UMfPGMcCvN`cA%Zb+>P*1Jb#WAu&CFD^A`(Lh=cw2 zPa!0`jKTJJkff&N!7oc9<SCFeLz22nUhMF>1@9L568S(EFV*6##T&g*CD~+ELzkin zY9tEEQ>>IxTzWo$xwga=zc|qU1cCpJru;%e8Ch64IsO7gKZ)?aQ_<Yq1oZzq2>g@v z{C|SLnK(WnaCY`j2%MSa69WHaoc|317nhS((-im*2%J{X&e~W;8R}n2@Xz`E{~*Dk zn3%rUQpGPs^dBTR%illq{|5>FdAh!6<bNe>|Du!sBEkP&{~P)I*RlU?$;r<8PipyV z%)gKOpYwc?*I#X4r1w8a@UQw8LH<Q}{}&|qf6npW)xUCo<^3$7{$BoLtgrw1qQd_o z*}qB-4$i;z|LXho`_+f}v+XO-=ehZ3j?cbd$G+PCK_>rYMey&-?SBB1|BEH^Nge;2 zSHb6X{#wbOxbWZH0lw(uudDo7|8MAI#?N)}|4Jvb{(ZfCO<LN-*38-bGd<gXr2mgP zBLNEw8_U;V*3Kr5pQW{dvx%_DC;M#jZ*X#rs+Tj$BId_sjt?n=aS#O{lIap5Az_xa zAcKFfF;G(i2@xU4Y6#v6DJ01(1`#7F6@~%=8s!4`Q%_+{9Z>2h(g~Qiyp)yJ&WMFn zORFWmf@fuOE2vG0@ypL9K>RSi7qO#nPTfaux{sIVV$tEDO7IfJ?5$#I31PYc`y89m zLzi^-4Uv&2`QYI=8PuvZ)<?fS7G?K?M-rND4_T=zzqxe^Bqu@#tj^)syjb1C-}ZKc zb~S?r?(8Z33|H*jCU0(b+qm?IPYuii+H`UfGfEazhU$XpsN(VbLp2TaXxD?iy{J`q zG3nx!`-<V6w9;K^<dSyJ$QrZ-!)vx!Gg&3+$L7Z8j{HkxGg_FZkO*D3uq<R?Rb*X5 zO2eJGSjQPO;hwz1?E2lr9<4IK_L}r;dCvX(7?~cwnxY72tk~s_fvYKYv&71r<zOyP zo%4y8OX6zwzBxTs0rtXZSs~gs|72mxbPnVJS{bF-Zhm~FA(TsBOJs`(-xar2H>5iG zK_QU00A;K&quh^iLo7~x_!#nqIj1SN`CX8-84<0lnokjc+zH<SOKSCcRkTVS@T&2$ z#BsnY9d7>TIaO75Z84>?C|FG~nq`^RG2XFmq0$}MxG*cIaxjNVb9n0<aC4OO3#8Ht z`^sJ{O4Xoc5q5$A_QOV;rR<!=%Qq>ry87j6>8j~Q1g#<qmhq*l(%}<{m)P=PU0|3Q z%vOKeL>7xkQmv$#-}Q1r%g&W0bp~SJHUcE^O7Z8<{2vim(^48ktWnPl7$30mNOgAX zzuDEZh5z#Rod%f@dap`2f%Amm@s7;}e+~DIm?~q4QM<+SRh3M09`-i-?o#H`<GJby zOuk!;c<e{J?Ri@G%YxZPpzCp-&@*MTPs2wVJw3}~)qH<uc+&viarcB9e$$uEMV#@( zKM3b1?iI&)&LbQzGlzanGhU<a_xF%f&bHY<+L=)4p%|zvd1&(KnKNfu=YMV>aPxIf z#ja#$f8D|9<O{IjxyXKQEMVB0>XU|RG`w1t?L@DJz#O+V{s<6zyZ4&RqB@r7SYl{V z(X6ya%yZ=4v$}uo%zJ$9;&Z&oy~44=vcls&yd9-l)AIR%9an^n*O++98BV2W?Rmi; zSzEyBLU(vTN01l%1hj|H*d<tZpzfLEEnsSQ>J8jSyx~uLfGkSGNp*Az@$~A1{$h3N zq!BDfo(RoAVRcN6(b+1GHT$;11Ig(z@Ldz}xr=LJCnSbGJ|ORb?ByY*_8zD9QH#Hb zKZV^sZAZ%Y-!}ecPk@`l$Vd_giYX|d>2nGv$#<o9xp#KM>SAeo*s=IrvEMdbVL0A* zo(4`_&^Xb#?3_)0l>Thjk!yo%w{zsX^3B>kzIjxbN6!=0?037JHs|Sv>u$<Jy2<;^ z$r0glPT&QV<B9d?3bpM9fGZCp4r<L0Pkw^z)JSfw_D9*oTlMfqgAtRcp36ZwwP>l= zyy!@OSV^h-I$fF2ZuF}_4OL+#C*piU(PV}afjcdfz#Vj8c!i+yy$QBb%!=u08m)_Z z?Nek)Zu0`Y22-7;s0pYqYk9>Xq>vJ__0227u}vtN>LfE8&vL2TN;AybQ9l@K`mA9& zSyZ_|Q{_)mv}g?b<+>t+3Q%8d7-z$Dd+WLL<Mvb-0s};rVN1~&oFdFt*)cfLtcmvL zf_%oQBWUy|8MdDaTDFO7<I>~!Yjh743^nkCt^wUD`*K=g>=rUw$Xv8>5L7QM@suL{ zWO3mbC(noGlMG^YK74yl4@9UPSJ0A4W@;i?Mk?l$OPmsHL_dywwk^@i?Z??eg~)zk z$dVZx<yIGJKto9s%djdnYiLSU(+1`$`B3nx;$trqimnv5%X>%j4w*u|zG0r3J<TM) z2aW|w2ADlDEs@`x{7kyl!@)KEh5Luc-sY{OivdxhN;I={%^Z9-{*0x8MXpGFk0pHv 
zl397xvUDPLpkvuE?PTJ?(()W<C%>5)-$w%pH}OYf=aG2)E(iSv#Bta$h|JwG8AS=* z!uSe%KcuORbbP{MoF9h8s$#j*po=+cJ+^c%exL-eeA25WYGW<ZQR&cvx{H!@jiQ$Q zcC0nF&@2u#xcpS0JoQp&{PaTq9$muZ&UMRm68$TOHD3b*cQdSqc@s}hFDS<ITZ_4Z ztd6<w$T{OsMmwUYl)Yc<QpiNnu^|UcRiIP`P@UBz##kEyC%QBcqPV990KPq9>|kQ{ zA#>c2;hy2HTl_w9bQjf37zkt|?sg@Kx?E1p>8$r&F#m50-eOcXgi46zv)!9rvHqJ4 z(j&SFD5S35rav52uI)(YqC81BmzEKVIkA=!yGBadmzJVN+?4n)m8Z<NzwJ1**d7H} zVK?Q*4n66U6+~7;n65Nv21Q6uy6z&}eWor4o~N^0@;c+=egEWHdqrl9vR%W@ov}nS zy^M}d4TX*7f_(cm9tQ5~vcmy7g@PV4(Pk=XF>35xtG#y8`Ddxx!}e-<HT2DD`F!K2 z>BV~iXSgpChRa}MbE6Z@hSBfgQoPfSV0zWyfQ!P;9y%-#!Vh-_rXO2A`-z9U{W*0B zad|y{tft%(^bdG<Wyn{u?z{V-D9CLe(HRp4R+uq3w|hV$_fco3=(H1d<~DY^I`Uhy zc%7g>&xbA1es7rI6?CUDpvg=u&ns@4>s!E9?;7L%n(w)j%*YA*L*%7$v1nB{Fm=`+ zY#x?%Vc!r(edLfZz4asbo&tToaFm8gptFyMV-+|b&VRQ#EJ{t@jp!~%TOZyAyc5?M z-VMmh9+`{a#J?Cg5l$SZ8AcnIh^C2W3Yffbs;Q009(6m?n$F)G&>k>L=?D1~_lv;; zZ`Sy!p16|k1x`UnINw2Ab2f@9&K8TDM+T1&)u{E)PTLu=^4p)4udP&eY6ss`o=sX) zPV;}P>Spj3%KBEx=IW5EEsQQNjf$<nOu#WE-6^!)O0_u>srj<0brGvwPNhIQHvB~2 ztDeYhSq67$5q)nKeYc2twU6m&5Y^Qpt}9RQVZFVT_7E)Prc;uwzYm+cl>3z+N=fj? zb>#8N|9ri7*IzM1|HHdLc=R!i3bK`Sa~r(<+h)C7MMvU>hzDF~q$fojv^O{=n{I!k zwf8sb#(TXu@Y{MDmyXw8{5dzbfX#bkRLv<iH;I7aWrNtMR4J6z#v@0@Ox!@|1LOGq zo{H;!f)&zc$QAJ>Lfiq8)-9r>t<Q@>E2wrO0c}n25t<&5L%U<!hahHQ$-e_U50d(V zffJoVAmL~5Z;`z9v^~fHisMv$vDGVIOFKk4!QlBUG30e2*Q;?Jeqnc+bx?H7JEb?{ zErwCbf#b>NLRl+xDcM%#Q8c_=Sl7=Ac_82^w)RY(3VN&Wj@6J7^)Vcfxm*8Uriww} zt1P1od9N4?>_If~-BvZxWm0T0DB=w0cjf$8I2fZTVTv!Apy4Z+V33#4&Y)`@PN5v# zPJ+I;jU2C2yH&{Di)X4RP;bK`%+SrJ(LT{V)IH8U?HbbApK>t!IsCI}y@;EbM|T=x z+Fq`5c+bHD2X9?#x;*&ai%0KuK&-6;s13)^Wu-YSDuuioG24TqHucGmX|=VgotlYC zG__2wj)|^GICSZCoJZqO);nvmQ^UZik)XQTzQ!X41MFqh;=y<3*bE;61?<YGEa<*< z-7L5tv*qyKr2ym)!AhooA!<{qye|NtDHO&+Ci|K%1w2tv9u`O-zOJN4AAGHKg-t7| z1)4v4IwsKP!$PPPQ$Sw*tQ-W>&aB^oqISBuHoG*%ldYqsW(5oa{Ye?<2351z5G$$` zWYmqn=_FZg#57uq5|PrA(;$*IM@bouX)~;=fb*)}BwTzHb?mCryld&CEAzxF^ZdC! 
zKJHkiU$)NS=seDAHw?L=%zI5c5S^^p!Dc=k1lPQljf9M}WQ{$HqIzace>2vsGF;8U zw7894j+eVZ-y=;Pmu&Qg%3s-bu7=akrmYBV^ve9=3et;gIZ&*gvPpAjmEkx;X<!4T zFM@-*CqtyBFa#B;mU$ujmRe4`(yqUBI?k>0eWTU+{<P#y&!IWa;J*^)Uqa$bm@_l5 zv;0$|e)*UE-wsY+g8M(2vwcade=%ocW8?f+UH*0Ozcy!MVqhVlW%=@DV`pF_U}R$d z^pE=N`ww$AMNN4{H5F=Q3mX$B+P^tB{r9r`AEs=K>|e6#FH<%a)-Or)nep$YY@Z%& zpOWm~jGz9}gIWa4pHuyZOyvB_tnGiL6aVii|HttEJZqma@o&a({~{B=X7Fz^k@3^D z?w>AJf2;lFS@l1-SaJT#HTz2^GJk!N&-{Ps#LwF=pT1fDqfm_Tg4RY>>-zZI#gvIT zLz>Bh97J5uy__3WPo1g=DDtTX9B4^g9<jdFzb-HpswJ|prny|j3i=a0{pDXH`m6R~ z<Fa<q#^z^@*Rt(0@1?EA#?sPe4avurH2;O4+tm@->y+1F_HVD8D?28FXoR@}RbfJp z6B`xSC(c(d^Jm-{yVP2(y|y<skt)PKFT}z$8oTng8iT$ZKmPDGsC#5q>a4UpUC&`H z2n#&{iXCekm&HcAN4!~r<W0!sxS9~K4c<!d(<?!qhKE*GnX~tau!48mh3~c1dYUdD zZn#W>m{$3^?pi8J!1rP`$fx#v`)!@4t*0JYT~E*pALZIx^4*@W&xvS=W5HCKoAs6L z0xx^b^aZ+62z=0M42lm$=i))WHS55%IM&b8_&{D|h`?&~;X>SVL_1s2b8k{n;djep zU?c*Tukg(2@tBIxje?d!uKV&@26+@<Z3~VxkwP~kHMJOo^!862jzEJ9;0Z~xs<q+X zuy-+uVP`~J6mOEUwW9A+9ih_u-2{BG(g}S1p}w%po7WMqVel=9nvhijr`8`2K_8K^ z!bIlqD37n?4?EU(7;B(J9(XcBQ8PK^&@Bac3o(43H@E6!cet9tc*1l08hQdm7;Y;@ zqRe3|Llbc9J=r}WWCWb@-yDA2O8nTNLy7eU<kUv?fZ)Nf2)7PnloQzmr%Nf+n%&rE zvY&(Os1p9B@6667&zPXU4<aezCNQq+Ompi_0^-KdA1yV@c*kQ>mRR8?Fc)GDRw?qm z-}a$Ldpf`9cyWY}|A9^Y(<7vB9#Tj*&KrLDmLNfb!I!rkRMim!SAhJ+bX`VN-Y0>z z-cjVmG&w-D@ZcElj{1($b^F_m>?_<mh#rZ={_-8owl)z~$RQxUtyt9z*~*q}Gdxc~ zU^#k@DEds=v6Xh<R^Qc}n}}`fL?lf>3WXbPz}!x|L;WwTg@D$guz-H@nInunkn9Nb zos=hyS8iUhhU6H_5?A@374+udJP7Z+yOGXfn{lsz&bqUf%-$FEp$0t}(#fE1IB~Z# z%F5A)g^uxqhU$Ny)Hf{SMWC;1QC%BBH^R6As^E)k;|<}@&l$IqZh64A_Kr>N(72-B z!@I-~Ck&8V`?{PQ2z3FrMBGtz!?JGBI#9QuJ651K_aVIX`pa%Ow`ruL2gJw{zmMdN z>QO{UmN-^~Eby5l^F;CT`%P@(=A!#@zv>pls@B=BfGi~hk(Hn2M4^>i=|XW=gs|2L zTM10B&8jxTx-SH{_GRLSwD5-N^3(S{$~Z&$fZ@DjV*%8b^tgfGL2m)6pGk8=PY;*W zZ)ydkW!><4Bka4P`9*$&`UPK2gCOVr5a)J4hL<2tk^rP6V;&W3OlOH-+_!Da;uiND z5pm3T<;SD|PvJL_XJSR|Qdy``#`e}LB3+ZDFRD86Iwp8y3`+MlrBmv~F{HC-#>yEg z$E?XHTdjDG{T-ls*K_K3lB~Kd^hD^4>Hfxk$K`=~#|-P*j6f^&=q~~*-*U{>0bK`c zS4N~9j3ZE1PE!vpm{#O$$^MS?gCQ=mME$V4g6a?{!XSIW_Jvqm$XCj#CoahyOz>8w zja_C0lh4-qp*%q&1M|e9f;bdjN<vohj?|s!^67`VTJ*fZcMV$8Bn75pk(~SkPSySy zlwOLO9yN?>kR)IMu2ps$&345{6#=0|SlEDk@JuK_PO@Xj@_z1a9LoNYgO4q7PA<hR zHYBNB?c}W7L&|H9z_MGPTm)T?GMur;!#`Bqy}m8nO0@H%mLkEC%g}1>P&h5mxdd?T zP=O(5mfxU&p6$DJ1-=ckmuP0?hI+ODp@33=Li@mW{;(%fg)Xkw7o?{shNyy)eLO%1 ziSm9PLkd(@6j*H3AI-iE^iEsWO2|bUP)dDC1xc`ToQbc56Hr>P2+X7uP|X=qDzgT+ zdAuvlIbT6coJr)bw9T;7SS-9HP*EwEFp!3oy*~gitL@$vmxJCRKS~3CrX-9$Ov4wN zC$8xfsd;`z9kVW_Jh)(ob5p<tjpd3{yf`+01o1jd-dZ_RnTv$_Vd1HHv+hV=y{1;* zUu11$+tv>JgVNQwk0^!BBx@E{?GIFPY7dlL{G-gnqH8sTf4w1Bpt)_+cY4!yZ<<@q zOx^91biK>pV86AyA}e24bfHhve|i59e?WO_B<HD7V-n@usA7jfbKZ>!Ikob!l)$Y` z)?XB=9vX}E7Re_Pm{x?ZE?!3`k=RQq_T)ZQ=fVr^Edv4;JM;GofU^iBf*_N)_2Ba5 z&xh!GxAR+7WL+tgNc82GR8UB>q5^*tI?%VYv>$*FQCguS&{SU)Zs8^4vC*ktStjrZ z9PD9IR)j-Wksbj__3DgF{vKxB3_{hr#Cb5=4o>TEPsqvKLL?3(IXzl{XH_vb&$Zxm zaqADi@GOG5q#mSw+rk%Q%#6Mu3t{-{V@?P60{l1S_1&DbqL_q-qsp+Wnq$6^7-)DY zf^<Vs(aaI$IfS!v2vLocH{R&-td{ortULn7nj(dx<4<aetMc-}o&|l^WNd-o$&O<p zRE4Cwg;eiXkKf=Psn-<{lYvfYqmG^<k7#HfQJg`*4tQ3Wcr~564#d^swXnA1bisNx zhKbT>>eGk93*50B=7!6*;Z02Q#<*FTsDB-^l6k~zvUG61!r-C30|)tv8Xe;u;3*O5 zDg91Q^1sG0Z?n$pS=FQvwVaJSWOMb5UI~rNK6~K$d}S@o>|r|5H+wbi_T~4Bn5m3K zYOiKG39()65ST!<s4AAED-bC%xe9S798%PfDy$d@NyW87(gItP@fe=TcTCb<DFV~c zR36J}1<40e^U{vrFHTZfo4$K(PVlxrTRhKNf2Fo0!8;IlIepyN%RIebll4G@_rd#o z5bFk}`8^gzy6XkYEcCy$l;2*np(Ux^IQi?cJQ)mZFPmJi!t`<JO0h^o;Gg$vqV#z* z3L{MQG1>OqVfLVk^f_opX~nlsiuzqn1ZbZOYZp<19~>MWz}-pJ>zzNoM0-xeW`qx$ zB)#aYPRnS0I6C@o#{b&&D7a<^G@%8UsK}^CLvL4$-684VZxUQ5ux)Z{h>meu!-S>% zaR(fVP@5`JFgQS<#z)3gJBihYZGxSNSH@?{v37Fm0IH+hv$$>PqEL%4+a(-nSU1{p 
z)m=Td!vsO(M!;U@w;ecZJgop)$gbsHvoxV%v`GXo?qI2feQV^GxR_;)#f}(<S!2JN z$%JSY7Z?;%UypBjxJX|y*;%H=4r+P1N}c1&b$41j|2W~y{o##U>b3RZq|)E%b;a!W zG%FW(4k5;CtH*@&kb(akNfqP+C#E;zFkWpnigl(Sh@QyQ7%e}~8CPtz^PG(F%bzFs zUTlJv5e5&S3qQfBJQYR3O&I|xVZoK_(tCaIhd7Q+coweM?k<ZBIg%WCu3YhYL`npS zgJ<Q%k2eb5*ho?BUmflDDlyD|oS3lR-U#w1pv8Cilc6~?fLAaM1~AkbH-()bC0<C9 zq$-D;8%wdAbj5|)m17$6E2WWCV$|{VVmd?Jbyyv};*+y7y=3G9JZx63*yDh0aH|?) zan0~)ELI4~ELYrts_#fij;_Ys+dP+-N{Gp@F2397uzq*0SwQ`B_yF#!#%8s)3+d<Q z_%ZCBMEv^Pq@@L~jz_44WvR(#^U&dV3m-@SE6-G;x%-ygfJ1B{9A=laesnK?p2$=9 zu%%g4%shu8)IRpS&>UK1mMhb6&^Hz#)U;Wg90{voeLNi$re@G1%q5(N&|7ipa0BA# z9OM;8AQ>Bl1>WD6XAQ!?0UKAa1CTN%MV=Ed{gr!LwiukZ*7A=2fxau7Iv`YMiuC}a zC^bF_(xtqa8MESo)~-_2;4Zdzb;!DES{#!x0{4etVkVxKwVFG1t#=@oqst5Oju@nj zEBrc?XVFt8z;BWPw5xGdQ1RAb#l49TW58qBi#=&mCkVM@9nT<877aTFD-oU=n+8oC zQKfF<d_p#GQZC8wBXqOQXhX6o7oKqnT;r{6xG8XseGY35n*yArKYIr*(_D5PIxG2Z z*pH_{<@1MB<i}zhB!SW6iRndW@mJ8Te{Llw8I4&SF)3!CLAD+N$T+STV&r~r$fswF zw|BK+UE(;g3|8h*PzDKz9oRcRt2B7f>|9tmS12tvl=c{DcJSTUtbL)hLT;@iEit$% z*2==;_IpnO&f8YC7;asL<9+qMLBW0=cG_`?>PwjBh0J?VUJTOSUItpsE6$&9{26N* zYJUVE;$^HehZ5^{rkR*HA^Ykj5((m7lvX1hmE`a}U4(ZDOp7mb#x$Lmn=KexuMrjY zPYHhadL0*s{UNaLqPzZM<wbO)6bdd@tPB&G6^=LSZ8}n=LkDRq7HK3T6gWmb#;8ur zZgu?Aodi3V+n~Ap?CT-%D5WN$NES+;6Fl=M5RHK?{@g7F4U9-9QdV|hdUB{D3?xU% zuss$=N>OhpZL8IKF`0`-*cMBj%@Rs>r(f?>oUKpg%gFbw&i0Fyo(3MN@m3ohq!o$j zd3ICO2j}2!Xc|~}?==?()Z9?^-7<%baW%e6-wW}_&)1AprXw@-)l4P?!hbp$*Z99c zma<mM0*G))Dj2tyTNnwY`#_?egsPWtWv^!_`rVO?;AWWM?X_4U9AIMy4Nn#~(+B%C zo|l~J+2wyczsAyoDfWX4KGi*?0E=^`kO#e@1-_n-fUCNy=+<Y>6RL5|4Sv(6Jvkw> ziwc#nV2rr20mI8Xoc@je3?cX143?X(@FIREc&8)45t|BjfWfD@2v*^V#3l44yIpv8 zL4#U^v{fUju@;h*T&exsuH%wmF_TJ9q!oiCJ1AK=#es7Q#}r2)CTTK)K%cD(i<}|W z5+w+Ck)%IYUB)ov$&yM2M?nTQO|eoT5fq92sAh;43yIu8g*e+nDhwEZ(4v5-__ToN zR2iP@ShVg0#2j?Wf8Ojoes~~0(11VcSyW+5erOc+dq<1c^tS`l?=^4ZO4F|;9uJis zPO3Gulo~v@@23ZkJoOzfx?`-?Zg0KtyuO|y%hwA@kDVR|V_rJs*BvP}ZnRaa3!e8! 
z59FUaAM)Ki5e#W?n2U8MA*Uv=ZhPSo=!;1R5)QY4))SZg92}vL`zS7_>=bU{5RqlX zUDeOw<+hZ5!H%Zzgn_3Z0|7%hxXlNRiIrO5G>_rQkYFMwJw>WSya3ojw2Q16!h@{j zWvsxmu(p36&{!XP-g^&$-Ov~AU<*-;371`}6b<&cY)8x(ro>_1h*sIdWY%QF3s;Tk z)5hUBSIJdfPrKf~JAB3jKXmrF(~D#2=rUZyp8O?P>^@AJ^7Bfo-IzL;+(#*9)F}eM z;^w)Ip_}^E;|YHUzR%~6Ug;3u>+Wdolv$n<zFR@!$nqWTL|D%_vtt)C8NDr5Mrocc zjcdx89iuHPukZu*;~<Vbk5q1~$0lh>yuk~LQKA(NX5BPvtOJkLz7nU9@C*Gs7;XqL z(mF%1UQ$vvwz)2su{#WIBH$h<KzhqAUikbfw{}ECFe6YpS|6N7Gi%<CJ(^k{t38Ta zGz5jJf;$3Ww#FM2MWm$Bb|6)Rds7f1i$p4kO=D1l3SkpKM2?FpA+=p)-!ef3R8qZ; z5bz!fFBuT=1r3KCYy%aY1<k$0bozCQ9{#!CwSaWH-qTSzFzJUm)KU)Y<{K3uah>-E zM(x|qH|{^Ha(v>$MQp)!UC8eYuRDw@y2u=XSRCV+EJ900f=Z|`yFkw5GRFw-@?gU< zp!J0=c+QZsvw<)oqJpIP5yRKO;zj2(@S9?%k75FOKqr6;T3PK9kWR08TYUH_GSGr| zatvvJSMh$dv$lk?dV*?DDKE0^TE|Oy1=Em$p-YgOZyGNh_PTY9Gqz(gO7nw6ZyGz* zm)Y*Jbppc12GQwqrHY^Xp9u!U>JcQL(P0sbW}4Itj7km}Geu(!l|mSey0^wl8;z?s zXqGRsC%6bE<rX+=&Q?Yt{*F!=;!?C*w>W3ZWyobay(Ij#LffZt3O~UaV~Xy=-z0Us z;1UIY!YaE&M3MI64tn)jFeDdRq;#B#_LSRG!MZpiq%<Obo^_5US-q2A2`m*Mh~m^) zGd3JdRm{noKfXC-?b8S8f{?aJDXN9ALzzw7nY1(MvJ`%HLKbUYGE2=a{5IPi;8D9{ z>Iu&|f+43zBIF`*%r)dn76K~yM^8aMNDm(rR^oP68uW-3pb$$B=m@SQA*%f>)q!Mp zcLU-xwpu`Sz0Wy^zXUJwgMrbw2u_-j%wTTZV~Af1^DBqb>(Lc&PE%g{%e3IjcfPm+ zzK>Pp!2lbAX>$HtHuW`fN7t_Fj)-E1Kz$Yrs{25ZI&}9<1BVsrYSk!_s|RHS>G(-g zhGR}V0RJ6G9|)FStY6tBl(Q`6nyf5y?ktUD`30<muWi;Mcv{JbX$U1G#m4qmn^sip z`I#w!@U=FYgOW2zXUdt2K4%779Ka^Rj7lBMxx0pm$0X$FUg)u91P%?cKM{t9883g< zJMwOquNv@`-%#9h>)W_yZJZY|iBq{6UX`2w*tH>E3fdUY^D~UJJ6(a_ytq+QO%A>e z3}t<G`yP5_5IfD|u$*)?42R9zO|~8`hS$aHqEDN`<2pI<5>uaqnNVkr=dHpen}n@E zc6i^RkR%OSeqf!`HKl?6om)pqP=y1%m>QnG(-?<!N=lI;d;h+R912YVMHl6IuaBAN z1nJrm&})z+(xC)eMlfuKmYWs_nFXsr{z*OG8VC)<rt+;)C|2UfC;)#lhoJj2;xZU> zNbRot{=R`#pNypMfN`6IV$Le$v#F^NRkzGqqF`o83nhpx{fu_Up8P;8tgH1_-W-hs z6Z*ESV;aXbmgGUSVsg(jB3<zG*xHk+1s^X7wIQ%xI^Z^1u?wa#yS+wC{5hv@_bIZH z5`B3F)O|kR+QH-ZGiK$Jm<d5i)OCI0e2qf90Zb0kpz>i??$#zubso9%$ZV7dN;A03 z1~Ijlk-0y3QyM<I8B>XMn=7}vJe_jOEp2aIvqf=tGCw;FHtgJT;q`j+#3wJDzAwvF zW^(v0+=it+_%!TZ(7xp0P6k9K5R1h~<M=u6>6ED%>MX>y^kOE#dfJIzI3@~9d5+#C z?vy7az}On)rbzDcn+0qLujTIu>`4F^28gasKy%g55!_(hAYJXy112jvuM)jMh>Oo| zj7wJX6R9LD0uzNY&5php6C2g`ZHrXKM<iCpj|ecibCd(%TQ3)8R}NGpE7#i<w|gGu zc)eV`Z6IIO_?qH7y;@$0?L@_Gt7}V5cC9_i=6#T~id?u0?qgdb@*tAt)9?rMKW5~y zai=&=AV?b2za(L&utLS<lh0U9W=%TdghgA3Q(HtlaCBjk?l@<NaYZ{P#vThknR<kt zQ{OE9fqRyH!pNHz(71BbVxsj({f)NfxT=1r&!cOjiAn0d?8RNQ+vLqPhD!gAbES6P zfh|u-L*$P&gL%g}myaOOSlm$k-4(8ma#kOh1G11)l!FC{+u!hc4uIiGigG!(Xb6Jp zu{H$e-PdpLz;~vE;*yb5$8$8EhI3F6XK!v8L&~8NNL5+LPB`YBM8pi>L1tiY=x}G2 za+&zV9S6L8>r$$er8>8~925t@!`pK@u%<iHVLBE4XfN5J$};uarCT@Fx~u*VPS@@Y z?y_M%pQl}qreFIq%w{}YKC?cghv@^+zvlMmT4K%M7tVdQ=N4s<hj;j3qf$cA<FXyb z*KM+tJ=URa)nQ&tgl~?SIX#NNe;Q|Pk~TF+;7nrKfDEQK&@8Qu-387HE1@_pK@cJl zm+4!wYF|yagDG+WmVyA5XZNPn6BuZKQa@oZ60w(WP>Ed8i33*>QdI}>8BJ|7M`HYp zLVhOsc}z1%2Zt0kr5b72+)xtqg9Hl5gpw|Ic7VCTMLK&(ISk_k>s`qUR#mD|qj7Oo z75Gka$h@wwb(I$YQsGcD#mr`uGWj5s<Nmg_cH&!PD!%r^U=VZWy!hpXl5LyY*_@Uu z(=fPiI-ARzT<6YJ-;wEbpI5HCcW=ek+xw&v)zuK-m7>b#V1e8ILX0i<FWB>@IXVq{ z*u6WTR;>6cY5jyHO;PBTf-;yYf#0v9Apu}YlhzMJQZUx+Ub!VJM*5>S3IJvHr+OJE z*w|*R44S;Fed&@4)o4gfLz<Dh9Lp|ju#SD0MaVgYRk0DXoTv5WAEbHKu7ax(EwQZ% zGg8;`^tN|i!@@z+v!~3s-|R1jAd<Pc&T9+b{B&LIJru^Tb%5h8sy}axC>(7ynjGSA zOtz)f*5+yO-+j=Kls<0pJik0E+37TS@U3)U#^2(3oDcm7np1!Tda_yIjl>$&NC4Gp zzXJolpqXzx_7$<B%knos)8F#!SZl{SY2X#k_6fbGN&`3yT-1qL3lof84;U&ZmTGRp zD8j|bC_v!^p~fJvqZCO5!!VtPwOEl4XAE|$8(t?PK2vc9_FAmh7LvV`@hv-BNz;4e zeFMc)AHLGOF?L6Mm;B~<dLiYu4f<e~8zz)FY<do>2jQ3J@&ti*Jfim^ys%{wb|hb< ze}$JE%Dg|Sf!V}7Wh{@)e9hGua!x20p9Zm)i0neQ%4A42e0Cl|JVsMJUm!s<Z*lVm zg)|QIAzU=>tkFSe!irO3;|O;Z7XiufEl1eV@W=;AH;${5^?pv&qHKW{H&Ir+tmOBc 
zNJKY<K~OT~fxwwZL_*&QRjPsxCO?+N_As>2s}gqxqZx&{l#JAM+paKk|4EWzl7?(Q zVhrUfxu+B&@-Xo433*8dl2oE8k+edVz3?*_+=9xT_)1u8+ZNcSm~x{~Ev3`?$#*Fc zX~`w1(X|0CE(!llJ#OS1_VxzLg1w<istlm6xnkkT2S)qz@~PiL{%yrdKdTEZTT~`2 zskr2uY9CJ8GqA;MX*uU!?h<}-rPBVI)o3#J@{m5^{wTbmM?=|cV!x#sAZ9jg*44S` zzsib}zue06K3Mo^*HMt$MolP-S&`d@y-z}&HV|__kygzpZyK0Tpwn*&+G`!!G$uUJ zp;~qiP1vEhL}WN*+FiU0U*x%<%V)7D1{ai%=w74loD7_pU>e<A5SxHuufgJy1H2l9 zH*N}AjRUJ6L;X|KF0=D(ww4qc=wdsoi<;{6%{4!fdu_J)V=^EO)kmwM;}FnF%@@2P zz6mjJFbRow1X);z2&|r_*nowmh7={6&KIjX!l3aOsXeTet2awfIiDt13AeRfyQh^( z5uiTnNDcoFMZLLnYf9jK&H3qR;fL$X-Lxv#^U*ZQ=3?I~U)689L@@%gs07T)U`VDO zPLQM*2ebi4p76jCqRL-P3zz_FMhLVzs${tSqqhqhC~ym|0`h9%wWFla)}x_IBam!# z9`bZ#W2Qg_2-UL@g|GvH)|xqb^l(rnEt7F!!G>Wz2XNZ)bp155TP(^bb>;8z{1a%I z4Kt-&Llf#BA_S`U5QLS&g%~qG9G#XPLPFe7NH4KexqJt8oNt2;O!lZQ&#=^GTB~(D z=84I{brtrRz`XTYt9AHYm8rAXxo$Y7G({UinRFQz31F8f;AGW;P{5DExqh7S(Z}?- zMlh!scB-mH>bsPzh~^uA`hr9Nk<_m!S`RPHolVPbFEER3Xxt5#U`LxwEO*ks-dA-A zeY|bwzN~F{ou0W;Rz~6O*!A&+bVK+7zfat~dv(8UR6P1JkGLrUSg{uln+VZv4J3hR z5X-A?cOS&=O^WvnGSxx#gg1Lb5NFr{ILqF0z>K_laE^*2#s?^*1QC4znDZm#fOHQL z`RnCW1}W_uwc}YGQ1S8UgrDkk2V#Yi7|&yxi`pT;f*86M#LhC#xp2mqTm2*>|5jy4 zr6RgoVpKYf!)oh%NJE(_>)Tvl-q)Kv%Dz~Q$pQTb8}~WW(7><XKAKxD{+J}qSVA-D zTI<ASoP0lSNRCD?sT-@iHT(f2y0fB?e22^-`FqdLP;0VZgK)#PxA8x=ZuI+*_c4d1 z_J8c@+~fqih5C^B=3Mh}g)N=k1luQY%a*n9``7+<uh?0;Srfbfx~N%$UHjf*){@rp zN-M8Xw^8aN{w;q|ng2(v%iO~sEt?gzKDlwD91}8CPUw0^sA7nR`;?ja40YlLj0U_` zylY_E2Bu&AV5>>zm=8`5NaGEW@4pPKU~=5wM4ipA)ncLmocsHG=aN?-fNlfEt7`cu zZsA3Q5ys$$1UZBvLIE(ey=wCky^Tncxlte}`QFUEb7$eht$+gBogZ<hYZCNU^cGpi zXDmFp>Nz%Ao{z`Ym*GOUdfA8B)>So<kDN$iGklNEYo<|Ze{S6LO{`}j(O9sft~0t_ zQAbXB5O3`~ru*pA<}{A(O*oECA(;g=p~7|Ik%Mn{D?LOaZJEqi&U}9=uE{~V6P+co z=UO6=_<{U3SzNx0u{s<{mNOcs9<c#AQIHZCHVI^#QctcddjmlAK*7e2fu%E<;@iP+ z+*IWI^CQfcbYZ#!z!vgP(mBGL@iwC<ezx%5!j_jmpg+29<2twFq2bn8iq22BsKePQ zv7aSbYTSYVB=yw&kCzz7n%mOK52|I`qFk7->QULd3o!H1%gs^nk<uO3`3*~!4dyK! zV4vSSxco(fmcO-Bi5D{*Gy~9Ik<u6AnWJ(U(36J|X6=RVSC>0J?;aqIr5h84a%mP$ zEH~ty&J?Q4t&ZvwfQ!+hjP{<#HfDm0x_VGMs_x6JxajZny_mI+00W2N*X1dyzS4~< zXm`GtVix9w`C!~!G=3P|F*NBlW%>C3)Z@;-Vsh*lz|2VmhGTas<(yu(<Tc>6iA`U% z2%VncJj_0%`GfeF@Kll8G=sz!cUAn&BXlO1U0msy@|DaJ`GfF{aQaaSYY92>3GTNi zqGMb~vZyG0A*4kg08TFeN#{FZZp$?{^=;rxF>>#WaEvhvudo#jOU=$znE<gl)(J>} z2`g412UVvoN<wN12rj%z1loNE;;aUjhFW0}e<zi34S5{jK$0|G5Z+|Mvoz!pU@K9g zW>S+mW~@wPXaT*aEJXmEt<q~6-w@|^oC82tpgH3ZB%*<9>#Z4(eF@nJ<K4A5)@$G- zrV~>MbWk^Gs~N{TD{vSblZ`fL>apIz;Hv2#4kwmgUaFA3_UpJmT#d|J51Tn`mTqQG zyImKX1UcN-Ga*j<4*{LsJR+$dub6GWORCBQZ0UN6ISJUmS?@1No8>lzQ!-7vsxqJ+ zYL-r|53pqY3h|i;>eKy=x<ek+ptEB7B>oiio*e+`f1hLjxo0;7OyYi&TNHkHvWzx? 
zYh-f7p1T543lg=ZNKk9U5kDCYwS<gl(x5p@Z-8~Yh^$pIiAeA48=E(doghj!o@Q1r zNkewHs?kADedN%+cp|{G;<pmK2n>?%V~<FyWVdG~K}$6@Gm159LYhcET=~u=l7{>p zB#Pq6gGlOfaZjAet28ZZ1g()~!mxGGgq&lNp161iX48Z3gWFfhWx|ZHzGcNI3+oO7 zJ{=LaU|orHFo5hL{4WJZS=ox)#q4eDDP}(AD#YPn>&B5XgVlTd!fb!kMR+|qe630n zx+bz{tzDl=E{P!>WVdc~cGAdD)ed7Jmbqz3;paah@!0C^?n}QdCusTkJ_dP_6Ww>> z*4Hn!XHceuuH(l?DQ$s!JFO=SS#d&67x)?S23Oi&T$qlme(P^SnBzv>#Is#Ke@%PL zMZOM0i0M&TNHT|-iQ>=Tf95~zZPm`t;7>~7dV``0=!j?oZsBi>fy>8~gtI~Rw*|#w zSDymYj99Kzu1XFVikBWKE0|>{LqBz~rwwSo9dZKlz7oCK)qbU=Bu^{vT)^~1XAXGg z@3)|9$XS=Xr19hbCh(_=K`5>Ir`TiZ<u`;Q=$Fq2+M%GKdvkbuk;l0^yrlkIQ2-JM z;SJ2Qll9%CuQcZUrx5c5_$*`JW}(Ta>FM$kxGYq37RNMHs#F)Z3a&MM#JCRlI(D-2 z-(xLQw>D$DeqJbVt;LFro_=dFno(!LJ17ETL6s4jS76KxFfHOVr=t>qmdB(LgPtV{ zHZJf*;eB+B_|pnZdbu|c1cY<y)Qt{7sbQi&XOGZ??I9j|iv(*cyulylFAI<Z-VT8R z2(%AU0iq1qjA(weEgrMR>~*afIWVT7Hq_o!JEA68%c5=3Ip<MwG&V||4FY?wHl<xK zi+Uo7cGse7;C>>x4K<p<bD~R>FPo2-PcS2qKOMkQ&m^)4L<06=grX{x39byO4F8#( zT%LW#g8;6LaF8O3Ljh7Apd7Uvxg4AV!Wrz?DPM|TbH<Z=eG!tGGhj#ZLxLdiRKOQI zz!k^^7#D~u05xDt9kE`K_y3S~meG+US(X+vGcz+YGcz+oN-3rkGcz-$n3<WGnWYq^ zm{KgQs-~;Dx~F%#cjt$Bc$m9Cw+Od$!d{&Fy&HvEkQSvbbSY(_3E80qNfifa$*zD9 zDr6&(lq{8t*SHOQWQi6FqXHz-pPkRzg8dyZ7c5uJC~>nPBW?1`M2?{$BU|?JyZnq3 zo?|HCw>!f0&<-j|!O<U*Hhj+jY!StdTu*oq19CU!kJC@mJmc6J>=~nnwe_n~zIN7f z>>dyZqm}@U=&KOpb24oid5P@<E#H6-ikK*=iZi%GvU25=k4hs{0*x|+c6d_uis&{L zVluSsltFFQd^6VUl`&no&AG$Y8P^44m4V3Vfjx}?4moac>I!;$uSUY+fE|;Tvec9; zS1k!gs5EaY>XL$Y7URw1Xjx(^#t6qd5&Nq6hTev#S5RAXJ4j=;FFgDlLax^7yLyzG zZV<A)I3koCH3NaDRD{?P!V-iZhf@&rYYmBDyc?=lp*3CgZ#^HV-SDS~Uj$L=pVN)= zoW=BSK}N!#(@jT;ezAT))(MejnDitF0F14KjZAyT==(i60wlT<x-}Tep=F4-(z7kM zd~E4lzL)ch{KB{IUd}x5Zifx`y4ash?w?fN?40~Ep4Vg?{dMA)xQX1L3iusC)%7v( zoX`it1Ae?|6VQ2L=@21%^{9*7J*K^&@%~-1qYiEXMVDJE;5K#E>q4wWjrbPX8uq<A zx@nOrAf_$lMPIs+HYj^Eq<F2T#(BbGrMJICX!4-|c7lnf=Q-kq>dQug^`n`JSkP=v zt|<>yKTbLTDeM>Zhsj)~8<aIrT9(8w(KfZZWf+GS+jcaEJ_g?F&F8~T?W5kBHJ!?z zHoy_6x0~%_3(l@dK@?7pxs{|^onHgeD^<SKO?Flo1$roIx^+~>QhkR`QzEwg!L2{k z4Zb}Wc6yMpI<;M!C1022TAQ_KZH~I$>U&OOirfk^)j#N1s{?biW1ShqY>!lY<dm7b z&jBm$7zkWGEXb@X6MiH+G}xOUt$2+je>MEA3t?$W6T`$a!G1<1GW<y11!TAwEg*gx zHd>k%u8y=h@xjPbUo_TQb=winuvlsPOi9n4FPK4Y#j{#^lsMzEvP7V1!RL*mD#^}X z@|JODYlFDrg?mYNSV;St$Kt0-I#;(b_ad3Nj-C8<$ITA2J!-F_6d{tjB2Golk*10_ z%txRP%?N*RHHD_=6Qlo?Jg8%mi~kKZRk*N*!f+ly7tp>!eS9y68447|zJf0%DlR)S z*A)D6e+w3Afc;MS+fR;eGI#zueaz)`HRafCZRLXKSy{2KKU3K@O`Ktvx!Rg|TG+am zFV)3YyL-O<9J@Kw@1an^!71M=t$-O)Nu)G(_n`<t>@?-4aD|lX-s%)Q=L<f{XL8PG z;!($S*n5usb&OEvZ2_SQFKh>VcJ}h^z$<90&;R_XhV7?Sp<3b6b2@MkXk(YZ!21VZ z#n6WjrVCDEZ#1}gz7O%^Y4ytMWzNsh5cKZuPVr9-`VMso7!*izb-6Q$c8VKM=lV9j z=QuBCjfrJtyt*p*_JisXc=O8*Z)b$ZyR2)>T<1#4c7!|BVamg=k;Nykh=W1UuCmwg ze(9!#%a=i&^{PqL?t|?O>j;N>chf4;mv`9*zCw>Mre!|UDz7me=^vy&by{-eYMbM4 z^x$NxQtK$v@yYzrD#&+k)MT0@VUbmp+eS}Y#+@_89w|w_tARLojS3=4oJi#)@=2cL z$E_8rzlRvsjN32E>Mh&9E*PYMOp4VU2o4ntmaF$~r?Uz_t>sPcEk-CSC$VTLJzuS{ zmis&vJ5_6DK{ar>w-Zp7z#{4jepT4QgK+nJ5{#N7fMX+hyiq5<`v8Cn3Gn<Es^H&c zqrc<24D8ITf69WH|7w)_KjXU0Y=4yn|Hq!qj2z4apPtU2nCYiXmGcuv{RdI7mXM0H zhRlD#x^jjN4z{lUch}}WMZp}*On=t||Fco^rvUjkee7TG+|RTBG;022hnfBtjpY+* z{cHsPuKk;Pe5!^&O`kcL{_R+PyARvv7x>TdKCgvhW%)#)|HC+cz5b7}fA;<PS$;eI zM?WLS-^caG{vXHwjKj`Mz{2>Ok@J7W_^tns&+}XVpStT$FYkXPJO4L)?!TH9|BUDU zK65{H(0|~$zdz;A_y2o5_tS*>?_>T`cKtW{_Yc`MGsk~~=c;?yseIzOQ=AWt-(n{N zV(X|}TS=%gu27Kuf=O3(6RayCAx+U6&kp^<K*IdYLY2tSDqA44DwWI|hy+1_kvgRn zfQ7kH0GSJLPa|C#G`^be9hcY-sSTXK6m@&p#sgMH``mdyZEio|J#Zg!A8mA!4}?+# zxJ$%#TN-x+uxNpeTL}m%FQa)~l?O03h=={yyT@&|<#`)qN|3NemOQnwz0}Kjs%Cqf z=R&BjY$V-!9mOk!elDJ=whbaAU%tmte8xF;)MB?;EyFV*ohi??I?qVoQ;`#$vC5;y z-q+@C$XLh`j1gGAjI_0e<XL+{xT~BoceP&YF!khQ752e#R}?pw9-ge$>4bZ>B$@@l 
zCN9S-Cjuq7k<)yaUw(Ae;lGn?e;RTHqYY%Kh3HPH7Cn7f2z@aYV7{U_FWU4R3CkPw z4O4^lqKKIx`q^+EVTpYK_#6&9Q~h<^;Y(mQ;;ebk!4<1tT9z^UY5h}*6TXAbRLCQx z_t>v0fh{cRK14o}m>!#hb;0Rbk24aZi@cCsZZTU8dBR&8tnbigSQ7LvzERg&8flCC zH`W4oHjxi8cW`eR2el5#2IL3{{g;iydv{9#sm!tLXEezh18b{fOQU!qQ;KwtNG6*D zt~cH`H_Aj*N#n#)(Mn}a>!%a75%r3BN$15XG1=HBRizctuR-=|!&n;B9)b79Fy<>5 zKcMyc)D4zspRz(FG3Uf@z@8*WLhH0P3n)iujpZjvvY<}5R8U3moiQAlHw9hS9pFu{ z4{y{PQl2gGalfPB7V~R*&2hTN&>6VY5vb`Fo`E%iAq6?rouBbdh+mS{Met4quh>Uk zs{TUD2`a<26u+OMaJ?FSRD7rMiTX5Md>Vdr6zn5i3KOf<JVOr|INftDmHL_TOY&0c za!iKpJZ((zCUHGcPa@~15_`!*X}9V_34PII0cJ@#hV$YKmdaw3RMV`S)Jl1>KTaV! z0r6d}D!7SfK9aPJ0U(-DIiX@x^5BYCQ`*<8vSUjZU)Y+0k^|b2^)s9o@|Qp_vvJU> zE6<CJvg+jJTUJkCpFsXG{G(|jaky*b*RSt*UvLhN6ph<P5a$O~i713(!R#Yes9Zt? z?WHo2jWwEKPKK3Q8?@SB8*`Gx3S(;Gb<iC0Iz;rKER!x1o5wqEkXL4nZ&Kq8r=_Cf z%W0JH6n;?&k8zKQI=7`)PX$jXqnF)(X<sYEQxac<Z|tv1s$x?&*J8xY2fdqP-`Ya2 zY1cn&ro6Qz;||%hOBoSLwCVKV5&r7hcui>K^o6$_jQZZSjX(z_d1?;J9=dTnib(e@ z5p2cmMDJGx#H`_3wM#8B5!ujycdLTt;RLyl431L!V(K%Y*mIDZhK=e`ZIS~$_73%x zm}?!^b9`2DPQekaCtX${xFz{%-{rH-Im7rsyG!u2QRhv<Ug!e7|BZ_at=bH6Oz4Xx zr0hdVa*|%N+iV}`?NJiS%b}jf<f3ShW8|uy*OIEb-fr4cfj{Qc{H=py-KOR|6Z71> zPI}o${7z(efEYzlhA5*l6;oN66wPL6KSK8a0<&|fP|9$s`vlxPeas!P!y7x%o7{IF zs`%FXgNBap8t_<cPe;Ca@b!((hz+iv^_py=SI;xfnp8}v(-b=Nc=EuZwSe13jofEo zoEypfYXNQw=jN%R#*!Q2jid?-3(}&KF`2lOXjG=oz~Dhsia0FOFp@NlLHx}c5o>}C z!ut$5Rx2taK!EY8*|n6c>&D0mW=RVACT>;<RV-bNsgjDmxE3^P9w=02Q}Lx8W4kCD zPo&IRvkcotr4r#aK9BrLW5)u#9cmYcmI@D9^$QL?DZ?>aK;RT(-RCX+nJqx@<vv*% z^IH#Gj^7ogYmB=Hk;Y7s_7|dSUz=acqICD}l^!hN-$@5|RT1gMzJk0sl}D-~wgZuv zg5EMpjGHB$plld{&`O`dZ8~A1idpSNW1v}?^?g^}+#+VSdi}Jq5M&kPokd{mIE0Ep zi~_HcI8ZcFeNbrBukPAVR?2iR=Vs1IPiCDR8R=-4Wjd(Wc4=AcD<LmV*Cb{ts}+^$ zlpK~I3Lb?Xuh5ppBuyVf2&NumDEOM%t<l%Hpas!7^Yu9FHZaN{(#yS-l+RERYV&CH zi`Kg7m+@ZyRW>g^#|s^+;*ul{t;mXvrIbVz*EGX|{>ltP8O%koD+Q+$Pnp(bt4fE< znjEaWR9<I9@#WH4$H3QHFU5pmn(0x=>AS_`V)HrXd$)-m7rrm}jzt+-vkUsk-2Ch- zZ3`w(+xs^5uWnxc?o#p4$c=INZu!f~DAISLwJd}k;b9^pGs0mSM0`A3^A^W8#Bop| z$hf^LCX)U8<Q}|5-Slf?45f!g=4=xiW1&}9CG&fDh=a2+`yMj+;Kh)FRG9BH_p)T2 zquG6a4w*mf?IXPeSUye}A22E(VIG2@(ab&Sp*vT%@C4C4IJ{(UMtsKHzaySDu#d=H zMJRze9K88q(2K_Tvl8}Vyra>L3>E}*pb0jj%hnzFzWrF#&s?z8{QQrc=Nrq5`HRy@ zgsbfJkK+pFA3sJBE9hRa%opzXGWYf5JwCL}rRWgvLjyPfcXh(+W9QM$F-u(0W&1Z} zzdg3aKgW3j96@-HBr?Z|`=ylGRB1MvHQLIqXEn0}XH*`M%SD?mr9JrQAbb0O1qtnB zT2Ja1!rz|8sL%qP*M#A^9BYQaak1>oFvIa4%<zT$;!-(RND^k}QrV5&&T|M!=5`7Y z4i}CW?jt<x#(br-gL}ljQr|J%5#0gZ`DTE?0Rsat4dCHN?#nBHUju9bw6w~Z<EjMD z<usB940rB&Y#EYmN3k;_72<uSkS1IvTqnFic=OVG31ACI+(X@?xdU^BWQT$Qlnwms z7vgJ^+xkT@n|fL`9mh=f{R?ZWyewY@nQQ5&?3X%S*(&t}+AlY1tgULYVv8msSp}0z zPUlqTP+yDGau2M&X+<9{6<K0xr59O-lUeHYlW$C*8sb$|8yMc2AO~SP-;TQ*rxq?& zvYHn7r*RrWv8$k@n(`%FHgqHOTk{OA*j_wn;D;CwtUSK8^C06yDRo`0tM{5}wKTne zOd1usX`0s#3jeH_FEgzSVYSpwNdDYFuf%C-p+r9nRgr8z=uSKc)1z6}k5xu}XDJI+ z+xmHhATKY>%NWD16?b_-(Hx<DNV{^uNr(7;FF9;$0>;;t=6gV;saZGYuuI#hGhcCe z9awJg!c=|4ft_mCYB)8d@{Ue_*1h$wlBPcj`!KPv|GmT!^Iyxi{Es;k2j?d{DQsvb zVQOh^;Y7gB#{5^5@-rv;8%p^%s!>SD){Q{>bMg6mzQoAP%1Q8BgnvkwB;=)jqbSua zZOlzRo8y0wE-|vOeBSpDhVu8s?Jo@F@2lN^%$NQENVPw60*=40<(ZiO%8>rKF#eNw z{GTh;=*G%^8DM}3df*F6o*&#H$*}RylffLx=Vv(w#L<Qe4*QB?9=E=%Hc#n97;v{m zo=r;Me~aJ=_9QnOncDkg0J+=}CzxvYORxb~!}ezO+6pZzkmjnT8Z(8k1;UFS;D>nJ zvJ7`-wsq!J-qh9z(Gx@e$tlx~$%azfu0NqLaUxJeRfWgwi7Gur{Oqkk#Ko;Q-9pa3 zqdWxe!}+J(2yK-lHVLj7ir0L}ySpk-z#BPk&ks2BWh%PmWAR@fRH&8u9pX&bijw`V zI~c6K@NWF7;iXDC+cBti-v4Uq`~jjeF){s-ZL|J=B}4!HiT}4$=yweEFR9SqN(ucX z75bb2GKP*$!WM=OpEZ>}D|6YZ+F1TxP3XU#>OVlV%>RlzW%+0Bl<kwv{rB8yp$DX! 
z%HrmO=7qO;D|1?!H3@S9AtVBUAA$r!Dzqknh=i{In-Cd|5P~d_KM2BpA6i3LKE;?B zXak)o1OcFRSn}FZQ+zlmI>HuOm=<v&`V3W?=Z7OX;N92lqnB)3vnk$F-iOzRZBFI@ z2BDEaEJV@gOpo%@EA#<yS|9(rtxVGg54`dmj35MWIKqpOP3POoEC;oD1f=jhZ_oGq z;C5T5gJ?mCC;<vZErx1$o8?h<(7<4io%cBC++L?BJwJEKg%|pkhxdbN8l?}IdsPJ= z8^!a?pX~Zx#>_&ca3%6x&F<fDAJp%87LJ5A9rf=!Z~5MIT~HVB1D<=@)<_>5<-BPY zfEBu{ZLT}oS~Fq<>_K?sSp@-!#o!Ah1iH-}b8ohiKXTChL=eH%-_0_7MMwxSDJ595 z&4bH9<L|dOstK{{3K)I}$o&x7g=h&PQbY+CdsBc34@$>Y`zdNa67noei-+d~B8p`R zAN~W$faT}OnRKrxZ&n38NAVH%-0Cgt2U}gjJg@qbi^)$;(D9(xU&9ZtA?TRGLl|$x zAIQ6T_Q%u|-uL*e%Avu;Z2iK=+zVmEvknbN9B9#9Vyiukl_fRi^ld;&k9cpDM>JBx z=pNo7cmg>OEd3=qDlu)lTNVw#Ss_jOd85bO1|}F>0bm8(vk4a1Ye9NqxW_cl*mm4d zn}82C0B;?k??F%5`*zjPEyz(|`C%56T!GQ~%Ev%79gshepzq}tg6NF7Coe5Q_2fmE z6;ZW<n{stY!sI|ptwmmDyzjtMyOEJ`R+;O~1*b=|V+54=SfC0EJXzou^UQZTu{*<} zdq<G30%HtWvyO8(kYa|D9=$ijyqqY$o>_{Z*0h<=Ivz7rTvs1!FGSJ0A;|SH+v9q} zHOHY6oB~=v%xDHA`);!4I}03S9P6fDYDe1hzw+4h=OpOd@T!(%@QF9i5aS${wUGPj z^GL8^Uu5|g<+soB3S3LwF?9F@>j`y>06Kv*yyqN)PwWDD(q9mE7s6r5z1=BI-}3rp z#|T21&CG~s2qH;|5$|S2JLT4*O!_^c<pv3y0$ZSJ!8%s?Jun}G27FZaZbUZQ=Iz(a zJ^0qmA1m*y`uO2g_1{A9#0|sY0OCjv6xi|c_`qey_yp(l+3w7XfPEpG_c$@W!|+OZ z4|1{J)d5Qx_3AbDj%1LqfO7$(zlXWxoN@eq#55z>+h!7*g1(d<7@W`Jb`(IX?t6C# zyq(<Z0cx>fTr=cs!TUu}zL!cL?4%<^E+5|TtNDz1uCuy<^L9R-9W+u?@0!r1cg@f) z2kKH<82LW<)%>HWC&mYwk8d|2R&9a^Q)DDTkO@|r>|yMa2{P$|)V@GpDNELKD@avh z#(3pE(1x}E;2!I{oHtc>#77i^h#QPoSXDl~w>6OgYW*y+Sgq8JeaN*%9aa?R_&3C_ zgVQ^WZfIQcJUgbKGv%%`<pQHLTn9s4Hh|T`!dEIAemOk_5-g<x<%~sW)<rRM&8<0k z*9(zLJ3#~fF|j)C=@(>%kMJe{&bb9nT;2%X@@KCY@9aOCd-B|3N60sTSVEr_0Xs!< z@=pkYT6M@I%rI8`rI-}(x`5K}p=Vy<s*rucJ|Nc)iNbm&ncdL0g2)mWW)^Q-QWN7* zUMV`Z0V9@XE@#YU62)gODMi5FqAR$?&A-QB#c_FRNZrg31|)#N;>4yRY;U6<-M1JU zKZ}ZsiUqc3Ml-a_)@x0Wsm75i?r=&mU}c_;-+BN+z5}G}WH1;)l)sea<vlln`SbwG ze&feOzIu!@%*KydF#NH}k69Wb_LE)QXfh#;Ddf}n<(bEi|LBqDYm288Wn#|Ab<#H= zU9oTRiib#%;h0o<^c%aQQ5v!PkOtl4?6J+(g349=;D_tQRI6QQ-^OBlEpqaO6oZrq zrq!F_k2b@ifJVo0J7Wk&ValOo?hbx{-FR_YLjq#uX!EiwuoSYeldql9Vl#30)*n5Z zfydhF^rV}>2(Atp0|~hZu;A^N8ve+Q%JvFa&%m>tpRx9dx2&?Yxu2K=sH8KVt|0{- zdT{{SSJw%}U}}NAq@;r#S^bKEnt~^W?1A-gz<&56v9@@%UV3?xk0+h&Ky1NHqp{R7 zi@e5B>g2pr&xMvcKr#e|E>-Jikm`#A{sRM3Q8E@v&vy<YOtjH8j5I?3qbCQ=2%kK1 zf|sEuG#1*sfSUYtAd4>n&8%PYy<3iW(o;_@sp#&%0c*~7)&0!$e!(fR{L=sKxtZ{> zpG_F>VA+IF{GeIU37YWqtFI%&OKcQlN~-23pPF$Kkv?CEL=W>YFg4{psDvk1`-HT( ztmB*^M#mU~T?5$Tk?ouqv*6Wr`t-aLtRK;1Ti3h2w6kplLba}N#mK@z(U_o*K4)wq z3I<E4Wq2xVeo)e;<-&Y0j2;@r)uLs#0ALLaYEqx9X+eVDfgXLr%@7;zWUy4Oy_99i zg<yTbCor6;f=1xIG{3dHpE>1@_2O=`b@AZGh0m}P&rCqN@v6b9u5sZ%3wZXd-*y!A z-0SJU`@+H3Ra(N5*Jf$#<llXA&4eI}mujS}oIRbe{KK})=F~Ft{MMqY$O{xy*17ea zFMiE^?X%?V-S9Y?fx#C7)`2Ji4HwENGSfVyup$Mj)I;i2CB5`5>IMLG_?*|)u8mgK zT%o9XbpMVW<u?vx7TW~0RJYCXp*Xk7{s%U=of-fK3&2cE8f<U{4S;~1TVNI*T&@rS z7%UiX5B!)&4HirM>BikeHIh(qdnhNP*X4jjBGH}os~UDnQe@LOW6{W)Ld*vd64|9H zGr6BrnH9Rznctnw^DAUpT<XVb5>+ym(JmXmf$y{Ss$cu=i1R!H7TR;a)R66Mfq0Sw zHNCc9bOUepCNTEdF-l7*QUw}aCq5S1DJFgpbWnTK%=iiUOOW{2RwzU#h+^(zkpN|* zfu`+$aqI29^`}A7s*(=R@9X1?P63HKE}~NI5g8;Nhy*d3!>t3?pLv6b5g7E9TAMW$ z!2T+-Sm})!6B}_L)~ee@bIa6#duNC4rnkt{SXDA|G3Nd#A}XMG8r`A?`5mbCs>zhZ zT+G>gjd|%>IAMv`&2w`GA~{UU?2zmH@!F8GrK=7}6>eZbx^o4)$mn8n$CXuZI@18b zC)9r9BIWA)j`#aW6xrSh-}H=^qC7c!4nJhex0o%Th`K>hvREBL^)!RsImz_cq8UIE zK?#MD@sse#$f7mG4!G4TCtZ6Q)>#@I2#G>iTb>0Vi(4j%CfMemQVU9QRpEn8n_Adv zaCw_tRYv_1Wq8P4Z3<NJ5=nQl3pD4kzodr0BhPW3#4iw2&KY(94q7aqQn$BJw^z9U z?ry46)2sB|1I?&uj)wN{0v$z2%hXsFh!w%Xy9PxF*V4<?SXM3U5}%a$ME(>M|F~YD z?q0du^f)aK(Sh4mXgy5Qjd{i=pG95%@V=r)sKbA(skHpH{tcezPV`Fa3SXbs_*eq? 
zSOWGq6cXXu-D^X=>GJFMK)L45y?jh1z7+adzzqP(fC9xshse-HrK)ri=DJjz@`NC+ z-FSgYp}HBGGt?HM66Cz7@5hSCPk74Vr6VG7lFZ+p>rjx*xgv9fX4mcqBh_4LSDBdv zrZ&@Pf<i@;47YY`S1z0Yu&%%LTbK4<l2{T4?+t+H1M@wPS<xz+^SC%EGrWyuNL$r` z!<UXpdWG{V&0uxd-7NP`>CU&f+I)wlv)cYOryXH!MrO4_JO?aL5Q%nz*g*n)FuDgB z7fQ4@#uyj(y;Q@xA8j*DMlMzwU^X;kA>OPm1&0{OlDOZwoo72hNdZv;O8{!tKus)c zPFm9IAZSoVyZsDvG)XrRe2^t_{)BaoDt*ff#0NX^iFg~-)o1zbJK(`gK+j8m4IOIM z^6-_AKZ9N>pb(lER4Y)DD4?|Wn#8DzAv3R34Ak%s<8T6qd>8`pUCIL#)m>1(6w*>y z^<&8V9RyK7CT-OoHB?bTs%d)=WW{(D5=xaKs+MHw%2#H);X+{dItZr(md#)uk*pXG z$}{3AkO5VHl~h(We^tHnx*%pvKS3;X1YIU(i3`DVjvof`a{<Gp-%X{`PFGORkW`nZ z%Q98NsFlB#vYjQ>qDM4=SMT`jT=#Nct?sn<26P|9IMI`0xBGbP8zQx!O$3GT!`yK7 zejs%W8R+)z*a-*$T&|Bu=;pB>^dwVQEK|34m5OWXYDR8H?!)U7GC%3R$9{Yb9fq%| zWSh6`5c5$iuo<mHo24q1`kKf69rp`3FL_n~EPtbv8f(mPg94Qbwq^ao#=5Cok!C|I z7{yUM(IWgP=7sQ6B$1uSuA8Rf&3Bwufl+mQQrQiyQbilJ^o98!iFdVMZeva8f=ukW zD1U@Yg0jnImrf8q*g5_X9Yd;%*Cwh~f=d9t^X<^*lzSf$mEa1hDV$btmWw|y8{<rk zq$Hdy+fz^#O(KxH3hXD-%*|~lPzRN?fv##~(RR_0)9A61hRQ1=DI=>Dt_G^-A)LVh zYzKvL`7Btj+JN~>mYgC*xHiLR46A^*2(D=16HN7s!gl43ne5qr9-QC}D60o4%MBXH zF&rN$F$sr<GOQ0VuOS=pJ}Xpocjsb0dmhY%jSv)>U9q}dHOR5@0NB1A6?dEU(S0t1 z_H+b!+k4@;h{IQ_UoginfNA&A*Pr~+m~fE<beHOs_Ddo@Kuj7hou+VxCTuh+EJ?D; zR)L$BkP$PxIE4f~fyZx!6o}i3WR_4eA7p~1LKs2x3ebQA=X}Sx7yrg=qWv5p8uBO* zkD-mtXdYK0p4%=|{LTfAGE!Jff{jUg=v)nm=Hvi8m?#y*4uJ(VpzgWmytn##*{2=r z4EzUt9(nh4f$io9<Liw`s64))XPvLUw^H>8u^Uki{ZZP91XKEStR)cUNDPZ|%GcaX zgQ7UYxZSFeNxF+w2%Or)g6Y*-uTrYZ<B+U!=UR!oh$y0F;Q_A1pSc~%*67>=w|l0F zVBiMJl}n3}Whf%hepLP%0zmY-tr$Q^BX~ec>ZG3T0g9%ksb=_TKLy!jQ97~*0vW)e zg_;l{5n{=@uDqy0IyaVcU|qm{+<x6(uKO&#F69;H@Coq^0A)9kx6nK~)f^1u1(09G zUtHibEIZzF@z{foaKV^-6+<jh!^fKX*pjOtt2YaBGZsQjc0fqPloVB5spT5i_#lm& zA3!0927;Vz!_}N%*G=914!bJyi;EG~(@7mdk%1r%W?7GYGBd%|&5g!|i-rSI90`Iw zH3)kfs^eF}nFkvkzn$6en)9OT&-(2B7><3u_4a)y$p_1etXzJ>sleA(```|>zGpxK z0`i-D+%|LO!8%r<arN;<+O_17h;)i^tmH$f6`FE1Sz|blnlE7WY~rIDE=lCv3=Re% zz~2Z-G-FLTe=-`^ucChq)I?J&aH789dfCV+UX~nYTrnuYi{QQ2^-_c=nJm#U$4nmx za0Qzb7EWUyn!v`sV{!mxL<<S|cA@}evX8I|83rloMtZ^dK9;{ZE!)+8SGv<GpgN^H z*bje3CoeA_=5ez88rQd7;qw-7e8*Z1{t;ZiMRDz7TbN)hb$eT6RkrIme5dn+Cbu2< z%7oV}w^?j8xALB++WNWy?s0+k;N}sJ9WSP4{nXSNgS;G0Q_WOUk$3^7Q6RfSZ6xWZ z*tddb|L5S>V0X1H64z3#s17}E!S|r&+UKCx0IKA2XjP(eJ@$|+@hgKLM2sOsDc;cG z(>*H&UKD*;OZJF|D}yoE_Rw2l>-OM$F@U*penn^<*fYD^*Q_*v5~sz*vxQ*}y9Cw~ zYukHkn>!mjByz<y?fhH3z_h|vX=bn!KlK;4x4UaUrrtemd^U0@^@X}ONBnt{QN}r> z;|6U@>shq4XwE-ruOL=5!MJ)I%o1JGr3F>}Rwxn_#nnRRY9@z6?zAY~fD35NB;92? 
zf&RTQcQStHk!gUIk}f9eMBK9pUm;;Y#7#)1(|$iWaDhGzpw!VL%z4(G`WH2LI-mWZ z<dcUc{_9=#Js&G*^J%@*_?w};6tma;7Xv)Lb^4W7ljo}rLd(&I^PKJuuU~`8ho<#K z(W5vVb{cDQ{V}-ETM&cO2Rk9D@Gg0z&#A#R+njW3$D2**HKuG|*wO$H;Xq(&SvL84 zyEf#`>m;*mm;3-HlRMGqFdu9B9VQ&6=8O?(CW6*<Qzyjn)Wge1M0x4X+K{SrE2}#W zxE0mB)H`?CVfB%dzc~7uVwFL{DHHqC+fX#ob`x2_I)<Z@y}K^02H0j*09}5&wsXJl z^JmSO`;OhaGu-Rb*>Hh16>{I-BrU<V_kn-k%vuIX^N#-6njvW3T_MC?#r;(mOX+Hk zw<%a2oMBM()Ff%b0k^-8uFtaBAa4`-1W!4TKf$e%5QF{_=z!svU_nH+YxRuQ>7U%s zm=Uvq{YZVKv3<eFitiRdH?=`y59*F7KWs(T;r<{$XbCG<3YTdN-yV=<@67i!&qs7< zy@@-4Mn;b&vS<+?rGl)}GWRR7NzC0<&pif0a-SkzbJWwXayNm{V~vNomI=ls*+vk8 z!g-tsM(`G(*<oDvM?_T#N7Z22b&E?N0e(i)DihG5MUykhRoaoEB0@P8LLBJj@<RrZ zEJU%2r;B@^gD92bqO(`Rwm(%1DJ%cp%_UT+98;bN_4S2^V|8WFS#>GEpV?DmH3G@K z7AV{R?vbA`2;ZD=!<VQ5ny?zxJxC!Cy%;Dwbf|Qt+-QUUQU8O>4$MOyuM3L@8Ak6N z?5<agep3JkNPuxSgzaiNjaPrhqm`R*1a|Q4j)!_r*Rus__3Uf3)i2fuA0F>XU*(Q9 zeGI}5tn1d(+bgRotKEF%k>-@u6G}ww&Y&%`OHO+AgU%o4*GkRJbJ9It_w)64CHWLW z`9q?WQEPaO?BOxt;jC#%`GQB_x_BbDV20}iJS77~dm;4*O+@TcVk%8UG_AeB{{Bt7 zwa+DGE7G;RwJdCC!^w8}tf25wWi4I_6cXKxw&vJnV#*Pg<Y!IXGDouUrBRV{{Wvi_ zG~`!e@<vibk`+|}6b=|4$fV2`I^P=0vx9=m5r_atUFoYj+ExU9B%#-}0fd^=R<Z^{ zU9l44D{ICer)m3L3E)=Q=@Xsx>>;I=W3oCdK=jWW2nIGM1bNm-;e4b7cD84`d*j#- z>V<Y1Z|cIod2ZYcyO@cZ#k4x|MC0mwi&@V2^jqc6e*JnJAur>#C-J#n-qd|KI_UgW zf72*>;U<4Vq3?bb$eXXPvoXRrUuVu1I7N2>{H+t#r92O#K_{n8Mwi+K%@S1IuvNQ5 zyK}fs1|?q_B+dXMk3(9fY*MN*^rz4_KOJ)AR@8<Tq4i+pSU^Z8h%_CF+XAr#ATcM5 zkUR}AxS!1JLf6a<sOLsl>5HZ<_)P@{8#Mt(fNUgO{Wuete&A*jM_p45AvZZ6r{oz> z*;0+sSH+u+$6I^(Tq{#L*&K|Is#iI^)V=l0(#`RL*ji979o|=il@mFKfV$-Z@cpy& z&R$VZn9Qrzy;ii?4YXL5_AC~#a8w!~>QOGSI11maNX~*?_5f1LP=ynpBa=BqZ~L8t z@f4J?K~_jw2g=CyX?k=Pj|WYZlfzQWlqW>Zx?86<ENmFTCvXqP%`gEo4W5nKP{Itg z^4idZNR<;IgcYeie?f200O@38tAeA|B5%PNdm)%%PNJa5w>aX91X&zr+q%X1*t~lE z<7)?hbsvE~eB8g$`!pVWy}7?i(bjueT&jEiVL&|9>id=B@JObrRp7(1(;=%mCWCs5 zKAslJMu80i8%0YdV$#tvN}*tal^w#!DU3>M)dhuQq8t%Kn-<kALk&V3#b4{|Vk1&A z24rny;Dr>K#JapGg!IGnTq+iFM(vIrz;<&)9G@#!<v~?3-}_b2Ysef|doxV=NK1LN zq=VHr&R>SmJrG9&VIu8{%Qpj$y6y0tEm^aSNXMzr4o-O*k$2Z>@HTbUWEr4Z!dN3< zb!r*~SMU(yiKxj(AU0ig&2=+E*!|eXk{lImYB7}&Un9>$QNxYi6nOAGIFpxq9@p&> zFKRW<gMOKDg95e+in*9CkZu6)d9~o~0^DK@6ds{-zD)_seQ`B1hb&$a+fAjwQ8hv` zJ|Crkjw7GZ1hI!M`0{Gx;HJg_SuIzKy87D0rCm@th*qQ9Bso&F9t!KUm-|LWz6nMy zTn6yon+!Ffqce(RkN`cALCzd;Vuo%vbwA{tix<NDzQvVM7p6C~!*ED1LYPVO?s?3O zy<Mewztg84O{e469uwz5z6e+B=y)4aY{bL*?Fbs)7EFwqdztks{QGEq8zjEQ{mzR0 zxWOBoK@)z>cWB<??d6<)RvqAGzXHq+k*df7xs)i5GrA1YiEtKa3=ZJESd+#9C_Sk4 ziYha7`$kI%2uBKTq8e5p;eq}+TkAC~X${a?u8nAuVa8!j@JqL4Rkv|fmIB-z!g#+S zY?wry$qp_4jxN_*g`70iRbHM&D0<;;1m8pi6liuD60l+}ubS?APAu_CnjBtgHeTnI zon`&*l@U@D-lvg)_u<C7Qc!bSocyQYoN-vWylH>D!Mwa<)lM_%mBUnJwYIG~zB5kf zN=U038u$XCZVCC?@*$>W=Wb~Qq=^7&93zv!*qXiyU-cgN5Et$<flgfK+}qHEJfghm zR~=mOmUxZR9y`)}BBeNLBItsj`4TGSu+6*baCuQ(Kee!%Het}!0po&uQVqp6lY)@} zWVCEkQEp>9wR`NcA>9_T++Z${I}Noh0ai5n6*AG*P=c+k;gSOx_b;S={ldSN*iCRm z%h_TwoeYf$f9RTdbsv0rkV|i>@LpT+xJ43&k7z)r_cR=e6lXUE+s<N^%5G(RzkRT} zaCd?G_H(gvU4NtA*ovM`G?dSBXRgrw=Ved~NIXR((hbBSfiG3rPrJ5U_%s9hJW38t zN|mxiGt+7ZA<kkQC6dA@jXOJX2bC73%$0K)dn9-rC-b-j$GRB=n@sl&I2q=87Gq_6 z2kI3xp=~<y5qFXNUF^KTa<R&^j-6|b<v|I3`_)XjP_P_y)Gc;fBddo5lNH<GC@a3L zH&!2z`>akQewCF6po5nc%LxFlz#eF#;)OB5GzGuV&|#5^9?HxqRQo6$=wXi*lxWFh zW6-5Bd+T1MZGX&#I_8SOh5P51ex*Kwa778&tgNh!j8y2YM?<2NtA27Y{8PV}j8#k$ zxuxwLQ^6isItDzz7(5CxN#FyRYdFjL8gV!E7K%~GKAk*2ZK4W9H_ZZ@)yhZ9N}X~D z%nC72o%qbLO+p_E9h54le3BPS+^^qhoya?7L`AOU;ps^g5q_GC64ndLjM+Ok<Lf{n ztVab%A7o;NOwsXQ?lN{Ou2r>11&k}YgK)Q0Fie?@9IOj<B5ulh;TVFaMyvvw@pAW8 zTykfQK3_u?4H%GbyZTdNEzj6b8r1+n1k^|NPn^xEb^F?TX>P|A#e_FK%UKV-`4`(Q zxtgN^J$FQ>fT}xB_nEVU=)<3!(rTVQ3*<%;6(+-9ffzu+(XMeQ-8^J<Z4w2X5)+fm 
z&TyIoTP!_%W4QDTd<5O$-<vh`vi4!z;t`TzJ_;S&1HJZtDsx+`up=@jn?h1`PPrly z^Eay(6%)OWQIAKCK+;A64$D+6uR#}7G3X`GQ7<4OqRx-V*XmKMad+sb_7UaT8uFG) zM5-P}A8a1DrHp;rKh0giqjz&(f4j!L7J4<z4Ex0TXr>l=a!mS)*y}H~F!#Jd-*jhe zcOq(3%tun)<H+kv_yU+@#{1R|eNt<?YTQisj#)uX;muu(Vlr6DnQ3mS7BC^RbovtJ zBO;3B>9f1M)Lk)4*A(kdRAOU0*n?xnzd2bwM9pwAc+G__#~izN5Kjsc;lf74M$<;? zbHH&2`f%1Peo}J^M$M_9?G}w?WIAy$auB;FDxqZrWM`t}Gn`J4lA)aNDO$Y%oc0+? zGJ#&rdBRa%-LpsBLgMB!g9>VBv12t&_F&5Jz5`|kmH~9yq3*#gIcJer{3dr5fm6zu zlccw7MR6-BXi;=-X9_cuqySi@tDTEi*sH$0TcBDK6iAE51-~S0rGB`+T4tAEz-Z*} zQgf_BB_XRe5@T_fk%txz)#^uJAaD<i=LZrH8}O&wG^s3Ox}`2*9Z7zODCQ~i3ji39 zYZCNSDWrxlu=m0BW_eKqXV^KlyH4xfzku_%yzby*Z=DG0SJP2zHnuhV5JYN5Eg0I# zV{Z4~uIvS7@Unl%&avID2Vd&uXbHs&xwk68*D5dOrQR|1O<aOhL7}n;dct#O-ziK= zq_jdgt}qTN(_)Q?(y()L&T9Z*%R_0(J!zA#*Pv!8N~6U3WkH!<g=+b^*s*ffiXgL) z!DA#$#h)9>5Wq)?6>U@yI92WC%eB4Tl-u5$56~{+b$l)`cfiM3{$_a6&?nj`I_OLg zZ{%|&bQQA0-dn#~xT!za53aYl^c;cS%;-@KD%fc<)1njuDt#!4czJZ{<~{|Ne%OR8 z3igb0b?v)1e%G5{G9ob)ugOP_tO@oCUZ(EHxTz+h1;Hs_6Y?D)3k5NnA>En~*H|AI zSW4O~o3au8ac!mA)XtRG`mn=m&C~!{ft{z|cA+FbvJVoONlE#JSEsKY+Ht-L7T4#8 ze$63Zt~#^9WY{D28!uof*;a>~rj`D@t?);*H~vs&<Xex|oa3BTc8QD<c^X+-z447v z{pLPTtDYw7ZJ1^4(_u#{j>T9|(RJAA;;<23)K@3|pWhZ<;>Ip687^E0wV4-K<ZaIG zRt9S$HrtKihu#I>G*n+G-xj=!yv@XAG%60^ZUeuWQ!y{56{Q{SrCQdCURY*Z>b=~K zsX3QhmOq`O%GT0e$Y#rKF^^&1N1jAJEdigc>$nM%boGa$at@u_5UagK%@FPvGpQ}Y zQZbIgt&-Cx=EOdXfd62=!BoW~IT|;uaa7r4#Bl*&$%R<21)mqc0`$b0cfA7l?PYZS zMD{q~=xb3GWu*61Gc0gJ7co{R2Q??ARORB&CLETy@PTmb`vk9$WOz70U+i%Y4FV4B z1|W+OwjzJ!vacHEoBAr?qB{c(?j~~h=;-Q5>kvf87{3pySaI4C;%nRGfoi9k!a;Z~ zYiPo=wT_wL5G;i_z+!0{4TnyAX{X8q!}fUejb6dTF#wOezmIr<LlGXfx{sJOVu*Zx zu2Cd3m6wq?%YkTriyLY+656gM66S<scn$>2s?{!*Z44;{r<b|uZv1FmddDBG$l&We z^?Ezs$V?FTwud4o@2n_(#(}dEoVqK{vC+M~s~Iv3>$(a}j7uP6<@<O^iPFGWy#)>1 zYtDnX3*>7Us@o6((-hMt(;B1I2?P#OE(UJ3{>1>Z!v_<hJV+8jC*IQ1_dU3IH^3d~ z@n_8;1JY73j(>&q08ABf16!tAi*<C3Yuu!<v|p!eTvRioau|qmD->Zd_pO-e;o|NH z!1Wp^@RSfmC+GnqfNL(g-B*qRG!Dbt!os4cIz;;n3&|T3uJFSox(01Dy1-CJK8=o9 zR)XgQ!NH&c?dz~xa#v7xfUnX$!E6qY2<BM#i~)hOeI9+}K+P&W>Y^247>;z`H4KLj zoj6c1?L;(0>!7XZGS8@-RC~Qmt4{+@Gf(5C9qvIff7JsvClVok{^Xa~&&m7CutPHv zTCZW7{EVVVSc(|%lNhcPR&ii3J-x#`t>?}{RuZ3f(cfS!X!D$Qsbj{tPI=}o=D9<( zVb3L;A;M;425gdnbnoyrv1E}-Sju-c>8ug_R6h(=hgM6HrS&tBgb5oeY0IZ%_uldh zLnb0b*X8#};`Hz+I)oy)a|rzu8jkaQgFyQ%OGQZwRZFGur5}%1bAzt`g0?`R1nEmq zNT6dMX`VPjeW1`ThzdtcEK$w;XjGqGo-RTVTZb$@kv3;bK>6O6){qv*4brL*<oXt& z3lJ5Xz8_H=dR1A*-Ic3LQcVQ~zG#fz5Tk4N(3^3QDLX<Q^`RHgo$I^?Kjph7O~jg} zfZP!0uJ<{3Hy_PN?`WIRi-Ut=paOW${dXeNT}a{AZ=LPnQ)b?p1HdI|_Nl<Gd><)x zN+J34jy|cU1k1V|#d?e6*nG4(AwhN3{SI}NQWUP_B03bp?jGJr{%Rg$Bmuaa;SI`c zWe&hxCiqn0L(J_DsMvVuf-{kpi>0tDk7l{R?RIA(od<lmKu}D|vvp`9=?<b)4pf2m zhBv}cm3%Vy0hH_1k&rI=3SgMt2&<QYfz}`sKgd$4owU$3P%2No61CFCpTbl}Y6*)p zNvhvs(({|jRo-3+p{AEhLW*XL6PGO!P1}Ru#^xJMJcx&e8I}wkJupNDy8NniHgHF~ zc*7V_@RUl-R_rdX9VCoCTtmh=czAg_A6$ip{7UTeC-_#1jeKwPb9^js&E>mZA9m2q z+4*6KG6mK&o_CRNv>u#d3!q+pk;N9^I-f8*E9Iibx%5T(KFTmja1sVQ&c3=arFBb> zG))tzdIl+&=*wr5%CA2_*g{=iU#`=|+nyWp2)b|+Er?O{Hbn2^;tV6pP*e%KfD5_I ziJ9m&QR9GmLlzadu`0!4^5?%)P@|MD`hELSZ)~(?!G<i-e5#5=x0f!QKHV4>`X<Vt zjg9#5E!Lh5|Je@}8XFykt=4ze3$ep+#D^hU(yRN30cx0Z<|#6dP^7npYd_8cFf}nU zs$Ss!IgH;6`SlhzECG*VbP1O@VMsOxELd|S3HyhuGxXe7^NGR-?Dv63*|oyUFKnus z%+-X=Ca;lCk0tD<wg<hGLQFpK*X(yMjWb?*WjiK;fs?&c2E{Y4{O=)gzVi9Ku?P>z zap454b|97huYx1^hxh{UHB9(XWDed9Kx5Q|_`pvNh8TM%c!`kv7Xdu*H##HY@g_eE zOeMw*FTgCy%{f2RS@75Jd=p_U8NUH73T*>m;6vV%DkSf6BK<g9pOJCv4k6!nF(R6d z)1R-@L3u!N4brEZK=P*TBIc!szR`N3Hf{f+sLz@vlQ22Jv+x>i>TBA0;9*tUq}Ohf zVTpdGAH=&f-VdSz*O3O08V}GW6P!h~SA?x^XlNYRnZgeQC0^1|vQzc?`n@*7stjGW zO0T2kmmlH&7xc-_rx$@hccj@vpBLLmko|6|H$r(4;8_JXB`B=m-~yFIv)45rmNCIr 
z3izmy#vcV?B><a2WW>K-0DtB9TUv0X`$;KKQU%;kiC9>}uSuKb1AM=Y1$gN}EqIXx zH5zttCB#)CCJA@xyXPSRYJUl%09C^ME+%mv%S8Y=qXhB58c%6=-|tNR9Zmw}i|EV} z-12zfTe2XzvLYhM4q6%jvz$R<e`EC$L@DT;oCHv0CIGXH?~~8QJlqKr3FHMf{6kcT zb5uy{FtF%;o-5DBX^0i^cQgrZ4-YrDrAPeD{>_d8;JI%SDN**KcBae=ZzgDf7h?hB z2ZFYEcZa`ieh9+*mxIsAi$gHD;!R|MJ04;3v^twZe+T&fD}&jBKdo@&_yoJX-u3L9 zIeh4`tJc~D?hqf`^Py)EK5WrN2uh$0MiOIu>PuZ7J@)f&n~M`G!^T^&h~Ni8`0pqk z^6I6<goT2(C008>qj#VdLaCk51*x&r5me?hgiIPx%ysm_P3zal^3<9qnBG9y>%OSP zunvxyHfXqN0WY~&h1_@C*Qq8}u0*Z6lIje>b)XAPKzhQ}_d6f&ZhA813<|?{e-@s| z*_pmhdBnqWFUhFG_Y;KQw0d>_iPI_KQ9?lsu}zb0uQp3d7NJSRqp(>DM$qG2a<n*J z)*`$CO?VsVCf7moD>Q#gjOW=qGQH7E-chQ4OK0KCw#v%J+Qw4tbuuMGvZS%Iwcv?I zMReMO^0KiT+|7a5Cy{&1aaM;#zX$bd19t=W)oR0vNBwwfmrSjBT^7pNbpXk#f;)L+ zNuxYNqA`wxS}BIa*N0g0E67y%3^7Fb43V!bA_Wo{hju2(cquoe!xuTFQw#f8nve^E zkvvX=SYXaDloo9}@C&{*M^u)+pzW``v~BBG?e32!mibm~{C&Kewh?xD9XxhuE*anP z2hH<H_LsHQ$$NX$wbWk-JzAab%`a-H-Z|6dv&C&*iRcc!{q%w%bA|^ir`vQwu9Fq= zV5h(6p8IQ4X{p2eRWvx?Yq=c3G^FDLx}xnG%>mF220`xxJL0u4<D;M6;2Y_%tHIlS zl<(300=UDa8wv{nYvt3=I`67+ukhdYAA-}NksnL|NYC-roNAhKeS2bv^yqSKIF0$* z;`X7MbIP0CJwJUP!p3G9G4G5r!m+`9LWq&M$+h5&zy*&J>Tq6VvoGGz=BkRr+NSTX zrZEKfiv#uNx_#}N^Ra#2*Q=J3%E7A2v!R%cDssaS-*bT&=tcqb_EyZps;{`aYgeZk z5jx<q9@@h#ScY+Tl}n9P;Cb>oaE|Q?z;tM04c>=k0Y#5g#zh{hGL;wUi^i-0!yYqA z#M@D0;3zeSZs1gYVVf-#&L`s+`hvBAsXhAbb{FsQowGgepT*sbFKV%(1rJ`=k1_7< z0<;hPn_YJ;C}4vqcKQ@wr0E_FoPPAi&+9Ee9E7aiRaV(buhl%w2rQ2lQeJddSQuM< z>;7T4LAvB>Vu`J`?bMv)dAOy#H5Bop@6rj--|sX}u&i8@dOuTZH)uF-{hDm!I2nj} zv%teApHA0bUI<I<)v|I^xN@UiTPSlzKPZVusrHjxR1)!Xp(NoVnLraSsYD`)WZ)q} zVmA*Q<40vCkPdM25X*rBQ2_egV2wiu!6GLB%FF|3x%g?VhN+OJ=VfTuw-W#rB?kIC zIjS#y+Gh=A6$FF`Dyav<*ZMjDP21OA&||ZE*4eXe+7Lr<M!QMmS(;1DTM<o6JtRLJ z$&XvqUb>dtl3j=^ezY)2(D~dIBNSpIpbY4uBrv@wN@y7KpzPE-e}QwB>4~}io$RT@ zL!6<U+KP%hhWkCoR%K1@`@_WEnv^I*3%+-l;Jihe0K<(>LrGZR?+!t*z!xDysy*in zRq3V-avQ!>k@l#2V8%H7{!!}>y{{XYUbjcs#z@8a@R5x8Byyc!VJJv9GU}`I`!G<u z@zAlMsiXDb3lsziHd8GJp)l&5f*8Tr^H7ImsO=JJAFrE(6CdDOT^vVPjYB_{V&i8J zZltymZlGX#odq_1<f*?--X0#<O!(c;%BhvJeW@%zIy#`cI(T+`e=uA`{b28p&V~2= zxfMro!ry5(_pxrZ#nIujRIII5=fn8q)p|?t1dN?R8w$ZP^~>$o*QbVuOIgVGRY$a% z4tKpB<?W8l9yI-E^X-bHk0KA!)9i!s)eyBqw3G|&(&=%_titKBd)1@F^XiJ6&J*$J zHM?8Q`qPg|lY8sB)icecj8EE~Y)N-CyD^N{3fV)gP|uPF`e=#4k(z3gJkQ>Am09@7 zJD<vc_uayFl_N#Xwb)R~)s0w99Df(V8?;o?6foga3<wkE42#KaLUXzoJ8Np_qj!29 zuHhBJ{tV$=*eo4jhMnqA4FTZwgU+H~{|6n!G4>l^3_UqPO)$L&_FgVv?Wb)C*4mYN z7Y>WnXa(uesSnCUH~!olsdS2vGn$T#yriVqiTAO-f67L8AZ*Z>QZlV>AJoIP=Li0U z{|4{BkmLWZUjH3`XXN1gyDJvU|3CizseS)X+^QLwKi#VT#NR)q_y0h1Xemg`NJ{<{ zfB)UF`adwn;`p>D{*&qWX9oUv0slWp`dR)!4?ZobKUwYnW&539|KG&;jbH!m`rnxG zKe%z$PuH-2j`{oX&;H*S_}})~KEKVsaPLrm+5i1oc6OHk7UQ2|{`USip8dD`eZE5d z?e)`=`tu&2eSaOtZ|D7XKKo}?fj^G@ah#cv^Y=KP*ZmpexAFZE|4#<~&-w%ZPu^qy z*QxQ3^alac?{oMsApGx-`hNuBq5cP->c19F_{4_(I|$G6Par%aJ0s&~Wr%-gRNblV z>WQk_#eXEz>F9Au%idi0AOzGJ%u<Gu1Ohu(PC*bzRHNuOBLYeZM5R$;BovBdVy_9~ zZ>maDqM@d4r|DYX_A=w_$NjB4Ed#{P>+&4`vflNgl-cpn)%!@jtDE_OC=x*_PfR3l zB`|H@Om@4$2>Grf2!Tz;BK6ecSpMx$=uJAX<RrmdHm&vJsyPdSF#vJzLAKRu_Xlua zi#&qTN1(zG@&94(oxeos)jsduZQHhO+qP|6yKURHZQHiF+ctN1@9ydQoO7QC^Ulok z2fROIrIJ)~T}f4Ct;)(bA0@Kr%-8H=lD2MV5@Qghd?Bt(4#=y6IoTXer|SSIzVz#m z=bz@jKUaB>Mv}Qxe?kd*bLH`v9k+orzQRO|!k?=F+w5*1n95f4_>K(D)dagPRC!)! 
zPg1Uz=5?cQT$oL#Ge`Q&jT0)D0KM%5@kSN>7H1Mn6Ztv;b4v{p3^P?9x`Pxb7qpa_ z1<}mj(NCfzHEvOlpT4&5*cw~p@>@NTgCqtMGC&S5DJm&0jGC&t%-ZVmdc=E0M*iGg zr5}xt;Rqp=CUTrhAB&Vnt5RF#C`Y!tdJEwQcHW<==~W=HzeAuvsINlC5rfOODZ{8Y zhg%{thu#R#9bKG9@B*_GGOGtO@tc|}!+{aC+pDQzGH3Kat;vm)&5J(8zvT-sC1lFd z2@TYV%)<$Mb8o7;ut<uEn1ghNMj0%-kT+Z1sS&bw1@1t(5_;eJWOMMtE{fKnbIumy z#)LJZ4P-Lw0Ci&_0+JLuGDm-;6{`J(?^0t$*^rNzXUpXQc_rK{;@c1RtEHw~2S(?{ z)jJqG%OyYFp1R-Wm8v3LcfPs%Vv`nu9YKKNcB$SR$OCl5F64kcOpYYS|70J=ohUpW zOdP8_t3JZvmk_ebphDoa0F^(Gm^|1U*BjYq%o;fkODgAj^6rzKcsOW3b#c11AdEe8 ze?9E6Gjbz4)XV7LE#e3IPS`FP#i6RA)Nvxn_gT#3E?~`Z9=ScKwk9i#TI$8GLaXyN zN;-IiRE~w4^Vm(urfz`Uk+k`fX#w@x>|s*&9JoMYL_4A}zp`J!F+ySw+{Sr7JU>`I zVf6^06%d#tNSww&8*;Po?UWYFqL&VqZp!gaeV`3o8PV48U&`5awMlxmRTiF9R^{|T z+z&&ODa3^4v(2%mTu~>4dIL1D{R%NutAVnGYbQrn*Md3c<@!MCP}cmNZsFNO_A~E{ zxncOi^+FC%(o^stsDTyWdFm0-FeFA2(~IK0#4#zzk_BOkM3tuPvr1+Z)yk-tg|IA` zbY$T8t<KUPRoPwVJx^8Na7xn7Mxjk?jWE+yrZg=9okBP!X-!t^t1sOyU$ld;rnU#G zgO`8e7(chNmHmiL^nb&4X3OxSx^-`YP&FdM4OI<vmKmb*AJHIRP?4i4u8=?#AEY|v zja#WUhtL$+_IGvlmcA0g^Do*0F=8p$cII$o$~17n<=4O$2?rW3pqLkDN0x>q{_U+B zyLsfiW}_OpZp?h@VwtQce~Uf&56Ty#Jj==|BIc%GC?E^VnIacGEa!;MDnrXKn2rjY z?&3ZcmELjZ-2}Qe$8R&W5k$2SkJ=$lJqk)0xd#<cFPh#;@YjvI)4gVS-!ek^6RWeN z3!%d|B1at<mtH*ETa}Fx)mP11bv+X)mBFRuWFsp5HX;W7(6}5@j2Lu9Nubue&w~`{ zW&r{nh4p=KW#yJNAe4I!N_7rf!^yh6=42Gv-d@<ZJS$<EZ-A;zhA^3CVFr3yTM19d zNNlp@!F8EU{>XB3T>XOj+Y+i(Ucqq3X~30cW2tKUb4MZ#u{w+G!a1dWe@2F|Q`Hh? zE2?Io=$pQJLsv};2;B{-V1a{&NWhkazht@9rL$fnp*(I{hsT?`J}sug=6M(gCC~xo zx#nEmhN|ZjE!)Ck%vwsytHHC_f}Lcq1f|qROL6#pEF9_EE>}%b?ZqqNgaDBThF>um z2q-`{O%KGq5Fo#Ge3A^MU?mxHzsYU+ApNujzuXrpAfwRtYW6Qb2byRi&#^Skc>M3R z{Rq|IZGve7>z_E<e6GzkaNM9Zs~}#LzWjAT{xiK>YfLqrp)lt@{E8}?r@9*c4?Itx z=?n`8*8b%SZ>-9$tw1dk7o2%C^`Zss!%GQF=IYhG!)jnSSL7@<@?470l{{g09UZP) z&0RPuZL#<u1=rX>rHjN0nW{f#Lq$C>or_-cQ{s;+1N<wAHSVgPt8wWanrkQ6P=dl? 
zCrGl=nmTBa*x4IUUxRG@$wtnzx|bzfT)g$Oq%E*Dam`$CqUgIlgU;^3>G`o~CnT|e z7olM)adbBq)c35zRhIfG{a$hLR5u;5QEO^oYbLIXktawNt+$y^0H)U|<i};A7!gPO z2+mb&Wt#v$Qw}QCxYVRLQ_Ne99v@=V-Nd*OzyM-e4n_>_%SZfnDIT`)Zh<sh5ak~D zi#&-daE`uiuxZ<J27~!+{HOK`zQ|zFzPPZjAM7034pTX_A(>Ofv9I5A5A1uf*tMT~ zo!DJ@LhS%8jTl2GzjE2PzIONQhkJ+jCiIT&QRcKM9NOOaM)xwX*tviCp4|3baBzc{ z;gCDI-+m#5hJJ8i<G$HBydLWv-_qy(HVq#f{Bpv^b-)QNjsu%xG-M=T&4Ukp8~%XK zgy;)8W~3mdcP14<2n{%5WN3JZNdAO)SyxmF)uqHiq{FFruXhfk)s2{tJSbM2n3-69 ze=-o~I&-;F`|88)-0IE7bv%+M9MU9<Avj2bBM3T2Q%ZY;{5}oUg*;~e%8y0H-BTJ# zDEd)9d)xYV=vNIwiz}^J`(qhFcFZhJHl8zVb(ZoS3?<d%%s`OQ6KCZx`dHtdJbO|t z6*YYVgPgG{-0XUty|+r!SV0vX$AlMo&%;>hzS=(8E+46nr_Y&A>=lL!Q_P105bVUG zG60Wi-e?%4A*F6?lxbclZWW6pF+mYP0&>^3@n>i+HNzPJ|M1D`ya6*Z>Z!{feDX!V zDje$tdrE~5Wuat&;e||o)oEHn>M%o_{<hd!?4Mn}5E{N6E{pNv_5D#awg|m3=1ZjA z;pkKBg}lc!5D?}8VcoeO7}`1X2zRbmK@u%X`5jV1k_SAq$zfwN4XDVdhbO@U{vX<n z;YZIZ9ZhLd+*~btQRgd<$g<<vo<=^I9Q`j^cWB1&>(Cb$kr3pG1F)d$79}{Tc=x^9 zMAMgD*H}p$z#%Me)1O**u?76Bc=0zjA<_l0g869(rSgD4Lwn&1e~|tlIv{H8cNdwM zp&VxbO`FYRagl{ClCL~LTp70D(+KM|8m*9ms@)xX(Hu;iEQJuB<7&k(C|#|vIIr=Z zM!6L<%rK%CehsMOI#vj=pMp~aNRJiNi||`QBU~Qlml)-ns-zRGiQ_jy)qpQ7#uX2M zl|Fl2i#?55&R?+azXS`tbqNOU%ID1z`4bngY0$JQTi>lINDcFa?JroEz_9y1)qsHI zY{Y3i05%>C2;9-!HSUJ_{<$ZoZC{G2q}6+ch;0j!8iE_1<<t2z4XDMT?}tl&!teGE z<hC7cXF!p0qRiMF!q3+vkf}HrbY}>gDBk^H_3*wopzY1Sdw0Gv*Kx2ETN&-rNxLia zhddSpAmf={E|0j0inYnFra+8=Y0J$M^=HZ+=|Jgi;T6+m<W-~&-3CTdP;--%(Tfu; zS2V)GvSnhFU{Czr&y(MY%Aa};A}O8;%m!C~b_$9P&-nw|Q#oDuoO9&LEK7Ifvn=XR zjmTk!hdsvRyQf^yY}7!WzLzPH1R#;Xrizz22?BmH8TaXB!@4!L^vQ=HF`|p_tJ4Cw z_{Q{G@2k_8<^us+{JP)8?d7u@Dj^Z85PCGI@WPGxczqhEKvRpR%Q($rRx1>%Gg#pX zyce5MZ!oyZI}*d%LGkrE{w=bc2#RJLo6#e8EX6t&8@L|PpddEI#m~a%t>`TH)^*zD z?5YF~x32`ERUskshiNmZHUys(p6TqtrB|`CG5hy8bZTyOiw&gO>E>NunkxB3Tc~?s z&wDxjc=b$s3{~7@0GM<;@|WmpdEzV4ov5jVfN2U(5VG_bjx=O_SOk8?exn~wjUQV8 zwtyD!Vj7UEiQw@am%kZ7=~Pt^{kfz`%r+eN;1l!sY_s3x5i$Cx<8R!oOeY+y2Vz!D zVpd0ed1-P#8Z@8k2xT1Imybv;7=c16<8<f6ESkg(N+D^hOIz#D$M^H5iuGQ^uTLd( zExB5X%x;r@BN%i(5d^(?BBo<na=j*_l{2+iYOMH+XI$%#Y;w{^Mz}+o#C2E}mJXtn zB7DHwk)SR^eT|s9v^{so6b1Z;q$#(gh{AGLYi|TQ#fMp_tF~wO!s?YV?2kP~i;phi zJ$$t{!*r`#8ruo+;?z@F%YM}h6!n|g(RL4dXpj4>wAkI4T9R1npI_cy!Br8nXc*ij z!J2MQGh{=hNFFLXPZ(=aqK86aM&&Fa$-0VF6^t>AvJ4B}?#UTKlmeCWQ%va5pwTO4 z^p0cZC{@4t)fK1mKa)idSRdyXoO^DPw5m@cuwU{B^%R@=>i7AaUHc5(QicthXZ6lC z&h~+)`5Dizzvq?yeKBj@Dtv;8Ktles)zLP7GGx}Vy3`o_6$lJFxzmuQjSfYN_TuIm zww<~egYgE%JmPkargh;DiSfNQ1BuHN94$Du6}s6i-bC#vzGXHt@3p4HHG7duYt%2P zP%~SayRK&U#YPqe5Rq>D83>F&`;~SPc3ECh$BXzsVX*pJVxmy!CAlaRpA}Fl!7A)X zTIUbO4P@1;Y?X4Am4yA5GkC?NsFa1LiJW6Y=oGnY=hZHZbNmps<4T|>!7IV5`t+gC z>(fElsK$%>)#XoTOPQ$XmSQB`%(5!l+AJTIBc%_J{tUMqh_%YTLfZ{zetP^80oE<( zx*(ccy%G<vA_wcnsO*7>Rg79$!64h0h``ve82K$Q46yC}9MYHdt9x&FJ<>^aBhyP} zFByZXv2|^E*$WnLDNa9zQLk3c=$<Evg-=E%0$`$wy^11YCvhB&Bmc9YoSdna^M#Y7 zr(`gDcsoQ!b@w9)I(1e*e<{UT3AkL51%VpfU7eH7xG7b*j$;s0;QV_P8V#vGDYjkK z(`33EZ#9taOjojTJi)51CI-Owu)l#cX@$_{!7oxGq(|`nGGYV_FRjz!e&XbrruQp7 zdUF^}$+qWpZ*x~QC}jC)heDT|=+ozpkh0|?#(+vSDu<a&ccIB}ng|Ow?Fo84p0mnf zlr5LL!_e5&gV}dK#%Q0f_w!XvtbqjA6QK49q)Rjoz>60ROEu8yPFcFTh;>mbs9UhJ zKu4blP!xO8aDWOq4kFCxR#SSOHxVBgX3oqo4d0wgl@ddeV2(O48CWUIoxKNVbr_jh zL_L_FVQvG7sciYzOFG$IVt`KZ&Y2z;D0i>-DamG>`xdR|R|O;%Wg{Rous(TuKm%^` z0-D)G1`GHtgZI|DpxY9Q$yx^&`Dq^KxuU1Cyqe~O0AQm69QCNA_0;f>2{{n@=IfX2 z$zH-^C#(apT#ra@K~brZzzq9#K3g;^<Hrt+S3w~eepC5XFhh~T<-(`Mapx!JKZK0C zvoP|jr{S3|mKL=L?&!xnss)chKRGu!WHNE=i^!+?!-jEXEJodM@7bh#a8F_5L8xh= z)ITzu=-05J?cf>+W<*O3NBT<xPW3e`^+DBJF8Ad7YI|!We|xW71O*!vra4nrcX;al z2HHn@?c2*=hlA54obgPa0ux5-<lr=Roe%0b+DE&CS*qjT8;m_{8`^(8J^ORBH9v~g zJAmH0!|N-fW6IvFf4SrG!V9O!&~bsyl-cfA^nwItL?Ua-v@;~HTc-~9i_|nZ9%EdI 
zYRjWL{pz4Z*?S5S?#}&EnU!MR<N}9Rq-Fd1>FQA~cMHa&vlNaA3@Uz(K?kt-Nr2XB z4t)VQo1j=EK5&Ut2bT4;nR5n&22_Cq9Vr9Rx&ph1g@1%=4ck67ZX7a&J~do>h2@(v z&;nQmM@-N?gWo<fpiM8x7yy)X12>18Rm!)=xBeFXI=I{Oe!0_=gB0^Ly)UM78Voht z8&yd(1#-7F@6tKsHk4RE{%LBrU5J*;-g4#k`cwP&HJ<VA3-;JJa2yg#Lp}wAU7}B- z|KY7<YzlsC@=0SfY$#`0{U(mtADBuH_?jUg34s0kA)&PbxC|lcQF~@Fhae+$fzd&0 znm7vR3GAu#zRz)1Vv!6r`Hb+OROccw;78J!zJ*?{vyyfoDeMioPf}i&jHsdXPPP?W zB(<T;m-NAxrD_a~=qL7r3Dl9-d5F8I*3lu=pRj(DXR>L5h~;y0>WnA(79>agVdU<y zd50>(#e`wmqLRq4FvDQ+I@K1V6GETSJV*(_Zwo)Y4ONZ+WK*t$q4REJ+tO#cI{YQO z-w2vtCm{AsVw%H`-i2kDxHX>z{A=XO(&y+49QhHdva<`chiplw05PeMoBYdG7a_gj zIi>{T0_7`k;P?*pxGp^U6Q)?X)m=IApg)V`^|GiiR3(fa+n1&W#`p1Z-k(pS*5yn* zW%pBcbM7yM&`M;q>g-LwQp;aRF!9w3!G6F%PsCs#<cEctx=-{QAO`V=9&Ox-%bzf; zmNN}eDvChvITl;k@`=#u;=t~m?`9fs*AZ2z<Sb^^cR+H3LldaVqgt2p4n!ezk|!>d zV+LE5!`+rHflbJ%0Mg8zOEv_|4UU@%76=OK5F?jYj*~C&xBU`s<|hXV4p%hEQ`e1Q zEWWkUlBRK|Ai+{IKveUDnR`e;SeJomJWPWl=eFxr)`r&G{26{O97nRQ-gg}+O3huS z+I-+`1B@g9k*q3MMrtZPQo!C_R2VLiw60O+Gh@wN>eDh`+vfw$cMo~*0FYNaTm@%6 zV%r797l<=zUjJFdp7Ar7!<9rhw{t5uBkuG1gG%A8@G)Tap@x@W68kn?;ztj8-vH~7 zbZ{FuJYdT{ea%R7&<`IZ3q@57AxVIQTWUJ_06z^}RgjtPu{qe}L%o(^V2}nX6|N7M zM%yhpfZZQ4_ykGx^ZSOFU1Ey~i3`H^JZ(TcL-Oc7n_tpB9MA7<&nOFEVYo6k9b36R z*ArI_`lOEsWb0^Yb8YIM%*)J!g3E+JIz|;NGAuNV^Vm1@7tVky<|Ne!?gN<0O#{Xg zB{-<nfXE}(zd?e-FZ`b%yyyMr1PP2t3ZvFF<}LveY`XCL^qAJy?*6RP4P_}Q9|YWc z4!65sMQl&i&&u)bqU`eHogJW*bHs`_!Cq(%Y6E-Ph#ENavKnEsY<I<J+HM_k9~pyn zoM#p4AWc6^L1~9xmEw3I*f}AVZpTc+mJaUmuf@b~8~J5y8@RFCm=2H<RlTGk7%VA* zUBSJ-mzCS&O_6i!?-q`)LwXXw-OXNdQBGbZ0SZ+{?RE;(H>ZJJKLA8JB+@oBHzVK2 zEG^htU&7RLnuuc^21ryKVjnKLJKBc@Yx3Wo=-0wF;N#5*oNB7)_$dm|R`vS_Nw7I* z3Z@i7K@JA501EZMj<4YTPM;<9;n`<v9QG!A=6i<BiA=@Q!5h<~jOn0iaN57yv_t%Q zKIx6v1r*1B8~{24(aRZRVLt+@>inp9z3u>$qjNdElDgSsM@a;g-c!7TW)JcWt0mdb zWsN%mX`l|BH(~%P#r3mF<J6v%HX_YHrSCc-pBZpH9I8?O*>?jK%`(M0#iRflyL!l| z<P$qA911-y0QXJV%lc-6(vsakWpoQbC0A!3I}87or>6$Uoqc250Vk$)@c{rs&r{?8 zjfZdD*}w`~5=y7+$IEhpp*s|37k~usM#m3ahW=zg3O!8xQQ>jL<n$(sic>n4!^5<j z&hgoz04+uVgOLi1QoT=#N{1*Vt5BnHkxTv@2DqNiL)v34XR9xJFs@agLfTvZ*n)>U z?mD6dBsb#fE&c`Wm=@j;hmM7`90HUt?@zj`hl7cyS(KhzmDaz>yn<i8D7}0sRRAoL zTXab5U!c>1QZhBAtcHgq@*<&Pj36+0k(k&OUq#(bV~*1YMihf>C(j1}!gRIH!S%7& znG7jE%wlig_o^Vd>JKe}&9vYC)M!%5MVyfTLYNaIPR=t1ibFsNQ(W)}-L9_);MpF% zqkPD`saU@qvUsLk1Wu*oARo?@HzPvufUa%7urDdY@0BepN>JYI;2~E;3uXr~M%u{9 zN2d|71&FvxvSrsiW@W%SpMZ^Kc)52^s-5E^c9GXFm+Rs3stLaT8TO91Y`W`HV{&nt z;I_NN?p6IdlsnvOyD1t^@Vb?d|Go3km3nL%qNMa_*Ws1r?uCyzY5RTcwbRi{BVB?- ziIcXw)pc6WqWEz4Lw`~i?E7R%-6?+!xP9<xs=mlPsvt<lN0gNrDL?HEvZVc6DITt* zgfV~WbdpB6A6pj%pD+ru*j7CsZla4>bAjGExnrKarp5xr^?u0^Od_0;00;A8rFHVC zkrD{X9M<gS4O%u_A6Rzw+dN@)O}Y^eGAp>b*=tUPg07dNPEr>;vAa2cM*L9rH65<* ze-!aKU7l~@EV^N%KOi?hpHtk~T@2&Z*L~yZ4b!aP32a&C3I|$;S??jc{ItrxagGC2 z<|UYLgeb}|JVA75s&_I27LE~|!`Z48mdQ$O<r6B#Ak6_<#a>y~1+0!*$)4l1=Nz`V zijxmneN<OnBIv{Qk~yg2z2YW85CN%z0?Cr)c?`wG$b%K=!2Ssd`|i<AK$CS;hTI~0 zh++GRg@}77l${<pb?dNRA#J5?Yd^|6#fSLA!N(}u&9<0{3mYj4k+!}+Y57}0t{u!p zOk(yadFDw9_uxJ$?*RW2ic?WalSM=vUqnM9(Uvxuu4_%Bx!d4S`d5m!0}^!DUqB>q zW%9S-ck~0mqQ6{5kBW>?hi$w<WooGRet-f1m*v8!_m%av-wyu($@61pJ?O_f=wI`a z)%FnO#=qtxVhv#HPTrv-xh*5Bc}z5_M9|3{?;V;eKva(4jYTOoHV^<~A48d`o|jRQ zUUeAm5-85g;?Qmws8(v1Qj`#FI<$wN7?*&+bjBX+9FR~|<I;sOJbrdGa0^Ljdq^(U zwkV$*5>a|4=6d5OtnuHlkMFkY)jOERe-*nWbq*|N$GbU=7MkGXD&8iov}R`FWa2#9 zCbJ@Bj6_DxNbKVRb?{$_wZ<*Q)9@nYdnU^l+!upwU51PJfgyRt@?$&GdAtGpWf+s% z&LqQb?c_4liNvWY6@Zc(@IzgZow&V`*a9nWE{w$~WJjZQoR7uq`1LZrtvdHx)Km~E z`7wZdjQ9O!rkTN1aK9xr$$LkLD7-%JM~~|6biHNITL8K8Si06@Hd8r4_C(4x7Vk1R z+E0wFwk$7MhugW5C@eXmXyJw}OZz$;jhIEhb`^EP=;+M}90M;1WwBAE)JJY!y^V>G z+-brYz&CVY$kMrQNfuK~ae2i+>wv}(@&vicd>TFR(4Kw#)`f@F;}O1Ke6a?FqEc5^ 
z0>=U#o62GdiiVUUN0=A42K+@`KaiK|%H;=i%<p(p0*)RKa-3i`n)7)QdxE;1pwty9 z?ejPvdd-Dc!P}n|_VshjzFw(|ID&@6pP({D+sotzjV^ng`#8@!R<fg~V5c~g5X`Y) zyMAihwQIN~bl0p9^#09;G=j0-7Od122=F9JwvyJmPdV)r$CkjlxpV6(jq5PC@h~Hr z5@$g}`$H<;3b)}wAXx*PAX))Ua;VDHTwv-v)kw8i1ScR3g)ul9MuMU83(ZhfLpP2D zq2BS{2VPvHh8PyuUYb^H#>d8HWm@CyqpvFY!i>k<?%B)tv3qju);|(=<9<E}>}6`J z{-IIjfsZ@fska|?3rrfG@|;u5WuSwSARrfSl442OO4P!2(KA~*jt!w&E7ZL;lL>Oj zMuib$1X@Sg&fY!QAawh{=6;r<*XOOSk55l4bh;QGeuR}|Y8nKiArT%i%M0ACmUe2w z-sWZ@3cyLcj7u*Abz+w3@UzY5Bq6&KwR$U3Eq3bM^VeC!$qfgw?&md>*U50KuJ6OL z+b(|a&m4gv@Almhh-UUb_pR>8YjGJhPEA18VNfK6_^QLGw#RsGm(Pk(A?6r~8b)&& zG7KtHLt;4YarY$W$Uf1WM;~cl!VW9%4!;VX4EcXb`cCj)|HANXaIGB9)}C_A8oqIL z2-qxucFff2)Cm;;JIxcpa%ON97(Qb$sOFkt9~j5bUbBS@=DWSAxDkDXnE%43y-^&$ zrE2U^lO+z?U2mLL3I^0S#Td{JG3^G>g5fNjDJs-Zp&w7j7UVsIHHg(RI1&`MSP81r zM6!R)kMxgf_17+UiOXNY)gpqDleC=p<L=6np+grQ7ex9I-ufc}Ktih)0zE4dDGzAB zvZYZ31ay1(CQXc$&si$v4ByS^IV_khnjMCG?HTM+MHCcQy(_f;M~BBq_23QW>mw7{ z1yqF;?lk)Gf`79aL;sPXx$ZJhsOrHi;$0_H(#%9xXqCA*b+S`qK<0p^QoYDDCNP(~ zK(r=aGbVeW>cKMnv`XT(Y43S%LsMqp(=ZNWVGS6z$^E-TVACRuyvhR93boCj!T8+F zFT=blnOtj^^<*NiYMW{-HpZi$Zt(Zi{jEK9*XP2P?;Ski8wZzEDe`Cns$JwbCV7MP z1Ry0pJ^(~$^h(vsH!ijgcCdDmJ%-AG;cO%#EIEUS?J1bCTsA=hwdPFiF)*ScC+dug z#4yBCB#~5Kpn`Rtr>ZWH4;9Vou@|$y1bnAUlSp*Gh*x-M;2qqP-VXC<k#B<p$@l%F zYnNW*RSOltwj~I#T}At4%b<ONYBWM|^$0CweQJ2?s}W|dK$9C<#Zy#KgFUK2+t8&^ zW5=g=RXC-*{`BcFhc55+!1W+ODS&9Dbm8mvZ%wxCuj_RyqHNW@4ozF^V{~-s<@qiy zXSHI^*m%~EYIl6L%!)z3g8@HUW)WO-?|7W7pvAJtsf#Wgx#)sQCtF8=L`N0-uiUE6 zOoX1*IpDIzo<v(3G7}ZKHD+m$lobNv^Ex4G06a7sJ;(Wa{f_p`hwmOkEk>7uB;*46 zy&Q6g4)oRqEV=@g(md=a#&cU5e<sGgF4L%M33TwaZtr`?<RUi`nrMazY1Nt&oh$oG zJ`XO{`3p23|9GEvj-Ocy0&xnYJ=!tp@gdNAmHaF$)%7tV`{Q_>yjvSYLdzAM*Lfnj zajJgEGeI=%UG83Y{d#SPGvNJpO_smE>XO_PvN2AlkA*?3Y_;4esgItzPXt6HC4Q8j zoI=rBp4>bHq&e3Dpym!+2uM-+Hma!_qAkl1BnCx2R8t^`TyaC?T=nafXf414Llx3p zK`;dVCOJjI97G7vnS8HfcVR)*ou^UQO?x!;{=SDTEs%pA1)>;)z7&obx6jU(NOlCQ z7eR{+58$>7FBkD9JeI6xd4K!<Lsei!?O2Byv~ulFOpY*uNol$_8}&|eE-gT#YICzh z@7!rT9ILomA-pCo)t<ZTpk;NJiqUe&Z~C-3%y*j!3P}}wa`vqso&4rjb2lv=%K-Tr zrBvb=!Qk9Vcr49ETjyZ`L?aBh3>X6Pu%7G-G0Uq^^FOzXXQX)eFad_3T?>_Bo;-Fo z#Su-eQ^>90_5u6PQz|crt5{Oh*g74|H670u6M1nn9Gyqv&SLRgh0SDkwnfcs+d10p zG#r<0h+20WXm!PDXV6=0)jkv)olR%*$adq2V~HODFQahaN7K}w4W5yu%vXj&WMD3} zV;IWA#!G;RgVi$??d@T~6h%;A(PrvIuEwsN|48%9=%JzYcX8I1hc&sG^w?7F0p~2~ zo9~&tgWY6$_^59fxUDRxJL@ZSG{W3~zKuq?TP`go)I`x`m$f9F%;uo_f`tVq)1JKr zJp;)q5@5}Jic`Bm^-8?79dUTT^-8s_F3jW}^A*sUe;O-3?P;_|E49N&a+$SeN~mAh zQx2GR4z9<b_@$itQWQw}6)_tZ3rQ9Na;N45(VGQh7KUW#D+a@_2#ja}U@Ks<2Gx2R zg`6kC{NA=A?xq2v69sT11DH>pYy|E7*ea2k%^J?B+~?y(t2G?NTW?qIv|3CXt!cUD zlgmc>0=%WC?eSBp1q6?kNGlWQN`@U+Vj+i^jxN4Ba?_FsdTD>eT(LQBsv5acB()G< zI~GR;ID0FU(DcOrjTT>HefY$41EzyYW{6F|8I(oOFan6tVkH>X(6H@J?(dd}w8_xW z4$2hMd9(;nPfG=)@9o#s!lnLnKR<zgXejC=aQXbMiwHNn-C-}RDzQ26vTN0K@M}I? 
z3oUj-1V@_<Uy<??;`sHkgzJF^Xg}BOf<Xh0vrLR_RM3F3h_<N0LUZsUOf+AuY@mxc z#KYeWy*}u7+_t{;XfnQ!R4o#pM0aSC%MceaS&F+{u!nH%Dqi6A4I3X<V12ub?DoV{ z9;Fy$3&J@{1oLdTUHs!;3(V&v9`LU_(pd<eZQ}L8>&EU|O~YLz!O_HEgCO5*1s6w` z*Oo(tz437+K`0fdWhfRH|MqdGKqsM2ZqJ1%WPSqD>=}B(&~*!!^{KOxs4!KIXgCn4 z0OGVF)KlfcE0)yFvH9z$>hcL`+5C9~Sd%v<9No-e)RyPSpf>xV`0Be5mp}6CgE`6D z8<D$xLBt<!^{|%VIz|)#(wF!W^ZCY#h+1NS4xA9_fiZD<2Tw#IVGl->F#A%ZAeOd3 zj~p_re9sn%A7i;i3J(VTu|fjDev1}-hJ|rIA8C7=oDOexI1;eCv$k&|i6mw{bw6Hg zamHPv-dDSc;Irh>eMiBlo~5@I*t~8AD{V90&*FP$PvxE&@_l0Ea#S%eIKPB{H_7$9 zUL%LX9T*fgibi*IGh)QqDNgvkw|;nRpO$+&RfLXIC|-)XnQ9&x!S%k6!`{oqkyx#| z3HL{VUeNE0xP;hi2W~oOP>X9tR3ld+Ji^kiBAXVDRWVpm(20~)qIPcoShF3cGwhrK zZo60-o}HW#5_G-~k#mE_o=E+*os;@}1DD>ojGNf%>wb1BhG!xgiDw(8hKiA#R;6_N zB1b^RHul+5e_?eFHD^X8&rZ5hy6wCnj4Zli%(fwe;L7`iz^*l94m?x=0&1Na-MEfV zJPBVwSv=@ph**E~gub(*UWzuFoQ!(md3g=PrQ|QjnqT|-4WHK0nCJB(LVuGNaV)c8 zZ(z*%%tfusU3h1Wr!W^kgUT%DGShMLfJiky7AL`nvj5gc%8am&s6NWk3lWJKM21AE zoWdHZJ7GQg8NLT|IZo08zcD~}IIBR8>BDcV8{sS(EKL)G6%Mn$uG>l#3mB2N1BcH& zt-ur$oD_P=2p3fzlsnv@n1&!ko;n!71H7wAefrRZI={#y)Sk&ouFLc5e$xGBCb8>o z=}LgZ53M8rB-B2f(B=CNtR6oclRQFH0cetu4-30bnkld*n(D^tkF{~gV(|bPK4cK} zQ)Bc!DdH!zr~SFaUCqHhH|N7Ss^xhyQy)Q;KIW8cS$Q2@OH;;9so&A4$`*hLQgenL zZI=LN*=pPB@718y<caYuXP*Rmne{%JL#&|SiK8vcVtgH`it^v)KZK$q)Cu!UogrxR z@kZblFQ2qrtGZ0cobA?a4k$W=@FQSJ1~!#ln};5DU^;hhdaYNNrLYwl`mka^V9S*y zOC(YucQ2lAR)(S$K5<4DqMQwGaCb7&L^lSND)cw*s~2|gwmORjN5pSsH+mDPWTc#N zx_J<nGI=~%^LWU{T_n4d0lD}*-lwo`yf;XDd>*&n#fI7Zi*1&ERf?6JQ<;_ZsOf=( zxK8e=$p<fhw`!t<Pjk=a`h1*}?wfYqV%>=;aL1x8q#GjXjt$ig@eN-LcS_LTp@|uz zf{oQD2Vw7oZFg^UEwpTkO1QU;$s3iiNbemhJ+wEM(3mqd4tK3LtapH2)t_3<;X5@y zxiIPUca6l6nxbt4yD@oINjnDBr&pu)U6>{-UL3O^&egFO<3;&NYfQJ8E)SyiVc`)o z0|;A>{oI6sikH_?FTnOJfr)E)+<^nSFamAF(4c05QLT*bS6779LK<VPQ>An#uYYvZ zZ+TROIB7uM3$s=}%3Vf5ejW_&(}=jwh2&xH@$x))mS0LtFPI!(V)(<nztviYwwSIo zBsSdJg)eJs^xJH?YIlS$MH&u7!H0i910GKa>hN_9qW-i^-Txr~M<+6I8=l<rDFE;I z{889u#}^{i`}5^PX~Nf3aW>^y&;cCMBjC6}_JeP9y8lC)3GDbnZ{N(&I&ijEbbhCG zhtuV#s%vKf`*;AmXnkp<0b=MUXQWoNj&;3GKcVQY_zhygnt_`LN>$D>XBX^#+r;{I zjvgVePxK@1gugxCkS@D`)lM^oeHB)mqF=`53hEbZ*|vQyjWyUl*k*J1FCcMmlh1DC z?!;fro_F^y-M+)ctMt!Y&Bc89JanyGka`*0T9m_xvTwmqf)P8t*Mj4s!{mU8L^C~# zR9<tQB_~QlS%O>=40XnMnqcLC=kIx4C=;e{y2yb`^E#$N6M@RG7IDgqt=}@GY5Ea! 
z@mYcHbWw7Ela|K!!_O;4Jguk@fo_;{qqEw0yz}?AP<i+otRLk+t||N*z!hP*S3ykl z#3yelnxJP`1lu7;v`C)>5^8J!FxC3o9ou#wuSCnJ473B41sR1LB_NK|z(x%pA>VNQ zoIrPC<l^!V8C8V5MMF<x)5E7kTz`1RJlGqxCwGPC&fuw0Bbx8&2q>$TLs!f$shlrn za7UNnl>O<f(Poo+Db6`fJ)gx8MdyqPxYJU?TQoXGei%K@rM{rv)w1gN;?xN;^>f}1 zc8_Dl!6ri3r@eoNr^!w35-_AuXAh-3pAds<=V&%v(1A%jmd&J>O;Py0$O0krHq1wd zHts37V5ieWAApNd+-pD-8HHPvy>E;hG{zD|NUr1KnI7%v5BIwa0A--tTOOoBSmnOg z6)eyH-q$I#AoIu<E*3!3PNX$op$v67fBw|fS!|!L@XDOul8Z7h366s0i4?19n)VUA z2q%u58KzbMil92+k)61swFPYfp{peM*of{Rc+4McB&q2IHF4VtDQV{g-`Lu7;XVB7 zqHrx{>6FUaPK9V#25`qeR}d*4jW0C79bpc$Ukgs;<~}A^$#b4sJUWh?W7OTQl5F%H zQWj`@z+Mjgr};<1OYHjD4%GS;aI1%0(53~s&xOCL^aang&%!&_3-S$B+%uW%Rb-3Q z%caZi(e^KkQ7x_Ri{ifS<#+Rfw384mz5AIc+k4rmlv%i)CE4dzkI20b8;0W6ys?{U zx7-)gkj<aaSI#f`c9PFGUE{XD8S$Voqt3c%UqJ2f*pD34Lf-i~BVP7w$U*F^U|=76 zpjFbY^MA}2$Xvbl2(iRp>T~HLQ<a5VN2~+|&iC)=H&J4TyxEktn`~We$!No-PKdAr z++`&)-GS<kTDhap-Ez-1ADn1Sc3moT;U)jjddAz=z^z+`nv=jA20NSq3jLWu#=S3d zrK8s(ac^Nc`o8I`2t!F%?-*?}3I<Ek0Xx*yq+ED}#7THWOz;N$04!Vq!nZV2aFi0l zsFj|d+tXID?>jNF6Ic2<i-+afYK_U<&|;F?S@XTw#ku{$L$2ggFZWyLlBrA!G$RDl zSR$A51&LXf1#DqGnGsUnk}`6~yW4-(*zt9OiOJ<Npj?j4R&JuIuP8G4$RB*8KX|Ce z&ZEJ(4PV9{Myj@>=xOh0ttet8R3-3<cA0;7<aBQ5zEZJcHcjyr+&@v=e=I(WsrLm{ zdQrT1P;9m=HCvII!@|p|>yyGfJ7GaId6Nyba8{YcI<|_4vCgRfdb)wS+tv}`+9mZi zyUhK#+Z;Er|NkXA`5Pa?z(`Ne^fv{9k@<hj4`E}Y|1bOyMz;SfI{8LA{)->-7Z33- zS-`&lA~XyfEci6c-#hic(I2e%-`3gwtL#KrLr_yjj!N9b+SSC_!pPu%0uf=M|2FqG zM1<v=zV{D{-`oC8zHx#7PzwAPh{(6ye?u49{t{3y{vQL=w<_kZ@b^88-}nEO{ww|~ zkL53s#$SSs{}%pL_K*C(>;9i&R#rC1@9=LM|M~jwI)C->?>5>0lRy8s_n*G}qYc(? zaLeCyIKBt@@9Y0t{BPgs>HpoAzxwxg{x>D$pSHh!`0M&F`+t}H75~-tUt{u@kKgtF z*~Z`ge#d|N{(tk~zm-A$xAgz0AOEe+|KG;Hp4-31<-d%_U(e*P@NXf|_j~gndYb<L z2mL=_nE(67`d<}5kpG-e{<8w;f9Jn^PZ0m+zcBtK9TTzrroVgxgz)KP42%@*Yz%Dw z&Vl?-=+tk-=zn<pH}Qx0U&Nn(roX|iOy88x|EB<Gn8Qk4d%4-_>LG(^mT8I}PMrOl z42}=<!=GPV+!PUAT5ycZAxwZEh?|)Ko?d2&+iQf7a^<b+W+;sn6Ku?gzfuBg_xRk# zQ_HG)!>6n~LKDlT8`5c&BQWjpxl`|J+iA_}ruk*VthQU%^XdmYKvf+Udl<8wrDZVf zYl??%;l1F)N=)A)T|h11uh<_!AXZwdHkcD0)wulF#M~XW2hYv(cCp?3cw?paw{IyW zEPCy-HDI@A50fiOq09Zd;J;2Fa@%~AMq9=KzX<l)gV1cYwzdv83bdu%qi&;QWHvR| zeG=1eVF!Rx*=#H&utyVv7T@U0U7S3@u+7qg_=H}YR%mFdS4W1+gT4aYHw3)F-+Uf% z!s+vTfeZ_IzH;FT2QX-M`~Xqr%_a0P3lscZVNo%00eTglg+OzGc>RdSsefwLim?*& zEsu(_l6_5mN<WH;N}c}%%^`w$?)UNV<QKqQ>2qyM-$A-Xa2_bM=fVwfZ9{xkc(?fA zunJBt@}_GVpvwnuKg7BPzU0TcO9iDHq)`9SN@gd7c6Ss?xd!-WOKR&TA=eN_SN44( z$Wk3ptX*zT=5QUlpR9=t%G{T+3!rq(Ykw2sOdi(B;71i<4lbE)ZYZCAjeN2fS^&I% zanrqP8_M$ll{=P|fm)sSmej!gp|^x)aF1~Sc9}nn@G>l)-@7?pZa#*NJK*k#*T)|s zy(q8jtx_$pqyg%%0hFf9#9PV_+?`P0Uyj<7g4P(T6Hp%=MPxtI+;QQC>0@^|)nTLU zSuP2~c>s8<=~h`B9S?7e>9D&&hFmzL3;)y-Yh(ws&V~C-jYzPArS5}zq4ydfR_d?T zd0*aS^&KsIpCo-0WAA&obM!eeyVrV$7nWjd+rZk++fMUa{=WS%SG<JsaKP*Bn6eE$ z!(5He&*~RvTX>TTUpHKz&@DQ4Fz{pIpHIx4V7`8|Tel`72H8<b?MN&KEZhMicWAv) zy?W4<AzYJE_Gm2$tD6J=+GDi4tB+byedBZSRq@#|U*fCrox{j+xr9MtnPlS}8AKZV zUi189JHO36UClduZV(3#uwQVutRKO|WQ&z~Sl#Rhvx9)tLyX!%kq#s^gRomMtNY)F z7QF2LK_BJaKi2TV-5pqb<?s<mAYfpEa-vU`(f?iLCu0h=#@Q0;(nz+3@C@B;SGQZo zwvM;<ep!2IH8b9Rmfd$`@6`5li_(^<HF|UA>JT=8TkmM+;O9ba;Ox=E?~C7~ucfc6 zzhi&bf<)R^+K1Z5-uB$~fEN(Xz@vp<408<!y?TZxqDom6XD`s6X`YoqNN1cuoRZv} z=4a<zWmNt`cxB;*y7}cZGYz{>_{_c&>^cLM>cH3kg>1POT9q(dAM>-VcTaT-@@z}) z4m!)fFv`r%Fw7SlFT`vtv184|8tbMH>UZw_;9CDz#K{`|rCILzMVRfUgQg8taG4By zqUMD@68G5FE#;<|rg@-VUtqyhhr~^*0Bk0Zl6_4L)-#5e^v>Q8y|$zeW0Yqw>@&FT zc($pky8qy%P8O|mpQeJpRb4F2FWq5*`!2IFT}ua^%D!cKXuMyAuRpY4T`oD-CTo#= z0)2<zY2jBA|NQvEWlPnX)V@6XLTSwj7(9e1)#rUdtQx?Jf5|l@NSkCG1lpRtqq9e^ zJzjOV2%ofx8od}p%kks2g|+ivw@>4Op56a(-HIMi`fB6Ou@fH!%hs2=@AC=2N|bbW zkWd@d6&<^WHU8sSja;s0fCf(^^yR_lezJ1hvo$V=+CJ`hR}R65FRd(D=F^>O-HLOV 
zHNC4VgmBfX4ZRmXwmqguIi$VLPsVR@jhX_#gQ8CsFl<T_6=F6^fx}yKc<G*^m}K|_ z4MmoA4DKj9m5YMRvw)QVUA}Q7c5>?Wy(`DLm1?@2{gSnEm^zbK0iTBOP^>v)grB>V zVdli#=@|L6ZABPI#>|Cz1ZYXciB^Vt&O8oerZ@`zXv4Q4s)(G(D5(sXGQR<z4}KZ& zPH9;a-=7L?w9H$oSrsn~4&#cmaQXy2EILj<&J?(y=V{rWofWj;E~EN_t1x=`u^TtC zU9!@W0qsyIc3n^gs(D|xx*4pSY2KXP&RG)<C^5P%!pwYmzD=)kMI+2<eZGX{P9)N3 z{(MTmt_PnlMrC^AoZY<fASO-#P-TSCp`MZ!V-lG4bkjc*i0AhxS0^H5l`XW!s<~B$ z>hQj{Qg=@XlkkP=Xtx^dTaVb;frUyvk6Em#W46L>h+mAW>ovp>o-Q_*Cv(~0o5vgI zw8=V4bT&(qb1JQf9sxr^W33%Yb2%za#S618X4vG0wT<^ladc5!)C@cII_~B4SlA=k zZeWyoo|A?&)W~4Ernk~l8!nqO?fTA1Ebzxk4D#U%`AibC=Vi=%$g-<zp^5dBImV$! zrR#VayTXZhrj#i>95Yy0VwxycdPQJL`P@2p^4Mv)SO$uokmVYDY9%IDvV})4FCVQj zm1f?96n?XnP;S=dBJ&23DV1Kp&Qi3|RG;}rrz<yd#L3Z_D}hQpVfbh|C~q;ouT?;@ zMYW_KB_bYm#C+46MKJM$zWnO>bU1hwDRGad19dQ%zKbcHFr?6v!%A5!_LnWOcRO#x zduM$8Su|$)jRBE`RBWc>$R~EhD2fgrO-#<0Q5y-1Zl`jkP8TC;m@8|{M2qV>hq|TL z`P!N})pbRLL%Ma-Fe8Ui61BrJdVS1Ll>=|)r|@9HDlsuqdcl04e@5E^I~mBubpNkj z4vP2>?OYS(T{$chCSDq6THZg{AK1@T+1LzICv_VpR9sxsCsE7tTW6P5^MEYb(b+QL zhu6!R{PUde>qeLLGb`o?ZkRH}V74<+tIo@dq$>x^4XU7xP*H=3(T6H?jT#WgPxM}x zWJ)FE_4m5)ufVdr-}`_Tu*Bd$NEPNrG3?cs$@Ctp=!(2lIJDdOIg~Wmv>^;Esdf?B zd&y6sIU6DHNyc_T+YWX`fnPxZKOA`6bkn(~Q}3841~UQ42qWnz`>dT>L>f&`<OY9| zqh?S_qk*o1{9gFrhSE#RG&;bJ%Pu|&VNadb&Wgu^Jj%4OZCpQ2FY}o194a;y=QuV@ zZge+-h5J5aZBks-fPad@3Ah2j#Jn1FF+l^tOq0TxLL;-Wt;%eTRT9(*{7n!l2@X@x zMUMrVLZ~UR!@yF&og5n%(-wWpC>OioMqW06sk%UrPP{MuxH_-*A+qiVCT4?(LS`IO zaZ^=Qm$l)~^`r#}jtyXi3}sbS#j0FhM6#03Zp|1eWD+hZH&4lk$qDwOeW7VLY+Wy- zMQCkaI&Vk!_ms`R(UQ;VuiwvUzHjWa45Lw5P^-M|SC=p{R4;70y=~9qJWqVu8K%v# zxo$r0h7MAJ_rLrjtu#|xV?K;d2KFPR?tnrGH-*sDru4kWXCJkcxW?Cn7-xcN!rB8i zf9a<FY#n<?9DgC4L@~1n{%uO8mE11SuDMgQWrZl*I_jOH9n65r6dn`)T#TBfE<Ens z>NCB!s2q8yMgbKDQU{LVWBUSJl;seEzJ|ipxEhnISs8g(8Nw+PAT*Jl1RlCT35f-o zyeFWe3<E=8&VN(Zv_P7I?72{kt)p5fxECN+(fjjteAk7L*Y&no*SgnzaS+WWau~P& zLYpqf{pLuh?X(;TPD883ImXNVN``IQd<zz9%(*g1FAz*OOuIrNE%HQzvdB&6rUL}; zxG1^UfxGz2i!(Ll`i{LECScj{h`M3>?9LotUX*~ksgMW1F!RW%rinS{L|BeCO$W!e zM#x}nfpcNeB$f=6ao3+5?Yy3{e=D^6zEY=0{#5b^iZc#VFj<{ld`u65VWa6`Kma_{ zxmW(tt&Nd;sYUE{CvSl=DV0Q^yeg^el+cpMlhVQBh~$U~10&WLJwci9o8lTSHp*A= zCb^MWxC){+ouzpF+|BrzjI`yg<JMZ8&hja+n{-U0vs)vxEn^0sChjJAZ$TZEhIgX- zi}$eS(48Zm8#i5#z1caxOUp;-Sh-!^N8Cr!M^<)%u$?P5i=2U<HdP{Fq)_X|!n7T{ z0AI4bE1f|jf%e1%<Sbr7@eR!Tv48smi~E2GRgA(nIJ^d<%WYU15oVmg`3UgnL0&7Y zNRZ$G9IreRo3&(oFN+>8J>5p%yP2b0_q_`Lsly4P$f7*|PNdvJ+o0k(CSk%h+xdeS z=;8@%FuyU^LkA2dpXy=+nXv*A2lh-8pCm{eBEdf8L&!#5moUFFXwdxB-n{57o9O0N zs8rVbVBe|dr&Mbzreo-&L<_3+BHZ!eN&Ew~qy>K%INDw6J=?>}?F&T0RAC^keQeq; zo391x_sWoyTVyGFkC?H_q+z_aMm?w0G^5%6&o_L!j6UlIf8$1bVtptjY6fKhYgL{l z^8ATC`C?rAs5Yg1JN&A`{HZZQNb&}2#!Qfy1DYr_kQg?GND(07O3|2m?H*zJP7wd^ ztpe5y#Pc)~1vx;8;hR;2GKAZZUFPE~68b4fX<<QAUQYRbf@*6mbFSghjb|U$1TIv> z{OaJT^89)RB%o?>R~L>Xp=6tOeH{is7IT4q_do75I7n^xv0QRqq+!#=&j)hGdv34_ zd-Cj58RJQ%lS?Mv9wPJdVKy-ZNyID21(w*uEBg}$XTKrV@n>nz19m$(WYAulF?zAi z(6uK0W@5MiM7YX{K9uCwKld8P3c?qV!!4`}%A=OkRO_Ist=3%^Sx;uOX47+AdrUpn zEl#*~5wS<RbagDz6(}|1x3EkqRT?R5gE?Es%^N9bRg&6(V}!G!^3$xIHmvnU<*gyZ zuheT<O5{=MgKf&t12(M>Y~&hoew8x0=B;840>cVxn~l$b>28*#o)T7JVdEB6L|4S& z(nRGcU_hz`=wUntyF}W6W5fq)@><4B*u?%h9A6Sp-lvRzWEy36WJbdg7JdCyvAfQ8 z*5F@abv?G`beQtkKA-8U*AO#%w%b&45pmWT;#Mc7iYA->XJJ=#A!@BYWG0$~n2!rs zUyG0ZtV`X}EyWZRTsooNLGS{Y6f*tnn!0AEo6%cj1=7XIqmWV+VlCg=MK9UGWW_)h zz4RUt2NP8cfDk0OI1cF%j9;-xwPUzdZp?z?q6vJtI3^-#0*f3pM3A<ayBT3*(I8%_ z1`m!&<EN#RYs>N4wjS=+>nKs~w)azID$dysx6S0ad56dQ(PZaF$uAuB&kg@sJucS! 
z4}U3Nhj$%53hS=(faAyxJWa2!i*lzsrDvi&0OLNH_^zKZ%`w$6JTab^FCDn42L~~+ zTvv=hgkU~kMH=_ns`GLh!3R!c7CmZb;#hqM_VrV|ZqhBJ>ZY~TgZI#rz)UFVf(a&u zJ``gSUYS$l5UyH!$4k6CB9`FU7~Ddmd9Ywl(~1$uu9$mzYC0{#V*A9LNRv-<H<Txj zesq{-gg#j;RwE<g3L`A7(Y@6lPs80BJZe81A1H5L>zyQRl{d#(o%8FAjLlYW<5lJK z0k7=X&g`gig<RPX+Xw9KC*3q-B*mujN_uY+$sxy9wd@|ZDYh+m$NnfY%P^+Cw@7MD z<E&758S<i1qRh0*@@Csk>rNlId5*ogDexZMU0ma*ZIuu@f+0Z$KuG(<THi;s!zOEd z@HWQ9U)*H^4`V%?hFsqc5BMm5K>bO5WqbeW8*p_xnPfz|EjFS7Bm&q&&_(7&YacAA zt{<>V<sJ<Eu!M97BT2#`Y#_9m(vi>PK{zj(yoW$=)P^e;GDb#8rBqVe8I;!v%n~JT zs3v>vT11AmUd~;ZW!TQ1;hda$mNxP`*Ym8RlkAe$(Q0E0>Ug(@;1pxm{V;npw)f}c zMn(7kD(<Vps$9Ch4WzLs5fBt<f!%?`p}P?f1Bs0w(wi0#2>}B^DG6y1MLHx5LZnL? z1u01h1w@H=w$F1sa_{pz@43GBk8fXc!Tz~s*34RKX4c&6UNcV#Wj-FJgzm}&;l|TT zXK24uAJ`o{ak7hEJ#4&kL8-pXtv>(D2J1K9k@E0C+vgpgIkS|*pKn)8Y&-c1Ji<J3 z?XPE}GR)J@<vBx?71&?#-n!8E)G9HocXo9hiKz7CW@-7}c1WEG`Rem>c~`vVt-8Uh zm|2-O1bf#CWbGt<qXK2$QHR1PbGEU{54VeAoOk_rUETAQ6Pt?n+w?va#70xiS!=gP zOWuMi@TxCZK*}>2O1izfSN&u|=7YC|hx2@8iqgODHAz_R&OF(EJYrruJv@b;&4ad6 zCY1~A#q{IueSR(770wO*KT6nX(M2+H_k<mvOMF0FsPxoe-ZvMok`iDwd@bv3-M6Qe z4K-~xd<H`pcZ+Ycmz_LZKfDV+upWQNc3izj!3JBLSNSn{K6mJw;MRIvY2TzZl_)sr zP84Pr7{Bt)KKDb}^kJomXg=SW`jz_=N`r4^*c!Ub@PzC1<N4e4+9E9+XLp^+7#rSj zEbIA-qnP$_UNvV92|m+q-LYTT<GSk=?8q&c{{Z@(p$36rrC6`(oV!lx?ij@deq#Mr zFgPjWlg0*St7wtOQRj+eUW#LBaud_ueYB?dWRKDPjny$S;h4q*>gioyW}~`A!-6br z``e51FWD(|y<uo^X}Y~Fe6meDmcnhbY5TO%o$b^5dMpUm$(lVz^lBrr!5!J{g;#q| z@edx*(($<4cv;_F;6&NIMqYhfaNcHBX{7vN{R?LL-gGRi3q7;zE_Ia0V((|Sl<13S zsdX#xiobvHVhY1K+-oQy68I_MQ8m9M>%znIJny3VHu@=UBV}H70xNSO%W!YBloVRS zb+&7Dm0-bgI?>!K#7wMZ&?c`8YnJh`C~#CZ#2fR3L%K(@{dxtxd&uq@%2TK$bK+MX zZ<oDNmtSSl8(F=|+sz70W;cT;N6H$br*y^|&xSw*<vO*H(0W$Zk%s!<&`p8V^iusQ zNxvv%kH_M8?Q|ABL7MYPlX^M2&xP#o8vnr?W0(`g<IQSeacNI(G%9KmrEvILZGO09 z$wx^{Qo!+{IIKwm?-=UH$jU@x){s)*Ld#%^lv?3lO4@^$sJy!EX%CmB7;f65O{*`i z*q~p-Ms>H7_h<VWJ~DWz|LL*VCr<qzAu}&7qLcaDQ#^5EulFl*%(ToLHnY=ypJAsj zcHoE{yT87|sVhhg<JeN&JNpiQ7oywqMu;`*Yi#P32b52~91d3M4*o7zoc1cl_^Fm; zc4Drb4*!!kPV0{+TGf`Ko){{1s}%K7m`7%n5Ah9-FMlpg|GZas&F8UK<~vEXnM6^S zXqESRX`fF#uF#&@ukyZMcg?-QRuw78SJ7|<5vR`AA-{f#6K%KIbF|a@t%#5(@3Q## zosw!|j1#=ESe<@SW~i>aU?9CPALCQWcEylt)I_BFu+_y%gs4>^JkfxzxXJRzsGeLg zjo|Z>7msD%QM)A?y2AazSgXp>Rd0{!*;9T#RkK0ro25-Yo!Q$#=MXF69yKB>D<l0! zR|C5&#f7hAZ|}3>2)ukj_1dTNI<m#LO4ja2nR`cw%bg$!f;%Gza#CL`Emd5Jelse~ zZo(zj!``s8QoM9JiqfF&`_e(FLph)8`B$=~dfFz&lBW(=J!;B*mYd7leS~PSdXrc# z=cz!<7?w54^v|0vrQ{3~6mLo5DOBt5*ptLYe8}&x{{oBD<F1Km<_?cx|5N^=O+P-I zB}(o!E-F{*dhw&@x(xE(cZ14KZk;nHXtF73o`+t!m-UuD;^Be{t)dEFShzaIV06*z zC|bds?wXQ$mm%%uIjyESZGm!`(ny8;V~owuGHtSTydLz#27R#IiVY*|f0bJn_WE#k zSX^<x<1Gw(wQ1x0)I~w#u@f#T`ox)Onc>#v-R1q9u&kVSw(OijF=t?ETqkC`Z_a&h zn@g2@k<jHIyVv~^Q90R;KgW^f5htf%``#B*K{NR;S35q1c9ieEGUlOXGxo7g*jzG# z>5kITC$o=prz)gw@v5?^WIH2V3}zBIu!^`I{-)G`lXp_-zBnd_PSnbJXC)XV2ew9C zQaZiPw{bu;gl**S0+D}SMvunh(SOAuf4}DauW$tER68eY2co?bx0sBxIl+#|jfbIk z#v#G;{=Yl?4o^A=?6){18V(M>Ln6@Ja2O83jmP75h9T88^wrKN{Oj~P5f!58Rcmt% z{hepuiI74IdPIV)0SH8hsS`|p#w<V}41_J*4XrJRR-kS?>bJ-w;PzK!62wMG5sV#? zNi^vqd(u&W;3zy&R1bXrKm5@CAIkp8_y21hKckC41|U-Ps~x|~<!5B_U;qDSdw)eH zp?&`|GWin+k+ENqFZ};g<m*4!$IkE<2tJeSSeIMur#U!tBYr9vd{?w~awf`InL2Tk z;^V)<UvN17pB{jR{=M0MhQH9HqZC2)zrtTA<Ucw8?yT1}{gp`RPcJ4T>zhyB_c(b! 
zwm}^+&bfRvh`vE1hQ{yS!Nad8?myg>?6*uEddMRGZOtEl)RnxIE18OV##Ed@LFca@ zolfjN@nh-ic~ymT`b?Qd*elGJE!(OX50+o;mEIuQwJvOc6Ej-Z6P?=@ww!kF=HA`2 zn{IR?U2^ka^|`E1yeVT}s0;>P+B7roc3|IC;b^+;7QHp+*CD#=B3%;4*5`<Bx&`;8 zmX%#0{2_`zJiS-~!nj)ZC?Dw9jP!mNUK#EiJ;0?m*j-AT$nF;ty0ROg`&iPZVR>Eh zY}A~IPDR+jJ~c0c72)V7$1+yKvTRLy3X9gh#(kqKew0yC;k(W$yd-@jBHHopK-UlZ zh4}+Tp4q#ejw!#?dv}w&pXcn!>%=fl%dnLV6Y0r}syPM`P0@85+mGe&eH+`-yy3L( zl8#rUnWUWOlR;R?P<du&9!y`Zm#KerUDk)+=(%{_p4!&Kn-@nrO8N*}1nv40`LJ`E zL6$1SE!vD~G3A>)4hO8)g*fDyamR0`FPd1@Nw_nnEP8$8F}fryp-R{#I(j*+i3)dj zQoq{s8+`88ik$Sjc9%mQuvZk{xJ&p}W#{O!{Ey>8Xw_wG<^-5(xG#8J661q?F{`<B zW7>gLq1Ds<Q0p5-Gp>jYRd-ewqvkVpTYtP=em52<;&AY8(%yuao273)r28;V_t8+9 zYvk`PUqG}71z8%&J*o1?G{3$!<Zqw<v9G*uuf;jaJ1s%$M3F!*Q`Wh_%<;2d%IH7* zvAo%?Y<@ty`C@>Uz%Hl!{f2P+Jgx#R=QBfzeJY*in4DgQANP3m&*RuxFG))UYK`SC zA>Y(|&AYhD@a&P-ZDJfl+()=OOXAJmkpm7i)r-9H_jnSr9Vwr^_RaXQ_4Wb$LbI_Y zyK~g@W^j1yDASOE52Np?H+3I^y;a07%O2Bz%@jj3->%jxYeeZ*y=KZIbcvt7CRb(f zi^H+*tU5D}Fg*vB523>~-HQi5*I!=bxcF*qac`$+U_M+kBZe($r3ZN~CNt*HqyVF( z%bcpKh*!M#t<Bp=hYLfF<*i==H&l<yS?M?5-V*SXGB`4LY*5ZeCADeab8YJGw}(R2 zOnE$<=uAskS8Es-osam1_c4F1R&fvf>PaP9^JzSvzMR7-lreAL<%l3P2SFb$x9}jU zC787beb{R@34g<>&>OKj@WAFA^#g?upGoH3^3Zt0@8^;?)+bvMZhLDp&^wgVQu<@T z^XEgfb`03s`O(!SwvCfpEIy*cZ3`m@zGQwIUW#7Vc`Zaw`y((ju$M_y;kjA#iO{oY zw>S%@8+`7jd^Rn4G$0yto4w(Q^9XO~2iOMXvsbJR9LJP|<<B<6;q&XcnFiHATx0QR z*kb%Pa(wb;bCD5)*Yz`|)^pAgXJoe`#Nc9X;B2PBiDsuh+t{AlL~!DrIx;O|%4J|Y zt(M}Bak@UWL_LCf-|oxarIImSoa0XM3=Dy~gZ*-xlD#fOs`+<ke8=LGs?H@^zI4n` zMzU_TsKcf7Dlv@3u3X<}>E?E4O1cj$6y0?A-0Ye?yx`{A=WqJ_!^z&#rWN_@(;P&9 za1X?nLH|D60~E7V--GtN_c`qLVEJ?w9o>k)b?T!k4hQ+xp3Wl=-M6H;!ICL`5OsQU z`A8h+)Y*eqWBZRSW}fEY4BRIZFENZ4qi!v~<h0c{-|6l8a9}{whLz=1#MhfSG<5NF zB|-8ry&~P7;)DKIwUq8nNA<K%P~Fk=ZFgU*$jFT|wDGg;WuT0m`=0nA+4F{_ja+5F zNU>*$&XfEWO|w`H&Uu-kcZUpRdnD}?a*^CCM6nX8Gp*v6hucT(syU1Yluo7aj?AsP zQ6v|8*PePfp=kPAr%Uq8{yh^`rnaUzwNE%Qyhb-R8J4yT!)(Sq_=ovE^m#I{J}_|7 z<VE|huDhx{yH8BuX0BqqWVJ={Q1RvdlftawQuqDo0}sb>ju&RGQuG$zdEC+UiA&Ee z-&dYcqKqN-y0mY6K0YsR95iW@u$rvpCoxO!OElMNGYY$xv&oc}J-%&oTz<@Ob}s_f z>BM*W(fA9$S4M-@wC8(eY-hf*265*m!6~w%R!wWE#AS~RGVOV}B48^zXFHty@tWWW z%epoG3KNsIQlkHrqAQXOkE2i2NLg?_EWh;jqWwbZz0m@K=H4nf&N>RCbEjYLDIDOG z$>-TLE-p<qKhYo<G44<hpS(XJ&t1LR?Cc(z;P$l98;S)&nd#n^UJeP;k^<jPQxikm z$3*5|kBw*E-T&~6lZM`?4QuQh;xR4p=y9ENlxX@Li+jO$lat+>rm2r{9aZIuNGx(c z9k5u+IclUtZLyo;5_NC!Te;czyF@m#n_V(jT$E#?bat6ABprOP?HwTe3|u2ulymRU z>NPcLt#5qPUM~+-Guq#EV4jxHk?mUxlXLH6pf&`QHqVrdPTJa71lYXf4VZmFQ+*#$ zZpHC%a3^dYu3p*31!N9gcc>Xwa-UhD*5a|vY^28efI7GNsIlHW0de+s7wJ2npOr0} z;JUfkJbUo&Bh%IPC5yueoJO8&PgyuCFQu<BYMK&cTAU4{2&aT(Och|lXQ(*Uu4Qa& zOzN2E)%HH!Z0=3F^DXHO>yirUMVk7j_U9q14$?H<E)S)b2rj`kRm;xSoVRT=>tr_w z#+h=bmQtdh(@{DcbVE>$P=Aqdrj%A<K&VG2rZY0h<qp0=JUuERVKe#dvV$NF=Jw6p z_`}dOnIEwsQ8`QKWX*+@{VWr?-CLH|Xk1O%IGHsAd0xJ#HQCZ-EnPUltk^0rXo$Hn z)_3rz{9b9zWCON4EXRJ}8;X4Dt9oNurdD23p*23XRgbCZa)_^&2iJ8No*cd1F|5F^ z;Hz|a8db-|J$Gb1z*TeOBA36NPrT=i5gjzO_sp4@mLW^i0~P**yc`z3jhqK%kLbR< zE|xWHF|5z7eVm$J;Ea&ayR4B|{bKQRS}ppCZ9QEvvaw1^+KM^J(TXzZmLBPR;SYMU z@O}qOXM#fs!JGzhjGhaX?;l9FKjlQD6)G=&@A7CVV9PMef`5N_<(iaicFa(1lLK1! 
zl~a9}|4_r-*C)D+sMI*;8uruZ(HP{kWpYPc4-_R@O$C0C5gXrc5$eeOmBJM<GxgRs z+>J%?k$GbBnwxa%MuD-cvZ95UX<^V6A((8Ok!SQVX4TT_g!X49PfcoU&2N?+-xPIa zEmhlQihDcGrte{WTSm~mhy`iM-puDrJ%5uU$Jrtmr64f$uv%M>u_vgWsS~d%8Oc0> zZ&fva2-mQ07m7JQQ~L=%HWxqo!^C^FN2xrHk6z*^XUJ8v9tZvx*;2^G`)*r@TTTsD zo}v^^y=)R1q#`Y-h|#@%Y_jdX5?<ht3r9^-1S7Te{(_EXy0%L^fhKi}GjKvBqA1`h zLo$!Z%$u5y?b^=w8#GV8a$Wz>$F?OG>cWuL#a3iM%;Zg*Zh36kq)FG-YkB0XhHZh6 z|B^*o+BrmIB2yvyOXXN+%wy9A{RFJ~XboDbfTjHW_fV%dv2{muR|ukkBY73hL}RV= z8awr_<6c|Mu{=txiIUavxNY~Z7GKB5>|bu;zPGP^TUTDetA8KOjp5iFza^IPXlQfb zzSfJNwXwT;T2dZD-z70Joy9_{ELgt^hKMN%i9^SjF6+aYBcC|ZIoVs9wfeq{UVDKQ z49{fr2<3g$iaD>Tqp6el?sd9NL1z~&-vE=x6%CokmS(DqC}xePZ=ba0e><fy9oU#b z>m0dYXhlhd8fi0auGHy&MJK*6?KT`~n>Ow^@T7}9JxMM^?Sb^<#7T8Und3)vyBbx6 zB|kToH5D@5sFrb25=_V=K7ON)I?ACil@Z(eG(OZxE1f^kL-@VkiB^~1miWGP>L2fJ zKUKNl?DYlF-H~^tkyagF<RV;SX(8_$mu30np}67J)G$}<W|xm*E;8$Ik)qq|y8+$< z7Y+E&N!)KzEwni{HzY-9v`?X#=;FJr*6gg_%t9yZv-<Xyb3OawihE-0;nPi#jCT#h zUyCvBt?1gH2KQ&fb+<oT=*rd2?YT+cmRWg8`{5D4!WWIt5h9^QCtYr=D&G)3bN_LU zdWj4nqvJ}xu<#wrIPdn3_G@wy@`@9|)U%Zr#R3DHd2bsWRL0w}9hS@1QEWJ_46oL= zN`CqPdE&_#aaGS!Lko)s?_XYuqD`1NyQc6mSoi$Yw$o7$JyZf7V}0$tf7ilUw9A(W z{H)=z8XA)%HvRyqSDOX4XeOSj4afEf7i8^?TQm=z(;Fx@-RMX^Lw)6mS`IufiYe}r z`TO`f%g5@d%LA_XqkDQ|53*JL5v$<pUZ$u-qq-1j#@=~b*E2_1_A-mI&}>6Zgk(Wm zY{lAxBpy%e&JU5>ELAt$C*Mj|b?4YDRNA0>J=7ReH*|;kyY4bSN;*`RYv=p@HS6R9 z=?dFVVuH_ZnSUw^89(5cB1V^ZFtbOPO3!W7t}!8Nbf1X5_4sVD-u&9fjB%N`hZ^R0 z1g!<%P)>X;ntt;*s<lXS?0ge`Lvqb;#rwx6*Ll>_Ub{)G6<+oE>DmG|l~P_2EwlR( zo%;-<<V@eOrrC5F@!iTnJ!@#3(=>{Hb<*hkZ4qmwjQ;5XmzKD2x&Q^%^)Iqzxc&9k z%NZZe4KW`_84y2)iYE1m%3dZse9+5u7uE1MvCNR>L;_3pL6eIQ1L~P<?A~8<4UI`L zJz*`Yb>>_Je^;<(%aE-}6^Eo>iJM-3<aUvJT|zXA=<L~E_wB9nq0X-niTi%QcHe?= z-WvC2qaPp1*${q^zK|o5!9urh+>yOG>AXApA<3zC=4r7<mwoxC1XJlAh`hU$agoO; z%QCg%@bm}NQRl802Hg=V)F3?{%>1xN5<AOX$?a5DE4Sq;De;nq9%XhrjWQe5DFwNQ z9ohlo?sBoORdhZ+6}X=`fygN0t}i;V-IqTV=<W3+yJkc9lvg-Gc+PeBmWgDosBiuj ztIHSVE)k1%nXLzvy@nxYyG+GhW%-?O>IT$HoLm%GA**^W$0CX?*ZSOKNs~~OwCfM1 zFMX9zc%(k0qA$Ntz8oLWZy>b)?Sgv08SUPzN#56XY1l{`GwY_Kj7Nu0EdKFT%PJDX z5#X|``l6jEQ+3Dy-{QSV<rqW&*WP~lTik3cj0I0ZhBdCyEgc`N2wC9ULZ+n3FWlFP zuzIoANrjbt{gnbj!zotdNS|_cq(sI^F6J-~wd<<drik~+{(Q>m#(aI&QE_E#p-D#) zPUi00W4;v{|1QE_G1qwPi;8IK1*6Mf-{_&<I$pWNS-zZ)N~=hofoZ3Rrpntsz-^CU z?86OO<lE9GPjLvK6o*w@M9NRMDRZ+6g$`n#*=Cn$S=pFWw<KsU*nT;6x7lL9VOX40 z?zJo9qLyk}uWz#ja)$e84q*p*JsXY7>F64^WOhBfnYe-~$kRDB-f_Ku>b0xJBgH?e z478t5$*7LLQ+&}G(`?jSe$ow5Wh${&oU7?mHQnh&DC0{!cYHphq^v&~+2Q=A((k4a zGKCdZhkW7faHto5O+2D*E?y61qhV`r5uhI*d7-}1gkr|ZzOQK|qV)E70-s}ixB{Vi zGN8q{C&o(kV$9oRQN3*0G1bl(b=8bct9$`r7t2E4fW0^EhNuM=OuXQ)20yMcKl>IW za9NENH>a&58cAs(`rd7LpxEEvCN^bCsbg*WJGGu*P8=M)JaCKW$J&7d3&YX{u<sPD zTi<x%**AN`j)do(XSW4eV^{ff_0<`j?+B{81O+<wGqcFdt9HcPe-M&UtD3{BmX|)L z4&HPLFK+5^LRZ|m7%VJEz~H+~)V}sEXft~X-oY0R;Xm6X6$mest%ZlgR=wz{=rPt| zzEfG`qv>e+Rq`Ek<Zy{!zuV}|mM4ieLiF>Nl^ts-(W&-ICOIzqPKYf=bHzoRMkj*z zc10_yU36DtU73vHoq|;~(+ocgrr;IA1x{wotJClxp86#k@TrVx!ZMG_mW|v?C4B6L zxnJ1-B|}t@_1@>)n-ghisl;n?6focRw6ssWQfDWEU-%D{FNw^SY1U5Rqo#BF53a{Q z@v@sPYcO9&uO2T9T}bVVcQrb9@2Z+WyQIJH^%_mM#_ohYw0ye!XOB$i+kM+TZAoj| zd0eS9t#kGT-J#*E_=K&YlUzmBXbL!|s!&>C)Ps{_3ksBhBdw3y*<6ze?zq_~d7I7= z`^?BZH7!$>uM>LVWA{0O?H5Bcmgr)=mkmWya>j%Whok0xFcDSPQ#)1KJ~A76T=go_ z=u@-LQ)b(oF@%lE+M!svcc%_NU)EcR-k$vt<L7&96?Sp?2DT&tPtE%Hf@V?lcMR>t zsfo|mg;E5E9wi02!&~6(eJm+Cw8BqQw`5YpQ;MH5_o$7XERBEW^9{~2;wkZVSK*`X zLN(k7i*A}!P1~3fMS7k#HLc?v`l8;vynupZa$fgXjCV7!M%EOC)rs_mO^YZOu#1at zw6v@<RH;#V8j3y~)-x)-)^NjSc-5?vH70J8e%U!EvvNA6`RuOrk(-_|dl+EM8C@Ux z_aLp`V`3Oe1CowQQ*_Hu$a}2Us4lS$?SFhBHHFre!|JO2!|e?Y+xn2j#PgK-%IeA# z-5U&2gBuL&N1bfW5VsZ`s4?`{)h4$N^)i+mGzcq;ujQ*_LyqPj-m5d=*nc9?qa;`2 
zIB)25$;)yds~_=GNW(kxdcE$0!l(s88`I#l@lme)gho1et<Nm2qX6IFMThq`KhpVo zSBo+mF{2qT4h=ep<kysJCVJUQ_Y16Ts3kTZa}IQ93!c{r;VhZwSwCQZkIJ>y{yf*^ zP4<bvD6WY90=eUpAvGoG&o~Ap$`2{r$!RLC?U`O4u<Q<kdDZreNA<>ima?^Z8p#{H z6eu*-DSW=*P5Kgn?tJ~#u~A_QmBKovxcBJymb~lx!lcw|hV#280~cR~2RV8?qg<<r zJkAl>d#~><?i|bIFYnt%$Hwc=$Ezv5e1G$(lgi~GDYL;puC^ryIVR2>^wN1Zxc@o( z5POJFNI{6@O@}C>q`t>SNijJBdgH=Q%y`<HC-lOfDW92%G$uqKWF8v`Mc#<eL#S$J z1ew|2TGy4xpfwJmiPv{>y3nM|%ji2b1#5hTqNd3cv`u=WpPJj_F}sNsEU$0e$o0*b zowZ_+8tUsgCSf&C9i<u>Q_QnHxW3Y()6rM{U3+#$4Wy#exBelf<K{5?$0NZqyTtcW z*Q{6ilpe%?_W1JS<cBWy^Ye2nRATte)sH=REE{+OzEpg(2F4G2;_ZI7Z<foN`CR$} zHRny^Jq#__r^OQk#teRO)O5<#N7IUpAKD{^PBD}!+o07*|B8b}z*T_jehRiOi_!(S zkl4Qoq5OR6ONttx(P;Ru5DNPD8&dvj2nCwTMM|LhD}?f&Q@Icz^a4`3kRX){i3O=# zXw=Rait^dB@|v1|rE>k#`-9-5%#^?KxxoEMe??8eBXrV(?;RHi0lqt;COh+9{&DWm z&&UD-u_I3gl2i1nuHSRQ{)Xni*70vXzv}u`{@*!ef0wWGi{>Bn?a!Pn@SB#D4@Ua_ zdwYN9Px>2&!*i49{`?{FLIUjPZzPiTSEK}_`2Ce}^Or^aFF8X0!3v;}lAq@Kd!*#w zclrFsND1a2M@oLrX_RpxT7l@HfQCJRTa`d05(Gi?(1~D53MmR`+q)5*wC%0!h<~Ei z_IC27L;|;fyg0~{#KBM?f`dn(5pWS0mLCS=7o-)_u($Z{B-~=!PWBcq<^&R3Q*&-D zJ#Hfq85Bi{0-@`;m^u+XxwYjL<v?(f;7$ZJ>RS`90+!BKU7T!1LA+Dc!2vYruhb`Z zCxRs{$h8Cdh5h<)|NM*Fj{Dyj9E}I+B7OeCU@)Ls2nNT($T2h)S{4OvQUa0M9rYpc zI1tg?iJ@RHQdskMIt(6-AO+ce$6zox9$d||69e_Zz@+s%9TwCFGDLS`a2T32<bS8b zpurVfAlCLf21CIRI4BQLABMcm2#|gWEsG@GRJOA|IGmJUwgW@MVWb4p9T*B^wL;nl zL&M>uiE9TPC`+af93Bbo^x8>>!=rJ~HskOZG9Eaf4@g-o8i_=cm&HMK1cxDEcxZpo za1;oVL)(IcL1c<WU@%ZQ1F4`fAejAoe*uOi-O#lY0||N%xq!AHK^h>G4oSK^Y-d>% z8mc2W3<HDO0f{e$tUe4JtXYuyFkpy7FtEHqWeO6P$@D==W`^nmU}X9rVPvvL<H%$W z`avdpP?k*gKRW=CGZx(I2a>FJ^bLc>W1(Xjj>eJ89*skSReeWUFq5FkWDjN+GF^hQ z2&jHRSu*`%P$)Q5_Mkp89br(U%L8||1%pC@H#v4-2sj1-!rV{{g@@`Kg93uyNe9QE zfwk?#5Ws>k5L+YhfJ6HR%930!(&x`PmQ<E>pW_Y;j>VJlfaBmuNZ$YtG<c;1S{4g! zGXjpq0;dd0hljQgyd8mq&Q}P)7dm$WIy`U@cGd^Vl9qeY=U3mbC=67l2rLRudZ%Fr z9Rdkv1TqYTC#M68DTEG*!D1lu0Z=X$+_AWm2WSfxLWjZQ$YeljGMua|8jgj^8BCPm zentonFujnK#h}Uh0c3!I_5+KClRTOoaskQ6<UBAWw+l*#AnPwEOJ-|0B-uO!3gSp- zI6(N~plcqm4B*E=>A)fn#qd}leFz=EpfUif3<3wyFA|ACL+1m4Vd3O-WcCc`@JNV0 z01qV8KOpgd`Y%Wz0_Yk=st;=SfR6MM*bZ3%43s9ra8MrvI0rDOTu2xWA}fF)q2m&X z#r}Nt>vtW2qJPs5Fl|8F0?I;ViUBJvbk0HI;dpRB$Iiat5pd`@C-H^Ou>d38DGDhI z16B^j;CSf#0=kQXt_w)O7YmsW03F$!Ln;gPhmd&SaYJ>4N0I9dVBmgJNSndB4;c%f zJ}lIiLXrG>h^$B$bUgygJ{~%zQNX2vu5SRtK<x!!Kp{KZM=A^MbluTr5)Y`XNM*5* zexOjG!;t=x_(JDwK!=9f0jVr>y#yD>!k~Ns28PYf_K`5C4FL?S{5$CY4;-1CF<>Y_ z<qR;WoJnng${Ao}>k(MFVF<GJAz@IP1at`KI0E%ypmPa{2UKrp7#0q-ds2PSxdhN5 zpz9W?EL0!BkwQRyOn{-Fdn*!#gXkAvSUAKENEp<Q08fGN(D@lW0|1)asUtKPgXA*6 zqRH$4SU8y-;L&7zL@+vlHX%GfS#rL>&w-2u(#V3Y=KuptAoYPZlj$6ff<yfwG{B&K z35f@pE<u~2c8bPhz=FP$FW6N8H+Uxo=tz%Nc8p&@2b4sHk?l1w!2F<m0fvOG-Jm|Q zeH6g3&~*l2WVTD<0rg`^JfL$T1{e`^k4VD6qoJLBLz6Z%WEk1H1*C$7&TAwLx~C)U z*`fX)Sj))v$p8Zb1j-kf8pNJ~mxzV>og@rA%G%ivEFMUU3<Db#2!=<2-3B>E#siPS zLwy>Ow*j4Zfqx2w53LWlX3(~PsgtbDU_^sg$#?VvYz*+w^^$}^Z3sAAaBzS4&az;- zhwe$hzLCtoB4JSf0AOh7I3i(C-xgpvatyd;5IKXH6Gm1az@R=lpd;Ix0t^S0GZt)r zp*kXAP?-WR7Ho$gasewc)c*r}2(mFv!k~KtfT5x58x~jrbT5uYgE<?@7hurmC4den z6Vg63aDsoE6Nyfy;FL2bTGI9Hde)u<(nCRRF<pCmA~)Dh{_ACM6+26NfP<BT^lG?@ zJoh<n2{3V?&G42;OSlCAZ-F2X2q=sN*r{U(cuNa79w$ZnzmtH6{ot9TGZ8$%{P{%k P=N1Y}%gd{vrAYff<6bBr literal 0 HcmV?d00001 diff --git a/doc/html/slurm_v1.3.pdf b/doc/html/slurm_v1.3.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1068bcffac4c388f4398549242e026640892fdff GIT binary patch literal 845840 zcma&N19UCj)+U^s*vW})+fGhw+qP}nI<alrwyhIm2Pd{W@4ffy|Lgv{$LJb+)KaZl zHP>1-pJ&cln^ay{l$Mc>8JcwH==SKm^e%61Xat&xfPuiy&=Q)5hk#z%#MaE&oPg!K zN|AtG)WX`?#PK_|HgGl(HZig@HX-2SgLZOuG%>J&cHihuo0KIMLkWF)KxvMg3M`P_ 
zvPuOdQz$@W#7wkaV@NSOOsMZ?L%w-@sqEZR@ww3lffBXU`t-g`J@ehYdI4F5{}SAv zb*Lq26}^3k{+dpG|LZN}-La?K&9*qCsn@SuU&=bghf5^^eOe4BfZ3&@py(XzwgOh# z6#@-TC|ao*;wmq&LP|+5-#?;aj~ln#AL~L4rJ##O<qEyFIYi0XKf>C@gIskL$aq`~ zy(U11Qxx#Psy)53xc>2Y<J!OK<on2s;=8@e4w@Yx)#Z#xXuqH-9Fm9%@5qz8UEip8 z8Ln5Aw=YAL(jOCcA0i=WT+Hnk_)I16+#Q1@D|huAxB90naxldBcb4GcYTaM*pU-`@ z{5Ruv<nOOi<476?G^{kaf8c@K|LPM1rTF`S`-i#PXhNL>Bl5r^&@xB?Q3HisV<p?D zKax_|h+KDomFV~<G$@E0by9Kt4E!0{0Ot)POzX{J4FUV3m~sGSRWc0$h*m_t0<0&e z4o_c!<px{S@(PTT|EI(X5QAb^)qV|qe;;_#K@*m@9nx6d+O{@k+$rjZFSbmRV7oeK z1E4rrHkb&t_lZ=UMRduwXdjEdIw{b2X4#dXpE$n%uN+v%51*mO(0(MzlpY0pcs8t3 z7;G<HE|jdr^~vF1KRN^xuqfL`q{1Ta3B5QeIOv1qxh}7yYb7uy#@{)BXcYIRJ{_;? z1d?c>YXBv20_xp6!Fl5D<zh<1R6yBQK5KS@_TnLtZ>3__7M_PA-WC3CtF_ooTOrQ( zdBd4QdxqJDSX>60oM!yqZqv0ZcSp{?aXOBSdb1^)%df872c_@cE^c3K-CHwiled|c zRHnVK;3*QAefooF#1Rajmx!{FjF;=`2;6ik8ivci5OMk{6i2J5RK<=+0|r>6aOlV` zh^H$!%GqT7RM)4JAp@fmEVxdnwg9~T`jyZDt^;I#_W}A0KPow^qDChC1*bJJ8zL8} zZo%~HppC<6?Uz&ItL{fyf6aoI7!-*fwe)r7fZ~#5?4+SR&w)e_q%|k1kB<Fh!1K~F zg(^WHfN#lJVkM|Np79sK(e7@u`t?Af9yJ{YHTNSQOdU=iU4P^RZH^ZT-fS;V<)j?L zO84;(?!aF0=-I-cU`?MhX}R}?PRXTG_FEHES=0yxQ9HC*yFVAKfrv-3R)P$!8^)9^ zPS!xIflRV91`5=1<+vkVSad?X5SUW!A37sM;0Zd}PppahqB=L2%=3svlR6Ebd`n)G z1ABGiYNp0@MjGas@L!C~cSxLshLh;WRZN3eBbmP=qiMnne@xxTv_y(%F_;qy1wuzz zP=IRzL%a$`V;IO!OZ<VK7qcs<H#CKqR>*3w5>iZTmPXr~+onFA>|o%c80nERn*p~| zbq|wpt75E9!@W~R7Y&mDnXH()&c}&bF0{#)y1+=mOhvQHmx4dy5Jci)pQyt<un)fv z;oVy^#<9otcptOqbB3EcX}ZFPrBge9@E-KbwsAmfOZS#gd-bhXrxQK^_WE+2<%!*1 zErwX;EOZW7rt_{@REU=(s-ADTH-30ncgFcTG^%C(3uJ}@-)Z#H#(Anfv3-a%adV-g ze&zt!wym%}8NP6QQI7ju#q-sB`JJystnL)koDG2GhqVlbECu`He&L<T<EuUOJP!!? zYLgz@)iox0WlvflR4tS}h+f%G+B{TfP0gJ~_5$gJPZyQkn6JP(H5r&@lpXC<ngp(X z=U>fIw258YR{cc(W!p#&ZDMQuANBd2`$y~kqiX+28JU@x{%QSJ{=aEtWe<B30(yA^ zvwxi(O>CVB7@7aAo)t};>|7j;Oq>Y*Qz2w$>->Fz6Tv?!{ab&_m>63a2->+rYcYIR zuyZmIurV^}{G-DEGtfW%{u%He)i3X8XQX7}OrZ6h6&59+S2l5XCZLzF`5syDKdI1v zQV9Yb0(v1kYdc3JdjlgAf`7UTIWa=B{%cr1J_33XcV{sr=Wo>bSDTm<BLU;TG3B2| z(eKCnSG_1B9RmT!zn@i<`8&_}uLp;w7iIdV!1zCJr!Hq`X=3#6P7*eZ1Z@BABVoft z!1-Sn{eM#Z)B0ah{Rhz4|J~u=llfnFqgQe<bpF@${!2;%dNm8<f54B4fL`3h!p!`8 zrrDUF|GS|77LZ;^#=yynfbl;iK?`Rmc@sw=I~#jD+ke)F<Nt`9(ElAdFZu(3zK=nI zhy0ygP!-^R3jf<N3v|Z5EBXK5F#m}1k8~`||Fw$$Ou_#qFXKN8{=EbKp9E%N{hty; zGqSS$FH8F$8700qmZQDhKRkfI+0o^DFaF=9{$9TS3t#x#4W_KU-qGUqbra_>)Ap3* z)4?6c4eADI3w3#U2_J&?eZK6cb@vYj>JQ=`0t>ey&%S(9Q4vg%SDEX6yT9BH;yf-> zt}w0EY+hmBdGX!JaJreKCqF96`kmycC_5p+N%_ANdb@n@d`-?jPwpLOo6{euk!iaR zj%0*5Rn!b`xqn7_CQ5lJQNJ=<)f_G9PTFBm+F;t9WM5!wb+)_uJv`1_jLzco`+OW8 zZMI(V`@Y`ajra3V`mHzAc~Up$4gaBi39P|S`VlC&kRZK~AUqKvyb&UeCQyDNQhGL7 zfjVY~;?U2eyTT{3PAaroI!DvCz*jLtTTe>RNL^r0P`~f#`r&S}s%^8QxRpFSIcOEM zxOt})_mtE7dAeAt=lg=GBR3)OlA8P$_ZT^`NrO}HY<wa9@&LuT6v43s)#f65gPHxa zxg^mAbq-3tK89W3n6Q98<jA>18UBb}M%iU{Syv-)!`Y_!;gxGC<=pBTM+qA|g|WHD z?^JQe&}CLo<%x&I7GD=Tr?=75>}tKzja=-r=zZM8_d(TJvbX^+F+(pdTp=axl8CX8 z2Dp{1u&6q?C9cCJ+Q{6(`zHBzl9_h!yqXw0M#xHq7be7pc^&RxTxIoOqGtFv>`)5) zNQ!O<;964jBIZxYc)??OWX(&*=TS+1bIabtV*;Fe@CHv4xDHTV7@hoLC*<r~NejB& z1&{CJ>tgM`U%?moQ5=0-ZkPC!RSN4hIWilep$eI4JgKot2`bC)mHWDz8A-}hkYwAQ z9^LuIG<DYf><v>i^^(77R0ep1c8D{@$WtYlV^w&4RY+rHXnkd8qj_heMaQ#Mcw-fq zLj`z!<+xLo*i^<Cgcg{k#~B8H^&-RVpeL`SDluu#)2i{c1NMNP!^ED^c6&?Q@_YQs zy{wIF@8&1juVmBsDgCT9)_r#o)qjznJBF&o#Vg&x#=$2Q#pKvHThEvP)tWdnmwIfF zcMEMY%T3XQ>QG;2RnS*A&=+bFq{;^8DHODcl>F8S2($_zDo|W*gx9g^UE(r%!V+1P z)F@4iYI)g5!+a0D!O=9BIuigWzn`dlsI!8I$P}{vX}BP=61sbBz$UBkLJ#o90P90y z)zxVS#XCKlxE|>4Mz@n2<EQ*GUibQ}sBxP5kuF3W_N+UW^<DtdTM`!`ChBgg<pHpk zz56ZZSP4#~v_ulA#0|QXDMPK?M<MWsNvgkCd$?N^Q=xoUnZ_uU-XfdUAf3)Mg~&2- zs1!T4{GDsho6BH8^v2M1{O55wdNot=#qD8sqzYoPW={;)=QLJNOk{XP)C%XE6^9Fv 
zzVPlGJQ726d`m3*JuURq*;EyF?MVhmCCCH<2r$oSZcnqL+12p*6FK0r=-m|i?yrlt zLTj1=MmbHO{DOosKL<hasUrP^b?Hj%y6{p&=I8<iA>~o`8cQH4P2=yaisi1e-`W%O znx@yM!OYZNi(_qyQrz?7?G0t_j#jUbC*a|j;5sZ&No)wI&EdZp5yAlbq5Xm#z<;p# z=0atG1xPQ1UFTz!|3vNwesF4lrgrqUNfZP+MYQN8>B?7@*b#e2MGG-H1B)&LQHj{O zUg&d%DOc8ITn7dWtXRywhxVhc3HkfkAFmvg*2>NtNN}23oMb1oN)@RHG?cwnaFUFm zJ-SBQB)iN6Re%`LWB{W$;=xk+CbG3#Za?hG8UMX?ZxT91<*_i<R{zZ_4ELj1J5h&- zdcbM3@w_IvX}jz!tLiGd<|@;ovja$OVsn(<Mb0Ifrd3DF94{?swUUbr8mpE(<q@=q zO%oi2Vr>Ze<aKepGf}sW@MmLDoBn9YA+2d*-l1mA(IVu#)f@XHCtB`mREffUzN(<( zc}N9OLzm))z)L@%(AZBO^X&QdgZ+vxD{Id0>h|&Dt$)6~>UL&ne-hIqdmVLaJ(<^I z+2=xy%q*oPr`tOQ(8&MwKG}B|DrDT8=TKDV5UitaBC&}wm$S$Mr8!}SC`))HP<cpf zomdrIrg&F{$}C|he_fR1b|5cCr7qGgtb)Q8*QBdecAg_ieSQ;J!{8vL!T^&xhwyU+ z4rLB%clj254(2BdBq{?WDtk;~ql~<>_{uM1j5pMcP&E${7U4seqop_&?ez1^wKk@9 z;4Q*N(>dZ6&2_pQndPMS0^Eo;*6Z2)ePW|bkhQ|0ol=33{BSw`6jdaN+B0g4Y;z<{ z+#J8+whuCMzTggm&sEbFy<ScS?yu0J>GReAr1l>f;#8cV<xSKfS=hy0+%bu(oTwy) z8T6T^K=EWos8)ZIQIi)p98Jzvg0_P2{jTp_4A=bwl`_&iszl^9RGzF?hg?N3(oD=_ z@*Ggwv$(qxYl~!ZQuLC#j0j6bftstROFLR?v^1?9<le?40S_TVU1+?W=DMV6Ob(S- z!BaixEN`-Fx6#445k<sc&p2SSP7`Ik2Furg6}Y2$7;ZZZLgSx4{Y<hed>ZqdI)l7o zJEe0ZbyH;fiE55g>biEii_b@U&Ew5sPMh31g_8`>Y0rm{rx1$boQUCRi{f0+=VF3m zKr{GMu<U`MP`k}#DBZy_M$sS>K~A0oCqUB>N41w-oQ>Yt%aw8yy;AKyfPk{B>C?N{ zV07H^VjYS@L?E8zK#1l<a)#NWONc3_6Htd!aHsQ->yt2O46(5_GIcQ2(%%o!GdB0K z)*a+*m4pq_Ad-t#p`GL{cNwMD#OJ@R$B%f+j=K9t%Dk2%GK=TN+k-?>nW7-Ytt|0% zExMVG@HI$kFC<8fh5O27`^p_c#jYTHiSs_1f_W2)h})xKig5xpf&23gC#wDq<R4Gv zpAA*+4VCUeChIMxYi*^gA@>*W%~T#u*P;^`;!v4km!78^A!(mt?cn_0!c}8a{=){+ zf4ZODJuNmXc5DY&(PMs?Hc`7HLTM)WS+B2fpBT6$#AXT6Qs%kb&mdeF4mt>Os1>3U zN!hNt&I@sv+BCi7D%<V(a2%ZC7j*k~nA*TXYWL!=NSG2ju5X*UpX9*t@3bRIg(xMW zWB%~8Y_vv_-*tq^j(PD;#VR+3t5AomkjP9CKiQ(t7^4zez5#q%-gW%f5WBMTOdwmw z5ZjLnf4D|Xf?bj;?QhT$tKFq5KtI9J&|P3tooB*9Q6eI;6C0@(7$OlHsuJ%j_lTT5 zsVXCV5y7V7Bv(~YBeXHI5F$r)GNZ*-^YIFfKpyzBJUubJwK2H3u{Ag{2!fYCKz&RH zsYH$r!cA^tCC19^1!lx;;qwObI%D{%n^+=n7F&<@3AR0MGp<4$6GW{Bxcu=>{geYQ zZ8L)6Y{^AlPuYnarJ*Rr6@f`k(W!P0+AR2+&>&r+Hd@j}Qhg*7Oict0PT6@{?TH43 zB|2KVw%5na(B*S`c$_vRT=x~<Q1y$}_O9E1PXnG+a5r8cG+rSkULz-7COuv!JzfO( zLGPX#6vwjk`96ZA%HlT<hjWm~lu_|T;vsHgvd%&?kCSsZ%ropj+VYRXEJ@TXNsc8D zb7LKpszRr;+Bmwcm|5H33-;tk%b^#Dj1xcWDnI({yj2HcRp&L-S5=k=*5~G@2c{=y zHb;l~`-oX!F)=Ujk?}E7F|yKrW@MqIBqAk1vm#r%Tavw_>%z*4!-q@V#>76whCf2a z*u%j(+&?rv+%q`|oNRJhXs%IWZfY+%b?Ki}TjQG{Ya;1!sjv33blKZo^fzj8(~qWH zg#wy%7@&xMwvwUMbJSv%?W-2!&K1O3x9KO#6z<9pUQya&BJkm};N$o@oGW{-Ctq~N z$&J0dp*e6`{a{PJ3Z9;^FGrr2QgEo8EaTej93d%HQ(bj=f{T85*lAmuOHz`*!cVlY zMJKDh9H1~3AF6=V3G&wo`8D>ORe)XV8(w5nkm!^_MI<!HtF=?BJzX8IGb}a2L)21+ zO*G-(28{=1h9Lu~kqpJLB$v$|tUFkBMr05^*C3+a(a!z++!hx6Dm?sSFu@}IhVz9o zgjC}w`DHmZ((+siJA(>Kt3p$=QggFHYqLUggDO*tIzw{;D`Qv3JV{44Syembw;oDn zGRy}N2jMU|)eJGq0x1E8XM8*LK;Me)@Q|kBtm?*^)Fvx%`X9wLt}W(z*7`@pvD|HU zjKp;!OGIG;Qwb_l+A>_|jM~%9qQ_ytV&w~EyYZ`uv!J`yJS%qg=CZ5F8E8(g1yTao zaq?_KB_fk}<*BEw_15r?uh)}_%a!*p$HIE00WcFYDqY$iWEyKf^7H07Mcj}YDNI&j zZECf3uC#r21dT1FY_||=Be{W7(STErEp&aZ&x{*ph&&;Ay@F%HO09n7xe22XU}dK# zscJ*tr?9iF>Ry^(oS*l&+U$<RR3aa&mX?Z|me~EtYInFhj#*#FZe&-Gvz>|F&ce*e z!OBR(=yE80dVPou2sqBseYVyxQWB7{F|`m;^d+=r<6uO^#EgQ3ce}o6b-ZI}aFb`Q z?<g~)H^VeUR?Jc5U100t>u|;&%hm42PuQd@LwA=P`z`oKbA%0|ZuQ<m*|FMK1%W8Z zCACh{zYNDP_%-0DtScva;S$;RGiPek--6IEUUli$u1$DGcynF1(pIPXa-%$d8&jKo zuaEU4RZ*n!RAjVVZm<$u3&?enggP`<NF@Q&gLl;ZN2H4nw296Bsd1KE#QGdXyEs-S z7B?YIJYSAM!mheu;~kUKq|_PKITsf<L@j)54Dhy*Q{%y%QxeSGTin_i%`p^9pYoC) z;5d)|7ySt~p6&TL$Y(ypk?INwI*ORtVc)ltF5CeMw)x?f)yZ|9r8Q-~HHA?`6AaB{ zHAW}5xAWoTr4V3OGh@9TlCm&CJOhn&X9-&7x#T|^KxLXDN_8+&gIsl?gORr7bULA^ zxUTPMF)P6bxEk$>W>v@1<hvM7mf@M^^M2>(@&4xdz^oBZXO?ecqNZDIbPe$W({WPd 
zii4#$U8)S_@lZ)=n53y#z?DSh@r<cliZg^7v$bnXT_aTQiF{0>3)D-j3={-pY-GgL zg-t*FK~2CTdiYqOviwBdY~gjxMq|}R6<jVWzkE+A=X`2n<Al^Iu1<cq{Z>){J0@qM zo#bajLi%YCb`LRIXl6rgreCZdD?Fs!qOGOtcl~#JyRuVGH@PJ?5-Zf3-dK>()~TOI z1$Vvda?8G-D-11p`6k6h+9}qK&OAMK>KsbK!n^M79<-=eL1Fcds3gJuCVF)7_~8v7 z!qisfiSF0^>bLbp>}79TTW7&i9GDufP*C6mm6?HG%C*2<ZfpJ-TW!Zy?@vfr%0-y; zt^x0o5!GTSn%}`EvXaoumhd1uzf72}LK>4AMm{=nY>T&xQ@y#?pYt}r_S$>5BN$su zoX6_tN8_*7M;xGG`S&a^7W|l>jvIj|&P_Nv$*-=t6TQvH<J)x11d2;*I1V5szz6j+ z{%rA*W{rvjXb>u~nG|&yA(eGUiow;|q7-+L$N8o?{PM6}RF*t)G4|eyS&u<Z`J^M| zDFKR>5t8Ds0hiP3w!zlzbOvL+>%!zs-r}psks$Wh1co~3sq(=K`5jcHqI}-`a^ASf zMMv2biHFi)yCXrEc4)&%;vBw?5{_XU4hH-D5cun^n)uwB=uGDzqrnQIMrLNJW=f9M zJLZ1%Bo<fe`X&ZUPaE+*HGe!}5(EShEC>L+jvzN3Y`)4#ur#ryG`k@yu%kUbK+e+X z<9*b>)twrr+jW7ZqtFn?PiiPjWeB`h>9JhtxgxQCtT5)0qx7)+L>)of4Zc^GU}ig- zJdT|8iDu$p5|pFY+WG~);^vmqZHuc3cGZEvlVxH_N)l!o{%xDo3BblwMJWltEWHj6 z70?NM=wpc>{KRC31`94qs7&q*QxgsrGp|a)Obk^+_C%Za(4lj&qn*|6OanjR;6vP7 zLAZ~To28kRoBBcv^>bnEJy0t1{Z^MBl71>y*uv9@XQ8B}UZG;#&!Cz_e_Uv64&iL1 zd62opR$B0T+q$D7ZIBd_ml|e<222Y?9jMBnrczO~sWq157jsm$9KQ|J$W61UkI0IQ z+FZnkR*ICzJvDGqgx=4`kFX$UghQ=h)uc%eNQ|L#@D}@u7lm<-_EA;(9go29D&tjX zWBD6C>Wh)GNCsznhsQgZa4G32>MCnn%=GLN{O-LIB&pF_Az^Z;7Ko}I3Hx>_?<6Jd zBs)znhr79$%*I8jweD#SKJu?~d;c3|qc(`BARo|0jIrK7J9_JXpjG+FsM!hV#%W0g zRyJpc<~4@bv?uxM$~?S}E|aFiX7PEvkd@{n2oJn7W_P<p^CHcv6TO;=3U0F!5iod6 zQl-N!Cp(X)KP1VH+2z7t&r?s=lf&X%d&d{NU++)P{$~4x)ShZJ!B*!h-JrILuR@ud zs){mEwu>F=;OVv3UL{Xgqf6KBJBeZu=3N*b2VTQQMN3bDLWLlr?kfRVdwMt=d_F#m zQ^xh7pdkqnd0H6q`9Lk&XNF5%j@w*HRPVj;R)ZKRML8`-IURgO$GmVAeY{q;*XDY+ zyn+-K#^`K?q@E?hL=Gx3#V6Fq`U<4j=46<rP?g9UAXe7$sP}%MfbMYDfAgLX2wx#Q z7yfupkya`ZVbmy1pyYy7BK_RuY*k*vlbmK3z&w<)UuPirCmZ<l%K15BM+WHcd?M8Y zVb(ZHOH4Ig+MUSBgF&a(XdeZ1-Eq{eauj)V#fz>kpTYyhC@K189XuTDO$0(l7fT1< zyU)j=_bfeOw8D74XK#D%fmeZ+d6%&!_}=^2rHX*bRkB~#`Jb*EfLI)?I@Rl!LUuDQ za0tdgggqO1Zzjl|<gnJBnC!@e5G!%X(XrS43*J6Hs@^1-K>$iGVIhs+c(vqq^N%sW z5_DnwQ+$gQa8i=Gs|<QVy$j$_@M5PduVK2Z$rvB{8LcUd{gIxtbf4NFx%g5?iul}f zo}{VAuB63GbCp}Ik0MceJ4k4HeGF<34d<Krf!}U(x<8CeXQl=3?a4bkI{02(lIfow zr;<<@iHsHeBGR|-rx+qE+2*hO=DpmtzGYtSKMl2{-Mtv#MQot6x7TOQK3`FHK)dcB zc-~iauI|p$5I!@QGJ<5GL6jtPj5HN2ge^<N+?#lr`j*yJ2A95(jHUN;Bc|Bv93f{I z)y>J`Ef<Z&G~P++m-2YW??K`?Ta=Kl(r6t=!y|iP-`rxmy$gs=a)u9`v|M$cZ+Du1 zJxlV|-G=UqGW0gnBN*29_Z;xDWdHnToykpq<OPhOvBeT6+9fc#);&F+g@*&ZYAr25 z9Ldnp>lQxYUe2Fd@=6X>p~W>2nZ^^Kv8Xmk2@*AbG<pd+dfs7&g_hVFGQQk>{eJ0B zCl-X^b6(EL&(CO?Y)@vLPv`VygrW<`PqEe2JXQsN#IsOY&454;Gqn>rbb-VI%|&H1 zQn&{#Pj#UR5feBt{@v;}9ZB7bm*$``%ldJ%9fh3p`PxTseWh<3KJV^QeDs=;Z-<mj zq&QP9Jlz<&UvL0+5PupIMwuM6vF#D~N?2N?Qw)on{qEflYn-goa{sv9#u+3Dg+~mz z)k-9_c|O^?(imyEJ{;W#NP@|%;4FOx+y@`(M9|_^UEzkMnD^%nW^`~e)xT^ya-SUX zJ8RiLd#I%Tbc7B>R_p_;xxb!lu3>W!4LWi~YK@$G*?||a=dQwV4VTW+T#;QV5?-jo z2F{vcAZ+VS(~`HFE^(I+PcgrNmiM}^dpwo^chl53*dN`-$k><OQ}uj1eGr>+lAHj` zx4=?KI_%as>SVT9E}|P6+iUaN*igP?tqmPU`anLIkWP%|mmC5|pQPmGb&gg>jt(>9 z%&{2|to>;M+rvafL<Qyy-+ttS1^L-A6v)p*!dnN@JA`9P-^Sg-n7O$_V;?g&131tY zk^Truy!-?<-JT9rHqS@h9?BR@Rw0iVV%K~V#DyHb+R@@mdVnV3kvz$f@<0(9WI~U^ zFAvv);B-3&Vb237HNW-S#nN0aGPb9Km8lL!!a+!dMU;;`RW?+7Rn*yu*qcjis}`ix zq3MFzF#?q7ES&hUzP6$PDHq<=P<XH7or;b=9^tl+LZgo3PZDcM(5$mtwt8O;UWH+` z1Ibzy*JwDo9iEfxuT(ny3%iT6A$KJ=IR_gdNdX6OMJjy+h3cH<>e%d5Eo~oZ137ow zumYbS#nH^by#aXwO-)MWMLxjsezNOvLgZ^yU2HVHdJ{ZYq7BF$p9Scpwihe+w1)ol zR~eo9c#|cV1?X!(h(u}6vy|Ly-1lnaUQHMe#ugQ4B28yxdwdD}5$oZjxB8~=IbtL6 zJZLREJq!*$UA1QYGjbp%4q-AaJ4`~+;YN~m<SH>Q@p5uBv$EeEPhJ0JGCZ<lxmUe; z+i%ZYFB!ijQy0U<l#7rWjGw-di7ld8pekObuI%IJxXcqp-Uq@F_6C$`L>39%fNk=W zEk%m!4nl^li8kg*qE@CQbq;@mO?Gdg>+hb=J6IhwtJuV8o5m2x?1%z)i(6WZUT3xJ zy>KUpfMkTzKy#X*qFnd%{SuI&750XJ<4L)PWW$d0e4qH5@-eC~TTNlV+JTN854e7$ 
z>7**GDva=RP#%|ed@vUoeNL`VFAteLJjk`XhTGhk(&g3<I3B5YL7Wn9e*6q*7&&g* zS3EhmG*2qDv#K??usBS*K+!SUPaQ2qcUB$Bl3vrEVp1Al!%stJlPgudM`Dd~2)Fsk z5GfptlFRUzN>#h_xyI8Jsd0=zWQV6bQ-xQyw@7gwFGqTTlCtRObmt(sTu0d8uRWu( zI5GbnXd+^5tjdQ<F5pC|_r=!SM_>Q|t|`7IE@GNHwaU|m;G>A50;!q0iOVfnnk|bu ztPI_cCyO^<=pQiPE#EO}{XTG5`P<LWmM4zU<h>wuc?JrsyFQ}-;waN1GJvOpqg1pP z)mc5~AhDFR>my7>X_`c6n89R}_-{yUkYt3c)LDWbt7J*He8C)9*5uRi)3U+VG{sqi z9yv3YLyU2!TE4I?Ob|?UF|XqBP}}A@#8eZfxF9dOI6oplN{v%gR732R%BO(uc&8aL zg%&GK6*7e(-#J~QTDh^Q+1e2aEtGq(hJ(VgBA(6{6Fjp!VvTf&nwqJhm7oF0A4}(E zXZJFK?T7OG?bLwZGI4{{6t;U1LwGeWd|)qd4rLM|V<|IiEfiB2o1ibvUf^EtiH573 zf@ztBtC>9Rjm(pMlo2(U;4S`E@L_L0$(p}W|4!!aOgl85oYV9G&`{a3%B{lKf?&^< zg=3^?+u`fVRdvl2AoUfS+Fcx@WyL3|xj-3NjQDvmT@x4?t%ZUB!OBB~md?3&Iv99b zIGB38Y;OnU^q&(v3rpWd-`3=m+hDpJq&FW_tC{8X|DD*s|Jm0Iguf2>LV&d}2-0`N z>QqodNxX##+D}62-&4KR-9TGO4Bcj8gSvf$diU%fGE6xvKr2W|A3!?|G_@Ewwt%$+ z{>#-rQKJ!!CC~wXL*Df<IoY^+g{{C|r4=~Zk^m=^E2!H|k?oHrG0Bn$!xmr>gKUK$ za7a3BgprIhm8HWwSlBR0%?%FWR~e6IcHDQ^nnj;~emk9f)V`dYNlkt3W+wm_J7Ivv z#xYn(Yk`NU$`P-^udu<#$<f}{R?pSh@dJNo35w-?bet^454QtPe95mfjG<}S%^6i$ zFLH7QCS1lD9+K5bz&t}E>dZ4DLo^~&l!3zQA0<iw0Oz9D&wzdCQs8ONf@t+kYf~XA zukuoLc!P5J+F;`7j%h!Ht|99-O;h3)9==N~kpoHo-IyUXFmTabb3?1tYsiF2j*BfO zEH7@YYHOvpQ@i>BI39^*uSPEg%gD`6`;vY$)1Hq9*>kF#bh4s`Ik$>R>!|1J?dfT1 zZol)sI=+pJEyo9r`f&t?ebqF{DYNJyqhzB7*j1XdwzWZRVQ6du)ymY=&U$X3IeL~@ znpMN>i3wMT`(})(8+3)o2Byc~L|~opLncdGIhw|Uh3Dl#sJ;0;g47yv<~(~f4t)0| z8W~hNDUCHyya=f)GyyiFUr_Ut-{;-^$98XpHLrW*4`2J@(=#fb3QjX|^&|hW#`0(T za<8CDOf8>qY1<M-XEhcFAZ+~f6etHAQ0KP*fZC$I?*Y#v<9#XFqlKO>xCF$EnVK44 zRTWrA8f=Ww#ly?W#0#5>XQ$1}?P_^(;#C%mmK{i1nV*8AmA%%Y<L|BW*Yp1E3V5h@ zyw4h0=zIB&p}YOM*VL$2t=>1KnWXx5KF$tkS28uYDKD`DlpRb|o{AIPxM}r;efnc} zSWdeXaN-PTY7J<byU-(G14X!l<+1j#;=Km8dyBlEpOu98&*+!@$J}CVI7M~$MLV$^ z7D)`D&Wjduj=bcg=x6QC!NgD0H@&<d$cxbm4^*onG72Bw=cp-Oww@R&^UkR6tgQ0R zV9X^T>eO0V<WV_#o4Y-&ye=MX%FbKud}`cm`8``&pKdM0o#Ekf;q&tnV%20VFWhzS zl#RarOii3ktbkiX%kYUl*2nwR-QfYaR-aSVxFBj(S(=N`Qw>bX)IHn_RngBf<|a88 zDBF^Bo45xn2MhK=nw-u>90itR2-;7CDUJk*u3Ti%Zg%%(paT4S+dr<?KZThfbwXka zk8~zj;~x&u|EgRV0zY3moIo~fTf(E&)NCU$Vv-)&7xYRD<EKpD9F(7cYHsp-Jv1lJ z4)nAK2;$Nn%PR!$d=J6x2EQ`h>9*%^PS>&B`F6WCxt^>-N()L@$<&#lChn_ea6Ww- zn~t=0Qayb{e>i(##ph`(phv(F{()5fsh2zD*Q2_;v8ko3g<v{MPu4%rzc#h3GrOTF zz8o#Q5TQMB0|89TM2L*{VJ^D`37>5bpUt2elgDh>Qc9}xNoWedR&W;F&CSVqU*c;D zY(Il0vcuIRwazHOUJKMbCqG!4W9O~^q-b@Jo+D6Sp2D;Cb~c4iTf=6z`%D@~M7g*- z`Q_q(bYVQ#<cH)6dtKQo&(tILywdIUd_L0c_4JPg?t%0(8zxYW%yjSPPTG!VW0_5f zk+Fp@NK1Q2Bp8ra^sHDCf50jLXFYrGWf$&uJp|>iS+3ZIheu{tX9eaKf}|!=#6~_^ zLtq8Aq{MG(!jY*8S!v3^vp`AMi(LO17S5W~w0M~WAjkMjH+guJ)flBYhE{eRO!GG= zY8WMDjl*Js;tq>1_;wv<hN(Vu-TpeX;q4AYR)VoQ1D{8R_1&r3x&gW6dl1>{cbv*U z^2v2=s`HC_eGGyats;2Nbh|XienYe`i^vQnSB(qYRdbn*rLC#m+1A}y^Y_aisB^^m z8;9t9Zv@ySc<w$7+}{m07M4#-?rhDE$!<$ob}L}xhfL4ntA$XUt-+lrz#RGxtry_| z#R)s+?7(E?2KcizKL!u-H#Q;Asy)%M!PoeYoqGYI(n?Q33$l2+yC#>@rxS|b%2kA{ zGx)HMU32yn%0oy`wBg{RqTn)bYEv!zdH_@VKgQkw>Mk}ez8`ALjdI*<OZc|@-p+6H zNDpW%Ue~*ON#wKJUC#DVg!}TviH`Piwv?w?wP!iKe4RbMfR(dHGR^B%hQPR38+3cH z7BMpp1t)rPx@NaSRn<X`@aRaVdpH&sLGz3ZMhb2}dVS9fwbXdEFp1$*4c>T#<y?W~ zQ~@xF^0O~J$89r@`)B7MIgWQ`ksip8QsWIVwgTuk(3peeuJh&Y^RgoCEWs#z9=}cQ zp8N=L^u?CuJ3-N6Gqu*1xGCwcm8q%KR71ey5pH(CJIz&YHta@y;q;4IMQp&!OyQ{f z)x}?Lr7L`QU%-u#vd=xn^5@M#EB;QaGknFF$G6D^ZT=R5{;1|m`yCt2Qr}kZZU)=& zBFK$sc!DrJ#?iEPQ3fj?1#SH1m}Q($Wtir!x8P`{N4#=3*hqC$ih50E@^__b0t=KL zi&UNq6P>xLFt2jUd0&NSKUL;D1P^kR=v-!1A87KTr>{F2gerdf<hue*jOQjf;OKNU zzFd1a8`S!zWYz~IskuArnoRedoT%lWw_33u$1<_y2K2UfvGjIyxc}N_Y!Z_8l;3<( z5DV(|^1E{K2``t+@viaejLZ4k+Zk5V`Q++)#kU<Wd)e)MbF&x+W+DSC_)0QRaw#!A zoC=uW2GmH`<o`INjovL(<YJbRO2gqx`N-||C|VL?*!;7o<z7xl*h;`aH@?n4G_@|X 
zIuIx~dCLD=NERj|F-j{}=_X&{x+ul1G{vbf#ep#O=@}vD`O6A%)+GD=@A|l41wWzA z9ZRc2!RdzO=C!BhbX}T916PB<U~9r{L1BZ-QjnP)xLFIU_9E$nHIDcDug)_ruF>P; zo*qSyKgAvgdZp&_w>{q=+x`OftT#O+uyJ+40}i(Xc%NS+mj`xq_kKSdpIIdDoXxr5 z@#+Zj#Jzhq?u85e#txmPzrCN=t`<g0n#)iF60ECD1CQ&sGcPUq#ltcfdM!PN*J=hk z3~O;lM!q;#mQ@bQ9lm81#i)VnV@=ZWo>mA&`PD&*0jCxZeP5MGFWs69a2c*t8fuc5 z=#rEWj+CR!q4mb9r$fh-!{kqcFFW+KL3gC73iDQPCnfcI5k)=$C3Z<wI(X&5GM-1t zZ*qF%YOs-KUf64#fn%kpsgH07eyAOvWMM&2nv9fKOQ_2#kR%9H?|j#-^L>6Ru1&W5 z!I3LYc~~MGC47G1sA>G_jPHfJehm=ayZv~f@4UYO9{27~!;M=2IT;%vNROtrr3Atk zAAcX8uK|(`fc_HR(M4)1L;2gmXA`{;kmgxwMu*q0Gh`>2cOztln(bkk(WxxG&AqSr z=ud=f3TO{EDhqPxJe`G{A3CQ^Lhgtn-r<6kX&rjLpZ9hnYm=$;re-A>XO)>}6?@~0 z<X&GB*&cUvrJvwHJ3LLQ3v9HeDS7)El@~ix6^FD%7oCs0>(947LdHG6b7Hz%5yHJa zjF_qTIo;g(FjAyA57v%N@&oi<c|w3L_82wXqI1_%-B>R4c&3tOE8rPG7UbsQ2M?0a z>Ft-h0kdwoygOGvDAvW&b9sNWdUk;X4bSrXQB;`c?=A9C>zm`qD=si1T-utu;+kB| zTk?nHlm%&a@Ue$kK|#@Nf~-Bk!l|&enx?1*mm0T)2&;P$J=k0%!%0Ir3w<tfq>EGk zR^CV!1{a_@m!$(pkX?xsV&x$&>Hl6bOXUOFTkeH8d%E0isb}Gzos^N05fK%Ylob#a z5RjFX6_t?aA0DqMuhG;~x&d4*uf3y<tZDbuW2P*+^mD;z$5eQ+=9d+FDZgBvKdEH1 zn@s(1uKxQ+i0IoxsP*>hE9UF?h4(`#3I>s6rlw}f2khNUs_|p@Q(%|T6}Jg%kQRJ5 zNT1*5;cR?TO-`>XIP!(Q!f@N?<!m)&mOrW2?romW53n3(XNIO`teg$Wu~bA;7)3X$ zbEfqdOGjhvkLLOYQ1d89R(Y-2F^(DM#WT;DtUuhzaHGSz$Jm%UL@p4rm>MlIGa<+j zWhIVkAtDw$BNH01qbeb^ssS{cfh3#3XB3s<1Xn|ar^AhepW6n&%*?omuA!cZnsTh8 zouh<ggmrbbFKROC&Sv@{y<9?Ej$McsSJ#$~PBwPnPZj^2pz^~}gk`vHZS{{wE<7;k za7jcDz6(SuiMN^r(m4-f#qO;9>=+0tay`u*cNaq(HL6>2HRg~UHUEyOj-D2Ocuh6C zt&KQ!1?Vq(e)+BTFJtuT3%$b6pKvdgYP0;StMs~lT6-c>on?48`@@+C_}icNr_*6^ z@_HZ7d&_V5dOdG9FP(1Rz~ys)zkV47nBD1${SE~5>dTT}?4~R!uXoGu^m1@ChCV)0 zr-71{Wa69cpWIJO$kd5iRW4Rif3T0x>0C}x)k4ciJVC~_-2OV#*rdeFfN4jEv07J9 zj9*tzQ%}Rn$i&dENKeRV4xvOqhu}s>_<Og5?w*IGoE;I7UtN9~eoBY$%fd9r>+|X? zp0|_RXvZz5Tl8CRSh$u_MQETTq70>24Y$25rb^>EyIU=Yx%EN&z`5!#V*eggJJ>XD zcbZoJfm@?`CkUBscY6a<ZwGV@_|cvMv+*P9CotJjtizSQ;GDq8V$eWr!i^oI-^=So zWoxghI~d-c$bF~X_xD2U^Yf}Y8s9%n6$MQVT}2Jx=BBQywyLVKud}{E^4Y$NuCLRN zsePv2B2Lg2tnldf6-<I&BWDv|A0NF7hv|&BlE&1O<RX(I6XOyg83`GvsMy=vr&cin z>5IQp**Y>=TT9W>u-=I((nwi<tJLKpGaGpcNqq#ilE1&H)H3`!TbAPD((Jk8>APd( z-w&|ob9$7vb$!jP^Abv;ZhtW9^Im;UQ8KP>Q(+UXU0Bg54UzWwY3ZuhO>L8@*l^9_ zB=9Uc3Q$&lz!z7D{X)wg4d*en?B9QVWfwT_34_?hH}r?#51JWyhklPr4O96bxClEx zce`?Xd`!c@3=fa!6ZKW0q@2m(Bqpc+I2)9k8@}Z6Y`WWD@7}^)!g5k2wUx<zo=7;` z9&X7$%A~fsv!X_vHH_xX$%V@HG=B#tCI*3my4kDc_IiwtFDrvv&|92Z7+PLk+KvK& zxWlV~FNGXmmX4>_6`Z?y(juqxPzX#4x@h&4l+uaG3iNic`1JF7lFSsP>I4w_hg2MA z6z*yk@2w6MpGgr~tj=+FznplOt2Sn&)4D6%XYHvjW$o?wf&ecqAlS1`PL`D?T9pY; zPpqJgEWc+IzZ6{STj1qe>_B{;0wXx3Sxym`T=oW*+xfjeySt@^hHmVPp06i$HzyZ= zygp7_VV*_}3<N$sJ+@r!bUtCQ4*l_Kc)H!nOgl9Ej=(c9!j!JPR^$wKP)DtrsR35_ zW~KXDtt}n$-R%7{qZ(T)KOP^CYxP<?-XAZpJv1okbn&d{`FW|eY^=yJX{FdyOS-eg zHnq;>TiI3^P<;cclh-kcm`n+m%QKPreO@yqeDt17egrnEf4r3D`cBNsJ~_H=ZI3vn z9s_1`0y)&v(%+wZUy^crJu_V_iath6R}ZW2HxwA7P*jYAD&&Y?z{Ci<^Qybm*zK;` zg;7p4H@$*I$&LyO)auHmTJ2_QYJ;q@cQ!QFw!sq-wmcm7j$g4x77eMI^wu^sdtAJZ zX10c>;pCq;xjW<Zy4$}FDVNCY{Qh`-T=G$6Kd&$34XD_!<MRcU`g|>x;`4vJ-~9SG z1?2AdJVRO!BFhU@p<;D;LeTPE@OnW{kq?XDf4)>qM811>eFz1yrykpF`2p?3_Mlx~ zZ;iV7tcr3DN6%)hvLlq2o#^_zm)aScxSCi12VQQr0@&G0Q&QpIaNLp707rY^&Io?l zp%qLp5S<C#jWx;D9qA7q8ZtygM9R=0I`Ury)I5YlL?nczhKTYfmlwl-DDUl*VIN%5 z(lP3LOsq_Rji`YMfyaKV?2yt^PtVM>v^88`iqZI4yP01{wsux#H0%$<?A!0>4Is<7 zEED+Uj^E59;PdI9n&kT8VVzXc(9+P))KXBEQBh0F$++D_+`>pjOOG)<i6gdB$~Q<I zp(K))8W<ZJA|azHtS&7wI5pefL5Iss)z);gvFa#mm(|pa%(~&tRE3w-<>w3dDDld7 zBLD&IxD=D~64SErp;=06OIvG8dkboN8+scW3JM5Z4vc_P@N^ZF5g>HJX(c8k+}_=3 zZ2j^6{2UcKDdGb9GIwjO|5OyvW$QRbIlfx`dltFyFfF6s)7+k4Mu2YVAb@uaRdEdX zor+>>UDf;bjO?6}P43l}rlzJ4NSu|P>2q|J9K+p;&nIAVr7$>siRYX$KR$j(WC>aY 
zO(i?dIjxlXno6xzOI1lxSw`XE-qBy1qpg{>^@oRJBtQM?dszl1c(QOcJv~Wrd1aJf zXqlyuR6u^>onApwe(hY%z5~9pUUQj;c6@3`zBpure+l@->M?fKy<)RHy1GfiI8Dn$ z8yAxf3x~Y8dyou?m=w`vU}C4B9P|0!BT`#mmw#_gj?<l`hJvn+Ge?HZIWk&KT>)89 zb;KFkxI+;Kbg|Ab1mD?xXIEF%KD=|S`+H~YKUvU`w{zv2T6KTMX$jeW67B3K4s<^C zzH%qGYgkN8Mc0|Iq89H0?L;q;JlEUf=;wZP@uD7?XlSHaSd{DF{Owua?R2xl#5nP2 zZ_i^N7nd3si>*n#mP$`QDW7FzbhuqySkTPGPDMA-tDu;Wm1A32%`YzA+uw_ro|&z! zI6giuD&;q`G?q`x{05KsoW#7E3TY7$N;*Qe&X!QlhrD-H)s!ORB1a}gy`CQ`dpHsj zlJZJQlvLwF{)2<lP)6F6p0gky@dZSJb-4INfJy{>w_JX&o5!ugdlFRC<hHc%ELc-D zQbttQoW#U|!D&?e&}spY^;&F-F^R?oCSq+I#tz^em$M~=({U(u!uQWG{4$F&PL8$O z9gJ^Mbo)6olf-^i0x%^drJtKq)Qd-Su)i<nsFk9#Jw?<}+iFG0*0nGL0MUWT0^>}@ z&+QItwKAxsh?S592ZQRT<{1`?NC{nZUiFd_lA7ongO;QBP>4u~P!iLUy4?K0(03nF z*1)v5ei<@W{)_&=E69X^1>%EC%h5O>_#>r+EW$y76O*!!mS<#CcuhSc3KC{vejcb^ zEAxEE+r`xKCnIBh(;?V>y`YMQbBGZbHp(tP0!Kf@_bNf(HB3a_B_0&2B%@H2Q+Rj% z@uR*2V#}HvtmXH51&xg^D+>=uz}rnoPVsJ&G4o=kq?GuTcXbb}6n1aV?Q(`9|N6ep ztgZKYGA|^?js08|k<j12y3^$W5}AsD?ea{#k#Y9*5$DIixF8o3=Y6-b^h|+D8d4Vy ziOh^lO}&_{wYkFrbOk*+y*>=Jgg5+Ntpy$mUPU7v)_c_d;Z>)$<M-8ly2>3&3E#33 zEF8SmtuwpR<Mnd+F&2Y#Vk-sH7^Zcb3#eyc@8<MFRs(^+rdC2Gp0h+VC9_AF6S6a6 zjvrhF&(jk!+RZO4eAr7WrRVpqs-c2#`+jd~D*oO<$M$?Gs1klO{?Xjf<s{XCxiHdt z((CehJY<ALwx^4ZhCOCu(|7LV;{(rD&=RU{serZWF?5G~aG7~z+cj&VtpXcz0Qby+ z6#Ma0l8-cV_jg4c{|9KTzrT<ekTQ@XPZ!+d6Hwo7WF|Hpt~V|hWR@pl_aJ@(BKk}1 z*QXs~*q~iNK@Oh}7boH4bDt50RCol>`^xJEw0T=r7wo_m7?EdHrjCwoaFEfJ{v!X} z-Q@@O(n%5ug7Pm8*DSj$OFJ?mGA6dAFBd=EUhs>+T)#HP=;Xu0A`qXoD|*nu9R&XO zzOi2jbA~oRrMEaA?JG60^XZgx9zw(*Y^ukD)2?oK^^NsyT<ZrLW+#(Be;Oi+^ZC6! zj>I6vpA3hz4^HnkSz4AYFY5(@ORZu3N>x)+!o<V}?*EQE8E%Uh(f-m^*KYH)>TK*p z&ItnQ@qSsRsom-}tJt^{u97qqCERuTHugnRE(gy)`EvRU6fAD;CvKbT6)>0*_X}_Q zbgYzcvm8iueE1X?>2RhxJ2|ol$p9&h;WIX#Pr$=G(LsQj`Dy7Ge2;Ad4*w5wLPE^0 z1+PA4;Q&6M_^{0&EkqQKI4div$XjQCxmzyIQ_f2q9-sdW7rxd6(l8HadPaw&wKoeY ztBRu_tg4_sKU~bc)@Ek^!cMOndIuC;X>fB9$xs%b4;4H0hxqXx3jcrs7(aDfrj;1= zjMk~GO;wOTcPEz{!lO`|rG8mlvd0n_L)W4>!CYQ(Ay_$t8b1$DaFmC@=iM%EfgzE_ zsd)-|eNju+UmL;>N(wqqh(Hymp1`_#H25mNJiVP=RC{gx6#HgjaMPa+6xSASq$+X0 zwR3Fnb%iEF^6TopL%4^1*ND+uqXu|HstTB#biTlJvaUGolB4*L;uyls-pCWVJui%7 zv)GUJMm9&!rMlwr_|kvn^lr1<(?<1b9-k1Ak&8>o1n!-)9;v177$*Fqcjr8qY;0n( z{?IgW=o@@qY_h|`0`g>RK1kQx*wMw-Htx)JKGU}#o-PNjK|%0ADZ|(u1+-C=Z%{qu zL5}yrjAD-S)V0zY7?{&Av3FlNcYmj)VbDm&)c3#~tLh5WbhWrz`_A+Mk>7!}r!(Z` z0*-8lT|MBzvF7@Mi=Kxck^?H$?3w8MAkto5JqBkrG&DK>^sV>9SGgEE{s1BFZVp*r z4(Qp^q48TXwJ|9#ZKUNPL6oh`PV$kqM!Ek}Gg-aEf{oh)K8hH7T^#w|wU5@9qq@RL zP}<q$)MMh_T~QI4706Rsm)F>f&=pf%JiIg%BggME^y;z;LOd84ArbBoBC_V;;lOwm zVgGu`LP#2iLsVezc*K{59-ax$9u*f6d{C8Natn3g2cI(hqLD|+M)os^cPMn1zn%#p zKkN|90mH9yGOAt>5}$>cS#wMM;|2pgjQ}U~%BG@rWVoxQM%BOpnC@7w+Y=19Ff>$C zMCE1k>B^&xBN^ooo*e{o_9x=p+<YW744!Xmc}+%OU_ng_baIdH2ecYEF%u1BLHlCx zA%miwU9}N5lc8KLr&Dae=hQm0jEg!KIz0k9{?|+JebZ;c`75|*Tbh}HK_Et4S}-WM zl{GU<$zFInLDp-n?&;-e-yjb`%>B*KFwfA{kHB{asCvZ9eg@CSzw233OM}y}&Mx0J z2*{8ijfIh&k&#t|ORGvUDikCXWqEm@&inh_ti)+D${(!YKv6WpBau{qq~^$Xl&t!q zssj7Wqpdw}c0gk=gP(C}k<;ZB1qzB0$JiE_kPQz{d&;Yw^J})ekpw5PW-kQlROC9L z<v7$%Y)Xz_U=G*|-i!n+lN}9$0Q9-_%Gz(<!xE9VswE{pJUlc7ynv*N8{|`7RJ5YC z*6QkX8^5nB$JIfC&^~;5^A~zhVQEQ;qNV~DD=&QJ#N>csQV|JhS{kN_ak0J5637(I zR7|QW`cn>10Di2?^|GQGr_3*D;ZZS3+4Xr<zqWv!7P2}*YI=w^&-PA3-T@kGnoom# z+ASSV&kprHU>Hzrq*O$}uKs98s}=R+HM7iAv?ccW#%8Adx(mwPK;ZWu&Awfs^6~iI zMm|C|o}$oe;#FemN;;^X!C8S>iIGGDi#i%260)k$=c|kijFOU41NL)U0a#$RJ05u@ z2LfXiG<oP4YMhD-Tu4a@St&_MnmVV*C!-@Hj#geUQU$++VuGWiLxO@Lva{ucql2D! 
z@Q%N31~rIOp#XJ6Ha<$CqMq7>`AyLz`d8i*Vi2TbI}jt;9j_Ve{5_*5JcurB>1kyQ z%rMB-5a2g)YnRkuvWdA@-X1FJj~CNMOgrjfQ67r0cS|g5G6W-=W#yEqDk*GvhZ)}Q zBMhp?8tNL#%E{l1zBi(aAtWwUOjWd^wD#^Xdy|DDZFO;lLr7e3f<#b2!l%6$?SB9l zLFm3K9u$<6%4#a)6cjFw_E--c6AQDazh7-@tFEp#R^|a|h{^#T9$p@-DGN9(@G}hz z^-3$to15zJFfV*O=3CoaDyzyUQCR})%!9sZ>1Y|78Ha?2LX4oVqkXs;bWb*RHYF7$ za6Si}s<~|_5xoqvqFZX+cva70VZ%U4O?5<U6bA>}Eo^BbZB2D@N(yd%eyHNIay$Zp zxS02L2j<TZFpJs+7hI5(0dJ1=SV6cgFE0z4i8U=yQCbK)91#@;QXdx`fy>*7i3+i@ zvM4JnFfuY=<p&yYadYP8=Pa$v|1mmWS_E+}bYvKXA?`R>!9x-f5@=|s0`s}q>9EgO zv%XRnCD_^7jEwYIn6Wbtn-UQbDz7SET$)4aJT`Bmqosb9h?JT~k(`pYv@jcMZkdvb z%FxUdoR}*P<{6op5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN*(AC!|EGcSiti{i~&r!_B z#mDqs>AG^Ii;4=nssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY8y7d1p{Ws^vgZ1)a)2=j zMdVy+%u0yBoSw3d`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_Oh^KEuF}o<yVC?`nM5po zv(KOBSCp5*l?Kt&R0GF_RfSrc>VbK850@JgBT*4yxMEm9nv;_qbOv<(^{bbM8-NSt z;^IutNWD2bb>!rqqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv9$DF0d3m|b%uT@CBqzo} zzrgCpfO+^Jcv0{$Seb_i1@<{RFMDG01}^4xwbTiS$<GOD6O&V?CSqwIBX#i{{W)6~ zXIeU17e_lB%rm-pdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7*wAq42%Yyw($e8!ULZa( zcJ#*3%Gxpwjuaw9fAkFW-oD<&WySMrE1;Qn)|Oaxo{yi;#=(w+l%&1oTVGvW*G<f( zLUzH%A*R0M@?25tl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8c$okG4B;6DVN>sPZXRAW z6-8Xk8yM)qPr&MJU`T=;c5t-q>+g;T3&F)aAkEFqWo=`Dhj{=jIW=i&dhD3Y<MB4y zI+~#2^UHIM&GlYBUPdN{I(pima-XZ7w!WdBql;r@O=U__JR<`=)?EVTVd!yiuwli; zJah_9j@0y&k&&yo&c2S88X*Y<ouCdODOE;F92TTSguv4}dANfxVm-nj7r?x;hkHd` zEiliHi+OU8ab1Xsa4`=@3=TqmQ9(l^iuvW=&pbacKRGq_@bM!&%rh|32ZaW<v^76{ z{aQg@_B;=_mzyhC1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU=3Y3*oKT9J-EM#gCR#nh8 zThcah`@s&jph=CIj#gb)&&n0y92h34pn#isC$RHp7=(;GQn`3|)p0Nn-Uh~Z1{}<{ zqM5g{wrKBc3l9s%!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A<YYQJn&p+HvkTKby`4!Z z3BLY5-bfEGO!e`EN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQWa5K-r$)1oDfAwk~*jGHv z)9|UEC8o$sjmH|I0`o3jp17Guc^hZY`MNq3^SHbXIhme;o(%f(I<5v7oo{SBO6UE& zk7ORCX?J%Ab@dtBRS699!C}Geo$b5Np9cl{!O3N0Vu1G4)KmZo7`d|q1mAuC9hgj@ zzUy)e))Or*DoD@BM8~grhFe?SJ~6pwtn~8D`K`x?m{+Btqt!GpuyOZ5ghWUwD&oEl z>vWoc1aw~CErp94C-dO7j%41_%B-cWDJ&!qAM?E2pqUU4;$a^685<uyCi8gwJTPBT zRet}`gU5S2vx_%}N3ISH4Gdi$7#iwFJzgKUK0F9t^k{cCFFOO`SzJ1gVIG&xLjw*D zb_nSD2YP7nFi%QB$*p{bh&(eb0W0$$;|QepITZ7@xEg$BUI(|gK{GEaD}A)LIR^94 z0Kd1Hni=2O*};{eMKjNUVgAm8d-r!9*EThH!hmaPte_~*%Em&9Ug=|RW7*r){t@@R zvQeoIRwh?(&x^`7giP|BYTgM|BRLHdS0*+NngP&x8ag^n14A2kPef>>jFJ*3I~%TR zQfCNBPtyzOxhCOZ9;NffM=@_{*4)+<`bRK7J~8~y>O6+|%ChdR%bk~7+glnT=s?#x zs4eKnu8ua4DQI6@kPXbg!sBi5>O4CKTXbyHmHux0%#(8}oF*j0%{-Hv5As;dgP&(* z`5iisPVA5!pZO#G{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62lNr>Tig2{QZ_v>Z%$B!N! zz9zUI%OH@Rml`h_S@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&hY8e>Xx_i2zm{-QnJa`)n z^LYHc;ZZtoae#TOxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC1o{`m*D#n;QBfYOl#~=C zB*d6i;TY!e>pTlyoj;0sbzq*9UG6jycIL&7VqWZP=JELXBfQNQop1bum<Q3o^{c;P z{^9+X@7`x+rol01Wo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>*P!B6RFA=>IjjS^={c>9E zctz*T)0c0*cpEw`Lql5+FZZx06!W;|+qgQOB_cb^D5~e0jE8yh<1xSQZSXJ;I)9{} zADb9Cn)ziE^H`?@e96zF@-{$wfUo!7?#{~c!hYS{Seu%-0Vh2_C*w%wxsGD~I66;6 zN<qRVeH8OP=WzIW@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah$J@8IL26W$u>jWA+yD~v z!>J#zPCDA}@5PlBP3>*x&vRMW_z^M5QAyc*q_(Hj+^FihiG_LPBbgV`N8r_Yvg7J} zSV+)6uk*)Y9y8wtH}j}LyrircS5^Ub0632f4}SXMImXYw`X`u=iHip2Y4Ca*U>>iZ zXLA3n^LUv5oVQ{7{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3=4PkEHy)m({owvRc%6)# z9EM}8tN8cxw8fQ`jqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m*yrrU~NJT@fWoTq~l+JtX zGw+s+KX3DedE4XYJf6G_9_Gb<i_UMLGjFw3W%!scEyBe-<~ppEm0AB_Z$>K4yv-k? 
z^SWqnb0qU_zQ}X*=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE9qr&)9d2f6W<o?vBrYKi z2tmMwYY_a*d|NAeofK-$QgKBa6}QIO^SXM-{Pemh&yZ9BAz@W*ExY6Dyr_|98lL?8 zfn25SQ96H|81@hFHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L+Emt5qOM8(Ugm{`_|Ko` zK{MY4I*)aKFw7JG0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK2fR%in0atDW=4isewvsF z^mI9SSyCL#i|^}v)jso|*9LCic_1WdPQan2?VXoeHx*mlYi8$c<>G>$`OiAfBx>fJ zaRl@D@-}s!nQv_j55?(i#E#0_pmm=8n9RR_{~o?5G{_%Yc)LW3nQw#Ed8})f=I3T0 zFxS)5E~_j{Oo%<w&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_^WbfC@OYbJ#jyFs1sI+G zCz$7tPdF~~Z5^$^ypp24ot4GmW<1=StZb|rS{e|7wPKqY{;Qv_s%&U&;o<&jJ?zfj zQ)wmpvs{|;4oUG9qw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB{X7)l z&>Y)@^T&@KLQJHtu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbgbh2Y% zW_t1bIo4AKgv7?j3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp#cti~Q zc=rfz6BPPc=am)ZZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm4iN)F zHf33dq?oen2^AxLiMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS!Jvu*+ zX8sSjCiM$?ewU22<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAHz>%Rr ztkY?Dc(`C}0z2{Jdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTCGToz? z7vMiHA_6h27x;M?G4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PFu#0u= zz<vx{R8ihHd|g0*Z*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn`C8GF( zlq?^%neD5~i$D%YA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9Uf`k|x zLl_6?>FHj-#PVHw+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7&J_?;h zOG_guIbnEsfEMrE15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky()hP^g z^NR}V>ub<Uys<DZEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibwp1yvg zs32!!X?D1ojPylXI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U%lItE3 z1<J~*c&<qiu}XNxl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUaM>2o( zd>hdD<1t@eQb<fpbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K;-$F|{ z87wOrit@7HoI>w#vd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr)w{gKc z-)6rfQ&3P~Jxv3B-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ;3vQP z@jVkGgCEl4aI>0<Vt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC=seaD zxxKjt6oZ^e^emV51@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7Lt){9B z*fTQHA6{4i`w0v~9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO})(o**i z@agJm|M=<WsEAMk0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3XNk$F zcvT5VD6r3~M9Db57*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T$;Z<T zYs>Ae4Xkt259&NHe`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t&b!}xC z*Db}w59s{$sR>b0;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U#M8PE; zoKjKRKFz|)E-s2?DJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG|(=nL; z`R8BCOA7%!XM5Y@1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl!TozM z^n)b^d+hGzR$E`QzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H&z;6G z7l|BQoeGKzSGLx{{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXfXJ(~? 
zL!Q9480{DI-4x*<WTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~UoW>-*l z_cz$1=Fgudq&Y7aky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z4A(lT z<1>HE7#7;xfBewg)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W;-Mhxd zS_%qsE>8Aw9OvZ#U32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h*i-~-m zdq95<>q<0qgD<&C95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{8Jz1Z zY?`^Xxen)ufZ*)CySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*CtBNHP4 zjCEy%j+Vy%_h0|z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAzi#G8- z=^Y`ty-VA_E*tmuaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=;j&`~E zIrkquh)1o5rNMenC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^71o|QS zd%M2*V>+*1y}W+4KRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsuNwhT7 z0Fv&Gw#E6`t@YLQwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_?Ng;Jn z0=%?~qrIDpBUDd>b4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^LnaoK z&VvC4TZ{BSpq5lPJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv2gKaO z2m&8w7A7riO|YO1%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA}gfol` z4*L~$2DQ*qNFXLIdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XXIzBcy zGzfj;0y_A~qr=yrXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8<vcb_| z2xI^M_TB<4uB=-Ve(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{?!QI_G zxVr@jnuNGpM>^?r&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?kw4<V= zV2(EiPhs9|2dc9J><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhNpl!~{ z;=<C(Gpa@gC+7S6J24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y=sK(p z2Ero00N>`8`gcG45EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=11%b1F zVB7{Bmb|*WP*_q3(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4W&%pA zsHAXhedYU)V&?JFCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$XNY)97 zv4g{X-@bbF{{1@;4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{=dV(9i zV>io=UAy<}-NVAl%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`rpj1ra zqS!?-s1KnTIukG45Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4rQuG=j zx|fre<>TXpNS0|ln+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{bR)ZK8 zt}8GkD8Ho8Kgb^<Ah=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd)Avrq= ziwYk)av1*0b>IM8AUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{RzMpG1 z3p*=22MfnRaTTMm^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw&p|Y|f z;K#_sz|EajTw1iUhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y=|5JG zmzRx<4*&g_M`--jw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A!OC8P@ z00S)H>*rluS~xp5{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u5u7g8 z&{!WBJ~l48v+MGsM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3!@?Lo z8OvP<g?4cA!^RzKT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI*fco> zc};DMy^C{E)vvvK0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@&^Rviw z+)tlQ%uFPuCX*cO)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ayw=f6H zGkN~StGC}RE-&Qd=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW*d>Dp! 
zV`*7wK(eriums##MFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^3yY2> zI@oFJ;*`}?rR8Kr#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?*rgh_p zejI$lYzNQo;XbLXV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@a06i@ zJa<5ag%!cWi=JPYH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}aB)PQ$ zx?pKVh>D4rm>B-ykIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y2rw^i zkF<;w;PaceHvVzuC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP4$3M@ zCMHKgTzwL#Xiov>@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-UaB3&l ziKG0dPMyCXA|WZOtfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B|Eu#J zC@3kAoE+Rd=+0CZf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2Pj63> z1BqZsFe8|PS(B`(ZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EEvr|>o z6;Ux!&=i?Qc7UGS(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0^mX-& z49u+vcFv$t$d)7<Qwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9NHLzGk zb#*BPMNui~3t|@q#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k`^DzB z<kt?Pc{5+WdOg2*rL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;&+@_(@Gm21&ereeKgM z?!cBH7(h1o2lxUuC^Rx+t}gINrMdX|`{Wnqws*AT732m4`hBWEMXS-sAo&W53zk<F z|7g+^IPRk-4*`UY&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GMg4VXC#ih9yFJJsp^Kjf3 z-@b%8UEP<fs>^e7vjG2>lH#F3aAa79Mr!M-28Md?+`IF$x$9vSZbf%*XG?2+R$hU< zXRHdIzF!c_Cu4C&oqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiAscB%1@dSGZ3d1`gJ)^dz zsl2W>B`Y&JAt5RuJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPWSPV`Z100MjU@`hQEQEEe zY^*@7K7sz=J2?gUG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi1(M&)3Haj0&AYcpCPr%N zYtpkaV&h|?q9fwsV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1=+1!TZ`|H^@p?1!$fIY^ z)^4m1jSMukHkDUb7M2zv$)yVO!OL<A@`@`;K~OEMt}NfUF+MZZ(Au1llNAvY<s0bF z@b?V}4US2SPtVQCEXYqxOO3gdfLwk&a@BC{VCc2CcC`;rPoi^@4~&mBcea;QRi<QT z1w=-=crk2TD5kb{dS(PoeM1$jt}+%UrmQKV?Hm}J5fYQeb6#hUC>}#ereA6`BqvDY z$ic~Fsg;BEozq(x@7A4LpfOrn8$h>YWThn~CnjEsgGYZUIWZ$E9X4LR+<bLu;pwv{ zKhq)4UOb<forM3^)mPUtt&VB1(AL&8IX$+1Z57nW<@T1kkN^5)ZI_#-X2u`?juDtI zkJ!Ka?$MJ+;B-?n6Cks@dpiKS@Hsft3(dlDq2>3#{L{;=m;d6$i@W#kKvUrUpn_&* zCuXip&R?AaRNsI2;MMEbU-TL{0l569&mON|zY(6*LSSU_C_3<K(8Wx`MNA?t65@|3 zI_?$F+AU(ZTgdQ&s<95<)WVrU^A8HUlw43%(>*i@B-Yl`)zoqMXV%==(b3;GHa8m+ z7b7h#b?Wq~!$*%G3Y#el(NEBFdIoyI5#e<$O@rej9lc#G?X8V%Ep45bfnevB76I^R zW=u4(4hs9b4<7(rfvAT@2l|KlhDQfy=4Mu}uiaw)pzr$48*4YNF>U?h=Q?b>^WZ)N zuv-)HfVRK?=+TYaH<#8|7gny$EiKF}&d)4dnVFxxa&=*8eGSd>vv%|5-0~8fUR!5- zO;ba4LtR~SBjCAfpdT*(GIH(h$R)S8eZ2N|=C$_@PEKr1J2Emo1;}sg=*TH6k4j1N zjf|oC1=_lLm^nD>TUu)wn}}){Y1l>AwvUa>tx8}i?7Z@va<;nkOsA+?N@Qhz!+3Vx z#K7dr&lvB`cW(hCAOnDS=jP$LPlADrSzfuiaqH&OXHUQX(rlR^OJ2Qs4ZiT;(E~7$ zyQn>Q{P6M9#}MuTFyIqfW>!G|`{2>z2M-=Uf{pO6Kbhs{haY|f(1PFrxdmz;43<I3 z{0EwarapZ50Ij`)81?;oXyNzwKLt&b)YyjO+Mc|co)T6`qUP}z2?^(Q{aMcI?i9uE z6frxlZm&zQAky63LnEWpvx;i#dPYY9)uXdBqqEb<ll{j9pt#BL{PHqeCr1s8mb{XJ zf|8=Vk|O9~MW!SMQ4}5%1rS<TTV1+-otaw-$(RXv14;8K{qbh*<4@@VU%Y(${P}|? 
zNTz$(4y!L-zy3*Xskd)`C7IfmG@3}}!AHz=vCO=L%q+=|AKiL@<OfGnxxtyN-MR&* zG{3qswKzXHKljNt^T)xJezGew%xj<daz4S?>9+p<;<|>Uyu#3=RFBX|CvSf%Dw0th zV~iKZSlI@ZU0Hw7J+a2GK|LUfXE~+8t>mQdo@pOe6_MXp+&Z&<^ZqY=^#jkE`P|>Z zbBBdL^yvS~md^22ujI?;4FY&nTrXN)5;2bxF^d&3jovR}yhqSrm#{ILDpkvbU_qu* z142SCrDT;=wDj~Mx$~Hrbhai`LNlhJ88`aJ#|p~IT)jNaiI&D@rpD%*29d9sm4%BN zEjc5-bD($e+WOkZw6GgLP5k{u*duAweund(CR2O$hM7a{&D$>m9(G52jh|#meu^9j z&5sNxfC3)D{LPyy8ykpAURwv`Us+k6UR*>nBQw)wZe~3Hxcud$vCRJtOina(bmUjp zB;@1;#V65&!|gqNEXXtiB1v7}NLbe;ET`+%lW((Y2D#)&^42M*6dl-3DD$XMjJ<Mg zLn_iLhu*$@w++<4u&myC6p+y+Z5hd_-~wnDHjNg-N5R5j84}A`EWrNXl<6{hR(LxH z2Zo<-bR3XXWpgX^;G2(o?#kAz_n&NNYNE0Ia%y%~RD4`mbW~VOG*~!dF|Y-;*EThb zOieHoB41zqY3jhugvd<TKm2L-z>mn^k_Y+qn=huz{Foy4^(T5~rVHO}<k9ozDB77M z4=%QuMhfiOojWMtS8sj>{Ot15^x^^v_+LtZ`?IW|pOQ><3=YB_1Fncl%Y++9^$)hC zdzm?sbuEZ;SOXCg|KjG!N8i5niLcxzZLLNw!qBS@iJ7vVQa`E7F!ao&MpfT^^n4qr ze_>g=evcGVsqK==q3CkXFjNR1dBHdwOhDI<?VJw4{{K_<IH658u(Ses;}#Sik&>QU zS>4u$i1uH~l8LT@Um)Mk^!K*+b+`BTAfJ8Ru-ZL1&^I<ZHajzYb#Z~22b`HDc57PX zdw@Mq8URH!5BM*oynp)3dB8v6oGFSv?*)<uS_8Y!q;~}H4<CS+-MD-2v#Qy`@XS2m z%!J7E^Pp;eGtv1^?*o(MJ3B!TCg&H0BqoEfab);gQQVAe9dz(!=QIeK&Z&b}ZrpwL zTALKXDQ$yst<-j}7BmRvl`vyHt$70HW$2mX5?OWY!P9N5{#E6}4?h}FQ*~$=2UMsh zus%X25$BCU&lv=tHwrr_MPxmt@vmwOc8rHMo<MM>IQs+y#3rT`mOz{j^7V@Vn7q0; zyRtHW?HU*?pa~^E3IGHKv&+k?_wKLWxp!q{Wo~5|!ThyrfU&iI$OeA%!Gk-9&Up;r zM6+jZl{){3|9`ZfRNEBrPw#)GcK|%1;Sou5@BSxA0}hA)ejNckk^y{aX$nYwVSaS( z%J0DWPnNrehbvoJbIL2@vvd7p6I=sB?K~LfP85Bjt)`*zQB5*AqIzND@$}mLvs$zh zYBZczt+HdDfS!L~W(S|J{@(Mt5>^og9ywl#_3z%JVd=JB{|kz1Sg9;Anh)d2t>P+Z z9DddyNI*XbK2Kx4Ii+p?P0R0Z7+)m=a}$z1iO%p0i;B(6DXgjM9UuE*g8PQ%hNPrK z{{TO*<dmegu8!IDwW*~gv;r)YmX~O1s#_5)Cl?p4tgeD`_?RF0v&h-1bC|H-RQL#7 zpFR6b`_I3G{}(T}qJ2}wF#Y8dy`v36;DG9xtqhM!8aSVYb%f!;L#I$xbLF>F-f!Iq zpF`7AO+7sjy`~kFM5bmi!lTJPfz~v4yuFhy!AefY_?WIwa#`QTqnBl^<6JV<Ql`;3 zMx7WwM%E&{dHlL(L^1nGwSBTSYEFr|^vtBv-fgV@Ri&+O=A54IS%UypIcGq-fPUa< z-GEa#KQMk>|9{5@{0lZj5a(cEVQJ|?qxc6yADv!Q+SJ|sGgULOFh4jk&d+~h&z?QI znT3Uwg^P<*T3WKXtrblmJ9TwYK;X>R-+aAe#||4?Yask9D@#D9DBCkd&c@F&gMZXH zOxQnubpOd?=FrV1=ig@<>H^PSz5edQ58r+N;Q^5O$A3NiI6lKvGLP?lL_13FKMPtn zH9V8ynKB+xHB$@o=vWOnESe1ZcWRsN;o+)QAo;4q+<drcNc8IEYvDpOw6VvUnu}<e zDv{z_hL#?_eCrsHyI;y$!=+f8Ud4~~wGAq599rwVa?8{{jO~Ofw>nMTDbawQS=T+i z4cEV<y#Mfnym`o3gTUP~4!l_3GkO6h0qr_IC$zo!F<yIh0{%rWV!yr*#>C9j-pSt6 z$2%e>J}bAZp{aj-d@I$Xm`9Vbvasw0%<tT}6PDqVkB_&Zxe?GZGBbVX@F4*F*I$3F zjm50qdjPls{jjjQ3P?i;_WJdoh#aP&gpVK}jcq@U>Y%pC{Lh~+t}aJMMTlP%wXwE( z{rwMUr?83k&)oj-AL)Jl)~(Ny1~mNo8dJtE%`9yuyGO^VDyu8Z%uQuvrQ3VD;9`G; z?V<AU^kj2)H%ju+=~=$f@f5!xG<wyuve7a$=GSy^2(6r0zPobw*+o6iBT8hPXN{^; z!C5{3gwo-Lp|!Hk#ewNHK`A^$RmXL`waCeMubjEnTib~JOA0MG|EM;DP2PE*n)@l7 zFF)4%xHjXMmM5R4JGZv?SNLca^H_N!Ykg$abT?W+NJwHzc3DMZXV=Kg%%`p##r#p; zqkwi`dMPPMb#>MK`}YIw1MEdbg@Gpl_B0wr;LIsWN%6YIIsiP_{QCOhwQC^nLDg?O zd<36hfItA`J;3wJS9hK~MNtY0p9xyTfWV)>ef#qL`zNnnJ$mut;j`yw1y1kWwexR( z|2tM&>&GAe7XThr><?eOeE!}0C$HZ?Xz&T@NNbNC-F)y6CHcEgpIy6i7g1Tz*MJw? zy}NSzHb^ff;Ai2qi%W|*xj4Vs@eKzDN6*l}*1YGi0IPG?u5I1eH8fb&(gsRCAtx{N zQVK+`4qkqi6jvjNUd=3IvBpPreA251Z$0@ow|0<2nkZossmG`jHHp@ANT}{xuI^nb zZ=ZYj{zF4cAJ-YpU84ANMgbbmmz=}OKFxl)UH(kz9+^9&PUljhe52sPr|o@A%Y#?b z<ERGxh`Q@x6)K-;gn)CN3@KF8$i&3X(cZ%wR6PV`g*El<1N{i*=TOYg4UCT=nBTK| z*REam_I8inzPGa{0oCu_i_m^(SO|b;aAK@`u)l9)7%BsQwso{Mx3#o)cP!n!)!x$? 
z5fKJj0)PkLzkc^#R#rOf)!fnq$lth+4z1h+pgwu??#k7xIoX+Uu~DsUEw>)r?;jd~ z9t7!JcJ0*EP`h^f=EBM{(EGDD-*xtML$hhA$?Lc8+<*QO#r*8Td{2LGPk-Oqty|NJ z3viUOvXYI5k0-9oW@cvu1o*+0rq(8i#E}HwQxjofp=jsD!^2%xUfR}qd2nhHjoGJ{ zmr_!by?q$z8EJ5VQ%hF|CnxGVItps)QVNSBQqz5-;y@GHxOpNZZ)uH?ygE^VlsK|* z^X~IE+N20BX&WrPN|Rc848!nCyWBXuUfMoCFmnx!ScAed+4&S$q^v|tgH;?75{tUF z5&M@E6|+DVdFQVbTsX15yc%vt)ZGrL(hjLocodyZ;Jj6RD-1$vFy<s2!ODu_2HkXM zQfhi}SyfBx0CTJ!)iwZQUM9~2=B;gr%XjX!w6}2`-~y_LEvBaU^@opzgf6hNv$3+W z>g(%)JU(;g6dN1s;UkBGf&<|bmM?d<-+uO@sJQUl*)x0gB0KKe$8KdsK;a7f3=3vv zc)%XG(yraR4jnp}o|b$NNe;MoFWBC_?EBcEeG?NSkoS7JI{WtTgKE(AoI1r{R9tZH z*>h;Y!`+RIjSc=~Z%;aRP5`u)j*fOnND#Ef!omVqz)bMBU*AA)?fwG=1vwUGf%aKh zS@tvEF$UL|zka=cd{j<OW-s$R;s4^|Vx7G`J!4~_{j$m`Km-QGC!s@>mM*TwBnPaS zg(A+7Ro#sqTXW;#%l5J5g9`S7IA1;QIysy4i}>Jz)+@FBD`g!EH|~9YxkXLenT=nK zQ=KYh8KLcx+&{Iljo3e560#b0N!fj^K;C5#a#)pmNQJ_qOy*H?KA_-mNZHvwwkf`P z#yv7q(-5fM$<Bk}9T^*!nUh;l4Z$orZV57AE6)Sw7dLJI2cABC5+DcktE#2~n1@x6 zIw0vaHPx>_dU}Sr3Q#}5eE=2!PW{6}b@jFADpMxyL7j-=;tW^?!nd}zg4WQDUw`ur zL=dH=MF4fEh@cV>yJt5j99>-<Q&W>2J2z{>Mre?mhr6Sv>+$QiA)&#r5gI>mfD0<X z`4}1*n46h?^|!wvmC>>X5osxD$x>1h-+c4UUS{KPAoS?z>8Zf#!oq^E@8)^#LE4d! z6mJ_C%CD+P&Myj2PV<S3r3QqMP$h4PDEX7>)_i(FWvvrWUcd8<DPxzgRCme;F)wHs zYVMt1Ke$@awb(qoy7euTD+{Yf1aZ5C4Ec1t6l`P68QBk>zSsuspDeFlzdof#`AXW3 z%RGjMN$)%gjtAuJx#URuq^*r;DG_B8gs5h9GkaYEGJpfm5g05fuduYfv1@o3sQ$B_ z|A={@!MXKy5n-WS%mNyI<=%Y(fzzl9U@)41d2|)vghCr=c64U?;zcnO@XAUG-rim; ztSqoQ1j3-eOG=6Wc&H-?oH>or(jZz{KoA-p76Mcco1rynDT%DibOytdSrZ(A#m2@; zR74oT_RgL9db-cweFzN=hE?Wq_n?Ty<4r)cD=5hOgRlt=J}Ynrag$v;;k0WTYAr1Z z!0@mI7~b839vl(`*GnSVpyw$sFAG`-{sk4lDf~l2vMZ`$GqVF?6Y0U>jtoEO9F1)3 zf#el%#$PMD;@vZ5uRVn5RRrgLQiBdkUd1twUz?FoI@B<@TGqL6b>ru`<~_U<*?ARM zWvqqq!SdG8UUAjip#Ag3!98iWu-Q5L6n=d_P8pj6a-;)tb^v=YK20}idI7(4o*=;& zgQ|K@A5irmRx?Y>YuYXkOpZ??+Gg}9?~%`#zi|UFFD54P5%Utif_d0W8;j{58|m!p zLAe(sO?z+8)9=0qv_l=}3tHP-wX`%~UsyeV?ks5gXYam;I2WY;#{Gv-4IM1f#p%H1 z=;`8M8EOK0-+cHXI5-d`d{8)dpS?ga&y?u9WM!o58|w$gMxa-^`RwKTqsL3P?zp<q zm|VZ><jE7O_wMHv<gv1`!a8`?z{D7+GB6PlA*65O*x%CG4t}JptcaeQ5#A)Nq#WcQ zBRty0H_+D2%go6Ilsv`=l)McW#xJG3_u<R8356YSpOx)0^}Xu^4MOECBOrRM>RGDm zUwQN0&tHsu_Tt+!7mZj1v=3t3r7gqdZK9idXSPB6C(D(q*N#ft$T}t)2bLdJLP#EA zdnM<CDlQ;(4(JE&(V+hg<1Madqib#fRPXE?=o1weo0*eWRo&Rxg~WQ3<6HGSV19Oe zT}bc(ig_6sDbU8BFc0WmfAj=dWoii!_TV7Px9`L!#Gyh3XcJ_Ni;FWa-%(U%L`6lS zy8|tQIJx)Y+vOWKR&Q=xyM23pWf?s*w2Z}KzJ32eSV$0BhE;Vn6>yELtc*`YJ-sjw zQ7x)pKpKtA%q(0-WZ2N;1c_t|z0AQw2a#XDGK1FR$B)gdEN8&(A2Hw2+dZ|k1VZk} z(Iap&931-z1aq>Bv$*(0)J=2^^kTE~A#{b_(9Xjf?iE7vcr!U2lieybGmnh1r8`$| zK9Mx^KBh|3^Qu*K%0G_ra*uCBhbU*(?tk(7)|8A2){|;1GBzi484`pr6ZedJkDhJg z_D`1R)LMs#n!>KDnxtqhc?W<xM6!p~U6C;y4LbWd+}?A#EGkqLLo;JrJ0i`U;vWp% zcVbR{L3J%CoWTiZ4EssX-@G+>b@9ZB<0up{7%iaCt(d>|@X4oDAjzvYZvpIm{d`ac z2H@{Ng#V6R%!m@&bf>$bz-D7-D=jNtzjJr-+B(?E#>VW@B6?^P^8j*CKD(JM!|LNV z?{jmrpr?Qe0MX&8iEt+7p-pf}U~F($IJAa_8gjgyJD?gj_W{I55X|!d<}*KHp64J> zXK(lH%1T31128;9*`Lo72@sHuMoR}9l9Wad3Ul!CwW7M=?VNA~qPm^|m#Wiwqu}b! 
znMdEgy;R(}U(!n1J{yG1X{?_DF|wlTYIW}tXq$KMe{n7sZ9OXM8O(lF%4t0xF|!cY z$g*wF{>k$0!w<<dllF<1mE3X<skt6jqXF7^HQo8NJovHR2gC_1XE3`3jZUf(LDfT0 zX6Mdu4~c+IJf*O>s`YYvU*A^DuiU*?Ra=b?(IZ=|t=1kq{EYc6+6I+6i#Kn6!aM+p zot+(8L+$9%BS(%Nj))BBJBA1tKx0;R=IZU+3u~*$OZXc%Cg-kv#=JJ>@plO3(UxKL z!K?4Gb3S7pd=un1ig__H(Yb5ark0m;a<f@k5mW=(V`8II(~_Y+w1Dz_W_AYK7M_PK z4K0lvoE&gz&?7KU53$3{UwrGE7<)&>lYIk`@ku8a10qS=1TTWI_@ByjN?0*4{PhPf zl+6SA)!jhVDA;BkRCA*zG&Z6nzy9D?9#Bbn3oF0s9%&+v7G2mlP?8Yd+CRIE+do-S zYR8V+B=09CbLs{1Y0v@f$1q+ev>7LHetfDlmUBA0gpKx!nJVhy4T!b`XR4!@uUB|< zWNJokWp#OD6Fhws^R~9u_usrXFw}?Ty~yMVEE$>Uz~lh(-J3kW1@k)*+6T<ftggf- z#v|kTdv=4WZ|!J<o?>Kr3Vq9Kc6s^KN&a0(^Sdl9E$+R1y?E`~)$7;iS6AS_$Bu34 zc`R1@<@*of;$n!j*|kenR{Fu~?-DO1pkEU}m<%!of>0FmqN2j!Xb^5Ylbuj41YyP9 z*WYF5B6HNhmQ8JwnYQT>Jlx%b1GB5ELsOIBWl#acQb~1nRcl*jN!g{`f(T@+CI)&y zAOs6C6;zFmxrMT>(GF#*sBvKP;NpW9Z<0znK{~2B=IMA;AJy<wvW+chnFW#ud->+O zUwJy;e*235qS4-SI{Q^zj$=Im_7+~5+o1iEWqR$--;E=`Qgqp=MCHXWPGG%HV0}*N z_@2V~ozV;AylA#d*m%c9OFmf>Z4*-?8#^ME?&25Z8y%OJTaZ;+euRm6Kodw@hPM|x zI~xEVa0u!epaoFbqHTV{^C;#4;YbY2eE?}>&u)+<pqv46b&Yi$y<O08fc${gfL%E` zIRk?N+Pgby>#DD;EML8`apv?XWL%v&o-;Z#ZDC;!&4SqA;o%vcoUpVcppXW+4#y1$ z@cS9&9i8maxC4CT&dWFM9(44*A;3JGLt#lF@Hre8L>Haz+BZ5pfBhQliylHlQ@x<3 zE~B&}C^nwv<3|sPfEeG30;<MQ*MbP4>sd`AiwZp;xgHpP{>CF|V;@lR20rx?W^ue) zo_^`=$Z$=^!pPM-zxn2*iG?@&F{QmSHU~6Z&*=HcTZUh|^<W#ff3j%1q_CVb+%06X zPs-*L&iAygKiC<)fOCc+e9A6+&*NC7i0sm~=TuB^<^)rFCtEi!T3{$BocQcq=-iR< zI+nfr_OYYGfB;dzaDG7^Xkb9^R?Kh4uv;)cb_LPZa&j`??Dz(S{ZZZ{Cr_S0zhi<B zxTL%UT1LNP0&fB;77-Bwz%Sjp10f`cY8K}3BlsH4l?vJ$9fUu8<Paw(2eX6N>Er9Y z{_v4sfbSN}uYogyaN5m`j-gEu`5?*AAQvz{xv<bTI)dg<L&EDldrqC=hsy!KKm%|% z83=n%@SlLaPM$hR4Ggt*^#JXsPqYPQP|z{@Mv0>B5IZ=x@#xiep0O1i(l)9NIokB9 z!)oqWYFbJA{6~`i?Vm*D6x6dFQ`#wFvQL$AM2jw<?;Dz0zYW?yS;nuda!XnNUBYIE zgbjzR1E3v!yBIKk-YAqyoUmKOgj>mpThU2K!vbfH2z!z{!y_yzC@ztQhZ}vhY!~vy z4Kk)7DJfo9T!_Ao2%i98!1LE%f32pjdj0WJWc8cPRZu?Y_qlx|L&_>j`}XaFMs_d@ z)RB>qn!k1pJR%^_52%|-$veOP=4%ibfXORsYZ+M?;Bh;ddIsu;ga%(qLUN)Zjlkc| zog5q-`uciPR~JFR1M<;k1qIJTn-lYMQ&+DlDJh_R0c?lx0Ztwkkip6EG3W>&9sys5 z|HJviW%iGc4ouJBP4TE_?L-P3We12KBGnC4A0p%N=E}N;`&FFzbbKq?ChtFgol`q> zMA;Gf;+0W*3hOIu7?4mpTtBc{+OasaaQpW>oev*AoRu<VJ*~;A;J~FuJ+AF3XAySo z_BPf3*<$V(^|iS50X6y_8GCLeilA}$d84ok#^FMy(P#DiSp^L^<?K#qxgFJT6U7kq zEUX|>bMW$mc-DwuuC1$MU~B|n@bGl6ZK#>Qabs$E^Zj`MC&Sy*!jb^W2Lv{_0O%iN z)dE=s2{HEZ70vk@8)fCC4i0u!L`w*VA+W7)svny}-suI1wsmz-X%zUEm6b(saL_38 z<#r(6mi9IWM>{JkONb^Knj4_wXz%F)p>AnqK_b~!*H$fDzcDs<WpZ($vZfOD1^p8k z8~}m}J?P}(BAl~@1;NqLzO%pA&))|bgd{r+PK+}LcGl7}(g67&lYrsd2M24~E?2Z( zj!8?`H8jxB)>hTjP{(3TtZfKns-caYwwZ;Rp1~n?=YLb6xki)#;HOsais^eDQgnvU zRn9u~pgP?px~^ept*ZNKZU6GC?|$#gV`fA@c0PqYQdS%)6drZgV_2{F+?H+7{>id_ z{q`|c3YQxFm^Nddypw=lpb$P%$Rtt-A0=uYe^k+FuPB~ZopxT2aT@C(gCiPPS%Z9a zWcV>6W1}*%(@QH#n_Jt5M}}u-$L4<Wwk_ZsI1?a^{E%dJW_E3DapT6~#s<WdqqEan zYeEI62`ELs&)xd^E}#@?glSt}Xh*lByP)+E_d#od>oKuC`q{1ExHo23SGK+b3<vtS z`vSBKCk9(S{a^&N8Ttx+-{e$vTU$YOU3yVzLQY;-N`_BNf?IHeqfa1_=59iA)G@cz zG%!4;W%DocG&N#a<KXhGr?2b-@;GH|)f{uN^vc8P?&^+-IgK;5{VS#I^HZz$e*e>n zNhoCFQ`{|z-=|DIpyI--<!<VlybanvU$To@_bHG?@S!Jkytx!zL`<Vm6Elw!HV$VK zHsX-AJ%{s>zy}KI`^X!RjBQCw*!z2gMusFNql5Ofm)nuKLH^-K;=lOQ>A5Sv`GdP( z`TgnW@2mN<3ct4|^R=SEsi~Hp-iqed{Oa1YqLR4GoY2H%&#)+DpoaPN6W-2A*TP!U zz(`!%{J$&EL=Aj0Ye(ld9>?alb1T@3n?xG=Hk{K9;Kwq;^7=s4lyxpPkF34_@CW92 zX>9Fh<5$})WWcKExL?_sPs{z3wtN5B{5EL+d@**6IgayHAjKb0p&i38#4Qp<&ErJO z<0XhGd`ixH1Pu<Ux=5M^Dq2TMoBC_wi6$g_D=OXDCjf<ga(+>6Wi{|&_wewq|B~)^ zy2IEnPB?Jo=k3h>Mr*&~VxwPb3jI;oz~n?*UtdL2b6!<VYEemSW==?AigyGuGPyZW zWACJAWdm`m4A$gdm0b^O&?ECYN3P!OpS^KTht8)$0aYVyd5J@jY{JN|>Otbx^3KJL zhkxvav8yZBxz1|uxuD0Q;J~ivbV!}Xqe}IQDcVNtpD#CV-4oLFR<ci&wu;&>?{rQ- 
zNZcwJkS}4G1fQH@W-J%Y1aO|Jq$o|tIC(Q4ExZ-;`&qh^H^TOzNvVms1vwQ}mCY@e z`}&!a+Fkhq$bUK;(%A3*h4tuPXJc1>+edoG$C|pkE1Fw!E2>ipiefUdgA<Y&5i!(& z5PJ`A0K6#zcv~#q4C2;3D%4#HF4XX{p4po>AAf7j$l#K-)25ZGI^}UFkR=G=acJn; zzA%i0u7B*8-AgMsxCAhO`Q7sNtnv;ZZ@5(`T7-aY#Qr6vs&PQfD8Ssm^rCqfu+w>i zP$}zFam!?Bo3x8&vHL+%N?D2+`s%nO8qm|V?83Cn?2K$k(8UAqK}Z`IcPTbAJH5E9 zu%@oQqXS@rC<KIHf6Aht=gIl`<RN3f8>OGL%^#bQ<JV`ZVSe^cj9>2WhlsVft|6<e z0vtIaB|RWE5&W9s7X*^V!i8pH=ZL&TW{g+H8FH&S?o)9Ez;{k>EZu%eizqpy<fQ75 zqe&^|R;G!V1pB77*A1)y!?#ad`~LT3jrob}%&%iRuDnawc$d7xUU_>?C8q;Q<TDyB zuV1~{M(kfwLJ}%4&dIi6)fbEcITW1535n9y=~Bd0S-Wg8<1iK>Lryt6Ig1eMfPA8V z5sn<EWoBbQw6P#l9lU(q0>k`b;v-WtlJW|H9;;egFZcB!gg^Haod42PIe&<ye=!{R zJp$%S_0eX#M@AYuyFkx?fJY-%=)ZiU;^-j}WS;<%I|CftnB)K&9wdzt&S1Y9<ts&3 zdQ^GG)W*u)XMRa_2juN88inb3)gDxFJ&E&iiLS5gzFO0_+&;bW;>~~Lhkkl^4|ZPp zUE+k@a&~)U?Kl-3S!76u6z!jEzf}Jhl@~9*)pLyTy3}G7Sau5M1M#esRf?=_hO|w( zyhE-4#$%75{vj0?eOi)ROeH0<5>HRU5bX3Vi56s<ojZf-9}J2&^ioPfb}o=)Nqu8O zXD2B9q3Nk#LF3pTnM`Kv&yo2rg~J1r2)#o*0`#6;UYT4_1br4<2wV;=FckEgjhiQd zOf?}n;0Q!5BNIiOA-g*Df5^G$ljAQ>TnC1a$ZkEP>~tQ>(Dkh5Rd+wA?q=a%QqVFB zswSsyA}F?G<K~?|`Z$u(D%kl|_X=q5khW~SV{~Luw=P^2+qP}nPCB-2JL%Zyq+{E* zZKq?qJGPCR_nhzCalbLn{!z7R)vmqQT%+b3d+K@C$W1zBQ-&xkzC56#2W1{-a(Xwz z&J5JN!_4KV##3oV;P~$ltlI9%+9uY_CKFf=q|&2eUnrCG9-*+aSMFMFC*l8C-pE=t z+)Vasa~-4FO=VHFxH2|45RCy;kA#jPL$ua5*EhFfq_THVW&eDP7C+C}e4e(=mU?mZ z6Y_?K;e6+EyN@)M0Ns>_`;x`xedOO+J>fo4O!JFVqf0U72JkRSYiyiJlbFZy&d*9b ztx@Kd=iB$C4Fy&kh~MQZ^41-=uN)CrVNT_WZk8t}?Ee7IBk^v%t1z>aZ}R1nW%0V3 zCbTxZmxAvX={1daXYJrkNJcJm@3?(@sE@ow5j(nUf)$oe_v4S1!~SNGlU0rP6ETdq zOC+UuiuusT5L7$|ptlDX&PjN8zsJ@Zr8OfmpBD-y5+~mZZ#%L?Z=2;J10Y!?S^5U! z_Y@Ejo__xTWV=we0-$=!S_sgbNBaOg{tq*Tf{&fSYX*3gS)I2(-Cx*h>3A5xEW-t$ z-jR!+vTkXhH2yU>4<slH7X=<Er;D<Q(QIwb4Udf=U=f&@k_5A_RUbaqHkk4sJujX{ z=A6WAzV0>h+?!a}yl%U(q@+Gh@=|Y5h8;dGKh_crlz5z+OmfE1`+Aq;WPxbEAxzd0 zCLi?#BrepEu0nu?nD3);uo)2GrKjoW^scS(Ybz}XKglO4H-m>+@a8wD3_a6ISJoC+ z^#Ge>wSCErfN7Yj^>0o;)&KT(JWF71K&FUcB9?QJT_$>9^u5sXqEoFNQSt0gtd+Rv zvRNcHC9%N6$vIGW@+IoC%Rx`~XKYwzA`|sA7PYPa^i%;A#X@H8haP9f)*?c8o}m3^ zUbJRx4GuXbBqU5v-wxT4q>BAqIA1^`&-KOzyC5Z<{?7QiCM|Lb-3QL{hE!5@OAfU` zgz~(Btull2ZEuhFWrVOd&a$y3%`v}ukgs&ZFuXEE-jELl-?iFwTFe-qj4o{o%}Z&K zHH>pd{S|#@<*D>espzOpabu|3_IkCdIy}61b4rbJyCL8^as#}-A;|PoZZM0tsGQ<~ zq=6pQpO><)J*%z0psmJI*JmxSQ%8Nt%-=eoV3hfi11D4HjvRA(q&JX0ERy=#oUO&o z<ZkNVn7w<0?wo!okcacdani_#Ixm;=&A0p<o*w=I@~(o*OSw$LdpOXj{p&)P{nz!Y zm=2K-QlHSXy|B?X)NNwHLSuk<V|tds+WhkB(iS@#7oe$^yT!%q9&>T*P;RA{JK+xY z%`b$<g<-9He7j!Bktc#h$O(OUIoMT!o-R-meF(0t)Wft&N3kv107>ME%_E5Q82hLB zkabCkji-L*Jtz4(HQnIEHxP@fr{|u#`*dDg{*x9yA-_a+gwnGBxCQcp;z}FDJv|H2 z%eS)iaB+FPT?U6FDf2qFHC0>$-th!D68Uokyh5WdqgK#SE<NhbzP%Q_^b!x*N8%U~ z=o(&e<I{%6ArktnjvNV=$N4rMGdByAUqGE7WXD>0m&Zj1T*oVnz6Y}uZ5fvw-DB;o z+4Y%sFK{}ajY63cqVtpDyU2ERv&NA{{s9bt5xt0wh)7{2#%CvKq$w#wOtk-!ezWuS z6>3Kku{ZnNArSvkUJjly(4L+y2o|>_9b8D9{RVc23n)};ZRvM{t*{YxDn`Q?wKxg_ zz)H(K#zeUw#uPn6275y~Gz%SXy^wa}Qt$mju#Wr6<hy8U3HQ|$*S+bxUk@J3YC+9V z6=2T@^w{#!1$1^Lf)NjJSJdp^`ux~&wi+E6hzaktVEbHu&?(1G_AJ=(!gl%7xZd}Y zx7QE58e3VHqiVrl9lDA=r4O8{kESTcM&GD=FHP~d+I|yD4f|$mN!UQM{nwU66|Sm_ zj(?|`1*<kbTp^!#f$t8Dn>*}<!5*{a`{-@{A$&HLVIdaQk)NItiERi0wG(v;K@xb5 zdQ5-<oCOI0mQ$D>35R5=D5<IHXlyU)y0mwfmzJ71)_K^%t$KQzzmkgkPh17~WxsN7 z4VEq<7V7l0ii-xvr>FOqQ}=O?xfwo`T-rxa>{1DxJBnSmnU>i9mXd~zQV3EJDf%e{ zbuWVy9LyB#vR&lS)!tFlLzU7dn%pBANMeFvx~Gw(A}G-9o=c$Oq&a?5#Q}QsU14BU zH_j`qBd%nE8?{jb*cG0aqbp=wdIv6j0HvF}tDAk9O7pfExwa^o!0Q7{!wi&7ZT0O$ zp9Up27F@q_sW=@3iM)$SHGcmr)rDAfp=rM^YpaNlUK$g>E|;g?)5t$Z;Xr1&mZ!h! zyt_w7`W!m_i1;O=2I`{5FA8NFBZXit&p?D8AQC8bLX6z^0%(aya?R5UAPhiS^fUT_ z&%_jx2#=3X&)@x>B@S5KzG0pf1Q=d}2;E}#EM;d0B7mgf6_^0Hr7+{<sQ!H8Dhbz! 
zX6BVR)7pG<ga)pny4;$ux(uky?}6>I^}$;K#}tdQ!@s*P+7qLp$AL}hvFXJr@%2$P zdrtI6Up2cU#d2z-;+d~#MQRA}^qXX!#dlPz(KukZFUz?r@BnGh0FTIWM*B>Y9`P0? z_lG|m&Yvo&fpgrlt&2ss$c8`3-@xqIHT2y))JA>aWuW`?qt%@VCRgc=opl%3sj6>Y zgE3KQpS&#`C{kT<*5{KPAu{r=>FuuFV=+u=#vp_Q);N(|1xWdLh&Y(psM%@gQTq@x zAVH*&R*8NBijl6>+1P_&7h606-tA8>gW^zqfK9UpJ$^!NxS9qAHVzIh9v&tZ8lsYv zRODp_bDPQ&L)=ZE)n-Xar5G3n6Z8?2Q^IDy{B1!(umapG{I4mSP{&l+)Q8h`jO65& z9^i{zr?{8O{WXf94fz-G{?dI!TX)vFD%4bJg*#1d$Yw~Kd8*#ocm)>4x5l_d)-c+p zivxF<y*|DRJ>Lt-di6dXXzw&uF1lG!Wx_!kz1h-1)7}oUOz9GtYm!eHhBKO+jr0UD zkWJ*d6VfGLckyoNeMT2!)_csfEyFU9m82yJ1Hj&`jE|rKiQ{EjU7p-;5dh-A)!;E? zM<`3M!{5_WVG$_Th(h3>kW28^WVIC#Xr4xH_(bh(HTi8l^&wVc&U&D(NH0sx0oR*R z)&TBnFKs1gSHj4EyK~)$>_L@z0a1Rod3Gwa0#!$Tl&wjamtn}^#qG7(5rh~u_Ir4A zM^F79w_mER8?|!iqmUKcBX8uKK>;)6xG5uVgi7vnuGI5Hn2r;?S1PXY<Pxk2>oQ94 zI|4f#?iYkdm1_)N(v!iG3Ze{Z;ztidNpL3AR^OiN6RAlW5Jzsxrd^E&NhOwwOIVe* z><m49p~LTs8_93-*xULbB5{Y@t*e{m4<N4A!V2DcZ4nshXs646&d<?*g@Jd8jdOyE ze)J8TkrfB_j>rJe14tnsg9{}BGz=za!M+RA{8=<ZV*n1<XmQ6#q=O7~q(hUlP`h|I z_i)i5!r<WnpTqr=h(WM68i-sx$L=Nx!N55TNh0gaG;18J@m;8{4EfHavU3_&KRZMv zA0+C!vKosT6Q8B1`2NaMXM&^XVhtGFkb&kq<1V%=6~J}!mPREqd@q;x!(;8#R=wV* zOIXv@Jgg3B!v?C1<kW`@Lq_<8isN6Uf(flskUg7u8lfY=UZVlqQCxHV<G4Mc9!NPW zlYgg`Cw;x?Y!*(wv1wtMNuH7AFZR~O)&}sgbe;sDpBN6lCT82pw=>KpNY9jnz<`7S zJ_W~IVj#y?me<!NOEdr+7h2Dy;{wt_f1)F00+#Yv?s4?x;(~oZvc9`m0_vac1E0}i zje&|D&cJ*#%NQiy<wcRnF)%fjckS5kbZR}-oG1HtAlAEbRYKCVIfi}<TG|0+>4~RE zETyrd=tA31b1CM>pS>WbxJVBpQ(-|sPlZJ!w-Y1}??Ow?i+|l>6WlQR{XWNZLgAV4 zHsSKUVX9jkTl9i^<$PX>)k5WB)~y$MwtLjynd3Id5;i(pKJD%|I8}SM8D&?$-|044 z3H611%Zfi{6gL*}f^-$<khnqag3DS8+5!(L+gZ=~eekgTc0mBzNZSGWD}9Oq%+CXY zIJS_Sfjr;UQ4I<JSG>NX*<iq*!1??iP~-tiH)N3C0d{^`T(PEq(}AnRGRd>`JXO*! znF$*NUbo+#kN%HWFJA*^$GQE9t;0QXdS$XQ51<`HnFn|#o4gk#fg2}xrshW6#QnPT zedhA%cJV{!1}V+g@6`GA?c=P+ffnwoeU2=fl7Jq;=wBbwaG*{6M1=peflZ7dcV?)p zCaDw^NQ3h<Z$hVX)msj@dW>h95n~ti*D@*`#pA{?$xg2sLT`lq4r0y$nkd)M&#kUb z;0K6#&1T*}uuQ;rAdxA6%@yXDvSD~|<4>?~0c)Q@p1?Wy8_1vO?6JJ1{(PaDGp+Sq zDD_=4EE80u3z?1)5lvnMx0PBn%TCkmw(lANwCvwDbM)UdAK@6IsaM`3YS5?NLEaiI zlk;~K=zXu3DJ1xFw~Nlw#guFZ?8L}6UPmN=&+P?`4Y)6Z{9kemCCRRsY%?Rt2s+?N zk?CPhH0^kbMG^l@0C#<ln!j<>Ms@wvXwJl47`S!#oqep%z=m2(CZDXmQyWf8gIIX0 zeearV-w)u=P3uZzxJ2E2NoSY7?e|yT2$A3~yNerl`2d}3kXf=rc?K{Gnt-(+ITM@) zIib2Af&Rky$8>o;(1|nf5o8hgfFn&-SWETESxvneMd1}yG}-+XS5u{u_f9;o)+GAM zB=+bgcqYCSG__?ujt*B|wnlM`D}T*&AK+1`Xl+7^6knc@xrSqpAzqcj49v_KXlwO} z9>ayEEuL=ogX7!B$QKFtUel;T4(8%LCY!<$+aE;}+yH>W$!o&|H-V~qa-;Mp$oW<! 
z@P+1ajdID7&e$9>8P7?+p4)A3!RBc+<uXM-Cz_T3ZtB=IrkwR|LcI|og|%SpRGY>S z9^EO-`gJ6)30TQfa>(TZ8&E^Q6;#~ZlN=*3hru5F8+MoIn(-Q~ti;3|*x47mK0+}Y zEW|70O)=jQ0=xbQ$zH~rT;`wZ^sb%Nrm|?88gZg8O;QW97J_H6CBWU{Gs;=7JwNo6 zSAZIUO)nE!BFkL%)Xnnfvkr$IF7|ePS`<~!Q<4J&?syz5Z+L8ovV9#)gd-ASXgH|p z=g{=<A$mGSl_|N%@I>D2DHdE;v`=mn*>q~0c`gHPlkuJZSg~@ERdW?Zl5`M=T~!5* z%;ksb#DGrT41eTA{%9daKkOMU`Nbz{O|omS9zvY25I?jgeTKB}dJV%f1`DY09yx^= z!GRb7nEwzBXo|$RjkHzbWKvq<R9a?NUmTwN3x(-sJ7a?H)m)}a^9K(mwrakmbBN9z z7W<07cGMZ8RdO~?c4YmVjkd9>v)-Yn&g<9M7}9{(b;TUnlriv>2BF>O^ztpPA6r3O zxl7GZOLb33i&Erj?5Zui_2i@1+w1MIvnGD{_OU(TG4wgXXmm_lEnHTTIp+;M>vu)x zq$XM5B5?oAbFnNjnoN!aftIWO+s`JFRBnL-uIa9G%n9_U&k3L=Ii>GvlpST{i~d+} zmmcAwEa%l%ZitC*hU>Ka%UE>froQsg6q^Z1ahDjqNaDC43yVtQxG1AIh=4eV<N4k) zXQ5D#QM1H)6Vz5?bT(t|%0X}K=xl?SY=i6vy2my;rBN0pp6(tu$NUPL6cl|Dn^S+W z;g`0KOg3WRk$$ylW`E`7ABrQW`ik#|`hi}sAvjOAD0fA6*0ZN?^kyTSZU@Z1zLBj_ z`a!8)17|M+N8>{sG7Hq7s*}M#miO|aDw~GM<9Gl+OVnr+S-_h%pCWbYB2W=36VT~J zw+be1&%!eyFz#?hd=hY`x?L8}O^WFThOdQ<DFQLmDU6R9O?N#Yj_C8wd_n$ssSaM$ z;l${yg^K<$%58$L<O)<<bghje-p%?b;(2p2a~BYAL8Z|>ax$JEE0A^7HqyDee<;a6 z$h)WbrMKwhHbE?2+NkMZ_C`nc5ia^iNs(pVebd_B*xbnb-uCy<;rpSM&`+}!W2X%t z->=)|x0#l?9=(=<&yS}a1Fg;twl0V<$auoIHM2_{G6!4_xacViH_c=QBMit1AXDv` z8f=L;bqW)LV2#LLT%BoR)4q~x9NS~_y$qs^%~TCOjV*x!^_9!-Y7$R#<8x!Oo4_FN zZA#|La8>FonAo?2^R*(oCn*#U{iaoF>kipf)8BQ>oe|8XWjDystjy@V@Sq*^>=ZP` z6$Nfx`ToT1T{8ki3h_v;%99Uy1^=AbHLxee@SkY7u(ga&mDoMhv48HmJBftU=%m=_ zn#e49imw=h2R(A<J=IrwY?Dx*v;uqedK9<2zdk1(AlZE<1KtNx){X~&M`b+p&J)=D zg7)Y*r3I7BKNP4%f-supW-2kDF#2kxspBarV9#C&l^pluNG?I&CpVxCb%6!u_h`<V zks3Z~LNX?f-Y%9Bu$qq6CYI|Zp+$?36id&Ps-Y#z;TCKXM@>Tqt)pnJ5JenZ5SXVt zqte)Kx1hze-1sVm&?k@++T??K_gKwQ?pl=0a0p&31^rAp^XhX*iqGE;`|=OqVr3dW zhUN2K=*En`Z|jF<>!d@>>HG8Z@L6Dc=69+?jeks6LMZbxa#}+FWnMjgw7HuLZ5$F! z56toNmcxqZwt0&N`2L+D_J(>%r9w&+e+ZT@oA=y5?+1UBMMfN3!&i3a$rt92E{?`# zGQIYL2@yh{G*mODN{dp9Kq1zIxMDOv_5~h6%OOA=#kUF;9sB4`Ux55kSiVIG(~i2U znscvIcALOthl6Y%W;pAsKKyPPs5;5A#neVmSb+6av3ldnqKER_KS-Zz4*!+`r*xfR z<KQU?vv&KBp8qO`JN(BM5|f3@gQt*0=?_Qlq0%Gx_va=)#EQ<lvX1P<M-0};l-BR8 zZ}4+D@E!M=<CwV<ptv5zVb!3XS_E~2VqTB=g9O~|BlI35<Q^pSK4=$JefZblu7RTP z$ZbN<kBkhH8jNDJBc<wp`qg{NbQXzz`kYWh=&o#h*7iPF3QmoRpig)!ZDC_UpZgPY zMYr**1%ayt{Y|Hl>pjIc#hnWJJulDR`|fKD{z-RVE58Ll+itGKNB7+Ve8n+Yc3?G? 
zHgs*rk@)k+7fThViIrBlA`~{Fb&^Ml#rVrkM}8YYorX)LjRu1#c~CDqSNAa&0%plx zx21m9S#7)3!T5#{unGlyiUccHn5KTydio19rYQ{G1)zkA22NF<dgDzZvBD3>B}N7w zAXx^S6|4OYH}z;AUg8#4O(eC!lD7M9Yn$XG{D3sq|If~*P6S)jLe$y<;#rDsNr-yw z0obSGHW{(>*pJfPw)m=ng~ha*lCap2Iejb$<qL>qvu(-cn>01En0Ry70IWNkx2*Xa z!O#bBIY<FtPj)^pcK&{qwqz^Fq;lN}w2-OhZm^hOG&_@rtwwwj>!6082{a>sBj!+F zf}01hTpxUEn6L!n-S{6c<x*6anQa~QKh8G}a>*Wu$Mm{Ciu~PC-^njvkys1<LaMS| z3m%^?p57c62rCLVtwL<<I;%(oGSKTyo+Q9ftLI=*+aMGFfJ)>*OePMQ)Dovz8?ofl z4}$T$0@>J=X?-pUsJa{*2u%CiKh{OiTSTEP20q7&c!tGjflLvGh|&)!t1V$jJ$~uL z-tIOSy>ewo`*?BdU()sUIXDu0R2bd5A+a*ai=kXOBI@D|Jo>as1)bjrZtN_8eMOlJ zo39vUv8KGuw8-NHfR<8OrcXOa&^5Y~6g#t3TeGp0(lQkfla{O#=K&OCm+N4XS?;-` z^Y1E@J(I>ZCm1%Zn+CNnE$)3x>Ary+8&P0*F>CF<f*d;EzsA29ce-?XUfSF|xx2l= zn6B`Y%no(R&NA}<#t}K7(pw=jd_$(2qfpr%`_GLTXzm<k#U5eXc?7VfcJYVPMRF@D ztdbjd=wQM1$}YZVMyGTZ+i|1e{jOMGIA&ogqOz7UQu^qdZ|40Dw&l>8biVJXZTI~A z+C7;(h<W=5yOQZ`acBK%;=I9UER<_{!aGWfB4T<2Opxd?euYEUQy|8Ff?dDA!&<q+ z;<N${t4J5j!IOobLXa;TW3Y2L#v;t<>Tr76Yf;}}_|kd{CKN{Y{f#pOp80smZ+z9d z<GjhRVb*DzdeNZQGWo4E|Mh_}^$ULRb=4il%-+<+)!EF*?mr_3V`~^@CL$)H{|s0; zIhk1g@9a0B@_)_p@ewh~nb}*oS`smHe9tKpF{*kwnh`N-Di~XvnYa=$s<;`u{@0AO zosq@&7aCTk-zT%N5HU)bSy@=R!Z34uk3}799h_AhjZD7J5HtJcv@?@*Hu5536tQx3 zQ8aTFb+B`Eus5@JCE_Atl(4dOHFN%cw>5G#6Eib$Fg1e_5P<pr*6q1|(a#J7000Ad z4+?fLf_dM=?_<{nCkX)jU*!AGs{bP&3-|vOj_v;u?!ToG_jHw1as9p>5u=IGH(4M% z(|;XA-?#mred7Foe)K;(!u8)fB2UCh#3=lqMce<^CH`ALWl53$RhIu;^#97@o1h?t zri3@z+ej`_1#U*6d@bJssR40ei2rNr!C|7M%BBRW;ZM&Res*^D_LMz2fk*^{@G^6U z=A1N77$hP-2qJQju^!1W6*gE<YTN}UYv*OedNAh|YwuU@&QBpdD!UBt0HD_(#`q^y zC$B*NDW_sM1eh>wwWc{)JeGU9t&{16Z|Htj=H9=DFFp7m6c7}(fRU=v6}+5B&%V94 z7AsT*o~r$MY~#eXA4Lhwe>H|I8U=D5ox*frZdh)945Oh@#*q+EQEO2iBGlWOvIXy_ zMs~8KBv4Z9^r={UmyJCl&H93KJ+xC~)&(4R^c62>&@IC0;(@C3*feYv@e-C>Z7K8e z>c*q}0&x?ZIM`Cp`vkB1>eBsu>(Jfy7v=5&C3GvMl1avvp2zGv8`O*W*xVISNpmrP zR#Ifoev|O-T0dKfmV6+ubVd2e>ZE~LC-%hT4uRzj*Yysq>x-!S3?Rb^Sabs;$nUWG z?3+fVO+yS#nO>_BP2tsC<>t`}(TB3A7ulv;*R2y4wQ?}A(zB>39|mh}qq>pfz4fkN zzv5mOw*M77D}4u3ws~4z$CfDlG`p3T?MXWL(>vQkxU0<CH~3HJ>}OF_kJ9hryA1xc zTdvt}K(RfTL8>8#pUm|p>vN{;J!&{7)e{!;tthT7aqlCxMq~9>Bs;UZ4D5&XNg0#F zf6?`axm+DBnT~A&h$H>Pki(z@K~(20P4nZEIyr(?2hQtLcu)k)i*_h-?N*KmqV_L5 zw}jbZLtBEO<UT3BQd;-PU#j12>Wi@|tT2X>N~#3kM4hsymfFzmiYXX-M7&6gmg32+ zKJzFiHOgywmNae4#hsn6DR1}<Qa4kne_3(PS7ny(pVGZb_^ioi5UkZq*K({l+hyyg zZ^G3~6K1IGNol<2RStf!^Y#jUK~C2JE&msV`;SumN8vb`IQ~a@{>Pf=5HU)Nii#My zn3)nW|3{&eiS+*G9G3qvME{S;VPW}i1^Ztt!v9Z;hJE!K^*HShc>RaHWVqEH?fv}s zt4TddSPt$@C7h>a_4jGVm9^*9E|aU~dI<q<j;P^8EeEcY?xiN3h(kYuh}xhS0=}GH zc2d9ELg2nywVr7WqFPhYE)Mn%4!-oa4L+~;=kzWK7-K*0Ji{Bx<LXG)uH238WLoyu zmv?Jr<*aC5EN=`91U9?>(<y(~ni|e|ZBgK&1Iw(}GdUh)7<I^N{qlN*@?OjF>|9=B zyZq+e{?p@SP_P#^_3CP`@CcO27ceVPU#?ELN9fyFV+Q$^<UHL|bT(#O`8FmJcE$TU z<~}gpKxIzDCqG6ZEe<Ev{xymR`STW=V%LAb|La4U@vFiAVSFKa__x`6CDwBq>~N{8 z*DIKIFd<2cnbPs?*0Y8{c%Ce#UAreFj-SU36Xf&Ecvm;yhuTGK{er(?#GS@(f6#~g zu-#f!71%ta=UYL6&JY>rYfeS2wt#tt8AI<cYFmtv*Yl+V+^?rpi!Z;Ig~me8SGmoM zWx`KL*UKZxXI6xs&ECc(7=QcVD>B4H6FcTk{^8pC(%LLLmy)RHf~Y1B<WF@&0htZ- zJq{apaXxZ!IO?PMWxDs9%5zSCPFS+KW`MyN$js;VCQMV^L6!t;YB{*WhI;f<cwa98 zh&g728*MTE$H$?OeSX_FmZiXrIYyb;WHZ<T__hK_e>i;`$EzbJY+SBmtnXj<G~ny3 zb9s1QcsaNxK*!n+iZ3?|n^aEI)g;Z~2tzsSq*`go6f6@o{4mzYPY{|6vsE=)13ekU za}-7pL2U<twHQ!QQ<%tjgh7KPSLc;K?whu@1dl&PeC=dS9Ny4xizvP|0|NfKtHYeZ zzn{W6`gZMuLe*nm{yg^T$UB7sdroUcK;eQ^8olC^T>1GrY%}z?`qBJBe{1zX<ENG1 zUD$1AH~pCzZKb{+=QPAc`><Kznj5^)U2&KRN6r)ox!v5znB1Ye0Pgu0$Nt<vXL)E! 
zzurb1#-s<Qj%K?K%v6kT27W%glS_M<`vqMUzGBi`)%+SvBl>;vUS1@<`4S}e5&rXf zK@HyZA-K60-PO|q75=l3W{<oZGeS2zARzkC*igp<fH6X~0~M&-QpW27sovK9cXd8f z0=sLc0k)#n{kY0b`{agaLXpIiS?>ojhs)?@(Xk1fAE;o3g4vYS9rdXSv>#qekyV_* znZYLy7g!JG!7RAW>YQ%-K)z?y?AOEIzs$F%uj`M@@Kg`NE5aOqpO^3}13%Bt>zl2~ zCqhPlpPRq%p9oOLxjVfb{+>^GUl;BGY;`h#5*F!wv2H>5hXbrW<Z;ex6YLxz$0*@; zz6#DPHKY*SO@g6hHS#t2XX8fJdIu0V&kH^6@9iMq&(8A3=zMue6XE}q0%+JdBFDeG z{m2~cZ2j&a`1z8@<9(OQfmv;`tDuOm1ye<>VYsK*`aV}}v4~5t0-*CEMA&oCPjrKe z1e}$!1t5nHWtPVPcpY0}r1|v8<x<PsZRS`bCpRT*b{R+$J)LNcNXqENnW9Ah`dO2L ztd7;xaa>YFn%*t<WlRy@Sfv0|T6*egyE4i1N*(1R^agZ+qj6)iD$fqMc3rvOlf$80 zWMd~-6u>`Q;b?9#=#D<*=o~J&>0lgcfUDr{Mvj~ryS{T!*9zyu4zUOL*_LXF3(3&6 z+Wtlkfhm+-!Av_p%X6cI0F%i>TU-iS*L6dE-RT9%v;pnm?kP&&x<4dY@V43n6rtR( z^7KxTsRc10Etd_}C3W<mj^S$y=!ba5MU7TYHF=fJqDFU3ecK2vmkLv$x(Pdm4w*g1 zZDQ8AQYYpxZ@hRW+Is$>Y#4@3OSy350hF~Draw9Wqez6m7?6y%l@VSCJg{&kOtyb% zE;9^x)A(!;X`*U+Tj?7lF!b+nw6SKFW*O1%yD3N;U>{aHQ9fGRlMZ$bJ3gYXTK7kr zHR?Whih=KY6x9CRTV%&8){T1x#rs%o`Vg1u3!=3@rO?o5HXQ0d>lxlNB0gw(#4D<S zfh8VA&Apm+L(@>8z|e!_;wgLrm9GonsQ5$&7#J3A9tlE0bHPaBx~=Td6Kip{Ba|>B z_L#_?n5N!4szPuL;w>s9j_~2xaMTSF-<a8hBEjAlX8R0xwg5NTPAeV-fMI)SyWMe? zz*tQ=lfSe;iA<Yi-*7~%9}~8Z{yFSnJ%wT31rJb@<?T(E)K+v9aB{p`qAyS;(3@*8 z*%X?J+~`{;gFw>$1RF|@6dKzXlfz$*@B)KVTj$GvX|c^2<QRVKG5rQ+PUwBw%z<cH z*@+b5_h$5tI~~#9*!%(M{p9*5ab3zVBKny5#OecX0Zk#Oop_D@I}i92#=u-j573pR zuH!Ig9aN#ZRQ!0tMi&D}51TuR=o#m9K<R`(CHww6f#bWuh0oq&FpBjfK&cB21a&No z@si$yILK63&|FK90W8zlmc42=j^bzg?mKBKVWXY$XC_X<d`gsD=VGp6OtB%jO;>^! z`WcqkHQh|o1qLv+aV)YU7LgD%CA-}V8|w50?;nMEZB<l)5P;#Ducytx!gFvRRTL=N zR`gRqcB*FfID_&UrQQuoqvE3wAUM;*#n_2}JofJ<2+;pAN8E0Li}}zB(g`3~IvD{T zx7uSAQgBYj`~wL$7{4&3LnvWzg#F3?4+K#K(}nv4k_(B80m9clJG`8h`Bc_9yq7!o z9&OK|+|Ba*0p;t^bACZ_FAZahjZxRclMuQ6=Zh0`fSs03+zEhdC9hLBTq$Ms@3^H? zY9j-#FCAckb3-04e~^F5tc8ZWE3|#u=P?%s`Z-Eg+`||U0Mtn;;T3^QLyn!X;@6(n zgl;?hV_VgyFKuz3Oxa~@*fGshUk}6c&~n;?B;W0*i}yKL#Z<Qi;_=oC1??!(KCNzP z&%|U5>(QW(Gm_aJBBYElXI0g~7!pX)wKVS*mEVLfZPhc&+eM8?>3+Ir+dxV^Jp$N0 z#Fs8NxXG)ZIC}IO$su41yYWyn;e|l3|E91F3`Eso6r^KzoQ3xa?@OVdm;vb-P}Jr* zq1;yQs1HvEj|z+TG?2d!U5W2!@!gt!71@ak)>P;{DY$GUoty;G!bvq$$c-zW<xA_N z_vGn@k=?c2YciO$l&|Up3Um%tX>pl=64B}j5ztXHt<QpZquv!ng%}81g%8<Q9KU4` z7jKXgfVF@rKtL|KN!FKxy=j(VnVg_t2GZ~0nV}}8?SXo`<LcnTWL9J67v81EJpKdW zJ=7eWUFG`@f~asU3`-N@;ee=va2bRT!JX|cwwoPk37Q@^1zOZr#k3}l0K^BUnk#JJ z{t+(oQZPHXK{5V|iHz){x&>p&P31-A5iiTym|exgx53IIPpw#k1S*bRi1zVdK?w~O z&1@3rHJeHKAm3=DZB7W>R$GG5aT|&WsZbLyYX4JQMMTNRD0<6+Qq@)TIzEd8ghia3 zNekYXH?Owma`tP$-Mn(8@4_>HB}1Mqn|DDPAkU>bv)BRu{4CE(BN8{bSW9rl67G+d z>@LL;z8Oah_0<YBK;=Zh<_XQ7b^6z45Nd$m8mm!xZL=4v7*ZL38<ZrxCB-bHqbX@n zTTlyFzehh|q0=U*wZaXbM2m^-Sb_9}k)0n#4Mnf($GY9GpN8iXhsay;Kmf_*Xy7mf zVC1(v*)X=Y1{T5#Keg0wGcjAFOn&0zhX#AXy)vBi)!WW7V^S1WVsRWzzA8!eZ7cg` zs#m9zG!b*_oe+E|EOGqf|AaA#+=Ku(6n#RWRXH&*eQ&m3%I5O@UAnRUK5pqdJekeL zM)9u}i~ILMQR%S(%?{Yk<TEYTM>J2PfbnJ)?&u<{=x)*<BR!FUm8hE%_xTyHS8Ja{ zpSU@VYX%=JS!kwv#k2+go~WLYeK$d=xuqd`TY=QqUrsLeMj|GjZm9bNZH1<HoCTRg zHjf%gEaG71N$<+S<0Y{EcVFg!v-s;J375ZqSRAgJNbRt6OibT30O;!vK@k7}K;she z4VQ_b*f{+(N*DZ4Ob<(>*W2-ZAhA>JJY>hvAPvYFtV&wn9Md{V`@nV;HI$X*`4))e zhD4d75|6!bjb_P0D=Wmvoceljk{{Zl_ni|zV|I|Di`j=~eF4$X(E77_h!yF`Yfw!+ z4&QwfxUc>-!Q7#$D{iG!+ZdMET+e)*WB;^Ck0NDWw#niTIBA#~baCVGzKEkWsi7Yn zZNK>!SICEIE|DrF3mGtk`kd}+jz3%x2G^&0$foRfL)x+h4UG5479afJ1Jkqp<B_l! 
z_2t#ckm2mIe?6B#U#Gb0<XeL}c;MKIhY!sp1nhE59;q3d4wd*F%es;tW>=MKrw-aN zlq5bt5#bEZQJ%#MHPKsbDY*n34;@}m&W!-$n`?I^EJay6=2;s==Cx1><Y!Fk)4>ra zn+WB9ftqxUA4nH-0@8T2-CBj1^^fM)^{$k57aw&{-Tul@zg0&b-bO+jiE?YjC%-!V z<&J<xCUr3+RF{j`SI8n_CRVnglx<Mj<U9Kyq6lfW<8S?mhIQDIqnXw^mSCmDiRcio z6I8@HJphOp>|bt9<cO0)4zf4eHWGYMM$(+x+bgBm8t5vFZD{vzHrDanS#C6blw!@f zOSBgxh}0g>StSBQMYWkwofrNRC-qHu5Z!!->>|B}3xlr0ypHXAP4olfaTD|XeUJRj zSD-qG5o!l4oF=XzcOQY+7TYgyNI;8eVa_^cBVWBD1x3GA4V2o<b!A&n17~H1J_Lp} zXIR+YRT4YgX~a|8_jR2Z5}dT6reTs+NX5a0pPvg)_1)_I%?>ynx~~@f8=NN-P|nj; zeu;ATmtw@Pu8I)kQl*TW&>+6@XMk6AOo*^y^jWX$)HahDoOZkOJOo7Gi9n?+6a$#Z z!XX+o$lqP~;NNn8b&zMnw%E)$*(loj0un`PPKWelhPFsc^6R{W6^D2wmPgEl%N)5` z-Fz~}k(iq6O1cDAVRqS(i*1UPqcR4E4))hH^sC1P#0c9@M#V55$GNwLaGtR;yE7?r zWfhIJr$tQ?VCDF>i+(Tf$)~o-6N)e5(Um!aYJ)W~=M~Y(1W_S$$qh}~|1bd@?P*#o z^acU7XE2I~?x5COeYv^#E@t!3YyuB25bziE!AxHvmKB^P12R=3K8pz3q?O7Xi{V{F z+h*0Hqs3eeIiBl4-W@|&DoDXne+SY(qUX<v#qSsJGN);-1{r7GJCQ0eCEDRkz@rWj z!&hWc(V<p<Lv_!{Yg1^u@T9lFg@?sWj2ptvWuyl}zeh=Bp2@-xh3wJZzC<Zzxp1Yo zMg^)c5NqCGLG?`zGF&T#HSXn&=&hD&8DP!G=jU)i46QY1J^KX2xWoFSCe>U1@^*f@ zGeW<iFt0qk?-oCFxqDT<PE)&lStik%3@8$j>)?e*+B9S}FGP<(NJN+6nd1qNsr#3p zz!pZ;-sD}tEH=JpTjw~tQAZP8Qx`zIt$ts;3z|UP_Vq#(bSMS8M|5h0&SCMPiHAc% z#gEaee!^}W+Z9(7jF~HR((GNBLA^WDwZ6ypZvDw&YS>pR0Z}N5_WTekv(^S<uwYMa zW+KH`krcz=RN4^OIiv#&>i@$+lElCadJ-Xo(@Ip0%WOm($^`}nX3x|eU=Xto))1(7 z@bRuLRvDk16<eDIUK(7mq=Y{XekBX6Y$d*7Ds4cX9pUekg%V&ApKi=t-(|{pjaRrA zMq2e={*4F=hbI$l{*oLUh-yA?W<cC0=uN}gSW|CV+UgV|&U%)HND$&UZsZ=hhR-o; z^(O;}T#l9a6;V@a)ot`^r8)7p;V;gP{Mr>-uyeeH)T#E3_GwXr1ay$-P?p9fKV`m+ zf9^c$4|3EaRE~VuQ7&Wr)ReD5MqpejBaMCmKO=J<?iXN4DbF6*p{dp!IBk?sQu~Lf zMCEi6LlUS(t6FcgihVb$)^%i<iHgw_MuOPoWgKl7EqG42ZrnbM#oxwia*mTM32mh( zn!4~U&k!sTHUAy_wl+R4tdOPL>Qj`5Iv5(dwok2LUCc2)gd6VZ2;uTXx(IfFwON<U zdUJ{{1RfN57u1VbW<Q9sL{US>b2DTA9zh$_mS<C^rr_?}&>`{-Jv+6;b*^h4!VN59 z7LDUI9k&geiyb!zxv3g%bl|9PAUc)0RWnEgK=xfA${AwZSjt`g)0cxtjI^JlNZtlm z6$QBvy7j_HLE01Kg2#;vLhtyJ)ng-4zSuSqV5r90D#2uhw%QNpp3@5u>HfA=k|BdP ziDW)5K`3383FWqjUz_4ee=A#INrMyENO|~EM`~5%<g!Vm#H1NFGc{w^Yak7o3l8%s zs4cUp_NZNF3hvZ7L>rF+3CsFAw2>ynG@t;6sJAN&eK$dY0O!NR(lPWi&?&w!;Joqs zZwBTR^N5Zybb$gwNebFOEGlx@=9!HRN&)h~p%2>wzAaurzI{Kq*^q#`<!yyi7=$3^ zsvR?YU&I;Z;2OzUNyU(j%a%<NL{Gh#Uq4Ag{|8fmkq~@jTJtg5O7524aFjm&<X^+C z4jA3AvlJ8$__?d&(DbV&PN)X1jfpuLCrs_PfmT$5lG$8755~d-uXX4g3P%sCmTz?v zE0bEqg+UcZz(Y6TN?+%Xz}1xeq7UJtw>yew%ZlzX&XUw^oexj15ILqdtV-i1g&qeP zi3-NW`Po{4-#NAJ**cKhzLd|BzYqcKLw-UFb8R7Nyvvq?tIRx-XveR_%S_Q$`m=`w zU4^eywo8wN!d9SOL@ovZ=WJ|}uKfN-BD(7z4;e<JF%+YXnai69gwdA!zeGW3WpO*z z7rEBoQHDIUpM$QRTIn*qdij&be^M@z-F*t`vcF$y!=U{x#q{8jefhQI2o^{hR=Rse zi#DdZKmXBy$vQ<Rhj6FCz0>Y6l*6%~o}M<D7S^l@%P!v^@RPGb_~wGO29EzaF>Mkv zgL)7%ky0IF$hjv);?IzNCz(G*S?-~Jt$6el>8W?Y78L(0!7GdbaG0#lHXwWLdUzEa z@9LmUG-?jSPbLp~55b-~Y}0U^<v7b;!warZ{5Ka!9*dRBSdKvHH)z2}+{|KgR$HB5 z;rG*-@bGFMn{-BvSfc2ehA!-vPMWB*B&H{)T~ct;Cb_G!bLz8tQfNT^5|s5BN~$<s z+d4ljHRBPcN57E1b!wa>{U&&1N=*#AQ9sM1DkXy#iuo4u_6kI1Ii8jt&97lc+?@Dk zKV2(0dnvvumM4}H54T8eFJm<Rg``-YZYgPKJ)=~1N-b>JBW5uez0F^tF2qPzr9&gP ze&~bb)<&smj41W<R`pYU`t;930>?00vN`!1S12B`JWiG!z8A!s=V2WJ<v?w_bv%2; zc-gKbHR8D0)O#yjE3e9CV{aw40$7j8H}vOe;_Ul-oV+ECqC3rOlH|l%Jm7jw{cl=z zXIWdi279o4U;l!7DQuT-uQ9%lKJl4Oq%GTZlYCnW%9kA9BXU%2uW2z2@n`$Q<Rp34 z)CDlVnY{hzpq-_?jGMW(iEBDK9q2sgy9(7?17YK>$96f#IC)(8@Y+y%Y(u|v{3tr7 zaTzPfaO3oC2}yi5*QEua2vc2+jo^*YJd8XhySi{zXxJqk*?G789=p0QX2>GcCg|i2 z7m+L3j%@+~jt#aydbA)k+9AC=D`@xlbeavoJcqAstUG)?yGX3n=1vF(iv7g4dY^?h z;}WYwEn%2hWB6;@y3|^kzBw+h6z<XOHg%yW?ZimN{@`Y%xAlbn<X)3N1gXSGp@Q-$ zxD_krdyf=B9S&GaNOCpWw|#DmoK~F}%%-9Xjz@8l5=AFa!O=E>LES|55f8n!S0x?p z+MRG(P2FJ(oYe{(Dpub;j#8bjtZE97m$?K9suxaPbis`4LHf5<vphG@03{h)IQUmW 
zz}WYfluo&xgWxw9i<;Ci3adpp)p%0k3OT_djKjL-PrrPn(S*3-&Ox%ymWFOVC+t`6 za&^j{@A4m2UsfAFueehPUW%KGiXffc)3h5r`(mR}m#6`l!1MEm@@lXKX1!k7O=v1a zNQeTHDhhi<HBv+nQgJ<#f)U)g43)*?9?aQY2r_paS@PJ#8aIpbk$N56{1ekXq_h!J zD<d-4PEVzk4fzl#ITIF?n4}m@*BI{Kd-jCkBipr(kCARj6S3Q}XH1-8kyr}ef!|%S z^38<*K)^e<)*))S@M`VS-;noDg`l{x^^QUYN#7<IS@@BVSr%5hWhj@Ig9bb6M##6v zU~t@PODnWB3v@X+7zJSwK+#L2C~+oLzv0G56<8F1D5${4Kv!@{fLKpa8c_3(ZL&Wq zr7&#=PjyGGR1S5gOpYg1LB+r98!V!X=>3&MJ}W`1cQ3dPhRA1VqlFjQA@eA^%(#RC zR<xcWpH<XhCO`(hgVz2|DOh_$KBKu&9qL4`C@tws&4yIkS{K)owKROR<hKDOz8w$i zS>ckO3g@pm?Mz;mhd7I0IZs>p@=`M)T|^Z;1{Ln%s(#01VXFULg63?mDgqq=SZJWo z-ngG{^>vAZLK=PJg%f)<Ze$d5t`Dga{b5zDR4)CpMmZ(VjF@x|nso`6i>#UY$d$4f z!t&$_W_%}%jniQvyYL||&5#?gTV#`g7>LiGt>oB$&#_&b^GAvUF7l-$$=YDCk4(R& z&dU0nHg)G<UswU$XLxhBn2zEKekoeU8DYn9AlN<iS)3`AneS?A_9GaufZ!diD7Bzd zk8TU5r<CeRSuwXaNC6er5fV_wh2e8%XGNZ*AxT?#veg%-T)6OR{rdT4Dc`(!aQ~qd zGIPi7Bao7keoPEZY+OqNAH<fe`A4cV05;Vh$S@YT%--8eei+r!0;jIO$vvBa2(SoC z$|Sy4ADJ06Z!ec|b{=XVvh@;3F2grx<lOoZ5E}Y19*0ZmsI$UwRUW1%G7DOFX!Tc} z%x1p7f<{-QPC&HpDZN3!!I3Kzzm`RVG!dRQp*ekI2pW{pQwI)Hue7apWUniR4n5+H zMk((nt}@8ZqH9&D0vU}5FU=K7=C9lZ_Pvf$Uhv{s_Psby3TO&D%64~XtyP9O`aC86 zkjWT(paPpZ9v#(WOMw9y&FFgOO979bhGCPKbN9(8Jb3XQJi)@K1-7sP?|8E;-$bhT zan7|VU2+eKe2N{c>fS=xMuko2ZMGyXimT3q2LVoE-iCp!gEhn0#;=z@qC#Tu*={<! z2<A%k!SmTG`nHk>C$HA=+Y%BKvOFhctyV5+g!$A{ZxKlfW`Ap>eVCg6WK&IXcJaVc zen0{FSTf%<K$0cN>b$mi__Fwct<>_qk_Q}in?7RNrXlqZI<A~YCL?f0=7tG7n{~_k z>3>kMtSa>-sRGRU`zmtXK7Xo~TQd9G4YxD!VrJor({u!=JJfvDBxkbf36Z<I29Byp zn-^ii0~;9>@_ql7$<Kl7%iSjiuWW)dXcM);cIm5{xG(XzrQOFuUJhmyTnnJP_l-yx zm2fd*um%^ef}4VK%Q^nOLGl!YM>Xj~KKo}gNT=C}wV+a5;0?A)io8WQ*ku4Ph_;Zq zKuzBEp-p$2kHDG2riHQ0?R$3YpJ>wC{l_*Xf!Om>^q7x4$W;<runJ1le9|ag`?HBJ zjJli%zTA9WcfQe0(@vAsyTB2x_5DL;W;OcOHBfxKO+=V(dOIQ$PnR`=`#a8pG=jN3 zQO1(uhgEqx$tN{s+ixM<?By7!2|-JrCcJ5=7dMOU@;70j8HsstQlqd%sF^b1ln2k; z4>ETR>(?*x6-_mlVj<?%k#MiL$O(|#GU#qZfunMH@`L^1f=t?O=c90o=T@W%mV-;p zc60G#N8Bet50qGm4!*2pI|v+jSFf_z`&Wb<9-S47>tBaq<&U@xu8V7Bnkj7*Zmitf zeJXBVz90_FPz#O6aW0CB0{6~n;eE8lIH7a*O@eWlQ59v~+Tzx>y(})%t#vo>7YC<q zd_J(+xD@e1bxoR>&q_Zr#qJtDw2+oSmAtnm^1=WDvmT9indw`|kxs>SmPtCJ5X6M& zKt`=Q#lZ!17gDC@1~U6a=F}Mt^7RbCbXiloAx{=5GCQ(d3eMM`Ax<_Jad#H*&iZD< zx5vVppKpvXf6se+bweCc1&-Fi3Cz1zku5Mid;dZYE;~a_xte+Jfi{*m3G9LrGIKn@ zLSL<?5A09kVThWU!HE>jA$wtFjVTtU%j6ruH>jrLEGN%m<T7!F&c0p=TDr}4^xc7T za|M=iOT2iAQyG?A*LfpgBi@V7Nc4o^^{-e**@J1GS?<|GL_<f@bJxn76^eit%UQo( z#qRU?H2Bqb`NbgyvsHaLxq8&BgUqrkMylEQ0$qo(#+{XGx}X5D;%aFwSS!EG>b-d! zU7tH5=kNmVxI$D_8v&-LP!*Quz@ICBTwWiz-)iq800EN|<eyr%Bn=%Ax~@Qc3YawZ z5P18+$^zzpApgwyhIYI+aUWgaca;-6qVTGW@5{9*V+QLa5eg9<$gu}@JjI}$Glwzf z5GvykmqNwTB%3?oa-|sRitWuw#7IcQFrUeQbXnhgs8~5C0E}E!n2rkBz-d0XR{BV^ z>UlG7%x4nhs>05DhB~REM%QEkHmgI<OUE~uiXDiEXzBIIom%o0J~izN5QY6SH+z#P z{Y9N`pwMm0#Ea@$w6LXL+Z(_3P|V}ZDFi%W2HXQNQF=Hk?#t}#3Fs1H&GdGa1~gn* z28#@2ADMa5>%%9*vt0^?WbKrMKvU*}!voR_Ya<J4D@cMj93u87*t1cLsGlpibqKBV zyRfE>`!~#dl=Uw}?i~q?e>P1`PTE((*+akM=oGhAzDDj}KjzV4)SS$++DA^e(D5q4 z-cYk0?E#qvGOz}dq`j~?>9+!3bm@oZJNqiV(%PFd$GcK(O{&~Ey>fSgZFCJ8(;lmi z)^*4=wDRk6?pFL)41K=z(3X#xdSLl11OG8FjRJp)>=Iu~MALZyFVIm7n*;g@3D+%! 
z0lrm<5IQTw*LQ4kWY7ajCQXiWG}f+xZ&qoNYNkJw?h%N3{EK}HT#|kp`Dv;oeJ@tT zw=UMr8Ae|vyM?brM=YgWP6vyA%BE|c7x#imew@F=g2P996||5bl}qli!TYoJV9ZuI zEC*=D#<3+KmIB-JRtfL&o9?aZ&mn<Ui@sX(poKv;e8t?Qc&t&jO(D@b3xQWhA4=Xv z+G6twAOGB*TBApf^Xz@IbZtCZZN@$UZ>;EUun(*^qJivBBh|ls=i=t~M(I!#*G2^7 z{9w~OUcEHq>VGHvu^z`<+$w}G>UU5P!Rok7;44hhHm*`&a=KU1a;7!9cPn8q^xG$3 zrqKEn(5gb0!MCTLVjTa@I0E>=2v*o{!u!mIcPH!97_PPUBmYra^J}+Z>J!$|0q0x; zxdW2QuB|4*`wTGjcT6VY&iB~^#MtxC^Ht}@<#Hnm!Ef4ZhQi&mSThP(l<*5x(38tg z7oP`gKaUq^N%k3t+T5QV0X>rJ_iW&Cb^B-|WgUIAO8C$&u2~wTAfQ6zskZx+H6T9; zV<EQbZwC_H0^yAhn~l=vS+qTg{|b7hMCxbDPQZ@hPBYYbn!zBl79w7yGS<JEHwm0m z?{HMA2(N@{I*PGs$*_(uRW1t+$9Bbp2VAeFg#|Gy*x6mJZ7-i?50NEmk1(wab5*Ov z5M=oksBYz3lQM$sG39k2h7SkPx(E>-8qNPx=C&lL(2&8&ZBx-VV6Px}{a0yCiP}9f z<eR0{gHv3S**#Dq8=Y*S30{=fsnmfGvSnd}c)qoy8xHDSt-7*!k&n5e8m?7_^2yXr zn2Jq0I+M9uOukzAUN=^nkP<do!l1J|Q(d85V4A?xbNx}l<;$S;An3ZHv1)!N{L@;E z0fu#3$<CJ1OEWp!Lr$n9VAe)_(0Hdh>W|49j2+FBvG`P$Sf9DV5P~-$lO%lfi?zMG z!lJx*^9#R1X8HoH)(-+k@kX9Zf)j}N3vmC0=6%}Y@$x}2gtJK4`!U02dd8zuslG?J zulFu*8|tL|F~-Wpcw+^L$aK<YDm_djVCFB6f+>A><57cXZU<20!%8NLs1ut;ktsEG zdVxj%6=#vxE5v9au;LI`AUkOh?38dDDRB{O#6%`Pq1c<40wgzt*Bp(Ddmv%cN^(b^ z52KhHH?v?N?AZOUhEj_$`3$H0h@+(Zac=Iy^Kopw-K~nh6o43$w5Q61n>JsY+%keL zDYIN8&aj_h&Rv!nLrfw=X!Q)&M=v9ejO=V+Ingp1ehSIQAcPvpVSJg>oRh{k?oE!h z=+T4}7XOE}cL0*5+4{Xldu-dbZQHhO@7ZJ9wr$%pdu-db_IU0*=RD`0d*1uT7jNA7 zI-<KevpTD?R;;Y5&ieh=T58&BlD%>X76s17=ScE1dverJ@td9~^-=Hom0r<Ko8^Yi zJ|zasClQ>!{#@RQW&m!S%fLc2NAy(BTMx$rEg(-_p(L`n&A?qf1%m2B$AZ}e7s{@M z!*nZh#KXJ?1tWtM#?BDe3gsCeiWIP{;ldC50$%kZv=bzp8Fi<@40cev1^S?jN806y z++a)haHD`b;7K@xYd}*|(T<;4(VkP}m_CEx#Y&~3Gtu6sO!=wqTu`NcPJdp#!9wXW zLNhi3eRx)0O-@~eF^u1hw(PXbDWJ0>cjpRo&l$7blMsq1g@9x9zJJCuGcj=*R{4d* z;MMe%J$Q+2&_=NHV};0HOWk=k5-l7XskY)cWTSSu-otC;*t!tym}EZW*IPm7Q~#@~ zCp6OnFbQ)oRk$bfh!VpbE&O0rl2zb%2fsECM&JR84!{oadyi2fH|O&g;NYH|_y3KP z{R08}3n$}XWn=jlE5^X_H)Hk}EA~%<Q~n?Dsl0*XUl1t+(?7W}MH44G7e^x#CwxYR zf7S@u**bs!;=AH6-0YiX{SHcQVIXMd4ypB51sezBw;q>K=l=jd|IzZV4*az*c}F`V zB@<`-Z;(x1SoC|}Chp$|>_5oYZ`ku6OQFA)68Jj*fxZ6KT*!$5@-G%n@q7FDbpM7< z|5_*J#DLH67n%Ccm7?Dr#s8~bl$n+upYflt9Gxh`ci|t@DI}dJ<M(%lzmH3l>94}S za5<Lm0@FVWf9>dBtnOc%ldxgJXZ+hd5;n|`jQ{M<cRdR}<KH~1gbgb`(?9L=-TdDS z>_6K5%M^bzu#Erg^8btqW@KUgHx~RK*7>^^N-_pcR`^VR_wWA&4j$?kgZ0IS2k8C! 
znwPXYL!f6sH*td?BSoq{nFiGPI5F=7v<>|%%g|9aPSa{6pX94KXB#Mglif1j*MxlV zKkRLzddn1*W<6Nz{%@S|U*s?w+rM1#ANKpNHu?XFl>Sen{9XEgG0J71-(QUM0RCg% z_6~^3U-a~UeN6+LvF%9yH;4Q$ewvx-e;@L{2+zO#>0fT}*NFTcPX8k~oPmwy-)_MA zO?>|!bojsA@q3s$+S~m_fa5zmx_oolf4j!t1L40~0rJ1+#{Yv7z4Vf)!BKO?>D4-3 zcq7(W^da@h+e*-+Q6LnFi(vtm7yvdy#Gj9hl=gz9DAOX=dXFDzcbkJ`q}O-h@>_S* zcOw$QfPiMhrYKNPFD&ZCa6UWFZ1}VG)(LC40R+Ag5fSp|s$;w9JhN+A*XnZn!`5w8 z$2Dcs<ll?rZqGkGU(;XDIbY{rZ?BJR+c3*^Z#mmvPT6Ly-uLUbtKHW=FUP!#e4kI} zc0D=UD=phHa$MRyAK4q9AMZy!5N^-O8wQ`++a)>MvBzmO+jV5pHQ#x}%k5r1R@Zak z4z59b(z5Ma57)Op=b!v{52t%7<vhLihI1ZLUZ%?(Znn#N7~b2yt{V1K&1|dQtXj3c z^qyiVH*jnAojksEHD~*u65(3s0~OzLhEEmlpZH#nFHfHm!BoGpUu^KYK9tsB<yd$O zzF^;Sn!g%0SL}_xoLD;VrwX~OC*HSDhj$6)dG>`yoM7D=Zc~ysX>|9Q)b9ch$~Zp{ zaJ)D?rk|Vb_}()vK?f|Jv$06fNB(esRS<^!f)0Keh=#+9Q3y}wKrcrIkcDbvsIKPC z=Dov)jw;B@4-^JgNZGpCU5`<{z=zdyT(vy`qzu+a_oiES-wBql+eJrtCr7@}(`hZ< z$pdqM6hYLvzwf=i{g_!l^|9cW<!ix$&hf#JWy9-Pf9Cm}bE{oF7d0UCwB?1#r+^Z} z1r<yo$gKh6>9V(9h6A^^iw^xE#)}z{7fmN4`+gS43n82mftLpt?7lW<$L=|O3etEm z{Ryt=t$No)^27H~3Z$BEUa86AF;#cuj*VWV0HOzZZw%Pl`;H6l%VT|g(36wxAj0OT zG!;14p>{_u#Nv9>?3QO9B>;Qybo4ZQccI~P=WPF!2v5gLy{I$;I0?izA|%@u`*v;H zk=+(4ILDl|L0hX13f<9J_0sv6wh};xyZhmw$@3I`ea!iR>lHCMNKZQ=SnpMY8q{|e z?_jFA1p*QJLAmH0Sc9CKhwaaBW?pIl0d6ZtrXls>yZ)rrkC+e!bit`!Rm#_P3d4#A zSc5+fVs?D#$saH3nYS#NV7_fA(|gX^s<&%%zNf`XTlMrSGgZH*w5EeENU2Q?&1K{m zZ54GQVTcrw$CVP-*Wp0hVW-0|B0%V(hdgltzP+>w9K~Tl@s+^!-5?{C_ih#$Ix3~Z zcfyg1=KD`hSYlh#fSYdYeI59BWbn@rrECxhF*Q3R*g%v~A@04}y9SVYodJLPtT$F0 zLLAZ4)pu`k$aajJwvO*8s<|Y<awv+;N29Ru&7WPd{K5M6^@zKV;B8r81AXc;sj*C< zn*<j>WX3rAMNyG5e(GQb2ylS>7~K_$PEiGI7tqRv$JEuBG1CfL%iEzKnM57a!pdfK zW&l-P!(!#m+k9oceM#ACz3KASo#WBSnNMrwJA$5X5wi+OoE<_b&a?WF7*65Ws5E6i z2DuVW$8Ova_m$ofW<Tt*JAQG(uop(6F)>O3!3E{@+^S)n3()x9`rFdKk~k|_oS$YD z<wB&R)&B9is^{=3IUo=6r+d&b;{lx0rA8dqTN1&Q*CNElj_x}~pI@W)pcqV%Nk=pU zF~_aO>d1H+x<z#G`EDXpBehZF8;}?TX9WQGY3of>et}!9%^K+#0pF=OZYa555ghUm z(7UZg`>$k4<%SP^aWP6`aOx=HUtVKO0|FEw`l;KLB7S>@f|N3MdclbNK&t{Qer4iF zZWwT(N|8|%e#8=Q(D?kf;3S@_l=!{jX2C(^vIBHj<EjDG@a##{gzEV#Q|zfEbT-16 zKi$GW<`Uv3RTw|4)YI@<k|4}dx5%WJ@S)KqW1sLn$V))x;xh#VBWl@apc6n>68VFH z*JiU)s-SJi;fLFaaEtx*<&kNi&CW@ycRyTw8I;2M>Uq=nQtfl<v!XvUnG&%<89a@` zBh<eH-I`<gRo@5EPC2!vExgo%Ee*#o^fyt%{FW&vUy4dV>iKYBzbnJ}T`LLLayg)V z6~xqVlioe4LF6NA4Kl{0=j(a;$>HI9Wsa-9TtQ)s4c2E(`tB+Nk~vHnK<<w9hF!Fd zVgVSHPqXj8wYXlA6%dNP@Zx^EaNUw(^wn_ptaah`%pp8K!y{^m7djVLGIAq4xjE6; zN%`e^cRhwtoqA&}Ea1PWL&z?m1((nT^ZRCpCTj6XXhgVRG>T6K?C9c{T0`$=E*P$+ z9IdiHo7^rf1ezh*YMr!0_19nqx{NF^Dj;YO4!J}E)`cU(2dH9(e|Vnp76uBqW)YT} z3s(uSB9q*Qc+S;NW)bWLov34cb{JR<MQVnsF2D1(K#BQ$k{CN9uod%wFF6u|r96=i za_-crn&8O6#U6d8G8Kv@EluPPk=muyqNUq|HUYB(#{U2V!?$4WtZ16Uluj0s01jQK z5BSajs5A?lfe5D^r`p-B9rtD1ldiAfCE^;WS$Ts($1tp)y3c_GO{tj*R6v+J|1b%{ zuBm6X@@DNE_$^T(Ry1zBil4;?DtKrHz>w{l5(LcRY&s{7M-3f@EsqqGQb47k(W%Ou zx_gjj`E(xdi)C^)$PhOW`XvF#>U;nzYluLUW1x;3W5&)IhbN9Wz6Ou-YyGtAaXs)t zcrc-B22=XOkNjnfE@K2aNOgUZ`-U)i(E@)sQuL_<{q$SHz9t~PawsX~Wm3&xpg+3H z3lSVn_f}^EqhT^c=(i96XvS`A&m2VpWDUeR$G+yzhm9m&dP?1lrh&z<<R%Y?I@V_r ziwqN-q)vWv*0?u97B<HMR>^6eITW3$Q)(rl`V5L{-1b-!pbH98?=y`u4()Y<JfoX* zEH*jIl+`;ML%9TUtTF)skgDt4_YTfE)WAp*f&f{ISs`2nF7DC$WQ!@AykRpG0ZWxX z)J!dFIw62R=c|z7bXfn9w9CxmJ5oO}3JkN~ICy7o$QiU1IKWb9fDgdqPdRU)83=d0 z1>#KXH7TjEv>)8cgY6U?ilb2OOM)PHQ>uP#2Q9Jr(+n^H(zkL_yrSTeey;XvCtH=8 zS5<%Z7M2Rg%qDBYF<j3y36j5`<G_5txh0e-R$m`XXTeu@e|hpC`IH|7B>tutGn8Bp zVhRN&W5Aaw^`~3$4@}k?nmm$|5Or(F`qv#1D=}7onZAyqA(|{22@6L{Sc~K{&2n-i z8W^C_J|;myB#64rM5dW1KoLJ>T9S8KNvY2{LSppsAhA0E--1Ke*qv@vDQ_u1e^;EJ z)V(1Tjl>|FJEA@#H;q(rIpFY>3HBzQLpJ-+DDq+I*Zg@5^YivG1hI`IyTSnMG!!O9 zhmZk)z7(3gt=Kr#u;9iR=}=VBbO^HhuRz%iIDoNS>{`eBg1x0Eg)o6t!tp{uOX`*+ 
z64GSkAOfsSCX!M?*X2nl%ow4{FC`8EN?Ztza>v{+&c|I9F?8~4T0-Rzb6fdcVJm%s zY}g&i@>v>HGf2+<vPHk8E@mxh;06kyBvVmp*mJRD>#o8;EmjNKMVcY{HH~E;d3XwP zw{`Jb;ENN0<2%*WV;7~t>zL6Ac|?^-i0P_8039Jg$J;D}@8L>V1TJ}s1_WZ1#p!A7 z)5CYOByst4&RFAfg7l40zf)x)Nl9VyxuqQOp9QLzV^4R5QJ@b?rqPpiC5rPW#5J-2 zd!u+D{mxTJTGh6P3TSt+G|-A#UdXW}u|NU_AV>ICdO=I@#x1v2#d<rd)g~$e7DLK5 zP>2`IJIcR4IeLOlz)?w1@9yk=PDDo+k&I)oe-SH?Qw3YI@9^Vx-`5I~HWNnc!-MqP z&u(M@oDK%#(k)4vqewu>U=f;deo`|*dnO6@#r#THN<d9dWx9~JJh&_>hgb_#<RDf@ z8<btzN@ZHf%_!BtIcvKU@yj5ucZNT7aiVwcZeZ^}M&ps6%HbdkT%qt|TM5cc1GT69 zjq3+V&~KZcl>F@j_Ml_Gc4?-fP|)2merlj+4s2dg=aq(68nKr(T``5lI%K43R_l0z ze!`O}n9mW)?WH3`!*+!!fn9is94^<IgfPCtf#>wWlWcN4zIn1wamKy7)d@gkjR{t< zX|=@=H^(nH3TA5{ntBT)R~mAxVFAAydK3$Ij!>mK2Hz@KTMp86rIX*?b3Wpw<)0;@ z(Sg|e7MsyzSVdt4wg~p<iyY!+L;?zJRl1k^LWx9-3&v?iWhEZ=-x)q;Q`i`|nRMSW z@a%4q1hzo;RUmsb7>T>Y%3g%%1F&lbV0u>NZPbNh{6fM)I&{hB(YbXgQw+?!KaXY? z664o#AZ=C{?#E2|Q9Yft1{e}OtPGkQh;9+K!)95jN@MxDKu%J46@Ao;r1CkXNNElE zWa^O;O00wcQy^YP8@wDY<1rp;-NQr_Tc{v%heQexC_U%o>^zJCsYo2}#iwGKe0~a@ zvaJ~<qJgN;GZ~v>LYEPr%l;%1KaGl{Kne-9%mw9$PZjv;HAVvdSSNdsY*u#%X-577 zV$}fzhHeB8wzTsKS^fuEGtG1ZW;5?YAN(p5seGKm%ZhE|8oYv-hLq@4*KFauNW>(y zP%>0ISE?r==A$$I6)h@3i!x-$U_~&NytOPL=D4|CJQ5Y&ImqtMehkhG;+O1Ufb)v{ z^niNIV$nq+6p+|;=9wfwqI^*pY_=^B){FGLz@XwqM*nBx18)LGtwJM>zTA#3`Kg^& zlwdUbgyYCV3^Q=b{wDq6pZc|f1)@`j3v7I;E=U>s=+H1$8H}1ZN)c~?D$#Txn&Usy zc#ObOGVib`%J))L%$VyNN!asd$(=+K49aguSE=I>QkrW;oROfPYD9j+Gb1xFgf0qy z@gnWEAwZ%T^H6BM4ODQOnVai;Kkj9L2!l8%5cCzijnFwX01-Na@`#S{T-IRIX`XpJ zVV1ECF~n9)c4cIxHEt)k`Lv@Wt})W!RG~xcW`4r{W4flrHZTiGxGS)%N3rK2wsQjG zWNX3fEI2De&%YmfkI_2kDx~tXTY1608acoFYPefT1;W=Hn}5or8jLJRX2Qs(={*BQ zSkCBo%Dm~52cpP+<C|L7zg1V^4wXSilsof~gx}|Alx&budp~wlJD0xM6xe`Cy9hzY z{3AdZc2NX99G1v?Et%xWAYz+8I2Y1J6{MUHT+xgv(56&?x)1NdF^Y1!D6!t;)Y2kj zzO}R|@~*wr3z(IB^b%!xEbRN1oU+{17(zM-mjV4C-5Lo}CT7h!rwnXXfkKHpF-V>t zd8MEb9U0eT9A*!o8d{bP(k^@sEBV%WEHgOz^cFz}d--W}z6^(TPjefYa7l&$XomOM z12iiia(!_keL%ni{~jd$9$IHSc^6c=e>nb0RuHlgPQ_4Gyb6fo*H0mV5}+Yfp-2Zw z?uKBj2#xxN#?v7EqV|pj);J9?ZI^saGLiCkc<4xA3TZX{Z8@F0V2+m9Es#_}eYFVj zPlUB}#1R7<zH`XF2wG;vhoDG|0tI`r!3blIUe-kU@=`|6dKjBXaC&`xT5!%$QN_yb zQ#y5uLCgL~X|kp~X-}+FT15M)4|r&V=p}py$gt;d2={qHr``&PQHS~oeV8Yp_9{2Q z={X4mZO%T3b{4))nMf`b4G4e&0@MN_{3de9w-hK}Ss3v}Y_uMxA!Z)VH>0WAbP;A; z>V3*M5=G?%w_z`91F+qy2S4H!(}AC8FgEe8WeO^}TUAu85TG}5);DA1?xg@lOU=~o zk)UEq{Y*T2$@_C?%qw~D8srd1wiu35B8W^DQvjQn?!obv`AV07DoXN;Xwe`81-boC z@$6*UytPL-{3TQ|;CaqU=39~F3df!sL0qLcE=~Be=?y`yG|m`d3HpOmiukBizdBcX z2Wtz{$x+E^<RGwQ#K|m&CRxh1o&q3$iHy*|E3nux-1JHbr<68v6M_@7f$@7&FPj&d z6+}@|rO*CYZW~PUvE;>^%0z}Kn=%(Sf(%x@qvT?tFSJ9yz|>c6O<jsNe%omdxug^{ zd{CkXKj#C3^=mSxT}aP%^U=ldha(mmGO%z&N$;ia*To+MB3~$r=M=IBO(>!#P*gxP z!N%-EFd7v^u&7p13sKIrbPJuW_woTPanvX?{~_7$uKHAT82-X^BJWhRQnh1SkGC>A z2sStSQ8f2{7DP}OLluz7y#np1ZoES=rXP28OEnnU9*VB6lZ!K2SG!3+SHN6Laus(@ z;xDjPMgBob_TzDupgFYyqN!au9diZc+m10x^!OE%houPV7V0@n@5(G-Q~FRi-*N%w z@gEfunWStZ@~MTk%i(tLvz+SAlzm{*l6Q5D+D%Zk1r7e^nzf2;jBIUPY3edFwvGi4 z{8mw9c)KUa(3t45P+)nea#RE6`vo!H<cz^m%tGY%#?OigLZZ<{hn=DFMz)X+{^RGA z2i3u4yvx$Lv$SOS0%_9wltEKt?i6nuwCJ8YRF?8JGn;9L&jcv5nTlePDGq=@wS*)^ z0zpxP=i~<DB48S48kPMP7tp{EaYSUy2#`qeYavtKN;H}nN=Uy3aAZ<28G0k%key0` z8A~me<3R(KJvmC(NCeaYynZL`vV#f{!4%h~m;RiQz24m!D@ei*A#Rv-rNK8TViOjV z61JQd&&D>i7y<~SJ)SdS5rwhQaWK?P>0?8bb^L`;WikW^(wn93Acp$#L2s9z>7OUC zX9z=jFcKvl_&s$2TCD~NiU|O1GDnhmTZKCjRUS1P{Fch(ikUuPo{B9qWL%hT(gC`G zRX#<S(1JdRRy~5u*phM+t(t_OYr_)1U~8@{@>}@|UaHg_=6SKAFp&X0Sh9x*wyqtd z6|svFw@4qYv#$S+!Po<f2%+SLY}wWcuGDqlTP!neMmtrc3zOVJL;=9~)WE7r!0K$u z(V<Tk3@O1SUuKK}Ke9LiRd0<Lh1}CBY0yl+lV3w>`cuc;z$m2s)@5(%8;{;gXPPWk z{9vHQV>1f7l{KZD367v+-8sa9bx6lIYRqFLohwpY_n;}(ByP6(X=TKcX|TFWBoFbb 
zdCc0nl0hDyKPCaKM7y8qk(H{<U<TfoOSlr<C=L%>)EE02<3}k^Ssj^?2^CUG+d;^n zKl`I}1g<JUDZJ2R9<oVoLfd)Bwb3>&$?raqGMac9h%SA}uggkqUu<W;lv&Xv0JOic zukm|8gp?^9qNHRG$GxDPSn+s)a3+&0?wPII`{rq0AOI7kKSfHiLewQd__F$m!{;?A zreHbKFS0gyoGWdch(a%n(MwVT7g;LG=EvP(OmPEJ@@g38Lvlr;m$IC@pTdsat8g0T zHW;?)-l7UQ;NC_~ktk2JI=+MXVn(JK=1x)7^~A^+peY-eVdLX8NFV@Bj1sU`Lk~W{ zVeZX!4lPb3aR21QDbnX>Kx0+GQb9V~{@Ca9dK%vzJa@W#bM_ju^TGA*r)vv2{(OA@ zdO_J$_;}QNU!R8J0YJR7H^|cmnDRA7=bVa-Bup=vWjx2WOO}L2LA={4Y~q%tv3BbQ z3>7Xfao&r``CPo#Z5n6!3QAnAiud-silHv*M`huX-^QX$$+(}`0mjfd#0kA6af>s< zKkYHt5*G9CTb3l){~?4M1*o^FPrv3)EtETAQoKA0=*oVZ2Pi%xR_;kqyzIawpBYou z4t%MEFj=<X`jbI#+G<&i75&O9F2m6<jp76`!94Y**^g#m5d$jvW1&eq;?{M6Eqe$n zCtcD^*~LLr1?C*0;CDwSL)I^mmavLXIw+G*mYkAR+N0^6!uUGFj%K7j{0fNTkpK(Q z1q98p!xe2Qfo9`m9K%0;-7ggw(0a%(6s+SMEtz9&h@RCXxBj3VNu~X)4%H^W0aAfX z86#>|)uo)@&z8rEW$neoiG{l7r~(dUsA3POnU~|~?wmIQGEq{a_2msRm}@5ncH$#d zVP{Vrst<=9i<R!YEVW6Fmcf<k)!5D4Ln@*7D^)VHY?3uG7@<ixbS3*|8PUNzc;gfo z7JYXUz!ZZfkw96a)6P0EBP-O{&<pdF`W92%n!6dFB~LOK4awh~G%@<s3B{U{llDl@ z+*SI`SB#%W1|W&u({~~YHX6~M%0UC<9b6Zv7p6q_#(#1yj<Hjw@`u>kIXD2P>6#Ok zzN@oxIAn*TA8E$x-;_sZkd|J+PEvJ#hYt!!bz8+aBIk9X`SN({10ed&VWSXM=WyE^ z*mn{JOhJ~I1<X9_y4WaFsc7fVV0nOz?Gk)cfd##M<poI0xrZ1_ib`{WLt}>hCg}tb z4RMUy@R{&b3(M^Xm|Of17B($kI5#&SJ}GQxRd|bBu-zF?HGmC`MU;6`+G$7-P=i#1 z!>)3Fa!M{lSaTmA0#Ej%6a-=oZyT@d&cP!qx#^5);aREe3`Goo5io+GMSGBT1xk!J z>}#MMrmq&%T_8?|MYmR6Y2PuYZOp1j%o`UH9iThEPyU1mq0IPKolXC;CR^UpN{r`- zg*3j7ttkdupaCq_Z4hj0KIG-5tnxPBMsV@=*W(A@SIyT)hT6Hu>$SImo#&d~WE9%R zbQc!g7*>v~8rSCrmUs8_Kv1R@vJ(npCbG8n5!VC9fO%UR1bE@H`<<ehSQGtdTTv+R zIDhP-J0n3{9t=9Phgt;WOP~ALv4~G~@Xr}RugH5eMoDIu^cd_(A+@Q8snLyIyv(gP zHoVW@G<Ha`=v+$F2=lC*7etf&w#z_H$vCw1=7!@VpX>!=^|!wrb+gCQJ{fNOViw90 z0k>2ACb-i?*{btU&(q?xBxAw1rA(NOX-GRO2CrKMKWB5^R~4nn)F^;?>D5XIy3kCO zgY;va2<Z_Sjjesqk7WWa1?Q*Se=3qSHaM(l(*#*e|G}|fOT2DoYbPS%Q9>>m16|e? zICn|gix83WdBgh{JzghksXbAl><gCpiO(_6a8gO@fiZE9p*kC$);}b=7)%^HircxT z7b^dn@Ls4uU#+?3#Vnmv;66@1jc@L59+|d|c7WPjb#sRmasl*S@vBEYe0paZLq(OL zWBEl~273g8aGnD$Z8uo)+J}Ap$M3Dc@t9tv5Q4o*Y~0c4Nm1j1TkHcV$7(;zKy?DJ z2^D{1+GJIh{Q3gj=Y+Qbx19IgfUnLE8^Yh{ei$jwDi<NBu6>QkbJnU)JSgXUMA8C5 zNj#zThiF$pd4Xf?Zfi5xbspx9vQ_;*Rx%-F`&Lx%0(qug)de$>YnHF%^d2{ZJdhTq zkwB$S22Wp!Wjjgw*Mr|e*lH^XwSwoitTEM(MEanhpkz^JT4`nXZ#iM}&l+zap~zDl znt#It)l(}!a<(ArBegnKgD;+TeOSp&I%dmi3U0MZvfVfrDm*g3;)iVtjLw@20PYOz zE|Mh&$d>bfKUxCZqXtpWmIhcC@=seOKLfM~CL}hGdKS^;>DIR%9>7AhIIes8yfnR4 z3_U}q(E%P<H|=evOmK#4SuvQ$ll_7@H&P`#fk!{qoH|GrMRB5F?bjzOg%Jj5pfJJJ ztJ2VdDwMjCIj;Sa<!kF{S{f4IncBT^xXicG@j{qCFBPYyJ<PXpD;<_)K8WnBGoY=> zFca%uHFD607q}&?tPJs5B;3#x8PTrD799Yc%_Z92xum5DN+D8#y(a+@42#+K4z;k& z?HMRl)6?U&k0iu>l5C-Tcrt_X=cM?T7<90|Y`%rQD@QB9Pe1(0FG$LZRj%}MYy7}c zA!<EM17uhd?NcJAXU8h^-wr9?6CYjjP#(K3`^-XEbj9xO3{?Y<9RWDYfhecyG^M3? 
z=?k*HF4lWAi!$V(*Qvn>Z?L{QhAa*fV96Ay1^#G&Op*G$HGBFKZWyPYv|wZush$J; znoY)~!Vs0C5{YZ~b8vj*cAk<&SrrWyBd3Pdcaj<E4$p<(f>*!BS{ZvZ($WQh#Ypym z-GEuDA;jb{25hq59?0*k;NJmka|%B)?aI(aAxt<0$tgW3vM661#fBf`GfTE{E`H^x z8@P1yPw|I@i3bMSAVnc>@y@p=>*7d(tdrpD#KCic20S3mdyP=pAg8mAZd%bmn4|)w z2CM8%R0T#si1VfqmMOgPGB(crK6MMZJCfqXqrn`Zrmux=;fO9u810V=&^54DPP-2f zAkGH@mRkq0P!6kXQkW1CA&Hp85hzj1FfR|toCpzMzhKExTrH801)){W94Oev308IK z$g4(hpx!4W0yVb}-_mnsHmD@!9KZB0>WeNd_T$CrD)^1GJ6w_?cRf3@;M@@}3>L)w zq;6H2>ij*i-d7<?!3UUMnbakn^>QAHfk2e1gKHTgr((2za$di{y6Y#XO7Kdc(7|iw zj#=WbO6D<7&-od)M4!M1(hzOHbCDvsMHRQiFDWN>3eK#~LNz&03iYEjG<NMR)>QR) z4V)bDju-J_bsXq}j7!#Mjh72kVkC6A$d+^;Lb^EDB11nzY6%azWxN#x=j()w^dc!m z@0VUXpURjvwCb3GXQkqK?y$s@U9n#7x``A<D?VhoZ22+&tJ39-K_yPHX1#S&9&kir zvPyD^-vR3Lf`D*Brq(ydO%4jhn2>3UC$6^-w%zj-Xhms2;($pp6^01s(I;ELX%Ds_ zs>-RmB+$f4*S*)d!O3K&aFp=D$bJR8AyltIorgMtQI3SIMWN3OH>B)%2zij4uVaNH zH|gK%fDya`&@kAL>vLo|h3#uQX_}zIJ|XxW-{|lsreQFntXA$qB|wZj1N7SMs8_jN z(s9x6cg>XHO~1&?E#Kvha3$kwTIR6eM_mLpJMlFk*_Q8kRx);ly_nEZ@df0RNsJ6F z$EF>hpKbu><Q>^D(s#$oU@ScujAx~z542cd0cq?dLx*K_?HE7c-RQbL{k*P`u+!7h zd^>x`W<K7>@iNPgDrBdIKu6c%p_j9fL7c~KOp_F0*Z5CMv$jgcuDbs4Qq=JEJP*f& zs2FzY@}_y;&QFh%@xG7i@ydLiU87z>sL6d{<6YgmDSe^n*|w^dJYXE@(OT6E={;-} zEnlrO$fcGTG+PA#&WuX1X^G%>=)GUU>E3gg@gblHTD$C7&x9<o>mg!bVm=%<Vh)Bp zb)C(pc2n^#yUX;<oh(U#^@hE3%hc)Jvb600Um1|AWYX6YUS$QL=V{&Sn~x}S6>Ny% zC=Ui7J<z%RnPqJWBS610F`--eWAJwUV3syQqYrj4Wl#i^{+k*3gRbOH^p*#GKVA6d zD@S!0y{~8%i`<4ID`*UFdPq%Z3>BN(WhJk<?`o4@PUq|b1IHr|+HN(PeW=&%v5!`l zYt7UsHu}MdB?Crzk5DQ}`|xBIv3pZDkZ^|Hk7>O5i$8B?zfz^#?}OT2@;hG~uaCy> zjv`LwxHcHIC*QYsVNF^CNLm8Frr6eAIKRf@&1!d5UI|;kYYhA8lDP*SR=RGd9uD`k zRJQpsu-_}~BMz^0aSjq3E@o9*!sI^ZeL5Zn-~w%;Yv7m0S6NE@_vWzplDowF!k0gY zt%{&7hI$dyD8`^ttzsobq6$hCeez<CSP-)ks@vuau;Lnr8Nf1>eC#jl2$l|2rTIex z9LDu2z5P<9Vu(PS(V?fkUo!UD_!Mt{S_~;cXx@RB_^^9rcAZaK4mE-qm~k9~Q?VBk zx>t^fQ)!#r5PtgLHc13!mpV%cu;-Cg&-h`oI}FpEJ;IIs;bl1C?u@&?IC<57A>OD` zMUwmS2NkkqZMTKx_)b9O#cWT{>9mB?^C*7<!##s$WfC2kaz7HNkL2~m=+tD40LBr6 zeQ{1%$aFeZBUv(IHfb}5xeh&|=x}e+mtY|7SIOvxHB4lSX6Byk-)C0KC_^#&$G^a# z|AJqy^DZ>jHI_s$4{Ow5{3I-uGk7qMODqYW{cKEHw9h<0-@7DEki|u%pd8v<f;XGa zh;Hs6>*F~T&~&BVP?k4?>-+J(<s<1wJ+gT^*fch@*QCq-Jnvxwi;=E#t{GH%9}$Ik zT1MTTRApg3$RGJgS_S0_2wKb%*du@_bnvU<kcoS`wWz=p8Z4_h1tw2)p#}i3dbr>S zJKh~_6Ead6cfagin#0HA`Hf_P#%E&8$$B|gRsuvv#?cjk$9mF9a>G>GFt?n%<Xg-z z1=<*0r)v1h*_%b2t``OBR^tzJFzkd|F!hSLvetE74A2F(TQK{^E_7O=N22}EkQQ8N zrIAaG5_Vak;l*bsGjk7_rEt&LaQf<V&-VpIS=c#r3xc@=QrDj*_2ro-lJ&p$@w`CB zyRj9(c0kKm=5hyyRD<hGJbT7Y<;|g)9Sh{Z2vH@$?7omW<C7BejbB=Hcl|=EGTYOa z!~1|9BDI4^2oihw-R-T>lSR>%Tboj?9^>d)4ka2O3}_h@>X?iJ+fke%#x7fG-{$Jk z1N?l{%U|Pct0jNu^&Ahed1D86RK?VAgP<VR8X9+KAlO<hnCxQNdkc?-%N31yR2dZ= z<>>CuNK)ALaB<Tc$17y&nsJE3?$1iR)Dm=s1z{V$%{k%|_k4)sXs4zL(n91AS)Dii z`BTl8!@IN9F)i*(K_f_chMXserd7fTdSlzlm%^VPKLf9YWLxSm4zLFzZx9#5!y}Z# zu0bCNHz0gK?m7^Hl}s%3J^kG|-Ru+`<~jl+_x_Sd13@OLqEB3Uq2r@U5lN(MrJ9b} zE{M~W9Z2uos8C!)-it=YC$30>;<aFio^#^^Uy%8>nHLiXsCY6WY`yhnKHqx?W$2ME zu?cww%h@8>+{Fl8^Xq=-`~FM6I9ELv;<>Kx-UL;&>)t_o{;9@ijt;HqCNL$C#6=|~ zm|!vNJNUM{WcQRM4GP*uW{RK-AryQHgQD#6J{z98?FLzKwH<X%v1X{BM-XeB7og*K zqYU)$=QTIzOW5GCOS(e<PKbga7um$XtG$OF)r&*aVfL<WT0vA+M5empR^Ktp5N*K_ zs3{BUAq$q!vRYj5OmaSN7je{-^jm#EAt76@ZADbMnGrp-+3g}(lI`*l6CpC$5Yhdn z>@`mJJv&TT_h)C12<1MpZq*(r2$DT_3P3!K{icc%g|bfpexpPDkO0IldZ2*f`rk&x z#JD0Wn8k|#?g&&i2P6bsHu}SczS-?npaYFE($~9tp=c`UU^-P23hrw;<o@$GV`;-o zp5Y(o)2H5fO;9*%hN16(08}n(juWrt++0~%kI-q(j!8o3*x<Ok$p$hzlVY53o1qBD ztTGC6!sg>81mmk6rf(3&k<%Q9^DtMQ!>9wJMZCrfa3_>4XiH!Qi=~+!_4n%g-ib0C z;?m`)T8^%)_dO_@g8Zj#+n>*@wDFq)unWbL#l4$n(#c3k?3yN0vn7*^MI<DKI*=BH zq~eJ-Vktrh6&kG1ypv>;lPMoL^TzGs3?zI%Im^`<CEKdwH}VA_(|i;pvbm{TR9uJ| 
zOm_!1O0=T4Jms@pj2Rup$bbfM4_oiteqczllf=dem?8bP(}|TeIe3=Yw?G9CMo)O^ zl4A^6QM1n;b46^vk=Nz<;BI}ry^Z1B+ee`A>#J}S&!Q#t3`-*Rn_9G#my-3+f)vAq zhvr-~e8qj`Hpoufs`iV;h#v;$bEUFLr^!g?sij(tv>r$awXyp?Kkr>*5PCe->unYJ z)uIcA&}6)%vWCyukC4^y67Wl!g{ZxR)+>PkjzZzdI;yhHeT&lyYDX*%9(YvV!pacf zIGTfCF!m=?_?=UhQ7qU-PKyrebn@VLi^Zj04lZd1W)u6gq%DhM-_5uI9xaq8i;;U? z#>xJS`N~C`4Z<U)ct0EX;H(=v<N*MaOH|33=0yhe!z0qU%iuZz>^i_WQc}*8ioCr& zSA~%7h*SCF%(D2|VNg4fvwNYDR;83>!6+<jYJix5@2tQFqM5N42m>pJAbA*isL*4% z7=zjQU=xfzOS}p1J&W<h#ZPC_v6*Qpbm;5jG3<+pa+~K`om$GkisG1-<_zCv@b!H% zmb9U^$b%8o28|e<?yQrKYytW%HY1IAo78p&)&)2p&P&Qm0yyVOBQo00oG*=mV0V9C znWL+@naf^rscsumz?O5ayWT44I7JO@R9#e{w0t2aRR+Z?MU!sWvwWAuvsnww;2YB} zfh;@nz+uERWvjt|FA^hqgOws{sjTg`TP~09Kx5<T!Bw~c15wF%Es?{bqnzo^KG}z7 zdJCMub#>hua>uYL$KdCsZs<o7R(0VlF2_3ZhU0KzwTjkp@ryv#X*mn*C!=pEMNhX` zE@$TWew014QtQ$BzL@@m9yDXJ=6-#JYmQ+;?%cxIdYVN0ZTF5Y79gPupDxP}tchR- z*gS715u%1G!^q>E%atwTQiKmP76PtH5LG>OHX)SEh@w4jyw*hNH;OG5+glnVIsf|r zpK{<FTz+PyN)4&9(Rk%~5-kN(u)we~9qUU0!c3~~10K83;iJvd<9aVu8n^pnB>R!& zalE^lA_M^aPTGm}i}`+f7maI!U0bHTI}^79@(*^l-{$e5Gu~?lj(0cpQO^D4ywT*1 zwGMIB?5B}~)RHe&?S?@?vEtc^1|TzrvZC{Yz5K`WCCwjF=$vwmcz5<p4$5-AjFdpI z-TLz(W@9x54Q3dupkmkw6%K7Wj)B@+u#+=6MrjlKuzY8Imq0#z(bjo^V~U(9<-9$O ziqq4|{O15iA`=9no!ZsrP|*q>rB>XoM}FD#c^r$@vLJr<1Nr_;`F<77f<-Ese9)hy zWt{M)wHQ4bbmmh0o|2@;s_Wy%IS7sc+ESXrvU5-6>mjSWmy`576d>>X_F4L+lvaL% zvBpu^kdIj_EQ~@Rm}d{<%_X8%HUWHmLs^)nB0##X{xc(#^SrZ37~Zje7+0w`*<S}8 zgWx1q#c!O(Ey~<w;|mX^5|e|3C6(%V2ILK)LyE1;y<?s^t)4S)*EvE!l_;2h*`BVF zV-MZ`q5?O}l-+<9A=SGv0)a=`<aMmK3MUp8l#;j+)y1O$<eX#|${;$M<`YXG$(ikO zzv?NRuq$soV<c0ZGiH%3*!sz#D2#HdEa(Ao6v%%R=~eBZIEM+zb7^cQfT+4wJ)%4o zOphhwArAJVdvPq;224l-2rJVV9CvZ@Txc)Xzc}pr!W`<tDpUO!L3)YwQ``fkgx}rZ z?udCEJCQex!ZF{E<v|Iz!8)87{A5`$k61UybvEgTwny(-U%&UPt$qo(K>ezR9icK( zFhe_+#x88@ZKt02^ORM$PxiTXBL-Ded3ZchM<=0+vbBMJB<vmf`2{jluKVUX+}2P~ zFa*C00#*dZnj-!6oUqCdIb<~rUqd8p!<KOV&ksq8I8G#NE3-4rTA|bZJk=xaesZui z>R3k(uuZUZo+GmHUV0+E(|V8OI7>keRqX}Ey7h_7t;UNMrF-+ONfzIV0vZVMALFR1 zzR}h&0aCaHV=Y*kVTl`nrt9J5B}U8fmaX_NVegmE8m}LQ02OuV_J$<W$@%eSS~*-w z{I(2h!jv^_%^tkB0vY(Csg5afSC=sA=3wRkK{YV(Fs!la)&8g9_9myq^)QX(M0YKO zCl|#ltqoq8eWV2R1@`y2s%r6<*2>!jc%&dgx(p>yls`UWrKq!V`+fnGSx)}KUbTX8 zUwLX8K(cr>7Vm-J52(*#QHtwsMbxiX$+a8o`nj(jcbU@HadEZe6dcVeR;i9*J-(%d zp?M=_$5RRNC7cC6mI@K2^syaS$}w(NlcZ7CJtthxviA9lbtPSh@Z&1BovG}57Ju(O zcz4$D^>>UT8P0I{eY-M_ovyI0jibLNKU>}2Fk`LNAg_+K1p>s%>mC-K>MJ~0j+<>< zfk8bI#5q#xBtaN$<P~|+bJGpH=yLA-TgY;D;Z|Rxd=AB=dBRjWacK$|TfDBj2*<Gc zc>G+FDLhP9`Jy6Id9kIxDYa7$8Ul*wGn=zk=L=!1fp$_FKAhPHhlYvE-a=BIV&_jU z)9UIPm$=;8wa|MNA!7B$^ueG_WA{izJs`c`{wjo_oT%w!yOO7Mu2A@~6}fA*tFY8h zv(zxp8FXWRvYiqcWl&3z?Oah_@7v-Mo4S9L5}9_grb3IA%oaL)W&J)(9YFhuE0SB_ z3<*SE1mmY9fH6hoiy?syQoLb01SqO*Ol>BG8gW9p6ImPSH=wDO{Aka{P(}_c|I-1# zwr!hfTO(4e64~rmsbW@fs%kNSvLT3B(AG5pkleKlM!^tDZbQ0?QIDzrEZYd>xGxE6 z*|Vk=jahDKzoeBW3dk^a_pwG{4}XEALotoJ2Q;2voc$V1?4RgukKD9Vzip9Wh&+cm zNpZv*MkoGGbl^lMO>~z2<?IQ(r^w0Ylds177juz60_kj+3jVX9lv+B%5YCttFT6uS z4Iop!*bYcPjfU)#9-^uOr!Ny7CV!cKnQT2bViUXk0b{bsG&~(bR-S;SVo1<T4}h2o zSgU$kGvCLsIV$GV>Dx>QPa|DccnzOD6CO>i|LKx6$vt!Dk}7#8;}N2@tf`sYv}bAh zBTY{#-?Xl6BKN6=_lhn0Oc+BUT`Z%EGK9m2P&%qo=kyXOOEY9MZ;%vdPh8a*mrMxd z8-uJQwl73DQ+7rA!+OnV>U)~JzFyL$5w!#rCyyvmhVUz7mmI_-G|5b-2Oo~|T8UnF z;OA0284=|VlRFn0kq@A<;+N4u<xv<ncm_G16l7YY8NG2;%R^2Ty__<l#E*1A46RMl z?DspiTJV_y?6^fi><^&v-RP&E$GW;5roz#)x^E|wF|=JzpGC{tc)xPlnUxrT7Kvr8 zC+%WKlr7|+j8(UP7Z&2j7uhL+tGz(k00qk6SEH}#E{Hc6cK33&3?T#k4kZR#-#cvs zP|*!EH*+`2vMt+T<;Rm%2u%#Wg<(^z3Ay(4^gP_P_d3sOPQo8W1tkvK-6XH_zudp0 zAKnY&=CL^ATBmKgW!^?Nt{~N1vsC3Pk`wD;dzDG{6!%-P;YwJ=gyfyd-MAaOf%|%1 zpZ0!y{C+P^7w7bRzBcIn>G65g_!_@)YL|6Ot=Z4f`@KEy^QJe_v&S>=@%!8!?@4+7 
zar<kM?>&U>)3#%|=e6S*cUbPv<HPCjpZ7(x9!R`{<x?0R*6#IFtXw_8=a2K3Kewyi zPgbv4o+!MGKl|N7p1Ciq-YGkCPBF8SeH6Y9*|tCbZxP9VN=p8}O1fp^Uq6^Qm<#?J zS<k-|bPP=NEdM+0!#_k{|3%Ei^tZz7f28bTWckkZ@UOfNEX@BiWzT=r^87mo#J9-p z-#H-uL(6l~^+Q>EwY~Y**KLf$RO_EiuXZkfE)X|xTZpTxE7%~^@6W3~8h777fW82( z!5^VkWLZ~lDk_2r@+vc3Zx2^n0USrA%H^h2noUbg+b`bR=}x!fbYzExnTZLGin3!8 z92Eai=;`#n_cl5IJb7@OZc2NiLZa!~Ka>&TP*F3y<3bGgh?n}MMD@yKReiXiJ8p+g zVS{0Jl68rt)zRka^Y}D%`Fk3V&+FshaJ}W4&-?Y^ezcF9!e_O<)`O}kcW8s=#lIRa z0mxr)E>3zbPIxR#cr8d8RiNxdq~vV89A(4~*`be7cZpYIl|*Q{WQMwRj<<Y@rj7)^ zfhyk~zi!XN^~2p{S=(k?aWiRXe84JTe*Io6=1+Fd=jnWjp7#rej@+2UOG?sP%v1Q- zIyH9wv+<?)%OeEGLKyo3M5~MJEk+h%Q*pct$_#{jT@;(Z5kWp((4lj&GVCFnjIzu0 zqOOMjnzK#Q<16Pv^10Owwh|U-GDB0f&#B_Jq06+O${%iKTRdH??4AY>v+LCgH!`u$ z!uL@V??+W<$)bAP_;kIPP=(~wD?)|>YQPrKg2Jl6=9qSyNF#F#&)cNCaVDC9^C}{& zC?P8q9;hH2rd62zQI+NWvFf2j=)q*z;bh&Q;~PoQ%czaw(fp^h@amWL&%@%prsmzp zr#Kk*z%}khP#u8U5L)^94)E!>;$}3vOK$I{*ZG<~pZqVf!x*}loKEoxt7MiNG9*?4 zLlsifSQ2BEVie}YrH9(vDM^Y`;3V7bZr$02RCSiUtTj_q_2P?EN(0;hJNT(0q={mT zkxJa&N`#S8)ZWsw-??YM3y-ENaYxEA2J>-y%Wx(tuqchu2`n&5j?xV-^uj~!Ajhwz zDlllyQ>*Z_{dNJKL&Tm@cY2E5^16M>epwsY-p`J+UCXBOQTSMEta|UjtMikcI|i%8 z#46oG$G|2OMrGSLTTht))R;IkmHb>I>k?XLlAE9o)}gw|ET^lgrz_CJPmvAGRmg7@ zDNfY!^S25j%vW4&fYq_;S>QDJgDJ8ssZkOi(fqQ9it!$Ni>+xeaVBuA{C=YHq0Ry< zB2&P!QGZEjC3OE<k40Mk3(e0H{YNhXi>^)^2=3|G*iC;|7n+^i2p`3l@#?S7@@l6E zpfn+(kZ0YI%=diYp5mA=F;RC@t)Iu%viFH%juoJUN(;o{N?afd88TGLz2pKLj8c6? z+CyEU7z$-O%G5?FbQW1O25GdW$%K~igC$teW$&E3o}32#qPK>oqliakXjP0wmv;wQ z;VSS+n%z;HpOcu~QQ@KG5lb91R_rc>`ocRiun6>#vCYwJ4>XVyXA_lJH7Du76~JR~ zz<}JRIo(Z;X4gaKf5?un3g1n!?k`+C6<Sgi(95X(<>w@n`PlJ`P8I3KtV@=nSA`eC zGJemI6Hpv>tuXtOP&XuQmM?amC2Eh+X`0@g1~O6onjdLZl;WBlZL2SJceHv1KLHKJ z0M%iJ_|A!x(iED=02gw+7u+Y<4!D8II}<DmC_r*4>^d8*yb-?V|9xKtlG5JODv|H+ z6xOVlpetWdY)9l75h=vr3@Ew?Kq+G9da2J5qFhm%e&g@gzhp7<9^8krBIN61f3&n; zQX@OHFTr7Iagr6+B2}m&P+$61&Otnk`s5mEljJfLP!4QFoeqfXhzm{W9nackx%Ids zXMAzz-Y9g6!fj!!t)9pu3<K1p9j`-3)$g?4a9*9%xK(<VS$UmReVt*^(GDOtw*H&W zMb0IXx<yCJ95*#!xq_1v60?RZ`3a<uRTC7Md}R>w<aK_uBVM<b0P#Czy#DXv0~*u# z+yl+(!+G#`t2efB4%D3Gh+>6@JXJx*^PqCV`cB0)ftSAH0%IS2jI-yv54LOG%*+{| z>$|6qx4zl7%DbtBy>SeatW}iF)g+#ui(Z#%q-M#@*<GGd#|?a6@8i7(!9vDOxekT3 z4uLxACKBt&GuiXZ5Sn9V@UjHg0u={D*722rrHc2JC`=ND@;8MEZu|0Ll<FdF!YatD zF^#%frRUj_ROh$h)$|ThD)dk(GjNDYFvv3yJBxQ{Gf;@k;3)LqC~Q&j4Kni1;!FI- z=x-=%!D>H=nS~Er4i{pWwbRZsR$3X`05=I5OlOFiHCJh~Wfl|O^KrsjS#GBD_K1u! 
zfL96zw@U<u^FrnLl2s8TYR;%Evdj@Qak71iT0cn5c>~+=KbK9L^?EoQxW0l9C(m2_ z5ZZv!#VI*J${MMHGO>y}xuW8iIZ%iT(&;ix0b)swP^>PJP?F}?98J!a0yYEid~P0G z3|D>qmC{pxR*J}LsQj^B9&{DGO#RMpo9lqwmdVu>UsEWPovfG8X+%&W3eZ$ZRnp#4 zt)*%0Aon&RdHfhO*on&1VXjM}#^_LS9XQdA#{4F`au*qh6IMv{;~D$Mw9{BAkHO;2 z@e<78EEJa=I)O2wS0AJ760gQAht2?x*mlVbaqR@@Uc8#4l)A2+?)>xNZqsN}h|@Zk zPQf@mWa{$)_$ip8I0t-a>by87<hhvO2*4B`B{Z9VFvLz%DRNh!j8P=;Sb&oU{t3V& z*kR38Cr5)P)?$U+SdUbj*Rfw|=H%)9YaklVXps*20XzVAl0R5eJQ@9T;T70~(+P;f zDX7zV(9KB*B)Zth3aR=Jl#;~9$SIo#S?hK()(V1pX<*5DtKbf@=KJ&#Yoc@ho6$p_ z(!;L4;nH7=VHrg;qiq2qDU1=|;#QV;x)xoGhj<#qHJ1`3#=^a2vb|*v!D827-bA?{ zje$IIg+y(UP(|4Ont*+|2V<2N{dq?dd1r$ayMraW;7NM(X<D0UYDj%WyHgd1lQn4g zhS-#5SS9CaMhM!cnA_Nin>cE$${Vb}eW!a_U6W$dVn=qzOL|O?lO}5Sgvd?!h;{l3 z5Apt+Ladfx&7~fTee}Wwp&$dm2U<Zo;S_DEt2|%_DUFj0uCiSok4J&YJ^^<ZLsSM9 zQahIyA|Xm>INq)1K9c=MiK&Ma3K2?#M|`2FS*Q)fiM0etj=8Z;MJl(3%Mb@F;7E*N zh^!H)3=wh7-xu&ndDl_?K{jRQDSy`XK~|tkUzi3>{2k(J?eB}0Sj`S?KH3SkhVC4z z>MSERvJxSoo!D@dz#y^MV5N9(+0XFllgd((7ZEH<4l-30H3Azm3n4NTCo>u>HLqWR zVMzTOi<4uMn`;B}YnubZ1Hia>{ZvP^;7Vj@z+7ZDR$?qnzkV2TS$Mrcy-pdv>c$rf zoJH56e*V}RwHZ|*iVC1oJH7&Xr}~o(D{V83>}<(NR!7l+6rrIg#TkY{M*f|v6{JZp zk-#8Lq9#((MN)k@<A<6EDvYx8q}m^9Wae+pc1@3$o1x3+*3c+Ta;WYro}ua&jqQDx z@2&<ci{MVIKya)=a;!#ntV~+0PFk!8;G^CH6$rLv$@4=Paiv8fH@kCy$b?bRW&8n7 ze3H&w6StFd7t}L!f9fL8L8c^1rX>3Uu(`1ga%F+jSxpS>X4JH8;+#F%;bQP5Ji{1b zZN*2gou_JlwCb#e`m)Mm|LV-_WdG#&)cWrszFs0`Xbg-?JS05y6!gqg#Pm$m<amTQ zNEReZcT3WDG+k&}aoA9)yQt_t(V<V^QT8y<4iAqFj}MGa0w?Pn7Md#*80*>#PM!KE zRn~ZBNSX+Goa)Ox%$@%W)<7x0aBuCwvs*ha$CmGg=d@ig2_q7>CKj<cuNkP~R;3-# zqK<61V3kdy;7uX#DQ+1LJ|8q69DZ@@RC?K!?Yoap$ocygaXD)OHkI#$&CYPC6VXdi zajVOyI@`O(B&Sza*LC*|jP>>3xYW>-Q&8_8l4))iCa><oC}c>)st8?!fL(<^$oQP1 z8?Szdq-~;-bDAn5L&H5=->+QFt&m;COU67Rwx}B-%7C~E9tCF@O$b?)2{_bGoi~JD z!>sBeVTjPOMOeDIJNWpxhlKh>#R9{TX_@gk`6-1ZS>=`a)t8ED>q{G&DjHj=T3V}G z+p1gJs#@AAn_DZITFM%l;9Y2wU0#(|T$Yqy6q}U`{USIa#V0DxBQ(+_AQ*a(xhulJ z)?UTLT2jwe+Sp6mH36C2QrJ4(GqE+X_7KRQ-gp?2*&=EXL?C8KDrPOH9SWMS>D#3C z#plzj`6;<%`ek%Ncw5mm8=c$Lc4g`Wp1)}kkp<?J&$6o!GDxfGdTwmp#ovB=_hohO zd_Z!eu~WhYWoIg0EdnN4*aD%T(K$tT5wj>|*9>j1TwU*6ecwW4YD;e25U+|m0hbm5 zm*#0+eemjJvd$uU2t9j*jfbaeK%jR-6d)R%nUhvr3iOsYHrHP6XzlCkzIJWk#*M2J z<JYIBMrNjO+?=^_^X84&*&DMrQBQ8pjNY6Xo}L<-m>9fqqxbstj)8&Zo?iHDSz}W{ zO>JgrMN)ntY&JMC6&V?e2o87f@wamGFtT@0GqJj;XD??NViQuHRz1=-ygs^gAIP6v zy&sX=E~poHO30K#+*-yq2e8-lX;gBpWK|C$lXaJ{N`R;wVubpE<*2-#jMDBCe*T`! 
z+kj~yVvttUK;UhNnuxWqy;I%0Sk<#oKe#fuaKC+Q%PORbRLqKuPoIQS4YmXDlSx<# z8ilJMGWC3m^!<u;eF|l5;^}0ZPl?&kD7i=)yBa$o9FRVqVUgfiVzcuyOUp~@8tXbb zI|r{0j*S7{V{^0P^K+96^ZSq3>MZ67G{98Uf9K|)FF-Ho8y;@!>jVCw2jy1QBo`D% zXXL;Lf{ci92@JLM@-=gI(KNS~(YKZ{MOp<GW!H^epTE2H==r1PZ?4X4*@c$U$|KGS z8uMxdsCv|Z&w(mtmPRV$#;y~r?Ohm{-E-;6Qc~GqdTGx|KIiYpyo@zi8d^T>#g$Dw z%|3kmIKF(prgs4~iTCPi`^ZLS-MB8YkU`ClOD9spr}4a2FsYCUlb9JLml^@9BB_uu zmpW3;Az9C_*fPAvD*Tc@GRM*@)HN_DASN~@BP*k{0)PZ%28q5tH3^)5Mt5!wU1!IR zSI1Fx?yLWP=_$~Cu8xmGSE}#m0(X>CQ3d8FGA+wLHW3jLVTbew&!cB;D`#M(V2(t@ zRJL4OUVrfH-rlRbPhOR_j*1$Fd@p1|A#QWQJXIapsP5fx!8(T)mDTZ<bxd=PZ!Tz= zj?C)`Nojnr^YjFyzfn=w^CG5^aPf@C)8_U29|vY`RrfAH^w&JJ1|xfLZm+OuG^K)< zkWr$da|xT8AE!FfJE?AH{*JA0HnD&qA*VVall)l@b()K|Vn(65K1J4%wKkFUPSMpN znN=l?mxm|E#^!Dw;2f>n*ukFr!Z!N9^WQ>_r6Zs<e}621SHL5IS!(R+f{q5}KQ22D zJdbxojFW$`wYwL%eR)G`Rhs~hn6l=9B_JOJe`Nl)drUQpisxA&Q#NI!ykilly^2Q- zuWl5jyeE%-m~lXPZqrms<#l9y-Hq9e6PEriB|N48j7>vJKduH(U%hP`Ub}P!HS@d~ zJ(b-)s-8(BfLZ|w<G!qY9)*;HtW{L&wfXC_+d3{Ow2B@SvTol=A(-TCIV5z5*%XKv zWQYX}`1OK7^Q|N6Y$EDi<C?N-2d+)d%&yE&Eqsmi*XWKNTk!FX1z%hc^p~N@iOT~6 z5Cwzh0oe~v&4g&zIUv-^-BZunPC?&X$-*ZzyM27+!QH)=k6yg(8eLJck3S`1PAXyn z*sFQft9sT++2&Emd7e}C)%PojExiWeu#snO$Cb$woc<PNab<&^U$?ja2G)0Xp1p1z zS_SXA4?LV@Y9%CxW{9?5ldwSyjjW4PMETVE!^EO4LH%GFMGq1gHwp!Wq>-zYyN6dq zghyzsmKl<UR}*>`(Rp1ib#E=tT)UVC=r}$Q;nolIk5A7m&ri(%7KVRyHNWFisD1>V zzx_&oRcjkW%@FE_C8v9aM?3fgS|Z$aEN$csZA?59I!D&FAHTT0i;8U#QI*uP&cwoI zqDFBls0qxM6kID=)KJr&Ro!w!^ZJV0XUzSIs+zB#5cRhxmiFOS21XCR{`UQcg9;Di zTZC4V%XtVG#OnsOaH{!H$vC;kRxjUqf{3hS*9v4%@jff<#;A%gbZ~L@^9zVeOw22+ zY;GBuo#-FE;ph=d$FD;~CkeI(tiDNbxl4SLQ(Tj8T6=Nx(2bk3(@WUB%{V?M|Lbso z@iSw%2aAJ!1k)JEzuezn+1v_NKPD?TATG%*B*NC)&&1JL#n@cSKCq~HeC6(w&4<q) zJbM#U&`BZVe45YTyhfmkXRVTZwIZVOoRSY1JY|<mh-%Bb=FR;|CgwL!i2Cc7>DiTs zk9NPw(tP^#>FVsAeTA>-TeJ+Vp;Pe{w@B9sXl7PMGAX%7<Xs+Lxv%e)&8-(qui|q? z8X=(VV&;PI42z6R%LEC95EbH60Bm}BapV5>^~ouZz!Vk{Ln3-<Vm^Hd({P)JOYX^S z?n$lT`8}-zV^fQB(~EQC|BQ|KI;8#jt+}tCe75=nqoa*o-G!Ixk_(E%Q!<c|v5tO0 zmTsP!W|k_ZPGJzMuRK`4{}cq@JF$*d(ftR0LmqWMka|V8Dn&#Ui&_AcqL(6IpVggK zJ!0sdwRPvw2~>Z(f(HIJ2Kd(N>kzt{1eBp7R^vo9k7{ZeS6(gOip%4}bGM9rQ5Rmx z6}+hwP#03|yu5v*<5G)DFST7B92?sYAu)Aoaenju_Qd?0Uqlw8kUsc)a67{KLAKF# z$g~c>^p4c3fy>t>K=7gdcG~BT92&4z@cE-RXF3K4F@g`_ZBTqN2)>nvx4w;?ijjqD zNJ_`Z`r_?9z&<L!lTE|t6u%*(EJDexO5U{s$mi4wqmcKIwo3t%lUR057m=}b=j1}| z->9In&8W0w9}4&uw1+qLFFB{;D`AzT?p;eG>w3{7w7zeqwr5(`J6FIcl1$!%L(SR9 z3E>tT5}KR_^4>Xk^~TMcn1tcsP;_!}e(mliVCm+UBq(D=$f7_ZU~s`G#382MKdUDw zyF0IbsC#%4Vp~jjdoT_~Z3>;<+0SSG`W4><yz>V<{MTLY^Xr%o{+cqHgH8$qQ%PN8 zT2X04YNkhcv^_Gw)Y(<T)Y8f$G{64Zjir0*51z#pcXMg`pB6M`kaJT&RLQ!O!y}Jw z6otH}j9qeK<#0rPpQUf%?FYLjF#TPapICiR)4PDNG?m@++Ftoo3f?mI1sdLU=j1&! zof5~E@0GONP(fr08%L7Lxk(v1*?M~TM8$%8uWD%nawZoSu+shw`Rn(#pr+(j3P@QH zGRct&8Y<c)Ad*@FvwK5wdh;8HhNf;}G5PcVl=;D#BSH&4bk&}rp_;bKxs^51nK^zj z2`+)5mTq2J=2iyw9%)tAng<t$W^YC0bqE>+pA|M^QS=0<mvt(IM?vFcl(%=yiYvb! 
zm~mOpHU074ixZyy`oJ%2?_h3-(Rg*G<@%bTZxNLuQqHMF&9jE$qT>bQkm-$wDV2lT z-g$!hA*ZBWr3@XN{CvR-WR+F6^!DDkIfF_G9wP|dw<1g1>&v&+vx;ij#f-q`lM9=w zIj8vN^n~PHi7g$h?;VGhXu+Sunz}hMvH9C_JLu#;bX#!vc~zGZateYIQo!ffc>3tu z*c&(^!*V)`TgK~qZw90{pVtdI#i!3K?<Vh3A?;WUg3o6d58htQEho8V)H|ug%rpB0 zslRyHdG@9Oy)3SAaOLuiO>n35D!!67xe&CTlXK_Q@&QYeT{mnTSSD^7bw=7*-q_LJ z8wq|Ux3aopU;up?2y-!L?l?i^{v{`9uySi-a&gYdGme@EHNluc-o+xMBBE$8qHrMN z(ooOH6bSz0A@Xr0^AF}~ezV}CjnL5KWPN9MVGRVfMZt+F9-)yoUcS0kHhT76zA23v zHA8ugBX*%>5YnDLug9hCFXvhz<xnK$RKlbd2$8-~KzVlKgqd$~M0(>1SAW^^)2E+% zCN}ru*37ywA;Sn!(-i%nRyGwMRu#|6j`8jrYqk-W#Lc45$RHHWoE(tIz__HsOLaZh zh7Nn?->SO6JVdztqhq?}{v>Ql1f1&fHnDzLJyFG1<4Uhpc8m>A-#qB>|CcUc?QiYs z#CTJ8PjPKSN<nc*QYv5%QLUb}t&W|STU>oiVNZB&hoXG~rHnHHzk#4Zw6s&Hq<sO9 ze@@wtLnqiOyf!SqU&=fhMzRyS{-PJBZtd3eEn<K#@0z<{9?PZ~q8Hc#B1SFioKtsg zc;T*3`eiZW$kS5Jie}DEzJ4Hp5U~!7-N4KrIF5=Qc`a#rd12%Jc3aPYnw|&B<O`c> zx@CqJ4aSvSPpcX39-f|BoSpn~>2~a!F!nbZc&z(;aNf@F)MWdWE2RyMsYNAW$?5wh zN7EK*6;ke(*5Vvpb<rY<^nwi`zk#S(iljsF1=~ER)QU)6!w84iMzf$w6|0z&yT$$m z%e~zfVBqV~ackqXRc()4W{n_C-$ntwNNQ=v@SL{c`8$zC1Dc2|3OQ#L6!5;lKtW9{ zM0^JgJeKqbHXDz6{L25#ePgO;*OoW$-wKY&f$;V$yDFosqltfURLM|s^=L)M_?5Al zsfEwv<DYNv4cp($UOQlc=Ekvg5X?li_Wpsg#^$u*Qt0FEXp^IFYp-GLY2=gb6kTKD zm&d0UbXLTiOxRq^GE>4fAD9<5Pp4M&5jBf(h;0%uibRAIpD^~9DzD%E*gUj~S>_KS zZyr?6tRAT0(<tkZPbKXXmeYE3^KnYmupzR5T+UU~%F#6-Fd{Xxtf2{&qB#_?ej8fi z4>0HF@F@Sv?Txj&TdA1D4vQj-f-5q;BdKaMwQ8igYhrlj%ca}-FZCD>YWx_lfBhUa zKe_Ab)yn1;P<seoJi?-Ey#0*q98@h3Fb0_h6sozTuxt395j3W`U=PfTT4jk@XA7F9 zfZ9viB$|cRuxbY8Rh&$={|n`6uAweJEjA3Uw2y2A<#aQt2go~@YWX(O${@_VGuIwG zD{h~0jjbb=braKdboLJjOUWpztq0(zP?iSYe3)Z0hNV-?qcVpR^K<YOYj-wRZf_z( z(y4hhhy)BZ5b4pSAow?O8pgUtW@lFxrWWTX_NUO#&*7ScaKxa1Z&v~Pn;nB!QQp1~ zg*{sBeGKg!<V}%^jwzaMX;PLkv<mKL`3?E>BE_t8ge@~ftulG^V`!CqR1jG*4r%PF zJ{#NjPH6ipl$ATXnB-*3&{}!tEGlj_PuB}>0~^n%h!~h$Zy#Pyth~;m5x}KpZ|&w5 zoRE@NbqQl>#(#S<@E~Jj*oFfD3{VG#r_gnJabbFCab|h>-rj?r!RsLSL`?FG@~)n# zZ5egrnYH7sS7)HX?8@Tw62QLzAKB;s4-9ExgJ5zRyHQgVlJX0K;*$}<;a2Wm+7{N5 zhTbwZ3G(&{{CdH}VwU8h7NVA!!WQX*W@*CaX$)!s>^i~9u33y4LAnm{C%pa3%d^+- z8&Nl`TY|^7kqtwiLSDTnoq!fe%QTV;Hi2o46KfAa<R#5x7?m8%oLxP`qq55@I|c`T ztB@XN=D_5)oG+p4^39dyxz&}MD=X`FZ$S-?%3&5ZIK!hYX&xC?IFMC8QQAH^IJvmE zu{yiD0w2WG2{io2$tI0|9l9P!)PK8&FtpWobQM%zip$Omh)r}32sLv-Xqs7z>UxQq zMv0q7FerPTKCj0p=LR}2V44i|yiq*4oCmjln7CCUsjPcear+5%|JpJ*y^Xo#3a%!- zW|&huM8)%xrcVQvl(U*c!ouynuJJ8XzhW{OCmkC{L{La<R&Hxw->=vG=jO(aH?0?^ z1)86onEy=sXI#;c1L1S4%X4ch3+ro(n7Xk(zrMb-wYl?r@8<G~j;S93ivr~Z8}GEs zIZe}fO;c@G=ipP&Y;J88wlK4_1Y4V0T%7o_Eamt~)X!HLzKw`cj{~Elm)b6)=3!>$ z_(a9oA^lC9TvUuKc{Duv^+UO}{D~!O2+r%B*N+C^`HT|;Op};3f=HwhyoQksn!!R^ z{y%;Ecmm$Ps%$-YiUNLcrRnM_JgPZoNLb~7$g`>VaH)HPs~KB;h)iy#Qgl}|aR3`1 znU)E090YXxf#+A#V1DB~_A&IQCiZDxUIs}A&SzIw=hs#NT}%bYF?D%+Yh`;IDm>q~ zdw1{E)5`iz1uZuKOjJK8IJY;iX}aw4bpQC`^4121e2{(ECWic}-(q-h*?Y{#d#_)w zYH7<VuZ&91!d$B|vUgO}H|JDwWmWg3k#{>SU_^bvj^8xtyipvFVJxp<EVaBBi41~8 zJA_ag;Tn*8!rs56eE9fNLqF;-)PTr$j%_(cUgAc(8gbJkVhQV{(w^mePfIV)aA*Zc z=vdo(dj}__7Ng@<tcjYV_59E)DEZy@Fsq>IEAwk>py~@~Ne8f3wzg0eIKQ>MdTV>_ z_HC%^w{Lx}cWys=wzv24X-ISq4WAC7pplMOPHOFVUh`CA|J=;V>dMvzYzj?2M)t9g zf3Wi&=Q`K;msdImuU0g*q!*WAW+qs9c%xPq>YC8YInpV(lU}qt!>hxh;sdmE>qT-w z)r}yz=tL;xLL%>VP8RXt;m!$xe}1|B@Y()EG6-FTjicpV$~BPn<QME!?P3A=?s4b{ zwOncr7Ot+|5iuDh<#3|ENyQ(Q&GF-@=mc8ZKYR7HmCwSyxq(6*E#^>Bgx^8cjXQS^ z*3G+j;c@FOy576{^4-hXl~pAz1R<+3yP8)>eqUkh&GL?!5!mRh&E+rT&+ThIdSpRg zAUKHq5vl%PpUi*0R@K_uTU1*QX3sAs!O=h1%++1b+FD4}j6~9jT*~nW0V6UY6CV9& zPTdF&op27FFjkGAGh()<C0+g@X6GAGbVA^to}T>FKD2_l>Ix!n9a<%9nxY%nf?De* z<J8bMJ+b~Mw{c9+AXwGR9&CJcW)7Ix8#izMlgSVi?epkBb#W2xWYDyK5q1pltG7@x zzEAkpy?glU#=U#@cXyt>c@Yww3xZE3X5kXwnBRJ{sBNZWWNCf_wz-9h1ps@D&qtF# 
zf!F68*`49};0=_q2Pr_$I*zjS_A_>LQ8Tt6le8revpy|i0Z}%Sk{73LIGa`ot7b4f z(kUUo6S4k>h&7Y6)9#b!Cm8;jW%c$>S;s8q3RHH(n1ErVvd1Mgj~X%wJBN^>TRShC zhgS8x@<8NmJ-vbxQwwYA2GJ7?e&4L!@dNTc^+n;qa?GLSeV=x;uz!pC_Jaq<tRVQC zckXB#`w(!biJOEcR*!<<S9Q;hEv~QM-d@_+fPbOFuyP#ij}UzHW9Q%?_`T$U;^2g2 zL{ONOySJf@ow%0OccL~w2$`HduLn}kt{uvv8N{p}z@#1kk3@2w-wT?bfq*u(_JqSf zv$PK_SNAS94z7U6>wD*mn5XFkqMSUvyvx;@&5@=1sZ~R4nm!ulcAnu;sf8uzTn!G3 ziw&Fcua{|mzUR#10?7O4Saof6e*IT7vrjuV)E{m?e02NK5%m@{d+^}llPAyKymWAj zA>vS_l5%iQZYk-Uuj-z^KD)kt=ho`>CR*^<FoKVcZ5MC_wnt|;QFAXRF12^$R#w9} zh>VJJ^bIt1LTH+rF(^BKFJO9_SC^1ihe^ekSv`<Z)t^Dtmq7(dsp|h<a^7co^!Rjx zPdNNDMZO6Ar)1hkHlhmq1dZd>yzAAy>!_q$oI;BqJb&9ezM+Xo7t^-2^Fl_YXIHnj zkIYQ}*38V;mtyDk<sF@Y{4DREoebKsqs{qOs*ZvA&ZEar@9pe-Q|}*me&_LHRDJXq z)&BA0w;$iPbPlTOd65bl3TXR<7xq_lEwo-+oLJsiy|o43dmx{QviApO<{VmVbyNkZ z#{{!s$?2#$38)!pcJc-mr{z6Q@f#7aDpFsxXHfH}S3%M#dDAL-(#pG^Rtx%1)u8{Q z77|s^c|zl#Q!aJSR`o2PjLq=6vO}treW7*$%E_OTaU5N|v-$9ObYZW6mYcDIlUI0j zUS-X|=;&9W>$ht+=8iHppMk%*yfnYMx_Cg|F|<QWdQjW<QAdNldw=h9eTb<~o_wx* zyN{mizq^O7yZ3f>?>^pv&+ff^@$u7-1!YYPLi!}artV42#h2&GJLj%UuAu_tEsWr! zQaUqBOPCx!mi?c-d8E`sFx%YITX?B1A-5nfF3H(H#LU@E*TRBN%jrLqLrx2tl8alP zQ}Llu@T8V^r<QZ0mT@7M_xKNk`2TAV&t;wb?Aggesy`uJ^E9_{qUqXd+wgi?&9Inx zvO2Os!?%(0qNB2P)PrYl24}W4T+=1>Y#e<2K)dQXI>+YbzP?g<+@*QrU#DnL^1i;l zw7K~O_+QP;LE7)_JO&Nl=ls#rCr|-$czpci$<EU!yU(8OJbSu>dL8rlWWNoDeE9fX zwCunB@OEiqQ(Dyt!bvsPw8Hk;^3M6Tp{2!57+<!&<nUKv-!RyZV`F|C?`o`ZRJwks zs-+cz*~qj^ka}wmq@k^Yl7Z=I^}zqD5&6AZFb$e^Dp^-b85c?^2XZOLGlsEbR_Vm% zDa9xE!};GYEratVmr>Dc`{<^zUx}oBfu?VhtX&?dgiU(omHSU$Csz!L>U$g5IfBTi z6_@r64}TT9{w9Zm30g5J8q^ilRR|;xD*Qh1x6#0V)^@aRKZJ_bbD$bUIHo>-1{Ih; zSTX#g;D53Qz2LzfhWtI0=Xvya_r<f_=TA*-LkM}axYUs$`F((WegDEF+UH=@9_8(q zmoTaO<NmZSLjYRpr<%HZK<X273;bgfQLf&_O~=ffU)$mT(Tx5NohS-<cPeQo3ZPxm zo&sIzv_p6hMSO@ttC+f9e*W}t#@Nf3FJ@<^n(C{Y>Z;+%(<giX%AI-hE~owm0N*^c zR?;>tW|pAhS*z{e%&O!qr0u)*;Mw@{eM4lvl%9>fw-1PXd1EswKRI`JJ{XouIpf&o zWubvz`aFdI?cp(5cZ{Femv=PnUp&l{&#DeE1JX~>ki+AX7cX$tz30z&pFQ920w~!> znV&tBHF~@QgT>E3e<-SGq2bd$D`03GP*Tz{5AJSgb{%E!(P}@3o~VObx;{6DYY7Q1 z+XLA53=LH_w`P`Bgr{V<g+y3;BK2+Ul?_Y?bVL5rIGIQ*lvLW4L<T`B>rO7`Nha?_ zqZ*(P)L@j<XO}(z;nlwf>i6#6wy`un&&y3sO+`jVN=`vWK|xMVPDV*d!Ntk0sjgaI zQ}O5T)!6(N#@JlGu^E=rb-_9dT#c%GHKC}bdsO-E>-SZiH)YHMbgk^%Ln4w3id*~o zen)<C{~iKUiwh7W&aSS^eFgkyOSApp{z2d|b{(`D!z~8jgJ3^>_3FvXS5IHOgo^nu zs$$-K`Rdn}&p(sDZ~M_cf9KVYZ)X-)WK^99IMk#pql+%jUFuux8eLh~+*;Y%0JX<B zoT;y8<{W1^?f5+EPJj*Q#j|nQdC16E2cG~FM^`NqGa&=SDXSb-qYxSnF)|hba#kS< zR$&S@VM;b({);M7$~v;DdU5ede|OsFXJ=%jF0!#QD=Emjxj3dI#x>X1z>CX^^KH%b znW;%gPd7t79bRrOE>4cZ+^j$ILR9tTiE4;mudM?1IvxcIh)NK7KJ5@HN&BvmCD3_f zQoWp^os9=7Ctq6Mcy(eNS1R&IS2Km4SAe?Ay1ur64q8`l-P(^>Q40TXKTzGd|6~`f z?O&t*5)JvQSI<A!*DqdwTj52_yQm)V;>DW}AKw1->CK0aP+$G{;nn*eU%vbC`MY<| z-n@PE^x5u<=g;4~j!r6KzhF)$?ckQwRN1{y-@iCGvxfHeE0_c(rkfvU;^_3s%1y9F zYpc_%EB!adE?w@-Dz6GnN%shkw(>ygS=%WYnhUuX3v1a?QjpnLm{^z^nwuD4s+p0V zwS_4U7Y92R5A5^b7kHo=RGo>5p{=Rzi0LjLel#-BJ$?FgNTA=Jvu`_3UZj*?#YC?u zRYSt2iKrB%R~@;yjjUPtoxN9sGq?3z<8-X-5W(RIxdqKVy_j3U<9D-Iy8|X}y}7b7 zzq$&p2Ib*VGi3G^{sAWCj&bi`LeN3z(}Ti}k@0T`fBE|DtGB;acm*EeMfAIGp1pk8 z(a~02Syo+HT2)b6Szc0E0kyQethlVKsQ>D~!za(c?Y#f#{mkMrzoa<?wi<|x{MH#D zzhiV|etmsk?QzYT`=+y(mX)Msq-A8LLrqCd35$;M^7pq#AWZG-%$;3LoZZxnObmRI zRZJ0_?5tQ%ikq`NAtil5-IZT{`CDTD>eWj+I$FN-JizlYKjMLKK6Ca=Zg%>guzN$3 zE4lS!z<k&EmQ!S{xK%bpuhQ08r-e*?5^8o|y(?{-QnK(gb3yn<$LCa3^&d!4j{g>& z$A~;;YQf_A`Zq!AzJ~|kA7X$9KZDN3{0jVwmxopTAnk|i>v!*W_V$<<8Hk99z(#&m zDJjmJIU^@4{p!O{(8caNdbIQ6*}dHz)EaYEC2mch_{x#$-o>_|<(n&O^J^;*&ti-{ zo}USwdT9IxBO?O|3Gv~tJA3wwkf^8)(qGfe(!|x*)ZU8&2lF1T4yOpo)l85tUP93E z_gg0}c~OFsgYCC|N=sdhoPvCDY5sqE_ez^aD!b<)fQ72&nxo)arQz4arRqx|Y1hy@ 
zz4_>QXjZeDnJsux^qk>V)SThpeggs2(hqPo%gd;#l540$&C1pm#?}0qrTL=pU}+wq zY2OF_=>ZROm}<-;rtZIjsc(P$_;`1hhKAbT$LraXy|tC4{rdLJYZ>W_!oq_0_x85$ z-@Er1HN@P1viI`co0#NcDqbxzF$<^I`l_DAx+{y<XIJOeKF>_Rw`^*0p?7eAn~Srz ztNr%&=C5D3y8QO-8+lnNE?ypMFF!piTWxD687)gTR;In3N84NLP`5VLFcp4*t(Dms zA~I$n)v22kKYjX}Nqt}UWdee;SpE5@pMJ{EPWN(m-QLD>m*(fso`N`7n3??ezxk7& zzj~W>X{7zeX4mAc^x9D|ixgF4gQ`y>`2|NIy`ZhVS2u6(xkMD`THAYsMkeK>qF1n~ zOSf;&uCIe@o!{D8*xm+`58Zik;pX}p#?xXdv|rfTTE2CA<<8y3%?(ia+dI3DU%k2i z<SEoAZ-3l-{q8<0o%q?(pu$yf*&jZFr+xp^ryoE4{P95j>F1x{efS7$!xK<JtQ8u( zdIv2&?LB)6%qPV~AO88O%8LAay!}IiH<#vb-G2bzj=mP~@ZHCEeb+`g#f;CK*EbKT zsOVmRc&Be-Wp-t00fO$^x92xEr<Ruv`y6-?nCIo;26PX9orbD13kSQAy|b>Bt-6`@ zd09;&A|g}@16@%6qANThA|fOqr(zIL=<e;s+$r>LR1EcXn3)){zGZ7=0b?rxApyt& z*54&BE6vW%wz#tJ?nf+tY<FnzinqI~nzG`>3*vC%pu!VPb=4q$pULsj4<9~Y>1CgO z9vbMYEG;ZA&aWsbs46Q=Pfd66iSi3ej7-Qf_ec=8DNwRY<rI=7r(tAdVi6M+QP<Sd zFmttV^9qPf%&)G!HZ@&TT&S+D#?N;i&a8;2u%V$reN#P9y$=8Y-*u%YJtHkPJ{D?B zTuf|SOngFId}4fJVnRVd?!v|fC_KEPs2~r!4O^0tk@EBNS>4=x{{F{D=%hLT|7dr& zvHns)UN*Ys6z1>O?COfr_03ISzO}ibs2~?=aX}vD399Djl$R84Y^=Zk_!D%NohN&^ zm{(U(f+L)nn|<Z#m5n>MA3i~4?@`0e?jCfO(aFj4B5DMz3Mx*?In7g-`j;zPuPUo3 ztEeb>c)G!_o?Kd--P}Al#-W+-=jGwT$~>GMT6%gV9X(A`3oSEC5mf^c9_jB0DgWQ^ z&-~~Aocf<L#NU0cB>y-~L_k4*j$bjmu=*P|_I7tl3v<m)jAW%H#YBbqczGnmMP;Ne zni(6G6y`pD^bjj-@Tund8aV0S)Uu+yy6SQ;o6sgLIbK0dMpzKeEsx|yaTiCshYx<w z;&gU)R=^(XTlDmFT-=-rit^B@+gq^Q$t@u^5+dK?lES<9zxq3sxmjt<%#5&QH5H}c z0N;$%B>1s#9AKaQyge1=Wm#F6X=$ll9PGZ?;JbHk85rn!xw)V>$w*7W$d0a(@JpGQ z=&8>!F>&(K($ZR+8<!VlU#cj|PER({*P*4Q;o{+m&CbuSs^;eAq^Cb;YGhEDn^|5` zke-sDuBuE$MJX;JHoLZlIrX6YR#ujzWTbL3QeVCTRpA^-NlH*tQz<IRLx97>%?V>% zDSFQX{g9#}La-elr01&-A9tTWd-3+&<n$yJ6_vP{h@1>&S0ynOcAk=w!o$rKV1}(h z1KF<|D9A~(vN9uF95K29-*xKL59!GXSRwrWhaaeEs5v;;uiqHnefjeK&d!rJ@22Nw zB_%J=&{AKJ5OHwyr8uukE^g@%U02<=6rEmaXXgMe1x^7R%#hIF#ap*9SC&VnCWM3p zV3(j`rA13a?Yr;4Cm|(eU}OZT7rrP(Nku6mEhTmF0vKR;#8g=sY4G{PWK`@I^*ur| z4;lk~9W6Ms!a@R;rY4DTQ8^hYz%4w+$3)mznu`bv($UcXv0Iz#-+XmuY7#Lqv9#m` z^sz!!S#*VC1sfFLKMy#;2mQP~3iGnciu01=qg9m@D5)rXyxe~)^8hxS(8I4kd-?>9 zjHsxHfsr2Ik(HKwxJ_q!Gbt%aK%npR0aJ(ca65JC6j1v7*)v>U`smR^M>`w%tcj7q z!Pnosc>{`<7#nr?Bifqk|NW1DFfr0kjgNfu*3|eY6(xnRhzJui6Dupzom<=AGyv}k z$IaQr3Dh3N{H<FCh6b!GOozYa!To!%SKojCz3JCGhA9rV)`Uca-MwAU-@beK?)}J( z5$JA93$uq?@}K9iFgLw9e-l>&2OBF>6C==S7G@@J6wpI&V>6J$*9M?pLu25fqoeiV zlcz5~e5|f22X{e#j;^Zg^9qi>{!vyT{j=O!+Fm&&T?_4FTlZhQefH!r3=?oJjEoHC zHa1{%yFNY!ol#put+uKRYrbN52=#T&_BP;Wv@NU!#Bm0Co(M+*5=uTLo8XkHk3anc zuYiC<M*vg+ay+~Z_V)n5;EJ0YE**RwItZM45WT}6kq{U8umApU5QoEWfuf#1eL6WI z_V~bK7)ywWJp9)B+6rj6oV*Oe-320gteah3UW9{V?_kqE(DUy7JJjft6c1EDICh+G z?`m%*A|^66)c<vVUcUzIf}el5F^rYu<fH?A*zYd{!`}}-{J_XS571(50H^XC{khhz zj?tMZ(0M~612$IX!_9Vg9<j5rxH{S6+Gps?5)xt%NIrl2_Qs77km$vQxx>xiv}$Uo z)zx2u220o)K$llnQyCc_0l^0iJYwEOV=X{lSX{VzYrC_%6P|+rzI^%BEU(?YXR6xn zM660;Mj_yH$~)&qmhQvr{yv_-VNq%E^vd$Ju~8T@lH#NBe_d-+9h`9`U41@LF)kd; zySq4?AtvXRvxv@ZdiMdvJdA_q&e6?GjURd5VK{*OCm|sjywZEXGem(t0FZ~<gIf?3 zIFD;<DvI)S=V*7I9QU62j~+gN@j4${$`QppIhmTeN^E>I6*X0ILJU?yU_T9w^jq7T z!P4KqcNe+|E~kXGq$I?gK7A@TEB#33;S@i@VFf=ckDF^pE6!W#Zf&fCfIGW7cMtYM zxN2aikCl1U1u(qRNFdOlrK1I@2UkBlJcNsRLH_gLJ)&YFY3XQ|7qO(aoa}8Ox+y3w z1P6jubH0MGu@(-jf1qFA;1zIOV3u&TL<VLOvMBIr`z2M6lw6)|A6`Stg`u#p(9q=M z!0-@^-N#`bB34luIWB$yE>3nFxk|?$2uK)2wR{uGpT7Aq$j=-0aAR%d_@`b-kRMF! 
zPhaw!c$mjuuJ-p36BDJUCPDOcoc1eA^DxSQjNs6DGHq>*oZKw5IACG^?b|mD4CkOt zZT%(KcQ+SD*xK`F-~O512lwv6@B{-N1Sy9%azR`aRPhyhPy^=ShhvTApjca&vM@84 z85`jGIpCC-m>4Wfjj_H9=2l)ouDrH(Y<|wb04MVWxtTDapXcRn#ddiP1{%=7grxYz z^_79ES8y^fzz3N5`gz08Sy@^*+`!qv4y<)zasmS*y|sne)q%d<T`YgpW^HAur?V|Q zBoI!kos)e`Y!vj|uFI|8G#eeh2FDPN@TH3Uyy6zXo>ta5Ag8;$bG~JGy{f8;oPs>3 zu%NTQ54?@7rNz|5jZZ&)#QJsM10%zNiAjigg+<tTd2uoCiE#SvG$AFA93rA{a^*gp z7l@k<H;a#ngyXECrt%5xe-4(#`B|VPHYO5-J;a9KKd~|op@-xJaTtHA%Zsr>2p=>v zGg{qP{qYzVm>?X6?`^8D#>Kp@u2x}j0WRiWy?O-!qlSiBMOFFR_wRYQIF%IS4mSe` z&k_*O(9wzt3m$&AsjixWf}*ar3YETo{R&qGBQh)qqPf$A<S<fiZ>?h+GGb4kIdhJI z{><4kO?6m07cUPNAOHD;^tAqw>-q+IxR_^QVS*Tsn1lodYiu(#rs0UWAe{QIUg_@X zz{5Nsjr8%v#XQ9C0)qT;39%6F!7zIE?Ae^m)WgjTbal?0IRnH(ENNqJV`FQHb+=WO z6$uClDXFQzUHts>&q?W(6x<qwf<_i0<wck0N?OLbxVWwDtg0L8dAPZt&%#;5nm;kr z(>Zne6bT729As_*AvPW!T+Dm9Iseb8vm_jnuHl6V=|%97w&sR!N6b$@LC^tv4re3< zn>Q8*AO!fg?`(h3d1??nthSbrh=_)k2BIXaI|KeuMn<Z)uWRq=-XFlco_29bA#jM* z+dvnCqo<~(Qd&_uH9ZcS%FDuHlcXdin3x%5WTkLj)C0^c%#D{;7x$h#fgg@_@YPaR zIZZ%JM8`);PTA2`f4G63wkEJ4D=!DQdbzt|J;E$(EdF7kE!|yudb(KQ0Sg2h5tk6> z;9$qPw`(iQ@H0)!jA|R}+S{9O*>x22!h*iOUU+m~P>>&dV|_zydPWM^NUTR#Q(YB$ zgq(t$l8Pe2(_L9bNlsP@>(?<bF-l5H!f?>tff@yF-+7>><3Y%(ET9({S2<GBIuVmm zoR*VQT3zWA=*Py!a%+1FOQJwSjf9Mhg@c2Mot;lajDz<)9_Ih?!&xF$3FojPZGBTl zMtWQ!p|p%7VDEe=M+YUy-v`E>y84<oKfcH4JP>ubnV6_BGYb=J2L|zv*zPL@PLiMR ze0f#b_U+AMGT&H-i+NHE^VBrhO(4eGsHrKJRhBlO<eGf^#xPdqSy)+ARF#;S7{7_U zFolnoM^;9vr?2z&o!c}xnAg+RI73K6$|OQTNqxDc7AqbUl$6S9D&!OtE{^tC4;>Q= zv!}mbZELHpt~OTY0cnWJ0UjP+9;_(~I4tlp4Gi^4E6bak>hLfxd_3k`+gmED$|zA; z0_@C#zG>-b8Jii0goi?mps%BSxEXX$Hg+~86(w*!2c4?9Z731F46~wJYTbBM&thT2 zKuJw?L~IlX2iq-dX(DY+b#h7yZhn5K;<9o)0)n`h_jU*7&k!(++65O}kdy&$j`dhU zxGgU)3z~^FEl^Qf2s<1R6$Vlt7af7i+lYw@v9hu#D=RQEGGOHg8gOxQ=H}-tt<3*1 zI$v4@aV~UZ7=<D3I9S0$5)u+<Xs817x!LKk&sej*QWquI+1ZSY^jVm(GY^{*5fLh{ zDqmchL+Ly=Z=<87ewK)onn#hGlD4!k8*6Tvl8VaE%oLoMD-PxvnVAqsud2od9URP~ zP7?*WimI}xs0db_$28E@*C{M1YHX~<&%Do3%*VyY^j_(@a;1xk3cIQR^K|FvtnI7= zf&;2*Yc5`taImq$#ylGrH<zKQ5uCE-`mb_;F$qQFTx!fph`^klvX1$(_VKjb0(N%R z?X3-5$52#KQbtKx)7VT%0(Y*`&H2021ZSB<EPb=jpXXPUm%)_=(bQA}$AwjeTAS*D zd3O(&8xtc@5n;GuSU{SSlO1#hbpG|LmxmjG3+3YCOwUNYIXiXa<e#IMC)Lr_DlIPo zs{mpSSaNc4u(5%puwour*;#pcxy{T?z}qAz#zDWp>c@b2_#k*u@Gw}JhX@7sIXf?V zV)6zq=5@8y35dzh32GCQQ>P|kX&@tY@f`g*TNh_qI$9S;I~>e2x_Ek2)YrrBg7F;} z(%QP(Qc@T3F|VgxP+Zv1aOnu0_eavw;bC4NJ~4Lm#?Z>zG7XLtB1C`m4D{Z<-o<6b z^J^=hnReEeSaqI{pU=j@j)at?z2#e9U0v5r%%(zi!Nwt`zU1;;QR|eFvg-QkB39<9 zXlNAF)D0}Htvrwtva)!X|Nac&83tif?{sb+UNsd(T+AC7=)zCH>TO_1f*p2nwC(Hf zjtC3E#XKO*&CO+PV}XZx04zB*X=-}xn9SqxHrhIxpyBh&bB)dQUOrw%CWbnC+Msfu ztDd&Lp`N3QV`WWcN>V%{13lJV0_I`pad5C<#l<{y3QmsH^pug2tGLd-j+PoB2?d>? 
z4k0O3MoJtOq(y|l(>i&$gD_$}!XOvGyt9XUMO`g0&yI_Ea*%Odh>3794@V3RLVi&} zLnDg$<=@XdKQKQzHTLlFBRtGAFwzHw2DY>{KYjgLL0<Me54V?_D_8}DlLJ&Y7e^m2 zcRD&6Fk>L~u$>prv0Mt2l~X@UBSb7@Y7tgd&^BArHgWsG4z{34jhc>DU02V_72zBh zCaIu+n|UX&^Jf@@j671gczD%uFc01a#&-rB%(tSMx3acq?`#VX3&z7d4-dDUy){1O zIXM!O6UHY;kDTy!1oPx%Iy#!=m8G)_(>=YNNht}w{yyGF4=+sh@q|aDuUA}rEcDZi zv?K-`%u8Mr=LY5-ZCP+L&%w!_kQ9IQY9H8FJj~PZsh=gL$V`pL8lnR8E?%CvnMZjW zXVCe&Iu!G`ybU>-o`IeW`tmxi1{j@hY&=Tm{k)H49;9h^cL#O#8QWC}4D-QZ!R?*x zyU(8o1^U6sWn^N2_SDo=00|hmvjha+eg7SpOrXB&atqcIEiNiZ&&Wi_uX%=BTi!k~ zxn`{N^3D0J$A_3#rJ<wMG%&Dn_dtY1NGU4fz7Fejnt%j!Uf(T+iyJ5N;I)oq-qOmf zrL8F}BoH6-yxgFf5D(&E9{3p>A3i4Yc>FvtUr|+l|IvfTdpom>H-|^A4h;<qT^|@4 z>PJ0ZAGkg|2w(JQcQ-FP1L9d+I*(x<m(D{24i0t*==ukGXz?&lN<qo3e1?cTGc5rt z^C060r1v=#^R~Ded}dw;x3@tvFDom3w6{41^Uwglx0#w5-`Uy0m7zs5&wye6&VzgR zcOKU^HF&~+Yig{ZD9_5qLW*ALV{c>G+tvON_r0=FsSj2rS8vaY$~J^d@|<ek2~{IG z4HH);HV&Er(0Lj<I!yyZ8+T7cXrzpi5+^$wu4__f2uV-V3+cHg;b9)7^TtOpZ)w)t z))e|jFh4#q{Lkt<hWW~}?yk$7ms{Ig8X@RF*E*;z=*O;(HjpW3UtEw4%)i3pZSd+m zI|o~IY}A$hZv4!Xb1IxBB*V=-lba9nSj>Z;XJz>vI*(55kR6}-BmMm3^cXOYD~kD= z`TKVt-tX$`_6zU@28c<B;dp|{d9wHGW%tLA9v;3XxF5?Pke-(sFBw_+6R;}s>iCBj z_Gi?Nw_RI)_v42H%+qQa7}~mfx}lg?#?L%>8w~S!{Jh~&I&X1+d91lg4D<F5*35Xl zO+r$_@tJR`!*@-pth@yJ7sS^vm{L(u9;}p<6eJ|Xm{s8z=JD%13tpW+ig|Tlo|Ikg zG!b^@#g1ZL>}%%n`1vEe%@>_-{DYVW(ZKbqzheI3{g?0FXJw|rF=u6EE-n1_Zt(rR zUF__vjkQ&H`g=)L<-kx6D?2X{y%deCGcx^hTJ3m6=giZWZ@+jOIxRy(TMsYyuqYJs zxaQlqI-VsWJIg4l=bDU%dGg~izwd4EFb_I^q@N#~7&)5xWfb#Rrv!Y-&!h4-Kzo3% z_ulT#%JRZ~-P~B4nz#WcJwGSoNaneYV*WTfPee*V!X|wb^FHTr_<8YTGEaeKUiOb+ z{zz|wW_}m1pa0DKgWbp5x3)oQRF$y+*4Eqr67<8VAFxh3+VAhhl@(3xZRgK(S=jgy zG09O$*?Xk6r_|i2>bi-AdF3OS7tu%H)p@ex>U>y8&_A#9$6+2b-v&4Hs6o7>tQc2T z0d@d5j|>lf`r<jp&%gR7n2(8z2Igt-dK+LKub*df|E%+PnE#x&Vf+0t>=)*b$W;m* zsq+bmaZ_k-gV)dh8pGyhr^7cMo}~TY{ylh|jGP>XW2~$A_w%&Hm6eU{tq|=#et0J? 
zxty3qfmGbmCb9;w@1DG+qNGSgL#<_KWOtO#d+amsmW)4d^M!fa<LEq|ybT`a#eR#< zZ=f@8wN+*Km@h5D#XROZtd*5n|6p%MD$cykAENWRXm4{Q^KQP#bM)tqqw~KzhNbvJ zz0EP0N9lZ9N89taZ=D_O;8-1QW@%<ZL`)<uAr1&Zz=dlN{LFk?D|($2YR*z|MH>~j z#@X|_ddU3rx+%|)Q~@DjRc$T1<LbPqk!Kp7{QQAjrR`BVf1DWh5AZfGUOcC!p-#p& zf9$|@Se)~1?%djfp%&Uy)>NXdN&Q~tg@yRfpXWg{-vv64b$>9-6aN7^f7CT8^5c7( zTD<cr1@X_TJjgt**`x=&O&geba5ZK|hFE@@m<aTAIeA%99L$UF>wMKd^PkrSZr^zz zBxz2-p{DJfms&R!Tik1A=WONTf}i=%I?p6(=ACf_^Z4>Mb)T7UZ43{^>21W0%G;oI zp8c52zkmN8z9=-vA6t04M2eYjgVuShYnSHdW*{)v)6*`iEK5v?J<`wP%2fg({QT#m zG2RC6H7R0NDKzu({}l7!ZFKN>n`6bW`Nahoo&P78=Z{Y~F7s_2t-!pJqP(4z#o=Z= z+?=dztQuMx5Q4R0n;HJApRcNHXl>!){%Sq!&fZgLCHu2nn(_`w@fD--rGwrPNv=Vm zN5-%O%wkr4SzO#aYQH;INkV+=yv-3Y>@od36yVSt+l2GSj~+ryq^_=-SC9)&^02Kd zX=$h>CMGs9HLR#AFU-%TqNd!$wz3$Lw{di`V_{}`@%%Z~QwD^@#>Wf}_Tck2Xy$Rn zuwT5*v0_-=qy79**I|nbj+}eo=Z}P+v%Gjj4EuQZ2yYV<`dR0d73FO#%?~$&c$}Mu z+r-=ycD@Ba^OY4HS9;I$^32apA8c^%!D9{)141@sS%;*Uvg-*IBYugwQn;_fqL?Qp zBV>`V56<To5R#F)@H?2#&PX99A-1uzZfZR`KaXbq54a}v3wnN+jI`w8*FXOB6YPV6 zqFiQn2B^p3SsYMO5@UgR8(XW&nu><HY7oGYp+T(EX?S?JU~B?A@#A|ei&>bN8KtBp z0f1}Q`f%y|v6)96GjD^3d3_@TIXM~J$xJfcqnH=qKQAHzF{~H(c^EPAFh4N`I{z5Y zeGeF!K<7i+yV@~2Z-Y(e1qIHtbFkYvIl!=sb?v}@3|mxD-Zy+*K!9&@?Av9<H|JJZ zQGc>oN6sNBs`y%5`E@x}bygNeJUUOrdch^4_=1!yAGVq8tILZ(4oD&}?__6lxPc!M zwQkYT*`fK!TqP=o{bT(6FQ|Kc=z{!Wed~gR7#u?w2kGhQUcbchU3%J@;OAZ4U8*lt z-JG4GprlAk#^O4Z<YnpU&*|uB!ln+tJ2^fIokdGaBPlsyczA#o@7x1Yc3Jc_srcW) zJZ8R)3tn#n#~kpJla~_^;OFMzINV@ndIHrc40Q903hL`?&`Z3rFfS~~&&9<Fr~4co zEmk{sV7|?bM~@!j%2gWb=~7WsA$`3Yn;V|Kexs-$XJcu0xS5RfMOr#qLt_JoWCyQw z<I#B(^Vf$2_^~h##mmFv6dB7|ZXHpx$jFlG9uWn~%Bpy-NfEJ1c*d0J8ClTL(Hve0 z1y%w0k(HIEq@wgdI3L~!3|kPsAv_SR?afCrfAoAC(D~yrUtUs3OiXm8r{nNj%Zu~U zk{7^uT&gTR{O+6AuYqK4ZZ0n$&xXdjS8v}!OF0=VD;kROvf!LT?{Kom(%G#o%qS@- zt!%8)(o<k;!sBfSNXW@K6;OUY6+iRm@XWVy!8_k(zavvnP+&bx1AX1Vys@cKUQvEq zTNC`Q_GT<2pqHB~Y=IiO54I&980OLIq+;MFzx(k$6C;Bk(&KQmnu=n8nURqJ*ocdX zvM@Krx&$vjCj)cEP)LC9Fnjw42gCfp$S~+U))BeAxds%2oJsU7m-YqosL+C|iRD+f z?(bonZ{v8Hkd%a7(l?<zG%g#4`-V$cZdk3RstnjOGSVMjSONP93_~2}iS+2|ZVwL) z#_w(Hj@0>MGym?x2k>mb_AkG&z`-XZB-GMU_Yd&t>T3V^>F213PyzyifxfQ8jUPR{ zzp}h=_;d{Pc7Zer2?;tnJLVVW&dg5X@iu3P$*Fi%2}mfg&#OeqIKCKG>=?Pqh56Zt z(4ePJpI}{5flBV>XTxx2WowmJm_If-2B*o#(+z9O?X3;0bJP#&JTQM_{Q6Hn|IE$F zVTofI?~CWpZf$Qd(4VuhwhRvo=EJc*4n%crWf|8k#l#Qj{Pn2`QBmRS^kl3ppFiEX zy}imSq)5oDC~O+(m)R9waAjiY&SB<%I7>vqB^{hnQQAJu!pbf#ie)J!oN?Hzz1<yb zLzoUJ87cwd<L7;Cs2|`t($61tO)7qTa^%x7nE(0bU&>1h0X%1W+v5Z8;$V07>{)Sf zF%vVBsF;Y+(Q6+*eth!$Da5J}`eBut!{x#Kdoc8aB?f!!?&VfnU$efkMvLRV2Po!= z$Y}Vrh{<WvlVkDaZRqjn{1M(}X<?R(j1146#xWO(99^9XiVIh^*1`Vaxz%S+XS<=k z4%RgV7@d!ej~=}-1gF5<)EH~{uwS5u!aiqarGrDBz_u9e7xdi};UHwDCgJjR&Ytdt z73HHhXN-*vMT7+Kd>uCzI~fBXsJ*aBxPN9>P<Hn>*rVpppC+U^FBg$nUD7rc5Ef5L zN^<;LRr`2*oIZV8R7}L&(kwD6e0E_bHYyC)I;rC`f6N#b+T4Hq(A?DMhf_Z&%F8}` z`sB!um>L_NI(-U4Fi=Yfy7LNhSJ#(OX87H^#>QF-3UV$^_Hi8N<p5oC{CqrMR#jD% zOw5cUBEtr*_C9>{059{Tw1PU6w2WBSjEIYge4cwie-7(PG<1V6xk?-{EGRQ{e-aYn zl0&muP!=LPosfu7Qu3mUn`>ER+3Mz6Rb?5R>nv=VxwW|t=ZJvd?7h3U55J3Wa)6Kl z!#vo+<<(`>%3>9z_wTXaumXC7w6tVqUJfG@BLDylLG!+hb!CK(md5}0U;pLq;)LtB zV`A8$iLrv>B0@p}B?URG{SgWmCmSm#Cl!y<86Mq>Ht{~`9U-~BOWVIL8~65b`JYo~ znT1uNa_aJ%#ySQkOpWzVojP?K<qYuk2H*i>I1BcUcDeaE_a8loN3DmY!Fo?8_`RR! 
z9xyY$di4scntf(|V)U3djD7n2Y44RTQ!^tNYrq%;`XT#!yT18jI<H>6yneMmIKUSU zKAd;xp_*Fi#wLb=!2w{#_Rx(%>Q^^bqoTt>x=Bb$v^3NJlJ1VS#rfH-_0{#Y6$s?} zx;rvblVoL3xAXzvl~okshdU#jic5>vHddcKe@013A$3s#ytIp>y_<_8R8NF+NPrJN zA2%ThIgOwW^*JUbdC34DPcJuTZ+90@1XO1~q$ij|CKi;=g8>Fxi}XOCmQ*-9V%An5 zJ>8iY>6uuVU5?7zzz#sng<A8ds@&7rwzaXovAzli#N5OP0v~1;CM|7Eu%Hdi4Qt?D z?%g$k$dQQ9&e{^%z{jCIbmu@{Bp7T0LPAggCwp6jGmH!l`xSNuwa`*XASNz)Xyhv9 z?~lYpM}YN(F0UXbV`*jrKOEB-cHYa~m6C#7SVSl~J~lWs2z}!MI{3+>!`Gl^!R|HJ z*GOIv2djDJ%oz!B5g#vi%&*2A0i>rJ`nRKEnZk;)!O>v|WB>p5-U2MHtXmU)?#w@P z@9ppFjv@&Vh(Qt}M9|>w6h+|_P(T5N7F7j>6z*=p-Q7L7y9EiFgt%KrI_Y%JtW$?1 zbbo2P-QWCk=dxK(J?9+u*~i}X?sx4vwU;H)0-h1HqoSl>jyDERVcu;As<Q*^4yVC) zlK-rT7~3JCJ%WZZR^gPG`rs^3Ac0PH*4`ejveM$9ZO+N!!qUn!szwGU=KK3QF&K?~ z`}ds{I74%BDlN<%A02|bf!>pusfntxA~*!_A+QSQI;;)`!Xm!_-{zM3cR&0P6&?)y z0JjnN>f?<<7T`I8t`od)-rm6m`Z#M#!q)#XE4V-ffwO;L+y)(%yt=$lSW*blsJO&M zxbtx45D)<>&Ye920utm3_#m(!^!umJoKjO$K>%-N0!pl?q;PG0<@=9f=JC@f9bIjp zcC@rKjvPG#90-Qw=s~Bng(cX+$pKUh2hzf50QPK1)(MHRgTsB_zIyfk{W}m3Adrq9 zKX&BEA<hE_I5-dNVP)U3doK&yehwbKLn2t-GZ!Ixf*ZbLH_MJ)yZ7wf!@|nS#?B57 zD<~uck5x%o@$~6aN0_rr06ab!tcr+=*i*>A{=N;MR7~Qc*hMj@51|=46EEBlPA)D% z!3%2Ys>XPuknoU>?hfc5Z{54w+TLnlqz|_Xsl&Wd^co?$my?&}<Ku-$mT5ej2Epb* z9wfX~Q3?(V?i=iV_7cH-??AT)!$Vp|iVulcp$5-ZgBTXBD=;J|zogJV$R8pgxL!CS zdSk&E;cUgkFP=Jm5-J}+{Ps)N?m|W|KQaigPs`0AIXeoA3LiRh82-z3-~e18JS}Jw zZsxxI`;VVGeO5^5Fz>PbCuH}Y*ORmibGy`LWaGiUpKCV@J1aW}3&%lm6{E29s`RSi z)f;zj+*kz&0acwnCjdv>&%6ofB*vTfgkwK9H#eN2va%xJ$H>IM&7D?UTC}o;%mmfc zPy;-0=FDk6zM~M+eKNRF$h9lUdw95tiHe*)eG+czKUR>JmyL`L|NWRpX#CZ;6H}vk z1vy^co@VBzYHG^BLT3a{!x<hwaU38dD0D$qPDWEp9nKa211#a|=UrS{I6F7}W=nqr zN4$Pxy}G6%C?pVMkFK7MimEa^M@cCO)Z`W9fZL%FoG#YTSRWWZHZHoe>++*V57F-W z`L{1?8)|57R1>_Rq@2=OQQ3Wm&wRsvkcImc`%$68!Wcdo%UuVBc5w2;#vN>2oIE^- zczI8pK7CeDP)<q7j_eehoD>orWkN7hP*#+Ymy=c4G&u!%O>K<5i*r%cuf2N!m=~9n z(9zYw>FFvcDu`Yb6%iAWl#&F90^ot<U1%<OMFrFIv&eMZPoGZAOeCcylN{{S)m0@V zCBT;@B_-6<RWRC`SS-fCP+#9rPY3!uZLO`Qjny)@FbB*tdH%(#x8E%;FXZIsxO=&S z#MK2Qhs9`NG&Qs|5YtrG&{D^n;X}hiY8&fwiwZn^7>0ObX<2DNvapD-1l(9f1qEd# zRSh+DO${X#Ww_W+7cPt3gw1QuE-LID85)_Mtf;RGi;gBb*lFwHl+{$F<zz*~FP;|` zI(_aO+)O@xetv<o!qT!L(lUq7h_dr5vkK}f*~NOMb>oPB9DKrT2hZ-|KB=r@5s^`q zRXzOt<*PSuUxTy&p$qp1#421qxV)6KB=X!PB>)3(17RaPcR+=O6~V)co?n<ZJ3sU4 z^(!>4T)V!Scq!i6)*33n9fO*m4Dc2a8D3RW31ki=xwQhiU};5&iiwz*82;gp%nT3E zUAw;4*i;uA8|}$(cW|;N+E~GJF*C;lrGvqOg{3(NFfVV9w2Tzs^P9Ie{&D6gP&l{m z-s<XZPf1Pk3-F=PT<jc3&=j0D(+HN<mLxmmB_`+&$|_1GCPzVBeG;f>PXXue>FG+a zwpG<JJ1(NUn@50yUyA2~+Ho1P(`w|syb?Qk&h9*LYA4r;qx`2%oxdO=At|e@qHApI z?&q74n-ias4Dp(&B>_(`HzAngKN``F<Qp7V)7-58tMeWxC@GMf9NawU&QuqIl_j9d zz}V2p#0bvZ!^<PLFmH5n?B@OZXsQLE`tIJY<g{c@Z%>i~iC{@EBbb6&ldP$36dylt zhL0C4IFcPvbNaXd^pcsAIXN@&{M&EeeE0sz^JkN@Q&rU!Q87`_6q!bLfS%jY9=yrc zfn@7o=jP#-o}D=`GSojZR8U$BoKJFcG$okp85-d9b@hx4%&iD^&Y)4qmLwZf3&Pe5 z#}mxZYcHv;>K_{!yD|&NuWqc5OHQKuFl?P1jLb|muvkTPbtwf!Q7P#QViyI)CB+q$ zg=OSVh^uKDSn~>Dk*}*9;^;|j=59$x1l2iDOItYm#pbu<*AAn3Ghe=XJ->LRw7fVn zD%``%9j?@dWMgG*2@f4W4hA&n=xh)8Nli-zNxXi2?b9soz?L8wKsNXX_yRU4G%{kY zF7QdEx%m0}<QL|)ceLac<OT%#eX2l3tI^0H`3j2*mRA=4Xwnln?xQCU0fdds^*MRj z35jvx5up%W!cl^P{9!pUF%J5I*0!d_rMVX`U;I+@aNHN)zJxkm-IuGX%X4zG0RNYg z;-NusWLSnqYU`>7hI;SZyYsWT>tPjcMR#v!OKW{rUV*)5tO}mKUl7YDV{t~Ed=}#_ zY#7KTh}&~qcK0cz-6s{!N+`&wX<&`<1bYVx!#g28qqe1~yskDSD>FJFAu1t0G9Iz$ zg!riVc<_bXlH#7x5ko_LQ0%%`3{D#Z9E>brG5R<xgmtWJtU#?kf&Sn-IR*JK@o_<6 z!2!X6p%G!1Qj@A`tH-A&H}2jAlHbe;_~OOQySGOsMr!J7(z7yR<71+tBjVy?VY#xp zqV;l1OIuTQZB<EmX=z0%e10<M&Vb}^+}?QcdNcFLqi4_7ZmbWD3^cVil~-35mKGt& zr3&-G%W?|xiYrP%P%W&kEZ?{>J~P$O+MJP-6%iBV8|csQ_YDXQj!BG9&&|m!$WKg5 zjk%P7Tz))q)o|@#=(V?YwGU5EqH~fDjE^;UwwF{@retRYL`J%JF>G8YrnYu^W&}-r 
zLlvy9G8QMMtSO@H92lDs5|hSrUT2Re9z#i{UurcZCrIPS!O3N*m4o%2(_0zu)}33R zF<M$1K(}OMr6naNCSHnzM}H|fF(WG-HeSBme06Ez>9Z$4(;?4ZJfE4Jg#XsnSJyJF zj%l#a*48vRJ+^*r71YS(_LjPj|N3NYmz$<$#vlKV5tuKJ*uVSk(UV8ubW<}EAhWxB zI{>=yIXKh{&BAe^<@dk*)61=w|Ki1qyZ7!uQ{euff@WqXX0A-mU!4O~-+%bv)$7+^ z^cpw;xcsNj9<N`&5uVjTU}W+rI`C`I#Z1CQOd>84;*Tjh?iJA5En>J^$nb)yu@2tU z!kI$z4+^`KTu@chJv0a;*4ES2)N%P|*4)|A(cd>VHyaZdBP}g;>h!6@M~@&1n<)#? zPtbCD2718};dL!dgX1F|y<IKst&MFhZJn2aVCR+=0q|&MOf<0$3j4bc9{^l|sE0-e z`iJ_4M+ax-W>&AS-D3Wr@A}OfYd5YjZT;itI&8f2;64PfTNCksw!i=A(T&?Tm)2Jo zR<6!1EzB&=&n#S-nV-FKbzy0J4bAeicJt=k@)De0TW5PsQ$uw_U0rh{;JIs{A1?ng za_#NNCAYVIy!LkHwf7EAPHas(GBP~{$ZzcE$SEt2N=fsLjG_7k+PZp}IXLTET5B1b zh-w&V*hSa2kB!W&N?<AMyz-oKwz~98r>I&=WMzKCcy`^yz~su$81Kz@ZviAA1Autv z=Ha<df`N=#Ub(t)>*mvEPrv`tY?&ZSUcGq@zVP7D12B-gs6BZ6@bS~f5bgmm;1gPA zRzUyz;L+m;4<0{)jqtBOndRq)AASVTg5UwU1!^A*mO;t<2bzVZK79B9t-XU7_5FKj z;rI7H1x=IG*oNcUp1hi#5>`o~=J6K^3FmeFS<dV36vgipF*~kquS>8X(%jucBcs!^ zifZe6Mn?hFqq8%kv(w0v{l^8MxXJPS@-kZ|M-7aYypn=~lA^qlBIsd7rX&VY6dn@= z5L#GUUAlgqnOh3Um<f0TN%JZF@n-JhPw4_*ynOup`GY4&rhC{9t1n)^{z-1Bw{L$X znc9{#nn>osN6d7w%)ErmEXj`_-FkrJ2S-!6!I`Yxx&@~+zq&HDI6pZ*_sKT%$HA3; zvMV#pYoGaYKEc`Pw*LO&x`w2@!qB8tkI+abZ+|N)l2ILFj2FgO*#?zeS%1(yvBs}K zJs^r_Ii<m^<fQMOX&+V<k>6L`I<tQB{x5y?1J9cI+~2`-hlM}%=>N->&hb^R<jdy` z0(ewhFIrv_F^>~5ixn}A-Y;UjN6=uGurZq|Rm+56L8ejzLP9U4WR+F4^z<UR^O%`* zwkA|UGp3;#H~PoN3d+h{y*$l{md0kL#^##_k*}GRg^L?4IU~Jupm*`w`r5~|up2*3 z{QX7PBWcurhV!2$Q+xG>nM3W(+b;qhc1L@SpJYjXiW~^dj|?Y(0v^Hq&6_J58;DC@ zTL<J{Sy`T5TtqS>Gt*^mW<3A6{N<#v%>NEdPBe6M<X6`u<m3g#C((n$?LB=g$TR~Y zNnPJaSl1;ir|Z^}Z?kI#x#URl)+whH9oSAN^Qcjby>e|sD$**4-oAae4b;D|tloMQ zkkKV=8Of>O0%#XDjTXX3!NOq~63baE!2aKq=`wm&csmCNhM#YA9FSFIb1U@Vn~!_$ z%GRv+pKNGqqOtvQYIassd|X&`R9H+jSU6%aum!f)HZ_b)O)wK8Utj%c>cGu}$V}Kj z{Au>UkI3JW2l@4zFQ&`<m?HM|Cwgb53*T(y(evjh+L<H|F1DFQ3hdgQJ1F2+Z+-^+ z?DEp|;sOfzUrK=cv#g+>l1z0B4#FJ+u82y@gd0fp54NRynK_bmEr@bh0}&Jd;^xUm z-@f&UuiPhXtwt`w(5nuKnX;ZzKdH+w^vtD3Ro{K|d>g2LVOhF<j}%g=?UKr&=yJ|5 zR0tn=!8jaDK-Z7$oDRVL|5Nrjp-nchv;ukK78D+llAc>x-PVVQ_Fu}9iLQcQAm7gP z_qO+SxA*rTpMBl1+C4bXH#RypJ2QQCae<i!oS7wdYg*)cfIUze07Wzp_%EfrfBMUL zz(3)fDT+St1(F6@1G~?pcLeYcAApzLxO?xjs@cNu%sk-Cgvj&rplW_I(fLpB1C!)C zJ3$a8=NE+}CWEkXWcXWA+>C7<bns^9Gzgl`se@N;+<o?1n-sw*ZG&;G)ON2HGzjLE zFk?Nfc>?EU=$YdZS#|5d(`~H&RprAEKN?X}b!Zs}RH!GgK0+oD=Z!+o83dm<3Ogu8 zWId(vuWAf-jE6R!Kyaow`ve5UCZ-gYK%5Wq^@{+Qyt+8MvNC_|8W=2~2_-)Y00aiJ z%gd|x?yugtcV%T|Ze<z4{IzR<v9*8527dFwgFA@Mc?{r0vuAFVI{%3Of3%-e+Z6Cm z?|-Iu06e1M5lM6J{wGNT4u}AL9RWO&0eop`3P^roesu22@4)#_mb-?BD_dG~$}8iu zbNyozTmwVxJQ(Iq6n&ztrlIjsO)@#6dST=7^xFNiTC@{tG@MthvSXfro_}Cw2cNM1 z-t)Q=RuKjsIbMnN@7|+f>9$_~3yN!4sVp&?597(L;woqye%2sJKtBjRPh-6~rEULB z%kOU(UnK)`6OuiN&hQM2ip|U^tf}iAANyj0`-bL*q@+au06(zgl%%$<j@k9Ksih^f z0xXo4muPCLTM;cM7Z<Lqu7Yy-m>>DG$l0oMn6Tee_y}B|J^M`i&%cEK7caM>eN)CT z{pAzAqYXmffa;m843A10IG=@egyF$Mr%+XM<+oGbZ`}x=L(@}DJv|V;rWKV$re-k0 zqscyj)--p#y^}7%N>0c4n66K9S>MK^mu0QvTr$>DrqMV?oftkw)*`%l{JLjEG5bli zeX=%cPKmno%%sxZZLI!PrLAw~oSyGlg8)`JXF$7ve&A`{fKxa>Fn(SCf5!&=3pPX$ z=U`xAY3V|v_y<EDonBPh)ZP6vRWq?LKR7YY&wpaio;|yng@u)ci;GiQTC%yV6-^*J zb#+ld;LO+Ge7$4G4jWr*Ap9#UOF*Y6+cQPZ#?LZ?f7Cfl*gt-B|H)(K(9I_2-)9=? 
z0?%K){_evM-+lk#0g(B}e?9y-KEqTpkMDg%J4)|A3tBfdJd@#>G9FPiQw#IxSPeKV znhg4PYMbui;i^_3`KrX+e7I>y^y=kn;X*UCvB#R4i)fiDk>Xp1mL9%*>ll!`U&>m; zrC6I@#gFy14JvIMTI;-W%hW!M?Sv|~I!)av(SV*=*FC)r*T1B^|L}vndB|CVz}+$q zyjb5edI2W^?K(auw7vN;UVC){{zWfhzrGK~#LU#*$==h)J0d1NE4QqnsegQYE7hZz zN0YI#u<QlQ@7%c)mf@3+kGG+@5zsO+GkxgrAprc>Uw^HQ#jM_Y0JsAEu&}xcNJ9wr z`t_fP9Hya!k02k7Z9k6cpti~U&z~=@E=Na2h+h=7v9@~s{SRoTu!;81-2U(%>3#jy zt<RDMH2nG+Q^qgNENv#cN5`out1HaRO=V=I+k3j;Vt<A0q4MzbWOH{nO7hX^S-#Qn z6u%%edeyVC(K0mV*K}|Qt(;iCyK?v0MLo|WN@Sd8jjB_@Sv~)R(&2`owX)8|f$23t zDLh0~$9286$jNxGoVnFo+lc*33N1MQs5XO5-g%#z`zf3+Ki2!WHshF<C!eM}x3>3J z_-GdMSa~CBePq^jH(EeQNMcHMSw&-K*T~Gwr>-2u{88ScfOcSdDJe;Hb=Cd*_XF(% z>_tU|fhPg>G#W+V%qdAp@w&!306f_I`ugIvYas7I)o(m}1fO7lKmg=D!1K#jcb+^& zQ3?v530lN}z@NW;`||z!C$C;Tdhz1nv*%|8PVd~c^KXCuJ62ok#~=R}03KEB4_~}| z{@wd0uirpu@CoWjYmXk?eDDw@`MXb_UAuD^QCZN}fEV1oyK?(BNG~SfXW_JqON%(U zIKSEP4F?BD&(Ofuyyvh0t8>?`ZQa;4G+5Qr21-65Col9;3Pi6CUVfGoS0jjC%`9ZG z#z%F0(yIn<J^41bc927wC}9$*$EXuEiPm&TsP0>??p-QxpL_TILqkg+*BQ-SqWE)0 z0UFMioWsgK&3?IE{!Hl}nLDIT=Tf44qu|1)?R`wkgICkzs0RIry6a&TDxYbDfODP< zDOA(Q#Kg|g-oqPIJp^TiHTCTS{RrmgP|VK_jE^Ch-?Mwyu3h%_c8}h^x3eb!)$iSl z(0*uG2!LmBVyt_xzi(t1Dg%GEb+k3NwX}D4EZw}--qRTo5e8ZUfCu2ee)nEhRyyp} z+|mTd-?)zst=t2kK6&%*%GIkm*_m;%QLSw)w;tT@9~yuj1nFFM?bOszyLS8L!pbtx z`?ELSb@p^avuUZx>$mURfBq81{OrPfPk(Puf8W}zThogRaFnvLl8uLtC$7w9W@iKh z_`#N@)+UI=kp$mU6JcSYXy?Vl!(CQh+SYk_aB32b*{7G6Qc{z>eHiH(X>fs4OIHUc zC+a&o3To<73X3CB(|x1jKoi=yc_JilX^oJ)I#Gd?II?i_?(;X=qzEo)8!Wv_lUjNV z!|+SH+&H{m+CD!pa}AAHgTgb}`4m{BtVB$MRU8r$i@LTE`<E0Ivp^Pk=dTo8II+IG z8g56_-43bJ4yjUj6rE1syj6WG3_@x!<|G`!%8KF!-E?SDYI<>5RZHstbF3cKHUMK@ zCeH)rt!;?QckZ^dw{acd0;-2Crl$DyhmVDXF0iw+v9hx2>+69$K6B<28yoB4BZq^6 z1K|^vFL$=ze)giMxbWQBGkf+TJMP=ZZe>M4;R^f=3ub0`z#h2LuHCy19XgnvmV6LN z4!CzO*xtSD``Dp<6B8ql_j<ZI`}XgHYS8tZI>ld9TyXE%b7;ZC-HnZn4gO_sPdax_ z0JN5lj&?{$5VXd^!U9*oOz^i~-#~Be{sRRCITmJt_E}k3_A}ox2G^Lse!YKuR8CH2 zFY`R%|Kj3eoxMFhV`HHGvdStz1O~+?p+l6GF0RHT2dtTeBF>Oi-HjewbK~L5_Oa!I z3ig6HUp?<SIh*v0_~3%pE4BSAWgQDQ?tOl_MNQk8jbDvZohoG+q3x2~Kee)r*gszq zvKn?t*?p}*-enMSSe1H6g~Fpu=23D!px|&w+1Wm}DZYBfJu*|%5UAeC&V%6{85@_G zlUq>@!7Ms%2{K?S&jaQcH*Nq2o<4mNAP4lTs-^;%hgFa|An7$V)vrH#dWN|QP(Q$Z z02TmF{li0b^|k0KQzq>}orvP%3|Ixix3;!|*3gY#fAbAQ5T&I>0ClK{pb`+fXE!Ju zU0oehQ<EJ#H*3O1XpozSyQ8P;@$0uCp~0{b8b5G=3o5|*7#bOvo0)#~x4$8k(Xs~- zX(?&RQc@D%eDlp-X5(-m^yum7sle*O!h*2x=6UWx+L4eHZyOlOuc}GTFA7gi^NEb5 z2857MC2xr+`IG9_e0o7;trJgPzw?YKW0$a0cghDbFK8HQ?wwyhxLVP**gU+t^(~Yu z3#&&2al3^K`E<M#Y-7wB*$<w+*aq#NEU#X_KBY$aO4^RgJcfr!?>q{Q2juO!<VgFZ zt&M0Y5oHsEsAhFDdtCxDfCJAF7%VBTu(ZChYj_x_{<EI{h<TvFx%G7sVWC~j0vdkh z-hBaq)2ItzFq(jQbQR!)LK|pybY}YEMKKic%1R2}-d-%MEU-HS!l1xQN{Rq@s3QoR zIgQcMAX-^K5E>p90#py1p*3kKiLA_Y2E&tC6C8oX#>PriL>R&L&Yk*ty3gNz2n`K} zRpxQ`poqoeO+d6OD9HPRun7%5D{uyJlU+OEv}+q`EiDPa@UR6K-ra*991;ZAOCs5z z=P55Q3t9;N1r@+4{6j;sE2?5Mvjbui>A~TS3_s``jcn|J<P~tnUn{%f-7{vdJ%s30 z1m}KIgAPhw#W9axn~_jD)G)YO*12$X<L9~NJ-idyc@<b?tcCHx^48H_an;+P{qx1a zJ!!YF**W_Zetkbq8Jh!gqyutx0DCY#O*d(J0l#yeAi)=ds(Mc!Q1u{IGfT^B+Aa@F zj!z=mX7niUk<XaFaRV?fCMNO`^Af*;dDu%Ei|HR5>Fn!4xfdi&dvDLv@4g4LLmlV~ zTH9N-v@~E}SUrF4ENJ^@@4kmP7o`5i{fAHu9W2tt>A>aa>Ed7+Y65!SeE1<aI1nX# zP&jv=y+AR~l<2!;Wu)sH>j%b0pjW#2?B)8S$4j^FxVq7pT)*q&$rG#h?&lTcv9htk zI(XK=#2BbDFcA?Uq;KNb-_qF*ex$6dh@P7f-XyK09ONG(Jle%K(ALe%%*h3mJjMu= zybTw|FQvTq;mfxPg&lC8mF+V1z3T)GLgg$YAbPFpS*q(_dGp=RUyOYA;@dM9jaUS< z4`SS<EyLw)qMLhXwn6(R%ayCwj!N6eIwl(jmLFC^NFHH(CFg@GE+BOd=m+l6p#Kfy zEv{#yYi<El@9Z1s6BQSmnUhyl-PqZM#CntCTlG9(es+CbNbmxRc^Mfg(8ixI59nQg z^aNRDY6%eb;2_Jl@5Cp>p+W^{6J(5wi!(3ZQB-C`MMa{!11*C%x%cAR<r_CvZ*E+> zeS3ao89g+#jKyNUeg8pNNDx|vRdqELaE+|2j88;8y)X|^EvjEY8jZ}%EL=xq*wEwz 
ziDV1C%)vtkkzc<ugVy86kIk$sXTa_sG2hYKJ+-t1Lhi`XBXBYt9Qz3bbFz!GxcEiX zO>_<PVzcuhbcNo~&chq-6+-fOGdUfT-6}LQkBqUUJ6CT$ku>x^rb^TEs#SH$KaTNo zk8ebWC}-C0fARa)l#B}2lWHt7HYao$5`-`l_l$dwo^9jyPnPJ^T8D_5!mg{Dq-ZXA z2Y@<6vWL}Okue+%I{P`?-gCMvDpVCiGh<sjBF&xR9}L}hVorWRbuB2I!3kyz`$^B= zyft}s@x+PaC=@XmEuhe?n7{V$$){By$*VVS0qlMKd{71k;O{_$|BhYEh!Wa#r@NxS zW@Be7Eh}EXb9eFDI@rp_#_ZA}dT12$0CG@1yO}M+>f<->b91wxr+^9o(c!6ya3<!V zO>jwIY;agOw1$Qna=e{8pc*�mMfT%<}=}Ge2UU=O9mKZ};rVN<&ivFg!%rpU)Es z5Ri^WO9va0ltvE<bMW%DqPpSjoNxr9x}E`-s?&L+;OfqqN8i4^RNT2=(n{Gr8-&ei zte*lgvZCv1b?*{rn|JSjaV{5aJu2%N%zjnMX+0k?vk=$FvTe}*$@1>Q56Lx?_KBC3 z+;R`8xgJ)d0or*r-TAaU__5vx#0e~CFuMhfPO1_?)k9Eb=gx2siGWT#rLefF^>TY( z-&V}8+`U&-Ta6CUBU`Mk)*d|kjQK6v29-LCH*bEzJOGKEogG?3?dZ`XM~)tjhz#dD zh6oryV^((N>h0SLYpcjh_!~DS=dOIlyf)_XcL?UumSOe5tM9UNK4Tty6XZ9Fc`-53 zxog*^mX~vKvsqaYR0G;$Vxv>jlA%7dfbx81b_UxPo`)?BEsY$U9B^sSBQQ@7vBS(? zeCwMSdq>5SeFKs4NhcQrB1zi>FM_f7pUQMfSTQjC^#?DM%>((>-9XhS*k&A5bE79T zHlifI{@_<0P)T_UE5GU<X(EpnUD!BKk`Uh7Kf8_FKUq?0$Bx=0?<Xd6>IL#?&;jko zFkUCL87Fame5y2-b2__(jrNL}D(d15h_(c0s-u^$S9o+}YDR8lb$MeGJbe`Nwzk&y z-@G?4)Q9E0$m9tu8JX$8<N)&Bn>@b-^E(mR2h7i`uEZzCBjfpdc7v*K?P!CZVq|&> zeamZhdHK{y{#{7(yDTj&?!A1yc<tKN>(}R3SKz<Lj&16BELQvF`w!ycVu-ZawM$l3 z`oZh(5-%m7UlTx>3^E3SP!#i`qQc;45N<n@olq?VVa46o-(}|_bJW0=O>L8zw&@W( z+}(o%v#YB^Q<LCjPyxhJNp*EqYg=YX*`?fq2xP1#26{js1Pd}1RE>_gg|e>E4rQvS zabWY{;)54&l1e&3I;uM6>3CEh)$mlZjV)-I1(FAQ`R2P{c{<;I`-=af(cW`9`&C?y zV?6=(7G9a#p#76&dhO2NjU&HOblIsy<;5^gV7*UZeNO85p2GQ^(F^3fXtqn(c*jLc zK3NlO6H_A_J0g|t;uqu_9haC}kX2fKgo$}T6G&W!w--A*8vq_~2<jT31yI?dZGOV@ zDCPm-NDRw;0BK~;ZjdFQoB?umjddNpUC?oW{D9VgT{$^91A_wEyE|&@s;{gpU%jz$ z=JY9KT%9?dGdeSEVPOu<g4p2U;TfKsu(TwgkOsL9#|;SZ`x)jPo$S%L1AOGp%Qx;G zbo9L;z&xBoVM!tIIUE;67oG0fH#$6j{Tl3x9zsJ?y`ZKpqqHI@HlF6=M-PdB7~hHl zs>V^*f(W7OSxq8~3OyjX9vFW9#v^HCA5iiJKJ^l2alBfde(CMVa81X;$kjW)`R1gF zg*W>#rM)sX2Q*yI==sQ7hF`n&U>mo8vS_=cu$(j8Eo8D!%H|Z#_q47**crWmbA};& z$}W4)<5;DL?9#U9R7`N@1XFt_TQ@ISU??b@`0QNh+>!A*mc9G-v7^I)08zkjenB2+ zU_kFy%x}i9TQEO%1<}=Vax&lS_y&djQQjjbPo6-(V}cO4q`U-LM!#bMZvrY75fK8w zFWtHWAtZ=u7Uu9H_!`ZX3fdbTgg<=b5GN-GvxC^_<LkZt@R47D?-tCjfir?|+RcoP zp-m9^Aj!}m7cf7$u+TR;g62>|!s|VIPMzY1%K^VY18_JQ2zyWPpMbqio;pbl47GOk z0PUwwv;}5R&@uW(iK6WgJ2<!T=+$?gu@xNBHmVLe+VrZ!YVKHST1or-N0R^TpG4&p z)UzE^+9_hPPnB{+i!Pw=8=6|b4cb3h#;>e$OIiP2!e)nr4Tr1)pdEd?7%+d{D3nW_ zuv^50Tgi!A(Md?d0%wj0dy+fDBP=Q?E|G_a8-2BG7xKjoGNvIZDPCAyh`x>pp8#OM z^VeU0t){Me{qa*|^_$IAP(JAQxqTx;$|_3x_U(g4b}$Rnk&%&_zjh5gA|TKYsGCX2 zJHP(sYY-TK$t!DX8Ce<NaXXoM2I_}|246}-a-t!Pz~9cD92^|_`g&7W7eT-S^3i4m z1<ym96Z3OZSFb85DWH7;Y=`gxP97GJ!O8J4=m;Pl0bhpy!}-Hy_K%MaOwZs=@u+9* zL<$^b2Z$dc)eTf1BIEJq%DRU8Rh;>Bd@I@}?>~Q?Q#*7-*%A5Tl~H>N>nm&+kWe~Y zKd@Tbu{gAF`}aJZ4<A09l`>^Lt;wq3z@<h#uI(vj5q9nNHr4;xV(u9AwYc>GHToVI zdu}C)pmF$lqp%Cc;X<a-XZ8G91r0dm>`rL89o29X#Srx@tRPZz@bZIr)`(!Pt*c{T zYy@EN@N}<jsF}ZUV`_Qx{doW<!`susk^ssF1U9$;=pSU&0$BwKG4}Bl&G{P}W#y#~ z4t7>VO9+P{u&r;ZADcto=>>?kb#+i_6!@2wl|^uH&?xieb|BuC_BIDcJ1Z+oh$b4E z8=&K8@96@eZfRvfBH32gRxMn=F*bK)a&e)urV{oA{Sz1*0D=lV=;Y!eoU?@m!O_va zv%lBR-v=3lBs&dGj57yz*3vW50Qn%3fZ^K*2W#3cSF~P^NlVu?G|<r2R@Kx{$6`&a zZ3twlp^crknT493!69|$e^a2jMw9^Hr&jNZ>3bbgbcWDX&N}s=I^89@u3>1cs{3kf z|MIKve(%d;W<)=BK7~C}Rvao69(C7aSg-ipmTl1f$+CX^_Aylomm2+;He;W>lYm~J z5I$1KBvJ?;C2AgjRMBa#D4tiHc3zKh8tWm0BN|v)gM4*l_%R}5qcXD7ODjs7Tib_6 zhG%ET=6>?FE#MqD6CjQJkYsjdc5Q8O<Hq8~2E>)4v(sB^LItP^C`G@|-TL}2pcH9@ zX<J`tN4KN9p!E^=L2H8RF|j@R*{$HXH)dB?w!Q-l2l}}C0<;V#23tP;U<9-o`U-vD z<WzNATS0YQdQoXYPF`3_hEGg_TX2M<Pau)zZbEX@F}Kt-Fg&Ma^Dpu=HDXxf;PS1f zuj~WzIAv_r9CNYs%ERjJ>W+yyjWf0VE2Zu8Q>*uW|I>*{C}iVP+%1aVr%XPe;=-%t zZt9x64cb3nvWr^xDUe0*p(k{_xfESQOruc~GmjHC4rdcK;*hmHhx3xa2MX%@$QzK1 
zZAnbn`+I~&h9oAVgZ8zT+mX3J{^3XBzxdPXxhuc<gS%h({pslMtNF7EzqcmywW7hP zsg|DJissh*>e{rTlDN#A(8OfVuqb4phWYgq-p)za!dlb7NL<_ezbnv04SX_dN9Q*l z$L6<lE7*&hL>l@woYM{9$1=k5`aso`buKoKtiAv62j+NbZ0%>`SKBRQz^drDU)h;Y z%l(wLd;i${HfaBRF?NhOj`LL@#UD_i9m6ohEfPh|<3!BkC5S0}O3r%(4GyWgNSX#J zT1QHo`fK8eCM0_+D&5&90EK;Weo<~^HSl5g@bItylJ0l9!`LrQIB@0X?aci~Yro=R zqhD$Y{ZZJ!<V0IvUqw@MUR6zMQAuoOPDo;kcLXvrxj9f{@1$pC197Vi*5qH6T@P!} zBl9{(uHNmRy>U*5&Zj~FRU>VAi9?ZW!pN`cLE_f(&c%&~f9!>^t1H*J&T8+upvR)% zz^>?YNS(%`O7)8=+D7c3FE?)86VmlovQLz@irO#lbWT4=+$tH6FJYMkpPXW5EEmlL zaGt89C{4#Wc{3j^ycP5NS-O)q!uFv_sfoD-ITclv%`KPv`k9m3UHJmYe>xk|*zf*@ z_2^$`V^@CLM|#J{n!39ynp<)!s#6MzVluLW6OtJbG1Pz%dk=2_yeR^BTP)rT;?_MX z)LjZL)bO&N*_$^Xe{0Ri;F7h|rj@EX<#8yGB?#ehXz1F$FpPw*f9#jtODi|H1TcX4 z-SYOV@(v(xxK$`xgn(_t{w1ZVaX`!{z}&y|qInpw(|LnXDeF{m%VcSrw2Nl3`$1Al zS&A6?>bN8t(9^Z-!nDlnjBH5I#RKm_NE;Y;DK;}Zy|}EfrmnuD17L$F1cYFJ%A%j= z$@%%@A!ENArJuCTADfZm*Jr9>e)dm{U+(XRh_$$`A*-wc962H-Js>s_{F>qy1d_(W zg=S*sh`dE+j911Pa;rM-Q*i~rcTR6C-F`}oC^@9$r0S5PNh#)5riqvY`=+(m4Xgme zw@+OA{`X~#`HAezuVXu|yi3@4m%PJXd3#PJrvpmlGa4?hU%lBz>|ats5-Kpx$+ltD z7mNZq6r9BgiPF~TQp8kQyKFJzFcu+0PB}X{ixBI8e4>95jvS|DW@A9Ku^>|&ynNjP z!~A06BU3Yy@(O_-t6Ezx_w^x!Klc-y|I$=Be~6`jF&y|k0_IEg(Pq0xMjAW2K+k}H zM<Z6~zkH+O=phkgp8%3O1039#<Nz8TB#jczV80sWD@9j&RC&kL#>(Aieo1u)<n1mR zh3R?K9#nBXiSu!ZuCMIATGO}OKE3hc&41*FetLKhc3$~i;)LCDc6(&)I29dPWJre; z?VoJFRR0&17caimbByu2)M6D_b_(YM@vM|pimYvhv`xCaL#_bEV~?QzAr%*WT9R8# zB_*;FPfx-S?DQ>(7G#>8JA>*U42n1OQc6O0E|6qNePcssCn)@(>8W2q<JcdWOlIuQ zk@+u$!vm8Dy+b?#^qyT_nOsl=eHL5@Tn;TT6!e>on<s%xH6c0R2t+L-6GfaMyE^rM z$hqi~<1bHK2ZoQxZat*zbRNsl^{nPqcR#4^X5n8_&@v0ECZ}#9D7Iwd=AA$KIFiyT z*!flW3TW?;w%H|PyGxeDCTIV535&x@_RpTb+=lC4ReFYIOeo2hD#i>w^A2ga^QgGW z*=5U;GUZ5_ijMgwlwI}+8J*Yjw(`pj&S>#VX|nOl!CE-z6K%|#D7JJjvTq>B095Bd z=m&&Z+1%RP-Hk|{-xEvD{l}&F&ujkbxX`D0IKdw~hlUzEJ3+nYSJeQ$C+6fwrDnh- zFv6ojz(W^i?doCfL@}}jh9_zn8p~si_Nlx6s~lO!F}C>f{MFk}GiwHqsZ&mBx*Pg7 zh!}=(sZe!j>FG7&0Qkm{b-&mO&?oy&smrL5T-;+TYdh{g{Oy^pv-9h@1+)-hbJ6@8 zz&sQ7JEVx;Tr@wbX!rEV_N&EzWr@qIv-HZ1FB{b&ryNkB3FrnWI^_fM<?V8m9P^LJ z*|SMlh@1G+qRNxY`=j!@oI~=pOo@O8Ge<JO!;#@n4-WT_O#q#9DYpO!zo@nz6mrMF z06Ks5&tO0HyMx(Z9IKqW@_TmqG_Nc=E(B<YJ_!5~rT3ijDhOBOvU0+bQbDAF=TezN zG(;-h+|k9z#ts-BV`L(OHQBA^_OG&JJ?Hq;>WQA|jokX-vpV!+YOea;buyNh_RBjd z+b4$R_SW{VAi}11>FLWijV*n8rk<PvTKf*mupgD@zJQa{up`(81cYYf6gD(p9vqpN zTU)=`**#iO(?R!6lv1??hG#pWvQxzPD@m)brL4XIv`bojC1C-W7gVwPaeG$czq<HF z7uyGxL>CXKI3(_qcNW0;D?1e^*ykuZ<jUEmb6hlMleCmF4Gc_gDZ4zAUNuY!&B2=6 zXqyorzO$sb+Ix6Y149@QF~JGRF&SAP6>`ceOB<SMFSmn)=pP>+nVH^%-$y8p{?qK! z6eXW}83gSkBh5WM5OkE(H$e1(&^v^yD7{C-xCVtf`vllA1w7u~$-tU~HM7t(G?dXX z`*$VRy$UY+E{X9a!-Mm;YrAL9V(G`#+(6PuStPQ_If~*#{4+YMdX}oXua<Q#tloM0 zX{_ev?fY?;igocGJm+-){QC|{!6(}>MYdDw90HgF=dtXkHQD%8*!dLLjw!L6*4inG z|4P#GZxR-$eI)_QmMrIRp1w)j5dCMAz_@a1L}g6zunaMJzk>5|O;06<JdigkPK9#T zsXWrQd&SMgO#=K=8#^b~Dm&)Fvs#SpTs4gF`b1kGd=S7u_-;XA-U#xOqSLd0MYEZl zU)j>u*wx)NG&lgXzd5#ZW%L)+$mUGfzd4fg3;TYAI&&`Bsmb24QE)nlGC*+^*4Be` zPc1BgSQW$!xTkkytXojHi(e2z?=*LUGw2z6X2fcyrfYa!oAfs&+9@o<$~Qm0ba;5- zc6QC6xS{u19UnbLorqERJ_TnH(@@vA2J|g5Ao<nXPrf)fdHe2zgp^V{a=5gr?Sb<; zfN&sxHh}sG71k4~EGO0WT+sXaqWQle#BPZiU>;mx?>QYShma2+e%Qw6KdF#?v*<Ck zKB?^p?B$(~X?m%Ui-7QvPsjX&GNj#Nrswn+;n{7#X1&ula_h$kWFHkhBOP-~yuCA! zB#F)d!UQ1sM8-l$7?Y8mlvkKhQU=Poj7j@#eSJW=5JGNc{@)%&|5Db2&tF>2&7y-v zXd}bZlhCuY4-9~#RkyamaSN(zL2;!N7RP7jf&dFiO!ke6a}SNA_y>b}2XD1N=-v4v zy&Eavj1Fp$|C>DRw6=$RSVdg%!1UUK$egwlnp8eDnt^xSd0YUyypseW!a1t0<nnw? 
z-*Rck!orP*zh^Z5+Vxwh8I^dV-+38x&a>LU`Jmy!3)qgUu%1+BJ*|b<8O&Y*%(06m zsTmd9u>7Z%ckkZ&M3;u;bTg9M6l~+TlqiQ(TooPjRGbS{$;GNppzR%Y2^*iqc@{N| z-F^0YX8l3y$eL$FhPt7dmWdf)1_<AZ>INc#>=Wo75&_TxfB~r{WamMIn^RF$R9jcx z)Le79y{V_CV{ou{Yz)|VcxL(|MSsfgfPS6&Io-pj)DNRuzPCj~gOQn;!Ko?0ImmTj z^``D_h$i4DK<sct@VI2AZjVSw3yM#IQ$nPBKnV0o5QBp-gLB@bcf7f}uD&RixI>k` zLxCb=7G&X9lv6)Gy>_1-Q*l_y@vJrj=U&B+VX(<NNn1rxV;hRwt^nXc(o8I)FV*}3 z)%^iUw0f<zeK4n>-Y+=aCol!EfMkYWQe<piZA16NN88hM|DPx~Z$BV<q^DPpd#79m z*mEkm@Tj<|k&D&HMewOkDdAV4>=HH+GVo3<@4NH#6%hW!>ivj}N>u}MRXsxpY>h|` zW=<|36@Ws4Na-OF!1(~Sh?I1oS%4pa5Y&EQO&u^Wgp&;&9W6b*?F0RQ*xvCm&_4iR z@B%>c7rxb<oq-Mlm>i;G069n(kW47f>)P8vu0xxkrgAH5GE2*W*b{T|V>5HYQ_|rC z;55JuX#vPkq@9O1oT0gsiwVgAw3&{XrIwMgimu^tE$9EP;Krp)RU<{4`V^#94h=2b zBzUFsDA<b{1RMF*pVtilNdtg)iK#Dcn`6F3wlH?}&Ng!XQ<s6!IZL<Hr1BA9dnNk> zZZ%h4b$3<Ad<{woz+Q`1aURRqEn;$9(={lq@%n=o6D#+6XKyB#bQ;>ylyC-`pztki z@OF+CWGcu8Kt9<w5Oh8xJUSpYF*GSPDlHSh51I!8Nf6i|djO>n@K!W6gA3HQcYyqE z>h6M&6MO-*(B-~<Ky@2fUteo)Z*zBdV`o=GM<*yHs9(|CQp&_Rv;(bWl$56y7GKIO zh|A1DWsHAp0)!ZVcB+5yC$tmDR5Vt_nOk8@Ox5)brF6_W)v141qzPa>)Ewe0{flb5 zuXInYt67I}%iBwuM(cP~^J{s5q>;Ccb&0MoY?(y?-!XaphaZ0Yqa?pw{%<SuSJ&;m zGZRXO0<(HFnFE;n<(y<JlQFK9>Mq5av~poxKNb;VPC1g9NBZC#@-2MF)Q#G{Wm;&4 znh{Y+M<0L${Apt62!SDyO1E?OcJdAYMFH4@$PsV|Xaweu&CG^~67UZ|1^x%s4QeE> zss<EzVRaoKxwx*OxW2K3X~=S219TAi)wO`|+=^-dIq-Q}Q3-S(iGcI$T!@{+Q_?;| zJv7oaC={Ypa6J;;%L@6$2gTIh+0e!gja4=D4Ha;BUQP1<RCL>~OclomsyW8{B{zbA z=h#`_;pcF}Yx(jXUb$~*5<bde=SGm_d$Q4QZd-!^vbALpytF8@i(gGWz8E>%V6 z^~976V<~ATbiDT?W0(ONOw4P!RVolujwm=mR4b_K6`9kze*ZZTe#_{3YUPNN-z9at zy)w=KkgsQH4Qkoc0f}&JTs`bPHo@-}9PSw!=@S*_ADbAQkOce>E&yEsND=4-K$8Gh zU&<>4cK|$pGFXL;0B`UVkmwL2!!EER#LLhK;2awC42uFm1&4tC#lg!Df(vU`4>&sL zc4Lx*0nrvmAZnYKY8n{H;0U~$PP>$->?(AAZ4Xg=5Gk;zu4k@eeASAP!K><W9_Ndt zR|*@1!nN~ayfmCs0PvOFSE~^Xzc_mJ_OHy{v0eUihP3a}3tJ<y+Y?Gh%>7C(nuqOE zaN<#-%2}rZ;cL<=K<5DAvtKmlkhQV)$!;HA0;w}Ff4gaTEwN<KoEo8GWTU8KpkZi? 
zHMi8avIWSQI+6i?L>e7f7r58iCjc}*EijZ0E)W(4%nUK6Uraoxpuo6GAi@F3K-!-y zH~}mP{uL0L0M(!}^cr4Fkb`^zoCBqUfU)=Vf%p`TVgXQhasi=bXpIm%*3?4F$OKg~ z{92A*E4#5M(Rejn&**xq*hWWWwT&&^2}y4f)%QB2M3%F<r0rUMSk;Y9&f$V_kcoF; zWI;awzN%-5IdKO%JJYtF`G2s~v<$@+^yD_plB4TE-XGTVV3&1}G>g-AtJ0vBY16CJ zobq`Uopy@i&*-=ZrZg;UJX*c`3{-A)&r)=LkC|(nlCh&a)=*8)P}|fTf?QD15IF+# zgQ&KmxPj&;xijoNd>k2m-~s?z5Z-`Z*T7JKG5Eqi8f*tQfjShwAhJ&YbR59v(1QTZ zfzqK(2ql2k@gxT$8#_HK8$`6@&4Jh<N|n&DI;=_go08iedGa9@7hX;Jd3|4UaM9$- z-R9vXEm9<?cL8k%sCNaMG){RZ9(6Z4n^@bB%JkYv^qbb|-lg@s+aoIfkC)Qg-sqh6 z!nQf@<TjjZrhvXbi;VqA3<I=1ARpse0kkiK^JNn=W0xkX5W~_c`#|bUt=(@MT`%ic zh{)}+@JdrQrO9HAm2vu-K>HuTZ$xr{SQ79F__uU%1?3N*wPkt%1e*Y3U~DjeG;|8+ zC$h!E+s=bQqBFo>pg!yc`vT6*oyefo!J$B|>si|12v*=_7-JI<?Mk}FLRdQvb=R*H zXuD*cxa94*6rGQ2(JkE5TZb0Mm+yMURSD{Pf_hiB%hL0z6~OxLmLeU|^3-xkr6)Ay zH6amedFLVk{@&A9+YJ9-p^Qu}rj>LTG|$GB4qFA3iJ69RD!Cj`q=@5V0Q121SdSW2 z$6S6@D(gjaE;(C0a(rp)1jM#u%lB$}m$K@of-*a;ee<+z{N)YoWU!`6I74-PBhb|# zdjS8y#vnh8ZSC=PPJmnR0?;JpPA&vzDuCI7Ok*0dOhB+rfqy|A=q#YV36swah$M&| z0OwdU3uso$&{$p1P(jyJ6iejSbYxMc{JR`^x0Efblr@XA4KO^G6jjzdHo9~>G_yq( z?|)dCd;#aDL$8vyNIalGW>;{&U>vMZ&kE1$1GlZ|T`uidY#dy@^>F+B#{VJ(;k3t3 zo)^^g7c@`jG|o_C8-M^$>H4zDIUZDYm9t650_@$Z0r@KS*(cR#EMjH{l^k(Su{m|a z(`)y~mhP5bo{K9U49w_sjj1#9%2u-umNTNr>RKvb4Ha>QYWl{Yv;nI+rsg<v3xKX3 zID)0MzLkwW(H1p>j|<=+@GtNbsHS6P0lUDC+Quf(gFrh<dM47kgbUjC2Q{cWm8gH0 zar|1GxKq?@mx$?JNg}Tb*@PTl-99<Ce8)GjTGGV#prR9q87)eQrb{uX_q{UqeA)~Z zheZ4E>ZFRXI;Nfh0pEA!*4ua6Lp1;8%7+gh7FTaoUY;txJP&wqil|X@N;--2;ZUOR zs=3SCq-(oZ>v+}x@|7KO&S^6^B&|54i6XilMDMJE#?hgLTa5$D@x}d~iA}a473RLh zri?s8Y9ioY+JqsYPm#pgN$U{gbnpr~$O{0fddBJo#u^630A@`?fV7Dg@`-FwH!xAv zH&NC#R@BAI;s_UYY|mpId9|oqD$X1Vj;zwQyTmMZ3mNY{ug7v0w@1*BOUBy3J}R@U zyRmP+uz8Hg$PmJL@T$=y%whrRDh|1P>hAlH!5L3QQoL1QSybUbStmm8$nUs^x9R=A zdU^ZyyRMPN^7grs_J#P8VaLc?RfkJQF`oMsoR6xz%UC3V)B)t{c+{wqizG}U4l6jZ zTr@u*M^d&70oh#IHqk!5R?$8alyRBhSE%Qjp+im6c1gxKC#gGLQnHPawG6vx;xDA{ zbynB?w2qqqmUc$R?UXj{B!-HZHtnb;<%kC5kUE81jdDPR0>@>Ov11drVih!EIjh5Z zT8o`ujh$be^(^+dEK$cYFgm@yb#S4oYnBpTB5Ue@M8)~Ix|=j10q0(=WShaO?#?FX z2(hXJA;QAH1jGz@G>UdmH8bn?-+%vMTfJ`=z`(}c$1TH1zh82B9&lpjTO?-_dsNGV zP1fO{imSB6C6GD*d!|(@lQIRg-1muFu!s{5tCAHhL*1gv^6Q721{U)g#(a_*%)N8e z>|>=Y!o~1GVkUtXnI>ZBFJ$0%2InoH>wN;_c~ryguo{h9*@Z*SVV|@Ot2lvG)O4?q zA>f?-q&oX?Wp+MAwi7B`=W$}nB%(`nT47u7^!28p<)HK?j9t`mb;=<{r?XgZW&127 z{f9%&QBX3{cHC-iVrF4_^lXpB=KSVaKzn7^VsYDiegE>c`_Hz~zFoG${{8o?y#sV) z&(|)TOzdQ0V<yQYnb@8<nb@{%+qP}nwrx+0j%|KDzxV%Y-S@8b-MemguRf=$4yvk8 z@4cU@UHjS6R&8Hr>VO=9iY1I$HM~&&ZjI{<A3lNMsFFydivcwTN>_cV2v;OTkwk~c zTgAT<Q)3X{urKQn!}QR2_Z#WA`9u{yr3HaB#if08C5elX-kIL_YhORNO>+8+U<HZ{ z*r?Zov(*C2N71j&S`8}{W^Ljt2GKS2?IHBVCD$l0jP&T72w-j0%w&{>WqFPrx!%OB z9n(BS(y_=6a^v?od3(0Z%Gl$A_>Yuq*s6Lbvdqp(*zVg-w)_EA>PhAr`o9-lgqHOn z{2thI9&5{;H%Tatn?buZyJR*yKR?Fqp_o0#ect+#R*(8X0VN#Nc4OGwyjJKq#d+iO zlF}6Xei)4s)8!b@7(LZu6tU!FaHlVXvNroMBo|<B<LfXw8lZVbcW8Dhp~@bLzkci6 zxY?Ts!>QPq>6@$-1r{toku5%vD+CrT1sgL70~!VnnupOGAPZR7ATdukhQ+X7Z@>zv z*zx7_VUD53)JXex?ywreoK(r_;SpUK@_OmA=9FfUWuAb5eMu5@p*#g2!{livaD7_K zv-w@6dE7ek<n8Hc@YJ_89XQpY$UUMV{44z;bW&LBc}^*IxUrKBZ4?Sk6Wr$GhQ*ZV zrg4K3^bI&g><0au`~x{&=-ywdWX@&(tQX=z90h4)6<^MYBbSdmyf7S_&fv-mHsBZf zxQ?RUkCZUQkgvolke9SZN1mVn7zspZKx{K_!I6i?<oTB=>7|>mL26;Q6|+v|a*kt| z%<xdHgEXf-m50DZ0~N;^7MN=2ar1DV@}{p`8Pw1&`v<ABjlsY+;1sXXtRFlkU{-HV zX?m})I3c`mAk!KDzIXX0Tr6qBK2QumczbH#LMm&&EosYKc)(x`O=<?t`uabWg1+HC zu^iF2gUGCfv6$8=B^N+ne>JMblq7+0d=I?)5^(n=@Xl`=RcSDye_LCIZ|Ek@Pckh{ zzY3!e4Io>&r&YNl_tQAuON$kH0NsI!%go9HOWL+x2J8`Uxg}`C?_+;#w%{gqCC_&y zueaeObgirKy0BeZtLyp6ZQp5?#yjEmb2*y#qviTaXn5bz$5RNCVGB-0c3s0_6q!4B zbfH*gl2~?yEkt@fTs?89P>{RiWGGq}`XpF1W!V3VtTV-uU1bk_KFG}Xmrc>=8q-b3 
z8d%Q&0>)oHAN>AuWd_MWT2F62+7#*jTc59i0=^TKCvJG-$czYsG4Y|k2goKqr-h2q z!3NH)gNy7!3i02}vBWHa%i6|S3E!cNwEp&N@|eGIHDt8~NGwC)4GB@V6$tjpsCilx zHTHv8rv<)(Z+;<_iU=GwR8|iIV(C0m$xKUP=^x6f8BDy{D-f((^Vf{IYu><nAqglR zPZwq`S7z>B`IbaesDx6DF|>e*#!m1^e>6+|`;B^h5;MO#_c+QS5CDCkC&BeSc(w<= z8EjCT-gc}cY^msvi}aSZTFJBZgY573#3P!W?*-mYsBfg_aL9~#yHE;DSG-3j3n$k{ zd3-W_4J(lA+jjE8zBJUD<HvEZ6iQiG6y_+zlF;!iNQuM&<ElcGt3xJiT7IxDmtWSm ze>XoB`BYqt^!cXj_KtKAbQh4R2|~<rBAsH<8l#W}A${$I5?2$}p%}feWo~us4`05t zq<T2N@h<B4{OBL@2jqu0uM01ab7IJq4++@2fdW2ElfmZJ{p;KF;9kCt2hEiYGnkRz zq#NgOf`AqOFiD-X7XDf9L{ezSRB6V<P)tQvIQXq-jW`EHT70PnHj&|uJv{feOwJ`? z<j)w*AG3yjwF~1r4}+h;K#uiaVL36YEkFIN+uuG%KWVo*)VrQr99`Hu-N5NC@nj7T z)k{v(a(82htWl{=QD}f6(~Z9VusHIb9nn_V0%Sx1uq~Z^n36lVgMWsy%Sf+~>b0q3 z!FP)<yrqXHwHI2lqv1uD&C?t)&=veJ6V;V{@0n}l1P-=kQR%n8Z7FVcxqogSj~_(7 z?!hgmyBXh_z3AJmbLsucHaO-Sruxcna19zK+@<#dkD@6}jQ<F?c6W=le2c|u3KmqB z%A18Jjxd2JRWd?jX?=u6nAXu|`?%Ajv_<o&`sz=}hvFHHGXRnPaKWv2*}P@<hi2Wd z-6HwCPP1to_^0_NL7V)E(EqaH1Z!wzVDDgOsB8Im&sxt6mXV%_j_98qR#rNO|Hx(i zS1uP95v_!wm9c{f5hEKgPmYLI!PUl)h*m{f&&*KYfrwV#QP1I@95G8>V_<=@sR8g{ zdRAs4T2Vt&V-p8hdKNY!S^;YdYdd)xU47sof`&j+J3|pWT~{JnF)If{I~!{YT?a!V z2Rlb#DZi<My^NuqfVHKKwUwcj0}=3SVN(m>VX(Bqe~%M1)VDS;B;w(L{Z~_5*3L6D zMbTu@e5TtB*C<;llFC&A3MoR-Ant3F)R6-Wk^;-C$dZaM*7a$HaD=6WEJcM1AM^#4 zW&{!xkWhMm8>(Q58W#D%m{;*W$8$zLtV~!j+FwjaWn9j-PkCf;KmM^d5(M!hYWVht z<QG;q(VcxbOwacg<kC2+-00WzpCDl5g6zKzq;mst=7eS|bB^L@4rqju9VnbButk#{ zN*QGSz8%Dp<9zeP+Y;In;0wgt8Ww>?rscAKi!S7Vg}_8C4Jp`h9E~M&81VEw@xX{U zUpwHotw3Cz`pDr_&PGL7i4?mgr9z3c%|v5X(P7qcRoJ;q3Ftiw!!dWhRV7C(pv#1N zSFn!YHfO0_rZ(-<^9nx`Yh~>h#_g}SmgFDhz*_|TYR9ZKBN`vNvfQdLSh}Ra{?+`R zFjU1I%ww@WrJ^)H0LIvLamr-Wk2IW&%UN~aY~WPCc2?W9$C-B?j1`nm3fP$Zl$v7G zl2jqJ{5qA6)-s|ct7)ZE461X#2GBy|p^ay20l>qFO=2z{v}nx4{ESx6jHc4$+?b## zZNpohxBZisGN^0k5U&Lbb1grD8b!?uYY^Bhd!$wk(nPmpDcwJB8Sd)}NL}*^h2+w_ z_})X2)Kn?vQ<1(lD$|T3<49hiNA+VNcx-VJor}C>90X;;;&LKZ+tnSs`Us2;I8Wow zic{m*is5yPhq`VaHr0qql1PTCW6le<USuQGHmc`oPw6d~AKlqi6bN*#d18sBBHY2O z)Q)w;MRR6H*Q;iJxD8L|E@@&-{3Nd-skQcKiKAs(;^SI99)f_DG^jtif4&Qp)HG>Z zmUY{V)c0bX<nS|a{{~wuVvYI!;FjsZGr&$C;HfMr`fU)0*O{H^*4KX$x*gu~OW4i) zWW)rQiQmzBnB`;$ZC&udCF8N?v^UAZnNu`nO(zy?2-kyic~`zS0hGc<yrnywGtpGa zgL`wcu`w^YdHt%I!D^bVV=nUyerA&(G{M(w2>97~HTSEhp>DXhOt(K5X`-cAD^FQQ zj2v%kgTl)y!$Zv(vK%QV*dtL&*s6uq8~#?Da$`2_tFw+&DoVjIZ+r@>)~T_U?Blrn z4fwRWT3&F-j^!)a7(v_@6a|1uQsS}<Yh#<JV7<gB{doxZjebfNWZHx=M2xc4OO=E! zQQMOB?7_-gsgdN&5+<KDG_C|q)UbvlAAnX|sXBZba%^3$^2;5k9T7NF{$1AsPcC%B z_qm;zyOC1x2C}KSnb~V=?`Y<RX=py*`2#y{ST;^JqL3N`C>>m6aK3*&*IU=P%MjDw z{s$5ES1|n*Vf1u#%>N<582<+e#`J$6!5ElX{!@be@8#G3URnXA)_+PX*#9c6UO&AJ zz(AluK0*J7#{8}0KaKfMVGLxq|0nCe3S*Z4gQhSs{AWv8fcyVDS^R&~mh)bEP!JH1 zzMOk$OIY3<;4bjBZH|)w@;^21Z>|3}j)DE(e<voo|Fv<z`u?M7{!Qxtw|@L@Qh!oK z(t2GOx%09*MGF?NeLv+@-@M=y4sIE*#P1FxLx)B^9wQo9fPA;*oRlCI_SKSues=^> zP&m5Yx@$Ay4dx?2I_KxR9Um>66fB+Euh>4`nBGCd(3k|Dd+Aj03R`9@^D|d4Bi7+0 zx?PmcU%365megI+;omou+FcL^!?I>Wxa8J)TABTPgzInDZa&xrKB*EMwr6dL3xf^7 zFs*?k#&#Tcx=<^fU<Um#dz}jo6yJPfh1wXF)Q=}N8S$To7!8j-5_(5E_0MXNCE18i zqIAXtd)s>{@<U^;Qomj$eQfv!Z>7uA8WZ*DvUK3(_sg)G!cWkQ;&|rxQ=|N{+1z-r z7p)bf#uF`ow*DoD*w?6Qp2Cf@G4=Qotbv!cwki9>A8=&Y0aS$nQXj5}W6NxA`7snZ zIlUQkui?9X9+_J`sct}T=9a8xiy=l^6*bK6udRk+(?ppb-iVJ`S$hsqu?cv@c;+Xg z(xt_rt42tHUxfK}al-Bne-P4`eeu<(Aw#a`K*ooiy%L?-b3a>+7VybclXqC-*7*p- z|3WfY$(#v+r4mE3yD7H%WwRM%38VoqB}BbduV>OrysYIQQV|4l*>W#c>;8)c<VR^s zU&&q!JZPg7(L)rQgFH&`2*=i!CNQ`KkUxU5P$5uOW|A=TE3K^rYII3+r}rk`nHTY3 zaGI>N5I`WASqTnIaw13^@9uOlPXt&aWzo#GVkLqxxX>fMJ_vrtWu<l_055=00E1Iv zms-8}l86Kv#WpfhTkBR6d$0mPMgoy?v&vZDMhv#%I5(+k$xY|>b*XV({a%{2*>P6l z+R1jb*%{HB|8q4SYAk-SX_&#TSS1UoPIg)&I4N?mQ8`Q;pqL&xPTYfC6}Z$H$39^! 
zc&q(mU3CTir(n{)+7J6Y&-A*&sRQAA0}EC({#+)JQ{ht_h<ikcut-HxSm{-{y-Z&N zKXcA9zL2kC<{Kt}m^98YyXDQ*9`Wx81rLL=p}jdv4In)!12ZZjY8V`}$|4}AEd-eM z1(f?oHOp>bw229S@^BW-;Pt2{O4%+t6{O<BB7?=|>4vi;2y7RA+s(~hE0~$LFu8Fh z7VI&_K}tz2CReKmi@Jl+h}PtU23d6@9ybYWS(Yniy*o5g&J~P>xBRB9nzPhUNM|bF zzSpaT&7Oz6BWoWGm&Ch3#IIS1Z3u~r9vJjZV)|@}+k<PXU}w&G(Ou&CQz0eW%%LiT z>klb_x@f*e+s|x=*Ej_&Gu#P@kZm=Q?{cX4IeHT1uH&Li%$_@WGLXVT-#zT?3Yy$( z+aw{bVo2v{{r+{=Voqw6T#7P;Jaziyg)JaRk@yV9{b|)r*+OS5$8egdkH3fDUewKf zF2CEni2H{?VQF8)8Q1UdI5n`C@=INBHk)!SZ>g4hn=dQNNefJ&90oW=gD^$bFEBa? z3otl7nfk`6b(_?4m()*jUFA8;D=vcLC!6_%SzX=S91sLYYD;4XF9d|FG3f>I>6)o? zo?WX0t9fU!hSOJ`59N);QIwv1>`^Dp>6K6<1bJ&W$9XR-pU}2(`|TBKhT<xKam5>c zG+8+sDO)ZD(1)J^WTSE4pSgrgxE0j>=F)zps~{NPV<`QczxNhAd$X@CD`Ntql&4&$ z)C|HmqL+kRVRTgPv}|(BS&S9WR$inYs0k^5($^^3O21e-7Ysk^C%HfWosXzLZu~>e zRG2ccK$nj?cWfsrtwO?Te$Wa(%|a3BOPiY*HS$e}N9(%PZQd8Pd|feyb->$Bg_`Bu z?F|b}pQX<XB0=@?@e4#JaDgi-+UZSydDG98`Zw2lvhl8hbGLhQdb9Cl$;?`Ly~n#_ zG`;1yV?9=J`KQyD=F{c+Q}0g~*6yrD*C^e1RJgQ;bw{;A+7v5%d|9mmL%E8R>X4YN zKh;p&QJ-MEN;xV2tJ?cV<o~1gm{?f<T{keY{}<ixZ?Hq=AB-WRYxfrz(Eru(Ktv*E zXm9Okr*CLa#Pk=<0CNPafw02L0hsYuy#O<SYtKz}`K?`G)&FL&vNHguT^Th0YLkEA zpnuN%TY<kyR>sa+U*6DxNFBH<C=7%=hAu#)^AEt`|JPRFUt2LE&HtuQ|DG;jPY?T7 zXUhSvPed!^;vgdL@E70wEfcY)C!+s1JLGSnFtDQk<O?&=&=E2Gg<Zhf3ey7<|G+IE z<YA$qCu070jmQWrp#S#-GcdvU?+I35g8AP%<nM+59iMS=!2&DJO2qW9z7n&frz2we z+p)m=h*`1|G5zbFVwP+~O#e~q|I#u4gCWKA|HhE|x4Y5GOX}L26EXkmn*TRO$Y7rc zq7M-=NYCfz{BP?M6gqk=LuVLrGSsT0X;97gBa>cGtB{Y<bWH_=RP}}*lf2cZto>!L zQkw>QYOr^H2R*G+uNlG;Ec<I+|HGsC?-m*Be>|A~^xwZfEdPSX%>R1Q{(r$^2H+3+ zAMhC1^8ad*{i|30-vHyYP6!2!)i$7y@^!TBRLf(AXB(#<C%7}T70kuO1!5ou@cW{d z+QlaT1Q-ux03yVkJoDmJQIS7hMscR|_3mQRpZ%~zq0FdKt#OHQ>)C57&Hj3vmi(X~ zBO%^SPHIeyo#MYGx;wmXy$nx3j_&NH8&e;sP^mlj4kQKG6_xdFIKPIu#fiJiQ@t>n zR~;;9jay?;SYlfrWuD`zx3@ZZ-#<*94^I;UX=L^f)|)SRy<YBaM|-&_yjSaL+^8CJ z2LDh$`&ALffAQm=i<Ov*6&wo{Tnm)I;43{6Dn1!6Lm#n5v+ZTjTH+B}B^6jMp844_ z$5S>%T}w(-PnBmwRJ-fu^zLG~tYNt&w~+{pWNz+1zkaJ8{g~DLaXeqF?e&bUDLp3k zoSgU?{SY>`{u3|n$>3b{`5uOSA(U+aro~a}8awlAV^N$V`V5RrZ6qt-A#omU;DJMt z0^$Lyq=Mu0qL!-PnuBHI{R_uJ(y93so;)sO5`ANp_p#iTp5rvX;v*N66`>YxR(HLd z@#Si{Gr7n|!P}^z*S(U%@4`BQxHRqP5ZR=Z3ljSLpP<cT`302$P0?+Z5&9;k9@mLC z<BZh(r<LDuBL&PAx#0pW8CMbZMirO$#;OJr;0KZrhmy1c0aw3;&m;d7jpjY1hE+Yc zeH;|!Ha6|tKg1%q1gvp2Kx%^21k=dOw?j|A7Byj7pL2OVyv$ecdgpzTA4JneXLpEB zm?tq`k)yH@>nV~M#gG~(7NIjGEZx;yPyMDi21~R8a+%H6r>HRRX091ws1%*0Q0fx& zTO&^uqD~ZHk5mx!RG^HMVDyxn4CkB-7aUGk5R8;z59AT_l;Tg6<5C)65u0KcAExP^ zX@`Ya!;W8ymt#|(rc@GY`0ju_1&cgkY<Cwq=XQCQx?AX5-Oi4)UP`6#Qg~aau6k`F ztNbEAwF^>?j*-8Gk4B6yh|IEdu$VFgsWx<AEOuQZ?-W>Pl%Dt*q)By^QAS%?N1Lxk zlq?mHBb(PERFt6U>t`NFk|(!VkEm(hy})7kh$FQ8TeUbYyy<xt1N$xL8c$7k;)D;N z@OGs5uEGo^B$?0rr|z7@T;TSl4wtOV9n;qX3!(>wSxd7OoZ$Fm?5eM`6VqCHgqPye zVAcJjtjd1kORB)P;3ut-jJG_n?xN^W5n&f2byt9e)NO)@T{$F){KEGzc~0<!bV(|O zo*#UF7{q%EH3mC{v1LoQ6@Ka`)0$>d>!#8eC6SoL4HV->mA-N8cyQ?U3190Ojeb2W z#jIo~Jipn`3{yl-RO^c5_?X1$iVO=W3twWNF=ulm(GlF9K}4a8h-r#qy`zSmIGL!x ztv*TvD+e1x0t4kb&hBcoGrk-=eIy556ucSX-kv#n$Tp|QVwL{%lbI7!;AJB!JeH#! zvnXDQS`}OfO&^~7K}>Pbxx(Z}`m;V^qinI`Btc`0R?X<@IDnDLeSV}xPMmXgw6(6p z#m@W%`Uo-v8&Z=A1~_({+!&HTj}#2p4eI4@1O0=;GZQ2Q%13%G=rkLp@F#57@10#0 zJh`pAMJ&(HKD0?YUQ4FD$oiW{c!U7G1E}yK2&Is<)42|NutIrF+LfPg-;(LfTTn0h zihz%|&Ee8sakbRco*284=}~5Evv`3bUtP&-8T<Dkj0dL(%S6X1|1vQBpJ||Ib_DR0 zUU4j~W}Elh(gtTYE)4?5=v<}-8Y&6gf(Tz4HR3c$sQT>J>rbl^8#YT$GAb@Jt1i<` z+uJ~-$JU2w9i<&3egc_$ObAl^m&-ZGU~#I+lODheSkxfVeyj|@9=*(uw#RAJ5Pt=_ z*6Iuw?Nb}Y<?O3f9n3?&nZL4*vtwj0hZo7-<tp*pod%YX)OE<M@jdqf@(sLouuq<D z-dQhsGBReoFK-^+Uwda;D{iJ1cE_;|Ggr|!Ruj2h7d_9F$&8blvN}B?0rk9}Z{t1t zK>`MiIkp8gwgH+dhGOezGg<RYFluAQ$Wp|YeC7M!EaEBxO5|=U&>6+_WUdP0o%du! 
zC{=`71r^a)q8qe;gWg%csZOuMs_1OR73tuTXOO-wA)w8`Y%ktm&cJ<Tf<~u<MrVzT ztCy5<5MBCZfc1*L7NqR@ok?)t@n9jENh9?%eWit=6?B8R-e~4qliDgxmgHjmTONLB z3-i@<?(R4Jbg-5DfvsY`q1+H@-XtXyvFZ~l(@YZ-HT*2^!j^Y36P|!JqK{>xChcx^ zTh7m*gUQooUzFA_X`+<u;H3>zff=}k9h{MI%k1di^V4Y4jX+|^^wG`F644Xq*X#^W zmi#yT3B9lG9Q9Ux{N&S8Tq}fRR23gBmIs`K&r^VYKsmN(tr?u1an%KqSxMUQ9s0z@ z!XS+mRK;!0RqARMw$iU7zXA7w105LL?Iv2J$_%#UmjM%9m`txyD>o4V_@M>gAfE6b zrtQZ{xOEq=080o5vv8c&Si}ZjJ$o6XmUvWW*){vQMYf7(zSm5U?ZzqFiK}Q?Yt26$ z>@<!x2HUT5YUYpA!KOUzLmxxQiLxVyq|A$Qz@CcmkAO@OQo^(P1;K1LmY{V8Na{y` zjrrTV5gma{LLF3Jbg<WZ;4YR+k9CW;dIEe)GA562Uji`kM+-H7>?4D4CHg@%#*x!a z7hFJ1*dKx09z)un23{Qn!(xeytdOZdpcg0HM@(7XNm;a!vy>CpNr3&HHxFtjZ@Nt@ zw)l4X>uU6XyX2s=cc{dDF*Ln!X0+8`D48J~TGZT(P|LKF;eb%}d-b^(sexcmsZ>v? zZIH+%l-IYM_l5xO*n)4Z5pacgerlk-Is0Q3XMMSc6S*e?<vRn#JJ5;R^Qr0^smiFm zg*#K_2b0y9M0$9X#<<0&sro1y$2ePf2^;vzEDC>Ez<Q5&Gdm|mrbP~|0ZZD9_mhUo zw<KtdL|<!lWbfkqHUwDApqfhD7JKOg^FzS<!S>YyHNz-cl~%c-_LCbX7o4Oz-R=(q zlDz$I&IYM;O~tp*&xC^IG4Z`xOuT>h9VVn4P{@YMlN|Dfq-0{$e^00(PPEI3u`g7- z)?0?zXNE>)2>r?ujzJ$D+XPhLlQK@DzXn(p9H#tO+6Gv@ockcus}XH~ztjLKEs^SN znmo)SJXNhZ7NuDRJT!R{5^Is6O1^>bA_EnoJ*BQ;(?=C0q|ZXQl<ee6ips>6#-;+~ z==R3cxXPaH0imdUe-<alCO6jl=hrs+hx)+?a{H(bX`tnStKK=uEzL!k8QmfDIZZuZ z;a;ZnUbNzh_)el~F+LzRM=eJczeV~}DFZIPyiq-7Axc;dp*fgwkk?YQqlT->iF1Tv zlm7tvw17AAClKqVid9GacKod}ln$XRgn^*oFsc0b6OAb%LZZ6c(^=2)V{>qnIw?f! zg-}oFliKRG(`QE&k(qxxhA${aHYr9mD@HOkMl&@=2=re2jtU<oZ3J^m~PA0vDTu zztDt!;d$IXeq5sFTqBpgLnqu5d|%4qm;H?2=o!D+7QjplG|?*Z?N6$sX*MFKtrF&J z$PX5S&XMWIzSfk#_gH%<^+hSos;Vq2F7~a?%ue=Aj!&%*5AybWV}i%VJ|{#a#7f4> zNcozUfsqu45(~?WYUW}__J*kiFC~f?B7PGY^%xcM03B(A0B?JDUw?ncV9$57&Tgu< zLV>-mv0&e!b5v<TXpE|cqRpYQ+|ATsV}0IRug*z3oOBrkXwamC`S!J$45OB<8oOjy zsStlAFUq1tCs8tgTax&a!U`LShmeU7&&&2y!EH71yggca<oOlDmfiddOX6j~<djV* z>a4h|ZTWZ!$9ntF_hMzGWyeQ^i2M7kmW7#kd6`SXI8!T3vg(Vz9|ocWWzd@bKAM5Q zMxHYAaI3vS3M}*D?9(Vo1p2wvx2iQJD`PYV#fP{^no4j<#(Y{}2_THIB%xIkVb~U= zGg$+)`YTRIbVFw9gjCvEIiH?df&*Rz2fy^knx<Z}KT`yfsvaahFD3(Hr2?03P~xbU zsi~E#t(U2<m#M8$rmIt>sg2>JZEKpuYicDbX(T0RqopUpy_2vJ50X<(ePfy<CBkxx zX{GAxS<)IDP?MWhT3ZocX9i9EBe%k_!C1>&`+z)>z3GA#w@PA$EJ$P|MrlM{f)ATk zeY_4Fjsnb=KU1_Cycjz0yQt4H<7TceIvJjTXLXyR#6lb<PS=$qGl-TSyIEOm3~qV3 zKN>n-di=7>ua)nEFf^vrqW(gry7DDAXO>;a8KwS*;WE5swT9-UhUXT)ftk3~26A;6 zCulMTX!4<{mgm)pLERLI8#K3jKxA;S`L7&jK{O(q%+z=#4cNORR+eS03zPHH(=I2= z?V-qW)V<~6Vqv2q>pvN-wzr3otE;&6tg_NpQxTgP*jZUPX(?D8w)u}Q_ffvShgn)r z7OML4e3F(%rb2RF#1<@U^yt{w;n0ZASJ%yUw{&&RGK{rtCC0SI*m|gPSxVe<EFC;; z4um7w8l8l(>og^pF26?-`2VO4u|U-<-<c}dRT?NFk;FSD*ZlS=!P5(P@jWQ%$cmV| zK=t~_nppQSCDw~kTKKhX8Il&#Sko!L(XO&sFT>ly&|=f=X)#V&5T-B@79pJ-Adk=l zb`>wC0*ez^PQ-BU5q|d`=I9A)Xt{fAkRctqI`gAd6sH}BlNc|CCrdYWTLl=o1?Uhj zeu8rfBm#n}MvRIC-PE_Q-@CPshr4}^UOD-B2*cE)u;2|i%%S~7dxT41b$SZ+kqdR8 zwDcVlO~iP=2ih+p#1;m=@&1O{-f5PpIcc^ziC#zz0>f}QQZu`|{r>1VA7EWIWw9EV zG&lBb3Kr+q47|i`!Drf+(kMxoa&M>#wc=b8D`ms}cuY=iRmaVAT8sy9IouJ!tb(J) zb3T|T$u-I2@y6EW0krc$nL{5>%w9)?jW$|n>SFjNqs7VP`irkSl;}!hVB%A-Nt1Cv z%SlRO=#x3*rifLit5+C0hA7|Sco>G~s1}&%eh`tdkbR@dZ}{Q^ZU`CL#lsAf;VtZJ zg{Wyf9Hl%g>v&QA>2*vo<5?9IEudU>dGy6OL0%T(keq>foR<X!<*RP+9n^Hbu_cwU zPN7be;DADthPsyb)!F7|Ineziu_-DHC&+`=fS=gPzL!f8f3@Ud!={%b7$b4<I>}MO zKFXTLBsFT{6h_R{qvrMwyr5fFcKMd1DAwjWVtD@W{*@5Q$V%~%=GWaa5Pf}fx3Q|O zF=Z<BOOBDv%d>|`Pa`bmnBy$9GWm$Av}URGAto;7AdY`iMRZIHZ_*RaZQ~JIifv?x zy_cF@Bu-T%jZ6+EA09fiBG|^OTwm$UdhKI*>Av0Kk1G6@!|d(-(?_}Io3E<b*@<5i z<RLE&ClXh*vtUHLcTHnEW{amQ5IUxVM5ojp`jQe6f_oc$G`UMKhlc@FiRD=g3p(^s zN;<<t5UQ=v3OmT7y;5v{xmwLD{626r@Ysl4jYLiQpdsZd0*R0m5a+Fel-BOF#MNqd zfMCAuz~)Ze;3>-z`{ttt3bWT$;Ykpf;9sGraN78MTEEUgL(vdMfL3R<B}SaGZ^{1M zA!HRT1j`@<0{-dV@7HbRw^Iv}6V1J}I&-Kh$*GB|32ADN$h+mED17y+>qrP~4dlDz 
z+|kf+Ffe2Ye*owzlJsPN$uc|9!q|rV^qLgkmd0owIa9l*$3gE#XL7Vw$2pFsY+W=j zsh$+29_UKB>teavlGy5@?1*cY{Qcr1RVZ~Q<W5bjvDI+mC~C$BhM}#Yf0lN0^C#qz zvvXFb6}}q8Wg8M#hM^hh_h6$CAU3JtgO98Tmlu3qc<CR=qv3nk!4X3Gip>fO5l|3Y zp4c9&EEphSQjvrm7o?2phB51^N#kfuJ+0B60(r#7gS@kZbQdi>{c~D+;u9mt+mX3@ zPrk$pC@$ZBdn=mb3Qi)Q1d$fI2Z?Y#fvb`Har|kyk6<amg~}na)I`|b(it9Ri6Wc0 zP&YNuXOt&wOIZRlk&LE6rMe(9pQX5A2Lz~L>&6rBVP$F6*~s_JKaw7Hlp%$Yx<4M? zgZ*KV_SFNF;>TT~u?9{dn{3LSWk=ashm~lzT>U~S43=RHWUhIr&WB3E=p1ZpA8z47 z#3d%EDlDw9Q!|eUJ9myy#D}W|1j%8Vpenk=Y+A)V;^j5st<~IZZ)YOY>*vK+Iw#q9 z$UjeQe6AVwTcE=IJ;CR(M!NrOX|Mi)RpKS1VkM#(rT*Txv_3sBt2(%%G0sy{;_7j5 z5kDC`O~~brDnBDeyyuZNz1<<46J}f)=iW$?casr^gvDi;ED>Th-hMdw{+sNORXXJ5 zH2HWnF*w?(dvwm@<?i_8EYmZv`dGaIzA{(-8ogO`8OFp&Ntl76Rb*e2K)bd2GI6pJ zQ=)p;UKocs=Ung54;VW)LSh^oCJ-5YS02Q|&DD1A<KcdkBDx0+14V$u%~X%a6K38f zJw*Is)bc{Ca_5=55=>t{+<rdXe(y6P@|mOH{iU+CI@_b^1uQ>5Qgb6P`6Lc5tY4lX zCbl-pi!aG4E6pg0vPeo7xxAW7rTY^Ne2cRdX!pEBc?l3W^2WFcHB$-+qKB)2B<3ZP z=wvTuDsk%_Wi>hiW?>Y(+x;LvSRkL4PEV2B(!djPNtE^knWN3ju$8r_x5CEv`t6$| zJY_MpM$tP;(PS{?&O16h^Y`S!#c3Ng3Gi^&k%;LX&1`|e#UBRVGPDIT@?&_OJZw1o zUig}39Y-1vyYHeF%6!L{$$nksemJcGqVRBPl&+qEL}eV1Q1pICJC-sY4A5PP!OdNf znPIVk=AyrcM_zW%xq)uV-SLwB0JLu6{Ga@zmA^L|zl;DDVDn=hW17T4<KtDFBr#)a z9Rd6N=UXK?b(19x286Ir7!AR!547yXyHvV~g%_IQ-%j0Tzc+MQ7d08HEpsaO{D_m- z^cR?19f8@wzyn%-kT;v`F84!|=_vs_J2DP-wqEBKWID%($=`qI3ytLcBGIww{V_mX zw8>izw0k+Ly-M6&zUrw<xVY0H3R%KtZmv!nf4rb?fp^?OalI{TUf!OjAbq4Ur1?w1 zg2{_%>Z{3`3Yry(xHNDx^en6@_AdZ68B_PiT4bU7DN@!Ty0g9MYc>XxQH;I(FNM*z zg#K^QR%n48#SxlzdIvUwUfG4#JLgdC<aF<vDcLHXz~MBXTBgK}+cm9c1=vl72MC-^ zU>|TZWqt)(orw*9WcUnVaYf?BTE(zAR^8m51PA@xtIbTo?8q=NYvw)>o=+c}a*Fnq zU`17t8AfAaaVXbFiQ?40)VmAVx!vLh2NhZA(LdjQCOr41e&dJZaahdC%}r|>Z%t&L zO=b6@hoSM!O|sHbJ5+*vATU*2PJ_Y>HnJAlcZ9|P&qim_m%RfoO?IRV6ye)5NN9GR z45R8MNU>F&W`4ik3`foSc<G_FxYV%<nRRh2Ja|dVwMI!Kk((+NoU9Mp&D(?6i#d)A zrbzT(+jRANAucY^EQH6;eDmmq2S%Q6x_j7cVfX(HL+}lHqnSi}{dBx}2^hYAbugj_ z5D%AL#$NmcxC_|V45h}exWo@mGU?6kPitdmsC{0y<2>5ubx;R#{VFDZwS)CTmFoel zxV#*#ui&zg^xJWSsSlmHTSFGGW-r5W3>HuSyd=AnBR*Gw_nS7xLfX`tq$Y1QTHq`l zoM3zfFYR_&b$uuT?fh9~YjbcDDQQ!DN7)4&^g(XOinj+W-hhh7YqDD4sgPOWI0~<+ zZm!I3;=*{5HP^N2>wtJ-L)+7vT(I#Se2|ix)YzHp+u2TyGDf9AG54nMZ4QzYkmQ-r z0mqT|=47Tv(4ar|iEnI4Z;=koyy~~}BWGsz4LpsVbrHaugnC0M2y$bAWEyQsEN%~4 zT@;bn%mS{F-<)y{k>|2_s)q|NXaQ=(2QuFe6#5D<pkupaf4MsC1*BTr3cBsVD0{Eo z%ok_7ld(MRElsr16Zb>Q&ZE8OD6yaec>|`$qOLD+&6`k?2PX5TM~KiSGw@<YdRhwl z#2vYpgAm;hx60ajxCC3?^Yz;fKfYW12G2OTVX5^}<(3^(-uqpx=oEn<y~TBO^_fhg zb8dZpGT<WbENyEk@SD$8RE|;yNwzYpu`&wCnXTa|p)2iT6`be!r7(gKwA(j_uc1M{ zw7?TI##;(F)la@c*}?L&TYHQPN4O5P?IRDf*y?=gj#|%$_A;$qhhV%2I}dZ^3yCoG zX@<PBrOQs0^ot?=-pIV%RG86}RF@Z#4{|MH#755qAzM@!fh)DCo2%~Lhm-oWPg)k# z*gjmkS({-HCc;pnrgS;>1wmGpT1MuZ-LcbII^6>+j!VU>ht1~H)q=rmB2^(mWT_CT z?&$F=naDhbDZ1Q6^3pDzrsFJ0*qt8&akp=YYFL55HN-ks$wHW@RzGy`ig10-Z}jq% z`1ZjMi1E%1Or7o1SzGhHMn%hL4WmdR$t_{fPEj+{;j4_MooCKiA+WTNpRnx4=xA46 zJ-_%QsRcct5V(@=pjmLE-QLDNCp`7bjF*#GFSlT$Mt!dyezsHQSLBC!+bWF8*xj25 z4L>E;rj`ay@9$+>Um>h-O=xlI_#O_`IwDU9Ha>j#)(sst?8+VOU6{n@Tbozwo}2E+ zpQCB&?<NlyV>&1eWJs)NOfbmzu@I(UvPc&z-Jvjt+lE+vr3(`bK+C3kNT#gb`dHy= z2va>oBC#e=n5rNs*_r=w8Y4}5j+Qj<W`Aq@d$ESN&PQWPaeizTIMGDHTwjq3lbFYj zR_leUwu?ju09^g}96OI}Y*#N$89)pt3HPI7==@e{#?)w5&}OdZayVYN{!Dw1g=qGM zRqg$b$IRP$da^inh#}(+t;N-sXVLK<afYWrjZ6ob3W-+MT2N#Dl!d}n)T)Ct5w2z! 
zs%i|ER^+oLzV<yWaH+-&@=!5DqUjUvz^p2lhL@TJzN#VG0vxyuyA*1KGuiB!Wp0dU zqJwdnfQ!m1+cvWDTaqK{yraVd614c}kFrXr?P8fE$Ssc)eTJYy`H6gnU|^uFD|B;b z7G*1I5`no=SLP6KcxL3|*+QZxRy&-bHen-UWsD*W0Qp1l%=Gk5T7b<!j*qo6=xaK@ zzq0IR7jhuC+PNqEIo`fxY*-XUnw6<s5`6>gh4C}O^Bu`xg?#`uqhKY2>z%$yqNf6~ zDg&bFSs4%R`lFP|D^)@wXM4)N!T5}tE8r)k6|?j*oD~@EbV&$SiiS0zmUKnOR31uC zp^^3ZAx1__oU$W~zUh#+JHr)`zWz!O7zn%!R8aAZqnoX+o2jjl>+|NOe^&1)(UYLW zO~g%AR;eYn<6dgxUZt{ePVd>+?%mg(ZV<v%z$X&Csjk0{9ZtKfJX+ihod0e-O7D)+ zh1MFzQe4m`0}IT}1I(LS?|@#?ejY|%QfeQ@v7eD?-=QhI8R!{D?^u;;1P)&t;x&24 z`}lbM@+Gb;YlXVsa8oRTKsLWt=Z{Pu46$*hI5-wx(@0cvB))ypNqwwDyon4=?*9C` zaVkzoDDUzZ0^`G;{pJkX+|!%M#Dmtw#B?gEQ)g>Fgs3rH3>LQje4uwdwh~*6A}^5i zm7T4%rKOgmz3mI(zyb`@+wdq^q&I#Wf#`yFdoW$YqO$|Kf_B*W6kLd;10poDJ)cRM zYWRs;Xqs?nx-i`j_rF5)2LO_TRwoVqzC)I)H3O>IE2Tw|q_o0a$@Ue-@pFxVtuwN9 zAGV6D(<nurQ*iJ$u1FdzF`+(9ps(+|v+9~!zFUt0n;aikL{LW5LdnWpd#if+9dI}l z#afA343U<dner*|YOFCE1GZydK5lPL1$Sy5p3+v!)7{n8(AawGad~(X7F9|J8vf+~ z0{60EoLzF>RZ`wk8L%xsV_{{9-b7d50Ir^{rjhYf_w(RMY++g%yDKt8HX3L#Ca=-t zAL<$%0u6yx;&;gm4TXrGu1s9d_X4erZ=s|X&{L<G%h8bA&k?BL67ea_exd~^9YL}1 zX}$bvAH1G#E?+jg%PhEE!oGOf6ds>Ya+R?gi>e&>jMSGt*_68bmt(7YhDcZy$vG%9 z*@EB_rY6DI;(<H7`T~^ab-ebt9_a6i$sSC#wIIcyri_)90n19Dni3Er^p39X=7#RL zlw4aa?#`Es^J6bk5Y((75(>OO*qT|ZP20|H96q0RZ<Zi~JYqao$b#NVxAa_WR^2Ct z-K({MCe1h{aPV=uPotcn&RJoB6`){isQ6eI>&!{5Bk0*1wZ(MYA&VES`?Fg2XJZFu zD7>E#XMhaO4o-}F-)47#$J3L%0PhLyg3pL^lqI{c)~;|nj_v$+J($yixvT?sX>r<V z8xsi8Bb9Y`cPR2gjQl;NiqN$D`?ndYAI}?)bmciGRJZ2lIVW%?Vo){8O-(ZBY~78W zZszXi50)jT&DNe(&Q`o`P0f!trlJmr2-%3aIk8d7QfB8aTDJ=NpMNICPR5oXEnp>i zgdb{SylZX=036Ft$;up1)k;i_1(?aYMr10kF8NBBCuuX|>~j<?@mdX>{pJ06yI>9W zr$Tmoi;+aFM}j{N_(?7ur7*6yccx%`y}erBuU0<<8K5--Bl8b5$CzX8_c6~D&-Fl` zE^LpW8#T-jF{-LIQRuNr4{Y+f#RdtJCa?EOkHFQ|dEM_D6Q}#ST7CKPsSjmj1Ga#- z;B-b@nrwI8u|1_}+iZWm*%)6<R3xPaCoX4bPg554($+nlyopLhSvjhlJfPj5J~!ua zGvL!EVhedkDgDsSp78EcT3lOKS5QYXnx-Y|o#kDbSk#<eQxjc`5S$Cu7`ui7{l-9y zO7LzXwE>NoX@i(arxcmPsMl0Xs`)`|1i+Pb;NQ;9%6ePiY4B@3fhDoVS0lAZ%fnsq zQ#&Q!TbN<xuKoDYY%4KCq_Q|cVBz6lgqX5|%WC}*KZ=ZYetY!G(H7<0V5Y$v#R>kZ zyjg~!OZsW4)BWjmsMG!NF9LT(`I-smCrxIw^K~m_%dNh|vPj>+)C;VsH82bU#64nK zB%arI8GyH%x%0e@kWdRn@oSnRs_y=Q(aAxUv56?YffTu(hguI*mL)z8XqvF4D?yhV z(Q(aD5O<^2eguaw$2ZJh!~@8&K2i<uAEZ==e;&drIQ6Ia=oZuslQKu+Fu-sI$K(Nr zj?;sc?mKRN?OSqp`k~6hnV&$;qQZM^Rcu^?-SFHC?eyAB<Q{ltJ2ljJhrc}dLk(9D zJ*7KenBcx5+mwW+2aqd8`)w;bPDfE!Rc~!-t*!WY=l0du;hjYzd)(;*wuzoP_x*Oa z0}KRZQWD#nf$<1ik`|rwSa_jRGkB_@<fg0e$MUcTfK%%Q1R&9Zc3E3+Y1zI$ER7EV z{k*`~D%8r4G(dk-pOI5{KzK^=F?e1ES7*ogV(MgU;cKa)fJGV)&Y@G*j%;Zl>5&Ek zVt6=0`gL`(sdpD(V)x6)D?r83($VW(nXz7)lVyR>ir2&8br$6wgUS7BdncZJdb7j9 zCY*RzrZCRVM%s$vIHUR`tDC32%L}k{@<66`wM^$19c77W1JNX6%qDA3OHR}1ysxCx z&lVC9W`Bpk<j8N5mPY@B)0<YuEloK&Mmbn)Fj<v5Ms_iqZ!wt<Lag-UQ`>IS*!Awo z!C#u~jZvrz`n}j-O@t*6_7yyGZ?WTavGcT~KqG@coRG_VowF-9R1|Z*sqvOyxX@U+ zxhZ-=;&W+YVmVn4@Nj^i>HGHcGCLD~tu}x1S-C9A_jxLRSmyHl%tQW?(8UXIt*_vD zhqd@|z1K{*)$D*+cH#<zT(BmuftU}<4%EQ8(F~PMmCi<pEq9{q(7Hz`qeDD33r7Wr z(qZs=pp99=4pV|{;&cOvQGCEHeT|D!Ln)`-U@UW6tjsq@;W|(0HaFIutpxWXy_oZv zkMUJ;#+83BTb{;oTIv30PQ>I@Tb)2zf@iK1$k=Fhye*z)NB#4atAlQ}Pf~iVe>^Z0 zv6kUv&(V=`?rF0*>tPfFOSW%!YX^{rtj*=uCVhi|gqzIzqpXO3r@Qy1y=O?Nbe2bz zdwX=&S$BJIRr{lp(<RTQ?=&#<@b&e4G=!lfJpar0zM>1U$-!j67$=}gqAK^xK6S)) zz8nXmxOfU4PttpKr)$B20NpxpHN8tI4RJFO9nI(}@4&>W<Z_>%^!PDvf`AlUT3on# zw)}Oj>{UUMb8(V=ev&P5^5YYd|I?>A^0Z;*+u7<Ue;F^a<}FjRZQk*k+4_~6+GI_N zYaK@&Uw?D#O<sPT<AT4jHKcJ9v&KB>y#=1f+pqQ$4vyi&!>%ql*FS}>d)mb&GB;g7 zCENN8@uWRD!MApK&IO6E1$djCCzk=WbMbiFADx;fZ=cRO=W=iJcf-GZGU!GK0_qOU zhO_REE8zMfd9_8DJ~8IyhQ5c@o2lof+`>UgEbXSQ{VQeNExMIxeSI&yOS1}Fg*LB} zvO;vf)sY5?7&mjIg4{~~INxK_`<~Bol;=)0I)pSQN>%0G*qFa5pzJ7y8G~vKl#T}u 
zDF(?O`=7UHsr_$BlVxYkUyq7vwL=R$eT%H)D>Mm810>xJ5?`gY$(7;5PTX-<*!@O| z(UTt#ki5~`KFEUoVbtg;a2C)P6`{W)QNHn9HP7~VFF7?>@dkt~+2`O0u@&)nL!zhf zs?fjXZ+q84b#C_(1if)509@}}9tZ0;0Me3{0?@7vEeo;4&z?S>o}Yc+*8n;T1PABI z$#kW!dmjz70w8K9#c6HszfMr?;okI7Wvey^C5I<6wAXh&XCpq4GJn9jI#ZgG!{%tt zWqr{+ZV+%m7V-$;FHdRH_WHQ99$FbsrZqAyN;@e}KPlT8ohNty9LsdQr78Y^1li(h zP?}?*HcHCbRV_c?nkd_+E;w(0*j|0Q@f0xVO2~@rY(@(4aMfp|<Yjku;lWB0XWv^n zG|ctYe&GrPIp3jIb&kkhO?GBF*XEjtpDshB1DlhciRs^q$E3Ac>;z0ZXLD~|e)+K~ zlA6t(!0grm=07;i>;0q5BzI??hf2o;PeyKz9_hl$#0lT<a@LGDIIF~8t&N8@*c=9i zW*uzh5gtL2rP(M+DWKS(B~Vbso#fu~JP|<x+CkuBo-I|BDnVf_Q4o@k@>GfjAVzj6 zl#i2xyr7e?V4TbYva{F?b@F(z*;LEKJ3THbDJdi@{9B4on2%3NN(#u>-a9y2Ra&K{ zEq@KTTwHm>7+TTjs>M#4ckJbW(}*l{XU;7tbXR!3IDJ&iWHp@l;!t_^M}P!4La6@w z;w9o`_et<YJ{%7D`&3ocgeSzCv3ULa_6OfKy%T-|PCqr|w!aRq=l#j(xU#f%M?ly! zZJFMt=kv*O(ll>;xAp5Rk2hd3+S(XH*+3x^nr)%rXMQ-%wC0KW8IGpv${)4WHIT+( zwv5th<3l`S_VXvM6Dc2rqrrOHRo9Ub6{u_wR1sxrR7PU30g7@wrF>)@M0y4cP&*}J zSS4Lp7Tw=0x*y?`a$_8IWp1|DV&2Yc0Api=0-Cy71}ch?wpO;H??XUB_nz?a@LS8t z`_xh~QE65IZhS2(8XBpnzCRVbxBLqC2cc#mTGf?b9ykafU_*W*yYiePQ;NM-#gfjr z8pw5K<Yq=fQIcz`ZMit=;VDzzh$=G%W-0r$O|*42`5>w(TW_pItH{EB+VIM3w0;_3 zR-S9;e|$xFE?1uBU0$Zu@>bswnrJT}xZWL1M<U$(xI3N<j+W7Wf7)4mCDiVEy?$<Y z1}ZMkySvqkZ~%}zAPP7U(5)jye!iWwps?C0v(?SURv+~6K$QYcR+LU?ynA#vHYQmk zY+gQJPW8?@M5B2zL0JteDfS2*-E{NoM0K44I}NTC6V7~9RxV~$B}F9#CoLUIrz|x# zt1*xQ2@{GF6Y1=B3)3YBM<FvbG`F(!BIKBc(2I#-hTHSSK{RJ8yWW~pTB{&Idr+{N zLQ$ZvD6|BvP#M3qC9*>GDYH}ApRxH~W6z=D47qm)rWIn6yE8?-ch9+Ar5%jSs<X9@ zp}P&X3i4n_meJq={R5QjAj<Yqhku6eXx_gsD)!nM+WYzCyu7(v$pwO7N9eBI8t5+s zq(D{DR0V$26lK*^wd7Pm8yh+*S}H0^K2Li3$)|hHJ3f!!Cw3XS3)sP%a6%&9mavJs z^&JeoJUz9~Z70)Si|Uh;5(^9q3=N6|B*i3Q!lQ0(9-BpoB+kz!GBqVLHWnfz;636L zB~UWXDpaJy((Ad2Nj>>C5`jLI>Uy6ii{cy{YF)QnJ-76{yS_F&_74(PPA}Ot?gH^t zt@rvp9?MTj@&=VHiY$WFb4#klffAly&78D5sVvjw>aLjV`JRM_0SXHDgrc%hpBS0L zAzVggy}K_jtbC_k!BE?TdOlFR{!>G5uy5hX!HVxh=fS6^&X>*)4=GsKAt9kX!d{9L z6jK@O-^i)nPx_^21~0hW8g6%2J2&tbaO{;xtt7Lb#$r!42b*#a(y7dE&8d*5^&+^l zvSBjaOx_^BeFKAmx!$SfbbpA5DJemi)1IH08(3Uk*bE1Qx+SPWEQTIkl!&3#;-9&G zR41o#mGz7FKX3N>Ev^}v;pbs%`r+;V_&Z&gvK>I|6IgbbmcOl5xU)P^cp^@0x;(?# z`F!MRqEw%jO6?+hm$9R?kg>Dn1qQk>hh)P%K3-B9XI>&WIkto`wD^`*_?&m8V@i-~ zx&`%d42oo*Vm9%u=%U-N)Y|*)$;CN2C}?eK_;fYCvoW#o{pDfO9Q!yNNageR_|SB@ z)&7XZJn+Z6?h(jhkg~4_oPlSchbvxrDa#scqY7U(Rt7BbOiT1MTbSAAI@|c9hu1fk zzdt-2R%<u6y*-@cx~fvpXc3sx@^VwDTbh$&Q;V}G6?JBctgD~OG_x$xp?mpOCaz+W zFc=XpmZqcfdcLHKd1^lze(|eUd4Dd>_8Oa(dbD%i*c`G;J_Jl>`LU^_q`p0MKgVZx zyQMpt7Q7D`E$>&}t;y1dqbVBrmq~wnh7cj{%&F{DX0^U-6+}ByTL*@MCOgQ_Q?4l$ zZ?>MUs`fX}+*(sx*@TQk+HkeoIeftxn%ARj&|X<n>vD8InA#YeM38w}=WLJG?ri<s zr&s_owEl5_zu=+Fd|I8$=~J{>CFJod_WYbLCggp8yZ-fl49MQ{e1f*<N0s5LK*#BD zgQDg+=XQskARiPW{CF-K3w!hEco*<zO+K{V@CMn6>cY6X+8B2BToz^@jF`??W<@G3 zInwfRDYn)#bTTvr^}ASa2C%XgCnY1k;<=!t01kE_9gw^;gUT4-pxR?Q>#Gtg+fwgc zf%F0-Bnq%#nlhhxR9wU)B;SdP^^j$bF3t!4P~2H7z~4Kjq+-=}8JZh{>QjLdgN}Sz z+9IW;nw*+yYN@-r5c%nC;cRjh*4$p6R<}C{w`;ST(}ybQxJcxkJ$gM0j0&%FY?$pu zfOAy-llmv^RP_&v5=trwX-Vhn&>J}M2#FDfM^WTv3Yj|b1GG5OVqF6RJrq=Q+2w_K zI{QYOTi6iEiR!9O7G_NajgqRWp=oD=sfv)2n%rDIPkC;cP9zYJEyqG~?r+pAJQ!vY z8WI*75+3{-9(o>ndi;ER7kxvJKe#&bN{Eo!5!B;iV{dM6RX6^4e0&Ux92IbYe402n z*M7+HX|c2&q8(l?pPhuw-A_vDbTzi-mJnf@+42z_!jv5X5|YuZEGoJ`pHLl=GRfUr zQk0ct{k~<Srh6WoBt~*J|9=2GLBzfvK0#&gQNzvd9(0z`$;tB~Y6PqbDo)8c%~O~9 zmn&PZDyt}~s3>`Oy1}oWTw0vn+&nnOp_%XJ<>A80Je(a`dU_=tJxx;!Ei+3IRRa<p z>F)?B|KIP={OA9i`kynz-+itm|2R!VKtX?wUopF|`WrU(c6Uk(bInbRWThp=M1}cy zc_hR|Wuz{e85@=q=01M(5G!o(spk3`IO*TivZB1Y>T)of&?YT8UO`SqSP;%FkK{#h z7e~8?4}Q<$bar-Dz#i*c^z?LG+?)!E^3bW<Td>^8Eg?1%BH!YY!n^ms`a6}mS!vA7 zjId=j6{X++-;C5G__1&tV4wZGJr(6;Sy`BAX{lWt?7rFHyLWFH80dMqxu7@6NK3-V 
zj;@mMOPQJIsn0Pnaq`pB(psAvmltJUswm1%Pd3ump{1qa;^B$S&d;x^=H}+4r$1+E zWKfu!Szc0*o|2%hs!T;iDJ~&4yS9co^`QJ#R+gk>q;fJ+U%moW;T%axN>Ec%DJscB zfWyPh31eI-dd~y>kfI_&upJ+y=c^ANcb`9d@%G*1^duD(mAIIQoD61HB{3Cto|2Nn z!_5_7hOI#Z*{>TY$Vs!ZG9z3ZF}eZYb?Vd)>B$LLA^iS_AE;@lIXKv_-x%I~`SSkG z&XYIqrsrlQB`?s>QeTh|ad7mdIIl}CZs`zRSKYT1onC2Y=KwARP5~UukkH`8TemP* zmPe*0goFfOm!M*$MN32NyYIgzAthyCWCW=fz9>aWMJXdKC3W!v7+`qBR9P8m@cG1K zRO}b^Jwh@M8UuYDEjY8nLIReiCW&!TIT<OyEj-4@MA%rGiwFzS(a`|0Tbt|Oe064O z5-~BcwB!Z!u|ic@bcJID8x-I_4>-XG{k%O2^Rmi{^OEDERh1MdsVIEB+<z<c05+V^ z!>>Pk`UH-QsHlj6ksjcYm6m+CO=o*EDJe-npzrhnQ-}0$J9X+5Q2PAYGhAQ#=+Q$* zI~(|{iIKs<*WbK(1B#az8+G_2+M4SB{f~bzG15<sk9_mi)c7bBC55nv2op0CD=X8T zTif3>0PhON&Dq5X)E>qBty>0$2COVhhri{){d=%i-+%wT>DN1kDGs*QghYhhy<N}W zzI*xZ{m6|G=x$33vxi&qpXaeKH@!K36ITNV8!J;2BhYFVW+reH&_i!yGmyjA2B2R< zW8k5qqxIpFr!PNztgb2tcR_!SuBz<w3XZ=1QC1=Sv)o$RUO6RQ3+-cD_g}ny_T(`P z6L2n!j11;BHehtSK0XGWQCmZ;wyF$kzG8R?^>xnnHsEKpEvyB^aRz#x2uA`EN<Jl< z;FPM5Km7!+fPh0s08{{SJiHC|_W;1)ikllQ9ef=+2%LHly~7`o5EuEc|Nd_fhr@4y zqMkl|IyoWs_`qWrONfa){MP!~3TU{TybQwK1tNN^n_XRAgo9)6VADU)^X~mS)aa8G z4^%)ncARhTYHubYCNehE|8;*}zXt7spMSV9jFsf%qyv4}?=J+y-w!|hz{o%k&|+-> zr}7;Axz?_Z(U~dGc|#)uHdf}t&31Plv9qzbI@#meXXwik5@HZYK7af6#*Gn>=*5M( z!_DBdYHFy})n9@JOV}DfmseL)85th|!3PaIV%|k#EkIsaT)28`yR*9!o`V3seEHQZ zuid?8s@m>EtV&`=A>eb$JLg80?!)W;KAyl~QEBn?%JQ|bQ5Z6k;-m3@U29VvoN*;x zeLhh!E*#9eyEvR7Cg+y3h|X<#_W{K`jDzRS(alVaA9>zkIDq{pAt4#O(tE%&M1ej4 zkcZoYTM!gDk85ixit=>lXm_6+_n!HW9zKBaIv-og5yd<?nVPyvY<x5oHC1v#3|2y5 zKMjraTict#(%-*#7rF^9r-ZenB*dIPeJVFA{Yd8F6hFdY1wSi~n`=ia&RgkjZLEWU zJG(k}5B5X2YGA03m3h<!Fuc=9Akd(tqXnr4S3f*Fgo}AW{`25HqGBRx>1dW0v81+~ z>}?^sDJU)k2ZB{|zJjo^77na`pkLqM6>wZ&mT<L124)hnDDY|fB~_1<T%K(oUPH@; zp|G&f(B$O6@DPmM$6+2KR#6!_E`9+nPIer*O2;1vNEk%5d=tu_zWFi8&l~n|V{PU5 zr(Q^qA582|U-F!In8#nP_V*AI6Q!pnLG*N-_A5*CFv@_8;Lv$8ZEcO5+$^*>U}664 z+cyjh=b%k({Uz9UHy20P+Vf}M{+Zne_wK^*1Op!gDTg<5L0lA6@fCVd1Lom}V~ysZ zSX-E~Ff*7L8{qmm;FOq{7%WVUvAzrDR$f7_yta01e$K!EC-VilnJ}QA=jCq2c6km4 z8qmOmr1-`4m4T~Qa568z2blT#dBe|HSz0*Uz}dkLtaV~?0s|wxwT0Q$fxg{cEPvEy zZDpyavn@O%5KgO|lYLBV6!hG#%dOuu8y&s|#}JP2rHcH#;ugT3R@ONnr@Op!zGZm5 zs;Y{df;^|NptHXZyp64;#ni-&Pd|Oc`gPy~Bg2A;Nr-ucMc8?HaWU_SaQf~vAtjF- zBBF3|<vyGjh?@>Ki;szf<E){k@(JyK4wl9FS)e60CK7`^#D?HMu`&;#hvWru7=NqF zi?KooA2c&FTHRRv@fa7FARLD8ZK|)v#k{VrR$*}gF6LjodIbTahK5>2Rr%ZZ?|Ha5 zl@#O-Hv<UI5)jbP(TWNS9)7o}u9|{^qOP_ImA-!c3RearGAsz9xzmK?Fj8-Ctz#Q9 zVo#qrbB=-j%-J(dbyzwVFAo<V|M`UUwEmIn`UZNqm}g;Of*6mOgaig_Y%?^b;fT2) zocga`>F(*k!#p63^zp>SJjCw;g8Xp_u@LUTFnadv*__PO!_5qIb<UhQ1H?isX=86= zV{3_Zw^fxD2?z-(sj0wS{QUFJN$Hgo+!};}MiwFEMVIGFTE@A!xUKE1svGKgxVfOu z!db(bKQYwPId%FJ2?;SAWNrZ=HXa^a%zL>x|IewjBpi~i;e`q5Mevcf=7w)a%uhc- z&;ff6XCwuiHx>sV1o*e_Y=6;tY7jlFww92Hh=!I1q9m+41O8A(Myj{3YwzjaAHckx zc5z7|aER60Ko^6fr>3S-T2VSRJr0}7%fe!lq$DMnm>FecrEp!;1I#SUjh9vz_ntg~ zAC7hK)lyeEO+ZXU$45#|+0j;ixPhLwCa@tZF9*1Kxw~RL!Ypho{$Zgl-CcTmx>(@> z3j`Yxmk{URV8^<*Yb(p}Gfm8lY8&g?+naFNbrkc$g1)|9cywM+kRN<weM4<}Mhe(S ztVdW=T@`wSoPwN^iXy_(U0Fp*PF4!**D)|LN=i$@aM0a>8U=3Od7!4_LCC5spcfcd zIa1O(5tC7zmXlLjUFj3($HvBTYkLbzqCi89gp7=ZgM*2koliuJgZDfh=Kt}-St3>m z=ddDeeN#q8dR!r)w2UNR?|djn2PMeg2gaSc`kFUCzQ^c15Ouhjn5Zx_3lnSy2Jw&B z?kfdOlArH<c~#l=?agB{-&lu>c~T7X)HK*lAjaFMsVSFLmNuZ|ntc4mFjnSSSXoq5 zm6(|rzlppsg^!m<Rz|9)uk-eu+cY?s*VEQGLr6l(Btk(+eYvF;D;^Y-l*(!<<P;Px zj`mm&9TN+)r@voqYpbrVHdf{VX^6@J9v)sEtSJjPEbuc84E0JY%bT0(@GviYJmy>5 zTPmx{C{bAg?979{Y3XPgn;D0MheC{?ucLjq8FWuJb~YsyC2&3movOKQC=tC3v!YvS z-FQ{cVqwETNlkS`Y!n9v+bwKqB5h4|a!LwretxLpvT{5Eg1DIXb_eFq5HO3{1s7b9 zlmTy!^;ki;EiW$%nu#?nP*GY4I~)-e22vjv9f8Z+h=~fZva%>ED=;!LVC4rIaB*|y z=I1P}%>OYuUs?olE_7rVg(2=ZSiwUQ5)x==r~>o3+3B#)ShK!T7bV!)*^G?zS(vdi 
z51SGZ5h|}LUtF3)={z=XqobvMmWY&^N0FS8wzM!CYi^m6iptQ;6r7kV4(1t|nGi^? zs>TK#9L%Fm69u`7s<No42v(iPG|<)8DJ&^!Y^=r4yw6e0$Hm9=Ug^4WrHhIRyQ%>5 zbm!=-?W_ZW1FC9kE?$&yu(86%JR27`m!YW<oU-QnuX2Df2}R^wYRpQAz?`14j`_0o z@wD6mc6Qe7tqokqP*hS<MoC%I*i1+QcdpXS`Mc8uXPHDSeY4M>=U0@M!IcKl)Kmk< zg;j-Go9cmicMq2v6C+U(VYp&gK$?@29drhC{`ISuhZ}$k<>KN@&q%#FJ9XsbpQD&3 z)zQ@|EiVD90AdbUa&mF7v4NzpVjfx9S$TQ6&CE@}+axE(LBGK2$AEeGAb3&mFj$#~ z2nF^zJ1=`;@&+#Eb+yz9h{?|hY7>)FrzT=)AR~409Q`?47iU^JS{Fw<9LzJiczRUS z*Te6E@f{b^+Pd0OQWx<tucuv5T-eZX=?I<oN7B;aVO}6UF?RIE(8}5}4UQBdM1S-Q z^xnSS#bw3wYb&6ccGi|yb)Ju(&&I)ygp{Pc<y&7}UDr*_rb2eX#v!J@<nmlm>y(nR z>iX&;R_3W_XcW}c4J@s#JdhHyvUr&P{tV$624Pe0bZ#DAH5El%%o`Z!!cV~JZD2@( z9d>ZE?d$K32n)f*JRr@@&1G$4frohjEIBo4YI^LL%;WJk+B%w`;q%LLjm`C5K3+y9 zhB|uMpmLw9p0>WBo}-InWld#DQamFAJ=R?U=3(e@aIj&;#XNKhPL9;{l#!9ExX!+g zmKq@m1)ZP{At_ZxN*orXMTEf9I(fK*Fk(HzAQ!;Avxj>{T`e%rj*EG6ka1mziEuFw zM+^=^eo;X~BZ~Ru-_JZhFh4mp_VDo|Jj^pN(g%eGwzM@ref?TNUiLf>x0jnMSOtWW z15`H`M;|YDIyxFKV<7dgofprsTndzxQ$I^1L@Z=#5mr^uHe1p*ar?mzwxCIknvPao zSI^27;T#wysi1(Hc_*;*XBdQxJW{!Mc-3(*58ejGcLp5Hx1yQ1vbJdNYzq$y#=|@h z54WAYH9qD!ITDi-#wSOQobYx8^W<bYI-2E`rLzmuJ-wYtDG9#*KHf+VFHH6Egh!;W zS6qB7^wW&ABnBMJOI{S`2Id`YS#UGY!O5PG6o2(<AJ|ts%+v6xpCzWqOpV7Hq5|_S zUY@v_M|m4((D}ML6!W;e4LO;ffu0Qd@;a^t7@coyJWA*NypLoaq-l3|2X*xs+f@k+ z^TA=k?Vatr&z}bc`oYO%WMY8!)YMb}2^hJv1O(rG{~ef2puX#J3)T}YE-FaR$VA7l zd4^kC-aaw8W~}t`&H1gzhnQETp`+C_FtBm=K!ij{DJtT=4(oK9fCO}2-z|lU8z=MN zwT@)o(#ou*ttl)d5Fhir+@P5d58`1S_!%1?J|^>c{5&vUQB{8b(Syf(JF|;7hexgs z4Gj!k9~c_yM?GF2xIR1xU-W2qH!nK_;#piek6|8{&O-wZ4t5CW`UiSw@i0$HLCLLr zhKM{fEdeX@Ama$6_c;{vwzwL6W?l!kw?Q*6D=U4pw>bv$&;Y-;nVK2j+1bIBp+z&# zfMNd5gM0UP9@jQCc*1~dYOJ6r&&tL^ieBksZ)4fp)&3Fpy|PiM4^}2uZ_kU$HiS&_ zoNC?)RU<hK6IUiS4w?bbc^W!8O#?$4cTYrUq>PdhCp#OiYf@(jNl()Y>A5E1VIHOP z#z!%4Y1Z7<6#7RnKRz-1&+0se`O32HuFIX5TiaV2A?QHYI;bt^$F7bxkSS<iT#yaS zzry2f@ajA}2U~P()Rq2j{LGVcDx4-H!_7RCn-B6>%!8k2W%(UCk52569iRCl{ru$g z7%-13ius!P`*$DS@9OLJ3-AR7h)Iayc!J4!viIv{_s5SO9=;~HAIl(+o|hUg8Cm%g zuqyKE_=gwvXVi|jU0Z(l<A(#x(`p$Q+PZtXp_o_3&pdb=4D)#Wyx~zgZ*hQmthq`I z^Y#wb%y_*`LQ=x<nQyAYcTK9Syaf6e#MdyGQc+PJtdx`#BqYR`RpA)s@#{PbUY$RR zd39i(lwIyL5q9Rqj$&TyYv%Fz`6Il|7oBhXgO~@=!1b%YV*cU%m+#(ZWv0O~XJutB zE&TRw@cq4A?Ch+KwN-ffdr4L0z)%k>J1-Hv6pgGiGW~K|?RZ7!%+r@|zjzxuEki?F z4=?wyC=~O!=G(YBo+TnX%P6Ynnv92e^5ZeT?``lf4?2IOpC6kTIhy%p6!Tc81boTQ zqw+RDdw{R^-tNxI^1^=I+*q5MxB(|UKPTfz=DChy{x~{ML`p%zCVdq1KId@wdGTX1 zPl0A$_K#uyNN<B?eiyHw|IGY@-N)Ovwn1uCm9YTU*4zLR^uwtiuueMK@9)Kx6;17J z=g)Il*!U4K$x%t!d!)9f)ZD1*x`~B(<s+FF(MRCbd9vf`d{{`(Kd<x0VIDKz1~>Dl zLA<1_7*|#Sb^th!3=e+#;yK38zxpSbkBN&0=4tSH8(<!<pJ#Iatn+x7|D3mB`~5NO z7v_)1RSF%c^9hM@Q)q94*U$eN!{%nE!#5tDr2XLjJ$RjroE(N@tgHC<^R&g4m5uGK z5bZvGcqcBooR~#{RNT@gvIem4p1h@^q)0_Wtz~Frca+Y1>@)9{j6ZMlg?ZcK=scdh z4Ibvjev8g;pfhi^Rb}{?FD=5wJmxyAm6cilU~fh$&b-YZqVu|FZ*wH`ZobHK^yiMF z^S?WWrT9a=%`uoq>3my9+w-??ogMAqSRHO=X=Xx1Oe8KL4hTWOg=-M}%zRrbdYu$% z&Qftj8x^<4+4H)3$o%xWDbJ8p0U=>kZ7sXw>b$6tXBwXT{DEAh?NK^^oEY{G@HQ`A zJg26iPR2HW?7($cobzq&+}eVn7TQ$SRHCj){a)sUh4{~(=Rq^y1v-y)e=y7w{{cFG z)HNyc<9nN0yz?pr@z1L~$ULsuqzAlB8<=@;HD*SJSbmzA2=sJ0d0A2%%!}{qeAPbl zpVtO%-+3S;X->eQrtO`VS~nG2+-qj%Y~|vDpZU)^&m?N*opA*7`0_S&pP6rM3=hTW zZN!es+n{xx{g}+ZfBzo7C^X0)TX?%fikWYN)_JUJm*(eYATZa{(=MwlOH7D8($C|{ zRRSUW{O6-F-UjbADPmSBH1qNQ6!YM1bntkaW5uxf#RV9h|0kH|k54!*^KBiiz`T;8 zyq%TB;buJCoUClD8d@3<g0*6s8UCxEuc~ZlZQ<emYCY`E-cxBM`?Fk{@(xMy6{GQ` zgWeHIu0f$k#;^p;Vpe`xT--crzdKh+LVWDJ%@Hx|G5tIg;Lse~g!9Lb9zsl{uCAI_ zkPA=pu&pa;X{aS8CN?oOtf(q4%+IExrrgA~vKW)Madfg{VP<;q{5jTB286`M#|#ek z;PW<U=5fWaU%bt+Vp!dy{rpkaVT%inoO|HskA$DIym&+m`*`;VZxa;yS?84%<!vm@ z4>yB&oSTQ+#M~5iz6C$?l@%RVde8In%+F3AY;f<vV-67mLN;YthoqRY>j@Pjeu=qK 
zxUa*am?tJ9WRb8B&gT~pl99UbJDAVTNFgR6wz0ErYCSqXk7oW4xF+=rdVZITwB+H} zKmPO+?1O@$TxNC#sK?=198gjcV}W@aTdT^NiiWys5Wtb4L9EkhczC#AYyvy+<9jTN zS(up_rKBYRfNR(KaOwQ9nMWQoZ-a+<eIo-oIT_r^Ofuc0m>1wbFCqdltQYus7%}lM zKQRS5{}|7G4;Yz1=R?}N+A%tBgH7iJ1<tc`u-iE~z_5#T?ZAEvTU1frH+)?{fNyf_ z+hxT!=T=xzf3jIe&LJtP_*z`~bvadaRu)D)I#0xU!6l;jf|M*Dwwdj#%Zoq`NFp%r zWM^}@fgcjJZqd=%q4~&MB`SvfWBmLtsC#|rg8X8A>w<(B977lf>FMcSzr^xgdfJ-c z=Uv@hsxMXDoSmYeq)1D~;yRS%W$Ee9>F8*}rVhV5IX()VMN3N~DLG+ycz_n~+yhc} zS@boj_}{@iX1<LJUT*`(9PpEqmlF`+=jP%#++b#U0@W!Dbn}Y}>g#LJOT4i#FD%H< z#l;Dy`y3rDRy%iKzRitCj~?R6RT}E)Qc+VOeZ3o-8=k&?qo^QfV`+A{nT+&BS~^-o zV*`j}2d{MF(Rmc}*M|i7u`mzC%fsUo8OvF29Z|E$$dc<G5e3T1s(7wR5wS{m#+2z9 zS<unZ99{_pRsr~tm6fKXqVzyGAKnNITM)h>JP@tz%||kS^n4r8`QtHPUQ$R*OmwBE z<M3O{i}TWw7r=O2sw_SH?wi-Ifn;uOE-xR?hQ_*AZ{I>oIT<V~8jA9=;G9D5aI(kJ z*{v<iC@CqeY^>7KQ($bu<825?$jLbsP<}oYKlA7C%(rpDJKtu%BU4aNU_DI(eciyk zv8hpBQGQ!n6a23BW-KG1mzyhWff~9Gwj~}I=F#h<V&Esg`|&*!BZD8(<8ZT@ieiA7 zk&yw|h>MA`FgL}z1TQ})19QbtNPzD!d;13m!~DR=Fz7ti5xKp&1{8ywN%Sn2_675( z(1NRp<yW`v?_ry7<9M2ol!RT<H=#T<E*pmXhD%s(Sgoe24A?U=(jQ(}0s9FILmcOc z^yunt4-XB-?``ak)cIpG|L(&F@NB^LFTb(C!6zgn)Y4M-5Af;gYXA7@=ctHK0s?}8 zzOKWKA3eOkvb=EkbPV)%fiwvT2|7AE<`?G9%ueC)HfM>+sd!ZhNGPz+t3=5-z8F^Q z7`e)Y`Pqojpr=otU|mvyO77)n!*FJ0Yn4}+KQ=iAr^(0D4QtEotqrVm)DP-BFn?qG z`cFUq%+1MRiDMb>i|5a7ZErEqpR=*H3=a$D!?8XNM0IUt8P_ev#1H8F^{EL_QQ_?L zWUMWpKi#>#y~-=3NXV=xY#Qm8*%e-JWn$^hVdj50OGLpX9h_28+CI&~$}TR7Who_` zaoDT9-5qR0m<}l!Dgol-=Y4IcAK*FC&mVP7Dt>%&<kK;j|M}-%%1a9YJZF2`;{)&F zV0ZTHS#fbO6El;jn26EQYac#-eDeG$#HtYbVU?Q0<-z@XF!X~Z27B!8<yKo?v%axL zi{rirDCUXCX!x~=$!XG)WAWu}=<(?M5#DBLVU~=H49}g$F&Bv(U7ZSw3s<(*!T#a7 z)n`v<yP>`g)-?qfosW%=9=$OHr@-9Q7;E^jU!aG=K4)g7gF~LcwixXf^xYKUAY`T{ z;qrCPp6-Pe<)b%ejExONgaq(>9XA&{83P}vy|78Re`Z%ucK0{fqvp?_CZstp7m-<A z(l!+k7Eek_a{OCW`*?etK7CqLOvK#MEHWy5c3~zqDh$^;spB($%orBh+<*Mg+|=lY zQ$Hxm%RYPh<j9Yh8XKNEeF{P_P)i89^9piT*OyUd_}#n4###yraxPBxaUAF609|wZ zd^})QRaKQt%#0%<!v?PQK78~5FY~0df;yD6j9Ax<h>MAQo_j!l4(m!Zbb~LsN*pmP zC^K|_5)$H)L$g^>79u;Hkcd!H@}i5IYguL4>gHNiWf`37ENq&&wYd)Gh=Abiy}P#$ zzl(5kfRF*hJlMkJ)n(MmVil$L@3G&o0(ykBv}9&p4kHsI0E~5IgpQWR|My@2<?iBy z>$hWK*rAEBg5n}VLINcPIjsE=3Ku6ED<>xvkJ1?)-HSHyKIt7HxxGu<zb+g1_Hg;1 zQ)iilRibk0@|(sw1}997^-rBTbsXgk@bw1Z0b@7|_KtSB`8oF=J%~rGho!-KPbc`j zpXVMhGroHD3agrZW`1Jym^X}l`uu6{l`c~=BN%JI7zFwu`+K{-`C~e-UcJ12wLdt( z7Y;t0cj%#-TI$9ohJnEWV8-^)jX~;HH&&yf!$G=9NJ+Fb)Buw1j<&`5*{$`}^|ciU z<omiiGE$RdWl*>D0pOKY6yb+EBb<s$i`O<*pFMv@Nl77fQ3AZQi=(}piz8G|gmXxM z4?iC_AqhE+pbqspCM9{v03S~;H)n5m7f%FKXFsGTm_sHOl+J?z23w2tK%kaXI6Gq2 zRv<mynHcGrSeRXo%G<yWK+J_&^Qfxa)7iGQvA(gs3J1j8#0UZ(W)>zbZB4MC4b2T} z;9l<CHG#;Hh|td3657DWp*?ixKwl&nYyv_;Pyi=;TZA)=3=aDhb_TW3Qb-^sE_!I> zD(3Hx#6(Ac^@T34ASYvKW&%GP(-?N%%iWcdf?QZcC^|kiI5Y@-;{rPP$)m&9pl8AE zHP_cjUJwVXdFIR+32_l0FL%tZ#vB2pryKgWqhguDin77cVF+XY|MuPjEUv6u6MpW@ zKXdQx@9T~t2@r@u5+X#<;O-Pf;S^9n0fiP-1%(vuZo%E%J-E9C37UksTSq$SbkD3) zha_}=X}jIu{B!5BSx-IZ9QN7A-u3Qx?K-uWCD8(&5wxSCq+pIW22Ww$Z3n8e1MCi` z!FQ7XtcV!fA)!5jhB8*+l$iS9EKneUPIlJb9<H*|;-GEL$>PG&$}_4)1}EnG`#Uih zjeYy~ofSAkb8#vy%pD&cg1dp<lbNZBs<I+D1n?oS3g|kl4hF&^zX0Fnmil);{16o$ z4EzAM5%}ukjY1aSIfAYeyl~#$!3O#`YfHk`|1vAMKm~!be_-4O9hSVhyiizD2+^px z#6`ICaOMyY0V>X&Jp%#~<O=v8upjjMr_Y>HQ&T|zZ)O5Ytf-`LZGGkYk7DNW(<dEW zZJ>6vv^0(!JpvpEhUDl$r?iD7*uu#HR162w!e{{YY)IA#iLryjec!%%_5S@k5Dy@b zjvqgE<j5h;0|z)b5A0!O-?4iy3)_AU9=<~&Sl%-iA$o!vzGFAbj$OO=?A^n{%F4#h z4h}0QBm|FDNm=pq=~G9TvrGUyJ{hcvh>F-#$iM!+4WLv^;-c6^F{lrr89Ea$+z?JK zE<wQyYU--Sc%zW;kdE#S=pS$0yW85{YG9-fw+pGmyi)WUA-b27m*wN*g-DiZJevl= z=0P4Lyj4*O4h!xZ?0xnU!F=yPw+F*RT1JWwiCCcq&sKvN7OpEWBq+b6&_Bo@A|SY4 zI3jvu!5QIf#l$b3I(-r<A3*%}OV{o~Mle4z2(VAf%^^8E3X2LKI&v8P%XQ!YTp&Cx 
zXcKPczWw`;pE`Y3Na!%{vHd4x_nz02v<!2*)MjMk!M>ksHw!x}I|mELL2(tMu=J|* zs^QfecW>NS1qcCEojoT2N8Hc63FsuooA-ocKQ}iwoT0L^BH+i!#K6s+R$N-NvWCnA z)znY}JaFdBX+FNA5Yv4!xKYTpE6IC!xQdC2oIZULZs|W(ke8Q@j1K?(m`7;*)wdH< zqj?26Uf!N&=B8?D%D_Tr1Wv;l9zSs$AS5VsK~_#iQ%fDr761b*;p^vJTv|9gH~nTy ze*{Ooeq+75rXnaL5M+<8o{oyDGCW5~DGAi%736^1p%I)e*3eiV7(O;Gy0h!@qel<X z?)v$+FKZiWXl_&!yrHC=(pgd2eTUC{!+wy3`xN_8p~J!$J{ikh2ZeTU^25d*Y+Rf? zJcoFBPn<q|R!~q*Ny(1v6q}qB5*=kiFjG)gl#!Q{RoFB+1$j+vjJ=C<QPr=#djOah zmz2=a)xqiMDkv(5UKAA(6OojX1c(CQf#h9iE_p=-)AO^)blgv$PRvXsr6!Xc?9|m& zB_t)lmn9`7)YMfl+L~A_#=uZt-%w8n`aEr|t)`9DGPf`X%rklZ#jCg9EiNzQ<mb41 zxr4;j1to{YXkj!pv@{UYRM*f_$D83p!$WEt>vM|=JbW02cw=c<X+W~Dh_D3QSVaW| zWhGS&HFZr5B^713*iRQOi`<0GYtJq!>>U{znVzhuuM3NgCOX(@>*AEvRHfx)MZ_<j z7Zy5w?i}1qK7M|FfwRKWvLezlhtG(z^DDCo>MPmBdZu;bh<+S=!fXf6?%_VEtYZ<8 zQI%Ca{QTvsH*a5qv;d(C_Xor(Tt2wGl(Z!B+$ALd18@UjBRqFNg@qNt!;79@m^V8= z^Xm00G_G8`zM6O`-rCk0D!?6snx72t77-a<RZ|IM4kWp?0=i&nMTm-tn3x#;;g8G= z571q^zSh`O7aJSx$#8dYvM1VD!E-S)#{;E<!GeXQIS4Q>Z;!N$6yWolw>JK9<|t4& zx9{HS>TXX-P4Wxyq0n6H97xa<oHo-4me!UeJLDxM=nl#%N+u>pL0o+jsAx|C=kMw1 zO0c$7)iFCRqP&|&fP-I(=YraC8MD)B<h{HSJ9*CTJaB3!*NLP2r%s)}AR-|ttE{4H zZ0zpmn~|FnpOOsmnyDoLPcSzjnBzYh(T?OB99YxbtpBU?9w;a&kenRcJm}6;7lM@~ zpv%D6(8$CH&fLSxBeyVbbaL$G{rhOD1)%!w-mc`dWKVBTk^_lgNiZXrf?1QSscsY> zKW~PQ7c4lE9Z_@oxB&E$nUgs=Gx7Y}Z{K|P{>k%ale1G*)fG`OQP32bMs|Rn+tD7p z$<~2n>tN^R;g+7AIWRKRKQdHMS`3^|a&j~!nClrD;PiF%j10`J2zJh(QOK4g8&eCy z)(giI%+PBusjliD8yUMY3&^i-tdC1hqWdsxog9qJOf|4rMRj#41w~OQ=?h{P1;r)B z6_tf$<WGpJX&PAb3Sp72s~qC!Np0qCNk;_LIZsPlIQzxsx8&Cjqj@u5zIr{sc%`(w zI5H~S!^<76)P`hZWo-!$9Y78SH0bDT5BNz<O9n~2etqrJEbhRTAQ(V4_y_m`HYhYQ zVy-UmNu{~?`TOJ-=C*gV<Q3!w1p0ldKt-$3$RPO&iwl-l7XN6{6FBapCl3LHjm`Br zdD#hxap4i65M9Dif`a^EIWaK~`hwQBrp2YX7cXD@QuA=!7vH{wI$hnDtE$U$a<c&c zmy+V4L2zVPhDK`Zss@I7@7%ldv$^YG6>dd$Z)ZzueO6w9y=SZnp1xlY%O_)TMxA^X z<1TC%$R&u|b6j@!DW%;f70ya1$f;>yjqwC~2MWVGAw8qErK!BGHYF=FIw2t{AwDu5 zvFL>OsQ7sBh1`<jp3xCQLw!)}x>yWO8v`7SEMPJEI4p#9tZb}6tv-SN;5#`5`7!Zv zL1DoG!GWO>VV6>qs%opprzbb=-UX81%nA77#m&37M<zyU>TA-oGGgOnqM{?>;$vaC zvbv)6a!X5FQ*~`sNqK2$MJarKGU(2L<Zs;Gc=38O^T?xT&(?0N4~+~owKkPkR~D8Q zA<3l*^TEq<3i668N<mO9tgbBIxG_F6)zI3Uk&_h>6XhG|&+zvR2n~)&j8D(a$t=iE zOiPWqlz?1*JaW}=?O^D&w|2D;PfwzAk`IiJHFvg`R8^*AX9Yw?x_B{cTqvftc6w$6 zO?^WZtgbQ^C#I|^qU{_Qn-LO|#&ce0k0>5PNv2<FH6$lU<H*6uWvP{e^_|mO8SmDe zTc9yoS{p#OWMri!B_}3cii1agDLFACD;+jozTA9uY2oR!CqL66&t5#AnVp3H*40<n zGOdnju+Y}lG&w!Cer*-h$mRByx{v?*WNnw5re?+;|BexuFOS&2`|i<`N8of*GZP@Q zyL&qTy6`zT)C<kRaiQh+zx>n7t(X7e#f!W5?m$!E{-A<pW+!H@OwM1O161FC_~6y+ z*I)D+I03l)r_Ua*U%wHa)k0uo@+dm+YtY3^!bMCXE)wF8DLU>I(Aq6xxLe5Zf~v6& z-qgaGLh}y_yOdl|Rnt8*2qf0l)78{*`DfPL+0oJ8H#RpL6Bi>bEp_Vjsl!K)APSo) z3(-%|a(V`O!4ctgElq>tBOSe6E$yw1Z7pq`mw{mCmKFi<Xl6__u?`CRyAK}#T!E;E zMhE(b`i4gbXXa*Budm%={-E#r%^Pbst}$)><L5eTyz}5b1h88Z@qo6!|LD<;+c%fi zR~J^U&Mhs>EY8m?T$!1ly>fM7X?+dN^0RjH=G^iUoL*aJdrebAbwgcUb0grnYoH%4 z|1xsz?Z_p!w|%_!cILJB4o*&NO*=9&Jq5^b?C8iTE00P^^Noz5`UTp$dYCyl>swlD z8Jmb|7-`r=*S3$1%&kgbDeS!RoN~6h^h~FyT1sSPe#3Zn-NeA;%Fh_@&3A7BBp?HT zc<1Kfxle+Dj9Ff}x^e5~(`Qe=|I%!kAWL4oc@4hs;L!sxkh`coc>M73)5j3*0Wjbb zT4q*2|NG$4;|C8OKZ1?$uRodP=Z7DD1ki%u0l5Wg9}Jd3$@~YJg{D4y_yDcFgBbPw zduZYJ_df+qlhoLT<Jz9Qnw}C?NuuWQ7YPaHb^Te+>+TfA?-VgRu5PbOuprXh-9saz z)3b_d>v~2<0o9|kGo!Q9$dmoY1)#Xe@%-{KTPH^ijF!BTf`XEwypkg5VMV4S22m6q z69o`jSX*7Xew~?H3dxuWcmqlEDgE(g?&DAC0$;p*{QUWYCrGAy*bb{NUcdfHZmG9# ze<hjPmNc43=D|nIbg|65gv>0-k00H7faC{9Q@O#Jtlhc=r!>F1GPO8AIY0NwHuJ~9 zm431-Gt6tB`EowN+3B|a{^GiZq`bn=q*Ra4NGET9D=Lyv9b=3a##q?~m0ekX&^@un zuR%Q^if1{c!L8(^@1AKNRuz%oSKK<Ye)Ik>ef0y+n)%${!E=X&KlJGT%a+dZRj=gB z=M4gQR9r7wUJ@~n6ETYwF^%3YV!TJtV3)8ln<`bygkV9YQUgLlFQsIaRkZZ<BDwRJ 
znRK=$R6;YRp&2*&$HxlF%3QrX&54%AW~RpGn+B1unU#f$8!b5_y>p;<@!I;@$F#5; zKTZ7oMc5;0)P9EZpC(g#^@f>4?akXS0v>ipdySuDNq&kP2+fZSCx8MT!TimeD;pb# zOI}+C<X>4?o?cu;G9xq7Wo~9X|G50+q_NEZ4oprobadob*CgcR1;r=PgTw7TeJsc{ z10qRX-$+>3B`l}w){}3uYX-UGNb=SxrxYF7PAK!JQH;HEZ9^*3Du>>_eYXwNzp$*{ zdK8e+C2bkWso(-=7dDL+!bicvVHpz3SuDW*-<0VxdRBNl2M30qZ*&}xRb_K4^x&J1 zd+y5CtoNU6XlkOd{c>t{R#bdkSaeiaOf*<HVll7<w%0Z_j7&{16Cz(<{b}mJ&4kEI z*gyPf_P~$G-;xLU^_wrI%lw!k_Vp)vXQm6^Y~<1N=P25lBo8jOnMMli+MPQn;8$;c z2K?;u()8j23iw}2fcvwopr4XVbqo%|9Rse2O3Q>BNc9i4rF)q<l65VJa##Zq6aV7o z$w%M5^@*?CCvB}pF2c~O4vCqvo>D)l%P{oJrAAfXee`@AsDEKux_*xoQmO5d%Ax3T z&M;I6A9=wz985shkL{ce!2bVJ_Bf$UHn6k;dE*uo9+8rsTUp)KhluuH%94q$f?put z&h+=T_jR}T_aL8r-LTp{IM6pXIyO5qeRXkxnFpMiC3b6C<a>ZUP#OS5G!OVMrM!Ro z%Xz>*;hZUoKJNvR23iBV&!l$*@DCq=m)*E~@3X4e!tl&I;LL=`^Yfr;elyYePwxYh z<U2b-5GLmrg(N0}uyJJgTT$GMZ5?#*X6G~rn$D?%S8m*W_F9`1!6|Knajn#LuNE{2 z=9MsGJ*{~H=Vj=b;}ThQ>%r4)to~Ky!w)|iQB!qj83$CTC$K(3CK2b2LeCilpEn9S zC`DvFrSY$740eo%Hl9Fmra1cq1jHt$6qZ1o5AyYk0GPbGIJ>ekf9)C=ET9P`KMDW@ z2D8h{tM~4&-nn;WWo2$<8NvLuYk;w}f5--Y^TC5Vh|YNo;6$@$Zk0O!i2r}IpH$lv z@K5i5rgs25qTvxqbMO8qNdpdu0Dc_-Jdy!?X=w^beqnxe?#l1L`A?R+hKDO#T64-P z<Fj-9V-s8hL+v~m=1vrSqOGQ(@lj1OIih-D<MH&`{j*xM6KXV^SFN&Ro`9ZzU}guO zu>Rijx)N3q1|B(HiS_T^qhaZ`UjGY<Ygnl)F`5tK$*tllXdHglAV@$z2tH3^y*Z_A z|4qy9Zx~-C19KCSJ&Dfn42z1*%qgs?>m48aVuJgI=7yxCME?Lku;i4awyuuZ^|h&` zCA0!8l$MuhYN}fiEhiTjuB@(ta`>1Z`LoE`s&kmI-&FVrT%SGrO#9Ejg#Q;Wx1xPh z#xVWm6TPDiLg0YvnXL?uN*Xwyg>{7C!9%A|RdeOHQ{Hdg2%kgKQ%yZR5WS`ql|-gy zFv6qBK7rOWcf7rmF2PDp$M~48PjXq`#-o>It>auW)>5X?I7XcqK1S9eym|b(XGAgk zNws~lHfm0Zy7bJX(%x;X{#B)|Z|0ny?^%NYRyk)tyMTV+Y2AQRI6p9cUH^Z_2K);) zL=fj-U}0(LLZkQxLm!=9RNB<t{WDcFu`oY4G0x9_V$YsEyP1WBm4%CoQ(9WGxvdpV zAUkz+Q9$6#*WY}-W5*5~TWcWvD=SMtrzqPqMb5_0GJ}8AIZW6;esurIW9HD!Cg<O0 z8tMYiU%meB!w=tm|KS0U`Nw}f{5U?tR5Fk6eMCD-?>`G#H#Izy;h8cXQ8iNw^XOO& zI4qhB`gdxZ?&0C8Rv`JR#N2$iX-M?y<!j+WGqkbCnwpDfnJSUuTZWb%zI^K#kh@>X zTEnGSn_k6_^|cKuZ5&$bymHIbK8)>zDz`dK-6_$4o>|vDy$#pDq`d#|gS>gjS%bjc zG7h|0-!pmvCjsp`J}0!j`7vI5bprlHFJiyG55~mI)ZWS7)5kj^CO#{-tf8rYe0(d_ zqnJmNv9hr21<dc<xf7P*laG(Lp}7&zGBPuL=<p!`{MTQ9t&PR3-g^MJ0{yVCx(Y}` z2=@B*pNJf$p@fehAB}B4j_RPc$^6fsFRm^}M@5KV6t%Iodj0(mXs57=_Rrk@@E_@Y z{no9|k_I&V`WjQlFU>4%Cc8(+sVb{0%*;(?WTo4Cy5M4eh3%p8@bqMJcQ;D%(dk*f z(eV_&AT)Z_v$D}LH0IZIa0soOSiZY*_t`}~&m&4?oM(-yQ^8q1|Af-vhM~2x&c%W0 zH9;vnL{-Oiy|u{6c(0tf)mz(${YwfhIRB_NgH7IfpPKtAoG(At`?xman3gA>raQN` z_gDC67V}tnBWrzR)^s;oKuAbpN_JUAV`taM%*>~*9L4-m-lKqaV0tMjNp*GA{rmR= z?E~yZMTLPU0roT+Mc~XSNlEd##yS8z*!=qX;<ala??KgXJbVP7V1PgX<UPRi%U5@v zJVj9o3ZDsD#DKt`zkU1i{re}cUOjs8;^DLBX9Z60+_m#>fB!pHTkFRk{}%urRqPL6 zynO!M`zNp8Kxps@>PTyk9^HKK5GDD$PoG`8a~Dxr(AR($+`YSU`!+}~Cg5k`w2MoN zIJr2#+3^hr2S?A)z}CFyumG!b*RE~d*flg*)zSt^J|QPB^im2$uMS>*mK0Ybh+fSs zWU<Cab$rsR25&w2Hn(<=Lz*aI5~;_i6E%s}bV#V~TdwY1DsP{A_x?jeOCQ%6&0V7S zb4CFg&X=6S%0A70xn2HD=^mLoq)z8jqI{#^!l&(hOv{5;)8nWH{fN5jVHGN$X@r1t zo(w5e)5yfc&e7h(8&o|6Wra2M?F0P?=I2n%&kc-^A(-E@d)KaA_V#v<-oCf9Cjr&( z-HXtEXjlk<XK-Syd$7N6WEd&~f3|hBHMh02cXuq^yw%>*84(c%S^|Iv;J<$NURG8* z?A6@T1jygGj}EQe1E4;6^X|&kt2x=3aj{XYZ7sJR-0vS6fF1<tTz2i$)KI&2`{u&R zGSK_8H{W&kbVIXgsmbfN@7#a>62<)N!hBDEZ%=>U+O1pDiwkg+va*tmhmR+&%w}e1 z1O)iOmZsJwh{TZu-%}G|VWDW}#lyp0R$kiHd3kVZ5{=oXmzPpflf8Wy=^1Hofm2IY z2PY@$J30z#>QV}eBU00SqvAjl+PHZlByVYrki0rkfs{D1aP#i-H`=5KE@>Mqy-Jf> zdJMzxOS{}Syk6QqKQMC*jaY-iGuinRSfs2(OoLS%5)zBLwh{Z66cw{T7J28d6kIs5 zzPuW4N7UU8snQOqQg{@dPT;&%eJcz?YB1&`9Kp(p;s)JxXi{o=aamPM>i~1C9@RDg zV_qiD1Lm!5h|72GwzRi#9pD10hb^Y2`1Oa6g@i7!v$L_Xvg+&WfjmBQ<`f$n>)|7Z zgMtI$6P7P`w%>mCqNup=+}SgG_98p(+sAHYML^*S{0s|bW_Z9JxYDlOyAB;Xn4Xq= z5J?WWcQ4r9z3ltgp?wn*Barucx;p#z?}KX4^_)7zUsPOh@7Z%`!Nc8+jg1ZdWp7V9 
zcTNDbmX3~gNJtR0#=^n^SHMj0w_o2tZ|(jA1qC@4W`XuuSy}co-!TT)n7@9#e|%I< zPG&FjJmLT1;$oe>Jw0P%p#8GSDnJAV#V4Udl$I{8#v})<nS~<GkX7A{9$Rzc;mh{1 z<%0_Lf;eA2?>ae~^o#i5g4Qdw{VQc13peh4ez`?W+nJ4DjZ>W}Wf`IElH5PFvW?h3 zUlOt!c1hWNtw7#o5OP?RdPs%BqfF*eaz3Eoa7fwNKDH^odd596Q_~Qr-pS5`;T;(p zmzk4WQ4PT?I&KLvU@Ol9<`*|^00*8veG(uC^sB0-0+@$YkUAjgH8s_*KYDtGxe8D} zz<mG~08ahGLv{7F=qght?LnQ0;^GWg1;V$swu08sjbDHB4MY&7r9}XBsED8v5W8nL zC>&i~9aB@29XmH`!bWJ2n}@rjr|a?Sw;`dyun`(RaDWRc!1)*&8JL@ye)YG%A(hdx z2N7v0X~|Mj65o9D&0c2Xa3J*P>FKG!>cYZ;u<z!1?m^m-kQ8ql7|O4zNzN|{PfqiR zjHL#IkWeLWi75G#>ehUEL1nEIPhP+Cj45N6uvB--2Qe>b7;5gFUq857(Y4q-yt?%* zlq(CXM+9-Zg$((0ycBF>%o*7ap1#-y?Vl{KUcWx2M)^wGj>|lThe_`|3XTWl?YZPg z`=qUnXekk86NIQ{bu)Wi0y2OD&k-0bDX*}!zOid~7^wcUp8tq>puxHIbrE5qUCaU+ ze&ybM0fEz~3t%vsfO&Kk;DkaOXm)gF`r<_~6!6MQ3f|scEUYZBI|Ra@z)MPs0C=b) z2%I^M(b6DVSwIjP9u@*r51XMiX(@@U%yb6BlUWlSfyKtgN>oG`!S>Fb`g*$0-+c%T z4Te?bardBz#p6vtv@0mc`-89v4L&Py262;JJK?lz8)_{r3Bd5M1sLAlgB~0b1lLO< z*`VhsFE0yP2>t~Xz$yGgL$WKXVl%S?ViW1X;f@SH=p2n~?1AJJaK>LNyW-t5X0JVj z=v4&geo})DN?ye=k6)XSP&(8wxLVe^aCPJ7x#m5*6WMtcSY@n*@xk)e(Oz-Y+o1jP z#lbykx3Jkc`xJhCKTa8&19GGTa&`cFFg{H;X?g*_bDki<7lW#LPajbAAXYO=%WK*$ z4@{0vBHCv3DDRQan7?rYFfS%1@)7eAzk+$#OB;*n9~<fH>p{5}Bu#s7&(rU|2ed;S z=nGogTeY+_U|(20f9@=3`)BXIhd39c{>J@>Pz@a{(#7e(<>=|+U>Rxxdf$BbAvib? zC45jgcb~mLG0&9fyJTgg>l^C_#zvr5y7}zo`lH87x9+&Q(U@Gn>*UE3tM~5b738t9 zvBEld*1*IVs4_4S5h0{+;@IEP*$#fBtgMKhn-Sh5t)v{}A0s^4#W&E_&CATm1(ZC- z2$Z}H7sfB8y!YYDw+V$EaG#a!GWEUd1PwyvEF&O#t?F5->tA{E-Opc)eD>noGZ&3m z1hfxg+@&qU<!z#yduO&m`zOnltJjW7+sHa58wZvjRzgS~VS6R#gDNf{bq?qU?$MzC z4dX4YXQOLw0aWko8|V`i7n_-rS5@8E*@eV<ljB?UJYaryeO*ZK0*ZMV87a`lpD+*T zU4QfhS!HSo5cc38%eU{uC&Zyb1!xmwjEjpiFW*sAW<*6rqPqhvgE+bO;@jmLH&$<M zT)TaHeq|XwG_;JxV!nO<L0Cu-T8341H5G7;tgMVrL_NJQ4^b_uUqBj-%*-rYM`YO0 z<OGRi3%$(2LkE#xzcPc?<HwK9tSo21?jJGV(c3+>v;;!#$k8KkG8`QH2?TSpi?g`+ zMbu4n4fJBO^C5JF-q6m&8}1cC@^~{j9h2QEG&7Hkv86j#Z$6PU^ggCa)AOoTb;>`E z@p6xEM29G6*6x4t`_`0<3f7ZqEHXAHbQuzaFcbHTdyk%N<MvOM=+s(=h?>H#tD2-} zE_nxlIz+OE)m@P>91S}AIo#fJx-2SG6+<&)TRS4no#G!1-FIS6enE9DD4f9wW(@mD z&)>W?d3Eu`iQ^~~F&HhN(5;xi_VCH4RUpZ$H*W##ef@k;1_t2oK!pE}UCf9Q+H|M8 zqQGWjXDcl$UcYm9@!C4r%Erd*(jt0j6!QRbP(HhvEyL>LH}7+Ev!JJd3INgJsflnV z=Alh+NnmVnSU9wXh8l9bojaf!H}?U=M-a^O0p>G5VxH$9PiJrU?8-_*Qv)zOMA@Iu z6A2KIjz&ud8<Lbp4+?Ye^0lJ6;q9Do1fsg00hg-Nd86R!&Y4HwzP(i3xnI&s**+VD z&1tNk0x`0p>uPoH5@?%u?|*SF7i~Q%>lw^`Rmy2SA2G8K*T}MM(EiEt?!ynsHIw#< zmzCUd52?8xR-*yhc{Sbnv^@B+-Uq}9EN3vg1&vOs5<%5NP-f@Oa1V)qPCTWsxT^JX zdtcvH%&*+NS5;e$4$&i9tgY4_Jp7FLE!qZ^I*T`Ne!@HeiJhGtT0`yV(IZEW9*&3% z=R1Z77(ioIcIN8s+Y4)}$V>PeHzwz<e8#*s=J9t3=Fyg6^}(y}vU5IT9()tzH;Q>N zG10ke*QS=2b8@p;SrJqN+GAp)Q`3^6KD2=Hd}ekA+ZLXOEe$P=9Go0*Y0x7uPY<!f z%wK%#n;3gX#glylk?~0<7Xu<m+XOFyvG||LbV^t;F#PogFO<y#`PJP()hO6z98`0o zCp0#qB)|UPR~}GFc?&DQ>K<t#j}~3nI8c%h-r7IAjoUw2QfkMJ+9dBMCUfcq@@dck z?Z+@)C$t$SaejQNG?sHZyM&GQikT|v;thzl1ZS$Fm#<fNbYyBqZe?|OV-q}m6!W&W z*7x7MH!#$P<-N${2`m|z>A>Uw^4*&}zXkI<5!wgL&#bP*C&nY=`FnPQs&DORgPvk! 
zdJ28ZYj%10)JgtbNb|caEiLZ7e7$(>+STjV=T}$YzsHVk>Uk_y`{nx&;^Jb6wAr;w zR#y7K>+cdTC7@puK$r|N27*u&^P-}{;AjwTJCmJIEd*i3-Phk`=OS~|z?MyIlbN>Z z5j@=8g9Edxt3y+h;AKzOFhbyaIyW=Ywl+=2*XtR@C}Kp+GQG8I&fj=6=huF(!< zs;F^b^Wfrx7jKeEIzc+BI_BwkR3FvwRI-gNXqg3)2YdPEyI*-a-+udw|Dw^}b2|G~ zT#jQs0rnPNncJZKlVy7C&fkqAzfyGBsYK<)Fiv2-Phfpc>iC|*`JK@V<h*FMOW1hF zMN2+e6KxYyBO5y+mG0sf<QpBAm|KulT7HCyc|a3LT!yz7J3AWy9&iZi8lVMG*`jTJ z!t*HR0pUmt%Y6W8WY2DpC7_%Ea&?V$9lc%9ae(}Q)_`3(IXMG^0@}MfYU`@6tSn!> zv2o_~DP&xoIi52*Gi_mE4$Xqt;NjsJo}93>B%lBRLH@pw2DuK$4G8f28Ri|G?9sRb zeB{o{H|`#E^t~a#Je)&eNg?n#92Z0vo$lH<Iy`^<8tjW6LPJx%pr$UPv?3@rp6266 z4~c*n---gN#!=UT2%+m)O(Kg5Js`Or7=Hf7BWYtFQ1S*o^%7=ryjq@q>FvmHO~=B> z)jPlW=A?;*H~TTAy)rfjG+fW<`N&&_U%T~S8@GS5XuG7aoHN`lWU^1n<`mBNw5~tc z8NGmWh9P{)E_=`8Sfz;U(zfSROmOA|Q+p>{H!oUXC@7rx>|E&Fk?}f~z5DjDqr-pz zQNVD1K^|yeK<`$}Z^p1&Fh6z$(baNtGT-d@28I1m-XkYZo<P52f)Kc*yaZZCzheS# z0xA{}5dy$3-MRxIB#3Gj=I|r<8qJjo+8Z5&KYZj6CnpE9gV^cg>%IQ)kzauC7R;}K zGlFp1&5VwrO%VAY$<QDdFh9Al&^J1Q=1@bz>pgo;o#Kbf0lz>4a5xzVdr$D6fW1zh zI!O%-wRZIY?Wa$)1!hptG5SV{qU{hnIJfcW)pwq;6&%twst!5Y^s2*Z?pSJCN&Eaq zlK<_WMCBCJvmH~~DPpoum2yOjE}-ulnp(dN+CN#wudH%QS^r(aW`~3ghpYpj9eukP zFn``CluMkjTf~H0$%$LhNl3#2XO0Mak~_mAEGj52k%xyHeYI>C^2H4@rXeXQURYd+ zzK#f=0ARrL*I$3FrmlMZ@l#~=o6S{FKIr$keIrB4DoXqI?Sn>kFbmX?k&&9eb`3lt zAkYt}n@Pz#zy9WH5Ey{TD{E^RSsCDQJDGY0>W72|UrIu9q9KjI-_D&J931-kdQ(>y zLBIp@(Pjk&&qJFN^K(;IuPP}ipnU;shwuST9u|<n$?-Ah2p}E-Uxxp~`NL)QkB<&a z&)`k*sAug&3LIqzh#w-=4OAZ@<MHOox`z8zocVNoE7~USKYyK5J9I?Z5&7bkQF{vO zD{L5$P&!;cuv*%&IJ9v4_dJ~sA3mIwGG#rj$*SPMrA9rj?I~vwcJ1~y)&JRI?ils8 zxb*=w`W_j3ZY7GKark+ounWfFLZ;DY_54`{4LIfOPH4Fu)o>HV5cMpqAX0Pi@`HHR zh+wX*t7Bkn1Yq#+bgylwnZI#kYI*bhc>pKF+tb350LljhHn;%jA7s@6Sp^9(_VE?X z`5PN$<)sb|c2-172!|oCt#7Iyn?v5|1&FqFbx>&(_?MNHMR0J?DD&lZAl{btHU~#L zD=SNgCK{R>pyO!o=>nl{X=Ono*;dz9EnL4bHg{!maiOxN67~iC6Brx-f(kw8<l-Wn zvxNo0(b2xMzt_*-2N{GUI}J{ZGY59o(lgQk`5=>k;oAoXYuYYXv|f%$OV>3t(9qUa z)znbOVoj`V2xO|Ejh(icg_@qhA$8|}Q=qv<lmOtTR_}`GdmU1AhR{{cI`yDB-6guN zVQ8(Y`)Y0f@~iKD@5^ImL_c;ug*{SM94ZtZb=PB9ulU@SZP5P7vVQ&cF;xne8vU3y zW1qZ}fL@>wK2pdeQV1U<Y94=7(P^(Jo>!fAUXO7a>mh?98dzC_e05~_F(PB5GP2W4 zD@vPN+lNPnXJ^Oee)6_0;2by;AdUQxWOinDZEbPm#^S~X#FeA7(_3po1*i!qMZeG8 z`uZ-Q6lsKMTVH5Lx1+nD^%3_$Yl7=Bu|4|Pt>CyfW>;6Xz5@&g`ndZ7v<xQ(TR#0@ z1hg6Y3Vq+?RCQZhL3LevQE5U>URX+oPfUVaaD<~zAd%*7LUPnGx70K+Jf~&zFY+`s zVp!wg@~x+@>;v*RWo*?PbFuWw!|Lwpj)^&qGqwFIrS0=mtM`8Y(}_taWaCrZEsEc# zOg^CE!mH(O>YBU_+CN{ii(2<7kVWvJCv?2I6kSA2qfrwxj}tZyXA?H!khMLB^OC>^ z3hMjF8<31`Nle)LdxS=YBqpPS_O+MWk-0(s;YZ@X_|xgRE5G@JyI=YJ>FDpP`LhbY zw<hzoqQR-DmY&{<=GOe`+O(pQxXhf;#AMH~C}g09`Slat&Pmt8TGPNtT-*G=E6_v@ zd@^fC=Qke5=C^Yz*o&J)8u~Vz(+%LqGQ#ruK-H9WE;f&>z5nnB=6Gpr?Pudx+bv|k zs_3|1*_ltv{gk$Q|JeLCX#adMc8ocW^Hm_lA5ftk!!X1x5=G79M9kwQh$(zZ&U*w6 z4yn3Gng%LbM@pOeYvPF}Bzr3=-PtDqg?(~<QEp{5@L~7x@UQ=p?svMw*e^~vaOLOi z%>71dzv5z}Uup{dQP{xbL|b29MN@NLRZVJ9No;0LNMee21Tr$YIZ$Koq-SLVajOj0 z<X@Ft4{OjP^EyYa-tC{gaZZQMr$PZ$BW-z!Ly>I4$gk=_;@0xc#f^u5?1izbE7!Ts zYVWzA$D-iCuIO|~oyMa|^@}OmM(m$2H*Vb%()CudPn5Qb+Ar^PPCrQ8DjAS3VVMM< zoML7y7tI84o~ooMO~*KSGaoIy74!R9x|27;_Mu6siMa(i6;+kZEtmWHnUmUG`2xs) zIvdj1@BW4L=wD}JSAN?^ddJ6_y1OfyTXHL^QwoY=GO~jck{J;()PN9s4{rdxDFS#~ zEZz*_);%iJT?#JL@UouSn>QbSYt6{ulC{&Om8v@BaVU@_2;p&P=-R$8jD)U#?3dk3 zD>t|VFo5~p^7gFq4j^y1RVZ47fNjM7C8eryK+Gt>+`sgqc^I(Md4o_X>r`>eWNDkU zi)OL=K~hRtiWvIpxFj0T)3xlvw9M>`Y)H_>1Mfjd8yI&fHZwcDxU8_IuD+uKV1p<G zgkXQlqMzr<`T67_W4{}vpR~;%n~~$!XR2X-_D_so?(c_)wYaV!tE>VXIU*%JAT|;F zn&KA(lE%V?W@6`vyhUb=SH>A~t2*veaRtD4PH!yTeoBidIi%#I>X4&JDdtwDiI@cY zrnT1%tN_EePh9){_hpUwiR{d;V>_<AOW1gqyu)63drl>%14`sG8ZNJ2z1c?WUs6I6 zDlpE;wqey5i~>0noW%)=($?ux#8g?kY%$|779m4UIXgLv5bJ<^qJI&N9H(VwV?eaA 
zAX6Q@eBA=W{9@uGQ!|qC3V|N0T3avo^&x~m_Y<7|(o{Krh^2oq9QZu~=1cX_X1hm5 z8aul{&wzkOBUb3Ye52y%ArWMs0FpZc9Nd`X02&@7jS|jazZ&H$MOS)MdB@bo%H3yv zNp%P0?JgRH>3P*2RB=6t^Kpr;uk5~B)3@9{z47ABf8>XLdUy|ZUin?(gxzv>dt~i6 z6&+b*NQV^dpKQNW{}+`PFTT}tjPbhEVij0+3g-jytdv!XtZjz0O}e~8t^mekkD&e` z6&HP4l3PqAC9)DvPr?xF^eu@NWSX5jgX$j)iZ}F9N<wxnkYq`HV?$>rDEy)6sb4|k z*dLipX6(<A`7edT1Ct27Lp%cXo?Tv<Tu=mk7F-Bi4lOVg^qY;FCxJ{gAvxd(L@gr| zMVukKI`x0Zx#*MQFHc+thL6Z@J*4b(9?Q`6tmajBKdA0z;a^hFG7G9Er*0xBwq)bx zoj>|GlF};J`BnD{Xz!4=*(GDUOP0hYXa9Ezi^EFx&z`^BhU;HddWL39D9M*9#tc34 z4r#dasJP16Wy_K><w%)|j`=5)UG@kWo!9fW^2-g*Xz@#FvhmBoS~%zvZOoh~wsbGD zZy?A3ROdkG2ZUMK+}hmTjYyr}6HCth$EEnsYyRrE(5HDg!5=$^h8jCNLA~c!)d0OG z=Hy4EX22yd!lOaJLl<W4>S69gF|q}QCu$iQ%VUl9sk{EG99hRPw)pb=)!R=qYX*<0 zQ%-8S8~Qeg7>04FP<3eO={4g3_{NcSzt{@UC;LvR%czlD++!<iJMKUH?U}B#^Xs_< zv=Cu)(fk|0JQMaiq=?^KG(W0n_w>p3tHpn1iOa0B^vaDd8`UDG98jSN=msb{<pc8N z?Q)bH^N+~cvq@NpoA}eB%9G3cqw>0(L-MsuiGT+)M>4>}k>O7d4)>2u0G)Fww*Uyg zsJ0#ya>u{`I)C-gU_bV|gV|pktDL*?dv^IWuPiz)1Zamo2>cPH_nh)72v_5>a>9~Q zL8O7_Qkg?EL@M3f(Z$Hd4j3L|WFmt#*{$aGud-x4=lIm>iJs|=-1_0OI`m^|uKM0} zGM1P2%R4FCCx+(s*7mO;!lrlW>B~2bEq!{Xo}2<&`wq*nAC>36fRod(BiIH6gl6Ov zHZ)%z9GRF~Tff=aJz7!ILHACSQndw!XFH*?Q^fcyNvp4=tiA!XOIm#;VF8#IRI&SU zdsgDVy7)#H+Xt3J7Z0g8B<_=U7Qp!{I~6F{=O{Yl%GsuKTr_8sw3IRp3`}n+yF8O# zHB1T3!J6A>n-L(sv!uA%dw5d=Ll_Y;!3oJR8Cf6|a>^@98=7h_w}XV}A0Ho?ncjro zM<|Z|)9lj}C7*g31nnau%{@I3bd=OLK=gsoJA|t!y+_2j28BBN1lTbJJl@{Pz?y_L zv(Pj&l+iK!cO}=o3NHFCiSZ@FgY&m*yJyd0>BrUFK+;HAB(ljlisD23Gdin!ma4k1 zmUS+y-g)|ItmfwJ`*D|wb@3iN=XC)5`wmLMC)+Vawo~dH0+<8mvFxWc+4xo1`4rfW zDY2Z^+9`_vO49Og5*Da^B>~HpEaz~ZzDe5<{b!WGxN>SlWlZs~3^97Yg7a}rPbG&u zkT)t$g>u%ZJkqv%#m&V{0{l}OJ15pEJLbZ(T8!;nHH`53L|Y(y5WqnAZb4z*2=bGn z)3bm@vzeS<+0xe7)!j8TH~_T2Ikt0U^cU2~=1kYWIg;}W`+kHvb1vDb$=<P1a5{)G zKyek;)`N6UEi8dp6~qj<r*~wmTTr-*Ul2m?G<SkC=ox!v#A>FdYj|Fp^fx8iDJ;Xv zH$T2~czEG<cFmx;q4!xGA3a8$h*9`H1!ob{P}jHy^er+V`PJJ`zBo8}`|g8;lu|o# zxU{P6f%7_ma3Fs+fcgm)))T5MC)M^`(EIwL`M)8=ZiyOT9$aAWIUOs9kPjbz*v98S zsgQlM=rOfEsqF~t<(-acda00$fbfw|$NYmbq}^hs=kyri*=@jPz0)^x>&FOW9~C_# z9dk>(y)%#`iOvAR1R(iD#zIIKlaZa2SC~;!2FkgNN&9VmeL%SoLT+XL-yTK(Qr3gd zUs}!0qJu?fBg4~^(6h7;41l9mx3<A?3#w~DaitU%$7kn)01HV>_Kk{j4~?Yw2ZMSC zZ?!<^-T5QE8!6$84r-A9n>_8bwugOKMO^W~^xA{SoVF91R6aGDfp^_`TmZYglLR5c zIjXMY@_bF-a%soH!i|T&XEgrW^;@YKm3X4xc^Pxgv)aJ<py9y_*p92Po>XT&t%cYb z%w7S^v5O|D85P^G{HK<8@80`FmxkqZGm_gBY~#3;D2G&B6&>?boC{US#i~xA?HzUr z8=u8_7B!9CefD~0{Xy%<nrB3Yx}lkti5Xx92;Yk81|os%6X+fi0nh?~0jVZr=Rt&< zQ&Ck^TUXxHTywd-si&u7aIkl54A^*hX8I#Vf6DKGex3R`-NUEU52IVYw?#vPk(rsn zsVTrY$aP@#rtWTtCg3PQ>~KWzxMZeok4Q-iicf-5LZo{@2=q!2gM%=GbKazPyt%rr zz9^QsLzTWmfg)oTWZ_qoQ$IevcAp+oaahUmtTqGZUd4}Lu*o|~TSZZ08;aYm0N_E= zOe~`>)%*d~{Q*g|dabp6FsGp2FF4&NFa@!IWQJc-WNcn-L-)f++tYOapC~tPKOlOf zr&o`Ar(6cub1J#;sJN<;i`B?Q@TpEI;a8#T5;hSs@J=o7yYuuF5dOsK{fLZ8RReQX zJwpg=jYtk=PA(u7fI@*t=^+uo`2e<vlysn3fFFPm)P7-29WXG2lMNjmEj_*M1O0&5 z-tjTeKLB9x0zmQ?zSW(bfer$g9HL_YIY<|fOeoIl+S@^{Lz|$caw}^xOUr@S6La!o zGjqaI(%}T)G{6mM0mx9KorgD^p}CWb3CRJpnU0yImXWcFuHkVl=l`zY#-&VEBSo9~ z6r@!S4K3Uxc%}0w*oztj8~N6s*9`zk1AuplsV{DuW4=YUFn0CMHgf(`mx0kaOSjad z@)2NrCHn+!HCJABcU8xH4N3{XUW-<79?RG*Vsc#5H7Kp|`hyn}EBAV5Zzh*?8rss7 za0Z&7@GWidc8(TgD#!*vKG`=AbUq_IIv_SNG$}PIEfc^Gng;?&5ZEAl0HqM{Rx~w( z3)HrEfc$Ri?t+jLd;zr3<-UGEbsJb;Uu$n~b9Z-RXIDc<CnzPTU(wuB%EURe1FdD0 zl&2OJU&<|r%gjM#jDKtbgcyK!s(<h&v=hiwG*-o#TVYI0)%6Xfbj&%`sef0b31B_c z9O5kfi)y>CbWg6US%-1U+e?~8>v&Z2Yk7gBk++R?iLNhfnMDELF?s!mAAbC!B)?t$ zZ!7ax*X_MC6H12yvwAd{1DN~eoMbGMF|L*BF2$O(a$#LR77=4kIg*)2`rsV$Equq+ zjoQ9tT4;uv5m8A;AAkh>X=3LHfgzDfw{!P)@(ut+0oa4c5pW1-1m=&;%!Y^(@DD%* z{s+|!Y9z0!1{8Q<bsZqNxUQkNzOjU9$Z}l+bP)N~wSe&4ifRBk@OfHM33MNcfb;BJ 
zh@Hbz(mq2yG}1LF6rxmcJrdo^3i-tc#nj%}(8dmpRW<Yt6>xZ7P4fR#bla~?6~_mv zImY`XH-doAsT-EY`yWwu(R3-&AQy1TJMU9;ktIemlG;j94c|WBHg@eF=d0N+|4GY( zM^8g8RYm9Z#FPwUDQPEky!Rtxm;oA0%xk$-DiBkSC^$h>E2!%gnbW#{|2YtT%jkM) z<%pBtC3U>LGR^>yuV-luYT48QiEwRPJ?uR;!S5Cv?im{C6BXwln;4vs1pE&!09^n` z5$FX#lK@v=$}0qS06c#(ScQ!MZ}1e5=nx~rF0doS%g_km92)ctivmFfhk*XY!OIVV z3u{*oI6CNdW0Hdb(H2J_YMYp98W_po2)vq3yOgNxDs+Br4^ey&DX^%nXRc#>)ryh9 ztLkzd=ZmFR3LAvNwew=UG@Mfa@Ri+Hs}T*qIC}N=ugu-CUH)^1wC~ajTO+dD6G}(S z{YoyHhwW2v;!&c?S*HTwYtkw}=K$feUo_{CwXyceZXaC&sWUKtyJ>hWv1HJk8lhrj zqo`w`VQ7psx74??1<08?k^z218XZ^{xYyYy05m@>Fq9515Ecc@3^Ar(OgyNdz_?2w z!U4%Z+Mg^q0W1mr6%d;M)u1x;8eUA0gM0#<1Eqt2vG??W_!N#}0Z?~x0ik7RjSxH5 z)I!V11XVKpT8>{UyRj(Icr{$l=z6QzMn`0|jV;{?NpBL>_d29Rmb1E~?OJ|V)s0Qg z;ev6HiFaXSK|cV#s%MEgaR)j()3%=Zf3VcF48;}n<TlNcqw7K5AJ+6>mvxXdi_><i z(x8@U)2q~+@_7}Vc8cQ9=(q=_G%Rd9TD|)WRBm<8QgnWgnQNSqv7<cJP)*NJ+teI_ zTu{*vIRf*8sJ5cGf#xT<GweNl92tJ#0svYN-hf`$z)*lO_`*LLYzH@iIuySkvQGeX z9Kh$$g8<Hf(xFWVC4klOBnKlKJ3T8KM6~10f!HBRmC&&|tV#KslG`46@*x!$UQPOW zeP42L(d5eA=HVqRQY5H%0c{4TcLkd?PI)IDbvHSiSlf`w^x8@Eo7U>yrS-epBP#!o zm(tqa=$!V#wmI+QHk@mwfWAMAjQvRr1GGILALCj9v@eA7WfL=FmnNzZ!_q4IK<Z4b z-ESLRFY8!{$nCN4N>eta$zqL_ar&A-`yat?L~?*w67UK5w{&p@<qx2>WqJVwn*d{A zY%qW{bPDJvvc<#O&VxasGr(V<KI{ei0?y5y$e`81p+K(dS=!(TR^VkAV-pbVO1j2E zSUV1N*RK?4yJVfX<n6f>osVnLE!@*vhZe_|?|Q~n3F>-+dRMl~((|en!20f%A|284 z)N)CsCp6_XArWhN=OO_9-qTmx4F6xDj7%=3m2?+0&&HJwTLqMfnTByHxg1cWh~r}b z^T764j~Z3STz*w5>qT=eIa@t)d}-?h#I|G0_iB2Vvg)UTGCQq(^R#UI<qhm)u%=2l zLv?*4(A6M&0RO<oAU}+4?eTU_fLrha&?M$gE(B*PfZ2jfV;ZtdK(I}Le?cASETFy# zlg|x^B#0dV=U6ifXjaS6SY6LhLDy6iOXSydWKpL4yBvA9lr5{2HH)+jFg%tNRn|N< zx^z1<vqcu~e^{A(0q3VfuadS%JfJ{kS8%>y9IQ{z3eW2Ux2@@2F6~%s99+HiaQpqn z{~`t9w8u}L7u56@G*9O=&QN0;fB;YF`m)M79#nRfvq{GS?A@yY`6~9=C)H>yVrB=G z9C1#uId#L+Yxl>N?v`Gjiz^-s%;<EDsWbD+R<jP4Gor}qS}I@-6>)}Y`o^HN0joNu z<~VZ;fUX`mf~B>-m5n~p7Bz#93*aE|FYpworekISyTFdx#wO5%Ks!o$Cepfu3)=Pv zHK;q4sDGDn{92s2Q`Bsii0NKQBCiVBgdAVpJ~_60$2YNB(!}?mq7#T2ElP=|OEIYT zy)yQE+6)zkMEmgSq>8aRrk()--*@HK+jrYTH2>wwhYueXS8r8bo+`dP4|s5js8MrD zI*Ie)P@?dvxy#$6Yr9wLc-8>&l^t@<X)`z^tvIBKBDx+#@2rBx(V>M~jRVW^#r>X% zO|~Hw=Dx+Ij66eXBH&-zgdw3%k;K_a>k#C0@CrJ}3jnHm#_9&f8V1GyW=%tYw22n- ziEL3fFj3VvQPwq9)Wyr<2p4s1&tn~VwWwSw&KwGktkSl-#4L6T8Sg!>$8r|8N6?T< z#@fI>DzmJ+v2VVxd5p-&5W;!zs?j9OVgc$Z4!L~l?)#9z8Bax0yj5UXRN+8bCqnPY z@3@Dz>HWWYdHeReu93y^_PLVwh4_+T$H-b$hf7B>p8FM?kE*-NSR{ee0p#m=)Ton- zBupX>D>$)SG(R9mQnm~M*<9K-(LTOb(LNKDahc#(sOOrYLrv3mNya!QsXJa$vW=0o z47+IJFQo5vR@eQsj++3Mc1FkTls4@ohKiUr?WiW@hz8}5I)z(}azKRw$7PeTV-vSx z6*OWwtHXL)i=AJMonM{xEcUo8QO7bcI=#MiaG|PemJ(hfYwCYQ#re3pn=~N-=U%O3 zo58E@&L-ywv8n_i!ot4<#0+>eigr*nGwb)?fB#`yy>A!5z{cIjEyGB^UvhaKaAM|L zBxe(QRLg@+*5ROvtF*->kU9W+rd2DGG6l5U_laAuh!YO0k`*mO-J;6!>xY{L7V{d$ ze3BZ>y>ry;W2G#@#qdF5CV>~3CSvF>WZ-uO=PjV?eFEcoRKxAC8jV}og+tC^pR^6D zIDu8vbgz&h;GF%WI{R^Dc0NV66DnNiabn6OqDypIVO#I?^`@cap!6n;UDR=P${|Ik zvsiCs`z$2=heOU$P%_eX+-h!OW?_2tY>&j|{N`Cedu7*Raoc=-|MIo_&$iLNUADsh z{rBHrS-aabyavbzkspxRW$s%fZyR?Ei_8{vSd}he76-}!Ft6)X3#LUWmM{)Ku0r9E zuw;|8<WY2xH1@UhN)JkF%&r|OY#vW3?{kl>B6wzE>|!MGfr5J8Co%4aR4JSaj{D{8 z*`#gvND_BmwAd+Xx=Yw(kFYVjh%u+A@ev6#F%>%v1G+6WCMu<>qIJBXe<8WN&psee zkq~rJ%k`k5<8d`t8OuZ+_bNCUSz_u@HFs7Sd*F6)LWHhcmTO#NO4WEp*J1^N_Bo*U zv8#99A}Q&&0l!_oP~N@&{>t?SHGRu~{KA&m=)wW#s5&ij${9UBZWS7*qRTN&Pborz z7PU;zs}=|rz^_RuktZe#>-ry2aO998?gRYG+X-M?0itH~6mnRJSA11KN@H+(b7)2j zSa51%XxgTQq|^r{)%Yh=g{IbrXEa9Vw8Z7N#}sr1q&7GP7wS64Ng4a{1EniD9aEzT z=?BVMCBxzL7`2LaStl_J4tX-7!7!fER?!ep_@{Sd)=dG_tC$@YX!zlU+s|HoxBXbR z%WsY9{P_6A>w&qOwf!qqJ;+=}K%4G~%|>4N(pHhjwY@kL$p;i&1aSWHHfbR8wcRQK z{02VtdI<TKC=ioH^@C2R(s<<T!3Fk95f8}O9a45Wp+P;1rHdIbWbl3pgg_O`U{zu$ 
zn2J@15+PUy?=NQPeF5hQOb!U=k+(mj=zLtw?E=nE*76dDT8i_m(RQm+b;=ht3<tTz zF6+Rp<|=3$tm&L=7g`mP(+ho5Y5P2!EeLqz<(bj7htFPp8mHPW+vWF^7q8yVU4Pg% zb`AaA5HM#-^>{!=r%iA<#HZ&C{P?sy530NKsJflh_7*dVmbb~!AcOd+0sI^IHs~?C z08rYBHfgd}m!t^s5@xX%O(F$#{e|=bg!BT1^n&0MR>h3N;9pQd#`2PaO`4ixt`?;f z=TWWWUajF$EN7E`UN;ED6?8N_YHoZOPjRzQ1NR))xQ57r{^ZKhr1Fv2qJh|=!L;g$ z+P<anrMpjFzWMli<aXIEf0Xn8*N;E`xbg5s$K-W@eMJ{C-wRk#+gxnP5Iv#E)TdC* zAyL>kh)>&#Tb0J8L_MhL#*bkL>W4^}$Abb<cPZ9#t-vv(0MI1*-bjzIY2J0P3Vnng zGeCg;LW5GOWS1>z9)CeEm`}s=fD(m6fy}MydK}9TGY!R%lZk=l5NLqwrB#oIWOup6 z)j35}lOt;wNo@s9lgl?B{Rd`V+AiDWm&$`@uLiGd0Fg4K@**I=ZU7nT$!nU9EgAMn zy=)y+fup4>SVs#P2cEz(4yn@*sL;5TsoW~A&=(xh@BkR|Yx|ti@jr$2MN9`ieRwrI zc{M%3Q@E9B2b8H?$`pjmwY>z5gA{FJ_1&`_B5H!Ndy*=~0Ov7<eID`k1fP5zY6^yu zV&IWY3aLma?wh!}u}$pT<)0+=A;?|5{iJ_(6Z|FZ3lKA+;s;bT@IUD4)avo*qCrqW zwAcpQ;Br$&fi5jw%juE|DOR2sDQgiSO$d`V50xQ=%UMM#*~Vx%UV;jE?*iN4O1Jo? zfUNGw!hx9L!N|NGzoaHwL<P}159=5&XAvfb4-&x#OIk)?Dar0}HMw=ei`VaMEB5X3 zf2O?u{=>$D=c9|a+eX*RI~G!_#zS*@0q6c{mjlw<gR{G$3;GhvhSI9XGHNICn`fXq z00XBeXqnA#Ml7>-BBgRPt!gwfrz<d}#UrNLA)o+HOV+Rol``}>i=p$XQh1ce+{!N8 zYP7>z?q~FTWGutYJhKuCI~T8Q|AKG3{Qt@SFW<adzWKOycsad#1TgO$QDx>^sOy@c z;*cP36D?^GE@2)bW)diD=zr13Us&JgoQ~%iZFhh)m%KfPEQvW#PuL}5x?30wzf08o zn~RparENJB9FMBgF6c5aq$p}gVNw0S(zQDue%PKXW4rv{Ro=dP|KRD%<&FE}3)h=^ zXY*?MV{+R(VnCl{={v_O5<+D00g^_(V)_gbT~8q$_Y2ze^BC9j7&mDHhN_uAo*YXJ z%nnYd$S!Sf=olZGyt;As;kH)aF8^8kgC9W=eUBQ%1)KKnJ!0>--*Vh8+hx0Km+i7$ zw##<eF56|hY?m*W{|&?X*meqKZe(+Ga%Ev{3T19&Z(?c+H83DBAa7!73Ntr1Ffs~d zZe(v_Y6>(rATS_rVrmLJJRmPjWo~D5XdpB=ATS_OATLyTaAhDbSWjYVWn*+8FH?15 zba`-PATLR6VP|C^FIQ<~bZ8(kH#HzHNM&hfXmkoOHaH+JLvL(va#L_&V`U&UATS_O zATLR7bY*gIZ){<7Wgv8Nb!8wgQ%zxWYalf?ATS_OATL5`baPN;azk%zaBps9ZgdJb zATLH~Y;<LEATLI2VRU6gWn*t-WgtF23UhRFWnpa!c-rlKO|KnSb{)W5=hXWoS=E0g zf1%!!S}jo`MUjs+2uZ^OXA&UOjEqcu4k@WyBwPHF$hvpZ3NkQYbvuFEa$t8OLm3GS zHykOWSem4zz!`Mv>zuRq+WWi?V%!1<+>48c_wKvjwd<U{*IsMw-~DcP*<E&*-DP*# zU3Qn<Wp~+KmcD%VzyHtw_@Dpl|NHv&H{T7{Z~LF$|8t&axPJ57@!7ubfBEM=)j$6B z>u<lG-`Ka`O;0;qzx|%v{{8&szX^BvmfT_fT;Hdk-;n2-uivpJOt11?da7^v{n^`~ z59IFzf0>?exaHg5{?qn?;pNZ2tWSJ>;>Mr!m-)}Caq!=&zcKu||L(KmR%dRl`)jyc z|6lfyD%>8g<8ykr@$l;7{z-~k>%*&$s?+{eJb55}_4)SxNro$Xp80Jbz5P?$4o@{b zVgC;MpSJ(A-Pmwr|8q6G^UkC@Y|Q-M>Biw9(>>!u$a4;Nv+<Xf-7Q|@Gg$J3<f%IT z<Q;uEde8W_Z$JE-?FPeX_?{aj7|8cL|EnU0dklv8!;TLhk5P)2IT)%G08R_JF|MvY zo<8y%42`3qqJ%^PQJ3EE;oFZ^>>v(ba@lU^0V;-I7=AC?c;JRmvEdjM{3sOcUtPGS z;GqX`3PloK^Yg&(@%_-->3?ZJPCvIB44;1)PB$OF+xR`lpYdF-(v8DI_TGIOKT#Er zI}3&>LOfXwe0AdEsT~jE;TR14M?M;Zp^h(2PW<YGJ_$sZiyxs6hIrc2Z#aC<=>)Xo z&;AHnepd3eB?W-xO9ZYz(^Iu+^_X7B@;5L=qY1wh;>rVTe)2mH|9aco#C$q8<cn?Q zsF(wwaj@V|hiUKKXYoDL6@wha>B;Ge;ncJK;OgV)BOhITSf!|&1C<1Ti-(?W9P!%U zayV`ihM#CQvs_!T3m9^AR_%1>F024uJihegwS4Ii+sbtGJNgoqg<o8YhF|(cECzt# zALP!qw+Rvq^i-wm16e3MJvaBpoS~S5As)CnoMJ51{9w&%1&@a@7(&j#Rx0|sDPpVi z4q^9kM{hrxmi_ryobffB2Y|U1&*F4kR`J@>fa#`F-m);*(5=VLThE5<CM;%RXiZNt zZc<&04I@$%%Ttuw+Ybi14$p}hiv%x@#e5xuVg6jT2o-VEQu-lw{9&7zKJw8N42_q& zfXs3$EmNJgiIlmu!*^bg^RWoueE9x{Q=SA#M^|qX4uloSLp<WJ^0Nf8UBiMfo%=Rz zA{|EQ+5CP;3c4Aexx2gdV4x)fPZd(O=E)9{w*I}4K!#v&vJ&C-2)UgH!@*chn2<Aw z-68A+P0BjLMivvBd0iTVK{*5k$>BqemsSS4L0g^>8eUn@XIqKdq2MVc%@)dA8=f|F zF$PxJ8#X+l-TF51`Iq9j@V$Os(mh$9)0wzG<_y!fM{9?a(Zv;}p=$+{iYbA#Q_gV8 z3fPJ-IApt#|C+anaK?4v%k1be;)x|$%*iPUI}3gIxLItV0JQwL$7+JCB%PK>KrPu7 z>_xk{aP{x4zvb?hgP{s}c!lRO<PUT0S&<w2l_BnZ)d}q2Ui=xsfwWx2M-8QAyN(GY zIq_N9$jg}+X^w{L;gmD9EMtuO0!Ilts{jb;xbU^2qj#`v+9t{&Kj}ywh$w-j4-~}$ z{BCKwdzMO6v;AO5M=E3pER7_4eabV#K`Dn|nBOWzf#PC9DoyfVh7v#m*+DgdsYnxd z84rk4jEyOQBuV=WAVBd5C4AW`Ai#=ehJ`3j!!H}lR)ek(IF&3SSfbd@IG80bZA2P* zcZmejlakF<GW}~<nIE^631^tzCrdLR3>Zp~_8@vFrv%cHoIyg1usF~RlLpw7K!yX_ 
z(VdsA3~eY7#=0XOrh;Hl6fF#A>n5(@{dIsbQ;ET{KAAt5aG$cBj{cl8lpF>zd}E4> zSpPzIwvRwo{KOaOI$O_aeOHERV#*m{-4=;~*DB5VrKbe4KOGK+YGR5&cB~;cpg9bH zQAbxEmHd1x1W=5yVVlZK)3QCnC=d|9P3Nuub_NY(VcXGV!SbwwmN{lI1oG!!#&uif zkN`4N>7Mib_uqX=s|iwo5J-XH557uAFs={tHnECuG>1KTii@x$)V$T$o1lw)JUSOR zCO}WX;d;gvbHI>kX%PP`hbQa|Le>~UqQTL{%&;IOEk!#GS*Ga2Z!eKExQO%6>@n`n za%-OcwRi1lLAD9Guyb69VA>7Cy<=$^s|hU;<vT@#+eD0t!z+)*1$^p0q=Z3sog%1c zM6E$K6eaVAeA2Sbogv|c=vW#4yJ2g^j9Q3R*Ze-W3rBzcC1i9FH>N!Z)^Z{Tm-Z&a zI^eNZWgSlskmmlFGo(h9*36>CCU2Lp7n*PewF5W-k_86`AZawbtF_V^OqN4Q1BleJ zgG*k$Q_%yP5)WLFR=m1gk^gSLnqXOdMRtT*tnd{tF~(q+<2e%yDq5HVqavWw^vwh~ zT3=__`)qWCq~9haQe?F5lF=Xnqx7}_8BV><LIIsRwd*937Kp|Us*FNQQ5GpJU9FtX z^7KqK{&dU1kTY~)n@C|pdr1K{bmR_BVi6;sVOSoIXE-{MA4n3*+7K0c+v&aa+l1^y z#FM?y8ch9xDEZ1So5&P}R@==y@FJGgX&}aoM)tcH>7heS+CfOz<w{F^Wv(${V9DV= z35JR<`>|~eS!Kl{fz*QmXE<bVG>s(!YyvjYINE|g91}>W=`6MvSsO9dcTf)&%E4uj z6b*)zSSaY~wBpDoiPAL(tU73%#g3R=sB1}9_y!hax%H=mUIqlRf6o=m41MSn&|a12 zN6pbQoxstLD%J$&Rm=$hm|k?j{0KV5voa-z5el#xt3<5`NP58bZW>OO+-=2+4lAO? zwk%F&%t0X1fKiuo<+A;i;WRaX$g<4I0}bP}#PV=rI)&bZ!!@K~AOJmrD${9Ngb(N5 z1au^Q%}P<$<A%6B9CL<<9Z{-)B@sgKs95iB3`yO2X91H6fUM0(tE25O(y;E}o>nYA zR$$Tb4l4p@ZJpe5ieF}x3P8d+<Y>cmw}>;4U|<Jb)PP>&*UjXX=|BVolJKS)h$vqP z&J_)t!+<lGd3UA-8aA&jp(W^tSZoTi`rt)tM>c{@!Mml!EuAe%X>w`~X_o3*swfjJ z4Fbu*GeSZW39;RjGtl+{MS)Z<CHyW*Oa9U#BpAzuNAf{{qvdvx;Jy}1eoKLZb{%;v zz=tWcBm_F(Mt{igv@4OH2oS83aAkbOZQ~3TP>ERYK%p_y({PU=khCS?L;yG~DP^VQ z3WEWFDOx_b938F>2xJ+W3oDK<5l-X!0-m!S0gyX5Tj%3Kz^E0Z4TP;kl0${oSZXCn zOeZJ=K{mBzN=t?Bs+AQq8HiN*r*VT*I}vhE=REmz=e?_sFHqWBL#heV-lDyU*ujK1 z9J`iKoms8W6Lf-3rN>L`(P}LZ!=npIm=&)$pc_uH?uiCz*`vBewh6*oWK9Y^J)@xC zULdy+rjrAbyjWO})l3MnG#Da1MpPvvFRcUtata2^xoU><52rOj?LDQ#L61~OFfMSK zis79~9zR{`Hj<x(U}F}#;9?U?OBz3Sm*zm~gjHx9&%KE$7{*GOFekl~NI;X3v^4{M z6?zlLp;49DTyP76x^-${TaDM$pKc{-69WJ!5hu%63`stb*SM{aesou`lu`kv3Qn^r zNTbtoQ2O?Ad<M)9D@iQj;Nkwvj*g`z%NaQTBvB|UhHN<CKpwvHutFxbB934X1i$mq z+?xRD#D*R()=Fp<NefLIB4s-MEHeyzl|;dhdzaRe7Mhqd>`-dt?WL*-+JsU$Bv=>3 zPS)O-GuWV;s97EAx$;GUn<%n$5ao4qFnGSRIKt-_Jy6b{IU<DEDS<cGg`AlNa4yFl zfeInQZlaZzv@4Nt2FiQkEzXS<7<d_ZSs0wd-R>-*sEVv(IH|SnGl`IKo0wXzU3>=q z5GPS+(jrgdnh}l;y@^u??0rS$*RW-WGW-t+pTQ6%-8uF9d^pZJk^FbDK?$A8LajGj zxsN2alGJiSo>>xRenFP3VmaCy1Nvfmra^qjE0}VI@l$MS#4Wv=DDv|<^&gp{>Xa8R z1aN?+XDyp+jT?lwLcd@|<sO@i0Yl!bc&dO4MT^Tqky?$Nl;y(mV)dRWZDDt1zVR_V z;oT*Kw+z9n6ne8`NOh1-?1x|gI9h`me669}v5Fac6B`rdb)r38@pY+Egq-v1jyOX{ zKwWVrwo%5y8kYsG&z;%<hmOWn29|^vX^G%BYOGc*YWmbDC8X|-=D-tG{?1Y=Yzp>f zdY6bKQxOfLNU<)4*NRPg%ZO~hGb50sQAP3-CIEy?(sOil#u*AyB$9nURce;&=?s_O zT8%|n5>da+ke_8Qo06b7_C=D<786k>3Jcde3vwVUg!+BTB?$GgPUMznvGNLMC<O`G zaRd!7$R7@<yzUfG1t+#G&$Ak~KJ>v5*$XC6kx}{BZnWAEQAb%zM((}Qq|-%<%`8Kf z3%Wa)d?Fh4bRh0779qD(3Wvn2h8s|9P9hr1Xm64eD<A6w;d#OtI-4Z<p2h&{tD>5N zvKppF)gu)IklQrSl>sB7MhzS{-UK%6n&O0orqwcn_$1_hAu?N}V_CtVyS-4(;J6ir z7jTp3jO7_;2r9z0V5WIsu*9QA&gIlMKbUnBo8uS^4Qrb+S{+l1`-D}FF9cf4d6_+9 z2bY}k8!4*dm{n&qva}^Qq0uOFMw4Nx@yXdNk`b4-utjte4EC=@#}G6WF)@hMME^b$ z;kgkC7i2vtTKmJRJZETVT_ckXH3^9c<fB4>Lh*}T<zWOv>I-^~k9^lzH(j>S_2@KB zZiiwVR+e@$HZEkkA@KfhFC?#HSvplO623I{9QS70TQ|ee3L@b_*Iy~(dPm^s4Rda3 zG;~7IO)x;?i1Qh^(sn~t_TjATIN6NT;re3XtSbaRwveLfGggKaLMLT1H(Gws3nHpA zf$22ef1w0YIPid&vBKW5t9T2WFC(DP^pzmNa$}ZZo3QiB@UYUF-L-_X8ZOldFEj;% zL=M$O7>W)fe%h8L_F;?PK!%_SZB_)TA_!7vGlXrZK`g-^>~f7N&!v>IW$9{Zo71?c zwT$w*xP1dQD9P;=*UE`tpd~7*W@HIWg&r{&<eF>~3QdpNbf2jQ(NQT=Jd}k{Kor=c zq!<()Wy=lX0Im#9FA8sp(i@iXbjqBDj_L6cT^!f;$<wJFHsV^}6e8A5Ap+1o_<9DN zq6*v9%$mbNdLZ)zn)3`@%a-8=Ov1*J_D(_yv=3EcPDrf+DSLTCL8k)6Rzr+&N<6cM z8;e=8v$W}jV5Pdd)Hab>2V8ZcS|>%S{16O`11U^bVBb$&@`gFLklqBCLSR>mNd6Jp zni9xiZ9nS@!HX_5QuKw48KY5fC&Y_ndcZc6P^zdhe6kH~j0iAbU@izc$wpGF-{~({ 
zDa2~Rj7%rBp-g-mm@X1<&ixP!5sua%aSDm}iV1krB6Gm23CVG?%wP@n0=lhtLeJ7k z5nMXK3A!~uyiJTdRaprDqahZfLm;&%3j3D}fKCla)f^YUGsQGLI{RbJu$o0Q9xUT3 zktI@8lXu2_Y+(oCt@ME;*k;3d7Y%Ka`l(S>px&)3xCsXEPi*kYD0rv<rPj7Ac#H-| zQ^GSn;+Esm7c|(R5LqY*=585sOGOehY0d!M?RbWgYNAu^0aa5_)fbKg!=s?W_L4X! zTt2Yo9YCimj|ic3Q)u3ds!`{B+=V-+9d)%J(VDY`b}e)niIZT&&Qc&2t$V!Kz{Iem znkW`1El&v>O-Ltd#mzOT-u8nEdqv1C5K%R6G4Q}kM3<<wJR%4rHwQp|EZ1nHXQPcO zE1J;-*SW-%e`-D&&YTntsm^FfV+w{=@+vzuFr;r7>>6NHxR}<GWc%Q@hwUmtV)Rom z2<txw4uQ33F4UTycYj_Pf)6A^#bIf36`H^>!d@W60w~m}k~%|h29u>QAyQEiWgHs8 z5o5_h5oxYfSBF%bYIlgyvVDIBr*c`TXa!2bcvkkM<%;<;=umT|*sb7hvLD#d#0S#R z{iV|U>V+Ysk1K<eJlG6!SY+TS%3zW1@hilLx`-H9U7#Jwv9R4R(xWu6CGL^yC4xcP zttkB_Ck6G<A-0&S3AMUZni|*@9YCj#Q!pT_&E=>OnrT>{GyGIF2hLy%9VNuBG|L66 zNt`A($WWN1Zr6%$5YakVbSl9IOiJ>xv|P0du%okr>sqi<xiUAoy_r}|&~9?0^gwHk zCm-SH6bu>|P74R27Edd~Xyc=gekd}AOEg0$^JMr^lS4+1rRBVcP-AWiae~Y1D&($) zsXo_CpwTWTqCuHV<0^&D%6KXJ+%6b=;45lSuh89@<@g~L(VS98N|$i~0<a<_r9p*_ zBI_t&C`AWZ>RGJ9Y3vLG36UcwG!Y6DlG=yPep4-48B-H`!yQeR8Zp3(`xJAhhBByW z*OCT<*L*ps+9g3)0DZ@m;W!3EBMqjony7iuPVeF1%A=Jaha|Jjb}b-fr8h)JOGIkZ zNS+*GX~h79F6Yu?HAW3>U2$Swm%%p@PI7w@t!2dl1i+nGaYq*U{V;E$oPwc2;$jUZ z<j9RogoiO02Ci2M1{weiWzr>_@vR=rjjD~&hWnT^b{d+@XqX&L#bl(KWxEF48>-GC zleZYn7p}+@GPjrTj#iZsUTrm!)NW{0MaNX`5KE|N4fxfoq?9ojhDyI94zaofxDvf8 z9;ILxys^UaF=TohBN3<YC)aLs52$v-oisGhXD-gAUJaJFBPjqf_OmutAvSboIlEvd zh6+hLP^n}Y5XjZY3$knyjTFDQN@2Dz;G(Z4W_ev`VPl87@pa<PDMPH{nlsI)QvES3 zA2(x(@lIwc(1wdIHsIwO4sn_wW3-_*Ceo!++j=dgS|qk@d23@eoMGAFBTxzYTIV*n z*qbPlqRL4OQ!eA2dYzb5F=q(Dpd#3gv`7#RrNNN;kAW&sDz1#1oC}AjF1sQzV9^Dr z$PcZPQ;MWz(+ag$%v^NEcC>Dxm4Wk=Dvl)A0xz1@fxT-_<454*s5DY#g9gI1!L%RP z(TAx~<rSBn#*z6K=xaTF1Ui-|QsQUZaDbs&>=6San)ZxJHVLtl34k_%ND!g5QkNTa zFx57-dh1i)Wkw9t$}mbS1Ep$GPNI?W8O}TbDn40cq6#~V7^pStc<Bzx^Hn-xptEAR zUhx(~aUSuE&LvdLxN`glfEtXLVxZ0|vA>T^VHZ?kXB;3Mvq=#G%*i1(t_;NV4;tdq zyv5bblRSAk9i?DEF&8#f8&wnP-~Ho1efG=W#OrhN`*eNEZ+{+s!Y{dp=l=El|M?Z+ zQT_E_{M*k9JpdB&aABcf0M3A97&^|7j3Sz+(K72CZ<b076%z8^q_sRuEo^tb0f<DZ zr*z_vpZ?YFUbV|L|Amy-t&)Cx?vE^`!WK0<P~RIWmz(p<-~$<5<sFFeC3lf$AnCth z-b6=&!PiP5;{`(=jR9ZXU@+QvEJL8kWv1830d_d?p#ezHN2+`wrq^*hP@Ipcx&!S~ z5zVR8c6l*h&uiCLHW==Gk(+X2nJMTJlW3Umwt+3f)fo$p{nWKwi}7~UH=e~CpPEW% zRCRGiRSoM2qsP0vF`auww~4F%FAHV<HJ_*1F6PhRq>`~>yS5X-Agc+c$b)Gn)Dc?B z<=9+HLAGR;9b)%^zWJ*e3||!aDK^VFx17qk5`3;D@qz}sm8iltajs<pgG+=no5)vd zuBbZTr6tOyPH;$yBtyQ^D}&`tE0E+YG}n?Z@^svToE4akU{GP3G_cIW;F6414$<jx zuI0vZrc?5LYFs7e*9cRxS2$;QhpY_atW}(yZI)UYG7Kd-UP{8zG8hQEK9N$?)qvrX z2U3O269zVpnd-b6jH_2S7^I<7!PS>{w!Sj>O6Z$1VkrD`S{cGvB1g5BmEtDqcvZq^ z*kt&~!wMvLn&6A0+_V?=ss@8lady-!)XqlY%XvE0$~Y>RK!<K`ad88gUT7ky{8%<Q zah{48=30_q@T!iRCaCNV%`3bz^f`l=ci9={C_Z&9|9bnu03!lf1yPByaq+l>!RUzt zDJPVj12oZg)wE5lQg4DWYUCUob%y4v8w}U(eE}po;u4T17A@!<EoU72N^=R03FjWO z{*(5w4m^PYpr-VRiO*1^($d37%|vf&r12D^-@KA5!yF77Bb?}{yka4PuM@8wI92oL z=_o@Yuk(_J5GMtw1PR(7-yf;+aeAGWwdym}_*1#+6-OY|0gExWT*NNe3r9YQ|HHZs z!LSB+L6J>0Ua(kG<8q(5%4?-ZejW-K0KTHZQ0OBOsbqVxZNk@DH=)+pk9^2!Odwru zW*IkxVGbMdg$}r)3hmuaRNp_`hNj>D_5DBpumAm<SL=HE?|xx#eA-{ljTimTy&Ese z8=p>BJO11oe!lp_g`8Uya}_e26kJ^9JM=ljXt}uw2&C6|@=|9<wJ^!KrHBK|K#<jV zG9i(n4Hyi1SBnby!FbB&MSre_dC)?AjPn;VN;Gwvj~vZISI+d#$=tvnoZkC4AO6QJ zZxc=!J5wwX(>6NnBhA8Y*8+2v9WF3vV#~<>IHSt4@yBvrp39m3G4O$p)TxvK-<W~S zJefu}uAvs|nUPyckO<#PWi(_hCcLY}@3=z#(VzU;QcNdTDFZ$*SEnJp7iLt2V32bn zFcLCkY=0Q%O@wMf)6RH#QYW;taoSuhg=x?C9ced|@3uF(BPBgk6sO7t*Iq&Eb(!Le zMN5DCAN|Sa+b6fM=v6AghD~`OIm5H)tS)*P%<~ym(TbVE#YWYJB#_A-yy9PoQ2LYn zq){-gTH1XyOqHW}%P7wvXn1IhncElS7Qn6W@*9Wj0{p9czgUXT;LjOVRx!KR`AAJ1 z$O$6TIS-lPhEnf;INJrs-h|7Kz*C1I;;bAV$&SrNWTWvM<V$j{fog<UAqUJ=f@Ch1 zRyCrsqh9!v`@?si{NaQDy!~w=#~6Vbm-toehZc6hw3gCrR`xv=>pvq*L4}n)h_dAu 
z?uDU*%DQ*j&>XRPXmU=i!Ufoz3Qob;R`P^B&vsZso0hNF?oVI!-GzD+)Xx(JOz6bv z`Xo-gCBoZ=`-eBMt0G&5(%x(uSms5+3kwTQ7SOmZPUG=94x;L`@CpkkmvOeo4TZ8S zRP{Qy6krp$a;^x~>An4HPt9_kPFh0VxJozhbfi)BH2NTk-9gFIXh8ws!{OB@mV-{J zJqYz`0P_}wEYYZgY@|TXYnJ!`w?dPYw#j^@FQHztxaH&y3Ab|Fe1G}1`>VzH44E$h z<ZQL#-IvTdid$<jXp$#;4eMi8C+H9u=UUF>mPQqGDK^4jC#VfX0JAA&9$&3lTMML1 z?oH>g_nm-ld}gSJgM662+~wCl*t_{6qSH%}TU^FJ-0@6_ooK`qbX~(-S~1X&c&*6f z!5@U)L{wA>`lUp(wTz3pQa6r+44ykiy1dp~S_Uaas~qA<1cnY8!Tbx<-#mPTORvAb zcjFU7Jm{8bRORT<75C_-dq)h0f=;4oV0JmU0C|BR&HoB2?Abe-s%#*Hs6#AYT459p zC0ywvGx&0?7{_s!V>qe3Pz!qDb>lQc8!x7KcrmTR^}B-|*Mt@6SS_fi@|x66fOTx* zE!9mZ2Je`L5&I5+;9@m#%m<b!7Ndd}0W*wXKCj|n%y${-?7d}FBwv>Hi#vro6z=Zs z4u!kByA|$E;qLD4E`__h7w+y_m-=`2ywlw?bLU<6+g&7(!H%;s^X$mA_KA2-Jip^X z15Qew?9t1o-e6cP1X(^T9=~!k1IJ5d;~`OyQC4R*bfjN-Es>^b_n5CA{L{C-TFMrU zq<9WsKAg>e%YT}(A2D%+9-DN~4f&M?;^Q90to*lhn-X*pNFxpbG72~H42mav(Scjp z^rB6iQ}--YJ*Q5-3BKS^v48+8qzL2<p4rw!Z5&CERbo7?7<f)F|68P4&ml@H<W!d7 zbqi`pqhz4uAf@fGihu}6F`g8H61f*1hWeSoBj@1jeMuf%YRn-jx@wpv_Q-;`;hqS8 zZ9PlHlpBA3qFf+gnN<)o#nAEwxiMj3;veHU{6#8hrltPrW5N7vr_5RM%SE!$AT)~U zy?L8BLCQ{TIh6=@R67KOV5YWV8#>>a^vX#%M$h~Tx+9AVeR*&?@&=K%28xnoE~ba( z9NS`rzyrDNRV~Vs9Y2Ki?lKW_UclV)gpO|+PbVQ52!zR6U(JJMlnho6Pip5_wtNMY z@t^VK+IUP{FpK<@N!@1YINn1S=;C-m>LT^HPm?4zC}S4*BxFR7AedB{DaU6?ph1g6 zqE}v`jg=2pAjlA}c@R&RM}gi*Ii<Z<csMb|he8$$tVwntzZGVirRk<gF5tp6jW&be z{5~WlIZa5?ao1_(RT|NRQ67==D3?FU9uU8`Db&ecHIl?=#)B%AE<NCTRye!VE5|8R zueEH*0sax6sFYabyNmib$1fC@u2Jo}P6h_e5SMO^E2gspzS(sjU_qfrY==oc5sC=s z)-9dKVGBMdqRgSaz~8_^+qvDo#=&SKw;%V$z;^y+O|VvpDhG84qZA2SgIt#hu3yo9 z4{A3tSIYuNX56pY4kKs@psuej+k4-90^7%W+&E5&ZA>6VM`6zuQ$L77S|fX|93aYt z9%kig*t66o;h<pfRXu57-8cMf!)I|VOu_JihAA}YP8&hRMr=h;x~Zg!L&_%qCniip zY#td!0t0>1fpOdWyEDKESzBh*x9bB%aOSQwhU4PlTN*5I{}i^O{=E{~Rt!*hXWEW; zU(X99?9`MLpZ1?4Q*W=Mxap<)WzrM<V8g5MFpHVUAdVxK#tHIYR``yJGd7Aw&O07> z$g6m}9tWa=mGs-Sc~ZQtW+z8Ud0t0#c%*(GU!a~ts>uAt#y!7rR``vgW8JJ=bc?aC zLt|0ZuXDRmuz0>oFOyuP*J$AnI5jNJsv(Tmrt^9Rr+ve2!i#_+VCl4NITgIXrh|xq ziFtcakJ%S|-*G&f+)2r^=pxlMbGRT0{xkHIOR7fain-+%@VOqDQaW8N!Ffg?dXC2B zj_HshXWp79j$&WX{w=NZ!!%1%C_ehRkrD0EgWk)<tx3udwJ!MXgkAwq>L3&H8*R}; z<c1qv4{g}`GkawyosUQcv&@=33s@9SYH(FZ6eX+kSvilX&vJurR{QiEJ^LLu+Eyi+ zZHVX9fwxA-_o|6uZ1mkjb9#)@F2Q8t)`9U1BA13vAfYrJ&`I3c(}$O1_hd<zo4}T* z-1aB?i~Z5-{U1j%oNEl4<FA`rUyPdliJJm`Pq3~$ar_>QHL2cGdM0RssM7DDP2}pm zUFx`+xZT^<P}=0fz<w>a{jqnhjk6nPcRH=y6e{yR>)m$S3m0G&Sp~l^y3AbUw>^W! zo7f@N9k%#JWKjTp+W!+lg?t1$*&<p(I3llD-a991pBXVDuCirD4=bjAfF3+e!Q1w% z27h5s`5Rw|zul-Vg_m!#WE3G-BRb5a*HhXKE3f>Ot69GSr1~{PkvE%XddJD6d4D~) zo(cN_1SMNOflK+27^SAsCBeJ**9P&x%wk7LezqLa$|+w=HoF1Z<2$&K2Oj!EuJ)Ll z)5B-oC!)0qWh9y3eqe&uEUngG*grhRo=moM91aUOU3apVuv}AU7DkcbNjF0Qx=5bC z85|l65y07_uuo4Y@)?gtsw9f0OvbHdFjrxQ<n3;Zy5sc3e9IY}u?7gO(M((t{kl!6 z8KlUEAN=y{dQSOt+OI>Rzef`b<X{c!jUI+3a|HF}aEd12u^o?m6X`b1&GjmZ5ny&w z$}5F273ayMHK3i@&3JnZ0W@B!)tBZ;<NTQ3H@_o!t3@_V1)s!*@f>&BndRAwV>Zxs z%r=2e?It7_OG&HQmMqV&1$mGSr;$@EgQUSM`mzrYfevwA7(8}EyAlzQM2%%JBgg20 zE?5TuUJDoW!-nUVrV%NLl#6fX7WLlU!R%The*Gg6=6J2lcNTm^dxqg9fV*0faWeg6 zno#GgoWv{4P&t|?ZHG$u^6ATYoQ@|ss%FDCba3prD{$4anUdyJZ49tE)+=z^`VMp& z!aKs9kl?1T-xP+<)QZ@o1qbFI9ZXE!q!z+Fro-qePdr}d<fXsNpqt^(?2>$UHL5L5 zKa{8)+`;t(8STWD1OEk9!aS4R+pio{W8~2_awKaC!(^W)15SV{0dDgfnIkqKKG*Q6 zNqfsTq$0gFbup|P=r&w4kQhJyC!dS0C3>O=+G2A<vc+8t9rK=e9i$!&gIo=xVL&U2 z!;g`(rs|iOT6BM3Z`IQ080$)j!JMvxepWB+ptg#rDlQNd#A<!RHZ=rmi#ek$EL$(3 z;V_wkA-4*Hg8eM*ohb=&>n=_%I>T7GbZrxMu`fH*;!oB19if5P`Y$u~ctl-qVmO+~ zDFQT*S%elR4G#~MyjeWIHrgh|e8{N<D2|bH1kf~!IKVEgn|YJ?a$~39HIS@}?M4B% zL1gt}qPV#Qv)I(=0^oXucFDf?24f`>34X}m?URiTL80G=U}avP;;A7?MU-@jicht? 
zmC3^i6)lugF<S+2Ix++39P8x@3&?(=k@AYk6Qg*}>7!>|dc)_XzpUp(1pvw)4hdOq zyqM1Z+=JG4OBG*-I{w1ZB+%Hw08{0D)Bkb)rCXS-nhp6_(|u!vD$;Rdr#X9H<vl}- z)^Hh+6hQ2xloW(NANp~BwTndOggG?|+FE*&fD-{Ud=kC9^x_UHuB!DKX<?-eRaT*T zh_729ON}R>{b;=u%)t8v7uZv1-+@!Aoj*>noB${3Snso~n-1lZUBq7ImUc>BM8=PF zRr!tX1Ll63ynZlaW|lo>EWt&Un4qb|T%Hc1h>346wf^}8tUs;GB1%mR=wM8)=1CK* z7xx(nkV*RqZ`P$Ra5`_;U_(3K+q;A*c8Ihqw!uJ<Y`KyEVySJ{l@!Poz4P$u?PB}+ zA>HYK{0nOb4Ty-o3NK+6&I7n0P+IL0<8xZ+4(R)2wpM`k)=Pc6*xC+3Q%VKbst}iR zS;->vo5dMP8DR7Xdpnst^2%v|##zx1c?ATZbXu_=doJbT%*eQdNpZAK5Jbm@__~#- zC-rMwlml)(1mS>1O73Ho?9n3p(d9Pd7f8eKN%p;2*mI8o)ZXC&9>Y1fLy9J}1#rFj z;&iv#8`T}Jcqw+VZ>6Xj_TO1<x=_>w_>NjO-yd0MV%Pb<%oUCo{#-x)mWY(VrfwuT zT{O<{v1*9E7L-{&iCDaqXp$g8nHtL@&p7G$c+y+etYNDdJu$B<N2w}<L`!AtS}s3S zinpA2CKsiXk`ocV@mB9zkw)Z(his;kA%ndrDNx_nz2<9YPz(t+;^-KD6Qn^Kt!QbZ z-AAb%GgOEm^tk&D8HV5`72C{_?}&|;vfA8lT+Po{S5Z9MI|$^y-DUP-88ieQp$SC3 z6Z7V>lG1J(P@<UdFdXyx&tIRp^fFU6Dt)6dVh132zf)SJQm3VIS5q#BTlOY}SlN8k z=j{s&0=N5Gos9zDYIK2M>a?e1marMyA<`-yd_D=2V3ntkS_KfmVQ5@wdu5gx8kwBD z>LIh;TW+P7&=LeV_QpV1jGgf$KF6d*6f@SLqk`QUtsMBRLNUpw-81Um>G*DqZ{~&3 zuO?gocV-F{g~(k`qofZbJ~ELeeej4$UXOa-IID(sIRL<9;uSK+IpKjl@QAc7QePba zw(MZ-DJZ6jg<oDCD}qUWiBbCDOf&o1U{E=bv3a7AR3w#Tz{<_7tAUt6{91w!Kr>;< z69QHYMsm}4Q=-FiG6c8r#>O9c6n_!geiY@6iJeUUSVK@lu1!}Lmwrc7gv&JB;>cX; ziwKT!an`_98gKVILs1KAlPoxXb>NV}(XUmqp$$Nv`9`E6FQe-AfEs_ty;%ua2>{33 zZ-@-GQzr`}AlRKBBXeXW7gNdeSIVopB=E(oi;kCaS`HC?D`h7oXbm5zaiu=7a*>2f zwhW&|u}qdc6Zra+Ga&P}90*uZb?Hio!FeJ?FYsby4W*T>R`bQtUohBTb>PaJfq^Kc zJQv8mpragXPv6^yrF-!o!gX|9>2pP~C`RGsB(Ld45>#~H%r8dU^Mv7WU^R=>aPkR5 z)o3{K?<AscC`3-Sm@lSh`Mi}pvQX*J_&gcA!t|LiT5>%<!!<@RBDZf~Y}}8d4cfe- zi~5Ue!>3B~0jndJ05;CrhzF~Dm15xb%I3_Jaw@=s9SMd|#*e6+I35#BWI)lJHC$<+ z@Eyh$js962B{4g=i$~FW0wFuKRH1@YUT?VcIF6PCCXlCJo{IHb4$?%j`wbpD-|nr& z!|h@_`P<jdx1r2C=DX3(O7dU;^y_a9EWep<Cb!Ty*Vr_rT07Ifwn05$XZo%m>^b5- zx8ZnoQtfBmoXr}HUs`GrRZPDd*hwz<U{$Z_<rT^wFR1}CVJONw-rCB(EuK+7kicY> zs>QmnrL$9%@}?yPfN#~F^fMW%(5o@QVg(j{8B=1{q-F1|z5+iymSOm2WE-06sOuEK zizm`N%YQ(gHKCZZtyXq)RG$0jZ%=51K=`YAxiLhf%v+)PYsVd*bm}aQS#wDspUbXn zPr7W65=Y)VrBp7MD@h3lym2)~mm009B%g-_$$|3fs9_d@y}zcUx{&nDed%iOGSAsK z9XC11E1zwKZZU<0uRye6L?+Z-#u77wAPDC1Em>och=r9uFK>Sarm--P_IJOjA&Oa^ z=>!a~=m&;ns&%&KKKnp8@nx|~hf%W<7wOpiJ<0gQKp_c*TJBz1eVE`v3sbMCM-Gd} z^s80&U@!%8CU@(jWissk8+S?w{dDOy7-15fO9K#iq;($qT8l6uApuG8OA&2cYCw*0 zHo-K)<4Io8IO447E|>GJ{4tx-`eO!C<rza}>AVeBc6lL`BSir>$o&Am{cz7pJNX$* zDDE>u6MjVHmC7N-ksvxODK{~2(9ZdhL@RJXaUiU8Lx`{Q<0pdK*?xtg7pJDsrxxj| z2MFI5NZ!TVP>T3m^se`rR<YxGLdorOeVK0+zSdcWF+m(I3gi%JXMLYe0M&H+dEDLO zHEpe11i@ds>}Erth!jNM%Bi;XrTMB|N9=LJqSHI`M6(`)GNLpr7OAbBz)8_kPdEI_ zHTuaZGGn&O`YPN;e_#+KpA-Vt4~!Lgx{DbhB~Te;6*V7yBy9brFuwOU3Gx^YBy0<l zWA$pmqn#Y(eXbrd@D-|Pdv@@3@Ko-7($SxEggQsHZizAG0_@6~bMiH-W9b|9r%ehs zrW@nTK4p2-kYb>tsLDQ(mazVkU-L$qu+&52*8q)I!%B+`7Guqu@t#6o&mPsD-}C{> zYEo_Wi6;|tV@))&I2HJ;=~sj(s#+S|c&_-<@I;dBlVr}%U{y`QO#uR{U}IrfqE##X zj>2q>j)-bu>&XbOn+Oh13zwSfJkz^L@aghwZ@wz4#GY9yZsy^VfCy^S7eP~izDG+^ zWn_1|1C*GLyJIg~z`88mH}oQzJsXO3LGt<6W-u$nbT%XE)+%M&^mVxIsK%Tnb+?_K zFE|86vWS+eVpxuDXke&cirR3OgZviCfFDVQj8J&n3@BzFwW&%_tLdB(s%2hzcV}5j z6(o2&k8Wiw`B3q<UxT)$^`8f$>`8G3!fsj>acs1OtgY<*)cIIzwg(uhE&6!0EX@!g zm!7w=aFw6o!LyvLWAgNBkswcylE?8wX~NIR5*`~a;YAj+XJ3LBGxImP>t(aZ$4%oV zQi+O_z*%FpU4+>OR7YcH5{%(tJ4)x}8A}Vz{fwy`vd|DvL>^fkHQJvDqV+TrQt;qR z-q_WQoVMo@a^%}xJ&h|XtDIu8t5-s9lmv-X>r?vz*9~355p{rc2K|%>LO4)UNw*}9 zYQ96`MVDo-R4;#_x}T<keN3YrdB}8#XOKcIMz;Bm@_f@06W`DYT1;r%&XNoxS~Q(+ z_d8>7FS!@Z^{a4po+A_xT>-4G0zbwCr4NQUI!NJ~aX+Alsv(t$B<hbtl3(G~;l91< zDv5Wttn?*hz_PA(c-1YNjGJoVqUFdY?#1#Mg~`f=0E+q`CV?9l_&_ojQW$ytDA{$X zN(NoVe$%W&6r(=Gs3ni;8q_A)#XS-h>L?%s*qsMz`CWW@5_W~uE^aWmzA?5du+a~Z 
zn{L@DN4}fF1CTj(GZJEmmkbVk?dZVq4(jO4J&T!RxcA}XkB7hObDvCwANW&QG3ETG zLnt(~gdiO;%bs}p1?xa2eq!4pxf=A_Cfr6;1WcaA+l{-M{x;fptj8vDdH`p%O4mQ? zM^+qzp`?#fPxXhK@L#F8UoqXmu-Y%=(CS`~3ritclzR@FJ{B5Iu61=v80VV0c1o7G zmU0W$SX5U}Y}ht822IhC%r&lQ8Oy$};yGuHJQl)`OBGG)pa^F7CioUnu61;Vl%XEH zp3_GHv@NFW_?1);<>NNUaw6M&gk#0;NDnL*48}f3iL0wc9cmE^&@r-z;w1>~!CPb? zMj;6%T3vW>6c-9~+P$s|v803)phnkD)WUB-C52DJeTu`d&hYdy+)2nZNK-na%I13< zN;+93gz;~w0vH<WB$=<*tkn=xdDt=Y0@!arqg#>pt_RxMZN@^8)7meG<54sn_wNOZ zo4D@TY)lIDK=VY>mg6?jLyBgy_lC-wgZcS*u?02?5Gqg5RzLw#c$MfY+H+!c`kg;H zn);D}21AIzSGSK^0F<-?Oif%2GOSB}vGCza%Z0=TUBR*{R|Q{qczEn>*?OMjG$!B; zqk<8IZmpA5_?_)s(+zBga&em-bFR`fTrq8;8<vr%ub3<I7RZQpu|7*BdWiWhS#icK zV?yyvWUpP1T*Cc+T%Gi~yZiW?pDfJkdVj9ddFb-KQ~N!7>Ch_eoLse&r8BrW>;0lL z)V0ms`!;xDi+is)d$;*}ocA@D_T9Q|vFo|*@#}!h!`<!Cz{BgjNf#9E?&1-wH%sU0 z5mvU2z~kG=)5F!W*S*Dah6f4{gKLjV@FUl$#VbX7))8iAqPN`dJ=V?le>ai*v!&#J zinL2ce}gizGv)oev*$0r&L3wFJtH0S|8e&Gao_ye`=6aXe|Oycube$h4FAd4^YQ<` z8+-n4<N3Q?=wEF-c>lGH2kM_}Jl(zkACF$VyPVBmAWDFLf`55T0vxmclK3yO{*%j} z{FoU3VP^WvQ1mY;GygF-eVCB`S1JEbW+qx8qwi)0Mxyq5uK$&#>A%k&>c2NJrE7^H z%OU$rwi>Naw2&v3sRk5~ha!XCRV!=22N)&>mQ|7@7NV{i&<bOU$Ov1B2^ZWO2q{kq z#w)@hbW0eiqKg?7`axP$@;=3JM%*urSu;AEk4dLrOt(#Vq;o&4IT#B8_~F$Pt`YE| zcj4VS{DAyP)C^w|Yn>DIl12ppOe)0Aw<nVmh&dxXU6FklOS4BKoa9LEOpYOz<XFNW z`{&0|JSo;USF$y}EgmLcvbBExqsgpH?oZRX{72(Q5JN)@d=yJ##T@qW@?6Ql@K|5_ zk8Nwdqz3h&<B7bjs-7}2Ms;$zGI6V^=CqRIw9~SPbE)#jQUH(~GiRHX^7w*!Ojx)1 ztFUe}Ryw8XlRiIRV5VZMZT!Nx{q<K8{UaTD3qPLP&}z)_Mh7mfHp>kcE@&|Ln%?3E zD!GGsELSH~m1p~a89UEUn2dYj29vNjE6<t?zt^pt)^zT0=AH$k2jvkTtdIYeo?z3K zQYE%RoJd1%9@3W6`Z(JH)V*6d&_?E=jbm#*fPoetN1NYk)|`r_ic-{yqSWGCAEPO0 z#a^7XTgy%E*Ryww(}se)QW!#tq~?V(3~Z7+RIdVPq+76(>7BLu;p=*kvf>pA#-(*m z^piZXu|nLZJZ)uIwh2?#iL_jwl58$`WPTili?n$Z0AbAXVk}0-)g7ej5SZ=*I_J)c zS?$z<>UD&Tv}zGH(SSsdK!T)e!3(@vXe-=0tncYS;VqOG)zMiP_(2MOWQn1~-9oF- ziE+Y0c4kM>uVTKx`H{w5+{l{no3xss#>S&LmX>XSpKJMO02D&Xu<r2g={8VG%d~Y- z&TS*Yz>9I5!_Uy25UNPj2JP+MEyIJSkDWfiQ$<RQu>a#`Wp<_;U;lB4HW(|Qu<P0J z@G&k^zr)oqtML%p+Tgwm#v`psZ-Tkg?@<)h9q3RYT=&jpoq670!4=oztlXiU@g|b* z-J6<BOn6Z&>Xy|Fms9PWa+s%ZG8%;-z9MFU!c5PqyI(#IbV0vnxUHS1ij|-*J!Tp+ za=fnei!7=R4m739aU>sO3`Htqs1;PL`&)C$kJz>?PdibnDhEfu@++#@q{LXUk75bb z<Iw79dqKlHl`UnV263O0=O2s{<Cmt}nAk=J>nB7S%!0zK_foLH)5ec{K`mXrP>t^t zvnyWB>aVzw9!kn6X7X7<=8D%s3adZ#Ina(R(SS*XkEzX3dA{Ye#{*%?yX~Cg$$_XR zn%RoJ9V!8-Cz+U;n!d8}j$*E#gy8p`-LvP0V&i1P3#m2)*TsSd;s5LRYV!(f5p<I9 z-|N?3h3T)VO-IN49~F%0--|Z$-))rts|v>OQ3L<Oj`>eD`=9F9e_yTs$#VHmwfg0^ zw;?b91i){=|00=x3^w%tG5#?B85aJv7_<Lh6yyJ4{Q3W!%UL)5pRuMV`%cCRiZ}aj zL(VS?%y@wRD!D(s{*ws<`#-)X7W#irj)n1W$^E}sSI0HAY;c+oJP)dtI+4ZYX0h!b zm9pU~W|7I^nF4WciD|=#Rv8G?ViNLsyG-skw60@EEr#WD$snmjMc1A(8Hpa=+1^Z^ z?d$=6oBLPM30sg%8n?1%y>n_wP3m}LAKmr}-SFZ{gP!cb6k@zsf-6_OB#@niSA}*g z(w9?uIe0m^d3nCf4#UlkgL(yrI(|PbGU>(w;4U+{8F|^fdF&ckW+M(;QB5nH9X)zd zNfMexySSP&z%0%ML=7D%GQ`+iu_OcUapuX_&D$&+U_VoI00fFo5JnxJ8?<x`Cf&V; z*z-WWOD%%o)+^Ha<?FrG<*bm}xxszcKRP_r>*paH7##Cs;pJ!#x;zNCy4(naiJQ{9 zA_KwzF^x^lFO-%Pnsn$v`;D&SHN?qc{lGh0|7GsZhAvALLWk$+R2QD>>7t4x$r$_H zXLt=HPpazICT#}G)ZP5a5#zgu+nx3EV3|^C4e1wylP6yMwKTX#oSwL(gTwescg)Ev z)Cqc$Qcu3+DDXMawTdrm-1YrT)q$JCA-wvEEc+S6rVV|g%ZQ-DyG|^Rcii`Nyo~@+ zx(?M#KrZt@wX^t-wLNnH<-;7p7Aikkv&sYkNO24zDSs7e;m7V9ju6r)`>X)U&*2L< z0ad1=q&uz1(J-Vp`n1Tym%Jmm0~ikB7di!=fcd~o@f-lLnavpn^(Kd<^43#60z8mx zRPRbliA?3>PeVQCs&yk-WDQ9ZQnw3wexomoDd}*gbD|K)Z1M^4u?soU9~NW=CZ<L) z1YKjH1VLkN*Mhn0lw%Yc(v9j99O74??)@-+v6$gfm&L%u5OPRb&X-dNCAA-KG(2wv zmdDbjm=U@cwJp}j#K^BV^&#WV%kv<SEBTIVvC5+>hwP>|(kI+{+Cj*YyNw6A5*mJ| zG^7LA5m4Kz1qGS2*lEWspk+{TS4pVJTH!Fkwt%V$vzP~n=A@gWqZlS~8MQJBT0t`a z=I*6)52eak7|6kp9VcwpoOvk)HeBkSo<bp-$ArnIppQsgKw9#{f;JPJYtLvme|Kd` 
zC4i-bVYhI?V=bR2>R)l;9kU*qEY>$*q;F+cGP*8Qip`jJ!a36%?zT8_J(ZZVtIv0z zOM>OeA0xE`nA7eShh)iaMh=84IncRI&H3Hg+2P`SmqbfrJt1SWxtYF}$@_cz^?7&y z{`&R!`}6AZa^NdA@?lxo;)M;3%&tutkOVTskQklZA%xm?AL^S7CuiZ|_e3*rj@P|F z&X%4GKVYL@!+8ZHV7>7d;<GNU0Q;Az{AV^7iw7^ZjCVfQ8&AS#uLADbuSb2Hu_g{@ zw`C^U?Op9$EgWe<Te)BJrZy{Z=C4I2R<9@gl*JNY9&6zA0=@6=caO)Pf?r5~md3bM zgWqEkvo^6EG3my|16|w#W-OiD1iOcpoHI3r7o^<Jut}7P#VMbhhbwQ<UFv;vC_8*P zR>%JtkRqU(wE9rvr^~d8&NryiH!qhlPdaX{!BGaB!GA~z*Qa!F5smpkY5-^3gNT@Z zQ^;3=#FYm~a=AF~BiUHGsk>CM`*XI*Z(og)R-H$0fD(V2!0)t<4kb`po$se%Z1k*q zz{(^I3eXu1h62$s{PYmSfYREym6{TSh~tYU#(ljOA=0zx5~`xIL^8FX>^O?uDpic% zG#Pn9?ZHD;!$TCyyc^t?<?u#c)@%I*zY(WaX-2gc(gYXVdZUYTWGi-j&T?FEEH|MU z_Gi0#Ur_hMZ^SFsoI>AzC3AgeEM_XUN24ee8G=DSV_OM91e#I0dTfM3xf3f)B0q0F z8g8{MQU$yNt8!Ah%Hj~}DNdL{*x{s<aG-xz3I^~TtwA20oKOex?}qMWj~9GfpqCtb zr{OPyhobvYF*YMy!76gldg#504=?k@(8yAq*Ct_)P!5YMn`2r3m4?J$rk1}BC(e0o z&<oEXhA7vCFI&G(SPqiPot{0g@D!IS(8az|qVME;1d)SHxF}2#om2=R+D~C{2vgp% zRsFG$@L|?`52DMu&GCy9Ok573NDfSM;4ZxKnnT`q2>-gExGzqm+%_;l2pQ8=Wo)gM zi`hR{0_heO)Wd(q6sw*deA$ZWbLz`-4oP>cuT(9k23epN3FO#z3Wk78aURo`PW93{ z>9ShX9rN6{Y1A?PVVC&YDLLu=YWM21FIj(Mjok%3zUliS-vhQi<Hg<}+_Y4CzYtt2 z6093+I-v=FVPY(&a9gf*4Ql&LG#ld#G1~#ru)J~~3_rD?d#JgWkr}Y^q$<R=Z=AC} zpsv%V#@vpTK%WoPd}iwGmlUxnP>f?lLY^TiKt2KpS1{$)V!SSo&R!wC^pGB=Scfdq z>s<3%#jZd?&Rn)9sbKCjv8&gpIP5;J$^$+_uIy7s3h9YInpVThEe=SyV@l0FD1Zf` z6()(&f}=$cyWL}so(xjye;+$I9R);YQuCd<lv}G7J&Q}lo(n;%7MvO1D;F1+x;(u^ z(j1|vF3SKqRrloUG{F?nW%l;qs5K`12xb}PJbrrwhY^L>X^Df7Rh6W>=mbzmf=uJ^ zlH0F1t#HdS)x%sCeWtq>N&{vUr+h*cdq*Kgw-zZop{ZoxM=e<c`dDDnuzRszr17LQ z8G=r0-&i~xQ5-si8C!|v&{szZe3*hI!Xw);e)tAOE<qcz9HN&Nd92GRg>%KPvGwp7 zQ<pk@#TwhD$AoGCd#PBBYPbhftNdyq=wSAijcTlr?uE`LMpAj|yj~3f?+Ok*%SOb% z3k*iZIstQH7&Rrze9uJKR&KYmTZ3(&HK~LP{nfKSb~ADq2Z(t*kcIVl=;4quhJw8n zUkc2r665(P&JuPU3AA~6@c5BIt{8M}x$_$`bZ!IpDA!>i(iwcm8HPhiO4)6#2o`W= z^hoYd*yGevXY?cJv<T)Q^KNu&J>Ci+dp4CG6mPC%h&@`-VQqJL9f*f?zrGc=aDdMZ z!#hn#q<Wrhplgw3jp|8~p`mor`k_$-%S>z?*{r5o`8tbCEgB{aGfTxVl{(>Y${UHm z*^%|wOryxkK9T#EPB)1`lyG5#IO;&Ta9<Sl49~sQNXY12V;ln<AXq`PIBO;r!zxQZ zmI)Q`Q_)Ir#ZpJC<cU#oeoNTzRkvQRX{=5MclO|VJCE1o6JYxkea^g{iqOrgake2w z5{RQ*OG4BynC+X6$6wfDR?YUyu^a0)6@-332p?)u;VR-|P-zkmu9D&(ce^N2VP(0z zLVssY;aISeaN6s>-_hi(8~e^zTp<A;w>Z2jyI_hi>~3XM?NrVr*p7c@fRe67WXKMj zt3FeJOjXo_!3_&H3_5*gixtcG5F2d;t75oh0hK_5qs5Amix|-WD#gOkQJf<+?03DY zo|MH2LYmQ%FHIUWUTt>W!CvYc?O2Q?=seF{9eK0H%8;xnKUB#u*mRcM5AXO@+NFM$ z#yyUHnULg846+pVUR->(;L=l)*)<D%UXB^59GiaFl0Ael#V!s$%<r)zR5`XxDX4A5 z8LnVH&yr3^(!!dz*>-%p7(BGssVjiZym}J>A$6h8ZjYd){J5inQK=!XsU0giYS$hN z&Yt{Mt5Q>j;2WM(s#9Dv<fUVHuc8v3M6I~)V5n)#I9aq(t1}~3tyo(8yy}-|!|}+$ z5_4(^u!P)MsYq1NUN=^0qZ5ce@}dE4@L(YxJm*#puPU#%N7i@D9^)sZ^Rzcil?>ya zMhS-`|6*<zYVpz*8)f*N7h~vLNc?`tozPI>e3=TwW8Z^s#!8Af9kP26d9@{u=kVE4 z2-AcB4JgyUoMc;AR)4DpSf&eWVC>a%V@o8zXcqb%R%zmsx7#Kyu$yzg+CkhY>eoU0 zl9`P2x6b&Png>YiG7Y*#WU(fv*5CTMlya&l2EIK`2sGL=w=`<FGo*IgI+QwfZs-R} zA8-3LTe&yr`O`DOP*bAid@($%WOF@ZE0)tAJ^{e9a0+l#gvpR2)h<|Da&1DL=Tx#! zziW!jZReQA`$C2<Ktyc*=KZlYU-Z*i(J2|e-?FH?5Sq(3Q4O_MCvQYOdQ1`KM)JJ0 z<yO`8TN4zqQdJ6J+TeC;iN@N&wl4@jcQj8v*ho+UD+Lls=dV;C0v=Xr1}w>%!@Ii! 
zC9ttUB7H4z(_ySe+*v5FIB4|r*zv*d*I!n=CmPXD@m0E;)Ng=CJKJvsbp_gLvqpst zN8iPlD1JzO2f~S-w^^Yv*tOz?T;OhoR=y}-E58PvMrFW72$hp};IHr3n3%_w3{{0k zlfs9-7CH5}ZN!^swSe;KDrv(@Ws|qB+-Gm|JH(99oF-g;RD)j+vixGNApkq>``E3R z;zBkMpNs8U;p6p#GcG{|&dO>UtcLUK8|aln+8S7-Wa5AqA@JaMzcqh)<FeI0@HCTh zpeYahUQI^``rNn5A2cBm<H|+DD%#6SR-kW_C936JG+`_W$B#!<>A6&s)W-bnN0>wB zXYGV5wVD9sJCC&u$?e#bHVKi7m!<-~lvXc-#(Ih+P&MyhMS@o*9Qt>34m2uM_mV0K zCL(*!;gk^hlF|t@O%?syV=Yj>tW**s+7-{5a2c?u&Un1GLfB_K_79@S(qaQdsleP9 zaTQ132r)_LJmk+Lw#zEfCP?RNq({t7@;RPZAmF6iVxH%rcpWLnO#HZnbT<;;Sknel z$7buRaNJj9W{cy?gIXBiW;?&H?z7=&CB&@D^6CIRh<>x)Lu{^c*ek_q-a~ZBpv)M? zDUKgb$ggVCNLd(xz7XC06}_)wo+k060r$m0l;!Q0?J^Ek{u`6!f^XST({{QmCX0X0 z_zwsD1|j!68FTo_w0dJYRuD=Prv;QgrzXKJ_h50@eh4wm_z;Q68`2g91g#~ew^_}S z5HC*nX9umZ;2a>8oQiuD68f)*O4^oX7O_pB9{GN3XB)7w4~E!f6l>2-c0C>?>*4zt z2C_M3!o}1Kh|K*g@a6yl3(lq3?H2xSu|QU!cOPrY)%#Kywz&rBPl310Hz1DOa4ur8 z=uGm8VY#vohZj;j5h19ow)3_}cMQd9a0^p88W)!%%M3QsmvAH-Z8gUaV0@Fuj3|S- z=j6&wOlV@^npa4dt?G%df2&=;2#~6)5kITZDrS6I7p<rAId{QSXj#M4^zvHODiM7< zs7@^$U2=6mn7zKfNh5*Xk27oBgG|%-VW!jst;;TGZ~0ofvwu_pX{WXIcDOA$&2P2o zt_GlGCOA%i>S0t^$T3opbUF%!+sY5?A^m(PHXgraOcaXg3bhM5n@dia?$p3&Asz*l zz0wmh1sa9f0u+dF;~a?<B`c`~QewzDr@zrH*@AxT)a6SG?j;wI&U~@ia>@d1u7k`- z5~v;|B8A02Yp)7;@vLBwhh#M{s@amk1g{hS_-;m1)y8Zyzk@B#bEuj;ZY*x@{1n;| z6IF?6Ojxr<T1hdYgk`9$eT$E|kLdbT9*fReYlq{WC(Tl(2LnnML!r<L$?FqrsA1S{ z+7NEPxD-JNCd>{ez7q!c-N$UcUu1N199(*c#sEY#b^XmOE&RA<Lo6BY!{S(e9WDca zQNo=Ca9;C<y5qGg2c2zUI@(tU(At(P)V9+FddWoFJjywV1$M+k3D_a^<+rkDmKBV& z{XMHZ>qrAK`?ChXS)%2)YjT^$a@z$f1Cl3zhGw9N)pHD^i$Rp4yS8LK=N5Co*9K4! z+Ka^(CCTHcrm5sG1wn7e1mi}p!#V$rkWQiglgZ~D>U{AfAgXbs#BcRMR|79E_KYk~ zBQshy@l8SE5gx+V!~WM#;)H_wnAO<^{WH{?f)_U&CcI;~WLx!`GHL*%6{**HN+e0P z&4-g6yMcPV;cR}SPBfkEXPgioMr(AKQ~bNsk=tRc6^YlzoUil?^3n*qd#gE#_(T0^ z+hZpcQgS+Brd>Iq%bF(4_44t$cwa>&Yi~q7P*uv_Gq7CmOaNa?Gyrx?aH!Uflhs5- zHTL>CG~(R?+SFVRg70xUFR<l^r+xzhz2edRyBPMD5cEecVq#{Y{}09JANaLD(=mUO z@?`(QsLATte=yJR=>M)B$s0M?IN2K*IpDD{{7p|1wE5ttSv!90_#--fOl3$J8Jg({ z*tkGx{CQ7Sc7~4@2CYAMw7;X?{(9v<q&Hc68v_L+M?4KYT3I2HkIy!8`GC3o#c&h& z`&RJpTX8(Cf07LUbXCxS9_nubBo7ZBt+0!usDk4kklUX&Q3rZF`ag=L{KtoX2!|gX z{cFDn8_h?*{@#5N){lVsuk1c%LYO}S#(%UhegyRY2<Sh8zt1Fzu+Y%sG5@21{iA{P zAKDrd4ILE2-==i_eB?jibbopyZuM~tKL~VxW6oK9bd2@S@%cDlA1(BFEPw0hpMU|1 z<!=*VA8+xWj@o}AShD=z2$uiT?9tyf-2V=R$NKlq{i`nfiNgCt;eDd;K2dm|D7;S; z-X{w06NUGQ!uv$weWLI_QFxyyyiXL~CkpQqh4+cV`$XY=qVPUZc%LY|PZZuK3hxtz z_ld&$MB#m+@IFy^pD4Uf6y7Ha?-PahiNgCt;eDd;K2dm|D7;S;-X{w06NUGQ!uv$w zeWLI_QFxyyyiXL~CkpQqh4+cV`$XaW{|SZnmtW_P^XZ>Zcz^kT{^5uEW@K&RXo|=B zcQe)hlB)E<zY{YuGck39qGx9MCzI5F>+JbgFVFws;r;hSxYG_$Ma|{bM)%)WQFar} z59yw*oPL}j&Jfm+=jZ2N0#QGn&%3Ezd;$Qv{W<$ULo7%$&R>+21ma|sraE43&o}(p z_lp%vjVsg}78p05yf#xEu10A|_wv)@<Lu?7N5t95|0(Ed_qy>iI(a|5wV!NAxu-;; z?%3Ir5@c6W(ZA+I40DT>bXTB!X0)i>o6{b(K_|DuusO^)!_sJL`R;vpKXEoViOcKx z_TjB-y5RMCzP%al<|6lAuB~>XY{>3kqki(M#Ek>;6PStlHWMQ>5-PM3_zji6<WRWi zc(fE{$OhT2n?ZYlM|hc7aIt8Ls(FT|bb`8u7_W{p*A}m4+wJ?Ci_xN{)u#M<LjP#5 zh5zj8jYiZ%X4m`CY>|%F6NZ+|i1<@d!b{YB*vKjscJ8C$nb^}EB>P+_+Z<%Glk^ov z24X{DtP{!<q-;$DEB`)zE^XkRW1-@gJyt12r^$J3HNO={tA@L0j=97Wi%V<;EU-lS zhDz@v`AvPNNdct?E+%VSZLG|$IyaMx<uYed(f9n<VI!|QWk-pE+OM&xI#D5ViOJ^# z^m$Z(O(c2w6#<P=tybX%re+>j3D={H)V(Jagjf-R7E0XEfmV#musg#_i#sEg{qZn; ziC+d1wF3_>B}C35)(VGn?^D7mpIYDd3UeA7x9;v^U|j-Mxaz^Q0IGv&WM|tTCSM90 z(QMARyzZZ8tG2y!f0OP-(MDypi;Y<%GGCG+vEb_~kr+o48!8o|FvTz2R$om>kRJgj zSa)`6PuC@@GH++B7^A8do+eZ1eeJb@pC~{YE5sNo|N65WVW=4OXYui1_VHl;{$%;r zp;C;#+^;`NaK_58C=Ai@%`l4gQ}s@D!a{7IMlU4GFsM(GD{wV^w*Vf4MITXrbrm}2 zbb6P#TN+s3Opmf&NGJ1>dt0h4d;Nk}<s&_@4^oMWR=9zQ`VyBPk!j^<Ibj4)W#q_M z<hnxIA-KvYGe#ApMR}QCN?TD&o2QPKBpr|~m)k5{7_a5)XAww{D?eZNMa!aVj>G5y 
zQ+QEAttj?K<I^@O#%s_Owz}TfG5>+$>!H${Dl@RKR37tM?HPfE;LUR_7D=f)ny&{s z=uZS@ZLJoNuSdrtmpvUFXf`rKyyU+Pm)+k>D;>swQUnQuAGL?lUvq)G3Zp_rMO=(E zTn{XzZ{kJm%fJW}=7_=+I6>ypq$m}ClJT!GNOl)!_IHS2$d&w3q%ugNHOrvZOQA7N zBruQdE5eE_dF9yh;Lz(4xzaZtM%*t!t6(TNyWY(RQ-V)W?~LGhAII#B2n#9wvA{lM z!RADuEA(sX3j$qubYmpzEj85G@mM)l)nO`d8Sn@kFd)}aR%e5~$wmLk1L?te{;M(8 z&8d@zTvM_fdI^=E?2Nb~FB@LLkv#2)Wzj<9vd~;;+TaWsKKWk95|bY>RbBjg>3sWf zyygh4y7A>v03)UQ>`=43B<J*SOKq`>y~Q)cAy^0om=+Uc91C1hLr6S5T=2nmP`5xU z;2I{+RFE_vKk=E+_vuK*wXki!55zD?QfpVUc&?vAXroS?wrp9U4WY-6a6x)UK#_R> z3Spb?XS(dcie=TQmwvuI3uaTVLER`zf<E51`wKfoRnikX;_Sv|hZ!+VlKD#fwZ$)` z>_h{o_us><5}YRdOMwljQUQ_ezrs*>#j>=RZ`}QoF+9C?sTVv#;W9JSRE_5rf(2^O zjMXBb>~UDFJE=^l-zYv#FTcpByht-^YXy)QSskQxl5q;BYSPj${hI8*SjIsDg;_<K zcn^}#q7H^kw$ul8_&ht@7OP#2k64%AtUFk^OKlvRy{lfiHw*D<@xnUFj+(XjqfqWP zM_IuBB(RjAwq1UO|Ec>R&(K>J<M{FVjrD>jJ$=gi;`;vWrF*)i{CZ+;dlbVcV;N<A zIf2`C-t$a_#3ZpXv%@3eppN(V>*&wjAVI^1Y`gqwy8ta!Bk@(_smxg>Nc9mDcxn6# z{<2*{%h>XOV)>hL6h?7<*~|Pm=N(y53RU42Athv%sCsRU;*(4X%9E?GN;*49B|7M& zDLBLhSmY_lU-Q>!Q_zS^5GZsID6A2&byBj9VhenR=r1TMK`O39OhUU(dvj4tnkgq~ zOU(=|fa~~m##4li>dQ2lQuA@Ixj3QC%$Jin+k^&bz)N|3n??KsIUzE<iOLA#RmYTO z8Kwy8IGNrB&2J>8JOQnE?~BHbI$i8`oWFzi#!s4j5n6y!#VFW8O6n;C)3FNLIU`~h z*-?n{QfbqS0isC^P%KUpP!eWW?2V2W{MY?)y)SQ_^p}176jGC2%Y|jtlpZV>`@V~u zB^#MWWZNOPq;qz}R^>}&ChEks8{ij-05p_S7PU52YN%V<$-E3n9NY!=wWD&knQ9ZO zFxZt{1dMf}F}+AHU55wYgys{1K4ODTI*b%^>&;&tEWqwfLvz}o;~OG+b~8vX@Tg6* zYxQ!AZWc`uRgaNu$Ew&%s%qP4&pz&LH4HZdJFIeQ<&DxoB|q*$9D&P=vBQTX&x&zC zornqy0Ziahz_9uSLH=qeM(zlZG6)AA@po{;I|LX9->W)rXRq_XnlF<X>5^>mJn$_} zA3wTz4nV^hF3=*|g$LkD@B?p%C8e9pKL;OkI0Ugf0&_SCygUqsLKhucB2fiJDT==f zpRl@>wrnM3DZ{V*1}rga5!6Q7c#~RWNqE9{IlRYRyw}k^Q0zV*npQA1+~O~s#P9<` z%)%U3+pL3O4_A$->P(#2Q0Qlg^v@EzAkhnOFT(7%`T*{je8QG+=mKm%b-?cI-I4Os zo}B%$oa4T-t-hiyhy<P46pi&16{PNht%<U|@hUVteQXL7tfG??0|d<@%uVe0bsQBI z#Wfb-?xXFDj&adR(S4hP1s%q_aU+!*0^|lf#2Q_>+gQJKK^AlH#$vbmZaSg75RhKr zU5!AkF!C1VWp41@r26r>@6sJ^cl!Z}-u~C8{girUlE2PQg@YB)aJ-sLy(N0~<CFKu z<$fp-?DK{sXQ0**#aH7e*k?yO6ewNkFGB7zLm)APBC`BIrT-Dr_%VQw%YGl`>tj`P zobY35?PCEt^MS2X$NNQeq4_ariB|oh$wfQFR@0tgQJ!YNMphsoun`@o;O`?6?JF1i zS>hTtd01Xd{3MJ;!A`2Iq=Ii{VkSt6;$T9JrQ+!x5Q@~ZHa|WxzP{2syRzOp&<p%E zr-yQ%210=p4VaVE%0iTx(H+!))6DY)`gubCSv$6n|2VP+^&NC$*lJjbFv6cw<=`CX zmGU9;%Qve5WJhxj(i-wMq#tVXk{qEJq-1R>Cm;<1@%VZv;#J`iP7<mEX`m{?sIZET z<0=nS$V}nk->SMio%NmGH~NRE6GOD0arKpdQ(ND3_-v_tVHWrm%^wskml&;<87-9( zt(6ij40xw=O9_H)Ui5eyN>pwZ&&B5GFFa;Ya2C6Z6Putl)4=86*a7_r(~~?8w3{x0 zk}knE2W)Dng<PKJa9kBdvmP;N9Y14Bx;Gzm22VeNSY7t^)5b%&CsKJ@O?6RezGr!A zdc0?RbYgX|pZ6yr6AT8%87>kodJ=kiGGb~vYGN!x3=}hxxr;f;E1EWpwAhyr$?J&7 zhscn7hzMI)7`xlMy1QEj2mZrVb~E)Qa*S2YIfr)L!wO4W6C`y69S+sSE~a){o3rjZ z4Nls@#EYPVdM!FgLc}H#)Ec%bjN)zO0-UMbNXus31gX4VQur6-));U+xJ<a%UUnyn zZp#U0ZBa5qPcNu;>=r;Q2^Rt56SgHt(~@#_WuwI$t8D{BMJmdRP7ko*cXyl3b5n5& zvKP3qX4YsVRp&iqhGKoC5L*5|T7i5+kLkHsRbC<aR=KebsT2f)z1$j`Rhr`!(OUhI z16%}+#aIL*KFv^HK~2!5Ak-2d+2&+2SOc_s%Z~~4LZ)hkRa;v)A0L~81D=KYfqG-i zQm)va$ODPh_7a}vlfukTB(YK{Ff~fm)yvdZOEp$Y)mJFeG$>QmM=(==X_>}pX(uRa zCdTU^rzJqY5wPL+lTu6&GR+X<p}R%5Q1<*>(C+V3m!DK#S(033225F#U*cG2tYNOX zhabw?a6yk<CNPH=!ZQ}9Fs3fXfl949TFt-jKbS3hB5yH#HgXhj(U@k&%2=KMZgdQi z*=2?h1G=9uSz89rAXawdW^K9Nzv<=vVB~b+!DpXWqtF9tWI~}$4Md{01eB9K%`WVW zP)BC82xC>Hsdb_0xhY_1E@{0EUlqm)n1l+Lv~Q;Ed3kJDJ3-(E!R;Op5nN=!m+dTs zjE9+#5~r*Qb(_e_vZ#G-dUkTs`Q7T*KtvhR&SFuKh;gCKT6&A!&3?r4GFBa{oQ(BE z_(nQLW+rB8GJ3mR-ox`<q_6LOruL(ynt=kpl$EiWu)G((B?}up3I@gxh%e5US55Xe zbhXa1j5V#rCbT9P`bhGb%G@(7?L4iHxI<Z*9k?;8G{tBx5<~F<Yw80m;MI$_W{UO| zhDvY*aZX9q5<bP)`T@_rd&TXU;WOt*UhkP>t3GD<`q9dBe7~$hQbQW5I~3O2ROjnt zd7BxUZM!@zM=A2d6vx8CWwHVkU>kui<HS{=Fayi*816iN+`ff5c|sXkZ66t?%Y-gZ 
zk+q0nwqbJOV@LC3>c#w0)vvo@kQ|jf#ysKR-~_Mw5*Y!wVc<}=bK?*Pee)8vbWF7m z$@Eik&ii0Lo0gCE5bLY;$qDd#4)~t(0udUrsLAe62*2<UJ4l#@yK82L@6$|8iPKGq z^up?(s78wsT3KCfcZW}T2R4-xmdk;OGb4l(P?$I7AjNKTK9jx_#)%>nI|G$S<!4&x z$?FbBBl7agx^8BZ;yeczgYDtWs+j6LXZ;CMT;n_*uWX$jALsdi%%Km*<}X7c#_P>A zwbA_JQIe$cy+v2;%5){ska5Wv#7UTdWdtSB^hq4@6ZmSARZ9%*0~D{ZJPd;~lyl5< zWOyVjB!rZC^*}x#Mqr_xJj{^k-XhM{U$jgHBUJ|FoX*RBdmWKac~(Y7396J{90EDV zE69QFlQK|`^0FWxAnFC*f=}j|SW%kj7U)I_^(i)LYG`|3o^EWEwaI8FG)9JD26@mL z3gBBibaN@;EEk`z+jesVqbAH>B|3d`h_s<GO^F;kffP6MsJ^)c$?uYrTf8ACjIq56 zADrF4d%*=awpMze;k#Y@xcVY=x3#XSHe)LAONy4u&2@lGOT{hXnBgq3Hhqt*uwkk3 z!N)J+z>j-X`{I=PqfuWZr<F%|A*O*P=1zKg9zR8iI3g*SbZ}tb`s*+3iq)m=%$FXP z=dPPgfye^FY-Vq7Dj$`fguZI#r^kMgVEeo@oN!!G&O+gF-qj6lXw9CkAD51408z=c z`@Y0@xFFt!?~U%?n16&FsNpNH80EJcAQX3miNaP{BNw!jMtLRM@wr;hDoX4*8G5Wo zEJq+EzS9tM6#|4y2}<%-g30J~SYc_mIf63Zv}155tn-v+iWB;%148a}mV15;jQ1~B zQaovRI;mUbpdqgh`-)s^y(x~Lylcfy<QTGy9D;5b0t)kZ=f`(bL3m<GaICeHT5ADb zDK#-xIVMBx5plbC7>T2Cc@+VwqX~bTlrtPU3JeSn>VE*Z3@0-lV7kbTH#f4bFu5Yl zzo|LgL(0_V>9N<n-jNig-F}9tC0858ORO(Vp%1uJ<~m>IwjjQ|FE`|xsc<*{Kp9Hi z0k%~gV`4p+FpQM`j%s9Q<e#b2)btx{!Pz;p!x~2&^r97xE8WPPm?+pd<m1|;nja>j z{D*?j)7*1!UoH**n=Ym>93loQ6lg$xOj$x(u!>NCsA+j3Mr@D@k{jx@s}_xu4fUjE zTQb-o8xQ=}0^Dts%p}#M%-C<#Aa5t;t{sJ9ua9#1CgH7Qfh9B!e;h<y<Q^o-`3Ry; zu;xT%wF_(YjSGoGbfFQqtGVMxm=%It!d&e{UypIFh#f^S<X952CZ*b(>};mey8Xum zYS^mD*jrd>YE>5eT@zX2{gw)t2wd0u{adg<6x^;xfO6cZD+GGq30R|T>66?rTg#v_ z?WU_=NV(x6l%ebu59QfFaTuMWt=;_%bcp1)G0JjFYmAhPL)?z7Lj=jeDnTJq$VTw; zPI227Nsl-M%{Uu%ce|UZh_t#{$)%2QHXhR7C$>IUj0Vl%Km0vGX3>Yb);4vP*PxVn zNhn$IXojhYdKOkE`=-_Umo!Its*7Dc_RizRgC}vh+>sQf#PN4LQYU}4i)4qHRK&VB z5aeE`$HJj=86|xSF&}N)AAge|*=Lmrc|J)xT22U#`rb7><MDiZbbOlO8CZ3sQ4dp* zqi}`NB(?}?YOE~6K;9y{tMyf<rRpMKyaMf8)wY8OCVuvr{=VNO%#ZMIqacuh@F?2~ z0G4j9c02F)cf;gSKao)p1PR>C^m#lXXKm9$B+rMf&c!RXp13Q34HSMj%>HoL`5hkd z^#1_VKr6pGr2YD>xv!smw)z93qm5nNg_r7*3yQ;2GLVt6j($OwZl0QEmMW%BVGygY zJXpW~6a?Qpv5r>J{Re(S9(6yEdPTP?MMM>gS^$-zmm*-F)ty#7V(6Z=b?4CuRDZjI z2L3h%_}1&|5W1QKl%XP4<3u%&YHAr*UM=5>%j3gyw~T#J7hcH~yr~pW7gFuKynUnN zQj1G3wOt+@8`}>dF?DKje)InJ#QdCJL>8lvKKOiaJHq-ww$XLSv<|=Yj?}7w%hx7A z@S*;8+UJfO8n9OI`J*>yItB+Zf)C+sP<%27zLkf!zKxxVk%enWO2^3h;_W@aJ}SSH zO~dCDzagV6LdmU4-n9bA=hO<LkoS<bO97LUSawYpk+F5><U;M=sGzdVsI+7s3iuYZ zhd1^wIj7<)VU?xsT}vbDdeJ1bzHg?sXIj@gSHLKeOx}Y-&DqEa;T9Ycnw$pm-Z^;n z#?70UgyG>(baHWi?d~RE>E@RtC}Ty)qCg^GaKR|VA*S9xt0ySCJFkAIdw3FJTTFO+ zFb+j+3Z35B&u9Mn72gEB^9MZq*In=P>zEJznlhS$P6`84NnK-FQE5bKrbl?RJu<-5 z*;T{T(#j(=zy8{drF-iSp2Zb+b7}jZ7Bps%b5lT6$-0!oBadzrg}kSXU2<aOa72Ee zrElTw2fHUQ{au)!Sbb2_yMVDYmEH5&UinlC-ZJ(D8s2s1<UKT<633SBm9*SYL1YRW zN0Q39Nf|oXdV2Uo#e#dUYH0&<CKng5(*6zk>-V;xrsP%%NLdgv$&m^gD%vF=l3D_@ zdqZ-1^BaeTrfy;}`Sbsj`N5eZLJK~0)t;fDnzqZil{L|sIesw-E`gzzZeCjERtEMS zX;s&n2N#ECZ$;#F2pR;R6*gm0^aQDwbt;8NLE~hUw|C8oE59C?aaqqb{qf$56Q2J1 zz%Oj?U~Y)fcy*=a`kJ9{5tSlR&Z$JrvxefL;|1f8>5Ydem4n*ed4l>Or=(q_3>}^P ze8CK4l~uO%_TIQTgGvb=BM9BMB1_xr%eU6EifY-#jKJrU3!ADrr}*degyda`Egh`y z9fy`^!JosLx;Zkj`P*_k=;S|iTX6V!RhJTS3W5_-z~|U_`smx(8#p4vayp7z#_M}; z2BbHi*9$tur_U_!Cht-q?N|(g&u179-d@cuC%I<SJE_IYGy4Rozj)bs_ND>7EUs~I z<?@Y9aHsStzLGY%5VW3?bLZ9a0ZWrzH*6eOCT<#aM%r24*wNk_34SKGvbtkn0DTz< zb1`V{I6>w9B`0XGa%*F9an8vzj+zHG!I(kb#Ui94qG&Lpa3JH-P|wH|2>#?D@^K~e z59Vrqv*4qR(9q;$eP?%J4FtAD!HFpzp^-LTzPeU6diGwvDUBI5LwSuOcA;eu(w;uA z$EEHs=UO4<P$cD4!lV`mk-kwtd3NK3nQw7KdgBRKf7$ZWr=NQ!HuvM!%(^il!w6B+ z6#bx9HWeRM70=3!@$MUIwh@=a&7#l9AQa7<9FWMsxTL~Mbv@UH4twU`s=B~DM7aH< zW4h-4By36qoa*v6v3^-SQN>r|O0QLRj15oUJm~QMmo8xKZ|&;DcvE*zacx6NL2*b@ zDqs&$t)8{5j-8iVTzyPoPk3&JqJ09Tj57hhfuKROv{R|1eF2bvPT7w`C)g^yHY~qi z$~+oIvJ<-gq8F!b?bh@yVt_C2n!8{g%cdEk7uW(KMlI`{Q+I86;jT~mWijK((^Afg 
zX3kE&ejtDlu?~#gz|0>wj*1?6EopjrVdMUGThD-+o(Ib03!7@XWri0G#+6-9s~PSd zo}OBqo&0j?cI=xl_BR@MtowX$-p=sUWc!sXr45a#MI~X$>H8)}(-vtJQtp@5;v8Ld z(ISfUf(;?Rfv8!Eq(kuq+dQb$ib!6=2#454v!F^9tC*9!#r_4$z1<gJ;Oo(GYvZ+5 zZI4`LjUY|mMghG@YH7#toVMZlJCQ{Lnush4IcF6V@V>x6K}{`0d<P9Ymh=cV8;^SY z%KyxLW2$G@mN)O;3XaKv@b)abDx<8UiGOia$xw3jXhp~Pm9d$rh0o;UpKtID+uzJy zJ79t4#<6t}%tW>J{(-W_=CtBc=;Q8alcR5IuVL+J<df|bU1Q>x$EO!`R>Yi4*j&sq zQ^GbMm=`urr&jb4HH&eGZ4xkwM1&NdF!q-!uiyUIJhX~g<_{uo9#qb(9;o5dDC>|< zCG8ZJ(|U9BaZ1&&A+ms6&Q;UO(KR42A~mzDp$V0uITW#e8(QKIFz4s+DF4dsjkUX5 zshGqLiz17HD>A(!scJN}YNWbrVtD4urQ7*0^%xFn{1~r){Twwvx$ElH%H|eOdk9`U z!lG=v{fz7!R4ouN2AKvFs=1`FYxtfKG^V*=56p{NWr<m53!0{Y+DqCbnuXS|Y6j+2 zoJ_X=3*~CAp)NlyHVm$`k8A|xbTg?3$UB#6`8LwZAk4fo*B(48Zl7_Dts|Co6Vr8c z_74b4$tbF=2jHhrmImK^m}4@ArBlqKGKUlMbMO^wcQ#jUZz4m|sd+Vs1PnD0>CvSi z_&0JI#=1skXIB=c7Uw7Sr_j&O;hKbS#Grt0R{{H*9fMa<-o6lpJzDL34DB4`O_7R@ zDVlC+QkF5a3hrn54f*sU#jJCLEi*)|GI{i4XqA0b5Lq$~Y3!;#8{79zX!|Ral{>qb z<YddxT6yOzDsDAT*9&d~8_%eS7?@mdA6`$ayw0K#z@=ty?dBGokdjw*31ex-e|s|U zAY)_Lh64Z$PzQ#m&~<uoVR~tCW_kJE-h-aO>mc|<O!AEKuAZrF8Fk~Cwd1W<XQ09C z%Hs4Az`p<=+2{Wc3~6D5U~(F}QBxF>@(Y9FlM%t;R_<Qf7S@u6-ZC}`^7aY*dcnkE zmgJ%qqL!J$7U_a!X~O1d3~B-FI>E}WS&SM%x(@Lty#33|v)At%Q8%nxg2%Rz4MU$o zUcD%tfEG#1G?EK8foY8sYY#!>CCy_Pl^o2RT|L92vdb$w1_ytukRE5|z~r}_FQM!5 z&6VZ3)s>qoE9-Y}K@E<|VHP$x!=o)}9vN0RkX1iX+CDirxwyEoI=i|8AH>uNH2lZO zCXIg`x*kZ>f4he;wAFWX6;xk}%gzgkO>_<jHFH5|npunLdWo7wiJL|+D0`khug56o z20AZbnhf>4Q9QYv2e*EhxK$#ltb0~*`w4ab+A=u3jk)9st|q-^m{U7M#q*M;PXm>d zvzkM~!tK4T@hwxoVlo*g9UDhPP)KZ6ZfjrPuh;$O=EjaUtrw>SnxCDR|4jR5T+xsN z;d86Yb89OL>uZacy0JdLzP_}zx$}JQ=JJY;sUHE00_6o8@3hM~P1AWzQ*Br0;8W0S zZfzB|FtfA-Tbo*3ocOXV<@ia|&sQ0~jfhc?1EZst+AgE!VP@v|M8(-5{Y{))RE#Wn zG(7qBL%Fs5i6v|Z&g-4mj|SlRj1vS*lbAJvNTd<GhLH@K!9rU8KYjdo0^Yx>Y(032 z0)B9%>FO#xsySy!Sml7ov#I!Sse6N~8C!jbOm3!9bXPQS02?2fmI-kj1a$j>=U3BU ze&am$G4!V<_Gw>U21y6bXIEF}*H!^tOa;g>b$NShWqTVcJm0u`ckk8H%KA<PEjIv6 zR6i&<w>Ph8y6p0F|M=qa)&_=rkbT%DhWx4DVt8=bd(6jsuV1ffY0E0Fj7rbKT&ps& zcU06j=TvcJRrjTlcRMX$M18@I-!$pGQ5=t9EU#fKwY(RJ41z^Fgispc8jyR!-oK=L z`1n&pKk6^kfXH`_Z8=6>;zqj~anmGX3G1ZNp5=Q_OE1rGXaz{<SlfGh2PdT#qvKYr ziJGJJ{Lm{X`Q7(0tDx#D^J{CM>I-N|2e4PRwonx~zqP%3YkTeXZK&(FZ+)(JZa;dq zxA*dCNOTSjpAMm*k&ahRYVCMl^HgL1+|0`A%GL&K3Qayn_OXzEu=5`0I@kD@S2_o; zRy4Jw7nfmXCRllRqgEH{n$XKR(kZx;UbH*ItHYw=1GIDNMRGyajUc(`L@4D#BJXuh z7V+TW&Iy5ke!2bd+5SW_2wjDZqvc)7HIVh>7wlE-VgdN>ap(xOTxt#$uCCq@F&QQ0 zaH79S#UGZ<@#CrJ1X|lad-b)I&%(aBfkGWE=1@_D-$B)lJ9iG&&AWHuaqBL+-n;wq z-OJgPRV6J1A*(XGnpa4EUt#Ob@{XAi*yydz<uByV?Q1@IWI<mbIEeibss3M|%zwUC z)!N%zR9g>b&o3sy(LdPC)m_ipT1eH5MAC^|%JByQBQhZq9{p%e-3Shya1Na?R*j%D zVz#FxUH&0v=NnOULg1gCp8V84w1T<n3L<YES|x0nq8r$PTI(m{)X+CQvHmExaZJ!4 zSk=rPY<zTP4w%>*H*fxv$q*Fn^XNczaS`og(6oOMb`0>Vw@@;^Px#iod-&_dy?gg} zcb>g@5fYsXf=?!9;S%4N-+HsCZKh*nX?_E?xrK@a0DFwjN0UE+*XJDBo#FZ54V1A5 zDL~IUj<WUkGj?=QGqxa;v?UO;J}qJaQ8ts37pHDGn^p*`W-vU`DIvcTvHpjMHIuZ{ z?vv*y82*`M_4ZC#$1LUwRCdFdfMKMv$0aq78ZrqxhmfLMJ1?7uR`tB{K;&&by@C@{ z3v22I(Gv`Q->lv71M)ugMd876%%SCdpLVpce~bF|g9pc~Ao!bi?r0nP5OAo8n}jD; zkAmP=b<d71uCL$TUfS4zf1$##avbcB5PbAw=ingtz2t)8;DlsEP?(jwx1o)lxR%v- zqBcJWnVdea2U5?j9m=8^#H=2`q#gi|L~@?r3!0ySfHt-Egu_3xv=1#;_bxULu7Jqv zd*_Ror|ATuoIJg}%hj39k)`{oRYPo=J{snBp5alcg(c`*4GxQo4V&?=muY{#=gi^) z$ouD5b!~Ni{Z})yPdhf$A8tQ<bo<c}^%gXH@ZjN-C(quzba0Cy;!vfMa&S*>De0WA z>Yl$oyS{$s*6Q{qTJYB}f{%`E7jOl(M`t)ub1x?@wRhxJR>L@mjEZyg4K#H^XquWa zC_8^IV0xNYmylP7NyV30J&;k=pF!1^K?O;v>i=JI-e-CA_;iC$IQ%n3z6kxNWZFkI zq6+&2jpNn4>(#yMsH9w+LW>_ff7?60p@~Qr)3&wqLPn)$SGTs0%uN5*%*@x9V(0ed z9i4&vEbpJ44BD}y&G}cVj)D2kqsLJ1?d*J0?;m)6=ka4yee@XB{_*3tAK$lh4yx*T zkqR0LX#0g1_E&T*v|d}BSl(E@wFTdMAfJh{_XlR?99nF3R0XNW1hZku>8Lpgs2ON> 
z@&*>C<vmaF8xgQ7QeU)ZQ1ho(LDDIC(<*w>%DbOd3;Ivhp#P&55>?Q7LgSxPE_KgV z^(>%_&G5RiL#mX0p>_bu$)A&P99_J#`S5vkVXuIeo3VqFS9o+@WzE3o=vSfZw`(`% zjxsi%fxo%DG{3sKctGAUv_njKP}}!WM}xh4fA4dBh^bGWe6D-DkDlzmyN9m3_jY#g zKHh=P?!A2R@zak5Wlao1`Xs`p?n%wXm*>hm=dMhyp#tPBjNqeEIx|a4m>fQq{hz&g zq|`$&+uYMzc&RQSw;(Vs$=N@|%-K!X!h%oB=|7Z1P79ini(8*l@u5-hq?UK5mUE+) zaUqxY_z#2l|7#G>Wu5%&*~vnxKOtT7G`Df0>Dp@B@OoO!u$XzWI<i5-w~_Lqqq23> zgJ*9BXSOw5(<Sw69DMvhyXrbR$L8j~zEXMIrFr9Dr)W^}zP`S+x%mb7U(L)x+VAZ= z1`Xfm{L#}VPyupyeEj6e&eJEm&z|i(d%A;q9rO5PzYT_b`1oD4?7#l-c4=c%TGa`{ zNj2BB!uHwn&iS^XrNvDcU$(yF@K<5qFxZb{V}2a(YOHZox_+ptr4@qN$h1t5dTS4) zp{;|Gf$3@W!2hce`Mp{&4Vrc;SyxIK7fLAyaw*3%hOuN;>BQzK#V7Z}`QI-sgYzYq zQPFGr=%%q>iKKmjrf-w1T^^}~O?u^(`%hmdR}6~kdmGp}g2<;8m-Y=0e-*m^CWnIw zS}`da)D_iL2qX_G{66rv(ZGM!cC>Ckgo@U4pc+LurapfL6_`I*G5n+8f3gR?;K3e- z{5_QCdGvVq#k1Y#Pfcw@2zj)))R7_ieSm#^|H35N=U~(x<?WZ3Fsb|F{<JSc09xv& zn!0;H>JxJd{9_YQuHMB>$IP5x+u{GwjQ$UuC<=LZDrqMQpk30Q0$u5}LwFEHe27A; zn7Utn{`7Cg*vpqMW@o0F>Z_aTs^Q7gCwu?Ooq6*vr~U>2-#oNd(l#w-mZ0KUtL@*+ zs^l%C?Ys8i+4%B(Lu9^`o{hb?4~TquV>2p0Id^zI7?w*p<Jjh9p@CoeJcR)5;W1fv zjGx(;cQox^Jj|2Nstzy%(ofKk!{d_|FL2ep=g)VaJ>TyFDA`AupFNZ{db|UJ#m_%~ zD5_|o;nO}VU}zjrQqnOG?rvyy9cAy)YCngbsDoO%J~xMJ2?;LS1K9Tr4OKR`W|mfj zr)0Q=L|A(w^=<8y4NM4hL;lk^nMf;?RN9q920<$8PA=z3ChtX~8lVu=V3gEnmp%aD z)xQVo_wL@du{1x=%S}y9MMg$SPC-UNK~7FiMoCG*#mTOzu3BGH@#pW=*!&j8*j&D` z8J5#^!8!|EjjDS!p{S*MRQc}f_f?%YWy}I}t?b-GB9aS=Tl@NcM}Bhu9s*N~3lJpE zuCB~|1^j1Av;E-yLEtfV9kd$5Ee7C&U_X8J>dDJjPhY%*iuo_9V%~lE>erUfKa;<2 z`_Vps=hcsIXBJmvRGkPo)TAt<i!RSy>Rap@U0K-NTG`qFwZ}M|sjp||9A`Q0_&n-P zfDP!yvvJvZ$jDd+p8yj_S1l7WAp^uIs~lFN5E>3KG8O@HRv`*jVG1^3N;YBsiz-se zI<l&Iaq&riciQJ?XJn)<vavENDag9HIHn}VHP_d`i_44iZO!$WsYysrH$y!gUT!Wf zPL9IdtUvQYRQ2VFYKUI1tpfHs9t8@BN)UNI?GP$S`>v5C(0OE1y_}()jRz_xUs~UI zbz&S>D)LBIGlia4fV#}OzP5l4T32t~+K*UK3jc6FP~ExzWEZXNU!(pK4f(5A&p+4K zFJ6CJ;YG~5s2=g+#hVWw-v0FI&4-UrU;X&u)%zb`zWed{yLZptynXca+3t(y&)>X` zPAX!*U`{9P;Fi=>*}YKTzc@IvhW7R=m;@%Kn;&Q5==93UO|V95tJAA1{Wr!gUGB^( zuL?~`_Xv-+@<8fY+bJ2E3%M5yYuQm!kl9$6SeP1`n;2lKnUS8gg((jg2Rj!J?DO9j zc%T|oor#H|t*P#a=`J6BG&0aVefo4rpx>XfZ#z$3q?BL9M6W4TL&Bzss1&7F9l5xT ztXcS-y;p-XxAk1(bgb+U!QlzH1<gIZm|MZ)ce7Z#114_0xw109x(coa<>65?WcC&Q z0Vd^+aqnP4&_U?agTjuH@oxx!`TFgvx4%|+1s>r=^t*4Ky?oix(N<kqR$W<IRZ&`5 zUQ$^BwY0pfxU8(G|LVZQC(pp`y#ML_%;GY?q&Wn(8i<Vi))^qbV{~PHeSKf;am||h zrn8rpm84~)Wn`v9O-W4&i;nX0_qRtNOzrH<on1|w-PDXs41AJROc9*ytXNNqo3lM3 zC4E8Nm0y1OTVns})k``$TE6o<!1FOb;(>5JbM{PbcKV;Ndqa~ex%FefeAoDvQ)I2U zRW?Mg($-n0g-m@CYIa|}D{Y%nvhXx>LHI_;=TubnA4pM-{}!Fch&*O$!Q%S*H$m&Z zhX>#vVt@xfgU-hM3jB+ghgJO`?T72@ckg%h_LvwMh=_^6Mt)T(DbAcZBPT2U>cdaa z#qK<MwDaQGz1<zu8go`9ZcU%~%8}~c#kQg4n=5PcYby}XVvIeWp9!6MX#55vBLfKu z@!_vKd-jZwsHhFnU(?Le#MRf--ireV^B%4arwGZ_Opq^LLeTN|TPH4gQG%0$?YDkP zOI?kef_!ml{(pP-N}EP1yXPT*g{tP7qu^Sl;n&2a>PsPM*U&q?`RI9QR<oL!EqGD% zoZ(i~oZ;Vo0|C_14{$Zh%c!Z6Yp6uc%GMUf)%=>J`J(V(X&#|z-v|Eb0S|MSYRn_1 z?!SVmZ-4ywcz2hEhT7l9>)DgNwUwp)`u5Fh8R?6{!h-kr_O|ceyZ0D1#N2<f_wwDF znB-zAUM(^)3#Zuns-DHVD~s1>SLfC~&rHC#Y-(|#cW{84i?g?@{r2|euV1&i{Pyh| zd08ngULI>NKRqj3ZEGhPElW05roEj<+gs~Uw>H)=6@G!OmDw30GG-ywshbl&efpb8 zeP8!w0)n$x{rRV#e#*~I_i}gL-o|p5=I76zf;d>1nf&;_`IDc&dYg4=r2WQb*W|7A z+EFo!6jfw{s!t>N1xF#hpsl@EH*fE`L=@;++k1pYCgr1|SFou|w{Op`uY+ry-`ZN( z-UgBn-Fb51=K31O(_$*LU)b7OzIA)$&fUe$4N&;oJG+lxy}AG7Dby!#f82Zh?mjA= z_}S8+!c}nDA3lPoegD&^A3y#4@j(6Q=bztw_y}yn6Hq~{6&k#H2Q5DBJ$nkwC&fh{ z{`soPiu`=M{X>H{m*#KXe*oW(z83KC-N$!**G4(TjL)3cHxH?(=w5($r*C3qc4cV+ zg6`Y5=QlT}mX{9u9C#6!=jGuBbPs=>hN?0P2fLBIv#yn`x|#KPSxq7$B2)?kT~Pm` zD?A}0A|xTFVh~X1?(N0gDfDkt4E1%GnHaIYWou;tV=Dn60muT@-z6_A&CbrYxU%r> zM=XDAcWCg6x4WyFvf{-H;&9@i!V^t()gXVL$??$-A3k8|WuJZ?8tAJmEi5n2uP7;~ 
zDl1G+O?UB$@(W9hOvp0#ND#LvP_j$q6p|&UVPs@t5fc?r*VNN6bG2~u3W!b2udcl| zHC<F(sIIQY&vzcqtca+vp`k&2Q$0|<4*&q)b)_dgBP}*Q7HUjfOl(|Cd_r7&Vtis^ zLP0_9!o~(DJiMZ)AP>6@TauBH^7Hdq-Q0Zs{>Mk?q&fiqXm_`<{!&3+HoE2%=I__+ z>Wb3!%}rpwwYj0FAQx(JK_2D_s^;gEmlSSntiS*G6LglHCwsV<S65MjBb=F=edX$v zjXSp=K0#&gQNzvd9(0z`$;tB~Y6PqbDo)8c%~O~9mn&PZDyt}~s3>`Oy1}oWTw0vn z+&nnOp_%XJ<>A80Je(a`dU_=tJxx;!Ei+3IRRa<p>F)?B|KIP={OA9i`kynz-+itm z|2R!VKtX?wUopF|`WrU(c6Uk(bInbRWThp=M1}cyc_hR|Wuz{e85@=q=01M(5G!o( zspk3`IO*TivZB1Y>T)of&?YT8UO`SqSP;%FkK{#h7e~8?4}Q<$bar-Dz#i*c^z?LG z+?)!E^3bW<Td>^8Eg?1%BH!YY!n^ms`a6}mS!vA7jId=j6{X++-;C5G__1&tV4wZG zJr(6;Sy`BAX{lWt?7rFHyLWFH80dMqxu7@6NK3-Vj;@mMOPQJIsn0Pnaq`pB(psAv zmltJUswm1%Pd3ump{1qa;^B$S&d;x^=H}+4r$1+EWKfu!Szc0*o|2%hs!T;iDJ~&4 zyS9co^`QJ#R+gk>q;fJ+U%moW;T%axN>Ec%DJscBfWyPh31eI-dd~y>kfI_&upJ+y z=c^ANcb`9d@%G*1^duD(mAIIQoD61HB{3Cto|2Nn!_5_7hOI#Z*{>TY$Vs!ZG9z3Z zF}eZYb?Vd)>B$LLA^iS_AE;@lIXKv_-x%I~`SSkG&XYIqrsrlQB`?s>QeTh|ad7md zIIl}CZs`zRSKYT1onC2Y=KwARP5~UukkH`8TemP*mPe*0goFfOm!M*$MN32NyYIgz zAthyCWCW=fz9>aWMJXdKC3W!v7+`qBR9P8m@cG1KRO}b^Jwh@M8UuYDEjY8nLIRei zCW&!TIT<OyEj-4@MA%rGiwFzS(a`|0Tbt|Oe064O5-~BcwB!Z!u|ic@bcJID8x-I_ z4>-XG{k%O2^Rmi{^OEDERh1MdsVIEB+<z<c05+V^!>>Pk`UH-QsHlj6ksjcYm6m+C zO=o*EDJe-npzrhnQ-}0$J9X+5Q2PAYGhAQ#=+Q$*I~(|{iIKs<*WbK(1B#az8+G_2 z+M4SB{f~bzG15<sk9_mi)c7bBC55nv2op0CD=X8TTif3>0PhON&Dq5X)E>qBty>0$ z2COVhhri{){d=%i-+%wT>DN1kDGs*QghYhhy<N}WzI*xZ{m6|G=x$33vxi&qpXaeK zH@!K36ITNV8!J;2BhYFVW+reH&_i!yGmyjA2B2R<W8k5qqxIpFr!PNztgb2tcR_!S zuBz<w3XZ=1QC1=Sv)o$RUO6RQ3+-cD_g}ny_T(`P6L2n!j11;BHehtSK0XGWQCmZ; zwyF$kzG8R?^>xnnHsEKpEvyB^aRz#x2uA`EN<Jl<;FPM5Km7!+fPh0s08{{SJiHC| z_W;1)ikllQ9ef=+2%LHly~7`o5EuEc|Nd_fhr@4yqMkl|IyoWs_`qWrONfa){MP!~ z3TU{TybQwK1tNN^n_XRAgo9)6VADU)^X~mS)aa8G4^%)ncARhTYHubYCNehE|8;*} zzXt7spMSV9jFsf%qyv4}?=J+y-w!|hz{o%k&|+->r}7;Axz?_Z(U~dGc|#)uHdf}t z&31Plv9qzbI@#meXXwik5@HZYK7af6#*Gn>=*5M(!_DBdYHFy})n9@JOV}DfmseL) z85th|!3PaIV%|k#EkIsaT)28`yR*9!o`V3seEHQZuid?8s@m>EtV&`=A>eb$JLg80 z?!)W;KAyl~QEBn?%JQ|bQ5Z6k;-m3@U29VvoN*;xeLhh!E*#9eyEvR7Cg+y3h|X<# z_W{K`jDzRS(alVaA9>zkIDq{pAt4#O(tE%&M1ej4kcZoYTM!gDk85ixit=>lXm_6+ z_n!HW9zKBaIv-og5yd<?nVPyvY<x5oHC1v#3|2y5KMjraTict#(%-*#7rF^9r-Zen zB*dIPeJVFA{Yd8F6hFdY1wSi~n`=ia&RgkjZLEWUJG(k}5B5X2YGA03m3h<!Fuc=9 zAkd(tqXnr4S3f*Fgo}AW{`25HqGBRx>1dW0v81+~>}?^sDJU)k2ZB{|zJjo^77na` zpkLqM6>wZ&mT<L124)hnDDY|fB~_1<T%K(oUPH@;p|G&f(B$O6@DPmM$6+2KR#6!_ zE`9+nPIer*O2;1vNEk%5d=tu_zWFi8&l~n|V{PU5r(Q^qA582|U-F!In8#nP_V*AI z6Q!pnLG*N-_A5*CFv@_8;Lv$8ZEcO5+$^*>U}664+cyjh=b%k({Uz9UHy20P+Vf}M z{+Zne_wK^*1Op!gDTg<5L0lA6@fCVd1Lom}V~ysZSX-E~Ff*7L8{qmm;FOq{7%WVU zvAzrDR$f7_yta01e$K!EC-VilnJ}QA=jCq2c6km48qmOmr1-`4m4T~Qa568z2blT# zdBe|HSz0*Uz}dkLtaV~?0s|wxwT0Q$fxg{cEPvEyZDpyavn@O%5KgO|lYLBV6!hG# z%dOuu8y&s|#}JP2rHcH#;ugT3R@ONnr@Op!zGZm5s;Y{df;^|NptHXZyp64;#ni-& zPd|Oc`gPy~Bg2A;Nr-ucMc8?HaWU_SaQf~vAtjF-BBF3|<vyGjh?@>Ki;szf<E){k z@(JyK4wl9FS)e60CK7`^#D?HMu`&;#hvWru7=NqFi?KooA2c&FTHRRv@fa7FARLD8 zZK|)v#k{VrR$*}gF6LjodIbTahK5>2Rr%ZZ?|Ha5l@#O-Hv<UI5)jbP(TWNS9)7o} zu9|{^qOP_ImA-!c3RearGAsz9xzmK?Fj8-Ctz#Q9Vo#qrbB=-j%-J(dbyzwVFAo<V z|M`UUwEmIn`UZNqm}g;Of*6mOgaig_Y%?^b;fT2)ocga`>F(*k!#p63^zp>SJjCw; zg8Xp_u@LUTFnadv*__PO!_5qIb<UhQ1H?isX=86=V{3_Zw^fxD2?z-(sj0wS{QUFJ zN$Hgo+!};}MiwFEMVIGFTE@A!xUKE1svGKgxVfOu!db(bKQYwPId%FJ2?;SAWNrZ= zHXa^a%zL>x|IewjBpi~i;e`q5Mevcf=7w)a%uhc-&;ff6XCwuiHx>sV1o*e_Y=6;t zY7jlFww92Hh=!I1q9m+41O8A(Myj{3YwzjaAHckxc5z7|aER60Ko^6fr>3S-T2VSR zJr0}7%fe!lq$DMnm>FecrEp!;1I#SUjh9vz_ntg~AC7hK)lyeEO+ZXU$45#|+0j;i zxPhLwCa@tZF9*1Kxw~RL!Ypho{$Zgl-CcTmx>(@>3j`Yxmk{URV8^<*Yb(p}Gfm8l zY8&g?+naFNbrkc$g1)|9cywM+kRN<weM4<}Mhe(StVdW=T@`wSoPwN^iXy_(U0Fp* 
zPF4!**D)|LN=i$@aM0a>8U=3Od7!4_LCC5spcfcdIa1O(5tC7zmXlLjUFj3($HvBT zYkLbzqCi89gp7=ZgM*2koliuJgZDfh=Kt}-St3>m=ddDeeN#q8dR!r)w2UNR?|djn z2PMeg2gaSc`kFUCzQ^c15Ouhjn5Zx_3lnSy2Jw&B?kfdOlArH<c~#l=?agB{-&lu> zc~T7X)HK*lAjaFMsVSFLmNuZ|ntc4mFjnSSSXoq5m6(|rzlppsg^!m<Rz|9)uk-eu z+cY?s*VEQGLr6l(Btk(+eYvF;D;^Y-l*(!<<P;Pxj`mm&9TN+)r@voqYpbrVHdf{V zX^6@J9v)sEtSJjPEbuc84E0JY%bT0(@GviYJmy>5TPmx{C{bAg?979{Y3XPgn;D0M zheC{?ucLjq8FWuJb~YsyC2&3movOKQC=tC3v!YvS-FQ{cVqwETNlkS`Y!n9v+bwKq zB5h4|a!LwretxLpvT{5Eg1DIXb_eFq5HO3{1s7b9lmTy!^;ki;EiW$%nu#?nP*GY4 zI~)-e22vjv9f8Z+h=~fZva%>ED=;!LVC4rIaB*|y=I1P}%>OYuUs?olE_7rVg(2=Z zSiwUQ5)x==r~>o3+3B#)ShK!T7bV!)*^G?zS(vdi51SGZ5h|}LUtF3)={z=XqobvM zmWY&^N0FS8wzM!CYi^m6iptQ;6r7kV4(1t|nGi^?s>TK#9L%Fm69u`7s<No42v(iP zG|<)8DJ&^!Y^=r4yw6e0$Hm9=Ug^4WrHhIRyQ%>5bm!=-?W_ZW1FC9kE?$&yu(86% zJR27`m!YW<oU-QnuX2Df2}R^wYRpQAz?`14j`_0o@wD6mc6Qe7tqokqP*hS<MoC%I z*i1+QcdpXS`Mc8uXPHDSeY4M>=U0@M!IcKl)Kmk<g;j-Go9cmicMq2v6C+U(VYp&g zK$?@29drhC{`ISuhZ}$k<>KN@&q%#FJ9XsbpQD&3)zQ@|EiVD90AdbUa&mF7v4Nzp zVjfx9S$TQ6&CE@}+axE(LBGK2$AEeGAb3&mFj$#~2nF^zJ1=`;@&+#Eb+yz9h{?|h zY7>)FrzT=)AR~409Q`?47iU^JS{Fw<9LzJiczRUS*Te6E@f{b^+Pd0OQWx<tucuv5 zT-eZX=?I<oN7B;aVO}6UF?RIE(8}5}4UQBdM1S-Q^xnSS#bw3wYb&6ccGi|yb)Ju( z&&I)ygp{Pc<y&7}UDr*_rb2eX#v!J@<nmlm>y(nR>iX&;R_3W_XcW}c4J@s#JdhHy zvUr&P{tV$624Pe0bZ#DAH5El%%o`Z!!cV~JZD2@(9d>ZE?d$K32n)f*JRr@@&1G$4 zfrohjEIBo4YI^LL%;WJk+B%w`;q%LLjm`C5K3+y9hB|uMpmLw9p0>WBo}-InWld#D zQamFAJ=R?U=3(e@aIj&;#XNKhPL9;{l#!9ExX!+gmKq@m1)ZP{At_ZxN*orXMTEf9 zI(fK*Fk(HzAQ!;Avxj>{T`e%rj*EG6ka1mziEuFwM+^=^eo;X~BZ~Ru-_JZhFh4mp z_VDo|Jj^pN(g%eGwzM@ref?TNUiLf>x0jnMSOtWW15`H`M;|YDIyxFKV<7dgofprs zTndzxQ$I^1L@Z=#5mr^uHe1p*ar?mzwxCIknvPaoSI^27;T#wysi1(Hc_*;*XBdQx zJW{!Mc-3(*58ejGcLp5Hx1yQ1vbJdNYzq$y#=|@h54WAYH9qD!ITDi-#wSOQobYx8 z^W<bYI-2E`rLzmuJ-wYtDG9#*KHf+VFHH6Egh!;WS6qB7^wW&ABnBMJOI{S`2Id`Y zS#UGY!O5PG6o2(<AJ|ts%+v6xpCzWqOpV7Hq5|_SUY@v_M|m4((D}ML6!W;e4LO;f zfu0Qd@;a^t7@coyJWA*NypLoaq-l3|2X*xs+f@k+^TA=k?Vatr&z}bc`oYO%WMY8! z)YMb}2^hJv1O(rG{~ef2puX#J3)T}YE-FaR$VA7ld4^kC-aaw8W~}t`&H1gzhnQET zp`+C_FtBm=K!ij{DJtT=4(oK9fCO}2-z|lU8z=MNwT@)o(#ou*ttl)d5Fhir+@P5d z58`1S_!%1?J|^>c{5&vUQB{8b(Syf(JF|;7hexgs4Gj!k9~c_yM?GF2xIR1xU-W2q zH!nK_;#piek6|8{&O-wZ4t5CW`UiSw@i0$HLCLLrhKM{fEdeX@Ama$6_c;{vwzwL6 zW?l!kw?Q*6D=U4pw>bv$&;Y-;nVK2j+1bIBp+z&#fMNd5gM0UP9@jQCc*1~dYOJ6r z&&tL^ieBksZ)4fp)&3Fpy|PiM4^}2uZ_kU$HiS&_oNC?)RU<hK6IUiS4w?bbc^W!8 zO#?$4cTYrUq>PdhCp#OiYf@(jNl()Y>A5E1VIHOP#z!%4Y1Z7<6#7RnKRz-1&+0se z`O32HuFIX5TiaV2A?QHYI;bt^$F7bxkSS<iT#yaSzry2f@ajA}2U~P()Rq2j{LGVc zDx4-H!_7RCn-B6>%!8k2W%(UCk52569iRCl{ru$g7%-13ius!P`*$DS@9OLJ3-AR7 zh)Iayc!J4!viIv{_s5SO9=;~HAIl(+o|hUg8Cm%guqyKE_=gwvXVi|jU0Z(l<A(#x z(`p$Q+PZtXp_o_3&pdb=4D)#Wyx~zgZ*hQmthq`I^Y#wb%y_*`LQ=x<nQyAYcTK9S zyaf6e#MdyGQc+PJtdx`#BqYR`RpA)s@#{PbUY$RRd39i(lwIyL5q9Rqj$&TyYv%Fz z`6Il|7oBhXgO~@=!1b%YV*cU%m+#(ZWv0O~XJutBE&TRw@cq4A?Ch+KwN-ffdr4L0 zz)%k>J1-Hv6pgGiGW~K|?RZ7!%+r@|zjzxuEki?F4=?wyC=~O!=G(YBo+TnX%P6Yn znv92e^5ZeT?``lf4?2IOpC6kTIhy%p6!Tc81boTQqw+RDdw{R^-tNxI^1^=I+*q5M zxB(|UKPTfz=DChy{x~{ML`p%zCVdq1KId@wdGTX1Pl0A$_K#uyNN<B?eiyHw|IGY@ z-N)Ovwn1uCm9YTU*4zLR^uwtiuueMK@9)Kx6;17J=g)Il*!U4K$x%t!d!)9f)ZD1* zx`~B(<s+FF(MRCbd9vf`d{{`(Kd<x0VIDKz1~>DlLA<1_7*|#Sb^th!3=e+#;yK38 zzxpSbkBN&0=4tSH8(<!<pJ#Iatn+x7|D3mB`~5NO7v_)1RSF%c^9hM@Q)q94*U$eN z!{%nE!#5tDr2XLjJ$RjroE(N@tgHC<^R&g4m5uGK5bZvGcqcBooR~#{RNT@gvIem4 zp1h@^q)0_Wtz~Frca+Y1>@)9{j6ZMlg?ZcK=scdh4Ibvjev8g;pfhi^Rb}{?FD=5w zJmxyAm6cilU~fh$&b-YZqVu|FZ*wH`ZobHK^yiMF^S?WWrT9a=%`uoq>3my9+w-?? 
zogMAqSRHO=X=Xx1Oe8KL4hTWOg=-M}%zRrbdYu$%&Qftj8x^<4+4H)3$o%xWDbJ8p z0U=>kZ7sXw>b$6tXBwXT{DEAh?NK^^oEY{G@HQ`AJg26iPR2HW?7($cobzq&+}eVn z7TQ$SRHCj){a)sUh4{~(=Rq^y1v-y)e=y7w{{cFG)HNyc<9nN0yz?pr@z1L~$ULsu zqzAlB8<=@;HD*SJSbmzA2=sJ0d0A2%%!}{qeAPblpVtO%-+3S;X->eQrtO`VS~nG2 z+-qj%Y~|vDpZU)^&m?N*opA*7`0_S&pP6rM3=hTWZN!es+n{xx{g}+ZfBzo7C^X0) zTX?%fikWYN)_JUJm*(eYATZa{(=MwlOH7D8($C|{RRSUW{O6-F-UjbADPmSBH1qNQ z6!YM1bntkaW5uxf#RV9h|0kH|k54!*^KBiiz`T;8yq%TB;buJCoUClD8d@3<g0*6s z8UCxEuc~ZlZQ<emYCY`E-cxBM`?Fk{@(xMy6{GQ`gWeHIu0f$k#;^p;Vpe`xT--cr zzdKh+LVWDJ%@Hx|G5tIg;Lse~g!9Lb9zsl{uCAI_kPA=pu&pa;X{aS8CN?oOtf(q4 z%+IExrrgA~vKW)Madfg{VP<;q{5jTB286`M#|#ek;PW<U=5fWaU%bt+Vp!dy{rpka zVT%inoO|HskA$DIym&+m`*`;VZxa;yS?84%<!vm@4>yB&oSTQ+#M~5iz6C$?l@%RV zde8In%+F3AY;f<vV-67mLN;YthoqRY>j@Pjeu=qKxUa*am?tJ9WRb8B&gT~pl99Ub zJDAVTNFgR6wz0ErYCSqXk7oW4xF+=rdVZITwB+H}KmPO+?1O@$TxNC#sK?=198gjc zV}W@aTdT^NiiWys5Wtb4L9EkhczC#AYyvy+<9jTNS(up_rKBYRfNR(KaOwQ9nMWQo zZ-a+<eIo-oIT_r^Ofuc0m>1wbFCqdltQYus7%}lMKQRS5{}|7G4;Yz1=R?}N+A%tB zgH7iJ1<tc`u-iE~z_5#T?ZAEvTU1frH+)?{fNyf_+hxT!=T=xzf3jIe&LJtP_*z`~ zbvadaRu)D)I#0xU!6l;jf|M*Dwwdj#%Zoq`NFp%rWM^}@fgcjJZqd=%q4~&MB`Svf zWBmLtsC#|rg8X8A>w<(B977lf>FMcSzr^xgdfJ-c=Uv@hsxMXDoSmYeq)1D~;yRS% zW$Ee9>F8*}rVhV5IX()VMN3N~DLG+ycz_n~+yhc}S@boj_}{@iX1<LJUT*`(9PpEq zmlF`+=jP%#++b#U0@W!Dbn}Y}>g#LJOT4i#FD%H<#l;Dy`y3rDRy%iKzRitCj~?R6 zRT}E)Qc+VOeZ3o-8=k&?qo^QfV`+A{nT+&BS~^-oV*`j}2d{MF(Rmc}*M|i7u`mzC z%fsUo8OvF29Z|E$$dc<G5e3T1s(7wR5wS{m#+2z9S<unZ99{_pRsr~tm6fKXqVzyG zAKnNITM)h>JP@tz%||kS^n4r8`QtHPUQ$R*OmwBE<M3O{i}TWw7r=O2sw_SH?wi-I zfn;uOE-xR?hQ_*AZ{I>oIT<V~8jA9=;G9D5aI(kJ*{v<iC@CqeY^>7KQ($bu<825? z$jLbsP<}oYKlA7C%(rpDJKtu%BU4aNU_DI(eciykv8hpBQGQ!n6a23BW-KG1mzyhW zff~9Gwj~}I=F#h<V&Esg`|&*!BZD8(<8ZT@ieiA7k&yw|h>MA`FgL}z1TQ})19Qbt zNPzD!d;13m!~DR=Fz7ti5xKp&1{8ywN%Sn2_675((1NRp<yW`v?_ry7<9M2ol!RT< zH=#T<E*pmXhD%s(Sgoe24A?U=(jQ(}0s9FILmcOc^yunt4-XB-?``ak)cIpG|L(&F z@NB^LFTb(C!6zgn)Y4M-5Af;gYXA7@=ctHK0s?}8zOKWKA3eOkvb=EkbPV)%fiwvT z2|7AE<`?G9%ueC)HfM>+sd!ZhNGPz+t3=5-z8F^Q7`e)Y`Pqojpr=otU|mvyO77)n z!*FJ0Yn4}+KQ=iAr^(0D4QtEotqrVm)DP-BFn?qG`cFUq%+1MRiDMb>i|5a7ZErEq zpR=*H3=a$D!?8XNM0IUt8P_ev#1H8F^{EL_QQ_?LWUMWpKi#>#y~-=3NXV=xY#Qm8 z*%e-JWn$^hVdj50OGLpX9h_28+CI&~$}TR7Who_`aoDT9-5qR0m<}l!Dgol-=Y4Ic zAK*FC&mVP7Dt>%&<kK;j|M}-%%1a9YJZF2`;{)&FV0ZTHS#fbO6El;jn26EQYac#- zeDeG$#HtYbVU?Q0<-z@XF!X~Z27B!8<yKo?v%axLi{rirDCUXCX!x~=$!XG)WAWu} z=<(?M5#DBLVU~=H49}g$F&Bv(U7ZSw3s<(*!T#a7)n`v<yP>`g)-?qfosW%=9=$OH zr@-9Q7;E^jU!aG=K4)g7gF~LcwixXf^xYKUAY`T{;qrCPp6-Pe<)b%ejExONgaq(> z9XA&{83P}vy|78Re`Z%ucK0{fqvp?_CZstp7m-<A(l!+k7Eek_a{OCW`*?etK7CqL zOvK#MEHWy5c3~zqDh$^;spB($%orBh+<*Mg+|=lYQ$Hxm%RYPh<j9Yh8XKNEeF{P_ zP)i89^9piT*OyUd_}#n4###yraxPBxaUAF609|wZd^})QRaKQt%#0%<!v?PQK78~5 zFY~0df;yD6j9Ax<h>MAQo_j!l4(m!Zbb~LsN*pmPC^K|_5)$H)L$g^>79u;Hkcd!H z@}i5IYguL4>gHNiWf`37ENq&&wYd)Gh=Abiy}P#$zl(5kfRF*hJlMkJ)n(MmVil$L z@3G&o0(ykBv}9&p4kHsI0E~5IgpQWR|My@2<?iBy>$hWK*rAEBg5n}VLINcPIjsE= z3Ku6ED<>xvkJ1?)-HSHyKIt7HxxGu<zb+g1_Hg;1Q)iilRibk0@|(sw1}997^-rBT zbsXgk@bw1Z0b@7|_KtSB`8oF=J%~rGho!-KPbc`jpXVMhGroHD3agrZW`1Jym^X}l z`uu6{l`c~=BN%JI7zFwu`+K{-`C~e-UcJ12wLdt(7Y;t0cj%#-TI$9ohJnEWV8-^) zjX~;HH&&yf!$G=9NJ+Fb)Buw1j<&`5*{$`}^|ciU<omiiGE$RdWl*>D0pOKY6yb+E zBb<s$i`O<*pFMv@Nl77fQ3AZQi=(}piz8G|gmXxM4?iC_AqhE+pbqspCM9{v03S~; zH)n5m7f%FKXFsGTm_sHOl+J?z23w2tK%kaXI6Gq2Rv<mynHcGrSeRXo%G<yWK+J_& z^Qfxa)7iGQvA(gs3J1j8#0UZ(W)>zbZB4MC4b2T};9l<CHG#;Hh|td3657DWp*?ix zKwl&nYyv_;Pyi=;TZA)=3=aDhb_TW3Qb-^sE_!I>D(3Hx#6(Ac^@T34ASYvKW&%GP z(-?N%%iWcdf?QZcC^|kiI5Y@-;{rPP$)m&9pl8AEHP_cjUJwVXdFIR+32_l0FL%tZ z#vB2pryKgWqhguDin77cVF+XY|MuPjEUv6u6MpW@KXdQx@9T~t2@r@u5+X#<;O-Pf 
z;S^9n0fiP-1%(vuZo%E%J-E9C37UksTSq$SbkD3)ha_}=X}jIu{B!5BSx-IZ9QN7A z-u3Qx?K-uWCD8(&5wxSCq+pIW22Ww$Z3n8e1MCi`!FQ7XtcV!fA)!5jhB8*+l$iS9 zEKneUPIlJb9<H*|;-GEL$>PG&$}_4)1}EnG`#UihjeYy~ofSAkb8#vy%pD&cg1dp< zlbNZBs<I+D1n?oS3g|kl4hF&^zX0Fnmil);{16o$4EzAM5%}ukjY1aSIfAYeyl~#$ z!3O#`YfHk`|1vAMKm~!be_-4O9hSVhyiizD2+^px#6`ICaOMyY0V>X&Jp%#~<O=v8 zupjjMr_Y>HQ&T|zZ)O5Ytf-`LZGGkYk7DNW(<dEWZJ>6vv^0(!JpvpEhUDl$r?iD7 z*uu#HR162w!e{{YY)IA#iLryjec!%%_5S@k5Dy@bjvqgE<j5h;0|z)b5A0!O-?4iy z3)_AU9=<~&Sl%-iA$o!vzGFAbj$OO=?A^n{%F4#h4h}0QBm|FDNm=pq=~G9TvrGUy zJ{hcvh>F-#$iM!+4WLv^;-c6^F{lrr89Ea$+z?JKE<wQyYU--Sc%zW;kdE#S=pS$0 zyW85{YG9-fw+pGmyi)WUA-b27m*wN*g-DiZJevl==0P4Lyj4*O4h!xZ?0xnU!F=yP zw+F*RT1JWwiCCcq&sKvN7OpEWBq+b6&_Bo@A|SY4I3jvu!5QIf#l$b3I(-r<A3*%} zOV{o~Mle4z2(VAf%^^8E3X2LKI&v8P%XQ!YTp&CxXcKPczWw`;pE`Y3Na!%{vHd4x z_nz02v<!2*)MjMk!M>ksHw!x}I|mELL2(tMu=J|*s^QfecW>NS1qcCEojoT2N8Hc6 z3FsuooA-ocKQ}iwoT0L^BH+i!#K6s+R$N-NvWCnA)znY}JaFdBX+FNA5Yv4!xKYTp zE6IC!xQdC2oIZULZs|W(ke8Q@j1K?(m`7;*)wdH<qj?26Uf!N&=B8?D%D_Tr1Wv;l z9zSs$AS5VsK~_#iQ%fDr761b*;p^vJTv|9gH~nTye*{Ooeq+75rXnaL5M+<8o{oyD zGCW5~DGAi%736^1p%I)e*3eiV7(O;Gy0h!@qel<X?)v$+FKZiWXl_&!yrHC=(pgd2 zeTUC{!+wy3`xN_8p~J!$J{ikh2ZeTU^25d*Y+Rf?JcoFBPn<q|R!~q*Ny(1v6q}qB z5*=kiFjG)gl#!Q{RoFB+1$j+vjJ=C<QPr=#djOahmz2=a)xqiMDkv(5UKAA(6OojX z1c(CQf#h9iE_p=-)AO^)blgv$PRvXsr6!Xc?9|m&B_t)lmn9`7)YMfl+L~A_#=uZt z-%w8n`aEr|t)`9DGPf`X%rklZ#jCg9EiNzQ<mb41xr4;j1to{YXkj!pv@{UYRM*f_ z$D83p!$WEt>vM|=JbW02cw=c<X+W~Dh_D3QSVaW|WhGS&HFZr5B^713*iRQOi`<0G zYtJq!>>U{znVzhuuM3NgCOX(@>*AEvRHfx)MZ_<j7Zy5w?i}1qK7M|FfwRKWvLezl zhtG(z^DDCo>MPmBdZu;bh<+S=!fXf6?%_VEtYZ<8QI%Ca{QTvsH*a5qv;d(C_Xor( zTt2wGl(Z!B+$ALd18@UjBRqFNg@qNt!;79@m^V8=^Xm00G_G8`zM6O`-rCk0D!?6s znx72t77-a<RZ|IM4kWp?0=i&nMTm-tn3x#;;g8G=571q^zSh`O7aJSx$#8dYvM1VD z!E-S)#{;E<!GeXQIS4Q>Z;!N$6yWolw>JK9<|t4&x9{HS>TXX-P4Wxyq0n6H97xa< zoHo-4me!UeJLDxM=nl#%N+u>pL0o+jsAx|C=kMw1O0c$7)iFCRqP&|&fP-I(=YraC z8MD)B<h{HSJ9*CTJaB3!*NLP2r%s)}AR-|ttE{4HZ0zpmn~|FnpOOsmnyDoLPcSzj znBzYh(T?OB99YxbtpBU?9w;a&kenRcJm}6;7lM@~pv%D6(8$CH&fLSxBeyVbbaL$G z{rhOD1)%!w-mc`dWKVCby=8RWT#zMt&5oIw9Wygi%*@Qp%*+(qG23y>%*;$NGcz+Y z)7-D8yQgQS`$x}v`g0{+EiF|Y)zPWiRlDR9z#<JK8A*Q!X(m>b+My8&*oVbp(sCx? 
zD>}cn0PYjg61lioZr<1Iw|t&HNAAoM>y=d(&=r{!r_s{H+yb)JW(Yb)76VPr42}+W zQnE6+7@6k<m{k<C;4<W$oLI?zpJC%{-@86U!;M!$yG`aO4ySy_Rfg?bgdIX1pSETw ztvV&d#cpSY9j>;kh?SPhdt<c8#ETx6VXM&)b#<kJ5LKi)fi=Q}7$6%!PYyNRmyam7 z#L>8eF^sP+$J;%Yv^}(w6bE16l9$%zd>c4y-d{q<_hixU^5X0EC~WB9WTpm6k6$$( zW@2n=`wbCa5CM|)_}B*VSyEOSQnJI(_hOFrvL}TIOOX8;>KT-sl9k!0ivC)%u=V-n zZ6EWn$-MzM2|$9s`j!cbmd^|my}(4lrl@0lS<IqxUglFEf`kAu1}@JPU~p>(ITuN{ zPcbOy-H?lk3E?$pv8iaFuoaU*|6S?MVI6a;D}-BjBelG2%-N|P@Rgddn=9mCq7f!$ zbyfx$=5@8+eplamW{7Sl>TYi?ZSAY37uacD0h86Gk2FSM>?oG$gRw_%gEdSRvF5Df z_C!^<L4iIgMLAq*X%KS*?hOUg&qK_Y-&9!IQN>P1!@^BWOij$sOi+6q@?7%laUHsz zu$wi5EG+D+aNec?Cx;7$7foxx^vX#a;tCAO2v+h3es1${LA)^CI0_l|g7gOFA{(YE zC6-y04;52zTy8=nY>j_=-W`}|Zx$yPvpTh&t!FmcGtSE+-Q6`dY^`g{dvKDL)>d}4 zRh9I#v=pWC@nb%o4A{3{-}JcgV!0d6H=kOxt3Zr_V{K$lR#(SJ!5|(fgt_Y-bAq}3 zNh2ksKvz*kyVb(P&$2MRnh~D}obL{BkEgDm;GyAvNk+?0O^nbE%gac|v?c}U`0+X| zI9wPUH#WO9BFf9>IVT`S$FjLMrK+eFnV3U}h_!hz+2~LfHMx12fy?>|qgPZhV^9=T zP~~0VVWlIZBn~?3na5HR2ul{}Td{qokk}gr0uQCh!4H{p+Spf%nc6u_T3Wb*%_hJE zKQan(y`RD2{#0B{q%`=zI{$SaSIeD~dJ2I#;we4e?6k~~uf+xRCFaUsmNnChi>zFH zO@6lM^TP*^DQ&S|uO_ys=|yIPgkQgqe>21`cfVcEB#LjlD6^mv)wg@|0NSp(IOky} z`W$l)p1R-X#wt_4b_b%`pDqQJw?A`&%*-gvs$?ehGr?50{GYcM&X)A=aX3L*-ixOQ zs`a~(vlf0BnA}owUt3QP$n=TIkkF&-#!_)#qED~V8MSB+Uk8=dB5cn0rTw@=glSLG zQBa-ZV}<^{xHz{k@AzWAxDL!1-{N5Bz(vI*rK58_y)W$-k0pXhr-Gf4pLX(s^y)?4 zaix<D-Xq2D)TO(qjIpJ&%}fn6I7-0)xwbHqVXZ}gd22@`1n2~ogo{DFhQEf1Lzy3% zsa02R7=7kky-wg-b5t?ee(W7_+jQJ*^MluDquvBGb$=bt#H?|oE~;ayRZY^-jnhs} z(dwk7Wp_Hc8e00oPu$kK-5svqqGaf4n|sSD3%d&I>N=Bdw_4{A(tj~KU0e*4H8=Zq zJh~k=J|W1-*~s!RGxI?V+n*l~Q_;jqN#4ao=e|O$wt4;L;ylyVvc^u2u)?xj7F}wJ zjTx_$GN3x|xXo~CYP**yF42-QG4<>BaNGFXvp+h8bv5B`5AvM|EWiUewYACz1x9P? zwYE8)9nP7{cYn>=q!CH$b$Y^gU2o5TF~+r*@Oivn-^>jmU4k%fqiUI{34Xn8&+I`V z5(osvTvrRE9UO%d`hiaeZG*HzSP{ad2#vf${Ru7N_jv|Xb_QcTe0W>zJUt0O$R^G& zhwZIqb^OUfQIpI&yutkT?a1{*bMovGZQu%xiKoi0TDpd4c6~#bG=C1rx^d<uCJwTY zU(b?XKR2BCF$9s*GIH~N%dAZy4j-M;kq{IFjJ+n192<%jeTP&;BS3{f($dh@ZS%`a z(}o%O{mlbR@{a1`#_V!W{sdIN!{`0|C742@$a@)~oI$VqQ_`-q+1{TlZ8e4UkHlfG z__#~$ct^-M?MN))hF8Eol(=$hZ(7Ah8$>a%SdF=nQ-Dk0X@vc5s5jZKj)rC2I{U?m zzjx)N>GN~9Ev%rUZ@NGwc3J|Ml1)RMponiAjX9{X0-3C<=|9a^J6J8mNA%O2tFX6n zzu+p%5=ed#zoov;T@U27VeowboBH#zzSZj-9x8Bu{56&`d81mrKXwEU>Y<{;prwbx z6pP9fh0YYS$zaqWK5dX%!<0^~Iu>FeRHOv<;|D__kh-Md{+^`u?su9?6S>L{mZDs2 zj@O|fn9)(4PTq;gltHti!NYXO`0BJ|AYMqvMcV0{<9@UHx;oTQgDW8W{vo<dYB}#0 zc9%`A?EMgyCVVlm#sEs-Chi@RZ6fLS6AyYK7LFnSl%V&0BAo^oH$qxZ8){!)Q!`6X zM}(P_WzN*jjQ6YM{h*+B{0l)wj@89wUw!HO{yxM2#Y}Mj1)neNFe?~|r0$l4#u6EA z@!4XceZ4hg>%PS8B2_UO{<0k9U5R3Jr<1KP^(=MxW|wa>{Csy!#ikcp+zgp5_DJbA zs0Ai_3`U=LZ=WGH%9ti?&*xUs3A2})2M-ifShl?jUYx3_GtK>WdaU>5=wiiFwvB~V zUhRjIm7AK1kB)|miiQlU2B#5I4MY<Vhm4kyWke;>*Z;6Q?U@J}mRS~fw|wl09o|ZS zdU;4^C>neJP4f6a;bmR~yJfRKb9Y3!n)V%$z9~%%=6p4ihjLrp-VgHq{C4(!7Zm1M zUn=0Oz9J_(LB<shp${LdDne>31Xtn#v8m8IjVr;GF2YFz3>o!n_i#jftLq`MdY!_y zQjETDx*Q=gtu9MRpmdD!e!Ngjb;}nR94+xT)NJ#MMN=xhNEppMcA8{VfhP3q<;0T_ zT#CKOK<N4WQhwyjm1VDK0QKC%KqH7x$kNu_SPYMN)E`ZVtP0X6+M2w7YI<^QetIIx z_T8vi<>Q>@VCQD%Vexf!3`~Q{NF#Hz*4p<D;8VZ>h_WI+GZc3Czm9qK2<&7~{p7!b zNPyG=Yo97`2fs!n3`pH*@xH1rTj{$V=iMF;8NR#AvGQY?d6B;YOWfz-5kewA!VLXR z9$e#S@~o}69&L;7a&30R3jJrYG+5nk-S<%|9j%wjHrTOp-mMyg75Xrl(U`B&6ZFXR zIBgF_)Ag)(&bH!H^`IY707p^T)dCwzrG%n}{|7npQOu7iZ18Lj7#>Pd6TZTmYAZOm z=y3KdL5CvFH*oO3<V9#Hf*FXn4^e>M9c`TLb<F-3I9Rkf-^c`D0N{}IV`Jq{m(`oC z?xv=ucBa_9?~T?0wT=G6kbZYSlK6;8Pq=N7dULyE?yrb1|AuUpHMDE_EB_*Q0B_z7 zvZV9XCxs*!LIl7MAA~mn#K+PSM#9g~&*gITs^>*Mr9CX1MqA5ylxDBq`NJ4h2Tb^a z7wb?C%{Q;IDECHDmYFNh%W&`Je)XeXs{z%DHAB6!&NwR|%iq8ZD!b-$^R5j=4H=Sv zOE2=_YBS&Pq)G1!_NTQ*DJ|0<#J!QqU1|$FzeOSmK>=ufdE-v5LfbFdu`7EleKIg- 
z)C9iBNg(mp&xvuWiqbQD!Z)L!x3I&of&!5*fI!WDQbAMIua$?!(n7KZP}-!FR12#z zZDcwMOw6i^svxK5--Ivq!z*RZziYNso}qQJ`QBvSrn*DEFd3Tje3b_oKMqhk^WlGN z2b`u=!o^DAaAt?PhV1kT7b~bbA2cg(+HXK-3(qOb^6?>c7NIAL6qzyh<&W?OF0#6J zbSBdGO34j9|E`jE(%c%%Pc^OF(_vmzDw^RG%f#XrTO{f5d~Y)sVS1Bn^sQsJl8bD+ zA1|2Q{O1)Yth7JO$hy)D2B>kF3$_RNZCh>xD02!h`ssf84?(`evWp-c85kH^p8Uvv zh7u;s)KgiV-}qorWvOKp;AEPd@;92w;%iR}4FyhfQOa6cb*+`?vP3+tu8M-&<4apU zoyJ25xZ2i4?=&>hg2j|;ETWTxQ_R8s^IX6gz|ZAt#Q1P}g8XR99Oo2tO26~-vL6wc zqp<@L_cG+q_s+@RufiOA<txG?btM3+#lgm#xRb_Apva~Sdv~k>&q@0S?$zBY{c>lg zOpWNJ?C-iyi>1W<>4Ejm50-}7p+A43EYhgsceTS(2)caEu;SYFN45G3HPZD4Vjmje zscrGJ&N`gN=J}&9La3X$WjCm10<tuh_&VX2x(hnKu7f)~hc$zHHkc7QG`3j0Kqx>i zxcMm>I$xOdyj?**MH!5`yATKej?Zt7Y|ahwkdX7!G*{%7`uq6MEad+dPpqx0Ifod( zT5m;7*-4CzZO&~)o@Qod@jbpK2D~rn`&Y&cRBU<!T0lQ)YT95VgrOf_AF?CxigH6@ zi3nqC1%938G>yDZWiwRKi;Ig!=%Y2(HhFz~AX*gHL_SWfe_j)w_<k%_P9;FHzQ5uq z4>C;9+LE`650z4v(~OVH0uwFQyxX>o{6jBtZ?Er7oZUI3Zf8z3yJl|Cx`|l57V7I} zm{|{(TpXY)Gc>x|ot{=i`KCyvOft-4%9MLG`MyGCcfxWT>m~;RPuPU0ctuo)Tss?w zM?9)CS{Jm}B0i*H=s3Sh*n?#^J+uG3QDra)EWWj{@BL1p$S&e(ZhBIGw!pa4@FcbN zHCc4w&=Qo8Fp_f96g4(q8Xg}nuEHC5SKQ1Ga5L}%RwuZ+E_}Q_fn9)|i;9K@Qh+?N z;)rgKQ%OqRv<-3tc$eQ_Uw0d=iLQhe?0J2_vkd|S0sDMCcVp`AS-hf(FfiHQwEhP4 zWOw_%-G01LRO|71-R$nvADKhQU9C6X+53O-sA-=K5q@C;2vpAzfvc0x4d7*4f)>}` zP9>~k2{wGLk;q=!<+F9$oiP<Pr<VdS*E`#t-q<A=$hXaR8lt45I9oZpS8m{;pv0%9 zdloy6Y5?WVOI5ZT%WSL~3$u_?{KOOt_ewA#)d+fiDQN1X5qcA}Cbfg&u0FHnkoJ5p z?9Fa*!ha|YQA&qOP}w3ET_j2_wW}_5sqM49pFV|!r3sg?Cv@|koy9<KCQ~!|NBJlG zT6I5VP6*O?3gnKI@_h2Q`-(4oWyXTl=k`h<KO)+IP9Al(Wk`T?795qz-0<JOlQWzB zxGH?$rlD*P7hoUIherzIQ}EHT#J{)dy_c$VA0ORfH@lkaDL@vkHln8eEHt14%pHs> zylcAcO|YoJ{F_`G*qR#~+}&w6Iu<tPu#u4=wLk-)w!8hF^wiY2&llEb!H2h6Vi79W z!E*RL?=D9d%DGn39BUOTZRwf`)~^V00tmoQblfiH=M~!B-VXalnWx|CIksFMU4-i` zOGnO{uh#uPQ3l@6`+Vg6?dAQqRx7pVqF^18>gp0=!ec3_#?8#k!NH$<Q;HVJBmTyN zZz-c18s=IY4fOZxs_CsRdU`rop~S2dr=}_?C3gB^onTu!1S(6bLs3wi@^HggohrdZ zkt)sk=I;ukuC{oRe7CoZiR<823{2vzbGW;_<5(?#rnkhU?UYK?@`CHXmTq6??9f}~ z7hrUTkI@M3V_A7Y(<rDAkqK7CLqUmZYeIfYMMJL_#PB?-M$_S}?drg>6QAEeD4j(p zRq{Z~l-ur5^`(LPVa0g(9j|xv=Pu-ii-nSvm!qbt^cldpf?$CiV5~<z1$J0z8!_g7 z+0@YNh`$Y57EV`G^!yMW8yZ4iQ(q5EZ9P3Z3*_Z@K19RCJ=-A-4-Nv`K}}(BZ`$&E z%qwZRUSDDHek9?!TpO}86&2ipd542FGxO*nXqm0rXh*>3{+*TLgCvR2;?A(V*?Io9 zoa>8<N*ed%)yDnycol3l{gA;ups%9iaJAxW(c8Zf6B7gfX=*Re?JfssNcr{4O+o^x zVX&_cRJ}*$x><iq(Ej2R3<81+!z}PoQ&aPiWs3=N>36qZr@xPioSYfM9q-Q9?yga0 zCm)}&an48GD77Fs<N!ska8e3=)!=uiinLIY$vD-G7y@nQy`8bg+Jj)2#~@C9zAJ97 z^!upi?Vv@P#t#}(d{~Z4U%ysS*|oG7{TSs;YE$goi-dK7#=4kg0ezG@95=}o-%7zv zQzR#iWG{)(zI>TOMJN88Z3l^!75*lwF0Vm8X5~4glFgCH;2nHy+|;y0ZAJKAT5h{< z%my?-wPSMF0ib}zIw(X?_vK|ppufXa<G6`V*w_}T1-!h%T459fdVB#G0J)ETVb_Po zV`^pc3!%)2-CZ~hh`#2grl7^?7=8accoC$+LQ#NgNkk3`QoA_^C7!M>zOr&6J`aa= zpO{5ldU$(&@!8E;GjVQj4Kj{^LjWvw&kGg~_V4sGzlY{-;^dh+C}c}2%Mm3dlr5jT z2|cqt2crAqlatb(Q(&cS&DEjjrO;}89GX2Y)@XH^<Rk-(yxd)6bRlF&oC2ABB;`xu z>7tL9kZC2Vyk7TRG?j5`jf(py6cd95mepmZzCerCOye?dN81Bk5)Ev*IJjebDBO!n z2bzuXIQ9x+R@XA*MK*1<UN?WC*o)NauQA@RaLFrPm|sx)7eLqGt)Mc-f(qFbHKfGM zmBm{ZlsN!w3K@%*s=B73tJc~BtK_pj>np-Dr?>UN71=OXcN`SQ?_kpxByg?-)`0O( zfTs)kcE}H03pW?D*IiL2wChnRn9WT++6vkl9`L@L9%-qc0FM$<=nSrynOPz&O+h3a z0t`?U#PnP?ORC6<aTmz`L`_t@z=6R*DHRc{-p9$zSFg6IEnjGO_%PK&$ETc#fjwEl z1{%uYx8Ry@@cc9mkdBGEJUb1*2s$0zx1Q@72&~PGU;;c8@Fi)Im7F6gU@Ey5^d|&* z57oP{a2-vV5lcOo5$Z|r&M!EDW4suf^T2&{he3Us_T3E@vueJOV^#1YKc!#<DZK%v zSpD>P1@3vq7Ollr2iLu$)<eDy)D=&34bzIyo8H^S89m2^wVaQ;fnGk#=9(3*H#9$A z0Xgh+FsA}ACpUmMBfl(%r58xIa~4rAW^kF8JfXq^ks6EinDr_>qRcOOl2x;N%B%RP z-`$R2j11)Dx5#%W-9etq(zsD4gt#Z?XF{!*-(|h+r{=D@pc;hnk72dev>O{(4fHkm z{4Z%O-psGM!Z|SuJ_kR-72xq{X9rGt4vtTD4X~|1y|#Rwi8(ou$b1wy-LpFdnWmDC 
z+f7Uh&T!A52E}sJ+g{EOUh{{fn=f11W`65*pY4yJ0@v<G==-&GwSC+ki#>m_N|+gu zk%<fJcbB(jR}lOPDk>tf(vdg6R~F#y6O!_-4sc8_PK=F@KqT=FLML^=V-5n{nx~zy z=5L|+4%x{U%nzMs;P5{VXi15@Do-`b&h$O+Hm3BWeI9q$STJKWz#9;a)(h$S4{Y<& zz0I41-YCYB%a?vhtqpUN<3gnns{N24F!Uxn3Z|wfaz!|Xx}3@Rg5RU7GS9cR15vp; z$2~?x$4pO4R9D?tUJ3o{mAI$<z-!>==Bq1l4f@j)8=Go+Fq;waSl9m^T+`I*8`9%8 z(OC1<Uy8rEKOihnO`@ZsSQxexRm?<0Mf2Lh=z=+0pLW*{a5>cM?R46Gz5PtF`B)pG z2aLLWzl1a-khH>7UD?nbVk#=4<wf{984(rf%Jc<sV#dd5b;M1Ur}x1lO<+66dxfFm z^}Csa7x#u@Cn{*odoBr?wr1A&8XLfWTn^8WIxyqmy?q0Bu8XQ~c@~|Qb%DPJQm+%c zgl<kx_Tyfn|G4#F;lfYcpk`%>jjk=^QMc!hXT9eylA60KQ*|8`_;uqHYcCQ`X});r zfAhAO6b-YOXhqA+PT_)$62gf36zv_GWxMw%pK?5}6&~@Yud4h{0TAPX3g8y03t!NU zXTpQ$e&br-bZ(=iRzVkLG1lfGnaJEFgxhd8lJgUCrK8N~{bmODCJE%dVDogn`5U=M ziN=IQmor`YyZiBaq*#?`1c;%4ocn(GC_qAPUkQeM{n8zeKv{JuZqMs6Gd8E8qtWYj z2F5$Mmq!N&&u2-zEYR)%P73_(X>|RkdmLBJ&h<IP0Wg4>ozh4L^21zN2T21P2Mx}K zurM5_j!d2$9F8ZDzv4*acOZu>0!CR!MDphLPv@gj!pdNbydo>vQ>fpN;(m!qL*XVQ zi6O!`U*9(7we4JFIDqH1Wr3uY&3Wc;pH8L$o&Qdi^;Z2^sTF?QkO|9*Y*41UyfZ7; zD-?@#_bY!#dQ4kB>ND(dKh;r7K0>B?q^03H+v(4dTc9H{!j||LnVP)rgjm{2pq3A^ z>dCh8(!l#%y9xF88?8BGJ4B3J8H&(?u!7mqqyu7PfE;gOO-tE>lehks+IaPPr@E?k z48jblc17hP0q=YC3*9PYGItb*y<Z<M09ZAIuO)nOJVPq}i{MuT+!0S0G?t*Tn%m*& z#Tu-2`LOgeE(iHhmCs-Y`{9i{^x;g3;X`lvRh=`xF@X>2KF3c_CZ?HI>!qTUR3HeT zsfnxtwrXsYuRNEK%ij=i>jTX3Tf@i-r-P@9!OOsNu$(4klt--XAJ_LeOULpSA4v2C zi@%WswhtkZTmxqas(sELb&=9wVtjvqVn~_*ds)~JT%cSw!&7n2r(k8z``-PztxljI z=?P2IeM(?S5{sd`%&Cxow7DwK9<wH(sbn?uYxVmT1-bJn#I5Bt$i)z&9z_E-g`=NO z8Ec9&cY8?8<Igne(_Q$8CUEnz{M2e7Q<aI8n4PJsr!guoFax=%Y4K^RgM)EC{GfAq z{~IlK+DXqo07y5%+YNEYgS-kgK2=fmmtv4~@5S3KxXk{-A~?&)#0$oE%-YTKWnS`A zSK_XnmW~d{T-E7uu?h@rAE~QvcMtutJ@uqjdpmeT1TI!08de&m)zn^l&#q7y$a3^i z1%JaaLk20J-R0%=ZCfDO@g!y{(19BcY%Fb=9+5cD_oT?$CZ;u=AoH$o1cTL=mkTQ= zZksBA{8a*aEUMPpW|Av~>p{>a737d!f<)l3%;+jHzgk1<s%8*O%Ss##?zVS>G3^ti zxP`dOxDQV}R0tQYRqA5sEYctnpuArW+x0!UxBT9Ie&x+<I&;6N>ijb119`;IO9Osb z5>34>CbwcpyOms4N<|I^808E)<qdr0PM@>-UNUE3_d6__q?bL0M5XyPQEgF`NpX2d zk}r2L_i^!4e{097YWamkdJ3XS=!7+6^6=mS2pphKadIFOR`P89`tB$XL3SkI#(V*m zCR28CWH}iiPOdSoz_9$ZW59dBVt_6e7Z+GC=xRF;5LK$Kpw;iJbv(SMGSSK8%3|SW zu{AVAm<U?l-r2#<%BX4ho)ZVziZBEZef@@gh@W{r(*owRKRLz$BvQEVB<|tO5H^$~ zzUIQi6v52AT;<^A_5Z**k0SUXtlXJXM4#V4Ldl+WxF-&@BCM^Spk;%KOF_`NXFyhq zC>TDXRst-)&p}d4V?u>nNd5;DGsh0CEWeYBVF%fvzTpL)?w7-Y$WV?qrov8Ub|_Yz zspFSn?J)iJ2Ls$@|2m8If|?ALbvlze`O!nP$)|=Yfo1H@0Ov4bpHX`6sXdLti1XQ| zBNZ8kLvUqp3T+MsEdxr*jOTNm<MsF(?v&2A$J+e<0DxkTgP#zAg@NFe+PM8-jW(lz z8^p|llN0mS`7>nbhvFtFg?yG^w{Z|sOF;*?mRPqjs68k(1~M{e55q<q81eTAD~!Wy z(r28BWSCW4{15@3eIyDBsQTdLlfARf&+B;ofGdpg>Ohttht-MbU%9eKFGM5LM3{^M ztu@me+(Cy5!hL6a-g23{;bS1(f?y7u*wEhc*VzG`a#`Gx@NkW84}gne`349xg=wZ& zvB<m&Bp%M@o8txdtXg!uS@tpnuGRapeyd9jD=o>lckzU;i)@htwE22`<y9)9I{9QL zktq6{E8IU?-SDddnuFC9PSToRx_xHhA@Jc9U^)1{?O2Tdo0Lh?88zB}L$pUCwPz$H z`a#WqL3=0g^pnz3Q_{zWhvWJ-+F;%ez%v&SQ&9nn%p<yfL1zODdhY&-cobE6ecULU zJfu&kD)1jawfd3@199!Q$BSSwH%8d``1trg-EQ!_Bt+AQCFznQJl&s%)<{?ZBQ)05 z*qYeejy!2zkf-4xkor<TobrfcdbTDr@$m3oU%iyoF@<`-ZfDFOAf|-Vfn2`I>ebX# zR5`vNHsRMGas(K0y(2eF$IwIsAo^in{V)BdkFl``GE>{KHznp5A~1NSP!WVA3-Bt0 zBzrf9qitbtsu?dXJ~XT3R|5W-$~-P2E66Xq$%@`EXEZigaSGfz0ySEzwVVwOZ%@3L zKzfvnWM)&o%8|0|o<cFcstYP}WVee)_Jyw%qf4xZ#+C;VwpRjdZ#%M|IgZbs`8BYE zJ3oqM%pcx0A>cU=&gCpx@fRFW2aq0UD~ObUpR-_(#R!9{%5q}^8*qc`YnN)<lC*9| zV=K?Q4^IF^|Jqz%0$>yhoV^8bOlYzIstNIp>G1}A;sqDi^q>$}MWiMo4INHgQ)ypD zm@d3|0uj;V>Z)Lg_MDoWEb8DeO?h|VMzopo$POjW0}RbYkmLNp&DpcKIswhKvosYV zSy5Y3qtoeTVs|%na?GtOLV1MvK*hoX1i|nf@9&ai)Q3X!&dhYxcP~wCL1Bk{=Yf}t zW`S~BoMo8>ybvV@?5sgS0av-|4SK&MrI*-Pr>7UyfN3~GMzR&Q&?aR-lFFvE&_7wd z#I8sF$~moKNr3HQwX2^e-uOx`VbiLU+z&b28)R)&h8C4&Z&tQnx8+xUS7XLzA_8vw 
zp?peec+_YFu1m%m)z9lG|9r3O{5~F;RAEwaK7O+sXLlq9=;a{sD-C~Ff+i%hBJfw4 zv8<-dQqOeLi*>{~BMv6T(a;R`aW#2nA~DWquA7slkxJKI3l|SFHy=9myKSP|#^XdK zig}MSaWiwXwRPMZ+#Q7cn+(Low5@+YlzgKU?Vj3reWlMqvkbA+*0&IE&Tr3oKz<Uk z4mxA1<$GPJ+-~78Q&(^50)vBkZFvK2fTMua=KnANZ@_+q@!i^2an;rqa@F-xwEQ8b zry&(4FB7Eg5RxxX6n|*<!)czWd7hP#x7hTK;g*%nNMmpNwz9bT9PF0U)Mf$Cx%Pgv z|Mc?oS0q;q3($csytq>?fAag9i<G1^*;8Jp`?*yt!=JO;SAJl3`lkbE%ldUuT2EAQ zqSv_;E*-6Cx*{_9il{8HsO$*lsO-COjeOyE2|Z9Sr*Ff!anZJtGRtp3rlhd%<oUsm zjj3z#twNtW;=A3?b0@7d-7mrI`Y#_R$5-Vq^)TJd<adpEy``loS(`r(7vE1;8}brb z#xth>noQMDn&gDNJfLn)N-p(j&Gks=ti5;BOpCz#Fk6csaS#k0HapWSN2QD4ys~G^ z!EX;S_uak<E+o0qv&U9+eqBQydRW?AnD42t(V6s=9k=MOq{)wcBmo=JF2A!|eEBfB zMRWbSQzaT8RLI5eXZ%ZtB08}rGQLL^LFFSg=?xB#P}(N(2a;M#Tq^z5dN-2%yEhH> z#tH=>)Yr*R(ax0Z+VJW6y80{O(!H&JnL&=z;o#_E{6oyTd)Fvm-wNhKak*z-L|b29 zRM}ZiRn|&T(#A|q!sw?vI5TrAmxA$Cfw?J|V>#U7zJ9VUqV+WC9k+PZ=0$d=qg?o8 z@edGHQd`eR;dq<A;p$VN-NoC<ftYar^U&IInx)oBtE<+Wp}g&$s$&<4%)xxAhbXF5 zvGZ&O4!aH1*$1^c`J|?ws|;5zasm?T)Yx$hhH2p08AfKbm=oX*S!D&3vO}DnEQE9& z=yz|LZHXLxkGYbik*yG1=&H#Rbg6HzX^E@4FQCKz+_<yLSD&E|$NJ{R>V9kDUPD8F z+S+OUXgkrADZ~6UVy+8DO~6KGnh!woMzjZXP=R`A({94;F7i>&>%!2_U)N==q;nAZ zTTcvcCp69#C6{p@I8w%uLGL(P9ItjUMu$}SpQmm}({QwaGX}hD+&)&^A`scPs8VW$ z1la!lBr7a)6db4QS?|8j^Teu|^9)v?I#Y2RGqFr>h%&2v6H=1W{E78?+VUM|dam)j zuVMT=+D2k}0PIRg0vF4jik*e0v!$Y@sH!U;%pjN&JjB3%G%x#T<mF|b*ti?FFuP%b zFg<>6$-K<a?@>Nl|MDt4qOqllxV!=skBfwikBA-l{m(8Yc*0;`sF~4GM2DzZwE7^n zQ#tpgvLi%~yPQ4khM!avC0D_I!D(E+B-No>QRHv%x1z=+_zFOOv%JpR=aXs7OXT@@ zwJ}ds2XMKS+R?A)=`BZA3?{YDf<s@e-?<`ouCMe1g_?14q{(mrGa8f&Eo0zY<m}>! z5|IC=-G*t85rbG*j*EvY3h6AsC(j@G7hbWcnXzC)Es-*QhtCFR|GUvHsWMB#EleOm zxt2D=(*tpcz!Pf5*Q^Sce?*}_6W%i)=%Mt(j9I(57!FUn;8Z|>IH}rkw@?1=F)^}9 zHXwlq3~&ASJ^+W{dko5?K{w6;ji|bpis#V$;OK_ApQI}=#6yo0dg8f2pyJ3EdFL2e zRej#2HP^h(-+8k;;lCg5=jDOuruU+|8?xc#=52DpiH@&nA`y;uk!{nh@`ag<$<Vdn z9DUO^Z-lPl26F^<Q<0?lv%)s4LAI?!xC$_69Gmk=jE?E6m7py{MplO+FW-lBeoq&P zVe%)FC-{^Q?kC5+lGG13`ZyCwUt?i&3d;A~lTv-5y=B5Q8ME`L_!oxIey~L7PGMfq z`z&3(G#v$SUkn{+9RxZ?wEOg!bc#S3He#-AXc0P6RM8CLcJ2rN;nw5CJqCFlsIb^! zJHCQ*7lN_rhl)d0cY%^ivmJe<X=d0m@?tv@$|jS&!%Tr!&V*U&<rmc_7>ld8rWG>d zcIlMA<mO-Q-=g}Z9;dRp*Tc^ARlUN@WGF{csR!Zs?hq_n?n+ulS4_tem=7e<e*Su) zNYZ_T#?D;2Yq}kSpR~A<wye7u>($~sMztN!plotssIw;;2B;hf9z%s_uB@-FZ^TGs zZlOv~ehd{pPg#GSwoDg$a`qDPgooh(vE0rh^#wpD#o@kqp;-?Z5Y-dv5ymvTI5oT! 
zVXA`&qp-rp88eP~EN%ZR$I~2QYI?qXUs#i8v4)H-RhF}A!+m9szzTCLRdBU9Ibqua zJP*dZb}z$BQ@qKQj+MmgtQ*r<^IQtNU!+yl-<`EWG$0u|&%ERI@S;BQ6hv(4unLq} zJl&5xmJj%wMvhg~-;c&H;4Y99<0<5RJBFm>J^;HtxNu6syNexOsh8A<NPnKo8%-R0 z%fD^Q5Wa1ci41^Z5ohk{kKd6;M0kpo1!Ou?H3Oi#OPUDKoQ8S;-2M+!1_F=m!7KWB z<r(d_-fl0f)wJC7;1=Qh(C^5FPZ_t=(CT}2PJIbVLIr_GN@>EZBGen}GXuke2v`Kh zCM3aZE0u?j)paI(N6+)8ky$4(>#sZY-1o*-Rj-?_%qgjlV?0!A6k&&ti;vYreMRmk zCu1BjbiQ6iSs9>OZwO;Ggvm!;0f}=pq|1=tA!hq%?5z3(cxk`2wYyhV__P$~gr4M* zlo}zz%z1L_l>a`{NS0R@R&;?hNNf3$8G`)ARH=P)^j6#LZhIEPT!Tsx!9*<O{C=6} zj?wc%!-Gz_d_>8;Ke|%ntix)a*pS2w4<}<!)y|u!$0qY_qW905MLIH3SA9Xts<($S zm@pPHQ}4GCCTvY2bf-}oVCO|EhUVaqV?sj0w6x8TEpf_UpL6GPXk^)5*x(nWq!U2I z*ClD5L-0OunkS^3vQzwT3uGww8~8F41n=hNNY9@Tw)$yS)}$GxH+Qn7P8j-E`p6ry z{@}Z2>vr=gqm!Y9b-`H)&F^(1Tt9b(-&wfJy;92Bs#9F)D>gk}Eh`QWFWwwef4bff z@E*AW-ro?UdMVbJM4OaP@j!or9o3!}v#mU<F2A5H|DvkRSX`z0`6V@bYmb6a;!6fH zmO^{vkkuu*hV)^c)YIZ*C1NUbQv=83)g5%_C@W7E&Kt);Ef?yvSjs!w<UKGkAPf4g zjLSo@NX>KDSFaU)p~Dt^{VJkO<b%{B_-rR+xCwomm^W7+AX=Z6p}#V_xV*5z#>xq3 z=;ms2HoeE3A3l^>>gGzggMIT0;dZ89DIM9YRdnEvU>0;lUtA1!k*A{#)Ik3W*HY|m zQlYKT5~Yvy<BHWih~*gD+w8AZQIWNWUiv);*(w!n|0s}%#l^#8$IWdbrzQ7E6Q7Vz zEHgs!nIFO&c}`)e1@fMbndk**Y+an3o^O}IAxTO+PAv^(7lC)&0S-j|>;bRe&=*n5 zXekyRb*A55^Ip1%|Jp_37!c?fTyf#kgvTKg`YjJ036#e9)*mx9@|RvfpC4rYvh*sA ziwd}ommhi$W-iz;DmuEy+F7yfG3%V;a5@`;HX%gkBgJ=??&xHR`yM$7=mRHu5gGg; zft47anWX+(Q3-OiHCpn`*4J0C73qha>E{-K=$F!B@RYvR#6(`Os0C^NT;ep4*c~n) zU$wcZ*AceNTF9{w4P(gsC<p*6Df1Zf(-|?Q;2A2|3(CGx@Oa~ev=f(VCmO*j?kk=5 zqM<3=S3^|irssY&csQd8^^Xca+n+%94No0Fdq*NTaUWM%)&8xI?3R<|P+wn6c(*z0 z=jwxYDR#0)-j*k}^JM*M&r8luFYNNK@|r9abH2*ZW$bZ1kn!3m@=|Q{wVL<h6!)vm zH<8pZV7HcpH8h*OmL$q>6&-Z^JCzJr)scZRxtt4pw{N)F1D+UcF&n;*US=OcXTyKY zMZ(&0(^4X_4IrVnex5>-1fHWF6QF=hg9d<<lA9a}g`_Jhs;X$KZ_evDw|15m7aKd& zxZA)jdw7_=l8X9|UIqANzH)8!7tbT+Yj-t^3J1rhrS+Cl^>7Wl8axzT+C`9WQwp9t zh+Mar6xqc}NW%V<4^sY7;4KJ!FNG8w%oyyvS>WE$+E&#?nbIPh+$9`HVvJ$3qn@NJ zz~AYXO`z<kF>+JE4t500Ffgnc;gQr9RW!!^xmE?(7Mhiz&1YD62U++4k!<j)Z1iO; z&e>$(+@N5Ds0}a)(^oRF(X$nP>K9*|bNR}q<ggbY@+v4+kM%Crfn0W`ZoMvPDT|L< z7#6)Qm805G&pk(BM`pg3qq}OqyGKX*{CoNl5iO+(=B&ym3~du5fnX*_PlO)uBT(Xm z7`f*K&=is6lA{?w7=SeIXDEx$$QY6ckB?8s*9k<41C}>$n5G2)2G^j1x0qcEnVEqI zpuh3*i~(E{m~k?clR#c2p(^3@oFXS0YhaJiz-3hDTVodIKIPdR@NL!}cuSCBg@Vj* zVD&|7V$`=0kcPBhX@x2AwLhzN9O;g}s<sCUWmHK;(_hgFR1x6m*1vlc-cc?`;eg}5 zEM_mk10=x$+#^dF?9z?9M4K4h9wynHK9y4gXSk%B=L>F;4ZO+T!0p)7^<3RmhkW3r zzV+xusW}piEz=n}>CCZFR^Ge@W1`YLd70ajr@G**&L%lP{>izfv%Plvg<(QH3@OOJ z!hz(%Ps+<p#LmP@#YRns+JpE93RDtlnaCSZh;*&S$`%Ye-{c<fZg+Ya6o={q(lC9{ z<tONhtD&!NZEx@F?rv<ZE-X$#Nmimiv#vDym#YD+(ljZl7z0CplrCayT*x%q-v%@U zE5NPH|C*u!by$T}Z6HnCP)26q0lv^>oNJ-fU%lX)0pC2{F6~E@ReQCId{wz-xZ~KG z^dGS^50yJ>&%lEC<`~z=Dh8W0QIIVr&ktat=X?J5Zrx9NnmhHSi%u3)sc_JGFV-}$ z-){#QCbWr6RmrFH1AiKv40Q!Ckd5WI64Jz9xAAW2e1_&@R=Z5KEW-XED@uwJ27td? 
z8XZ9g630t5J3qPIA^=1|mV<|p9iT114`V0B!y-_w5d|SWp%&n+zE_t)qInp);uE#D zROPmG)rMFOJL!VCAiXR!23)WIv;uHldul0ub0LfjxI5R0$n001<rn5-on@m$%TsaS zL)jREc^QBjnBQEP9z=*yWxIz*cks}Yz5P;gS*w;oAA&058hj(;2nv`g#Z4J}BUE&o zaiN+e!gLtrxl(qCClg~ySd~(Q-{Rk5ce@}wDqo@hk{k;bmlvj26+L?Rn*?V}WeN0T zpGb{=gW$-FTeqrGBPsu)<P=h&DLF$=n``s?;zIHpJNB|Vh)CQbbM5G4o&>~In_I$L zt;~Z&I@s#)o%6BxUSZ%}V&j~kq8|Z?Gcw}9-x28nx&R5}V+g@SfV%!D4LC3{&7WB# zGzMUQjTU!|MA}bZL;81Y8hRTK=N>K!R0twG;B%mN3^54SS{;#-``FDmAsA!^L!8Je z{kIhk*2p$=#~-=&q>^)L7e8A>MIR)pnvyE>Dr298pYgrrr%nV%QHAO-xFLOwcSaqo z8Ong`<PG(5WcY4QuZPF#@r_#DPv@|Pt65lW(z-QNDe>_SDf&O*7s?L1ig}}&#h^RZ zv($n|fSr1M)}y$_+Q$()LS4{O7DoSeOAoqQlj#hcTqBeG65||0i)gmy`Q|!^;WX|9 z5I+$dd=1Q&C7?6RDnQ4Wgg}pk0Wl88RHQG%Tawe$B2Cl>G9tK|P0I;F`|T4QDIKto z!+ejUCleR!1DXM>VhN~yx(|Ft`(*^8;O+#HYibdL#Ird6V{8~)mHA!k7Z6UZrJV6# z0|sKf%9q8&O&Vk9Hoi&Pqbxje7l@?Pw-sDy`DrY~$WHDAImSi08=43S0J_S|Be@)* zxOwK9vR?da<{RJ!(C_!zCld0{gf<Bm?+sF2<5;8S+{$NjQY`1n=QD0S(KFqC?oJ)I zKozml+VE<1zQL*3xlSp$_{FAKXC&0-^DZjLPARO-;|1v`%ph@r-UXL5<+TJJQna$1 z^ZDRm`)z{)w2(Ff^p<)Q0+^or1aNGiI0CtW*->@!02jQTqv>G4WZ-PBEHqib!p(Om zV1S*E23MqEZz6D+SSoqCmb*goB|Tw{!1EU9`RK{Mdiv@+In3;jZXE8A(J6f|aR;%5 zEOCcOXO;7$AaLd2O4nG68@*qZyiZ>|-7I`)Un8adicOte-8{~C>}%q>+Go$OE(+)p zh>HG@gad2fBO=_>0%>3fxidv&F-|2fL+YQUeiJ;Esob!~)nz!-h#0=8y_QmDFB~z7 zNp^hwBlt$xYcJvypn-Dz?YY^-5#j(br_t041Uwx83?woJu)4q;Q`8OguT2IE<+1eW z=kT9Hyn#+mWDe&n^yUgyooTLaL#u6@Vi}_%T}ZVJ{?On-a9yfKvuHQTYysv7pk>Bd z&(Hzee1u~RrCxars(w553i48K8k@Z<L+^RLOd-Laxt({CETmvPU?WDh_B<j1d~VLE zufcuk=SIuW7bUx3vQ7;qBWOb;MW%&0Qn%tM6h!P91MYerHDYm8hjjc@sn0~6>AAG| zoO~?L!2dQGk3Cs=rPiGm2Ql+h`rb9zydS`yo75CZaSFTol1?uG=kKqcK_Y=@+lw1F zxd82J(CP1oa`fQl)B!6&GR8P{GJ-X-f&N1H$Fw<J-x8<bBfdxA1CBIUU@g?frZse@ z6oi&k(4_a5Tuc;;-rMm!o0I6ulh~pf;2C*S&{P+B+1p%rSnEYGuKYF9d_azhg{u>q zBzSWK&D0&b4Dc%Cr(mYez?v&hbm=cNZ18kC9~|C32ERxkb{dA{voPoHF<Ir0SSJ;X zaRUJIC$Dv*Tm&j^$@P*$pywOqATQL9D-;VBv_@u7$#{-(wOp?KbJkBoDVHgFSy40u zaO1}=F{LbbqiXdCDJ*%zr&`no@aT?VR<DCO4It&*MTeZuumM#BoI!<+UCA;0GZ<{a zv9Q}j*9_NaB}K+&AniTBRtL$ag9Uk{yvS$ULSR=Pq1Z}zl1uzk9pAMwT9oH)QX@|E zBuT1aRzmRfH~6`le1<q`wPyc5<>aA8VADxO7D+ReJasZZ`mDmChl{*jpBDVA<u1ws z0oigtSX^^o6K4H77!5}x#89_a)ytyp;zjgu_*tUpEX5spx1*4EUDi6bR$$$(e&(?V zxJ|})nzUr${9egf5J}QTAaYd^G&qwRsvQG1b~Esi6)D?9jDFZPP!!E8Y(=uIzZycE zD<A)NMe+=3-{l&Hdl(i_<~4W<Ifw%}2r!cs4rqwPxQ(<?<X}`>;80v-Q=1<c+l9t- zwV5);_iQZDp`OHp`BgF7)c%*&4Ho-~uX@M{qgi}9PI_<^*p0TnqP^C>tHv{Wco?bA z^SW&2`?wLvDK$c?&*|k`Trak~s8WZjfu_oipeDtStKqAb@aB_`ZZFTb$M&lDf!oK{ zgvZe51jC_WE!A*oai**{^o-cD_A!m`f%72yXCCt<iBaEWNDyc^Yk__?k+@P59LS2! 
zD*Ft7m)Z<JYLa8xwtC6YpIl+tIXB5cPKr_<J*B#s_(r&Pi(Q6-D_6CpkA`3AfD|{e zp^GH;i|=7Sf3shdkRL=q9>j42RpulZ3OZz(SZj>hY=q8g#8uw!r4^N_ACsw{`9S;F zLaR8$%*fr@<?4`IW}SkfM`C^IFEa4b(w5Fj405DbX_DSsemO~g1YKJOT-5h<ga3u| zVEyT)z{Ya+1Y~bE)b6y$?CBZY7^3T!=+<}gBycb~)c$Uc>a8*sEW5aq^Rv8RfGmz1 z;9aCjlgJFwu>KUOT@!(dP@aHJC%ln2dV3b04vBGxGw73mGv4Vue{Nhz+c$76WJDf_ znMQ7O%wV$Z4tYeEbLI=W=c&?vQG*ksy%H)sX_(yrU(Ok*I`2{)N4%Z!QNaD?XzIo< z+Js82bL40=`aMtDMaxk8>i(f9_aNt<JX&|&(RGwqw76cw-t>+3`$xF&q@n`ztlPSk zospTL*S$^b-^2GqO+jzdB_qc*AK$Or#<!`anJ(R?zR!=REq%@QHP#NuVW@b*xE0e& z?eF%u?r>4#7_J)0^oAHvqacjcXR5G8qEso22m)0<cH(MG5*zjvUE)|D8}FqMrL4!R z@TqMG<f*QlV=GBK%#6;BzTX4}d2LcKU52YrWx)J;J2+n{uziw1ao1~Drm||2UN(uX zVQP<HDlWM}{>H+D&I1qDM#n}@U09aq+L7x|+}bh4Pb43Y<f1h8kdwFP$fk}xCW8M& z&55mPbgIbau7>Ts?dJF+q)I!*TE|#w!9#S(2qNf_E9a@U+<lXT>ZBQ@TenMLv-9h7 z^Z|;^cP!w&FJ<Mp59FwXo6czzn@_+F9j7>Nj7e6W>PHYpqs&w}1~f)bwIo$M1v%{5 zE1{ypejLdq*!$SpHv=7zJhOW=CyhvTA5}pqV+Sv13o%#?2P<QX)uPaXc_{LQX9|_j zqQ!7?R<WaozX#1jXfBWi?3|F8r`$u5*l)LBg*06F%K6_;phmUG`giWJ8h^TJQZT_G zcrxepGG@)H%^)c}10D87S&;ei-}o37&pV-OQ+mG59~#YL_A#gL&(8yAfvxF4s6&-+ zSVv4S{W5YwOz&k@Eq<u6lM`(O3QZT>;q#W=lIXT^gBs)=2oZZhzob$kC5k=-%azP} z?4S2SJW3-Y4zJ)VxpC(T@kJFzVKbUs`@w_=qK_G<8d3iKsTzSotO0q&V0P>aa`a6G z0s1JuS)kz9M`z*!bW(ot79~vU=Uv5&Te*_!C?*>mRO<l!Sx@C5uxOy-B*O+%3q4^D z)>ql`jW>f1+GGD9ZKg3CC<9LMD*f8QQxazN=A^FwGP@i6#|9FkxzvM)pjff21J~c; zBl!2{242Ln_Pdg{%=t$QmdKQ5AnF_ZTnh4z`^<jK)DBQs{mE`wr<Pg(eS>0Fiz!P2 z;rbDJ4;pe08hRhJjjA>f-M_7`AT)TJ5G4EOk8u@7A=;5*<(^*UjuNeTqMsfI^j~xr zR$ePRA1rytdIhj2yrq_~;h@j`(V2qV_~pF7<-Fd8)5z7X!kfZ&dA+WeXRm#?6?*@q zyRW5KfzOtkYtf;7*8pEpOy(_E4aGGbn-L_w+>!ZWg$ZKCWzGoswJ7c6!9o$flGDLh zL+I0RiQhxPpo;EPi%yk2O!<K6@2{H@u{D;Pt~D^eAp|Uf0iQpDmC8&~fo(ngg&2Oz z_umDegbD|aSDt#|jUlnX55y%#1|A?;1e_JB#)g}?w+<|DiK--$T4PDt0?XRQI0!$W z%=G@zZ0cyRc{OCU4IrMm@Ro$A+YW$zI%56j7ajJaWTy?jN??8=jfOZZHdIy*GeYSc zV##z%a_KsC)ifsF%ryY(&iXB5_C_G|K~x5c-`9hU*OQH}SGgtG5-O=wXA~`Dys;BJ zCK%1u_+g_SpTsJt&O3p65OBm4>Pv9*0G{oGZv_*UV6+`C3sWjVd70kQRx5kHc98x3 zfp}QA^P|Au4fUPu0v3rSZx>31^;+QgbpG__I8R7Hs9_m$ZQDs%ERdc~ckCnqhDt39 zi^>|ASQa{w9Wj|WWK2_(dS%dpQ!fa{;|g?bTdMiFD4^nUxG(VcZtrjhL3aVUmI%ZQ z58@dXgE=yJ7$Qn9l(d$Z0oBN*BU`I$f7H^IEzRS_t$$I+*JuA=@KJtL^P1Sw7!QV0 z`Jk|~7s%14Wh&V0T5x@P9_%a1SlDdY5VIA<ZMu054*;x~(jslbUW~Thjik_twbF`} zxtNBraDcREl{g0=FTGd;lgxb26_tBersR<{ygo|5Zq?AQb!mR@V?qlga;!&z;lZr7 z{R*;gfBzc!V%X}??s{o)_2BCC0%yF!Q#3u)E;;*?yBkMjk4k5WOb?_?H$$PcIrg6! z*4NlN%J_AJZR;Mun%cn^P8-RkAiqpz)TWIE*DXE&o*tFbUTDjOh8J5lM}N%BSU_nd zVW{}gGuy}m47O#_7`MM~scv?8e{G+P9mKrt!7inHncrEx8au7=8VP2bobU|Mp!_ho z0Z9<+GJ1tW)|Ds5e}Y}Tzr$L(!{V?63oA<#$ikC`A4iZY8K$?jKgJ^b)6wSmw9}-v zMgOJw7ECCF>>G>o7b5-flF#U>dCO^?e$BMqCiS9Dw`mM$H2-qIkopDR|GMl3V`^vO z?BZl<X#3Bey^$3R2NMw^(LXyZ92|_y|8|z0^<U5O@)9w~nA(}USP-!Tn=BJ45izKE zI+zkMXviB`nHswgF(|tlx%|r+Nn1m6;2-LiCculC+1QB~Bup*MEnHxjh!}+JZS0+t z9Sn_4iT?F2%Cd&e)<hia|MQ+dEM1%xOr3=7Z5{0GOzm8VIEfg<ENxs&oq)eKhAyTe zrpERrrZD{cF#r1a9;*X=;s^mmNPwQNuQ@6EQ)EUaEK~Py6y&H?$5SA>AIBEGAa)U- zrRlmVCTZFYloJBgXB_=yZ*rR^ds;B}K?gmpG;bMVGVJ@SUH`XQ{zce-(GSPJ>*Swj z{pZww)CtFbLno}v|4Sw8!2SPHDV+cBO1bC_1OT^wqKDkAZE#hPe-8eSZ3=M8u`TtV zi2RQ%|H#M6@^2z@{PQILhsYdU|Nn^0AYuw^ZD=a)Wavr6@c%OY7rjjXh}W0%Aa4sJ zkn=yK+_uI^0)YJA!u`ur|04-A*S`z*pNuERe`h@Z>v#PBvutl){w81mXu#KhCd$8! 
z6Vtz&E60Cgt{ndzb7cZv{qN?=4m7_1@NWKZ!bsWyZPLO1ACn|<adHLP`+q%jF#pkX ziT<PMUS#M>pedpSOtqP=Qnym2lxu_(QbnRcJk+RZBZZixgqBy4rxanV88e9Dh{=oE zN{ALd8jGk+3n!@{BKJy}YG6s276pB?t`c}j<cWD)9=Bt0z8sg!xSDC7^vU3RT6Z=R z0R#~>kgk&mVs#VUJ4b!%`QCz5nqZe3`<6}%0HYA$5<HO44aNB%#JyvbElaoeS+;H4 zwr$(yF1vQwwr$(CZ5z9|%T>EvUFSUKxqWZH_x9+w`$Lbl#)>gBMrKAvMn=Z^|7Ofs z5S_2iJ58WFrV~wdrgWpklt^_hXHxj<<1Cq);9nr!k<^(4QzYHdwEUfAS*iF})|JwC z=C=_~M+$tFKxe}m@x6P2bZ}IHzvFk>Q6#NRd+L0lWUpbUMv7UNR;5PTVWBgx>OAkV zF6LID_U-Hr$+hr%w?>Ij*pLPLp=cA<bHPTxLTlEq?+s=y-qtQ4f;Z4;BPB4#nZM-w zrxU&2iePf&#%8z5WaXL;Q=s)DX{3fXl<&vpjE36c5HNGk)dh>$ApB@5Hh0bMRuk98 zjm!F;L+*m#p%@{Bq$k_cUve`Xda@d%Hb^s>C~aeUin_K2rGN&H8z*`weDsMNZ6`3$ zlGEtR$89=uaWt_iy0O%{+}l%h<sCSyiw;`_X~Tw&&WU<ZkhjWX$T771P$t2xil<t& z04)qFHu8guHj(}wC+QnL;b1(vS0sIuDJ|8KepQ(p;|i@<3NGYTM${Bbp%cr~s66Ct zlK_ZQKdz_Z^*w%q)Sdz}d}Gz0>{xX!?Pxw{ILMpU5i`xmM9E~x2G;z*n<e(59pgsc zPE@`kg|Xc|CBfgmaL;V<)cE`8)%x)+*eGtCs7AG{Pxq0Tyk#xy$zSAkMD=!FZ3*-o zD}p@hXCt5xvL=nEk1r3wvbq)>tBRgG(Z)W^(_8^2KZ&7A#qH2Po;<U?_=Y$cgS^#c zC5VTy_}w^J?)?L&Av$4ffFkY|r=zBLECNn9BW$L_=o><Zu9?qtXMKs5E?r}(>bfzY z!g!wCDtiij@xWEK6K#G%yAjN!J^gHLH8<x+wQgM3GFi`XaLH$#!_96Hfxtt`1%;Vk z)cSezJkksOp6$7Hl_61%vG$x}#?1A;F)X&KF*?$krO1_bjyV>ihN)RxyB%oDtu$fZ zxjyegt)Uhg_b#ZSX_p>v!#Rm9)PzfKsOJNX;8MAkix$FrMOkz*O-fjiWoK?56Ka$k zW4s6ovpq<~hCrV*0f$zxeyx$zBjHfCnLAv4FE^H&UB=?Kfx?rdiyYB(>UW}-P_7M= zfe_!2um1YL?MMK^Quxrb#Fr1zM6$3S_b^rt(nL11FgJf|=Nrq~Gz%f<zj*A(3&p|B zK@e7F0&akf03!JJ_jdOddlht+_`m6j|LU0j>WUc|7+C+^8FT!581kQ9t{lw&dei(j z1A+h2n@sEI+Tpe$dY{y;^`O8h)N;O;D1umIw^;ZSSQ_Qgd&C!C426)kCv2^JJn}aF zp&OsnJRfvSq=2M3iD_dVOlLL^(fM$CeS4T42YctX_6nxN;`Jykt=xWyJu~urI>;zl zya-S6t&!JU85K=KTQB`-_^4GQ;xY4_k@d$3#&p=uO}(`94?Dixj;y-f4Rege%*xA| z<qcNAPI>9%L}>2)bMHn52Wdnj8<}kB$zo$~PGW9P$B?pJqkt-Oox~C!>ETWFc!nc^ z*NHF`*=wLBU1_%^n*bnF*O*m$$E5flccGeCg%xwQ+Mg5l<WmoJmwmf8yPK4<`q#Oy z2XB)1gFwEb!J+ZdBX4JK=o+y29D34?bKk4HG;fP=Bxc1V61F{iF|C1o?o_%h{&90} zZT5ZvCwr2o^$mPm6MnuMH-`@08~63%TyGlNhi{vbv)_0?dV4eHwqAR^dwdx3W$gCi z{x~z8H|s(kt$)0vSGXFreuvo~zi|4MbmPRFa|p{Oeo+Poh<XKA#@(WgYx8FgeX$SS zr<V9LOffy}Pfp%J_%*t`3MCQ0c{D!%7~hKx{T|{EG(2rky(ggc8N%m|fh9<c>=Tq| zN}yez{1{N34CASK2%Sf2kqq61M?|{_zYXXjj!+aE#I{qlx{;X_$L0Mdbd9(GiE(ip zkUU{H#$?nTH=r@U^%EUXIWFex!4S?0vz`m}8^F!Xtq}4*W?Ue+1kn+F>f2B`TbIb` zmfzi1-zZ8WR{O?JX!X*HqZ)Jx2#m<Inj9WBW`zt)!H7QDR-rnF5nFX}U01UZ2;3>! 
zTLj0n^5y*^<S?>nKCDtPE|Kqfxkh$ofe1t%%YpT;{Q@hesSi!@-HRU%ml@WE*v z;?*hgn$oJnDHYh1Fo;B&+}|ALwTA;uf%0|T@`O#utXBvOc#dreb>^o(K-bDjrQ$1* zllk{_ySKT+tQOl6gv>UzYyd~_7%VZ;v!Lw;?eu&&gDOLF3^V$8LIWF|4nPaM`bpMq z`XGT2d>Yu+0F3H>0IaaHDwdo+v`Y<1<b%tWM0MI+b6({l!htweGdPn_4Oy6#jW<JA zVP9|*?9TE03}xU%_3rqYjP_y0`eeH2b$9k1sXY#<1>hUgKzPazNgRN+Nk^Fe41o^f zd&0-d*&35m_G`Nx9X+0}^XcWQhhKN+M`nL6E4SYgD>LuE7pZ&*ZER$J=cJg^r`OZh z%g(*ojR1^*g&31yP+Wu1Jvf3n-~BP2R*)%L+C6s*U%N%Ac7;Mi5tzv%W#wp)_Rc%a zqK5}0?M^H7A&<Wz55I4Aj(9`&B>MYLFkHZU>GAeHk9OT+YPN@ihmVh=0~_q1s5e|* zuRf>MV>F%JMJL6EL`6}5tT%+{`?x$gI{#HoDSl3tp59hh`*R)l+kWI*QQ)i0lSB4a z13YNxaod%jwi1-B9q!n0f5$G57xvYqk=VRqb3Mdzg)G(gx;;&nM{}R~mW`s%e#X%t z?)Ai%qVdmj)%yyb-$0rk2^}1=S&GD<I7YQtc)HBmJ^BvUxo~vR81k5Qg87Ug3E*00 zs<*Q5O^zUs#V9y0Vn~)AaKYTGM%v4Zqx9$VRkDWG2Wo+>D}7A+W;H?)Ub(D;l&h6= z=H!eA;@D+W^C4^)0X<_8&yF9+c^qte$4I6gkoHj({d`Ff9%M515&O8i4(SbupBMIH zq`4}w{{#@%dX>XMEg%$9lr;;lHHkZAT{lDEjxCf-q`*N+1LT=5Jzz=TI|-{^U<#rX zPAS?m)_3#*tH&c3dc(dH8dgXzN)n-rz*t%t9B!Bhu$v2QrK~v~VC&XG<J(kN9tjH) z&!&Q6!I)oYp_~Y(GK#SC5nU_y^E=R~p6I{w?l}hbWEBjqM>ItG^Q3L12KW}VLqW|I zj}B;CK!VcdTbw*E>cd(n-oy^N^ntUvw9Xv36oDwn1ZEjDDLk056GzJh!m)EtD+YHS zk<#B<owc1!M&m@bf3?C*hP%(O(20oA-`I>2nz7XNpRj~b1YViW*l6iPh4;ZGY-}ZW z3xKsdY_EJR1p3F6gSfFPE5_!w?j)9@*jTxEtTUq$D2#xi8wTBll~*Bov)wq;Xj)ee z)fiJv30w4ks0uNlBAZ#4YaVu(QyF5yuzEre3E8&5(KgK$N`R=OCixjx4q56;vodVH zt>9(@0C5IeMGSp}5tYIoe%ePHxEC<uTIX+gMst;G+db^X@f@rK>yX-Z6v@ob5SnwV z5d+%JL$Q3GCo*Xh#Myn2TJdF^Rb}H|({RBfn2H@O8v@a=vh9jvUGb72l#GmAQPM`8 z$u!D(;1r9jy%!C(G(%qjk18x+^VW#;xBRHV6vx2qD|d(<HR&Ju)zJ}<2fLDo^;7wS z)L^$D;hc>)7!6JPVgun_{+b?KI%UTY1-I-aY+<YFw&D*uzT2^@dQb@-=){XQQ-W6= zk5kR3=jV;b)Cqr;r8Z~n*d}n?FQ6iRj1&Bta!;?_y}9c~p$g%r-9Ken#2i<G?%yGD z6<rFfa>D@mly0DrwMbvz;ZO1V87GvJ^56-HT$FEIqOtwkgLVoPomWArb0LS0GDvGg zp_35JrxMk}>3?#7B;6vkNIit71I5ZCtA^W&1O|S{5~_!K|DlFaGxss=hYG&JuLA#V z2f3nZgi}>H#G&yAsTK61V}wCN-QS_E2I<g1mvp4RLy89<?1E~(lf`M)t{ST%p7*xC z>`aE!YJ@oPN#(<Z3aU|*Ts9gjwE<iFZ9T(rK9xwNYyDg-Hrf`)yFq%BZadX>j%gA& zhBh@tk9*YYP{fdpA|p?D*?2ds0jeZ3GyCIebvb}$XgQ&m`D59<{n5b<y`jig@s#h( z7s2OFc{SLbY`Lh?n`n+_!}AheH5^_S9m#-cfn6<}dLG{|gYi)d^p%Xnip8}Fzw>#T zBQj^yq(!k%ej6iRbxs8}d^ge%M_x+Hcox)U^y*e|TTm$Bd<q?|%2tP}jQf5c!|SAu zSP53>hs<FtcKX+i=Z}@Oa0lI>)loW8$NT0Wlo2{oJXi~Pt5c-{(~`rIeChrz<kuD* zTvyuk&K);X?Gl_ck3DEH(LB4&uujYC0}hFCLv1Ct&CUk$<*3URj|iNi+qRnMerryv z$;Ml&OU2nSx#;S|tR_UyD2)%&J_Zsz2i7-Nd8Q)1)%kf{VgxbaoC%&JnDW`c5I=L$ z`8o<`d=?%EVh}NlHmNL_dHdvNin+O)XlZTI8c@sz9$HA%gGoF%kAd!^DXLqQKEhnZ z^^ugFjkFgpN>Wu4pY#414?jA1cy&kzHveC{sJx`rgRXgxb_aq4zHkJwJ0~;PnFbs; zVEWAaQDLeF?I6NK`!+iSLVxgGj4QK@33$d79#c=ZU|%?!Ivoke5z_3jxTC7TC^F41 zK<~8UEYX#d*C1I$@(-8rz%b?zWH{`8J^+Le4E*kaJaXgtGk=B_>#4AW6OC25kKC`{ z&U}mk>z_KV8VX5-0~A(Q>JU|+6$o)M_gm_MAqQH^e4`6}6e_${*|t;VcPuK$t?u=r zytU$Tp`s?vK!X<N%UkZr7yqmQEvaf!x!=1BKZ(Bh9{Bpc=0`^w1pMggkqlJ{bksdX zD`UsqOOyZedQgV|ya8VZrwBrpYF<GAmkLNmIJ~DAm!qIWkLEtPB77eei~BHD9de_C zUzxBnmlBg_q@UH+TqdR<+B}vz?oNaiAU}wDb=&2X44k`PgffWUV{?EV3G;$FuIH~Q zCnJ$dDtj_4b3EkDjsdxtE?WC)sIKcNlHbonNxi0^JrbFf<n&z3itsNm`5{G@>Bzew z?1$nQ7mBk539%Mp>wFuAeF(eTY|;39Pi7l^hmQQttm{MOz~vA+-vg_}dEPd|U-5^5 zuWRldqppLAH&~5I1ee#XV}4rk*(q<dvI=f$VCE^rC#rj5D_|qv==@}q{fW=PYE7`g zb85+6s{YR8F)e=|jH*%|0}IWYit4MJI#usJ*%P55u<EFfB6xjB14QnPczB8&r^bs8 zVD|0>OzMe>A>0Fy!-LE8d#P6;gL+6B;|ee^~kyaKBB)zZjhC`uTXXcyjL9o~kf za40@i$#HMSB+@SpdF^<MrZOHboj$b5$BXn9p-4oX)C~!Ka3Du4R^Stm!A12>W{(h% z!Fun<)=R%$yO<;(NJy<X7pqR6-*%uL><7pUOjpy=>Dpj^B##5;g@xj_4DvkC<BKdR zi8msG!r?Q=bkxVGk{?uy2IckbO|-9+G5gEdpJ*NA1zsGi9=jxNVjo*8alj;BV9^r! zK$b@h8>xs2HEfT12-HMHpQP#}2o|~{AWrN*5eJv$A!p(q)fB`rojTkdh(o&FF2Zv{ zw}R+M_Y_d5ruNe&(+C>TW-3;39^@c?Qaei=$*ullQ)C;4Mc558?bZ$5zYkDk8%&+? 
zGb~~BEv=D0Ra2u!7_3+!eLpU&zUL;~d;LKS^*?vo>eU?#xbU1OcctgEU3v_xf{1CZ zsZ`z5H+pWay(k2g($xp`lo}19C6JF0$PJ07>7`eB-`w(A8x?CR1Et#}Vz9|ZwgCA3 z-PEow7pioftxQCPSnJnSNOvR4F|7P$du|8o)4ezm!o@@dcCH}b{(KZASY>}<KUDji zo|_(sn!Q6DG4^VrMx>W&FZwntC*rq4x&2_Pw4<6oLujlbvTxMfXx?-Ws#vu*#__~T z-VQJguE(ayeF39Vn@~(Uo?NuVpg%%jm_8xzj`sp&Ph*HGJ^r+bVcC&3q18I!U<=US z1w6*(C%UnA=0c?&Zlb&6{^8F>?R$j|NTJmjEb<jMj3$9R00vX0_2VVt4;8^Rpm?n3 z)oA``s+Ut0T_U?XY_3!)sNN)YkHw)fDuP#3q=N;Kc#@Jz_WDQ{qGu>qX{shLYb?St z)>8X;Y-Y7pwJe<)lu#di<EFMf$S{Wc`kn;*`UpU{PLUxHrre!@RYEW7F;B-a=wyES z%IP|5jSne^0=trN$(+9-K`A2=X6V2|fNpPQ*)~AJ^t2Xef~G{c%AG+8JC4I43p*a8 zwe0f#s2%|gedkJ=H!+BU21PpIDK;3??$FUS8Z$n(h5M;DBpHcd3xtvT<aJSyfhq=r zsNL<Lh^4N~+-u&T&pY<PlK_KzW0ddr#JTW!YQk}2Zz12QnOYhUvfqYtn0MA9h(2pN zhQ8F{lQb_z#clZFGNCrQ`&wI)znCgz5ufKY$2H6wYSpqL6oD6r)23&a2gK1R8ik1^ z?geEJ$1bV`h3u`KIeW}LLBT5h`m~o`*I9ioqeXCTx=gxKCo_dx!c*J=Yz&wat8L?= zLInv{QIAx~zyRfkr|@&eRzv(uGe&ZbI_2hv1N}C33Hb2TiZJa})#A{@C>i9@2{k8f z?tZcx;brzO!Wd68=!N|R6<KCr9>3?lt+5?hUy+5#-Dr9yjk9R~z`0_qe%92(BlLSq zVZ#>B7z4G=G&yr5)OxDHzqW_T_?bj>qu*cGw6D*=7_&IkTUf%kFU7==l;PmV(0!1Y zmO!Zw0qL_$6q*3hiwuP5ozh8)&D;^Xb+mCohZqG`KiBhmD}q3S0G)Zyu$tl_S)x_2 z0~s(Zt|ZGe;W2)PB<3AtfH%~uG2WJ6n(84Lrk&Z1L}M%%d%%z#4S5RMm@U>BEEWSU ztr-nP3xRgG9a=K?^Z)@$#Pai24uNJ={r#O`FY3^ZdcZ7SS1;t=>n&h}73$m}8)r0f zKXa-~*!Y1B(2ENc3tZ;LOHF%To1w1kWgzmm)l?vz9j_xv5gg3aB=B>#fq_90+@@#T z@5p#6n5oj=Ah!fxJ1o>&VfRismSjpk_G;mIf~RL40c~{0&vWcWzYwvNN#U+l1>}<B z4BI=36dkjvZVAP!sJ#$KV&FMt@Tk(G4qH=X8ShWg`QG7&o5RokSJCHhE$45zz{<ez zAEFP#f5Hp@!b%E%Gb9B=$G;#D<39qBlBtuOi=(lr69EU)za$}HyKf!Z*7<wGU%JsZ zl=_Y>W@#v7=MJU)*D={SnZDg>nRNe>iT>eSfA9Hkl%e2gXRK`MOrT9buOK4!jkHYN zze(2L088kfRQR7%l0f&rsNsL<Q^HP+Q2$cb`S=LvMctjnm7V_*#Q(|@cVZ-9{7a5k z`abzLy!tNa-`mAF=)TMKPw~arzYW&EukKqUXZ<#q|DD79Z7}}ZVEi`zfk9$ybc_V7 z|IXn2&S3wyJjO!D0LAn#aPikE|4rupDvhKKBP#*Nzl!sh!A8LG4{`f8*a<lPIrz81 z0mb=O&HY{K|5R)L4Orm#-vJB%Gb8!uDF5Ps-}me9+xS1jg#Vv7;NKVfSF>UN4-WWu z4gR|&{li8%{&%?1|5Fag`Okg(e^V*{A2{IO*YfwH`X3zd?{@Nkz2yHL4)}k^m;Px8 z|0`Mgr)m5Th#2a>N5ua{8UF1pTZ^miirc4svh+rxx$H~ko4=E&MXN|88XwCFAvp+a zjzq8+6(!>hM_H~-qWzvQ*5S4Q#l&FX!tKB5Xy8UHj0p+Djzd|fkx^9Khv|HNk=6KT z<E;zMXbT8pD>5?l&sFDc^F>zos-E@l-49#0bzRrgZPWiVN$K(W)B831^_=^4@%8rl z$i54^YWJ4A`{k5l-sW?^dAr_o?fY`Vx6J?fbYa(<ySvu9D=W{f)BBOL_4)Dss~6Jk zIc3Z6Q)jm{cQ@`Ny>_>rT&DJW<**97myh+$Jouw)5a0A1`?llF-Ot6Rfc@jy-YR)7 z?}L%tht!wZ3WuBBieAR|_OGkP12uEo>No2)?JxbOII1nY+CwMLZ%>-@!%xWw?Tf+6 zZwH1?RUQul@5h&?PstGKpE)l!_}w4Mn{e{1yoO(JZ@DdBjoWMX#$QgXUH3CZ+&`w? 
zch5%l2^V<}g~y!W+!}9FQ?_aK4w*IXf{x0$K96v{IX!2eTkQDXGk=2(T0ZAslVXhh z;rXg03jGNa@-i3$j~}ZTk-~{lfdU{0-OgB3!<WN%hXWH`m|qYi0<4(2bF;r0t9D5M zr|-CKdkRPuVu0a8zv;dgqENq&f%;B?a;dM|R<f55?f@l<q<eqgcYXUYw|VAkDImw+ ziVc(Niz&yB-@EzDJDPi|Q?n2~DEzeJjm58s8p{nGLMg<f3G3x@a9EBDf3S}M^C8ZM z6__7GFDv(c9>fPJk{XGh4<F*bv0%sHHG2lqbTs=3q2;4?*GvlPcPtH3!@sE9Z26d` zH+IL)AX*67i*hgl{KMyt8~)34b92axi~T6l=2ux7aGpcmo_wg~^|tvf?;>g-&d}NT zS;YQQ<LBP_;TbW$uD3>UStf8Yh+kxAjxEmZ#;zlWEpkY%1zV$zc0DwPqqEv?=M%ar z09~G*hofe%Glb0v=Lharq?BNNoyZXVS5X=;zkU3pnU+>aB$x-4;tOC+3LakeKO<TB zX@P`zZJb#~G|TS>)7GG|p^O+pGkt1QuN{;|l@D-+e;mZ^_%l*IUNo|9S+l_X+EHf@ zoORT0Hx~TPN|bjR7}n;hM`yHWLoUf^%#18#<(X`ibfaL2l~5*?lQuWu!8+h(!!aX4 z=wpVxa09<vX)^?>!;;b~q3gS0W*Xn!JPJ&7YNy|nBQ>pZc1w6td-I^1Ufg{>#CI&P z&rszY5J_=$J7l;Z)Nx^+gSxv$kcRJnAPm`WY&Jx=VrT2`J`zwJn7Qqp-*Hp&NP*>1 zm0Es{!zHwQcE<^X7}z%;?LR`aXM+#+YsjX>F^6pvUV_R_a1Dr|A!mB%Vg(9vf`E?i z3&*6YfprLK=OAF|Y0jBzhi~NXQIbxhO=x51usJh=scm4h@#Js6vfaL<9<<$b`{*t3 zYUVDcxAFgiS!@-z4o#XLMlH#=22F~f^lwt0v7dlii=gK)>5Tu%Xbra?aoL}|JY_rx zC)J!9r-bB&_I_^D{E-LH^xigV6;MTzogyJXyN-G(+Sz9RcwOCle3cTI59Q$=e8O}D z@AO+U9{Vks@XC7`@^Vk_9kbuRNoPnLw%D{Y29kvHR	G9AM*Cgfs2iMff!IO+{Z z9FnUN0OG9ern#Wdt<GkHY=V&gOad>A!oL_EWf<t)*0SSgij+#@hk=ARl?en*G|5lz z3FbjT%20!}T`E!k10x|S**pCZBmtmxL012A31l}+_%P+DXi9$)$u}4Rfm;YtuXQSd zz6kS>U<$cGdhAKHz#0UOWEvukg0&frG*WsS5iAe4aFB(>glSc#4{MEd{MKYh^Ryju zX=VZ#45_#$0#AxkkcEUSL7~Vxjyae_u(c$C5a5mZ?9^%)8w!Mx4r08L00RXSS{U;S zvYP!57e7Yj@cstAbpAB^+=lF!&n)I7956;N<A_L&FCn*<SOK;7A@nmY?O982^$;tg z2~2}+v~d4bs_B>FQji9IT)6Lz5dyANgzR~oFn)^S>UhcTUNj&IQFVry6EcepyaN;n z2z|0A)nBe)uqK9^^QQfGRe>p-W{e<rC;B5U+P~rem{iVk?!Ucny`?H4mHZGS{P*E| zq{JC&;P2V$BkWm1c|9T`>qwTmme(@#BE5LH&^gEi<ootLN6?)5;w&u@zGy<pFJXjM zFoX*F=ZB~22uNu~xnVU+P6zGi<C)vS?&mHUuVx&rb3U8hep?E*K(^O8>4X_<zz%jB zTVhs1(jp#niw16rL`4iz$Buw{o%0n13A$zzm05^X3$me*-iLZEG)!j`?gyW0V19NQ zS`SBQg{iH+^R+^Y`+ky|I3u!`@ItIO5`m{akqvR})vKA}%EQO~`b=Xk5=&m0Di|iS zORYoCum@`f<^W6p1qUavWa+AGUciz`5talFTWSdWj{2`W51fevuM@A<)u9vrW!sx! 
zpy@5@8l+WqgG$dhVvu&oi3~%fl?GHul(+aW4Z@+NZ@%{S!#SvEmyATor0FVQo&c!u zp#=a_u6srZFq^CSf+PVgYy_?%N=RA}jgnTkI&0?cL5B6yd9pu_+1W5t!ch39Bp{pf z5uBV6B5|&v241W=2Uk441k&UN0_xArv+l>upi7aV#O^sPnGb)8mkIjJF_d7n&1s$+ zqLgJzf{`e(r%sHsZ>RMQL5a0vDQR!h8b(8d@qIqXkO=y>dK*|x(_te2r9eP)4ikHp zXi}gbKx_*f8v*<{$P#5|G%e_w*o-S~3h-zX{ibm!up!AB6sPA+2V>;n3#{N(Tozfw zF=@JG){<(^U}z@okEMZnU?2^?v#66W-lr&YddVl^)AP*Pee<zYD<CJTQ;+~@dd~gt z5M0BJOr)WRP<2?9BGnKQo_$ZYSaK;_Hp7u{)CI#WG;(HBf&>eGim6V=4Ijz-EUbQG z4O8R5u!l`UclJhH!8<{NtVM<d0K5TIi<VkJ@FzPU&LrN`Qi>~wA#J=kP9b5qiWPpO zh(b4I8rOC(lG`5UfQe9kRnrocg})gV>aO;3)M)tB4CZg)sDaFFvbP)~^v#l?1O_;d zEC!uh!<gd?48V1l{PYf2r;k$31VBI%Zc4DiDD)v`P+>C%{g~4{+)6;P*lKC>Nl!yH zen2(6?ulB9vjxcZcNP!RX46VqI$FV5rkrb4P$1L70*&`G3ke}Z)^8^<&piQtuWd|6 z`c5Y${kcFyf-xB^ekbTxc<dUt*MlbQBkdpHit9ns7fRVg0>ZT?<~w%NL>-?Cfl!rb zZ|XH{a|nZ~5Uz18kk7cdXdg=$*F?H60>D8_X<B>?6$t1@sm0fZgIfa!VS<?fO&vpz zD0lb@l+%a{7{|?_eR3$&SB6>y8&oZlARN4+VMQt_LrwuA$kuEsB^`WSk&McM8K&}5 z>JX^Rjp!(U!t>&M(p?!#udtylTmiYTQ_vm0)*r-<)0v`>tyw*X>>MChJSu%TZ$%3~ zSO_hZhFZ&!hb>os6%J;(Uf3bp0y&^%A`8XKTbQ@2N6?B;k_ep8rJ)hGECW%`f<eS9 zrb0?WUkw822n9CTZWVG5U&<=@o40sSFjhr^fzCc7Vn167k6-tkEg?79z!>d2I5x7h z^tb;>>Mw!wAXN*T+3s*kj1j4H2J-GC34z4;CRSh{R8Qp5e8uE-9ee1&4i_s!?da8| zTw790WMBXaMDj^>7)ieP)wb$5A7}NtBqhKSD7i*TiNZxkh1VxXFR&?iYDt>?z5UOr zn3!VHNlcC}5=9E?kRKd-0(d?5bwXq<L@@>kAiekVTbTf7Lm_zdD^eDylF+i)M5bJy zG|Vtw$s+x+KT}r{(K6DQFBPngeiv6jZUiZDl4zh0$*t_9F|XximTBUix8I5SXHqmc zBOJRpF}U|MatxfH^D4~bauNltQTns51!tv$+0%{U`9l#7*cK$Ge76C6u!*1hv@_AD z819%JniyGw+gCLCWf4`z9Ocbd%;9kknQ2-zx?W(P2;_<u3q<k<8Hh1(-QmjMm)@es zt97QKOz-dzx%~*F+nkSYUK}%A@$YW+f{@u0Le=cr?Xe^+2}_PbIhsgjK7uJ#Mw}bi zz^_K0C4ydK)M<_(x5_`PhG@GpDDLjL9`Vx)&XdsTLG1lX%xN>Nqp^cpg?bG{kMT02 z0EKs|+$;Q`MI$GL;<ci)la2@Oj2?3+Z4BK^du|zd_qR!dT4DMtQ9K)sC0yd<E<+6f zIJ5$>ysGoJ>LW0JB4Z;TyA<&1-nx`41!dh|#Bd0U3+OtKwJ44ZV5NfA%w}%@hQ^Gj zfTaXsScdPhTUDviTD>k&kQQCV{OUti{hU#v`T_N1=9wBsqKpVzC{a%rvKk@lIT`k& zmzg-ONKy0-nG7IEX2HqXc?1(mi6p_BU)3t*;tVErS1Vdn6G^dmIxg3gJ~JVY<4H7O z77baE3<`Rc8`_b8I_TAVf)wJhUhXKxy#5Z#oB|Yb-2nuaeheS3tm_Iz0hGLjcD50# zh3}ysVI7)GAztxi&9-R+K~Y>&TI{NOzGzW2a+*ds1-gSf&5H=@(V5_i4vnx?1uAr? zG6Y-Uha3^sq=j7qGBy4M$iBw_CRZlOOU?+uMP)%oU;|c(*fKFHNZcmNTrwbWffy_f z`wj@(WyV2JaLF=Lz%$8_4<VCwk+EifUT3$$%w8L62)cdZNz^f>IRw=}vq6c6LETWH z*v#<~JAaxBa^@ih46Jn~lNPRW<Xezx3_Xa}q(?fhF?ed$9X4gfL7J*LOG6VWNB%s8 zlUSl*#qIbyO#)(SOP#1QGR#x0=qLgU3L|6Kvd9-7@_zgG0;DFqlv;0tl|1Ga7P`cV zu&j{bkVl0=enPjAy2pkf!spPQF|l5&n(VqQbC0JivbLc{IBF@bOl))}9fUWZcJw5* z#+qEJ^hiA{PdI<fHniCX=b?!91y}Vc4?M;9PGOyFEm@p}=4BZK4#Vy-+ZJ4fRiE~& zE;-ht7WZF`_N%Bt_*>!%&Y0CgP=v@$nb@^_=AelxnEcOJwte$KlsInu(&`6x>MPx$ zGwF%*<{pv>`W=l^3^VKQCvNH%GPaw88nNh>A?aB_14ZDLMKL1ah<!FvNS_QNcLhT7 zplsAYDu^JI%$b91$^>cp@h=^tsb-6l8cffuEHf9|%9^9@I@-K}*(k<;qpnVbe-Fv2 zD$GnEWrFb-F^)2RAVbN<ZaC+bgU>5cD)S@-D+r*h6&7Kj;F(Us9stz9$k9XDMJ!;a z+&WKWg~XiQBI@F-K8-Jy<FXxS?V=E^$Pxn0@jZKjWfwqgE>C3)3VIUUgJj&p=uW2W zgUJkxBs|FpK{dgv8p%mi15y6;5Ed*28dejIa**O_48e}nY-nsc3pOb3=v-op*96yb zDbOMpt$0U(i2|mSQ8(C?*S!njY>nFiNfR<qkCgaC+{i#0GqmBqfa;H=V^Mktj>0Td zv?m{mH1X_XOH!yPWAbW%wTXgYFfgEl;2IZGs@grH*PtA-8i<l1Z_bzT!cL<@vY+`t zfI*B|A#i{Se~y53UnFwstArePXqYm9eFEyJb`zRikVMqs>WA!L<?oV>;#Sp!1Slj# zD-<SZrhs}&h4zz!l~~3>?`0ll;pKWWo~g?aWx=C4q>3k1Qb}|h@%~{5zF+;|PqJn< z=z$Jvlki%usG7G^P2C0wb|e4eW`e@K44`<Wg~mM!Ok8<@nfD;&Z~>iVEgwOX0`iwF zrlYhd60_wD!1izVkOZp&<==p+$_mTqF(8A5c>_-g9OOEDb-!>0N~vQZ@|~3}cA_d2 zPrSB*xyx`}nhEAJ8iQSFoiW1`4Tffv2+*p3cCGad)fHt>pi$7uLt@KHkXsE;vzG5X z1w#E49iv51WVK_w>5~>oEo<f>f*@fB7x1B3wJ0(#jHaT_m<L^LA4>MM;=`KBLV+!x zv5+u^3Q@bG;$~$ivctH<GEiwtTS+i^+iMB^O(kUXpv(Yq!4D4S-)up*l#%1+tA`l? 
zPa-^QXz7TW(Z?{LM=%6Lu~eSGC2S9tSj<4Eq=;mSgVm2{JT8Q2S)-~Rs*+{p7B=7D z?F&}ws9A0SDmCD)_EdZv@xpwn;8eU;y=U8izcxPvzA*n$yzqV=Ojs049hk(k2IHt< zvPU^#5Px+`JrvdvhM}RGhdW+hx6QCn$Wljo6@Nh*Ah=OY@j*ro`Z!P6l2!@X+@X?z zwTAj#kFm-O1eMdrWr!J;8oA8xDy-l$2GF>qLp3aupp}wYWb9)KX+^fH5q1dkTpG?) z{opcEclAv=&Cqp)jR6;0bxQ3_?Css@8nSb?j)e~b*3snn`==-{SQv89;Q46s)Poj> zg|R*qOd--N!W8!=&q|5HVll<XU118wwoncMlNVG+H6i7Et1@}>bmRqs=`x2@!7~%? zly6&f7+!nSRtmLq+v&&8gsAgbO5#$f4uC*)M5M)n!O=t)6owR{;F{-}RRflnFu;)U z#N;f9P{;`zp))?pv|5<T$Ug^hWmB;j`=Z`ZoJvEO$}CqCzyepjILkIj1vLP?N0awC zz=VlmOX@PpJm%!C_xC0WlL<me8mC=p2~3OGMZ~2=tQIA5aEvU60fOjG7K~ZNU~P0A zjPz3b*^%TNe<D_!4g-SpWotNyqrH4E*cD_2<O?1c!IB+~Mau+zOBY}@>QG=<05GNt zq*=GscvI08(eoj1Y0R!z8B-Q%II_bgMH!}@U|ZM~Gen847}MxAV<=3msW&ld$%uM3 ztO-lD7Am5nDpv^7Wfri{%auh*j2IzOy~J?!9UyH;-BftR2IyV&19yxjp4h~Qr8nfO zwodS6u7lYuEVJfxGsSwaDXqkm08CGfY-)sT&SspQ2IL`7lH3aACYT6g%VW?DKaiqP zdfTK7TNw5VYRSwzbS(^xLpyF=4rVBYs}3@lr^}Q+7-{g?jl*x{%&6u<BI()oj<Mk! zG6;;D^4Z81ij_7!X-l+7T5LS5jajn{*Y}APAYZjk*xJ@IDG~}MBw>{44zoP7(^MGE zA^P)()?%6@5a5dY<6dL^sT8PcqB66fL(Awoi5Ly$LCePAs}q$YicA-wn$@RtoQGYT zZ1a=-?~|xvNLGR9Ggkb&t>q8J_YTWgluQG`2AcYtz8{Fta>ZlR)SQv{7xYtWUT+Yt z6bhvS^G$ode634FVB(CYC@D6``b0=SHh&3({AQ(8Y-fgLwr0-@<y})Tn578@DH`Bn zD<!#t_&dxQ9zZHSO_KsB?kJ2h)(iJDxQTmJE~C6gqjtSpG+_t4+o%~*m8mwzcW^(f zs5GOz8S47pSXo1K6+?3z0^CMPM4+j0Lbe*1p$B-ZgN3f)<*7s-4=&td0|7>KHdSm@ z<n!H+Lw@h4$=#s~r@J?2?+H6!Jf8vj_Ry2h$M>%n)P2Q|NB#HBS!iAWq&s`Vd;@?P zKNAeDnYbvTjM90g3mm%?DHv3wyPcwD9vRvnZasiuA{C|12eG-I%h!6%ldNCCNvqWf zK3-R`G{pmGtlSE_*i@;R_fvbon7YTfVYj4i@#X|)y@op?;sO1uQiO+~!g$eu`r8H! z8}2m1d1Iy}tK)#K9Jl#^5_94eUW6s94%`Y^vE?1Wzm*ZE%a>d|81-kZSJl}tuDs(j z9gWf{PmvNW(r#M(X$O}vp<_Olnsp*?U6<H%hOu)qq|8-Z9K=*%FCYs?JG&UOe~Pw- zSANn%n|`w9mafzNn%yf(s5k0tLGCB0ge(~gv?N<X)S5V6(~%ZzF-gHS`s3g8Qi%zp zkMcsvHp$tVHPMdbRYQ6k0M?mYHo)dkV+tH79mJeDrfywR#<f>WI#D8LFA+f^+_OL( zcq~gDcSOUonm~W&ycL**nigZAV3^5LH#NAI5Tyn;f96ngJmOfQeCKVYL;7nKLb*Ym z!`wZz3g)m%H7naDMGKP&hLlrJYG9rT1EP~JUTJCBZ$A-SDR>$gj4dYpybCL;Qk@;6 zC||jMIn}MDhv`}BG?U4Q;@wFLb3lVgyg4QLfb85|b--fH<aulmio`u*FS2l}3H_-8 zEKtG0b%|zaM(kkHgKK$$gDOoR)Yi_y0XSXHf~f3WgN@T6Cj#S_R)WDzMNB4H*(KaG zb=UWV!N4@Pb<AHBd@i(Ko^SmC#NSbC6~pTtZrg(TPs4#J$&<2yS>|1rn`En$>;f3A zj&N{XLVi_Z!>nF;1CnqZAjOfP(VpVcn&Z4lIYGuip5QfpCO*}{@%RJgl{|!p&ngrx zEDTCai`ZEg-J%rkb|p{`;=o`NXPuUH84(87BG=+_sNSERQ3w;&-Y0}2kb{<iKyKjg z;+Nk!cxI<GpEEB#E4QDciW4jYMl!bQ4AHGYi}Qtl4R*lx*MYeU#>=wm)oCanI_9=d zSQm@?;6Y&k^c3_foDw6JoBXV|8Cca~&tF-K^%}F3A<(ro!(<OKgu}iKhHEQ;`n@fu zvdg~}QnLH?_`&~G`}L8je&P9g?PF-?wV^*9js7v)jZHs+ohzr#{keth)AKwSoTZK8 zgvykKqNDSR`+;-NqP-mwqG;9qPRU%nnPI%WI1G4FAa2>6i7-AN76ZmpJre4r-+kgl z)VC(YV@}9B>K>g*ip3=(7H3*medb|ie5(&XYv+v}|8tbq4p|O^TbTxNk&Ww;czVEg z704+CmyW^0Xmae6qi~|(cGOWXXEObh@y0)PsXPgAH_d;FCtZxaCLir0JziTX4q{i@ zl*NRWtgCY9x=rYFKKFfHNrqgV5}1!cy_B#U-ApCeAkK-10g1`vhcCv7Y><`E;*7h8 z5_waj!-fuRu;uI@Tub((>lXG7Vp3jZl+p>XRXxEAm-K^3QEA^d{EzXIP4d>dQ&p<| z5Lpib&cVjhDmqWhse4Sd`H1v^VX@^9lDKiat^@rrh1bOQB1MK8tqpG$ndCzENs3tl z3wMjC^iA|5w7%+_JM7R)p!dq3y&4g-d$X9TYK)z$FA}miV~|9Pobc)UAxhW29Gjq{ zJ3*7NeafMP2UR$D<1y1>CWW^+N79Zp{#HR6gy2)E0VZ@QYODnfg?i74Z-Z{R@B4vY zT^}|?qZs~}sn4pHp=hrCO(_dM)Sh@zFZhXN1cQ@#!y1m!uY&V~COX_U=5XpgEga>l z2SC@dpyc}3)b4_KXI(XfGE-_-ujKU~w}U;Amu8W{WKM_9UP<J-NC!4U-a^^yDv7j1 z7IuDMY5WrHhlYlhLz`=(lRLcSf-5+0x`BeGNOfo#g$-_?QF-KQMKM5bbF6__KI{Il zmY;Uak<${|X_I2VaV}DPWO*eB-xeHSv=9W`8{A(ePYIN(;Dva!0=P#DrkO7b{81z@ zYn}28&?=Oe)H3c>OrNjU&~|(T2ifYl>E-*<{8l;q43ka|c=V(BU^{h+D?;0v(ISET zC+vl>8u=*##);O<QHmI<6D8Y#0eKm$2tXsHDV~0{rZ#ku^p)&M-JfhfTQ9S+&_J)W zo~`3m{<Y2*qJl-~cx{~#{;gY?@NA1A6ldK*9WBPWIQQzYqkjCL9T623$k$?##^$KV z4kh-OK$skEv5u}4Z7ncL(eG88B|$>qu=?Mjm%hEogQRPFd)*F^g?Ua>ELDzA=TQHg 
zmi!cl2?>xZurzSxYy<G{Cz$?%qPkq?&Zzi75L6~iqpxL%0!ONIM$G)|SdB62koqlr zbSpr6?z<ea2xHTixVtk}4?cDV;;sgvo@vmQmEC15$@#hbIG|mYr2xB53qgE?^V>6G zb(jKAp+qYTKnG-wGT^H{Fqm?~JoBOhC#OvF8WhlKHYpQ<tQwb0+PGhU=clmqk}A%w zY_uFZGpf0h%G7XpE;0)3^KYt?wMQo_TLM^);t1RioTnZ}N||89As_Wbd1r(84q201 z^pRy(jv)qV$|XcW<w==M_2MW#@}Q7ax{G`HGgrgVrAJ^!AT(SeD98pm8fAxXu_Hwf zR~lrKlt4Egfg3FF0eR7TjM@ezgKd1<nikSD4Ja){^<b(xC>m0NFP*4d@r{qEX<_ut zE%fe}G#@@K)));#9ZV}{OmX7)Ky;v<;SZJc`#?dG0w7@dO%O|!@Tz9TDN#|<$Z1@` zQuR!WiomR?P(hAM)?B6aQiV7WI+d)!!d=`DHJ8r(8bk-0Ln2}@3;T#2eODI4Dl)Fg z-vPz_F=ZwGe7N0(qsaRsrK$4Q^J7cSoe3h~!8}hI)>UcF-@<x-r5GhYU_n)Kw@miS zMHnU`ahfikRj9nG@#g78!xG!RzmOW?E1_a1pSe3$X@DBJ=OP2wXZQ+3B0orDj3Mu3 zs?-j3{EC2-y!aUeiv}z8^dcEFXjxd?##@}3+Q|k41=1ZK(&hRj&<8oUobLu7H<sjB z*lMvY*&(D%NuFh<L8kNyK1}Om8wl>#DLL6?a;pAM{SJQB2_2YkM>wx4rHi}~$tSxK z{k%<6Y0NePs0z7?6M<Ld-#3O;xFuQ*KbrG_Ba>28Q%e1h(4LnBMG~{L>pZq8z@V8D zvuyDt^bf&zd!K@=sSHUSuqbE3k>EZ1<qEm%!I#9;xb#*8o7w1l4!X9unC%pQC4Mk* zT)}M#H>lF&qm5x!AmeCL8nD0*t2iD*9i<fLTI0%32edh0hO7ZJ4mIZa{<50E@w1&a zOH}2U5=z%sKK8&e3SpAd&RePih;?U#*|;6|uCPl!DIR^-N*&qukNUmix4IRfY;sM< z5+3rXhp28Ru^}wiT3*W~Ygg2V1rwc6NI{j%#MpXb*7^DA25>>qnG-8>ccKE$+MCIA zUN-(fhYcQ>&QUsiTu$GC35wuG-~H+DeT|Hhk)H0?)i*Ks@ji*4Rq?A*Ze|#4d=mj? zH3tR6dE&+_SqW}K;H)frr*z_~`wt&wEr0LxNNlL8QI{THy3g(6>?Aqg`=maf?AQ4< z+7+a_{1*=X)xDeY7plH(n_B4u<}ZCZ>)K)chn?cpt4&7vv{J(s>p;M{aY=S<QG!nW z_uue(_nhYZh^Rt8Tn>KBg|2YuBVl4;J)AUQ4TU~+pD(8MQ1h+2%l0muu1JITg}?L2 z*6ZK0w(kL78B(ZbF*FceWd~#AYu_ANjHz%JZi(Zn42Ar9pm+N-&(<1Fh;e0VO278U z@a_7+JbjGT0Q_jiuox&~lm+F3zVuJbjwizaeZ=-FXH7VRpIA1l{FWmdSS(*gXl+<5 zHM`sIDn1Lp^=ALvuK6WK&PQJK{Tg)pFz?$FU+r$!+L>`2jH6R4M$C#{;WW~Yk?Cv_ z_vRiTkxYHiS^UMzKX2zh)1=++gWF#Ux?UWwe@))~iae9&-eS_3e&5}PGi?hbZ4LUG zVc&S+`kG8IuiICBC2EDJH5#B#;Te2b>%N_NI6lx;-4(#Zd9SjMJigMyJxX-AoL6fN zm;YS!?R*%753-4=MOc|!XDtmlSit5_>6Yk^Sp6WeE{47w?n6|koPbWVj*}9NE-X{> z&5!-Xij<vL)4pJc9p5y<2%f3zYyZ2RaOGG{Mj$NEVbXxg$3IOvmKdxB17_CeCG(J- zU+LDva#$Hs>kgvSm%}@&`(oB=xCz|Qobv>Nnxlxwy=qK?TF3N;=+hUkSu!}M%voBH zBcHrx&L4}zVTAtt5q{ziALA)cSN#3u>8rsD$yT)*viw&7n6NEdhb<iEcgSgP7JCLR zrxo1ZM}-?$o;h@D)0n8#`>`McWbZE~r)CpGaL!nq%L}R^=Cg@fsnR*~X`2PCO_(ty zhkMihL_-PxDkeAV5n@|(bN7^he)BpeS<3N00fi0&mje1-cVTg^ail`|*yDzir{QT_ zAw&7x;>iRY=Myqw{T2lUKBe(OtS+jB6)+Z(d^z;S^b1GXAJ1WcW@`;ba(tQG-+15Z zk?f-Z#UcZI76-<A+U0PO?>LdwSkF1n96F<)m{KA=v;IK3s;B|vk3tlkqDmzs9absa zFMwzah^vy&seAg3=%7?uZ0iL@W-kokMgZ^z_>f3DzC9gNaxz(W|D1i=<HwW5trWth zXA-RG26<ODLL^6~@il<Q2C``iqcpm3x7_@cTdZ(Jx>!A@8icC(n`PYY7e$&jlMf7V zoWxsjjmm}cwoN@uuqF0eaQmii3_9XR;={1eRy-NyvEQ1d9CE@V%g;{c7M`*z5nl5V z3^f;C?@LN@a0?iggbPPxt{$ch6<McJ4Wozn-XN1bIEvtVVCAd}d4t1hA@!zSy%T2& z7BDQ1h4SD;Xj0&IUnpD&$w>t!FRgm}{$bTw9T}?;{Xh><I>DrbNqqwD_CGLE#L!pU zn$xTw;~7|wB^x0P>6jGjnN5N^P@N(tez(@WEi_;R`ul2ByvEzsNR8(Ao(!}5;DmHm z$JX+Ipd!^7nRIF*+FCD}?ql2gh>S<b7ms;X8yEk|)jOP%qO|Sh=3y{NP|VUZ=ahgu zoR@s5BkT?j#xZ(Za3mn<{gA-bNlO=^gUltizG(jQr-nb5Z*Qk_R>F^xR*32xC0__# zyOay;#<q<=RiGeY4nZ5)w#;D?-~dFyFg})-S2&kLlOYIxP~?chbubh=g+%yU{_dJ> zaS91{9fOsBe@UW+Bo|XPASt`l^;M&cB37|h%fRXo!tKrpVsLI!EGeewLnr5#P$EV3 zUNXYSz41jT%zE3-j|~D;Ivo@FvGZoJ*mn$V<e4G44RsF3)hg7|%>+~X^M3ey{AEy* zr;!KwT;G3hiYC^5@1V2zRO`DykKTL}lo~|pqM8~)xE%f+Zp&S&XU2*a6@4o!Rmg=1 z8X=WYNpAI!9bdzCi@c=9jwZK6E6m?Bn62I$&~dU!7G~u0ng{G9eCWg_!yyniR8feV zd}{F3-cz6Y#Uc7QXJ0S9FgiOjOG9a=|Acjzu5cL4jFs(}6<c^!Jw9YErGT%SBzi{X zts$_8h`rCYGP=Utm;uK8c9}fccJ&uC5eoS*@%^^kHEz#62W)uHXIHN%)gg&q^#K?N zvOP~KKmx7(wyH9vif<u7lS9I=AmmR5pum!bQDYJkJkd3*l4SsQL~5HOQbKMUgApUY zoQ`U+!6sRm>-~c;bkz)S-D*ii_l;bNfJNMi^buySh>wfeGoSosXxt5>uy;TJYL^Yi zsn-e~?(FPGm~>~yWMK>(2)z9iL)pD)aW44nFvJr!Sw(pfi^)>L$@NaNH%ODHS<d4{ z*ekCQw88OWK9eQ*Q>s?<6>!7lvMkSrdyPY%Bw0=gnF=&*M_0D{UQ{h1fwT7A&u2Ee 
zgl$2%rIP8AzU_0F6y#(MEmP_F(rKn*Qc@#bD9a);i6k5GRAIzQO}1ygY4Yjm)Q{Xn zlMV?+QhpDv3JoTy_L_vP0zs&BUq#6r9%>g=7ZOIZ{lTqL?U)@eg&Y?XCP#5{pdq~D zwmUaaOeqf1xOhQx<WW1_I62d!XW2tbG>8z4#HVg~rqDHY`<w|^q?Q{6J>CzVw%6O+ zSiXZpL`whuN=J!oIwG&|WD@_GWh(_~IZtgUaV!KFu4SWFyjLE>ob;U<|2WKq5eR-) zYMTt&%naT->h-7}gQ;OQcHjN;!8In4=Tn3JPO*O-hEOPN=1Uq|#De`8c`YBIfRuTt z`b$`YG6>)}G`^gp8ruS$e12iwnB~y}uj*TPIU+n~OE4_v;dH8ibLuLpCHvS}@lm~Q zKEi&9g!IeNZ`#56q<(D~tCF~Pa~^<4OJ%AOl-`#~@;?)P@-gN^2uP_u&xXFZn<ftV z0KgQI)$(TfQNaTUNc8TqcuoNO4zP|?RC8sbZ*R}lp=5g!)B(8jtp0YGG)@#8-soi2 zspZ+Qic8y?Am$KzYY0K;=4^!`z$&50o<^Rk4A?Fv;C8+^gcHw_Zz2cJ;{5Rmvsv`) z=Guy#1_t<yhvH&97J1fZR<dwnxMpR!Be$9S{hv&w?P#qE;DmL-W5#EDn-pU^fPTv@ z$YVaHbzMRAfzHQ^QVLQ4&IK|^O!jjZD-$3%J>PHUm>M3I@>e|S+s0Jz)!gguw<>xr zF(Vr_7gcC&Kd5QdA&Dxn<QtA`zg3AGwnB4+ru5%HR-O3}u;N;BH4vlABuGBsWhmOJ z8~YtrtCM>$IC%Q-m2SX5)Uw_y6mS@*=X&!`_7Pb=f~WA^-M2<Ov1}@_1o>%O1~Ej{ z-MGuEagKZuxLnw6V)fhtqEPkP&Vq+27(2=_v+Y)^S-E~6<<D$1`gDFTW*#s@=FC5M zUSHu`Vwq98b})CIrqM_3-Z8`jCG`+8<OG1V5X}Kw7VRWM)$wGRczyD?b7WnL31BBe zA=C(?Yi7=;gj1MMbrwxFnyLK9am3^L%3`G!M~?`o1}`8K=GLm!k*k_a)}E))Q^AA^ zjjA%RzZ4<OrTae+aEcs0+Pyrl57K1tdOpVf55~SRMzkp0ZfxE$?%1|%+qP}nwr$(C zZQQYKtMi(s>1+F%Cg+@F|2oOR{?>=JR?ZXi(_~K#c_;w-y_6%%57Wc!9vbIAHZAFn zo-CYBs2A)U|E-fFC*1eWzdk)w$GHzzi-yy;R@y|>^WTQ{k}G~#b^r7WOBBx6)B%|= zloXsE?c}~zuc%*0U~((e6WrLc*eNP_GgE`W_ZlvSnT%BF)tO+if=l40l-adt*$3<H zz)#O*8KjKu!V8@AoP&7rMA{blPsnp;l=Ao0E6>iV3SI*p2#pa5ck9+$!bB>4729yS zpZH`l7XO;Jl?U^=9m)-4$qguT6fRRr7l65wlye}M)M50h)0#>0c}b9*sBBIe<sv!+ zYDsDc$t*lqY=*A$Tusw)lY@Nn*=6gMQCRv5#2H2BKs{xzF*68)V4gpcwUmljS_ks- z4rgPU2m|T31k8<5Eb`1JWBA0qFsxH;vAqvD1jCE3i`_a-nwPuDBorM<CZz-mNhmgO z56T(9gqB#E`NY0*SiWZ6ZL)`gDUvgJ+MKPEVGloeQbHJH$^3&6Cegh$1VKRB;&EuO zj35#ckQBca(ZQt#<d|j?%p^RY<rPgN&YkadyY4NTvaM)3XCPHsFk+S|+;(SI5JEXq z67YaL4&pnG@~*L0SipqhzA`f9|D&=|Go~~VLWd>oAqEcGvpkVv4K64Sgq39kfwMe) zA-JCxP!fJ~X$F01nWc7uD78ZJE#`qz%IBtkf6TOrox~GP?oi;*{HTc2XcfT(ak?sy zPo$IUGM@~p<<WQEKj1U}E8haa->~jsOQ3`lLf^ruz6aNK*QG1=I%C=6n{%Pngh3fy z5s`q@*+t;2WTmec1$U2raf!^B=eD&8zdalr0?8+hh!u&kp+I-DAfyZ`i>#{dXMlul z&>F$_{Ut#j&w+$(X?m_vCwO*{uX4;aKnA`+73aVXz6GAaeM~ypM@Oi8*5Hv6Zy~_0 zqP3(@zd4n)-E`Ti_+Yj@&FoiMNDV0lI*F>{7h?q*D2Y=z(Tb%Jp7amUWHX|o)NnPy zq7CmY{PXHn{r$@Tpt3&0&VYC}r69pnGnZ45&xU?Oh@!T=#e?ULKNC+R%^_9x`U+Of z4BQMLxE3}6mL*QDCg3c>&iIU|0k(;Z@V=Gc^s;2Ft<gKHp9G(-(Cz_8MK$5dN@=GM zmjpymhrSe=0`xmhk}5l|-xHwRV%igX-4fPq?YVgn$^6|&tQV3mupygSF}|nmk6wdv zp6yV#`+-{gRce3d<@JhVNDPZ;l^TZC<hCY;#;vF=cNNHwP&UFu8f3KM*G^Cw`=o7c zvU+{bf=~nV#<wTST81FO*L7S6WBG3tfB!RNZ{Fa2G}eLi??}W$htgkL9U&WQhX4&e z*82SshC0h39&IagM98)GeJos+cLeZUSDW}k{RSk+3#7DZ{BWA6YqI3m=34}j)x5=z z(AAux?fxdYT=HqN#F-4DvQ%)^1RXbF_7SzogoR`i1laD1Wd+8H5{m#6D#u(jL==%% zRwvD_H-b2Qt>kn(c+)R-bz|rKrR02tE_ZK}nwnbY_`JG}um@#9BDJQBq2Mhex2Qk5 zK)Ry=$^>B?s2QYt5@+=;(0Fl`c^h@>a8%FpRIsm^v=c8mj!6vCsAb5uE-3F0?eR&? 
zJ)mWTCS5FPFruaNMfN}0qep3jXzn<|d4*0;Ky<~h{)+q<Gn9T9;^-hH|4fDfMbwO_ zOeIkxPf2#8>Z1GyHB?ic>{#i`$$;hD?eXf`cNllnqeQEaO+CvLvP;rbN&u7$Kum+T zZ}5R+Z=^8_hf(qxGn5T`O#<dw$0#QKh*8U5H8iPB^U4M!EHzL-MzDKM)Qfug3MK4I zsNFnZaQ)-$HelmkVs<?8($D;NghwFr?H44({@gM+@^zsDCpl`MGY_ohOyNF9O~0Q0 zG!?v=3BT}Xuwu#w%!g5EY70R+VOGBJ3=1}b%=BT~Be@$6+a*6nR|m~rCD~7Vn*A7W zzcyhLIlq82T4x!Y4I?W}!BEmCYGedL&IE2$KW~^FU|1iQaA@~$B}Sx^tjfPf%%2O5 zr!}}cCr@+D-8-jA+)I0eYOZQ%q%`kan1H71N*0*Zw@>9g*YaGm#+(ab$Y+RVc2k71 z`w~b+S81PJA!TcXZsiY=0PTyZIN^{AqWq3QRuS11A)YI_Aic2MFqrtArEG4NcB@CP zK*!7d5idvd4BaCGF%C;M)$YZEr?^q1(;0MMNgySp05!gMrWXDJDld5(A5t2Jbw!|; z<xWMWL7LN@RIxbXP}a>YCrtXv5WvvfBFXu@XRU*nE5wdp7Qp@jn%s+dc0bY4=`<0F znb-L^osOmHe*P|A-NE(DV`EaJ2U;eQv6{Ax8&fiudp1(p87(TpODMKggiw8hwgw84 z#;ZZ!&{+~|H0bH$XdOld8Vw@?-`qcI2T;}tGBb5E%(f}tW#Pk>kq=7>xr1d@sSUmH z^71;`v-7^lZ%M`*M+GAa-`gUq4!AnFryJQ1=i)X$=iH=ezGK=!H>xDj*sxIHEtVDS zWqp@U@e=c2v*t`($AsdU$@_OdaSQ+Rx;g9f^z?f!&z9u&e!n;BzV!M&ssBvgI(Epo zrqv$g>W=O#`hMt+_3m>IevMw(;XW%ZKJENW^L~cXe%o}e_P%$%;*7|?JUyO`ynHU3 z_CnzvuAagAvh-}8Vdd!xynbE0z1*$)JX^kJd!g_!xDU96zH(h!eo}Pho?+&s_{#qr zvF?2TpCpq1Oey()t#ryKe?S@8nF{|e47~r~>--0Yhn|s+`F|?M{ddyW|HkZL{~w&X z|4*UE|3%f1ndv`q<^D@%&wtHm`mZ(PF1taMwAMR*-S+Nc?PuCvvb;Mu12{okA#5P8 zudm^PQGc)3{nT!LK>+=MoI{{tmZaI&AIi!CiE_$w-5-zF+kx!IWl9w$)f&w!j5}{W zI~k65leDBqMOjIS4hk|8;_T%AY0=x|^WbBA@qPN}Fx#B|Oo>F@eQ+c#$gZqvaL@TC z!XrV_Q<3tW(X!@fNoUd)o!lD3_B8tnOS7}x#rNrX=4x~nm)HC2@MyF3hS%r)@nO85 zi`;j;q0WP{IdAwM^;<v<ZX!^Cz(Ty#LcGvKxX?zh6e@rDsc`A}WChBYEwX(-gU$+% z@H(;JYUvzR+X7F;40Sy*UL$3J9bWyuhs&3n@v4^fj>1;*@Z_Ln;Ns?kX6#E&@Auha zsjkl(hPLd4_*-i7N9=RN#3mJX!K=}g*xM5%`%*aD5@egR%pFGdpXQPTXOuZex%y~U z{$u<C+TbIn5+%4JR%s>Y*<~H|fDI?>=BIa#rIZWHTWm!vuoU{{8s9U89RufC0p%Ai zCL3HGteoCP57V3VN>@_R@1oCfW1lA#CyC+)oP-SB*f9B&v}*$TLMp&klER|upqAJU z>nKArbFaJP`$<OX!Ha4_tY|??Wp3zTYsPihgK_26gNd5qB$%NTxRDf{;FDVkk*nx` zCF2Fp=@B(=9p6VK`OPhRPtWnNZb2JdO<>vpb)htJi=7a&A0;hlwpUy}&+m)1`@RJ~ zq(`x|vAJDhQ<f>rx1>lc_y)=(CUL|@$|WdFNh^<acQX>?XTZrezv@2ojcIDk``H^N zsA?sbX%zZ6gSH4W#Yj^n7-Lm9eN~8KWvG2+=c9S&qeaKFRXAf67()d(edT|rDzPYx z(DBVNN{=)2FLfirY@sG^Br7qfFVd=Uwfy%0UPDD+QFnVwT=RQ;%RQ|OZ64+)S#M<0 zc*%XO)YpA>5!CodFC0QtW8)MbU}E7Ci=uO^ovdbz0cwq%7)#wZNV^3$8D*!aLbNGw zvnpt-8)yqP@KR-h^5hHJgiDgN{R1q62?`XJ8{xDqdzUzjUoeGNCDcn3B3s_}Q87M4 z?yxoVr_T9Lls-?Dztot4g{2Fb|213@SPDM8H(-%ec%u1xp@a4zGV5r!gW#N<Puvc4 zcca<Lj`5QJ7_EDLSJXI80i_EPhQ8{IWqlR^_m;$li;B3JXu6+R$vh;9I#hxYC@v92 zC~|@<WlB>j^^x)aW034G)*9{>!H_TCRiZLXr8Uo{)=#G~Ng=RE7%IhzDgWfy^WxAS z5V<oj8UJ%!j#kZ3e06`A9ifbntkDzA@jZ>%6CDv&5xK%XXUXPFpeM9D2Zu-(71t8O z`bZ5mbv{*vRePENTnRh@4-Ck4mfO?pV0trr@j`lXUG!;!^>FFzCEuDRk6um{Ah#f{ z#LI?Pe5OD<VO6>kvo5q0o;kWehEIOfy~Y$kOx2jQRk7T4o}@KFt6_3`7Q{&Dxj5FQ zAjvsD-ri8==3w~_aS9fO0jA9a`KyAQ+8mZd4<CB6AJQ+-0r(G-XD&nrke~QU$Ynl8 z>0iWtz%Q->N$u!u6E6sG3~$j*)RC(!u_g41j1r`G0u)&Wpb)loxzb|~RjRDZxDD_h zSTUdb4CzN%6ZG@7J6<^`t(BQM5N9_rKh2JBl`K-`Zz%hyU?&<ueRhemPIjINtN=Eo z$^b-mz=5IgNnmNW*nZlTHM+caYZ5#|;W9VUQcL0%f(2^UO3)^t9B|xhyr@ZT+AcfK zs=CRpxydx|>;RCR*c_#GmUWJzYSq>>!$}KVt>hqq!mK4tc?KzB(EvjxTN{EpeP0~! 
zOwg&r|MP1Sr#D)1NNtjkcc@Wwv<UHO`N2BLj+(m~St9?KuOi@Z5nMsg(50}!|JHv} zXymJhasGP$#d^b&l{M#kbN~GH(Ldi_bw9JTKZ#+Sy^gZAp3LpO?0uz5Vw%#D)9n>~ z(#ZSsIoWp@B52f{XJ1rjAEd2jEWU|6m$S$OsWD-SAcKFyUwKGql~5H_rtnaO!YFPa zcUzR`dLSoCp(fleq>RiG+oba=-<%^sd2ts}LuW6kOb4Ah2mfaU7I_YGcljP|4*Cxh z1PUDl3Tt#iqqLlp*b1K!`UlEJh^jjglhC2_(NZjvR{BNeS{p+<;1+(P$sA#e#yU-o z^m5{7!Qb#U=G)o)eL}-b;I+b`ol^dh{4iPG6ct4A+H*?tY%@fSzd63eZC@m2JV70J z->W7qy1nf7oIfE)(-*D&i0wcbVifEk<xP~qSy;tgoY4uZ>?lNq8MK)u0C6OSD3+JW zD9MW(4#wvzfm?yNzPFFg2J3zSiWzC{Rl;)W$}d){LoOm$X}^v(dG^TdS)APowMEi7 zDY}VWhWMo-0L@jDr5&v`ni^L2vL9m-Cr`mcU8vlhW;(>G4EB{bK~p_wOdm39_fbKA z!;1((U$H@F9Vg1T^_Oo?R$!0jp*d~Q@s0j?_cO?>@TkwTYY%dZ?v&0E)lHG?C#X6| zs_EG3EWRG?HIFxkI&N}m7f#YarM(_PoPjHdu_J_~EsAkKU5E;d0nFf1z_12{K<+k| zA$JE!8%6<71Uh=)odQgQAJtxWu{U~QEmz7;^h&mSpZJ$$O`koy2ci8PFV-eIL;&DQ z4ghaXAf=lvx(1(eJO!~o19Q9xzC8_vLKhudBT)lIDNTBcnz4SAvFadYsl;!P0+v{` z4Cy3odB`ZWBD~<c9Y5kOJL>KqDf3(o&n%uBZx0ksWr&0jv$Vj~G4EzL!c`}#y%Hxj z66!0L`4!9x5xoKTA<X+~3gV70B5aR>F2)Yf0PN2@oT$1S$UmOSKOd^x8!Fv{NY-6U z*W5~1Md~l!o2fjSu0_K$z@{+8D!oWIMASON+`&%T`m4&K^p6F&|7<_Ids=i>^w{=f zMVIku+F1300J$0OPraV}V?w}|Ad3ZfOPR-VKb=rv7|0;-p=Pjl1bMs4Iyd-XYSZ+R zi%hr2({WIWZ{YpqFr~h^<nGm_aHt~MU!OKJUx|U^q_iV)`A9{AW8Sc|Y}7`gq&oa$ zhrBq)V&yx7RmekT2qcE^KP-``^pWu`zX5z&&Sji$h*imHCV-`5hz01%54KSQZ<pvs z>o;hL*6z|2pq*l?>nyOS%rjskD-sacijGwC4-ttDRf+YLyGP8PR+SOI31d;Pld34I z;#-@V3zDKZno?t_dV2<iBMtmpo}QTA+8A8i*cu!e1jfl9pgg95P$WeI<|MVY6lG@g z1U2L|_x^x>pD}pXNhskzkEuuf2HhUF9#<xe4y05)xd!^Ae93{6vK~Qpvfv=CC+|dx zR9BGX2*)5L`*mjlX%<Mr*H0I(jgoMdP#ehvRTV~sRdSkEeW5~T`sHlb_IkS-IDc;s zk5i|F>Ad3_sQggdJaqf*slzb~?8fnj#L1_`sprH=r^jih#|Z;I={{0|U|W>FK86!j znJ00vIRy$&85Un99R5v6)?R4la&+p3euWuGTLwDJl0eClU|Rw<Gtx$`Ds()rjiuR& zp0!C@up>QM4!J_0pZHT(`PFCZr7{qsGOw<-s=Pd~J~ux-Fg-c5IXcYSN5}+&fpLY4 zgo~bvo|X0|BMUVp0Wltm8Og%Ug5(oT2S!E=E==-1I_4!N>=`234i?7#@u~6Yk-?Gw zbd%j&V~reRQ)|hwOYgMW3fB}#15uYlZMB!F%g*+yzfqHub~NQC<fKWP4wCRsD+y{n zTP;S}zDn`mxq=w0Hoau&!d+?n8*&>Acph9PTx=iv3nh>B<g3nD*|E0|RC{(yAeQ8t zpy?UAa-?}ldHc%AGLFs85u#F6l~v~#*r=zcowlX9L`At9+yrwQG?Lou0Wu@8p$Z7? zKtJtZzOmP=0<2n}up;Y%1jh^t0>MFU&7E4U>FPM`VaX9Lf|fEYf(gGiC>&5zbZH3n zWJtCp*=*Jzox!Se0{yVL24S_1cFxz=w$Pw=p<$rGc=Pl-_BZliV)disx8>9bi;G_q zQVLAX3Jr})jm-+p%?gbTicC$)42=oQj9qQBL~Wg96|IycUF6JU=q~~`{9#gx8A7H7 zVmx$@xOU2cz7?I}Aq|CDm5nvYO=iIKe+p|HTa5M0_0I@nx!Z2&3F`zF2ts%!;uI#- zWq+YEYR@){o`+8sE8oc5joyu&1l%;|nX$4rmtBm{L2`P{5#vFRlV=+$5g5cO&pd3b zwuX0nJYS5RZ@l;%3hNaIK#ff)bf|$y)YpLW^XA!wT@f3}j8|c-Yqhj*w7ho&j4UK= zwh(F~H~~{p0aK67b-ZuSjT&YMJRrC|gQ7!AE&1|Xg^=+uv(pn*w4feSSXoweuFbA4 zE_z(7cSoWtkq%Z%OGQjdZ2x7o+dmvfudib@vdYWa%tUQxVdUgsW~8Ba*%!XNKgIa_ zALr=2TB#c<@=IHrm<ucT;9Ie<(W78sL_)y1-rlu3JkT|`$}!e=l$p|+Vi+JP<fw2j zuypZsIN^@vYIWnrZ_<>Zxk-#A3H;L-VF9mOeKc2cs5VlDCrETot&{L8!!`(d_dhD@ z%86RIM)LX2ncDO-$2W*mS>oHZ4$BB@uIpCZ>Qq~9l;dq<XtV3}wwk0Uicp%0h?30> zQiN>*zD*QYgTf51#AA5!ihTTvaQ22Ww%$K8%90IVpCfA*!|cT5#K(@~$<dGBRWoRO z__cPFJjcA?;NS$Wg^P&>+%|M<Ja}+SgnsylT|1{bhGgnfTJk+P&ZFg{J;lPYxwruS z&IdnISs_A07BxNWg9wNUvxkIfe!6FNbeU&rO_^^^p%>NwMKxZH*3Rwid^&wAJh82r zv04vKS(qT4fx>*S04eiW@|*RiFi8=iI2frxs=CrfPup@ln@~_#*Yhx+73Vp*8SRQ< zR>Rcbxf)KE=9=d5`ef_z`o;Nx%;7KR79V3GCR=SZ4RQR_v67?;gQa&}Ds<&>kcnv+ z#HpBol?3H+^r;*QGx+MWwQCGrBNU$rJPe}?luOKXWOyVjB!rZOO+bDi#$e$+Jj{?; zz9OzRaN4G$F{-2T&exScK4;`}-Ze3?f~plar$DYrit?bxqzu%Pyex=_fAm8i!DkCi zttn0QiuGcIhLl>gG<AG$FSoZVe~na<TVf(GL%e8>1n_Mf`?-|=u9sbJ+4XaTq9!lj zr8rAD#@N!BrN>NNK#H4t)jd3d6!pr>uRaiz#M|9PjV>NPec*zd*eJiy@I9{nW?zJ! 
zb~d$j=1j!_sd4fJ1&)xJ8Mvh!3!LRPX5TT@wk-92`1qw9_=%tDaLyT#Ee0a_9X!G- z@y#spPcrk%`02{T(W#-Nqa(*QIJ?-@n``|!9|J7!y$?GAF~x*=%)Y)<eyV+h{^}N& z=K(Qb$GkM0@LaL3LQ#pnb<Le<ZQky`sbeNUY+A#yKQSIIh_BIii>DNGWW<R&z9Nfp zQI{cNS$BjeY^@D)aTjT<PntcSyUn7K#F4X+*H-j;G*Zep4KY^<K$NtgBySCvtZuh8 zmQJS=DDy)X26yrnPeqP6p`QjI<UvoBH%@R;V3o4cMf2N5<0c0Uc~b-qa)Zr|IDXop zH9L`0*gA3;x=|P?%<EGC-$OOwg%!cM_CZF2C3ubW%v8;kEVWnk<LYV5U(MUQXi!}( zgvZqU@$gAtU<A;>6To$N+36s&Rd&3ki7mz14H^C&t?>a;rcQ6KqyDY#)L5OaD@<+q zhFD%=0~rbfz_m*E<w}ng@%3Z*G4~wBr{x#QaO!Tby}Ec)o6+QPq^xgLV|(Mk9NpH| zAFvfy*PL#fzZ#%79q?RP#umgxp(bI!xk(*AOmtPGqR`vY``}Ok4gZ%OrZD^;3|1)6 zprZK7<jzo4p&(JSsuYZb5LF}()OmMp8fRPTS*^}Auv0c3guNB`$5`1}s#)2oAJh<E zXXf4m#WJ7Y?ea^)SJ@IvXd2->gt*i*M3nOtM1$a;GnMrrthE#u5{KwY3vO>)cVvV$ zqI~jF!_3ftNr8wxMH%E&DzX-(`jXsYj`EhnZ-N@JX*%^4QIS!bi}2J+mh!x(3MK;I z`~CbC8VCh{s2QY^IOz_7K6C-rVps7dKhD-ZszSTt9uQV#v<hV;cgI6{HBuHq=VWL9 z`~V#$DK$k|Wo3hro_&hjy?2TzIa(_yL<-pgUezOR*DmRmsHl}_tKn(?Fc+QKxG1^S zJ<Y~L`g39DcgJYh1|Avc4YG(n*86Wqcl{rf3NHyID;~`_HPOJz=IqeC`tX|8Bu`zL zyVucm;&kXNE|({g;+#1Cfmg=tZkI@2glTnxXEQ;;eO3ZII+t;(RG7tN=kfHH1j#Y0 zY}orn>e+g7Xsk=`_=4B_<JtLTws&ytnPwAAb-v;qN~_o^q?w6|2m^V$=%F@_ZhP%b z@^m$tRPDZ_2qu2smBDeqElgyT)FcRGFapZHB7l{LyZyoU^V2waY#%Zzq9B2XxdD$i z<f2_>nB?`i^|g5Q-Wzu{u%Tk4<6@-a!B1558%NRCdv$wlu2;)DaA9Gz_EvD}c>;9A zpdv$De0|I>Q`ROY!z6{GL`EN>vX)D&_XindhqL|{_k4l-2;w;N#(4<0QV0v7L}~ye z7o-yC<t}Hda2uTFG&`TnLn`@p27rCDfW59<Tp)C0fF$J;s2m6}$68omsOnJfL`)tG zI<`i6%cJRxqjZ%c%b_V;b#-|c9w<ag(l%@3U}J5<<I_7^*!w(uKM#Fo=?bA1#__y* z*>MiM^S8`9k2S&dKE^Cn_)o5q@ZIKryKJ1qU}M&)+`bjEnsR`F(+9xsS<87bK=dSs zw)RA4N5ltPib;%)z3*Rf{~GS~CQ1*UAot=IQVEP#OKdj-jh!q(6~?{9wMYUcCaSqe zqs7-dpBxHY?Udy;OqVqo;X=KlHifc2)3TTDQ|c!dUu#Pe{z@hiHTBq*w3uqFa;o-` zB}i=t3Qn(&LGGbq{~|xI`)zi&r;+K*w4l8`IVT5upQ~#Uy|d#~A~Hkau>w8<J-dFg zA^ehU-pXIx%USDF=IQpwKvT-ilMYVU8Y+8xeb)5*9c2fk>j9kWb5;B1;UW$GJA)x3 zPzDNEQC!<lL*87-qD0iKiJPHsX<d16={J%w^?q+e7kgg7=M1B`I+}mvqB5DpIV$og zjdvss62{sf2X~c5X*(Dk*$MgN7TfM!fp?P9eQBrVs(JsG)BNh0lD8f<bl#Mpwi%v5 zF}Hu80XI|jpI_9O-1JY5-v|mzG-0A$9D`%s!{b$GIKZ>k!W`Iv1P!fj;T!ht;-w|8 z<WL1lOdWw?JRS;@VuKhjK?A7KQ_#WV0V_15#KwUB?cpcstv{Vm0Gh{XIVV3qqh+!^ znRz~)-G?5M#y>yBMn~gV1?(BeTzNGE94*wuR`}2v0uv+`g~d?*5u`lTnIc$}|G+4z z)pa_8vKJ@KUVWDN>ux&|Dd+pWkJjo&&n9f%&AIsKJtN;1F_}PNrd(*cF=W5s0Q4a4 zEIO1tIdEg!J>VU`v`D)c=5O|=S3itVvU1Dg^L871padiiA;eZIf#l}JWa~;}gvI)B zRNqM=bY=y6>Fdd3(4lrX_1~(Szo98+{kek~9qbJCZ<`LBr-!^wns%@5%Bg=GpaPH- z`cBr|-cL8zu-FI&9e&5tM=m^V!HQUOSD`tEOJ}KWNNyDHuhd`yW=+xIw{@neN!v}9 zILn8p7(YPDd)?OEpGyF{scP))j_#wS?MfdhdVY&O2u(SOjwj3afRc&YtX9}+BsQ4N zA{*-4YxCP!kUk`>4IPGh0Nxl7j`U{NZ2U*x#H40*4wi-v_A}#*F&W^@{b~H$!vsYH z1!i==b>xEux!Eyfi0?!Edwb#s_+ty7#@)i`xw%6lZ&O!&SdbRs{%~@f{CF0fo(>fj zk7u18@@NcZLHB4vmwaP{g&dyR(c){`6Ak<$Iie$_fg)6h_#Syacb9{pbX$8Nj{`_m z-}U>&(p*mxmY0K-sSbMlK?wOp<gYvx78G0+l-Y@xyK5}V7R1z{>4MoYJml#t?6|SM zwxR(^XYSPyIM3sqijF=mp|-C=!;a%`A}a}ytn+)8dLMOe`C-)qiCSfsC|KDYuG8C} zR2sc2+pF^-H$_)jduu@netR(m3O#uF>YV24nCw(dEpI7(SvQ-|0&k$=C`Q0u|2+Pt zCdKk1Z@@TTne|vf(lv@M7OGy|2`)^L2BePf0<=<_tCdG;13%iEj7~kA$r6kLv^5|C z5$cO9MOSOLy&BnfWBP-!MTMCNlNp&FA3Q&Vdbp^qzA0R`m<SwqYI6^F{ey28%~`*U z9Po)l=uC?a;}A61kz{SzN{nlqoE(j;>`#X?m&;7LXI4zNst+%_?U~ypqmN|DV%X?% zVPgI9vkwx{MO1SXh3nLneQa&#d4h<?09gE9|1$N6BEdV*O|G(~2r->Oh|o2W#ykm> z%CyAJ;cw8%?kzOE-HUm9%Y$ZR>sT$5Xguj15x{OS3-i(2td_ku&Uj(qj4&!Fc2gAO z+nzo?erakUFK}3{lt%~_tXPlFiJvKN!wS>Y6xN#^sF-p8+h?jyio&YGa9?|+aXE)4 zGvU$K<ofjT;Mv22T-#gN&7CP7PCftQk$Pu@DWT@)Z~umo<EDLu(}QcX#6nxkTKy~Y z!^A6OZNvT4(NZ)gm7y%DHLWQI#Q_%FG&C03Qk6$U=1BW6>pyf6LP5y6bkC_2wL9Ny zTul+`$M6KUI7%~BIAwc_WEXL=#8=2Eiyn>-_7cl=_ziwqGs=q-^S_NI0_Mi5e8}Vi zcI0{=ERB75x|5SzvY&~o=;lt%^0XniNP@@!N``L2ato$ri=qxo1GnSJ;>|bOCv-TA 
zPxM;fFKlMs_KWl7iDOhbPY4~ZfdZ?puc%9GC29mZuyiowiuR&9%hw!4rjm9&_^C(@ z<8bxgA=nbX4ap6njNp|z3$SD5EUA_s=p&1od>USA7MPl*SSyetC&qH{G0s$rH<pD7 zys0k6RU9r#n_T<oYQhv}q(x_^XLtz7ak7eP@ZD0m6tEqyG((1vV#TRKhESvj$6FLj zR~A(pTLQs_a(Cu15Ey2Jv-x7YS5^nikq!|PQ&rRw)DzO@(z)5$y^J8cp*%lZRltwT zzk#aq+dT-u+!|NjFjv@z((w^7<QX>R3MuqWP}in!uy2n9!&Qz!)Qm#a4DOGHX35@4 z2<i-Q=9d*bSeq|0W*?ME$()^Ohenfg8tx}l6gJGVtI#&UShHne=xJKExH_^`T{8uU zeZ?lWSI4MXaS5u<kcQ?XzMc%Xc!q{+A;17Ia^NAQbIu<2`X1)?Chl+B+krX#7kIBi zQuk5!H96(h7|sXj%?H)0rg{CB6Z?;U`g#Fy*H3=nVa)Xd^&Bue<rR?=?x6$s6A}CO zRIYV4P*)N{wi#F;@1G$*J^F_XQVt7H3sTYtP|pHP%m<FmVJrYIIr=AR)T1!@JK*j} zyS^qT8&_|z<XNjU14dioVFhyqbh^p1{ZPdxnG&E`{LQ10EaCYNiKh+Gld-3=w7CZh z8zw0^!N7eh<8Vxm`wm;PX!9@br<0G`my<IoDKA`Y`C(%w^if&Z1`B_EA~96h;*@#i zH+b0D+S}UdIXXLlaEF#4nLbCyNuquKcHoFD`F4iVH7&b3p(yD_OwK@uNjt$oFgx;_ zWvEA<dxU3*glCG-k$L{dCQ5b!#zCu>0rS))&()p<-s+Rqrc6*?<*8!-0qOj+!NArX z-F^sFL(*-MrpPHY{E$#03!I$Pm?1bYaMfLNN3Gavz<@#e7fVz~PRvTh#!`2ucJ=Gz zcqE3k8m$yGBR4zkN9w~=Yd#Km&#`jS(UKDS!ZI?gqn@X?r>Cj8{lV+z_&y@092YPW z=m-?+rfHI0deL24(OUInS8>kD#u}xCuCWP3GgCt=>$QRE=v91aRu!WsI!r$H7cr)8 z&=el)n;icFfpz>Z=`1a!C@Oa*uD2(__U6xUVk?N5i|o}{u-&&PBoL{@H0A)YBE+tc zc$kcS0gZ27?@u?N?cNG2Znp>^AG_kSa|*5sc2hC6Bfqi6@>jcZ&%jCyP46%%n-T>l zRVI4?EZp=INPBD$rw{)V)kQs@1FmQK$5N7Kb6p)Uaqt;aRn?PK6+mq%&@p;vcTY=W zPb><qoi<O`o8`rccNtJ>Rsbm_UNW{;)>`w9%X_Dv*Zunyun?~}?=_N;&+;7uH@kJu zsZq~b-CsyEN%32JoE^}rWN2_zT4Fs>vNu+KDUNsLq}CJi?vL4FI_r|hj@75C)u(Fi zLJNlp5atY$!`#D+^BmajE%JJORTShsr(N<JbB(cP7tz@l>BO{OBr<@!C|bxl@|2aN zowYLq6+2bi^z;NLEk-RoP^k*fD17>yqa=IVdZDY#JEwfGw9Gq)HWLT0Q*CLHLt*P} z?)I?syn41SyJ)rbu5q>D^=N5*xi=Scf`iS4%g>9CQI)Z{a?^QGGW_{BHE}+%0%ipz z%_H($ALm>5fOEpJ`kJcB0bZ-Z)Lev?s&7J~=I&Ogf_9!UH_5(0-j=A-#5q_wSg;S= z<ai<Mz`q=g*M2HQb|gS><1B-Ex4Sn3>F?{){&l<lEyMt!9UNVFq&>kL_jHJMseEMs z_<C)B3el`(0f$;svyDiPL40Ia&?`QSn=*ZOP<{%cvB~TC)SNs!(9`ZO@R#~nPCjVo z_ZFP4a4XZDu6y<uG#%TWANO06>&eQ*)FAkk44oOOVm`Y17t{AK>4<Bm)ze3`hqG6f zJRU~;x_E41Ux?-3y17%nJu1r^o0>|R@FuggB>nTeYg5bGvl|*>%TYoL;aU@S;DCe- z_((WkW-?n4aM^Zn*>oz=d5i`vrNr9b_$DV<@=gM~xj8wXOFT^h?dMPgwtqE<tuhL* z)&evxNDr3gSh?%J$y)8D=J3>(r*N#ioJ`=-*05M@zZ1t1kgpz2`JC+$uZ-rJd=Xt> zZYx{m7<y!1SGql4FGjjOU;ZNlcSrn_4ILm$VzT#VCvC^0vCO)}(8$~exTQTf0u;bA zYF0Fn*MIc{dp&#aZ5KAF9-N$SmLsO&>6y{RNuIF<FR_Ujp^=B$08pMKG2s`Quw|-1 zRGQFnEs*2)BGrF~hA}5LEnX*{kfML58$UhEsE<+|Ln*lorupd?HH;E7$6_)-a)!ng z{FaV0Lsg!-?)eU_xw`|96rnB8!RC=*e0Hj~?tt%ko`m=M9j5Y+ymMWe>U<;Lp98^1 ztMFbkU9ZisJ`n86!ZU+NRbm5nRh?&JsB3C>wskhv{Cx8V>Kw2yV-dU_4NrFQUb_zi z_IHDfgyhnaJ6rRkv)fXZT?<%vA=0yWYQYs|YyM6YU=01X){AfeVuc)XcAztI{ry;) zpMwT@8=K&%RbOaW;A;HFE<8^n(@M`k3bMGmyC#>@r{jx1%9RDJGI%hLU2^v1%Y%te zwP4{QBVjY|YE#X9drqeIfyO>g)SRuIeZEu~8)Z3JmT+x&y_`Pg5uZ?*Ja2dR5=m#b zyPWJI@%QD56CCVhZOG5EYR_|ec{+Q1PFBvJNi=R(=>lS7tkLX1TSQIS<Q-{AX_{RR zRa6Gq!lEJ^A7PoC1<W!s=*c*JY4toZR8!+rL&b+v)w$#3mvi}-Q~5!~%g=vw9kxx~ zA77mUW!XL%g?k{rN{u!|Sqh*&K%x(pyDpZyFUpFvvIHV=xqLS{d-B7@&=y;o9|S~- zO;uZ4VyC2jR;H#_Qw>g@kN#%+e^TA#X2WdM7f!#aR>b(f%@mHx-CSLIDc<0^`JCJt zDtSMmFMr=1wBqixI>A+(yZ@$KP-Y*&XwRxn)W5yaEVXU5?q<*(PrTglh8J*?V{8p8 zXC=_`QIN)8j9JDGS%zWeau0@Ddc-YzhlN;2p`hDjD)&&T%D+JFzDVJ*FwvQ-0{t$# zocB|R`bT-rUEm;Bk;Zve<%udUYWlXLL9impJKqIhVmvp|9$UMs@$J^#Nx#-FC9^&- zQPs^!$9TH$^i(zfqScc1IEH~G*T1*Di>bGx!;Np7zDZEZLvHg$UNo@V)Az>FJFHwb z$E(J(GdAb4w==Y+^ToyGhG*M<_PX2a?rt#_)L0ru;GJlo<XU`sIQ3+L^Q1<qCLidK zI%>C2frC*}G7Xz2<tw+_y=Y00Zu8%wrdv4;ek&dw&G<U+(A2v0>Og?(<QZ?0pbT_I zLZoJ{;$6P{ZBdGAX^LZEiambn%PV}~>yIVEta0||<@&fl1uwq#15>Mg!P$nz=B<au zbX}Tz14je@U~Bw+L1BaQQlP0Vm}v{M)*|tf6}HzWU*|ao$LR5KPmhB8zhd_T-BL5T z`<~w*+x`ansyjW!zj1TL1qQow@;SdqDhKG`=Jk0vKC?*LIh%9E<=GMF@%Q1?s24Wm zH+E<@UG{$8{(7V;YAizzh%>J?4Lq;k&%Cwd7Y|FL>$db9-m2>F(5=N98v0=0SX9|7 
zb@-H36r%*Jk2Oifd04_1<yQwL_@9|S_5D;LzIAKR!DhHnsH;j~ph=K}JCKhuhSVFW zoDCh550kzOzU|Ob2R;y|%FkPVoR-w<h8KDJm)Is&Y2%a!NqZb6f5_^Rs=`E^dt$Az z2aJ`Xq&~yK`=WGwlY|CBYS5EoE}<+dLlD7JeDd73&iDDQ{ECh621Tqm=3xr6mGJn2 zp``Ju(SH{1`Zj=f@Al(_d~zq9xIenR3^#6_$VyuaLbx}zEyd%%dHZ>L{|pdqoaim# z99^ZR(v^Q4d^gbw0%)9<W^{P+og+Cye;Ojm*K7|<k4|OjZtnfeM}5O*lR>$=QkauM z<!LYE0BN5!3A!N&dxZ&9rgiB0d_UTbtWBoUnwXYkoL6R^SL}^1l6wA3WV=7mlzxK& z>~J-yEU-|Uq~!e$x?k-~RUA?mU3EV1uD{-U3mWw#<wSS4!iRae8!}Sxvb(zRpr=T( zAFLf4=lkova|Huj?a`~dM&+)jx-wnqa!n=9R>08#FUZct4IU(-(b_F{pUk@Ea_`&# zk*$lS=W-`8dvpN@4$t!Xl2w@H?=A9B>X~87DJ;;#U)z|u{58Irx8M!UDGSu-;9(85 zgoLEo1YUcAfmLQ{HAztkDm7{g7E<#hc(T4qhLwVF68v6dOBbU|Qrbus0^_H+kfAvd zC%F+W#LPoj(o0%0P2~aDTkZuvf4Sansb}Jyos^cA78Vhakl`2M=a-R@5fK;fA0DqM zuhGy|ygRvBUi(BHS<~vN$4FUp?&pBkimvcv&MzzWRC>F<cu~$~HJ$=;s=oXuNbp-B z)ckn&5%qES!2wc?ghn8msi~Rr2K_XZZ2a2&=HI1v`P+m!NDa0dsK@L5bUr?*Dy!QS z6!At|VX*D}cD|Z2%bVD1`!Ubsd$JsBYl^CBq?8T8wp2t_7)dj$eXe<lsja^DPh))p zpm~%ntGw3q7~7Qn>XqwU#t-&%xY2&yeQZn(JQn~-RFxWu5g&Mnyb@cb5CId8o&gom zK?NU5MIVYqUxG#dJCZ_Sf}^3r!~Ra(*LCB>)YPbmrlFpJl6<VAovnmugn4zfFLE;S z!Fu{By<A*OmQ|4ZuZ|53jZDnIzbf7b0i~y-aEmaV+G?O@4jfRZFbM>Ao+|_j@sFB# z;yHICh3>5U>}YTbQeBN5H)jKERf>BtRmR{PRlkm@j-D1jI1N?Xt&LbUd8i*dUb(II zA0xEtE8W8HKd^6=s<XVStF$`4ntQ@ion<(8`@@;=xZB^4XVamva=Kryd&?iVx;-Cv zZ=J5cfy?{xas4{-WOk=3=C={ht0zNxwVSe}wB9YZ)62%z81no~nFc~ql8I}&fBHBv zAzde8S-DtA`NcXyqkTO^Q41w4{sIx(a?f|JzDfQop4pBDZMiP55Vx+Drj~}8k%_KX zkshDZ983<62F{5FfBCS3=9Y)4lpP+PUtNA3c1DBi!^AMh?fvc~mba7JXv-<9Q<S7T zEL2ObEI3dSUWQz(`nSC;x=Q^uyIVDovGqyoz^UpIp??pu9dw$zJ596yz_n4W6PU!N zyS;&-w*#sM>}XG((dZfF8<6BE#{Nc6V2=NEF>oLz{>~P{_wD_vvb9&m4HRcj__5RW zcV1|HeqKde{rAvNme)|%QBVhLZtALPtEwvdIqw@Jo$b5o`Z@cW+Gpr3Vh3r#42$|) z!NBV^bTanw_SU_!pU(IwX-rK?E;24MHYyR67MF&MjJdymX%)qjy1Jao)|SrNT8ff_ z@k&sZLd?3XQj?9yY~;o#_U7M8{++4RH267RmgL~j=y~Akd!Xmt_qXG5e3r6tdC#r$ z6ilRSe=_XzT76AXG^%b>W)Z4gSWzzxmh%2%;iB73X`QLiaLeS#|0*(iqNMbMD<&WF zgPJ`W#${sBzyJQu%74)l3cibL;0MkdI5YAI^%<EOs{Dm_6?$>udgJ>1oQ94O78c$o z;-gGXK9j{xNJ{y2J}5gke9h(2^sv9)z4dnq(@}-kMmqO(BK~}PxF!E6lhX3Rk`iIo zAc{LD7c$$!>=TTT5Ev5jZm*Wp^EoQ6tPFNRcX4WAXnA#MI}#ZD0jCD86k>c?Dvnl1 zVD9ckla$6?J|Hphs?|qAQad^;z{}qJ+t>3&B2$E-^90{7xZ*gYa95*vZ*{2nToT`W zb&j+9?bO{&r7<I&+D-m3Yfoh<Yj4K~7;tF;-i~>4vaCG8vP@`tVg+?%`7@*Vt>9MA z94Fs=2mJdC5Z*D(Vv4Zjx;LQQ*7x((%{4V7WMgOaVm-0DIl1`j{dw9F<1BJuAmHWY zx#ec3^97xG=$~)H%l%en+M&U32cCf*x^(TmB4@aRGIG^a^<;%-R;sVn%ECV1)y^+7 zvaz-D>-qV(R=2g|^Z5$PU7egp2gj0@mzz@4+L9E5T9QSjq&r)5Q}aTum1Ttv#mB!o zc^!j*!32M~JQIo6`#n?KTld8nD4<d8>#a1`XJS_7#ldxJd&D92_+&OGfK4qe{qwc= zEit#(Bh%Tu=xfAe^|1PJL!LeoS=lJCLYD9iR208Euew{6)%K=c2>DcF(=$kf<fyPf zwXR&U)poX~HqbJAXG3Fc8!Q2S%iUq`_#Ja((SV{!cWpzX$Jz5}=GR#hR_=9^volt= zyZz^oe2LW7_n+t2H4jDh>-s|8fU?~>E>A$I_s?P}F7Map9pBg4N$!sKD}>b`k{o{( z3TBrFI5p1|w<pvT>98>F_glq8#HUx+mtY`k>ap#XFThSr59;mh)~KuZstEgV)NIx& zD|~s`sg9ppsjY#ri?KOi!1ZS92`g)9N-Eq3wi{yF$<ZE|6TEMBNCg8lcxQZfV@+~( zNBWbyx-<a+ff5w3w%ktvB^N#c0TF(w0fOA=_0{k{@<&@Gm?!77boBZjV@qQ|LrOq= zz%ihe9b#I_>6w|9wuaklQ7T_6SF_uQ*3Qa|hW%maeY^d<0VHYXWjx>9@w<6=TpqnM z<6Ivc%+pFLYAV{9S~Bu73Q8$iY1g~(duYihsWFBZF@#ofxdzE2<OJeUeIp|SL?jgX z)ulx`$7Z_+s4(fN+L~?_W^E;{vYMKaSy!Bys<5)U{Cs|IMQ*uncmRMM=VDTBLTVNs zR0}CBDJv~0F99tt11|#u0e=4Lfe|n=uC9VIJorvn&4l>)`-cbht$$wM-=m_ZMH~P> zX0EOE-wOOXEFH(l$2Y5&=Mf7})6#l9&F%SRcxV>({5Z#u6~`w@smL}~RlPs2NKPr) zq@HbQs;cq<gjwmC-bd%j(VVTgJp9Ht@`KaYI8G_^<Kqtm79dqn6f)!N(~7C@sg#<v z6qRI^Wn}Jd9sRXAS{hkf|G3yj^3!jAU(3J*SJqz*4-ewM+|r8B)Qpmd%D{hc&Tb&c ze|BzWKTm$L-g6m;cD!qde%Pdi`1pNebm=?m-mzGoU0fxg9jB$EjEaeeg@WJQ+=+&S zjf-e9FtAdPk9m9^5h$&0%YQ#kwzHk3hJvn+a|gQ2ITC7i9ex=RHG~=J*h67^G||pb zc%NT80T&mQKAa29$44ive_2ow_jBc&nsxuisqtC<5bW$H40OKqzH=tGtD8?vMb#NI 
zqZID~>_jaQz1G`d>*aoRaibg=tE;D(o0n^2U-qo;cDmYPpr5+8x972ri%AZQ#ndF+ zN~Wiume11D+utuPENEn6rJ@<@R*+4|$g(V~<`)<5?eB$8&&<|V93LMSmGYWe7|Eq& z{sxb@oP@lZ3MpY>avFS=&Xy4Nr@T)Um6Rf*A_oQq-5wz2J#2At2{}bYa*A<5zrn$2 zNJA}hk6GZaxB>!!y1%$ZCzbHHuDQIPch6ggk3=Xa$!%$2SuiH5#PldGISB~^gVQK_ zA=UiA>$O;9W8#es41`+P^c{dZ&gV<;XXB7+_@CdQxMk*L>}+ebJLtcl=>B_VCXw}~ z<iv!Sn09VXK{pP;-tMuOtyYr8<_tkwb*mLQTgTk=1b_xq1`vBHZf<v2vz1OYMYMz@ zC<sI^HP4_}SW@t+^QM;+pV(N>2&5dXhfG*pn4FNB*!d0!UC(VuNgc!d_HD>W=@RXU zTYv!`6WIGNHCy9=08mN^Nw~c{I|g|lHP^_f(3)CCBn0%r{5(LtX6D6?m$Qk*AA0)w zrbE!jdI4p1r(i=+EaY8Yc(#7<-?s$y&@d74kZ@3_oQzCXPUhA1&zJH6fF)~gu$I^J z9V8~ItSrnw9%nZ`ImN3@+SHSgoLrnQ@8%IuG4#=n)A<}(?)`I}QA_vzbY4)D6YI4q zJifnwb*IZ6I3g7t%lVaXBjfz%E7q5eenB=m*6U$q>6HwHIJhnh0*Micl5#Oyb909Y z;09`RdVLsj31|4TS`#b;tcpr1wD+k0%d<{($M>iEY?U*F9Ij<0NGNElTYGk=$Mfy_ zYb+Y^)J77zF;w$D_oSYVwVT}+Nfj6zi&7DZaLxkNgv1VcPSDnnF>Y`bEKgU^a5ulO z@M$lxl$O`4s)hpG_4mK2sknTAis|{5SHb^k{HL*@!%nOXePyWmqTA*Dd`J(2WJePf z1#`^8qUY4f!vmHruPIpFQUPPxW8eny<UI4tvTNEzT?IO159W~rAqw<If`>SBH>o0) z_Y0)f&reVkKncKss|)t|1)y&?A`^?|uh(A=h%66;?m^sm1hlu>pKn`)&_P@Of*c-i z4tD(K*FHmZ$*^#)&z1KLD6_V#E|`HWPy&ytOl@tQpg_YLy+z)+hwCrSrPD-YcqKkI zmn_>G3tJKb5(bu~A7@{kUa+fxT;DeOsN}=LB4F>e8(NUT9eCc)zA--dIRk5e(tGT$ z_LZ8L`E>F*cR@m67M0_{X%|<V`o{V;j`f2L)6>a6e+&@Bczi#eN1_qqPKSfr2d8(N zEG){FmvsZcB-b$cQdLzIF)(le`+xgR2HV1h)O<Q>T5TSdosFGHIe`E@UT@1(wOie$ z6&u$=RT2gw_`8n3$-YR+_29)fPfnk{y!qYZ#C>zUJUT<de&L<3wx#0VEPG;YZytGi z8tkdgPPQz3l9QCiuo-KQ7r<ezsKAq%`Dv+WT=#8#Hoq@Ye0+?q1<yW4A%7l#xX{f& zO$21NSW8Qah<m3KGuK?~mz=j)93H<r4qVL%#9=P>^o$M(D=#J#W@QHf7!?6MUfAeI z&CSgIg`Hkkv<^s`(xB!dqM<AvZwgk*FR|l2WZnUNP+rQ|OiNM98O<{r>#9IM&Q1<j z_-Da33%#<~WcMXdx~@eryt%yMLeO$>RbDQxph$QA?}uIP0s{i`GqV)5`l6PsOKbcN zaxxka@Bn4Uo`AY~RJbbNJl&mL6gw@w6uV|2Fq1zGWVhxY#LBTrS~=FZI)anI`E_-_ zE!@MtTZE{sQGFZ&6?qJH8XrI!85it!iBVh#F?6A3FQkdwo;UijS*&L}L+hj0QXR24 zT&YW0-TN%Jv{Buf=NAMdq~cN%{zs>*XG+Njx(UCi-8pv#YwPH&e^gCudivj2o2)P} zCwbD=U&L#!tZ1TZ8xN*C-|1W6FV_RNAi%gF<e{t%{94FLcPJimz{h)`hSA4)YMN;c zbc|^jSiA4+yGdzj=u}eC^*zwXDmwf%T`ewFJ~Mp)qz|C&>2!IyCr38JF79w(m~(wW zMX$q8$^Mn9b_}$A;AwB~?t`=H>KbhS`qum5s+<iRfPe|Rn}gSv{d=~wseG4Atc}Y{ z8>zX75M(N|6TPLZkRSimOjhqOVg2m^8%2n@Espr?+DGlnQCVTfEA8xZ>@jxhuBeE} z3g9ZO%WLd~?~1N29$p%XmgV&xdUxIhCL9b17Y}m}7G86Ax2L}ew|l>4!Y7W!CMd9T zIO54d3(JIKjf@QsI;hGoxre;+g-aQJQ_mx2A^8)?JruIbTh9QWA9@ICkM3JJ8Cfp? 
zfy>0msIjH?b%&0YhKC(;V_i`@GTc>DqoQvQNOP>)?Ewl{7!sl(to*k5a^v2{mW+G| z#|jKF`v+leZaxAE8po%#ye1<cprED&D!Iq!3rZD?kbw%KpnWmukWRtYw%QPj!9X^b z-7&`hdup9g+F6YQjTRma_vbC>vFSVh;vLMREzMM4KL9;8EeHh6(u$F(WG}28FYCQl z=j{5dZ;%Tw`tfdPm}}?;DB#l`vL4~OpU&g?ay@HmX>c0G$@w=20Ui>dGB>m}G_(wN zZdFc3frNl0FE8)Y{``EH6+cTx{)ZV9AcBg2B%FF8p)vB?C9A%ws=zvTZ)?w+9Z(<4 z;H6($WOsf?hJ>WYHnIW4XTibIn(}OC|CudsB*IRp*$aj|6TXdaIS#QEos#9{p9A!S zGbIAeWJLwX19@$|vGSdFw?N>oYDtL;3kyjBD<G=k1pby25vgdcwY)jo#_j9Maj_T2 zw+ma|<U<QAEG;Qf(2(a~=7!6hm>e)jEFvOKOT#cWDz?*J0-mCpicVEQd&%MQ$Bl8m zT~<(Km*$fa8Wok0S)W(&ZS&7*A*uUs03|`%z9B0w2e^8<yJ9`UENm?PVWBPEU3z-D zSm6N+1RD{T5a-}v$GW#`E6eaRP0WmH8|&KJn{e566!XG@zP?^~bY4)9AADndLv4CS z3fM@jM_5x`6?%l6f}E0yBEr*MSw%@sRtoFaF)%SoN=w3U(A|L=1#aJYpr+$N$f_)$ z7Z_JLQqnpRlTn<OlT%t<=@aP3#>R4MdkagVKtqj$jEseYgNdD;PehD^_dFiv|MA0F zB323Kup(`JQ$|L5Tp^*fj3i+1d?-f;CCJ|g#+|zQnm0eb$LKr|b-0<Bs4z1N6Kn?t z@sHT<D+NxHpYMEmRoV9K&0{j(Sci*wQVjFdG}uib#@nc=DVJ52HlXC1eEh~RR_0k) zSyWV&n3)*AiM%j{kC#VQMyjW;^Y)$FG&q>o)7CgcNJ7daLP1G=xuq5>9u$<6%4#a) z6cjFw_E--c6AQDazh7-@tFEp#R^|a|h{^#T9$p@-DGN9(@G}hz^-3$to15zJFfV*O z=3CoaDyzyUQCR})%!9sZ>1Y|78Ha?2LX4oVqkXs;bWb*RHYF7$a6Si}s<~|_5xoqv zqFZX+cva70VZ%U4O?5<U6bA>}Eo^BbZB2D@N(yd%eyHNIay$ZpxS02L2j<TZFpJs+ z7hI5(0dJ1=SV6cgFE0z4i8U=yQCbK)91#@;QXdx`fy>*7i3+i@vM4JnFfuY=<p&yY zadYP8=Pa$v|1mmWS_E+}bYvKXA?`R>!9x-f5@=|s0`s}q>9EgOv%XRnCD_^7jEwYI zn6Wbtn-UQbDz7SET$)4aJT`Bmqosb9h?JT~k(`pYv@jcMZkdvb%FxUdoR}*P<{6op z5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN*(AC!|EGcSiti{i~&r!_B#mDqs>AG^Ii;4=n zssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY8y7d1p{Ws^vgZ1)a)2=jMdVy+%u0yBoSw3d z`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_Oh^KEuF}o<yVC?`nM5pov(KOBSCp5*l?Kt& zR0GF_RfSrc>VbK850@JgBT*4yxMEm9nv;_qbOv<(^{bbM8-NSt;^IutNWD2bb>!rq zqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv9$DF0d3m|b%uT@CBqzo}zrgCpfO+^Jcv0{$ zSeb_i1@<{RFMDG01}^4xwbTiS$<GOD6O&V?CSqwIBX#i{{W)6~XIeU17e_lB%rm-p zdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7*wAq42%Yyw($e8!ULZa(cJ#*3%Gxpwjuaw9 zfAkFW-oD<&WySMrE1;Qn)|Oaxo{yi;#=(w+l%&1oTVGvW*G<f(LUzH%A*R0M@?25t zl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8c$okG4B;6DVN>sPZXRAW6-8Xk8yM)qPr&MJ zU`T=;c5t-q>+g;T3&F)aAkEFqWo=`Dhj{=jIW=i&dhD3Y<MB4yI+~#2^UHIM&GlYB zUPdN{I(pima-XZ7w!WdBql;r@O=U__JR<`=)?EVTVd!yiuwli;Jah_9j@0y&k&&yo z&c2S88X*Y<ouCdODOE;F92TTSguv4}dANfxVm-nj7r?x;hkHd`EiliHi+OU8ab1Xs za4`=@3=TqmQ9(l^iuvW=&pbacKRGq_@bM!&%rh|32ZaW<v^76{{aQg@_B;=_mzyhC z1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU=3Y3*oKT9J-EM#gCR#nh8Thcah`@s&jph=CI zj#gb)&&n0y92h34pn#isC$RHp7=(;GQn`3|)p0Nn-Uh~Z1{}<{qM5g{wrKBc3l9s% z!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A<YYQJn&p+HvkTKby`4!Z3BLY5-bfEGO!e`E zN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQWa5K-r$)1oDfAwk~*jGHv)9|UEC8o$sjmH|I z0`o3jp17Guc^hZY`MNq3^SHbXIhme;o(%f(I<5v7oo{SBO6UE&k7ORCX?J%Ab@dtB zRS699!C}Geo$b5Np9cl{!O3N0Vu1G4)KmZo7`d|q1mAuC9hgj@zUy)e))Or*DoD@B zM8~grhFe?SJ~6pwtn~8D`K`x?m{+Btqt!GpuyOZ5ghWUwD&oEl>vWoc1aw~CErp94 zC-dO7j%41_%B-cWDJ&!qAM?E2pqUU4;$a^685<uyCi8gwJTPBTRet}`gU5S2vx_%} zN3ISH4Gdi$7#iwFJzgKUK0F9t^k{cCFFOO`SzJ1gVIG&xLjw*Db_nSD2YP7nFi%QB z$*p{bh&(eb0W0$$;|QepITZ7@xEg$BUI(|gK{GEaD}A)LIR^940Kd1Hni=2O*};{e zMKjNUVgAm8d-r!9*EThH!hmaPte_~*%Em&9Ug=|RW7*r){t@@RvQeoIRwh?(&x^`7 zgiP|BYTgM|BRLHdS0*+NngP&x8ag^n14A2kPef>>jFJ*3I~%TRQfCNBPtyzOxhCOZ z9;NffM=@_{*4)+<`bRK7J~8~y>O6+|%ChdR%bk~7+glnT=s?#xs4eKnu8ua4DQI6@ zkPXbg!sBi5>O4CKTXbyHmHux0%#(8}oF*j0%{-Hv5As;dgP&(*`5iisPVA5!pZO#G z{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62lNr>Tig2{QZ_v>Z%$B!N!z9zUI%OH@Rml`h_ zS@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&hY8e>Xx_i2zm{-QnJa`)n^LYHc;ZZtoae#TO zxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC1o{`m*D#n;QBfYOl#~=CB*d6i;TY!e>pTly zoj;0sbzq*9UG6jycIL&7VqWZP=JELXBfQNQop1bum<Q3o^{c;P{^9+X@7`x+rol01 zWo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>*P!B6RFA=>IjjS^={c>9Ectz*T)0c0*cpEw` 
zLql5+FZZx06!W;|+qgQOB_cb^D5~e0jE8yh<1xSQZSXJ;I)9{}ADb9Cn)ziE^H`?@ ze96zF@-{$wfUo!7?#{~c!hYS{Seu%-0Vh2_C*w%wxsGD~I66;6N<qRVeH8OP=WzIW z@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah$J@8IL26W$u>jWA+yD~v!>J#zPCDA}@5PlB zP3>*x&vRMW_z^M5QAyc*q_(Hj+^FihiG_LPBbgV`N8r_Yvg7J}SV+)6uk*)Y9y8wt zH}j}LyrircS5^Ub0632f4}SXMImXYw`X`u=iHip2Y4Ca*U>>iZXLA3n^LUv5oVQ{7 z{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3=4PkEHy)m({owvRc%6)#9EM}8tN8cxw8fQ` zjqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m*yrrU~NJT@fWoTq~l+JtXGw+s+KX3DedE4XY zJf6G_9_Gb<i_UMLGjFw3W%!scEyBe-<~ppEm0AB_Z$>K4yv-k?^SWqnb0qU_zQ}X* z=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE9qr&)9d2f6W<o?vBrYKi2tmMwYY_a*d|NAe zofK-$QgKBa6}QIO^SXM-{Pemh&yZ9BAz@W*ExY6Dyr_|98lL?8fn25SQ96H|81@hF zHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L+Emt5qOM8(Ugm{`_|Ko`K{MY4I*)aKFw7JG z0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK2fR%in0atDW=4isewvsF^mI9SSyCL#i|^}v z)jso|*9LCic_1WdPQan2?VXoeHx*mlYi8$c<>G>$`OiAfBx>fJaRl@D@-}s!nQv_j z55?(i#E#0_pmm=8n9RR_{~o?5G{_%Yc)LW3nQw#Ed8})f=I3T0FxS)5E~_j{Oo%<w z&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_^WbfC@OYbJ#jyFs1sI+GCz$7tPdF~~Z5^$^ zypp24ot4GmW<1=StZb|rS{e|7wPKqY{;Qv_s%&U&;o<&jJ?zfjQ)wmpvs{|;4oUG9 zqw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB{X7)l&>Y)@^T&@KLQJHt zu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbgbh2Y%W_t1bIo4AKgv7?j z3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp#cti~Qc=rfz6BPPc=am)Z zZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm4iN)FHf33dq?oen2^AxL ziMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS!Jvu*+X8sSjCiM$?ewU22 z<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAHz>%RrtkY?Dc(`C}0z2{J zdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTCGToz?7vMiHA_6h27x;M? zG4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PFu#0u=z<vx{R8ihHd|g0* zZ*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn`C8GF(lq?^%neD5~i$D%Y zA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9Uf`k|xLl_6?>FHj-#PVHw z+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7&J_?;hOG_guIbnEsfEMrE z15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky()hP^g^NR}V>ub<Uys<DZ zEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibwp1yvgs32!!X?D1ojPylX zI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U%lItE31<J~*c&<qiu}XNx zl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUaM>2o(d>hdD<1t@eQb<fp zbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K;-$F|{87wOrit@7HoI>w# zvd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr)w{gKc-)6rfQ&3P~Jxv3B z-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ;3vQP@jVkGgCEl4aI>0< zVt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC=seaDxxKjt6oZ^e^emV5 z1@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7Lt){9B*fTQHA6{4i`w0v~ z9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO})(o**i@agJm|M=<WsEAMk z0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3XNk$FcvT5VD6r3~M9Db5 z7*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T$;Z<TYs>Ae4Xkt259&NH ze`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t&b!}xC*Db}w59s{$sR>b0 z;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U#M8PE;oKjKRKFz|)E-s2? 
zDJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG|(=nL;`R8BCOA7%!XM5Y@ z1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl!TozM^n)b^d+hGzR$E`Q zzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H&z;6G7l|BQoeGKzSGLx{ z{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXfXJ(~?L!Q9480{DI-4x*< zWTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~UoW>-*l_cz$1=Fgudq&Y7a zky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z4A(lT<1>HE7#7;xfBewg z)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W;-MhxdS_%qsE>8Aw9OvZ# zU32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h*i-~-mdq95<>q<0qgD<&C z95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{8Jz1ZY?`^Xxen)ufZ*)C zySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*CtBNHP4jCEy%j+Vy%_h0|z z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAzi#G8-=^Y`ty-VA_E*tmu zaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=;j&`~EIrkquh)1o5rNMen zC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^71o|QSd%M2*V>+*1y}W+4 zKRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsuNwhT70Fv&Gw#E6`t@YLQ zwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_?Ng;Jn0=%?~qrIDpBUDd> zb4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^LnaoK&VvC4TZ{BSpq5lP zJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv2gKaO2m&8w7A7riO|YO1 z%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA}gfol`4*L~$2DQ*qNFXLI zdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XXIzBcyGzfj;0y_A~qr=yr zXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8<vcb_|2xI^M_TB<4uB=-V ze(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{?!QI_GxVr@jnuNGpM>^?r z&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?kw4<V=V2(EiPhs9|2dc9J z><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhNpl!~{;=<C(Gpa@gC+7S6 zJ24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y=sK(p2Ero00N>`8`gcG4 z5EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=11%b1FVB7{Bmb|*WP*_q3 z(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4W&%pAsHAXhedYU)V&?JF zCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$XNY)97v4g{X-@bbF{{1@; z4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{=dV(9iV>io=UAy<}-NVAl z%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`rpj1raqS!?-s1KnTIukG4 z5Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4rQuG=jx|fre<>TXpNS0|l zn+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{bR)ZK8t}8GkD8Ho8Kgb^< zAh=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd)Avrq=iwYk)av1*0b>IM8 zAUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{RzMpG13p*=22MfnRaTTMm z^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw&p|Y|f;K#_sz|EajTw1iU zhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y=|5JGmzRx<4*&g_M`--j zw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A!OC8P@00S)H>*rluS~xp5 z{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u5u7g8&{!WBJ~l48v+MGs zM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3!@?Lo8OvP<g?4cA!^RzK zT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI*fco>c};DMy^C{E)vvvK z0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@&^Rviw+)tlQ%uFPuCX*cO z)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ayw=f6HGkN~StGC}RE-&Qd z=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW*d>Dp!V`*7wK(eriums## zMFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^3yY2>I@oFJ;*`}?rR8Kr z#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?*rgh_pejI$lYzNQo;XbLX zV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@a06i@Ja<5ag%!cWi=JPY zH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}aB)PQ$x?pKVh>D4rm>B-y zkIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y2rw^ikF<;w;PaceHvVzu zC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP4$3M@CMHKgTzwL#Xiov> z@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-UaB3&liKG0dPMyCXA|WZO ztfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B|Eu#JC@3kAoE+Rd=+0CZ zf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2Pj63>1BqZsFe8|PS(B`( zZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EEvr|>o6;Ux!&=i?Qc7UGS z(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0^mX-&49u+vcFv$t$d)7< 
zQwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9NHLzGkb#*BPMNui~3t|@q z#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k`^DzB<kt?Pc{5+WdOg2* zrL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;&+@_(@Gm21&ereeKgM?!cBH7(h1o2lxUu zC^Rx+t}gINrMdX|`{Wnqws*AT732m4`hBWEMXS-sAo&W53zk<F|7g+^IPRk-4*`UY z&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GMg4VXC#ih9yFJJsp^Kjf3-@b%8UEP<fs>^e7 zvjG2>lH#F3aAa79Mr!M-28Md?+`IF$x$9vSZbf%*XG?2+R$hU<XRHdIzF!c_Cu4C& zoqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiAscB%1@dSGZ3d1`gJ)^dzsl2W>B`Y&JAt5Ru zJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPWSPV`Z100MjU@`hQEQEEeY^*@7K7sz=J2?gU zG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi1(M&)3Haj0&AYcpCPr%NYtpkaV&h|?q9fws zV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1=+1!TZ`|H^@p?1!$fIY^)^4m1jSMukHkDUb z7M2zv$)yVO!OL<A@`@`;K~OEMt}NfUF+MZZ(Au1llNAvY<s0bF@b?V}4US2SPtVQC zEXYqxOO3gdfLwk&a@BC{VCc2CcC`;rPoi^@4~&mBcea;QRi<QT1w=-=crk2TD5kb{ zdS(PoeM1$jt}+%UrmQKV?Hm}J5fYQeb6#hUC>}#ereA6`BqvDY$ic~Fsg;BEozq(x z@7A4LpfOrn8$h>YWThn~CnjEsgGYZUIWZ$E9X4LR+<bLu;pwv{Khq)4UOb<forM3^ z)mPUtt&VB1(AL&8IX$+1Z57nW<@T1kkN^5)ZI_#-X2u`?juDtIkJ!Ka?$MJ+;B-?n z6Cks@dpiKS@Hsft3(dlDq2>3#{L{;=m;d6$i@W#kKvUrUpn_&*CuXip&R?AaRNsI2 z;MMEbU-TL{0l569&mON|zY(6*LSSU_C_3<K(8Wx`MNA?t65@|3I_?$F+AU(ZTgdQ& zs<95<)WVrU^A8HUlw43%(>*i@B-Yl`)zoqMXV%==(b3;GHa8m+7b7h#b?Wq~!$*%G z3Y#el(NEBFdIoyI5#e<$O@rej9lc#G?X8V%Ep45bfnevB76I^RW=u4(4hs9b4<7(r zfvAT@2l|KlhDQfy=4Mu}uiaw)pzr$48*4YNF>U?h=Q?b>^WZ)Nuv-)HfVRK?=+TYa zH<#8|7gny$EiKF}&d)4dnVFxxa&=*8eGSd>vv%|5-0~8fUR!5-O;ba4LtR~SBjCAf zpdT*(GIH(h$R)S8eZ2N|=C$_@PEKr1J2Emo1;}sg=*TH6k4j1Njf|oC1=_lLm^nD> zTUu)wn}}){Y1l>AwvUa>tx8}i?7Z@va<;nkOsA+?N@Qhz!+3Vx#K7dr&lvB`cW(hC zAOnDS=jP$LPlADrSzfuiaqH&OXHUQX(rlR^OJ2Qs4ZiT;(E~7$yQn>Q{P6M9#}MuT zFyIqfW>!G|`{2>z2M-=Uf{pO6Kbhs{haY|f(1PFrxdmz;43<I3{0EwarapZ50Ij`) z81?;oXyNzwKLt&b)YyjO+Mc|co)T6`qUP}z2?^(Q{aMcI?i9uE6frxlZm&zQAky63 zLnEWpvx;i#dPYY9)uXdBqqEb<ll{j9pt#BL{PHqeCr1s8mb{XJf|8=Vk|O9~MW!SM zQ4}5%1rS<TTV1+-otaw-$(RXv14;8K{qbh*<4@@VU%Y(${P}|?NTz$(4y!L-zy3*X zskd)`C7IfmG@3}}!AHz=vCO=L%q+=|AKiL@<OfGnxxtyN-MR&*G{3qswKzXHKljNt z^T)xJezGew%xj<daz4S?>9+p<;<|>Uyu#3=RFBX|CvSf%Dw0thV~iKZSlI@ZU0Hw7 zJ+a2GK|LUfXE~+8t>mQdo@pOe6_MXp+&Z&<^ZqY=^#jkE`P|>ZbBBdL^yvS~md^22 zujI?;4FY&nTrXN)5;2bxF^d&3jovR}yhqSrm#{ILDpkvbU_qu*142SCrDT;=wDj~M zx$~Hrbhai`LNlhJ88`aJ#|p~IT)jNaiI&D@rpD%*29d9sm4%BNEjc5-bD($e+WOkZ zw6GgLP5k{u*duAweund(CR2O$hM7a{&D$>m9(G52jh|#meu^9j&5sNxfC3)D{LPyy z8ykpAURwv`Us+k6UR*>nBQw)wZe~3Hxcud$vCRJtOina(bmUjpB;@1;#V65&!|gqN zEXXtiB1v7}NLbe;ET`+%lW((Y2D#)&^42M*6dl-3DD$XMjJ<MgLn_iLhu*$@w++<4 zu&myC6p+y+Z5hd_-~wnDHjNg-N5R5j84}A`EWrNXl<6{hR(LxH2Zo<-bR3XXWpgX^ z;G2(o?#kAz_n&NNYNE0Ia%y%~RD4`mbW~VOG*~!dF|Y-;*EThbOieHoB41zqY3jhu zgvd<TKm2L-z>mn^k_Y+qn=huz{Foy4^(T5~rVHO}<k9ozDB77M4=%QuMhfiOojWMt zS8sj>{Ot15^x^^v_+LtZ`?IW|pOQ><3=YB_1Fncl%Y++9^$)hCdzm?sbuEZ;SOXCg z|KjG!N8i5niLcxzZLLNw!qBS@iJ7vVQa`E7F!ao&MpfT^^n4qre_>g=evcGVsqK== zq3CkXFjNR1dBHdwOhDI<?VJw4{{K_<IH658u(Ses;}#Sik&>QUS>4u$i1uH~l8LT@ zUm)Mk^!K*+b+`BTAfJ8Ru-ZL1&^I<ZHajzYb#Z~22b`HDc57PXdw@Mq8URH!5BM*o zynp)3dB8v6oGFSv?*)<uS_8Y!q;~}H4<CS+-MD-2v#Qy`@XS2m%!J7E^Pp;eGtv1^ z?*o(MJ3B!TCg&H0BqoEfab);gQQVAe9dz(!=QIeK&Z&b}ZrpwLTALKXDQ$yst<-j} z7BmRvl`vyHt$70HW$2mX5?OWY!P9N5{#E6}4?h}FQ*~$=2UMshus%X25$BCU&lv=t zHwrr_MPxmt@vmwOc8rHMo<MM>IQs+y#3rT`mOz{j^7V@Vn7q0;yRtHW?HU*?pa~^E z3IGHKv&+k?_wKLWxp!q{Wo~5|!ThyrfU&iI$OeA%!Gk-9&Up;rM6+jZl{){3|9`Zf zRNEBrPw#)GcK|%1;Sou5@BSxA0}hA)ejNckk^y{aX$nYwVSaS(%J0DWPnNrehbvoJ zbIL2@vvd7p6I=sB?K~LfP85Bjt)`*zQB5*AqIzND@$}mLvs$zhYBZczt+HdDfS!L~ zW(S|J{@(Mt5>^og9ywl#_3z%JVd=JB{|kz1Sg9;Anh)d2t>P+Z9DddyNI*XbK2Kx4 zIi+p?P0R0Z7+)m=a}$z1iO%p0i;B(6DXgjM9UuE*g8PQ%hNPrK{{TO*<dmegu8!ID zwW*~gv;r)YmX~O1s#_5)Cl?p4tgeD`_?RF0v&h-1bC|H-RQL#7pFR6b`_I3G{}(T} 
zqJ2}wF#Y8dy`v36;DG9xtqhM!8aSVYb%f!;L#I$xbLF>F-f!IqpF`7AO+7sjy`~kF zM5bmi!lTJPfz~v4yuFhy!AefY_?WIwa#`QTqnBl^<6JV<Ql`;3Mx7WwM%E&{dHlL( zL^1nGwSBTSYEFr|^vtBv-fgV@Ri&+O=A54IS%UypIcGq-fPUa<-GEa#KQMk>|9{5@ z{0lZj5a(cEVQJ|?qxc6yADv!Q+SJ|sGgULOFh4jk&d+~h&z?QInT3Uwg^P<*T3WKX ztrblmJ9TwYK;X>R-+aAe#||4?Yask9D@#D9DBCkd&c@F&gMZXHOxQnubpOd?=FrV1 z=ig@<>H^PSz5edQ58r+N;Q^5O$A3NiI6lKvGLP?lL_13FKMPtnH9V8ynKB+xHB$@o z=vWOnESe1ZcWRsN;o+)QAo;4q+<drcNc8IEYvDpOw6VvUnu}<eDv{z_hL#?_eCrsH zyI;y$!=+f8Ud4~~wGAq599rwVa?8{{jO~Ofw>nMTDbawQS=T+i4cEV<y#Mfnym`o3 zgTUP~4!l_3GkO6h0qr_IC$zo!F<yIh0{%rWV!yr*#>C9j-pSt6$2%e>J}bAZp{aj- zd@I$Xm`9Vbvasw0%<tT}6PDqVkB_&Zxe?GZGBbVX@F4*F*I$3Fjm50qdjPls{jjjQ z3P?i;_WJdoh#aP&gpVK}jcq@U>Y%pC{Lh~+t}aJMMTlP%wXwE({rwMUr?83k&)oj- zAL)Jl)~(Ny1~mNo8dJtE%`9yuyGO^VDyu8Z%uQuvrQ3VD;9`G;?V<AU^kj2)H%ju+ z=~=$f@f5!xG<wyuve7a$=GSy^2(6r0zPobw*+o6iBT8hPXN{^;!C5{3gwo-Lp|!Hk z#ewNHK`A^$RmXL`waCeMubjEnTib~JOA0MG|EM;DP2PE*n)@l7FF)4%xHjXMmM5R4 zJGZv?SNLca^H_N!Ykg$abT?W+NJwHzc3DMZXV=Kg%%`p##r#p;qkwi`dMPPMb#>MK z`}YIw1MEdbg@Gpl_B0wr;LIsWN%6YIIsiP_{QCOhwQC^nLDg?Od<36hfItA`J;3wJ zS9hK~MNtY0p9xyTfWV)>ef#qL`zNnnJ$mut;j`yw1y1kWwexR(|2tM&>&GAe7XThr z><?eOeE!}0C$HZ?Xz&T@NNbNC-F)y6CHcEgpIy6i7g1Tz*MJw?y}NSzHb^ff;Ai2q zi%W|*xj4Vs@eKzDN6*l}*1YGi0IPG?u5I1eH8fb&(gsRCAtx{NQVK+`4qkqi6jvjN zUd=3IvBpPreA251Z$0@ow|0<2nkZossmG`jHHp@ANT}{xuI^nbZ=ZYj{zF4cAJ-Yp zU84ANMgbbmmz=}OKFxl)UH(kz9+^9&PUljhe52sPr|o@A%Y#?b<ERGxh`Q@x6)K-; zgn)CN3@KF8$i&3X(cZ%wR6PV`g*El<1N{i*=TOYg4UCT=nBTK|*REam_I8inzPGa{ z0oCu_i_m^(SO|b;aAK@`u)l9)7%BsQwso{Mx3#o)cP!n!)!x$?5fKJj0)PkLzkc^# zR#rOf)!fnq$lth+4z1h+pgwu??#k7xIoX+Uu~DsUEw>)r?;jd~9t7!JcJ0*EP`h^f z=EBM{(EGDD-*xtML$hhA$?Lc8+<*QO#r*8Td{2LGPk-Oqty|NJ3viUOvXYI5k0-9o zW@cvu1o*+0rq(8i#E}HwQxjofp=jsD!^2%xUfR}qd2nhHjoGJ{mr_!by?q$z8EJ5V zQ%hF|CnxGVItps)QVNSBQqz5-;y@GHxOpNZZ)uH?ygE^VlsK|*^X~IE+N20BX&WrP zN|Rc848!nCyWBXuUfMoCFmnx!ScAed+4&S$q^v|tgH;?75{tUF5&M@E6|+DVdFQVb zTsX15yc%vt)ZGrL(hjLocodyZ;Jj6RD-1$vFy<s2!ODu_2HkXMQfhi}SyfBx0CTJ! 
z)iwZQUM9~2=B;gr%XjX!w6}2`-~y_LEvBaU^@opzgf6hNv$3+W>g(%)JU(;g6dN1s z;UkBGf&<|bmM?d<-+uO@sJQUl*)x0gB0KKe$8KdsK;a7f3=3vvc)%XG(yraR4jnp} zo|b$NNe;MoFWBC_?EBcEeG?NSkoS7JI{WtTgKE(AoI1r{R9tZH*>h;Y!`+RIjSc=~ zZ%;aRP5`u)j*fOnND#Ef!omVqz)bMBU*AA)?fwG=1vwUGf%aKhS@tvEF$UL|zka=c zd{j<OW-s$R;s4^|Vx7G`J!4~_{j$m`Km-QGC!s@>mM*TwBnPaSg(A+7Ro#sqTXW;# z%l5J5g9`S7IA1;QIysy4i}>Jz)+@FBD`g!EH|~9YxkXLenT=nKQ=KYh8KLcx+&{Il zjo3e560#b0N!fj^K;C5#a#)pmNQJ_qOy*H?KA_-mNZHvwwkf`P#yv7q(-5fM$<Bk} z9T^*!nUh;l4Z$orZV57AE6)Sw7dLJI2cABC5+DcktE#2~n1@x6Iw0vaHPx>_dU}Sr z3Q#}5eE=2!PW{6}b@jFADpMxyL7j-=;tW^?!nd}zg4WQDUw`urL=dH=MF4fEh@cV> zyJt5j99>-<Q&W>2J2z{>Mre?mhr6Sv>+$QiA)&#r5gI>mfD0<X`4}1*n46h?^|!wv zmC>>X5osxD$x>1h-+c4UUS{KPAoS?z>8Zf#!oq^E@8)^#LE4d!6mJ_C%CD+P&Myj2 zPV<S3r3QqMP$h4PDEX7>)_i(FWvvrWUcd8<DPxzgRCme;F)wHsYVMt1Ke$@awb(qo zy7euTD+{Yf1aZ5C4Ec1t6l`P68QBk>zSsuspDeFlzdof#`AXW3%RGjMN$)%gjtAuJ zx#URuq^*r;DG_B8gs5h9GkaYEGJpfm5g05fuduYfv1@o3sQ$B_|A={@!MXKy5n-WS z%mNyI<=%Y(fzzl9U@)41d2|)vghCr=c64U?;zcnO@XAUG-rim;tSqoQ1j3-eOG=6W zc&H-?oH>or(jZz{KoA-p76Mcco1rynDT%DibOytdSrZ(A#m2@;R74oT_RgL9db-cw zeFzN=hE?Wq_n?Ty<4r)cD=5hOgRlt=J}Ynrag$v;;k0WTYAr1Z!0@mI7~b839vl(` z*GnSVpyw$sFAG`-{sk4lDf~l2vMZ`$GqVF?6Y0U>jtoEO9F1)3f#el%#$PMD;@vZ5 zuRVn5RRrgLQiBdkUd1twUz?FoI@B<@TGqL6b>ru`<~_U<*?ARMWvqqq!SdG8UUAji zp#Ag3!98iWu-Q5L6n=d_P8pj6a-;)tb^v=YK20}idI7(4o*=;&gQ|K@A5irmRx?Y> zYuYXkOpZ??+Gg}9?~%`#zi|UFFD54P5%Utif_d0W8;j{58|m!pLAe(sO?z+8)9=0q zv_l=}3tHP-wX`%~UsyeV?ks5gXYam;I2WY;#{Gv-4IM1f#p%H1=;`8M8EOK0-+cHX zI5-d`d{8)dpS?ga&y?u9WM!o58|w$gMxa-^`RwKTqsL3P?zp<qm|VZ><jE7O_wMHv z<gv1`!a8`?z{D7+GB6PlA*65O*x%CG4t}JptcaeQ5#A)Nq#WcQBRty0H_+D2%go6I zlsv`=l)McW#xJG3_u<R8356YSpOx)0^}Xu^4MOECBOrRM>RGDmUwQN0&tHsu_Tt+! z7mZj1v=3t3r7gqdZK9idXSPB6C(D(q*N#ft$T}t)2bLdJLP#EAdnM<CDlQ;(4(JE& z(V+hg<1Madqib#fRPXE?=o1weo0*eWRo&Rxg~WQ3<6HGSV19OeT}bc(ig_6sDbU8B zFc0WmfAj=dWoii!_TV7Px9`L!#Gyh3XcJ_Ni;FWa-%(U%L`6lSy8|tQIJx)Y+vOWK zR&Q=xyM23pWf?s*w2Z}KzJ32eSV$0BhE;Vn6>yELtc*`YJ-sjwQ7x)pKpKtA%q(0- zWZ2N;1c_t|z0AQw2a#XDGK1FR$B)gdEN8&(A2Hw2+dZ|k1VZk}(Iap&931-z1aq>B zv$*(0)J=2^^kTE~A#{b_(9Xjf?iE7vcr!U2lieybGmnh1r8`$|K9Mx^KBh|3^Qu*K z%0G_ra*uCBhbU*(?tk(7)|8A2){|;1GBzi484`pr6ZedJkDhJg_D`1R)LMs#n!>KD znxtqhc?W<xM6!p~U6C;y4LbWd+}?A#EGkqLLo;JrJ0i`U;vWp%cVbR{L3J%CoWTiZ z4EssX-@G+>b@9ZB<0up{7%iaCt(d>|@X4oDAjzvYZvpIm{d`ac2H@{Ng#V6R%!m@& zbf>$bz-D7-D=jNtzjJr-+B(?E#>VW@B6?^P^8j*CKD(JM!|LNV?{jmrpr?Qe0MX&8 ziEt+7p-pf}U~F($IJAa_8gjgyJD?gj_W{I55X|!d<}*KHp64J>XK(lH%1T31128;9 z*`Lo72@sHuMoR}9l9Wad3Ul!CwW7M=?VNA~qPm^|m#Wiwqu}b!nMdEgy;R(}U(!n1 zJ{yG1X{?_DF|wlTYIW}tXq$KMe{n7sZ9OXM8O(lF%4t0xF|!cY$g*wF{>k$0!w<<d zllF<1mE3X<skt6jqXF7^HQo8NJovHR2gC_1XE3`3jZUf(LDfT0X6Mdu4~c+IJf*O> zs`YYvU*A^DuiU*?Ra=b?(IZ=|t=1kq{EYc6+6I+6i#Kn6!aM+pot+(8L+$9%BS(%N zj))BBJBA1tKx0;R=IZU+3u~*$OZXc%Cg-kv#=JJ>@plO3(UxKL!K?4Gb3S7pd=un1 zig__H(Yb5ark0m;a<f@k5mW=(V`8II(~_Y+w1Dz_W_AYK7M_PK4K0lvoE&gz&?7KU z53$3{UwrGE7<)&>lYIk`@ku8a10qS=1TTWI_@ByjN?0*4{PhPfl+6SA)!jhVDA;Bk zRCA*zG&Z6nzy9D?9#Bbn3oF0s9%&+v7G2mlP?8Yd+CRIE+do-SYR8V+B=09CbLs{1 zY0v@f$1q+ev>7LHetfDlmUBA0gpKx!nJVhy4T!b`XR4!@uUB|<WNJokWp#OD6Fhws z^R~9u_usrXFw}?Ty~yMVEE$>Uz~lh(-J3kW1@k)*+6T<ftggf-#v|kTdv=4WZ|!J< zo?>Kr3Vq9Kc6s^KN&a0(^Sdl9E$+R1y?E`~)$7;iS6AS_$Bu34c`R1@<@*of;$n!j z*|kenR{Fu~?-DO1pkEU}m<%!of>0FmqN2j!Xb^5Ylbuj41YyP9*WYF5B6HNhmQ8Jw znYQT>Jlx%b1GB5ELsOIBWl#acQb~1nRcl*jN!g{`f(T@+CI)&yAOs6C6;zFmxrMT> z(GF#*sBvKP;NpW9Z<0znK{~2B=IMA;AJy<wvW+chnFW#ud->+OUwJy;e*235qS4-S zI{Q^zj$=Im_7+~5+o1iEWqR$--;E=`Qgqp=MCHXWPGG%HV0}*N_@2V~ozV;AylA#d z*m%c9OFmf>Z4*-?8#^ME?&25Z8y%OJTaZ;+euRm6Kodw@hPM|xI~xEVa0u!epaoFb zqHTV{^C;#4;YbY2eE?}>&u)+<pqv46b&Yi$y<O08fc${gfL%E`IRk?N+Pgby>#DD; 
zEML8`apv?XWL%v&o-;Z#ZDC;!&4SqA;o%vcoUpVcppXW+4#y1$@cS9&9i8maxC4CT z&dWFM9(44*A;3JGLt#lF@Hre8L>Haz+BZ5pfBhQliylHlQ@x<3E~B&}C^nwv<3|sP zfEeG30;<MQ*MbP4>sd`AiwZp;xgHpP{>CF|V;@lR20rx?W^ue)o_^`=$Z$=^!pPM- zzxn2*iG?@&F{QmSHU~6Z&*=HcTZUh|^<W#ff3j%1q_CVb+%06XPs-*L&iAygKiC<) zfOCc+e9A6+&*NC7i0sm~=TuB^<^)rFCtEi!T3{$BocQcq=-iR<I+nfr_OYYGfB;dz zaDG7^Xkb9^R?Kh4uv;)cb_LPZa&j`??Dz(S{ZZZ{Cr_S0zhi<BxTL%UT1LNP0&fB; z77-Bwz%Sjp10f`cY8K}3BlsH4l?vJ$9fUu8<Paw(2eX6N>Er9Y{_v4sfbSN}uYogy zaN5m`j-gEu`5?*AAQvz{xv<bTI)dg<L&EDldrqC=hsy!KKm%|%83=n%@SlLaPM$hR z4Ggt*^#JXsPqYPQP|z{@Mv0>B5IZ=x@#xiep0O1i(l)9NIokB9!)oqWYFbJA{6~`i z?Vm*D6x6dFQ`#wFvQL$AM2jw<?;Dz0zYW?yS;nuda!XnNUBYIEgbjzR1E3v!yBIKk z-YAqyoUmKOgj>mpThU2K!vbfH2z!z{!y_yzC@ztQhZ}vhY!~vy4Kk)7DJfo9T!_Ao z2%i98!1LE%f32pjdj0WJWc8cPRZu?Y_qlx|L&_>j`}XaFMs_d@)RB>qn!k1pJR%^_ z52%|-$veOP=4%ibfXORsYZ+M?;Bh;ddIsu;ga%(qLUN)Zjlkc|og5q-`uciPR~JFR z1M<;k1qIJTn-lYMQ&+DlDJh_R0c?lx0Ztwkkip6EG3W>&9sys5|HJviW%iGc4ouJB zP4TE_?L-P3We12KBGnC4A0p%N=E}N;`&FFzbbKq?ChtFgol`q>MA;Gf;+0W*3hOIu z7?4mpTtBc{+OasaaQpW>oev*AoRu<VJ*~;A;J~FuJ+AF3XAySo_BPf3*<$V(^|iS5 z0X6y_8GCLeilA}$d84ok#^FMy(P#DiSp^L^<?K#qxgFJT6U7kqEUX|>bMW$mc-Dwu zuC1$MU~B|n@bGl6ZK#>Qabs$E^Zj`MC&Sy*!jb^W2Lv{_0O%iN)dE=s2{HEZ70vk@ z8)fCC4i0u!L`w*VA+W7)svny}-suI1wsmz-X%zUEm6b(saL_38<#r(6mi9IWM>{Jk zONb^Knj4_wXz%F)p>AnqK_b~!*H$fDzcDs<WpZ($vZfOD1^p8k8~}m}J?P}(BAl~@ z1;NqLzO%pA&))|bgd{r+PK+}LcGl7}(g67&lYrsd2M24~E?2Z(j!8?`H8jxB)>hTj zP{(3TtZfKns-caYwwZ;Rp1~n?=YLb6xki)#;HOsais^eDQgnvURn9u~pgP?px~^ep zt*ZNKZU6GC?|$#gV`fA@c0PqYQdS%)6drZgV_2{F+?H+7{>id_{q`|c3YQxFm^Ndd zypw=lpb$P%$Rtt-A0=uYe^k+FuPB~ZopxT2aT@C(gCiPPS%Z9aWcV>6W1}*%(@QH# zn_Jt5M}}u-$L4<Wwk_ZsI1?a^{E%dJW_E3DapT6~#s<WdqqEanYeEI62`ELs&)xd^ zE}#@?glSt}Xh*lByP)+E_d#od>oKuC`q{1ExHo23SGK+b3<vtS`vSBKCk9(S{a^&N z8Ttx+-{e$vTU$YOU3yVzLQY;-N`_BNf?IHeqfa1_=59iA)G@czG%!4;W%DocG&N#a z<KXhGr?2b-@;GH|)f{uN^vc8P?&^+-IgK;5{VS#I^HZz$e*e>nNhoCFQ`{|z-=|DI zpyI--<!<VlybanvU$To@_bHG?@S!Jkytx!zL`<Vm6Elw!HV$VKHsX-AJ%{s>zy}KI z`^X!RjBQCw*!z2gMusFNql5Ofm)nuKLH^-K;=lOQ>A5Sv`GdP(`TgnW@2mN<3ct4| z^R=SEsi~Hp-iqed{Oa1YqLR4GoY2H%&#)+DpoaPN6W-2A*TP!Uz(`!%{J$&EL=Aj0 zYe(ld9>?alb1T@3n?xG=Hk{K9;Kwq;^7=s4lyxpPkF34_@CW92X>9Fh<5$})WWcKE zxL?_sPs{z3wtN5B{5EL+d@**6IgayHAjKb0p&i38#4Qp<&ErJO<0XhGd`ixH1Pu<U zx=5M^Dq2TMoBC_wi6$g_D=OXDCjf<ga(+>6Wi{|&_wewq|B~)^y2IEnPB?Jo=k3h> zMr*&~VxwPb3jI;oz~n?*UtdL2b6!<VYEemSW==?AigyGuGPyZWWACJAWdm`m4A$gd zm0b^O&?ECYN3P!OpS^KTht8)$0aYVyd5J@jY{JN|>Otbx^3KJLhkxvav8yZBxz1|u zxuD0Q;J~ivbV!}Xqe}IQDcVNtpD#CV-4oLFR<ci&wu;&>?{rQ-NZcwJkS}4G1fQH@ zW-J%Y1aO|Jq$o|tIC(Q4ExZ-;`&qh^H^TOzNvVms1vwQ}mCY@e`}&!a+Fkhq$bUK; z(%A3*h4tuPXJc1>+edoG$C|pkE1Fw!E2>ipiefUdgA<Y&5i!(&5PJ`A0K6#zcv~#q z4C2;3D%4#HF4XX{p4po>AAf7j$l#K-)25ZGI^}UFkR=G=acJn;zA%i0u7B*8-AgMs zxCAhO`Q7sNtnv;ZZ@5(`T7-aY#Qr6vs&PQfD8Ssm^rCqfu+w>iP$}zFam!?Bo3x8& zvHL+%N?D2+`s%nO8qm|V?83Cn?2K$k(8UAqK}Z`IcPTbAJH5E9u%@oQqXS@rC<KIH zf6Aht=gIl`<RN3f8>OGL%^#bQ<JV`ZVSe^cj9>2WhlsVft|6<e0vtIaB|RWE5&W9s z7X*^V!i8pH=ZL&TW{g+H8FH&S?o)9Ez;{k>EZu%eizqpy<fQ75qe&^|R;G!V1pB77 z*A1)y!?#ad`~LT3jrob}%&%iRuDnawc$d7xUU_>?C8q;Q<TDyBuV1~{M(kfwLJ}%4 z&dIi6)fbEcITW1535n9y=~Bd0S-Wg8<1iK>Lryt6Ig1eMfPA8V5sn<EWoBbQw6P#l z9lU(q0>k`b;v-WtlJW|H9;;egFZcB!gg^Haod42PIe&<ye=!{RJp$%S_0eX#M@AYu zyFkx?fJY-%=)ZiU;^?fs19W83wk{ky>DacNbc~K|+qP}nPRF)w+g5kSw((b=bMATf zjCbD{?;qn;YSi9KyXIV#n!DEb%{3<xlJQTVL~c;rt@M2$Y<$uf<Qe^5>;oDRHBV)a z(WQ}zZ8Kj97XZXVj~#O2v5c?mz#DPr5K&Wm-lMtLy2aOh^LyHFKg`$D{hO=qi_Y)h zZAVux;|mTnJPl*<Fw~2m*1gJK7%3PGJ<CqfHyulcXzH#oN1!)V$tsamHla;29bG~- zKqE$Rd7ngR7(SYbTGC`>4ajnG1BmDMbP?#rnK|4+rvz}3-|rPAf4I`e8%y{Y37Jt) zzUQBm>k0l@Bgl|8J)cc@VF(!nO@itc;sLqO)zQt+mIw1e*M`zYpkqY6&x*;S2#{tY 
z;@pE0rXxWS$tLRLdhi=-KTi6?Ag2u(8aHOkTXgP>KRNeMb*SRTUv_EwOHXl*8McDF z)RvgC#rV%*4*x4h;sW*Bi^>y>`Bi+&I+;<YRO%49*_Ru1)S%?!Y;NyX=$W31XQ-(( z<#-zP2psPnf<@b1S=+?A$z&q)fp|t_><dM*&Lb37&gxyu?Iiq+`HhrC!_8#BCdV;~ z%~UpJi!(!m9pM;|@{zzXWU$8i*2dO$jCjs2iqy=<Xz}x`)#qv3T&V{~KLK}GC^mr0 z?LJal2I{6b+!rf0?IQ!QdV+mI7^WAe2A9H24d9^^mRQ)6Mlp}&ou8Gs8ly}t&$sW( zn{q5x5OL*7vX&h<uk7KNp$_HpF6JjEY=3~BM-p6m*I?!--ek)s%M!G=jA*R5FZtgu zGHM#{&f3A75DlE>-*NhQP#(F9!gsY<`76wy?#CZ1hy6?<CaW6nCt?_ImWfMo<@2GB zAt<>HKyME&9FuYH;>Ok+CDg;So)-%yk|y5@Z#%MuZkwee{2^JynEM73e#w19c#4w& z%5kD<1%m1=Yr#i#9PI<*@_U%o=YQ-BTGzv^%<jDPa(!W~rRAarGY{i~dPgdL%D$zB zQv1{3IFP6)SQK!im?6X}OufA|KRh;qfQfHpOdQ0vUVZpj+hELl^t^N$k$V!e_4=!k z>)yz+=5@z~IW_HZlACIiBJ}Wa<*}A<pv3LuWb%6qosVZpZZ?SK8^UBALCR5&f6`(d z$r=P$u<1T3JF6Z(ZbrJ6R`2>cucpGH;FD~!Vl#NC8Fzkz($F)FL}hJpRS$5Jl%@}v z0dP8oa{Zfwm+J4{j%QJfO~_PXjBn)}q?bu<=zT9V+-Q_*N0eOq6YC{T+N@?tP07sg zaME^EojggpY|_xv{h6EQSxAIEjYVx6UhYbuLYPQQ{m|n~SQ><AjuSM1%!`%`twF)Z z1Ox;b89TwdVwAC;i|31|WIsKzz%EEgrU8tvbMn%6f%||t?%+zwZn2>@h!Czfur(%d zo}HcXzRX~@#yM8j<awqyH?q}k82VTGh#RuOpu1M9PP19VlhNfZfdz35(uQ%)$lpTm zEL@eIsTCcysV?+YJ07nVRfmTcZw_gZE;sl*M=n6`ZwQk86q`&UElQ`jAnBk-_2;E* z>(44{FQ{v=RQ1^_8&r{Bk_)$X$mnH0WWbZDv`6;2JrbLUA7;sYZH|`0Cek-`a7>=P zfp-ova%5pV@!zRsLmXGic@|o{hNp*RK;D&bxG7esxeo^#HKQ-I*`lvsg|!I15&Hz5 zZ3PW>pl*{2790IV8Z)x>))!XRmbckhIe?maIa{1e?lG3e4y9LnITP<--+Y6)oaoof z$9L)#?77031su>;R)U=6=x77f(T3pKO5KdBwB*~O^bmhsvAPAa9AkNz4q29zSh?$F z-G3+BprRd|00d%jc6a~f>N=g*mj9%IN5CtZ6Rz;g2X2P6D8JeUaZkrg_yYK{^>A=_ zyj=zbCo6J0wl!5;1l)1?+Y|b+`@ce?t)NuUQY=4e&%M1Cyz~+c*+yXN<7?|*apKX0 z#eXC4T^l*#FOT<WJZ5U<E5Cp`Kgfx-@GOsy^1n`y8+{LAF4{IMIl9OEwQkdA+P(PQ z@oW^zm;jBJ1kXvTtD7aBG-3v50F3ZOc;tsTW>P{<vRb-=BE&>{w8Wc@kB>k*;tyMs z&s}_xFU6IhSv}3^>4G2;bCSWuq&YxfcR2q-mDZMi2iOWLL5E^g^ii{;Kp<EN>BpEz zr*APu&yYc$kao=i$J;L?-8fXgq7f|Pzp{8Pnp(nq)J3#!`tCP^#<E*bGL`w*G6USU zJ+y&3yOO|&1~@Bf_HVsqb{#E72L@uodd*lrHy*Ugu~OU%c0I71W*Rs8Uh;nR!>+|v z*5xXj@m7bdVNK}*Pt`|}mt&!A*1eaex?SzO38#euvb7{`qFVoHOQsA{)<(m-Q_hA} z86U2Y&AY&Jg~s_g?19b}v+eWfY5E~}HkN579NLkeks5)e4*|6sc?v-saE@|}j|@Bq z;tyO-ZhRydoTa3oqO7I1v!w0R-d$c=YGhyMW(~LI?r!=@BH}l3<?oyG%DFvQy7aA3 ztEW{&C@3K#qraT0k8{jL|Doj4Hk^EqQsCTP_`1!w#5PV`0ya`EQ0YgJmjKkgBw|ny zW02EMky}@LM@<i9YMW3>k5B-y5xVg&wPYoJzHZl__(~4y<2P08pho}?1B1G8ZV4?B z1tXlu%^ILR!3AmBLWbpc;N=fsi6+nLW*^4Vyd4IPZ3;&4dVk|kJw;<{T^pgNL9xw6 z=dYiX-|hGbJ&Q`!;=D?=A=aFz+po*oDiWfW$3(8nWvPCt<)0(7BQamg(p`1l-J>CX z4xN64M@y=JI;rpqL0QL$Bbdt46QcS52oOIZLh5?~Y6(wv&eQNG@JC$oHITt$WDHJ% z$HSxJ?FO*K{%booOmqA|`qv-=w-`OkIXM9cAnCXTMnIh681d4SGk~~Cf;B=}c_ofC zR)8F#0c$8uw?-^Z14;|O!1h@C;4Og1<co5`0J|^RlcJ!<ftxa7Gm29a>LY7@InW(_ z)$ENFORJEGWWAymsUX19ZIQYc-%+kbVT0kkto&Ss2a*8wcZ(=zu+1{+5ouv`eVAc) z{8UN{nCFygT`Iao()S{J1G8mQ({*uE8TE#jgznRgQgy(eT%$8|)LvwxtiE{-!a$*U z@-(v}PjklJSV*>q$jrN@v$=MSMK`7%gAm|b|BmR)N5aEJ$j-z{#YRns()TSB5<~)V zjnE6I81Y(_l`RN%sm0Cz-S+e{FdoGlxM}X7$5+4wM_o_P%FfQo&CSS6O-PJ_lB`T` zeoJv;h_eZ_+9Wx-6dhe}f-Zb=O3)<Q&l)5c)8Dni@0y|sWlWh>bvQ%IKw5hF0lwII zigUT#Ppt@ApLYrOH|<B1WoNCkTur4$n8W0zRHo>eyYiitM?g_RYm7@o4TE)t2=FeG z#|OaC^SzL?SLf4?=1y((qMHRpG7O~AlQjc0{p}#zm^O*2CgqfVIJ3#oK!+b4$w-zn zF+=Qi5BHYNdvqygqsK(kJTwzYK|+kcAMD-2@CYh^C_%E->B;pL0Z0UREocnM9?Bf- zFm8G(G#vT*n*jJH<TAV^X>A1rs=I*;9$|Z1O@3QXeXzxtqYkJu;>&Wg|Mga+B@pMe zho%CwGeLy^-MMyn&Y;o)pAawW0vjc2fwDa>^7bUm%P{2d($4za2ttes+dVv*y}Pc= z?U%CiX00^ZC}aia$Q#-BK>yitoYavw0tMH3XQ~B44EqW0D<$UyGEtVq4M_#~UA|p* z*9(H9%60lLiOC=_IU#x#k)wy9WH=)#3&2YDiNq)!7+ZSEs$GQ|Q7M*^Lr|Hf><leq zvBUR^6VZ3_*wgYLJZYEArK_8H1}MJP%mUtWeF-?i-bS1EoR_`-3LW<n3;P5G?FbM! 
zBRd}K{Tn@y4v;v~F}Oeykec2E4H&>N&5v0<BnHUt8a4hHkz|m*j$~+Z4r&h<`yMU| zL=Zg8|8uy1@>?LRmD)EBu47lD#30~#bTLB9taM9k%<(;_u1wj^<g#;WXJ4Cd3f_oR zb!9bXHAddckqP~kr;hkXQN?O7IKczWcZOZ8*-AjyDcfq5NbtQJo)3?;Q`_}ApH87o zR|~LOBn_J=l44UIlJuEj7fSZO6$&OaN<n^EEl>*_0sU&!V?By*u74c2CC~vWXJPc~ zv~Z`ZH=fJJ&NnnJEHlb8Fpp+yU21IrAIsoM1ojoi##6^=TLmnJS^4Q0lM(0<(ZQ$S zm`e1ddCKzo+N1~vfX4+ke$sLP(?WluA!Y$B=P}=7>q^H5d4psFcCq-^KivmBqsAHn z%ey%O=bM<vAabuP{g@mBQ(=DBj0MoC^_27OYycqEvvN&T%(yv*ZW~&{4te>Bt4KJt zv7_ih(^q{tMrP($phJ9wn}M+)KTuDFSp=s8Bp3H$OYVzb-BJ_WFxve-`*dRAncxn= z%DsM?OFV1TqHE<sUaG}n<x=*o2U?D6<nP(zHpmiIT5BH7?l(APTbEfyXWzICtL()3 zLY@_QnOXVGCEP%5`FTW6kh`F=mV&l`LyC5mb6#&;EZ;p4AWg&_f8EtSd4Hzo0e)<2 z$nODM0Pm;<IUr};zN5JypqYS$d>JS*|K%G}NC3djOM@fa^k+I?jYu+OuAZw(;w39_ z6W`+&u=3HBdG+wobF`n|pV&V9MMkGcTIL3915xG%p2aHbL4oh`oij^)Gk)TJL*hPb z<#ea`p>vaj`YSGNVPoew`*EO!^J<?x+p5IBhd(O%Ljn%8iI<Szk0x*vL-3sm3X4%1 zc?II&0`;4~sdV+W9gYsenR@uxMg6s;5_|EuVN8m{Yo@>(LBE}_qrW=xHS}|<vjh0S zx4dQ(PhhYtAOMia=+Ej5b4<}NJh(X%Bv`=Gr<ccf4*mu*Go3S*x7?pEP;;iSu?MBP zXM$;jf_Nd>G4ey58^L9@7S+7dIHwKZBY>I{XEjd;Nb?bfKALvrIido6>KW*%)-t(p zSAo{|dYMX$H-Ee2C{awodca16WaV*04D`9PsJ03BrI#NqO<$7YjKMlPl7gTGo*a=8 z>OkF&D_<1;#|Y@I?@>JtTV+(+SB3ga#EG6$i`UWH;tXu4#c1-$(lf2$v^0>JyV~cj z$@={O{@l2(M3O_u#fM~W8DPJ^`bG%(qirs3TxI>Wu0iHV4`u1W%&7g>1Er0y8>9v5 zWCHvI@s4TpdZ3eL;loM8@qmuhSzygoC+F04X5|G}l~JYkSDlR&O5QthJzA6LDwElw zn&26EQc+b_c-T9fxmg=U(69W|GrWP1N`-0@Tf}+t1WeWJd-QRu<Yr-J&p=zNPju)n z)U9!~yC3Y|K1RNX!GAT4%H?7#-D9xI9kI^H8{zl^$(_75OmN~WyQVZsjDnnRR|3CK zKdw_Oo6{PaLZ;w4$kubZ3@%zdjiz3v>gGn#;KNNFJI9o>+)b!9BBZhujGbyy>%*ft zgj&9i<TU|Ta+Mr%IKleY;By2PH}|B(@Xe#M1;xSc5neM~qn4EznF4qA#cqs{&jkr^ zOL~$obOgh0JVLUSai^5|r8&H7X16IVS*L}c=t>aR!mJ15>TUCJws?<zuh(1{dde$6 z3CE(7j3|*}Dtqc?e)QgeLkkmryFM+7tmi7p1qR-AJ6PFt+Z1B`I+zIiMu4tnr=pun z-NW<E-9EBR!AX)U;_jDx!F5IZ<Ytjor`nnO3ear|p5u%K3kPX6M^OZE2fpxCRp7{c zeu!2K=;Y1tM{a~n3lZ93&u~dJkB}wtp58_<QNCQl(7MDK;=c1W4A&SeP=)8nDZ~gi z#0Ze7jF5j*1o~}+wZeBsh2`%GD{QJu!;`<EFkGx>jqp60%e1LyaA9Jr7Fs%oXkB5k zu6S!l9no9G=HjJBHUQaZ8>>3&?Rx4wqQ}M%2RyDT=1HdvflsLs+PzON-{SkR<U|y^ zRP;5Je+g(%{J0vsY71*U`RMiZczf)uNf^F;Y)^a)c}_GK9n(|^lM-XfeM8HRtLU6m zCk<Ev-am6+DoctYl_o}@;iw0!vkAo%Ti}4#wKv%3`Fd36`B0J_GWOKUjxzIwWENc| zMmQ+SxpfsAViKC+I?aDG6kWNfu6{JdW&x$TijH0+vtN*gMy9i0l#w5VLmb3&0jA7R zAOvL8B&psArPUCP)sVAt&{H!iM=vHvFXw^wv5i(?l$nvMyT`>ozrrdNS(n)A)K7T$ zrL7~2l?eDqx7s+Xzw&a1{0OSP0`Q}LpciZi&Yd;VRi2II><JLP*+8q?4x_JcWP6lu zP`p>q(F5P!@KB4?48==%GDv3SS6*ag(=b^)7m!zp3QZC-c+=KXgjQWR3PNQf8lBK~ z!Nl!ZSQZ5O9rlQKBKB0b)6%(7G3~(cwV)w+07eG6;W2~po*TpwUEY}w$R7{o!HYWV z7_IdXp&5gpP4JZ*0V+$*weduI*&jt*Zw@A|d?GC<)Y?Z5h7+U(QqGzNT37cECHV(= z_vFz!OAam*L?We)>UJh?w4@(lLNf~T%nPntmbQkb2A=oUaYKjihZ+K2CaZ=Ho8CTO zx6N;}E%QA(Ed!q)PrG^=otvy(5Mz)D1o7)8ms+HDIBsxJQ|K=0Df9;DkQ2a+wPz}@ zB_dR*j0pTSKYqp687DRED>%oqJ~rP=ev`DCs==eS#+ReIa*C@ab~iOVHzd6Y2=v^c zV7d%brpktieLFZ`FS2<OM|RU~TBEY;kXkd2t7Ga6XDThbL4sysLgR)9?Vw{Lr!KB2 zaOukTBWmxO<s+0!Ky+4|e8?;K<G`kdH7ShuM9qPvVR)**=BA3}wdd;aBe+H@)k@n) za@k#E)et=Jku&e9zS3=nnChezxL2n~ey98EbK(J#&1cg8eIRxHcmVjQjEl~30*ja5 z77e?!V3J8jj_OAsdb9LwB{~#(U#$dH0tGqj*(-s9{eC?0CFuL)CbYgbaDnMPs-t>@ znzxF8q>;U+les9Yy1k{5`9?`d(Gn#2@-u~UNXbf=8LQ|~)6hZdD5^6=5jzJ2#wpjR z1lHRvXfX{Zo>C$73FL$(+2F5x%;rc}4GJbW1P|tde#YDd)p<nuXTXAeMFx1OG93@y z{P|bN=B%zy>xX*lq+QJE`}6bgSwMRhfa*};9n%&S$hwS}7S(-OP)!(Z?&d%pheXu@ zv;Vwhw;;T2-lhhA2T;VGP%mkeh)E(3L9%5F?)&Hc;Ez&B-^SMQ6kWOU1$m>2qp%o_ zuYF;H1<)q-RSYT9BUQqYiPRyk7)+0SfRCW15ulC|TKS8Py|t$=KxX7tZjnPZBk!u_ zT`Lt`CNS9GAlrxO&-$tl0h<P@PO_~rG|>_lVSSV=-gvU<pxpNlGUl7Z06JioZqRQY zJSAh)?#$@;t+Bhpe{3T%nn^ym3y7A=*mDk*9>KpqH}QO{=)5cI$XR+sXNgE{1+czB z&*i}HIM3|IOr1dT8<Ff54XSBHP&de?^%ye5;4U8__aMRdAR+gGdnl^I(Sv(>@`59` 
ziGebinMO6}#i&OL)qixWe<{+MCHd-phZ;h2X63Q8^~RKQXp{$i!d-0(9Si*2pO`PY zO;{@kSS#pnI*r)qDZVN0l+*2bdG_3QU8na;zWZ8@<Ns{CxfU7Sck%ZT!C>BnRae;5 zwjM|1%^zPXm7gY3SmOwn+l<mm87UU#Ejt~FGk`h`6Hgxv0#R_IT5+uIV=4riBYoWw zkE^rTajApx3C3p;@c;Y~q*!5`21x7aC&-X4H+bid93m7jRekD-JBi2wKOCPF5paNL z?tfOS5*KFd);_$<DWaT2VudMT1K8F!`JLbc(p2~F%%)8QnbktnS_3687vB;S_Synr zosL^&#?oOuN_1P}DF+l5)2NHVVnOEiF(Z^Oek+@6ODW%?u9?HYoxcXcyt8`CUbx{8 zc@U9?<nwW7<MCkQ?N@3`v4Bi2*PcKPo@(v}iwQ!tF?!f;#3Qy0Z175?9sxRH3h}|e zc>w$Ajb{lHnrOI}AOlk_PI;Nt)=@8WzIpJI^nqwhr~9MG&lTmJ>;e{%rQkQDGV3+} z@#)g(&2fRCykOHB#O9u(l4t-uozCP*A`F#kE+&-~5|Ip468pClqTopl5$g33a}M1= z822lX%{|H1=Mw*_%dvrg^xyqsUHH94<eI|Z^W5LgFd58{$V0y&_d`l)it1C1Uplb0 zy9`FHUfIw*UflYXbbWmejszVQMzwB=u1<2JD^`vOIe7vfeOjb}E^G!hb{4?CB2R`c zRE#oPQru>l<#7XnmQtE$OxualHo6iQJF-?=vND&_FcuG!lxz^?0m(_N)WM`M-*ZOg z-&H8OCy#AS&~I5b4QgJR-Fq9;0s=WUBExWF)Y^On+I7Bvjejxhc4_szw7Ix*c6)*` zUg0X39BP%FW#<2mC$vMMvp}K;giJR@rnElxn;+9t-#yBXJ;Jha^Jh)#;tivX;FOnJ zBQxyK!i4LUT6)ilO6@GR;Y7uat5~EzW@ap+v=lc`_~=_`<_3Umxim(d@4G5HJzigX zCzA&;Z+~D{vpmi2EMJWrH+c*Nej1-}kJ2FjFunm!6zws5g+tPjBf@)v-MGKQT)o5m zZUGuvk-?vfD+ND=AX_#@Z)10iNs!sq;qdgUMRk|{OXDqwKoH3%4toeZ>+zD;@Tzs! zaf^P_q|-X>qCux+67Z$@M}Z;j3x4o*%@xMP*4W9}(Zs;!@0p#UB@8DMAtT}66PEAa z8JYiG%l@xg9v(slX%kyBXLCZ%?|?c*LI!0IdlNzibvZ*z6C-Cr1|=6m=YP~l*cg}r z8q_R|0iBt?vk@|gn^>5cJHs%sa}Y8J*;(5;D%l$t0lEmA0Fv67h&dW~5Hd*EI-5A! z+gTepn-DrXx&T^#SU5Y$n>Y&D+1T6Jn%FuM0!E8kSOdDjFo^!`Cv0M5XKX^q#|QH- zRopi&vUS8!6;S<WJ4`mH+o@73)q{(vB2d8}>QuE5gN;)|Dr?A7OVBrs7(}o|<wR`6 zMT#GdgjMH+l9j(9^-G$lV~U%U1VUTY@V_K+$2_i0*|IoYPRV9p&3DdtXY)R7IhhIr z1rj!qY!M4!_7dJZMM3wGwjq`$+UCc-WzhnGQV4Sj9LVK|U@wTwSLYok(jU-^q&ZW$ zQDKRvIhQlb|NV28NJ|VTkm^Y8Ool6x>S$U9G+9(C{B62a1T+E)3G`&3CyDenY>|NB z1yUi=i2;s)V@Hvc7TvM)nWDYAfhrkRU3!%&S%<myyo&R@%etsrg(|>%7Mg3}Vz)+- zNXUQ{=e}qY-gCi5uR?RyzwZrhF2U9=Fp@XOa3eJ+)|tNq@Y0D<Z%H^ga&5C)WxR4l zk0sFhkvvkv8^&k7Iis$+I0VYlb9u&UI{0lg4TrntqSe^7apSzc=MQ(mMHpsiA=%OP z^q1@mhpvn|nGNzx7HZp=u7ZxOekrj2!^V*=Dj!1<N81q`ti&|N@<E&STs&=@vQ8Y0 z4)^vHeR&7&>Y~F|LHe+PqjQoj4D^lC7)mT1Ka6omtHQBnEl>;NijCahqD@qQ$5F<H zZv+I7&LwFdRccGMgnw1m#<+Ydw!8~Pl_3q~QrN`uG#U>@+awUul=anAf}V#LSnV+= zBY>yz`i@=a(vI$Xf{U_g6*<$4LYzX5qHo0yx>;f`(lKu6<3#NzTo~8gQxXDju6brp zpdt8$QLUHYf`jVDiDp>K_IMYS#aq_$J>`p{j=0{=yDgD{V?~f>{bU3jO2)YH_~GR~ zL`KKFV^zU(C&tK^WtuC{*oy?FRLl<J<Iyw6n{SAdDcDC%Mx10Ao8OI-^)4W28mbe{ z1|;%!aXNa6$2{<OGty={oS`9X=!)e;XV#B+>D)Drx~>}&CY<Ndt+J=k4<Ax_JITfi z){Sr`{n4wn)y#|^&8l%-(|A48!6lz<4lk!g7z!WxCpg^vqNdmN^GGl3dyePUWu|yJ z=Gt?vDGS&8#<1wB`she&wgOlBDb`r5DwalZ?RJnYx8j6-=lZ-0jk;=B{JWsChFwO2 z4d*1zk0v|@16^NOM3>67pXi~ymsCYZ(_};y*>-03v0;WOu||vFaNC2_?1&7>6A0)P z>sRW@J>m{!n?HxE?_|f)a>`i!H&A(!bx<Okj{T2x6U(*WG7%FR^3`7Nxg80?SPSoa zmiY3ann)LZ#ov#WgEf)QEX>W{*!jh=HO)c^1}q*p^1^U%a}b8t8AIyhAc6`0<9V}t zgR=@gOY%RMu)l`suL)yfWMuod1!MgmEEph7{QtEDV`ctt1`Kfie==bIXLAKGTmQ>k z!Tg80>SqE50s<Pydyun%;m-q{0v>x-*vUY^|ATserT(iPGv~jRWBwnMWBLD~95WmH z|I+UNt!{5$e#W3cP(WY*lPZ5@{KqHtzwR`g|GO%H><9nU<)3E%Kj!g2%>K0Yq$6G{ z()J%3@~mWSw3xXVH`kVwqVon6z<;-fyO2`ylrX7DV#Dv=*%J?73{35aMB>mEspKo7 zlKEpkAMC#Cv|n$(()!)sxh=h4i0bgOmj@r#uLtcpd0uZrLl?6GO8jhP!OsT3N}%8D zAu5k9#FIyYYx}vrL-fWxZ?8X`tXFQXQZR&_IlV`dVJFqwx*z92%xOH>V|%=ChJ97S zhY=}avN_?(GK7lh>BwW_>-{PpHO;$)V-%mG7j1b?O)A-k-sL3Vg>03cF5!@ycE=Q; z4XR>Mvb&q-<{>%|qx_R7UF#|7M&|Awg!YX6^<fo0*m|eb>&<NLk)BqYcg@Hgkb{%! 
z9Wj3$kW$~|9~$;jdq6ypVeEpLzV|ySpdRYBTZtiz!tTbF^XY^@y59Hn@q3#bPcQ?9 z4zoQ}?)&g!{}6N-{~j|-pT(e3)z#1GGN#qi*)Yh@#$PABq5QntRVTGW2G+c<u+d%) zK|hIjJbY#_NqlXGoN$dCnh-1zD5D=%{5>ZfD}xR$Uk#6+2d3G7$04GIv3dov%kgCJ zBq#{mN@Mf)=o7s6KyZL%1&pfpEn>${9d}QXz@hoOU>oW00PXseAON$+5*5_}*PJ1N z#IT>fhu^{HnF_!a#`RnKPg;TvnzAN0z?ZV=8^RDT^BDs7h3*S&iMp!*#{(BiQGv=< zSZGFpHzt?9G~oAp(AqU|=Vq5$AUWr;ZUDtqBH^yji~P?24r|sI$3(j9)a(QS*d9IB zh>eKE2*H%Y>%k!-=YR^a4%E5~wG)g~nS<w|BC`m&)+hG6pMqb}Vo(Ac3Retcts)n` zA3`}*UanC;oZMqMr~P$CSal8Kp)RR=u=8&1O}+e5lF1`Rl7=8Mg?g~00%yz(ix`{5 zF`Uz<mj|p2bIZKBnVX0uE<O*a>Bb{C3VW}Ab%SGBO3(Q6SX$7i)+%>#Bz9j2X|)1` z&~2~GRyi?Novtwr%aX__#^crY5t%8#N@x{c|IS}!=EP?m2>D&VtP_>g8XjC;UYbqH z6|7nRRkYz-;kKMEn@{e&ct8i7P^ibfafi0dkClyfTh~EfSoE{b@q$bx!KBp((&}IJ z`}3w$)ovu*n|hMh9Fhxo&n9Bus6KSDfwpKk^7Um9w*~LxJ@3srO^$bUw{!J$K5uSb z9@bATmLJA1)(c0?YGrHe7-s10!e)Q+a;MD_1@v@y`?~lz5c_SC=NW_GV?mQeat@?2 zs*8T$z(0|3$+1u84HD=!qnKRm!;=Q#;YwOI$*14)#w-!w`%`t1%YQ6RK6_)>l<@iQ zWVtBZb<BYz^G_EgB$%PNb=a8j@$+_j`gnNxz8D^hM!#Zef7zmyvuccNvv`o7fs`Av z8aWn>Vmt@^l%v-JB*Wu{Y{4r3rW<S`Ixnq)FdQmxMAcEO;a{MBc-P6DQNB~|a-zjT zyJF8wt%bsV<;$y)<N%=pm6N$Ht+!S!AiVUWpbh?MuN20BGkOF(YZTKGjLWoi-h%<E znNdH$A0i6j!4c<A(+@%*(b2iiFenv5Vsp%HWjE?v-biNeRw7*tD6DZIWT9C{QEgSD zF>;Vbx~aBmq||(MP_wX{RjAt0^s=vbs2YPmK98$W*hIexnV)>{=j9-cqQ-ZWKPBT= z!b_8pYAcu$Ta5&4Cc@KF29X_;y6JzH=F&2!&&93x42n16p+GL@z*^ipa}^oM-S{d> zWsO??M5bnKlH@UjlrT*_b5!wwv+z$XhszsL3_WyYy5JX)fpSLaN0;+hj7B1P7yJ+) zH_F3!XNtpO-AH{q5mtXef??*a@(6FnPXY*BhJ<k8+f)yew_t$qBO{TQ8Ld`?s`SG+ z9rrK?&xkhgx(c6!aA@uRE}}@#slW1bBg|hl)hLBdXGisCiGHduJj^#!qWdPK*Es9# zfa23a>(~cjvGLay#xgwv;f)zIQ(zLXC89<Wrs4r(VI2e9K#v+mn%{Te^{RSM-lT4~ zSPGC`VG~aq!}LF1z*gWB-DnE(U01}ZwRjE~*ocXlss(R!kX(wRLIn^95W7o-DXIWd zuoU#uN>{Xt#4U>HG+7+V-5HObq{iwo%n~98&4|awV|^C2p4hiiNvV(%HkReL%4Eke zieeRAcM{?U)V$<g#<u;3k&%6J%cjZEl{^o1C0)tAOpt)fN;%Z(T_%Rfw}ryRbwqd* zESLgMmVy=PGF5a|rL~)%seiW9y^+ACwP_3aOY<W4tB@Kr6icpLl)zb81+Z$Sm2g2U z5?NecO~I5+seN8mE|~T&)r$`KuPQ#jFzD1pW^)I$PF`|g#=Y2pDvFP$^Kh@4k8H%F z;<fuG5v&D!r0@6~1ImsyTnEKjZz9_&PBcnI$ZNt^bv$*ER75GMZpRf2#H41SkmxiG zMOY^`Gm(B0?QqKh2T&A6Q_<QKeNP3@QyLYiO7S2G<||LULMYBjEh8!jH0$e&=7vm3 zTxW{OG0n<w4h!)t!UZuk{3r`!9p*4XS5euVmk?G`GYGc7l6CmIW<}Rx`%h)J*)W~> zJ9Ali>=+Xn3QkOvXsU4OgFmLyaY9Kike-YTc7DSRD`6MgBD1PWb6ygB=efc>rTBIu zB6SB$$7Qz5-$Sq)ROjaN6;|>jfUP*LHP~%EymzB|f%R^)2qWkTrbhe6P9_Z`v@u)l zMglZ`8(U{+;)#t|^WIw81FN_Fr%80gt_@ZQE&Q;T-iYz<OJ-j+FAgF-m-gu95<@zi z)$*~CEC{58+MNd}sL5Y5O?WLef1)6O8=T3Ww7)~4`U#c%{u+3hg5tP}<Wd&Hbwc$J zctfHPs;^ha{|UV7JunzGAm0d<y#d>XY>s3<!v?HIER73lQ`ig}-4KO%EvC&NV@KP~ ztlR+03pV6sA&LSr6X9+*;g+gHTd>{{xO|2lz>6(nxabpUNU(RX7uz;S!D+xi>!V8o z;!!5OyljHYxL$5_x-6j%bW#Gl?D7cw_T`1=|0>`c|KW!+;LY*a^Y+E-sp7W#rB}@f z_1LSbV7jNGE{oH)E&(JL6HAt=Hk3s@QMHA$h-^Y**yiQ*h=jQ@;xS_>myDN9S~Rbj zgSP~h<+R|XrHE5tq1JT2Q=aaW^J~Yye;XY+hBV1_Wa8wFec;bwy1%HM)DQ+5@1zP8 zqWx(ck`QJ!J<`EOS)6l<Y)ve#UY2sOdOJHmU)dXWY9i^VOl7}Z=FDh8XM>06^kWpe zVq(MD*E~=joQab+-mfb!A5o;6T~#ExDM!k~EAHR(JZszWc1zuYUD#T|{SsdKd161U zAH5nyYy+_h;Q?XrtHdUz3OC4qxF-Fu7aOrZHYroj%6b$BrsjO*Gs^$b2RHbHcgJc& zat`Lx$fX4QAp6jbypKG9lp1<1Ha<?feG4ygw2!WZU$}mnWOE?QL_&{47i)@^NJQfM zUWFxmd6QV31F+ktMq}t#m1H#!4z$^;(ipFp4P>aa4jblLvEeHNv8&auF5xTZ^`eFK zhxMeHBQPx%`^>0%?62#E7Bc|H5ImfmBA9azs|3A~v4>(}CkE_#XqE$f_3xg2$PTRa zlxAu0AiPq0B<<6bVexX=+spSj2v&xZ*B0*~!I(l=d`^yj?pGeIjZG4`4q0gw(+5yX z#AjWa{W8hbX(XZa1F~8yi5*?f7Zsd@QiY}{x0o(P6HJviA@eeZT;d@DEp>&ab`&a3 z8zh=YM-SVOHl*TZo|04+h?fCA1{Qdrjw$GZh7F^kmmd*mH&qF|14<jlacpAdw}B86 zY8zQL$*u#FJKxf7v?VF3vdNU9nOMWD(W?U(cpkfY-ur-c<3d)?nq~S25VY1Dy4`q* z6VJS?d+-%JKMIyv$}T-xDk2DpX+}4Pt@T<GDaOZ54i;Z;2c3sHFa#&Z)A5y%1Zp#@ zrJb{G*~NF^9vz)}81JiiF@pp+PU{X_h54!a`F*8Jpz6~uLaDgSyhy3W0}+KXl4BGF 
zg#s_%#M2WO@%Lrl8gqCKaV_Iv#X!Vsc){_gxi8-;^p-Kt9tw67M$~;Gtzc1}>{g5~ zYrTTEFd6jQnk{n@oE^BZ0JnfL$zb`!jc($avz2u*N;vuv_a*IUqjgMZECrodc}FH& zTQjwxafGL^V;F91Q?b$Dp&yFX;eM;(eYcq+2BUNKz2|D<J9p9M9$K~|4w($SR}`l4 zmASbz0uq}I1mpGT=PHXg1sq^X(k*=Q30`)9*t%6m2<=aYKEUP(*Mu%}-WZ>j8aOiI z)LRc#z->S*-n%-%a{3St(}Z(m=VfW1{!Q~u3hnp&Y|&AX=S`d62$~%oolVGDw@om0 zawqUDNOA<o#ooaspr{UB>jmdh3*(%J3>;U*8?P8>Up*67BPuBQ%o*S(dd~5k@yLU5 z20x9DV6EoBwPlMls=CxIy?K5!Dkusf#Cl4|IHp!lrgU3){Xlx4=dPk!Q>d{`ZeLjH zA7lL~Q60W(876Z7r-iiLv<0cVoF+$CO3pdgw#Q;gr)}T7Cwgrfd|ng5d+mX0gR_Vf zBbA*lddn>Hu|qA6jW=Hw*|0{gC7MGdd^t5$hR6fDo+Km)YoBTRYQre*L_XwT8Mncw zmPIV0o>+F5b(%srzftg_W-{8Ja%mJZ9A22n48roNqCTbdXe;4a|E$Q1D)-jrf~h@- zSqJ&Jp0D$SjcCz~DU~KL0h|1N|41H@aGZ7&EZ_6OK+La5)^q}=NcWNjH183tVy-w; zd|kB7!uEjS?XeuC4BZMxXCGdEwA|<(?)Qwb>u|Z_j2zAhj=p(O1pdM0BZT_`B?{_0 zW{_k_BMJjW@kpjk@Zn5cDrKCqw35ndDBOFV58E_19^)6oRceI)Bh}8wF!F=KWeQ%; zA@Q$HETvo;mjSna^!!4ExD?gCEM4as|HI-$GOlG9^A?__b78=;rSdQx623ZUYs&xh zR4cMB;5S5B(%ho*$sNh3yv*We`y7GnZzZSLC)awQY?M}!P77x6c{Ern9AJjtyuNEx zU`w^$LW%;^Mt)g0cGH4Z3l8-r#XYIv0P}4ijA>w`4LqaP*-GphVTfQLmtK&X-8Udr zpRxJq6_J9zqlzw@>+-lD38yuw&DRCsOyqSBw{0lGoB?&xq1xs|k{2Iz_It=Y%Fav9 zBUko>;Mnfd16(E{NBb}6=#q^Wp0J;%64(t!#HXfgSR@$O+G$;LRsPwpOTrCgKRF<8 z?jDPz*(GyM6;Nb)vjfT_e+N|;zH}d12UUDqm@;2;MS<2K()8>+8*PtavOqqd*=y*o zJ=`Y?`jiACsr3b49#`==BbRNJ>*z<EgYZz=$U8l=zINWve+$P%*+O(%vE+><PHpY< zQVC83aAh{{?Z9K(4~Qx_bXvPe-&6~b`ZP1M;>}*FdSFR4Fye-IxB^~Up_>}zt!9x% zOv^uOb#t5PKfk)TJF$}5Z^L+jV=ddTcWl4Lad4?dT<RW&H5kHfS8Pd|kRfGcZXH0k z*lT4^kd~LM{Z)Sp_)}!L0!0ZVC=pa{Lzt5yXx1KE=N?XXZy{koH#SlZc8BY;l6sA> zI!egq6PB^B>Zr|p!-qRJIt`Ui*83^A;zB}-ovRC`!|%6!9y&6Qas-@NEz7CxcpR%L zFwF~XU|R%8WiK9Yg&e%Ck=Rm(vP~tBKM4Vhfqvd4gswDGI0ACZnxj+gucswLWLJi& zB>zD6&YC0jkf{GkN9stsELA9>gm{3IiSa9X^>$KyWtQP_XDE{Bo+1JScztj?CEIqU zEtK|f>zP5+UMK5+hmrmXs{93CSXnq2|BWPZ{5SaJpUA%aKVW=$1INGEJJY{VB}Ee_ zI~PYI6DL9d$o5ZgM#v5T-P<}l5pw>8Z~(*rAik4@!4Erk7_GlOzH>4ITpO8n{sK1t z;u`)r^sfkiX$5&lJ0m3%XF@H&sjw)3S1@r0&<X!w5q|vZDD<zR1fkA<fK-2n3pp{t z{1eaP<0E7cad#F|a{fyq{B09+Vj^Vv3jitt-W>oR0VMrLy(kAgK(N2y89-do?|=f^ zKlB3-3v7S_%YU`7018b1RbT=X{?5|hA?)-_glzxSzzJyh{;&BFWu<3?Vg4sW^7k$O ziyrwajf4#|Gho{ObqzoXAROagf(39z2^%I(K<QtX1hg>%P|SZz{@<tWzkg8wVq};Y z|26afSCkAhK=6N4GHk5>F`@qgM*rWCG3VXj%G&E4fPHZ{aSk(WPuV^l+=1L+Zcw(+ zmzS3aA?SeD%YHg{|6m|M489@oa4U+O%QqF3A4&2mbKP(Empeh6hh@qYrq!CwD=fP& zzPp)DH<Jt$2SwS*NsfxL6B3+M|551e^1bslIr}`mcbsj`c%(t0>;7{fBgCnqW_Zhu z9O;=T<)uXP%3@V>u%tU_he>6FWp|u&fuq&g?&|mOICC*NO8`h+xPP$Odd2VidVe?G z&qL+6-caXB)0{WFMfVa|Ly!a#_+ueKdLco0B0_j0L>gVN{8*&)bg}|%%nsF|pILW> zPh_1;Xti{Xwrzp0Vur4sjIfcWz@D)Fx2NleyUD7y&935h3LtxqRnX$*omSjaZtv&G zVyT|*3zm-Dgv3i)%3IuH<isW|Zo#whh4{+@H0M$T#}ag#i|h?n4svrzq6^v_w0wQc zcfmuV0)~(S=MrUvgYPoRF0;$J8i5<mHq8&OTuZ5ER@b;nI1s5!%{6`}io1p`vp-ax zcvx)-ba8Tf8$Hdg)+^m8#6FAO$4z`6RGlS@8}Jh|_2R-6QqwPqnF?uvTgeNHs)JkN zI&7ki%q_fcQf?<%=myWKNpNC>tW<bmLu^>q;s1=Qtp1s(8BT^9N<|n+)eSkimK42+ z*(w<?c+7~bdFl8(D9LYb`StLa0Ph~W!P5kx15_7AFTdCcHTzc5f?;>T<NNr!So_<r z;EUoQjv?-6m-v)bD%&*$3OkXZ3b|=KnXyU<8f)^(ecjEBB-IIMiY*|2?tEjqI@|A@ z4O4XWlJj(G1N=cd#F=80sS>QQD*V1Gq_HyezOvKNywlO5!`UkQu?noA0{p&mys1hY zYGX_y3#`(^OoMa1$Z$KD$t$T!EV{GwY69(mUqH`cV$bM%y(Mn>J$~h0)<(8>^ON7N zWYhVn{H!(BefJR61t`uOL)GHqmG0o;5R!^wa&4TgXH0-<O`KUuJvJ!1g*I8_rf5TT zXs)v>7^)i>3N;DSWP|e*3fe?Ul63+CtwM+k6qg$jbgX)pxJ;g~MOGy>N)w}6UVfuv zy@%f5Y8p(P3LYuHAFF(*vw@1p6tZnKTo79c-Mu#8kXLwN1bAbD_aU+A>a>I5pPWuy z4|I29*vXCYQ+*k)dwo{aI8A|M2$6(6>yBl=7l8Jb#6^gSx|?cw99hfWC5t&$LJ%u0 zkwz+UgDqvr&?xs&3T`n=^%rXocZ*^vl<z6i8l^E<<j@&p(3_?bTP6;b;>4D}bN%w> zG8hoOF*F@VJ}k$mW-h+C-Oq_sK}^x?iQ)R3#_oxU46le<;heMLa3R(g-kU=}VvLS& ziT!?02Qzg#RfSV~oC#V9I`It@nCIkYPqU-h)$rL9#nENayD84yxr?_#Yq|nvIc=c) 
zf`l?Z2VwDvBEy7r=}PRn@KQw9=mI4X)j{_fYakhIWAb*za@T3H_5_2b>Ger43ys&} zSev30_xyN!Lz%my)hpC7L^u|N4l6XkUm>kIJelcR*wOFM{vRE{TiASap|Ze&WEaA& z^RdcXk-q~!I5oi1I(pkA3Id%XTJ(~1<tt0<NW7z>g_xXyMVEo7MeJNJ^f|+nE9)|^ z0|N$DEau)r`_a~f{Qc|?SN@dN%Fg_e;54;3&PiyMDpC<_D0{2mBppG2bd9!2ahVCK z05zh`1V(klhokmQWN)|JdDxRPKEHKu5;{TSu`t$FPv#Yd2Wi$$)FGxBaN2A<t4V3v zDLc)sy2`1!%ChL}0Fs;79A$8kbBU$}WQ#P%PY+tH<RXW`uBAwQ1S?|Kgg~WS8-h80 zT^#RB)U6{z25jfjA1&FZGfm9f*Q_~MgnGAn`##Bu{&O{|MBzSP^@rnGNCk02m*R%t zOaD=!v7bKH>GSQ!_ba~a>^Z-y+sBW${`vN*+nJ@`lUOD>>uB5SDZC!bJ{M}_W~nW? z-QF=rjr?EllYRT4LdMN`4n=hi!8+<D5}T-Vxr?mOniFP-vP4&cmHQ;tiB-X6ig#6L zEE0zD*F{Ngf8@oe)kWHcRZ!XEnsfn<7P*o%XE%{Gj1E#NjIe2Q-;h_}QRkrdmTxiU zV3Ap&&={f6zQ-gs%E&v5uLu}pzM*Y|s(FyI3h%odEXA>EXPjlNwK2B?Zxc0|&XKfe zuG8nrEGNAe;6=2tUC-wKCNataT`L^gEfpNe50~RlRYj7hJ*Bb8F-OwG%k?X6`ye;x z3+^ENTs3Xc>*aLd{t7*qK5Gp?Y6r;_r{)AJZ=wmw#wqUNj!9hQL?bQCWXLiFiYGTh zvpP>fOIh4-G&x-f+72S{yS{fZT=x%D%1rmD5|P(Xd9q#|auvNu2l$@nIiR*@b9X1! z7RltM>Lqm<5tWJpHCNG;cC^-LX<9qTy^Tp8J%kK(q4Rc{>yoK4J5*i;PxWB1zR9lL zMhD|X6p?^G<ATpRO_cE(EMFh3z#q)Ra@%1N86*4jGs~{<Y0Pu#4DyQYmd=sZO_Bdj zRCAP4*R|7Kd_MTqJl-7Uw8^bgILQc;{=5%$0;wp@i5Q-~D9#0QCiY_tXoi3q?t5S; z^j>orYIm@VQ8eg8kdr6jG0-&RLG5K1XQMaHa;4lvuT;CwQ9xPt^vT_8Fb3Xuu@2=v zA`nkXAY^kQ1><bdCFGRTF_^;%gwt8b^>G*srr6jTxjHynY4St#jLp5Qbq57|B~gPk zsN|wmXeUL>U1q5@$(g|Q_yKR(L3jU1nb&eeR`J|;dyq&Pa}<=gl_i0$MK|*Sfd*;q zg#?+ga9_D>U%5l5*cGHNN!~|OFmFN;NqaPGF>atHaDU$ZMAi8~{^3;q=}_gbq0(Pa zDSC?;TH6_FDE-C1W-1S+YcU87ajDI4O3yNkkhD**cX5-q@zmIrx7b1ZPk!fgPm9fp z9oikO=&?LZo2cCpqc#&F*Xt|XCkAc{v0Fm6lzA@qGYS`mgAIc2YlY}UQnjnD^Fr>Y zHBB$M%65A`90sTQ1>K$x(->Gt?OmLUgehU*`L>z+Ne&z)ryo!$L@5y;@`tDApf{2x z*Ab;S=EXY|tK1l_LhrLdp)f}vvqzybMJ2QVZs61MuHynj-<6$b0@*u;*g-D*;Ttsx z_eig_0XHqN+CBOLjAL94-34~ld1hQxC1PSbv5{)QAyToSD)GK@kI32Msxq<{5gckx z3RM*~A{#RcAqq4nGddhKAFtpDl!2|~>51v>jlso@?ZJ^jQ2hJ>nnQXhB|xMlZVDSK zF*X)2a3gLDpEuao8N*lI#1g^N*n0F&@SSm+aTSu7AR4u!OOSV(r(6VSn-NrJOD>9f zs!o(B4Mi!g2rLRpfFCtj^N(aAgA9q<Xh|1I^^q)aH4$`pW#?(NCt6h2=xFKMULQ9@ zm(QKyak|uS-B$ub)h{~RyKetq8VGDZ_TmLY;}uflHFD!+GU9bI;zfWT^zLcEa4k!p z?;}X7ERuORoP$KBjEXN3_wf=_bQYR<oSeI1pWz15mqGTkCDF1aIhH`pjdf6~3Y|`C z<LI|zW^I!f>?sbGLoX1SCXnkYKl<#vRR>~K=QY$<Rh9?V=jNverYC1MM~C_QNLb;p zur3Ht2r$zyv(u3?v(Zx%krH6oP%Pao$=@+_;bg@T!liCwVxMBeAE9FG;o%(a9~vL- znVkfWH#sdd*Ql^IwU?Z_^pC5p3CvJ5k@UFKS9@8z?Cmc48@0F@MpLgskD7ECp-GTi z$<gaMYO%_Gs}|$U6~tP%>8HpP?#U2cQQ2aB<0D`t!1Z-FQ}$d>x#*0O8+&;}ci^-F zVNbaVo}RHUN12yWaHyOt<J#;TAuUx?U3GbakA8UAZCjd4Qj)(SNVKrUAg{d~pfnaA zs({i7^4AFw7<<kxz^U~OFS02}bjqYA78>N$+O5@|u8!9kmKxz9ZYjedp73vj!3Q_P zl!4Mnf#z6}%lRIxJ6Lr}Y!E)zAfn#U&i(w{78d*}JPb0JV3Bdd`9c*!rg4z+vYZBp z&kxuxOpUEop{ZG^xmlsLS)sW>oux&SsX2k2xu;{Eq@$ans-2pwhnkfF`$5b>G)zG~ zL&Ca1Mu_Pd-%c~ox1u{dq^UTox&eSI*nl&(6xX=6S?by99}&lX?zm$nt`l1#3KN=2 zP@B?~;lX6qo@^F94j(O6zEHIrznVDzaMzk=!^zoPb~QN#%k8y5N&r7hnQf><WEQVH z@wBzx9^Uo!dNOgj@)mF`tXCQUH!-8ur2`??SOdw=o97g9Lu#ZnS%tHy)z-Pv_SyYm zY$;{CjaVDW4V;D!oOWoT>vMf-+%QAz3B~Ib91~V*C6MPPj7o@|laZvV4RfFRoqbjJ z(){A=tjE=6ZzQG?<<Dwqsi<j*-BxzH!`)%b`Z`YIcLh1yndqHttlV7e%yi5yhr*}V zhuDCC!(82GYYihMK^YrU3lT+MB5QUICNwOpC@2KC>zh``JH`e#d6xQ)GBXA<EJGB< zTvgr$_Ab5-XM(Yx+T8>RoAhNE?vi84KejYS*dgmy?=6%atBqB@5huB%)k*r7;Ti_N z1{{=i<wh@DqWFI1PHp;I5E;g+E(z?}glC2~*L5pxcd9Qp%Ja7|x7qjlSWi+HMJi84 zM$7#SR)TK<y-t!)hrtf1BxHW@j=KMdbn$^PvH5*soGllzK1bOuj@^mPO@tfImurx) zrw)h~2-t)vb&7okNInT!ix3+Fykq3l_~*_k3HI(SZtaxz5Sq15dCBkSFpoii;TQ+s z_UsJwGavFmb%hiIRm^O^4=OM^+yNS{`Qet$$#tH!HFdr<l}SVs9NlC!M(1a5=fm+! 
z;gMa<jP-g*>cRxc3=H<2C0Lo~lK*S~wP~s-^`DU%l&T9I%=B%ilL<w|b$w5ZSqZ+Q ztI@7#Hg#-GzKh`$8J=lA?{|(KZ@}+-V77>-Q_Ht8QPb@<`i6MH={PA0#lg~>E>*_z zc<7{bEV49g;7a21c&0Qi#Tg=v+1fSct`X|@L_X%x1)3!`MoL0*c5)J$!X^-ZFcXM~ z9zHhcY(G&qTLc}m(O9)n1((apFW(cYIiH%?I3cx)t78ziWF-agLkecPNq%-DBxHlI zd&t>BGaDK+{bK!C;UVP~Z7p5D>+_wRN`T9AN=s}ccBnUl@ed+fr+yw4y!EonZTo(% zF!Yq=n^YHRr&v3B^NiT3GiV75@4CBtu%ccCh1EOak_7vk=+VW)hc^O9Q(KiMdV%{@ z!0L;{%igxO&Vsc#FfCr8puh<_E0ds<Yk|Am*8DTJ+K#>6pNOcGizw+`1HmOTs>M(= zzk^R?C83!;;X!tOnJ7brEG8|CVszxt7Jm=7dULHm_icdvwfAoKM{F@k9-E&Zt-o3y zNq~ms`DtJ*#34UD_cxw6H{s|czq;m5j5Z$+z|t`bC@#I>Fo2AZ0L;($v&BoAEh_Rz zgGh<pq^QdXsjNFv48GPDwYZBS&Ntmbz{7S?S@OWe*n2x>Jq9K9lb(#H1SncYNQ%D( zLQb#S21mEk8Jz8|3yU{po3A2Qg2Z1F82V37l@ET%{|6jF<Gz%GtW{L&wfXC_+d3{O zw2B@SvTol=A(-TCIV5z5*%XKvWQYX}`1OK7^Q|N6Y$EDi<C?N-2d+)d%&yE&Eqsmi z*XWKNTk!FX1z%hc^p~N@iOT~65Cwzh0oe~v&4g&zIUv-^-BZunPC?&X$-*ZzyM27+ z!QH)=k6yg(8eLJck3S`1PAXyn*sFQft9sT++2&Emd7e}C)%PojExiWeu#snO$Cb$w zoc<PNab<&^U$?ja2G)0Xp1p1zS_SXA4?LV@Y9%CxW{9?5ldwSyjjW4PMETVE!^EO4 zLH%GFMGq1gHwp!Wq>-zYyN6dqghyzsmKl<UR}*>`(Rp1ib#E=tT)UVC=r}$Q;nolI zk5A7m&ri(%7KVRyHNWFisD1>Vzx_&oRcjkW%@FE_C8v9aM?3fgS|Z$aEN$csZA?59 zI!D&FAHTT0i;8U#QI*uP&cwoIqDFBls0qxM6kID=)KJr&Ro!w!^ZJV0XUzSIs+zB# z5cRhxmiFOS21XCR{`UQcg9;DiTZC4V%XtVG#OnsOaH{!H$vC;kRxjUqf{3hS*9v4% z@jff<#;A%gbZ~L@^9zVeOw22+Y;GBuo#-FE;ph=d$FD;~CkeI(tiDNbxl4SLQ(Tj8 zT6=Nx(2bk3(@WUB%{V?M|Lbso@iSw%2aAJ!1k)JEzuezn+1v_NKPD?TATG%*B*NC) z&&1JL#n@cSKCq~HeC6(w&4<q)JbM#U&`BZVe45YTyhfmkXRVTZwIZVOoRSY1JY|<m zh-%Bb=FR;|CgwL!i2Cc7>DiTsk9NPw(tP^#>FVsAeTA>-TeJ+Vp;Pe{w@B9sXl7PM zGAX%7<Xs+Lxv%e)&8-(qui|q?8X=(VV&;PI42z6R%LEC95EbH60Bm}BapV5>^~ouZ zz!Vk{Ln3-<Vm^Hd({P)JOYX^S?n$lT`8}-zV^fQB(~EQC|BQ|KI;8#jt+}tCe75=n zqoa*o-G!Ixk_(E%Q!<c|v5tO0mTsP!W|k_ZPGJzMuRK`4{}cq@JF$*d(ftR0LmqWM zka|V8Dn&#Ui&_AcqL(6IpVggKJ!0sdwRPvw2~>Z(f(HIJ2Kd(N>kzt{1eBp7R^vo9 zk7{ZeS6(gOip%4}bGM9rQ5Rmx6}+hwP#03|yu5v*<5G)DFST7B92?sYAu)Aoaenju z_Qd?0Uqlw8kUsc)a67{KLAKF#$g~c>^p4c3fy>t>K=7gdcG~BT92&4z@cE-RXF3K4 zF@g`_ZBTqN2)>nvx4w;?ijjqDNJ_`Z`r_?9z&<L!lTE|t6u%*(EJDexO5U{s$mi4w zqmcKIwo3t%lUR057m=}b=j1}|->9In&8W0w9}4&uw1+qLFFB{;D`AzT?p;eG>w3{7 zw7zeqwr5(`J6FIcl1$!%L(SR93E>tT5}KR_^4>Xk^~TMcn1tcsP;_!}e(mliVCm+U zBq(D=$f7_ZU~s`G#382MKdUDwyF0IbsC#%4Vp~jjdoT_~Z3>;<+0SSG`W4><yz>V< z{MTLY^Xr%o{+cqHgH8$qQ%PN8T2X04YNkhcv^_Gw)Y(<T)Y8f$G{64Zjir0*51z#p zcXMg`pB6M`kaJT&RLQ!O!y}Jw6otH}j9qeK<#0rPpQUf%?FYLjF#TPapICiR)4PDN zG?m@++Ftoo3f?mI1sdLU=j1&!of5~E@0GONP(fr08%L7Lxk(v1*?M~TM8$%8uWD%n zawZoSu+shw`Rn(#pr+(j3P@QHGRct&8Y<c)Ad*@FvwK5wdh;8HhNf;}G5PcVl=;D# zBSH&4bk&}rp_;bKxs^51nK^zj2`+)5mTq2J=2iyw9%)tAng<t$W^YC0bqE>+pA|M^ zQS=0<mvt(IM?vFcl(%=yiYvb!m~mOpHU074ixZyy`oJ%2?_h3-(Rg*G<@%bTZxNLu zQqHMF&9jE$qT>bQkm-$wDV2lT-g$!hA*ZBWr3@XN{CvR-WR+F6^!DDkIfF_G9wP|d zw<1g1>&v&+vx;ij#f-q`lM9=wIj8vN^n~PHi7g$h?;VGhXu+Sunz}hMvH9C_JLu#; zbX#!vc~zGZateYIQo!ffc>3tu*c&(^!*V)`TgK~qZw90{pVtdI#i!3K?<Vh3A?;WU zg3o6d58htQEho8V)H|ug%rpB0slRyHdG@9Oy)3SAaOLuiO>n35D!!67xe&CTlXK_Q z@&QYeT{mnTSSD^7bw=7*-q_LJ8wq|Ux3aopU;up?2y-!L?l?i^{v{`9uySi-a&gYd zGme@EHNluc-o+xMBBE$8qHrMN(ooOH6bSz0A@Xr0^AF}~ezV}CjnL5KWPN9MVGRVf zMZt+F9-)yoUcS0kHhT76zA23vHA8ugBX*%>5YnDLug9hCFXvhz<xnK$RKlbd2$8-~ zKzVlKgqd$~M0(>1SAW^^)2E+%CN}ru*37ywA;Sn!(-i%nRyGwMRu#|6j`8jrYqk-W z#Lc45$RHHWoE(tIz__HsOLaZhh7Nn?->SO6JVdztqhq?}{v>Ql1f1&fHnDzLJyFG1 z<4Uhpc8m>A-#qB>|CcUc?QiYs#CTJ8PjPKSN<nc*QYv5%QLUb}t&W|STU>oiVNZB& zhoXG~rHnHHzk#4Zw6s&Hq<sO9e@@wtLnqiOyf!SqU&=fhMzRyS{-PJBZtd3eEn<K# z@0z<{9?PZ~q8Hc#B1SFioKtsgc;T*3`eiZW$kS5Jie}DEzJ4Hp5U~!7-N4KrIF5=Q zc`a#rd12%Jc3aPYnw|&B<O`c>x@CqJ4aSvSPpcX39-f|BoSpn~>2~a!F!nbZc&z(; zaNf@F)MWdWE2RyMsYNAW$?5whN7EK*6;ke(*5Vvpb<rY<^nwi`zk#S(iljsF1=~ER 
z)QU)6!w84iMzf$w6|0z&yT$$m%e~zfVBqV~ackqXRc()4W{n_C-$ntwNNQ=v@SL{c z`8$zC1Dc2|3OQ#L6!5;lKtW9{M0^JgJeKqbHXDz6{L25#ePgO;*OoW$-wKY&f$;V$ zyDFosqltfURLM|s^=L)M_?5AlsfEwv<DYNv4cp($UOQlc=Ekvg5X?li_Wpsg#^$u* zQt0FEXp^IFYp-GLY2=gb6kTKDm&d0UbXLTiOxRq^GE>4fAD9<5Pp4M&5jBf(h;0%u zibRAIpD^~9DzD%E*gUj~S>_KSZyr?6tRAT0(<tkZPbKXXmeYE3^KnYmupzR5T+UU~ z%F#6-Fd{Xxtf2{&qB#_?ej8fi4>0HF@F@Sv?Txj&TdA1D4vQj-f-5q;BdKaMwQ8ig zYhrlj%ca}-FZCD>YWx_lfBhUaKe_Ab)yn1;P<seoJi?-Ey#0*q98@h3Fb0_h6sozT zuxt395j3W`U=PfTT4jk@XA7F9fZ9viB$|cRuxbY8Rh&$={|n`6uAweJEjA3Uw2y2A z<#aQt2go~@YWX(O${@_VGuIwGD{h~0jjbb=braKdboLJjOUWpztq0(zP?iSYe3)Z0 zhNV-?qcVpR^K<YOYj-wRZf_z((y4hhhy)BZ5b4pSAow?O8pgUtW@lFxrWWTX_NUO# z&*7ScaKxa1Z&v~Pn;nB!QQp1~g*{sBeGKg!<V}%^jwzaMX;PLkv<mKL`3?E>BE_t8 zge@~ftulG^V`!CqR1jG*4r%PFJ{#NjPH6ipl$ATXnB-*3&{}!tEGlj_PuB}>0~^n% zh!~h$Zy#Pyth~;m5x}KpZ|&w5oRE@NbqQl>#(#S<@E~Jj*oFfD3{VG#r_gnJabbFC zab|h>-rj?r!RsLSL`?FG@~)n#Z5egrnYH7sS7)HX?8@Tw62QLzAKB;s4-9ExgJ5zR zyHQgVlJX0K;*$}<;a2Wm+7{N5hTbwZ3G(&{{CdH}VwU8h7NVA!!WQX*W@*CaX$)!s z>^i~9u33y4LAnm{C%pa3%d^+-8&Nl`TY|^7kqtwiLSDTnoq!fe%QTV;Hi2o46KfAa z<R#5x7?m8%oLxP`qq55@I|c`TtB@XN=D_5)oG+p4^39dyxz&}MD=X`FZ$S-?%3&5Z zIK!hYX&xC?IFMC8QQAH^IJvmEu{yiD0w2WG2{io2$tI0|9l9P!)PK8&FtpWobQM%z zip$Omh)r}32sLv-Xqs7z>UxQqMv0q7FerPTKCj0p=LR}2V44i|yiq*4oCmjln7CCU zsjPcear+5%|JpJ*y^Xo#3a%!-W|&huM8)%xrcVQvl(U*c!ouynuJJ8XzhW{OCmkC{ zL{La<R&Hxw->=vG=jO(aH?0?^1)86onEy=sXI#;c1L1S4%X4ch3+ro(n7Xk(zrMb- zwYl?r@8<G~j;S93ivr~Z8}GEsIZe}fO;c@G=ipP&Y;J88wlK4_1Y4V0T%7o_Eamt~ z)X!HLzKw`cj{~Elm)b6)=3!>$_(a9oA^lC9TvUuKc{Duv^+UO}{D~!O2+r%B*N+C^ z`HT|;Op};3f=HwhyoQksn!!R^{y%;Ecmm$Ps%$-YiUNLcrRnM_JgPZoNLb~7$g`>V zaH)HPs~KB;h)iy#Qgl}|aR3`1nU)E090YXxf#+A#V1DB~_A&IQCiZDxUIs}A&SzIw z=hs#NT}%bYF?D%+Yh`;IDm>q~dw1{E)5`iz1uZuKOjJK8IJY;iX}aw4bpQC`^4121 ze2{(ECWic}-(q-h*?Y{#d#_)wYH7<VuZ&91!d$B|vUgO}H|JDwWmWg3k#{>SU_^bv zj^8xtyipvFVJxp<EVaBBi41~8JA_ag;Tn*8!rs56eE9fNLqF;-)PTr$j%_(cUgAc( z8gbJkVhQV{(w^mePfIV)aA*Zc=vdo(dj}__7Ng@<tcjYV_59E)DEZy@Fsq>IEAwk> zpy~@~Ne8f3wzg0eIKQ>MdTV>__HC%^w{Lx}cWys=wzv24X-ISq4WAC7pplMOPHOFV zUh`CA|J=;V>dMvzYzj?2M)t9gf3Wi&=Q`K;msdImuU0g*q!*WAW+qs9c%xPq>YC8Y zInpV(lU}qt!>hxh;sdmE>qT-w)r}yz=tL;xLL%>VP8RXt;m!$xe}1|B@Y()EG6-FT zjicpV$~BPn<QME!?P3A=?s4b{wOncr7Ot+|5iuDh<#3|ENyQ(Q&GF-@=mc8ZKYR7H zmCwSyxq(6*E#^>Bgx^8cjXQS^*3G+j;c@FOy576{^4-hXl~pAz1R<+3yP8)>eqUkh z&GL?!5!mRh&E+rT&+ThIdSpRgAUKHq5vl%PpUi*0R@K_uTU1*QX3sAs!O=h1%++1b z+FD4}j6~9jT*~nW0V6UY6CV9&PTdF&op27FFjkGAGh()<C0+g@X6GAGbVA^to}T>F zKD2_l>Ix!n9a<%9nxY%nf?De*<J8bMJ+b~Mw{c9+AXwGR9&CJcW)7Ix8#izMlgSVi z?epkBb#W2xWYDyK5q1pltG7@xzEAkpy?glU#=U#@cXyt>c@Yww3xZE3X5kXwnBRJ{ zsBNZWWNCf_wz-9h1ps@D&qtF#f!F68*`49};0=_q2Pr_$I*zjS_A_>LQ8Tt6le8re zvpy|i0Z}%Sk{73LIGa`ot7b4f(kUUo6S4k>h&7Y6)9#b!Cm8;jW%c$>S;s8q3RHH( zn1ErVvd1Mgj~X%wJBN^>TRShChgS8x@<8NmJ-vbxQwwYA2GJ7?e&4L!@dNTc^+n;q za?GLSeV=x;uz!pC_Jaq<tRVQCckXB#`w(!biJOEcR*!<<S9Q;hEv~QM-d@_+fPbOF zuyP#ij}UzHW9Q%?_`T$U;^2g2L{ONOySJf@ow%0OccL~w2$`HduLn}kt{uvv8N{p} zz@#1kk3@2w-wT?bfq*u(_JqSfv$PK_SNAS94z7U6>wD*mn5XFkqMSUvyvx;@&5@=1 zsZ~R4nm!ulcAnu;sf8uzTn!G3iw&Fcua{|mzUR#10?7O4Saof6e*IT7vrjuV)E{m? 
ze02NK5%m@{d+^}llPAyKymWAjA>vS_l5%iQZYk-Uuj-z^KD)kt=ho`>CR*^<FoKVc zZ5MC_wnt|;QFAXRF12^$R#w9}h>VJJ^bIt1LTH+rF(^BKFJO9_SC^1ihe^ekSv`<Z z)t^Dtmq7(dsp|h<a^7co^!RjxPdNNDMZO6Ar)1hkHlhmq1dZd>yzAAy>!_q$oI;Bq zJb&9ezM+Xo7t^-2^Fl_YXIHnjkIYQ}*38V;mtyDk<sF@Y{4DREoebKsqs{qOs*ZvA z&ZEar@9pe-Q|}*me&_LHRDJXq)&BA0w;$iPbPlTOd65bl3TXR<7xq_lEwo-+oLJsi zy|o43dmx{QviApO<{VmVbyNkZ#{{!s$?2#$38)!pcJc-mr{z6Q@f#7aDpFsxXHfH} zS3%M#dDAL-(#pG^Rtx%1)u8{Q77|s^c|zl#Q!aJSR`o2PjLq=6vO}treW7*$%E_OT zaU5N|v-$9ObYZW6mYcDIlUI0jUS-X|=;&9W>$ht+=8iHppMk%*yfnYMx_Cg|F|<QW zdQjW<QAdNldw=h9eTb<~o_wx*yN{mizq^O7yZ3f>?>^pv&+ff^@$u7-1!YYPLi!}a zrtV42#h2&GJLj%UuAu_tEsWr!QaUqBOPCx!mi?c-d8E`sFx%YITX?B1A-5nfF3H(H z#LU@E*TRBN%jrLqLrx2tl8alPQ}Llu@T8V^r<QZ0mT@7M_xKNk`2TAV&t;wb?Agge zsy`uJ^E9_{qUqXd+wgi?&9InxvO2Os!?%(0qNB2P)PrYl24}W4T+=1>Y#e<2K)dQX zI>+YbzP?g<+@*QrU#DnL^1i;lw7K~O_+QP;LE7)_JO&Nl=ls#rCr|-$czpci$<EU! zyU(8OJbSu>dL8rlWWNoDeE9fXwCunB@OEiqQ(Dyt!bvsPw8Hk;^3M6Tp{2!57+<!& z<nUKv-!RyZV`F|C?`o`ZRJwkss-+cz*~qj^ka}wmq@k^Yl7Z=I^}zqD5&6AZFb$e^ zDp^-b85c?^2XZOLGlsEbR_Vm%Da9xE!};GYEratVmr>Dc`{<^zUx}oBfu?VhtX&?d zgiU(omHSU$Csz!L>U$g5IfBTi6_@r64}TT9{w9Zm30g5J8q^ilRR|;xD*Qh1x6#0V z)^@aRKZJ_bbD$bUIHo>-1{Ih;STX#g;D53Qz2LzfhWtI0=Xvya_r<f_=TA*-LkM}a zxYUs$`F((WegDEF+UH=@9_8(qmoTaO<NmZSLjYRpr<%HZK<X273;bgfQLf&_O~=ff zU)$mT(Tx5NohS-<cPeQo3ZPxmo&sIzv_p6hMSO@ttC+f9e*W}t#@Nf3FJ@<^n(C{Y z>Z;+%(<giX%AI-hE~owm0N*^cR?;>tW|pAhS*z{e%&O!qr0u)*;Mw@{eM4lvl%9>f zw-1PXd1EswKRI`JJ{XouIpf&oWubvz`aFdI?cp(5cZ{Femv=PnUp&l{&#DeE1JX~> zki+AX7cX$tz30z&pFQ920w~!>nV&tBHF~@QgT>E3e<-SGq2bd$D`03GP*Tz{5AJSg zb{%E!(P}@3o~VObx;{6DYY7Q1+XLA53=LH_w`P`Bgr{V<g+y3;BK2+Ul?_Y?bVL5r zIGIQ*lvLW4L<T`B>rO7`Nha?_qZ*(P)L@j<XO}(z;nlwf>i6#6wy`un&&y3sO+`jV zN=`vWK|xMVPDV*d!Ntk0sjgaIQ}O5T)!6(N#@JlGu^E=rb-_9dT#c%GHKC}bdsO-E z>-SZiH)YHMbgk^%Ln4w3id*~oen)<C{~iKUiwh7W&aSS^eFgkyOSApp{z2d|b{(`D z!z~8jgJ3^>_3FvXS5IHOgo^nus$$-K`Rdn}&p(sDZ~M_cf9KVYZ)X-)WK^99IMk#p zql+%jUFuux8eLh~+*;Y%0JX<BoT;y8<{W1^?f5+EPJj*Q#j|nQdC16E2cG~FM^`Nq zGa&=SDXSb-qYxSnF)|hba#kS<R$&S@VM;b({);M7$~v;DdU5ede|OsFXJ=%jF0!#Q zD=Emjxj3dI#x>X1z>CX^^KH%bnW;%gPd7t79bRrOE>4cZ+^j$ILR9tTiE4;mudM?1 zIvxcIh)NK7KJ5@HN&BvmCD3_fQoWp^os9=7Ctq6Mcy(eNS1R&IS2Km4SAe?Ay1ur6 z4q8`l-P(^>Q40TXKTzGd|6~`f?O&t*5)JvQSI<A!*DqdwTj52_yQm)V;>DW}AKw1- z>CK0aP+$G{;nn*eU%vbC`MY<|-n@PE^x5u<=g;4~j!r6KzhF)$?ckQwRN1{y-@iCG zvxfHeE0_c(rkfvU;^_3s%1y9FYpc_%EB!adE?w@-Dz6GnN%shkw(>ygS=%WYnhUuX z3v1a?QjpnLm{^z^nwuD4s+p0VwS_4U7Y92R5A5^b7kHo=RGo>5p{=Rzi0LjLel#-B zJ$?FgNTA=Jvu`_3UZj*?#YC?uRYSt2iKrB%R~@;yjjUPtoxN9sGq?3z<8-X-5W(RI zxdqKVy_j3U<9D-Iy8|X}y}7b7zq$&p2Ib*VGi3G^{sAWCj&bi`LeN3z(}Ti}k@0T` zfBE|DtGB;acm*EeMfAIGp1pk8(a~02Syo+HT2)b6Szc0E0kyQethlVKsQ>D~!za(c z?Y#f#{mkMrzoa<?wi<|x{MH#DzhiV|etmsk?QzYT`=+y(mX)Msq-A8LLrqCd35$;M z^7pq#AWZG-%$;3LoZZxnObmRIRZJ0_?5tQ%ikq`NAtil5-IZT{`CDTD>eWj+I$FN- zJizlYKjMLKK6Ca=Zg%>guzN$3E4lS!z<k&EmQ!S{xK%bpuhQ08r-e*?5^8o|y(?{- zQnK(gb3yn<$LCa3^&d!4j{g>&$A~;;YQf_A`Zq!AzJ~|kA7X$9KZDN3{0jVwmxopT zAnk|i>v!*W_V$<<8Hk99z(#&mDJjmJIU^@4{p!O{(8caNdbIQ6*}dHz)EaYEC2mch z_{x#$-o>_|<(n&O^J^;*&ti-{o}USwdT9IxBO?O|3Gv~tJA3wwkf^8)(qGfe(!|x* z)ZU8&2lF1T4yOpo)l85tUP93E_gg0}c~OFsgYCC|N=sdhoPvCDY5sqE_ez^aD!b<) zfQ72&nxo)arQz4arRqx|Y1hy@z4_>QXjZeDnJsux^qk>V)SThpeggs2(hqPo%gd;# zl540$&C1pm#?}0qrTL=pU}+wqY2OF_=>ZROm}<-;rtZIjsc(P$_;`1hhKAbT$LraX zy|tC4{rdLJYZ>W_!oq_0_x85$-@Er1HN@P1viI`co0#NcDqbxzF$<^I`l_DAx+{y< zXIJOeKF>_Rw`^*0p?7eAn~SrztNr%&=C5D3y8QO-8+lnNE?ypMFF!piTWxD687)gT zR;In3N84NLP`5VLFcp4*t(DmsA~I$n)v22kKYjX}Nqt}UWdee;SpE5@pMJ{EPWN(m z-QLD>m*(fso`N`7n3??ezxk7&zj~W>X{7zeX4mAc^x9D|ixgF4gQ`y>`2|NIy`ZhV zS2u6(xkMD`THAYsMkeK>qF1n~OSf;&uCIe@o!{D8*xm+`58Zik;pX}p#?xXdv|rfT 
zTE2CA<<8y3%?(ia+dI3DU%k2i<SEoAZ-3l-{q8<0o%q?(pu$yf*&jZFr+xp^ryoE4 z{P95j>F1x{efS7$!xK<JtQ8u(dIv2&?LB)6%qPV~AO88O%8LAay!}IiH<#vb-G2bz zj=mP~@ZHCEeb+`g#f;CK*EbKTsOVmRc&Be-Wp-t00fO$^x92xEr<Ruv`y6-?nCIo; z26PX9orbD13kSQAy|b>Bt-6`@d09;&A|g}@16@%6qANThA|fOqr(zIL=<e;s+$r>L zR1EcXn3)){zGZ7=0b?rxApyt&*54&BE6vW%wz#tJ?nf+tY<FnzinqI~nzG`>3*vC% zpu!VPb=4q$pULsj4<9~Y>1CgO9vbMYEG;ZA&aWsbs46Q=Pfd66iSi3ej7-Qf_ec=8 zDNwRY<rI=7r(tAdVi6M+QP<SdFmttV^9qPf%&)G!HZ@&TT&S+D#?N;i&a8;2u%V$r zeN#P9y$=8Y-*u%YJtHkPJ{D?BTuf|SOngFId}4fJVnRVd?!v|fC_KEPs2~r!4O^0t zk@EBNS>4=x{{F{D=%hLT|7dr&vHns)UN*Ys6z1>O?COfr_03ISzO}ibs2~?=aX}vD z399Djl$R84Y^=Zk_!D%NohN&^m{(U(f+L)nn|<Z#m5n>MA3i~4?@`0e?jCfO(aFj4 zB5DMz3Mx*?In7g-`j;zPuPUo3tEeb>c)G!_o?Kd--P}Al#-W+-=jGwT$~>GMT6%gV z9X(A`3oSEC5mf^c9_jB0DgWQ^&-~~Aocf<L#NU0cB>y-~L_k4*j$bjmu=*P|_I7tl z3v<m)jAW%H#YBbqczGnmMP;Neni(6G6y`pD^bjj-@Tund8aV0S)Uu+yy6SQ;o6sgL zIbK0dMpzKeEsx|yaTiCshYx<w;&gU)R=^(XTlDmFT-=-rit^B@+gq^Q$t@u^5+dK? zlES<9zxq3sxmjt<%#5&QH5H}c0N;$%B>1s#9AKaQyge1=Wm#F6X=$ll9PGZ?;JbHk z85rn!xw)V>$w*7W$d0a(@JpGQ=&8>!F>&(K($ZR+8<!VlU#cj|PER({*P*4Q;o{+m z&CbuSs^;eAq^Cb;YGhEDn^|5`ke-sDuBuE$MJX;JHoLZlIrX6YR#ujzWTbL3QeVCT zRpA^-NlH*tQz<IRLx97>%?V>%DSFQX{g9#}La-elr01&-A9tTWd-3+&<n$yJ6_vP{ zh@1>&S0ynOcAk=w!o$rKV1}(h1KF<|D9A~(vN9uF95K29-*xKL59!GXSRwrWhaaeE zs5v;;uiqHnefjeK&d!rJ@22NwB_%J=&{AKJ5OHwyr8uukE^g@%U02<=6rEmaXXgMe z1x^7R%#hIF#ap*9SC&VnCWM3pV3(j`rA13a?Yr;4Cm|(eU}OZT7rrP(Nku6mEhTmF z0vKR;#8g=sY4G{PWK`@I^*ur|4;lk~9W6Ms!a@R;rY4DTQ8^hYz%4w+$3)mznu`bv z($UcXv0Iz#-+XmuY7#Lqv9#m`^sz!!S#*VC1sfFLKMy#;2mQP~3iGnciu01=qg9m@ zD5)rXyxe~)^8hxS(8I4kd-?>9jHsxHfsr2Ik(HKwxJ_q!Gbt%aK%npR0aJ(ca65JC z6j1v7*)v>U`smR^M>`w%tcj7q!Pnosc>{`<7#nr?Bifqk|NW1DFfr0kjgNfu*3|eY z6(xnRhzJui6Dupzom<=AGyv}k$IaQr3Dh3N{H<FCh6b!GOozYa!To!%SKojCz3JCG zhA9rV)`Uca-MwAU-@beK?)}J(5$JA93$uq?@}K9iFgLw9e-l>&2OBF>6C==S7G@@J z6wpI&V>6J$*9M?pLu25fqoeiVlcz5~e5|f22X{e#j;^Zg^9qi>{!vyT{j=O!+Fm&& zT?_4FTlZhQefH!r3=?oJjEoHCHa1{%yFNY!ol#put+uKRYrbN52=#T&_BP;Wv@NU! z#Bm0Co(M+*5=uTLo8XkHk3ancuYiC<M*vg+ay+~Z_V)n5;EJ0YE**RwItZM45WT}6 zkq{U8umApU5QoEWfuf#1eL6WI_V~bK7)ywWJp9)B+6rj6oV*Oe-320gteah3UW9{V z?_kqE(DUy7JJjft6c1EDICh+G?`m%*A|^66)c<vVUcUzIf}el5F^rYu<fH?A*zYd{ z!`}}-{J_XS571(50H^XC{khhzj?tMZ(0M~612$IX!_9Vg9<j5rxH{S6+Gps?5)xt% zNIrl2_Qs77km$vQxx>xiv}$Uo)zx2u220o)K$llnQyCc_0l^0iJYwEOV=X{lSX{Vz zYrC_%6P|+rzI^%BEU(?YXR6xnM660;Mj_yH$~)&qmhQvr{yv_-VNq%E^vd$Ju~8T@ zlH#NBe_d-+9h`9`U41@LF)kd;ySq4?AtvXRvxv@ZdiMdvJdA_q&e6?GjURd5VK{*O zCm|sjywZEXGem(t0FZ~<gIf?3IFD;<DvI)S=V*7I9QU62j~+gN@j4${$`QppIhmTe zN^E>I6*X0ILJU?yU_T9w^jq7T!P4KqcNe+|E~kXGq$I?gK7A@TEB#33;S@i@VFf=c zkDF^pE6!W#Zf&fCfIGW7cMtYMxN2aikCl1U1u(qRNFdOlrK1I@2UkBlJcNsRLH_gL zJ)&YFY3XQ|7qO(aoa}8Ox+y3w1P6jubH0MGu@(-jf1qFA;1zIOV3u&TL<VLOvMBIr z`z2M6lw6)|A6`Stg`u#p(9q=M!0-@^-N#`bB34luIWB$yE>3nFxk|?$2uK)2wR{uG zpT7Aq$j=-0aAR%d_@`b-kRMF!Phaw!c$mjuuJ-p36BDJUCPDOcoc1eA^DxSQjNs6D zGHq>*oZKw5IACG^?b|mD4CkOtZT%(KcQ+SD*xK`F-~O512lwv6@B{-N1Sy9%azR`a zRPhyhPy^=ShhvTApjca&vM@8485`jGIpCC-m>4Wfjj_H9=2l)ouDrH(Y<|wb04MVW zxtTDapXcRn#ddiP1{%=7grxYz^_79ES8y^fzz3N5`gz08Sy@^*+`!qv4y<)zasmS* zy|sne)q%d<T`YgpW^HAur?V|QBoI!kos)e`Y!vj|uFI|8G#eeh2FDPN@TH3Uyy6zX zo>ta5Ag8;$bG~JGy{f8;oPs>3u%NTQ54?@7rNz|5jZZ&)#QJsM10%zNiAjigg+<tT zd2uoCiE#SvG$AFA93rA{a^*gp7l@k<H;a#ngyXECrt%5xe-4(#`B|VPHYO5-J;a9K zKd~|op@-xJaTtHA%Zsr>2p=>vGg{qP{qYzVm>?X6?`^8D#>Kp@u2x}j0WRiWy?O-! 
zqlSiBMOFFR_wRYQIF%IS4mSe`&k_*O(9wzt3m$&AsjixWf}*ar3YETo{R&qGBQh)q zqPf$A<S<fiZ>?h+GGb4kIdhJI{><4kO?6m07cUPNAOHD;^tAqw>-q+IxR_^QVS*Ts zn1lodYiu(#rs0UWAe{QIUg_@Xz{5Nsjr8%v#XQ9C0)qT;39%6F!7zIE?Ae^m)WgjT zbal?0IRnH(ENNqJV`FQHb+=WO6$uClDXFQzUHts>&q?W(6x<qwf<_i0<wck0N?OLb zxVWwDtg0L8dAPZt&%#;5nm;kr(>Zne6bT729As_*AvPW!T+Dm9Iseb8vm_jnuHl6V z=|%97w&sR!N6b$@LC^tv4re3<n>Q8*AO!fg?`(h3d1??nthSbrh=_)k2BIXaI|Keu zMn<Z)uWRq=-XFlco_29bA#jM*+dvnCqo<~(Qd&_uH9ZcS%FDuHlcXdin3x%5WTkLj z)C0^c%#D{;7x$h#fgg@_@YPaRIZZ%JM8`);PTA2`f4G63wkEJ4D=!DQdbzt|J;E$( zEdF7kE!|yudb(KQ0Sg2h5tk6>;9$qPw`(iQ@H0)!jA|R}+S{9O*>x22!h*iOUU+m~ zP>>&dV|_zydPWM^NUTR#Q(YB$gq(t$l8Pe2(_L9bNlsP@>(?<bF-l5H!f?>tff@yF z-+7>><3Y%(ET9({S2<GBIuVmmoR*VQT3zWA=*Py!a%+1FOQJwSjf9Mhg@c2Mot;la zjDz<)9_Ih?!&xF$3FojPZGBTlMtWQ!p|p%7VDEe=M+YUy-v`E>y84<oKfcH4JP>ub znV6_BGYb=J2L|zv*zPL@PLiMRe0f#b_U+AMGT&H-i+NHE^VBrhO(4eGsHrKJRhBlO z<eGf^#xPdqSy)+ARF#;S7{7_UFolnoM^;9vr?2z&o!c}xnAg+RI73K6$|OQTNqxDc z7AqbUl$6S9D&!OtE{^tC4;>Q=v!}mbZELHpt~OTY0cnWJ0UjP+9;_(~I4tlp4Gi^4 zE6bak>hLfxd_3k`+gmED$|zA;0_@C#zG>-b8Jii0goi?mps%BSxEXX$Hg+~86(w*! z2c4?9Z731F46~wJYTbBM&thT2KuJw?L~IlX2iq-dX(DY+b#h7yZhn5K;<9o)0)n`h z_jU*7&k!(++65O}kdy&$j`dhUxGgU)3z~^FEl^Qf2s<1R6$Vlt7af7i+lYw@v9hu# zD=RQEGGOHg8gOxQ=H}-tt<3*1I$v4@aV~UZ7=<D3I9S0$5)u+<Xs817x!LKk&sej* zQWquI+1ZSY^jVm(GY^{*5fLh{DqmchL+Ly=Z=<87ewK)onn#hGlD4!k8*6Tvl8VaE z%oLoMD-PxvnVAqsud2od9URP~P7?*WimI}xs0db_$28E@*C{M1YHX~<&%Do3%*VyY z^j_(@a;1xk3cIQR^K|FvtnI7=f&;2*Yc5`taImq$#ylGrH<zKQ5uCE-`mb_;F$qQF zTx!fph`^klvX1$(_VKjb0(N%R?X3-5$52#KQbtKx)7VT%0(Y*`&H2021ZSB<EPb=j zpXXPUm%)_=(bQA}$AwjeTAS*Dd3O(&8xtc@5n;GuSU{SSlO1#hbpG|LmxmjG3+3YC zOwUNYIXiXa<e#IMC)Lr_DlIPos{mpSSaNc4u(5%puwour*;#pcxy{T?z}qAz#zDWp z>c@b2_#k*u@Gw}JhX@7sIXf?VV)6zq=5@8y35dzh32GCQQ>P|kX&@tY@f`g*TNh_q zI$9S;I~>e2x_Ek2)YrrBg7F;}(%QP(Qc@T3F|VgxP+Zv1aOnu0_eavw;bC4NJ~4Lm z#?Z>zG7XLtB1C`m4D{Z<-o<6b^J^=hnReEeSaqI{pU=j@j)at?z2#e9U0v5r%%(zi z!Nwt`zU1;;QR|eFvg-QkB39<9XlNAF)D0}Htvrwtva)!X|Nac&83tif?{sb+UNsd( zT+AC7=)zCH>TO_1f*p2nwC(HfjtC3E#XKO*&CO+PV}XZx04zB*X=-}xn9SqxHrhIx zpyBh&bB)dQUOrw%CWbnC+MsfutDd&Lp`N3QV`WWcN>V%{13lJV0_I`pad5C<#l<{y z3QmsH^pug2tGLd-j+PoB2?d>?4k0O3MoJtOq(y|l(>i&$gD_$}!XOvGyt9XUMO`g0 z&yI_Ea*%Odh>3794@V3RLVi&}LnDg$<=@XdKQKQzHTLlFBRtGAFwzHw2DY>{KYjgL zL0<Me54V?_D_8}DlLJ&Y7e^m2cRD&6Fk>L~u$>prv0Mt2l~X@UBSb7@Y7tgd&^BAr zHgWsG4z{34jhc>DU02V_72zBhCaIu+n|UX&^Jf@@j671gczD%uFc01a#&-rB%(tSM zx3acq?`#VX3&z7d4-dDUy){1OIXM!O6UHY;kDTy!1oPx%Iy#!=m8G)_(>=YNNht}w z{yyGF4=+sh@q|aDuUA}rEcDZiv?K-`%u8Mr=LY5-ZCP+L&%w!_kQ9IQY9H8FJj~PZ zsh=gL$V`pL8lnR8E?%CvnMZjWXVCe&Iu!G`ybU>-o`IeW`tmxi1{j@hY&=Tm{k)H4 z9;9h^cL#O#8QWC}4D-QZ!R?*xyU(8o1^U6sWn^N2_SDo=00|hmvjha+eg7SpOrXB& zatqcIEiNiZ&&Wi_uX%=BTi!k~xn`{N^3D0J$A_3#rJ<wMG%&Dn_dtY1NGU4fz7Fej znt%j!Uf(T+iyJ5N;I)oq-qOmfrL8F}BoH6-yxgFf5D(&E9{3p>A3i4Yc>FvtUr|+l z|IvfTdpom>H-|^A4h;<qT^|@4>PJ0ZAGkg|2w(JQcQ-FP1L9d+I*(x<m(D{24i0t* z==ukGXz?&lN<qo3e1?cTGc5rt^C060r1v=#^R~Ded}dw;x3@tvFDom3w6{41^Uwgl zx0#w5-`Uy0m7zs5&wye6&VzgRcOKU^HF&~+Yig{ZD9_5qLW*ALV{c>G+tvON_r0=F zsSj2rS8vaY$~J^d@|<ek2~{IG4HH);HV&Er(0Lj<I!yyZ8+T7cXrzpi5+^$wu4__f z2uV-V3+cHg;b9)7^TtOpZ)w)t))e|jFh4#q{Lkt<hWW~}?yk$7ms{Ig8X@RF*E*;z z=*O;(HjpW3UtEw4%)i3pZSd+mI|o~IY}A$hZv4!Xb1IxBB*V=-lba9nSj>Z;XJz>v zI*(55kR6}-BmMm3^cXOYD~kD=`TKVt-tX$`_6zU@28c<B;dp|{d9wHGW%tLA9v;3X zxF5?Pke-(sFBw_+6R;}s>iCBj_Gi?Nw_RI)_v42H%+qQa7}~mfx}lg?#?L%>8w~S! 
z{Jh~&I&X1+d91lg4D<F5*35XlO+r$_@tJR`!*@-pth@yJ7sS^vm{L(u9;}p<6eJ|X zm{s8z=JD%13tpW+ig|Tlo|IkgG!b^@#g1ZL>}%%n`1vEe%@>_-{DYVW(ZKbqzheI3 z{g?0FXJw|rF=u6EE-n1_Zt(rRUF__vjkQ&H`g=)L<-kx6D?2X{y%deCGcx^hTJ3m6 z=giZWZ@+jOIxRy(TMsYyuqYJsxaQlqI-VsWJIg4l=bDU%dGg~izwd4EFb_I^q@N#~ z7&)5xWfb#Rrv!Y-&!h4-Kzo3%_ulT#%JRZ~-P~B4nz#WcJwGSoNaneYV*WTfPee*V z!X|wb^FHTr_<8YTGEaeKUiOb+{zz|wW_}m1pa0DKgWbp5x3)oQRF$y+*4Eqr67<8V zAFxh3+VAhhl@(3xZRgK(S=jgyG09O$*?Xk6r_|i2>bi-AdF3OS7tu%H)p@ex>U>y8 z&_A#9$6+2b-v&4Hs6o7>tQc2T0d@d5j|>lf`r<jp&%gR7n2(8z2Igt-dK+LKub*df z|E%+PnE#x&Vf+0t>=)*b$W;m*sq+bmaZ_k-gV)dh8pGyhr^7cMo}~TY{ylh|jGP>X zW2~$A_w%&Hm6eU{tq|=#et0J?xty3qfmGbmCb9;w@1DG+qNGSgL#<_KWOtO#d+ams zmW)4d^M!fa<LEq|ybT`a#eR#<Z=f@8wN+*Km@h5D#XROZtd*5n|6p%MD$cykAENWR zXm4{Q^KQP#bM)tqqw~KzhNbvJz0EP0N9lZ9N89taZ=D_O;8-1QW@%<ZL`)<uAr1&Z zz=dlN{LFk?D|($2YR*z|MH>~j#@X|_ddU3rx+%|)Q~@DjRc$T1<LbPqk!Kp7{QQAj zrR`BVf1DWh5AZfGUOcC!p-#p&f9$|@Se)~1?%djfp%&Uy)>NXdN&Q~tg@yRfpXWg{ z-vv64b$>9-6aN7^f7CT8^5c7(TD<cr1@X_TJjgt**`x=&O&geba5ZK|hFE@@m<aTA zIeA%99L$UF>wMKd^PkrSZr^zzBxz2-p{DJfms&R!Tik1A=WONTf}i=%I?p6(=ACf_ z^Z4>Mb)T7UZ43{^>21W0%G;oIp8c52zkmN8z9=-vA6t04M2eYjgVuShYnSHdW*{)v z)6*`iEK5v?J<`wP%2fg({QT#mG2RC6H7R0NDKzu({}l7!ZFKN>n`6bW`Nahoo&P78 z=Z{Y~F7s_2t-!pJqP(4z#o=Z=+?=dztQuMx5Q4R0n;HJApRcNHXl>!){%Sq!&fZgL zCHu2nn(_`w@fD--rGwrPNv=VmN5-%O%wkr4SzO#aYQH;INkV+=yv-3Y>@od36yVSt z+l2GSj~+ryq^_=-SC9)&^02KdX=$h>CMGs9HLR#AFU-%TqNd!$wz3$Lw{di`V_{}` z@%%Z~QwD^@#>Wf}_Tck2Xy$RnuwT5*v0_-=qy79**I|nbj+}eo=Z}P+v%Gjj4EuQZ z2yYV<`dR0d73FO#%?~$&c$}Mu+r-=ycD@Ba^OY4HS9;I$^32apA8c^%!D9{)141@s zS%;*Uvg-*IBYugwQn;_fqL?QpBV>`V56<To5R#F)@H?2#&PX99A-1uzZfZR`KaXbq z54a}v3wnN+jI`w8*FXOB6YPV6qFiQn2B^p3SsYMO5@UgR8(XW&nu><HY7oGYp+T(E zX?S?JU~B?A@#A|ei&>bN8KtBp0f1}Q`f%y|v6)96GjD^3d3_@TIXM~J$xJfcqnH=q zKQAHzF{~H(c^EPAFh4N`I{z5YeGeF!K<7i+yV@~2Z-Y(e1qIHtbFkYvIl!=sb?v}@ z3|mxD-Zy+*K!9&@?Av9<H|JJZQGc>oN6sNBs`y%5`E@x}bygNeJUUOrdch^4_=1!y zAGVq8tILZ(4oD&}?__6lxPc!MwQkYT*`fK!TqP=o{bT(6FQ|Kc=z{!Wed~gR7#u?w z2kGhQUcbchU3%J@;OAZ4U8*lt-JG4GprlAk#^O4Z<YnpU&*|uB!ln+tJ2^fIokdGa zBPlsyczA#o@7x1Yc3Jc_srcW)JZ8R)3tn#n#~kpJla~_^;OFMzINV@ndIHrc40Q90 z3hL`?&`Z3rFfS~~&&9<Fr~4coEmk{sV7|?bM~@!j%2gWb=~7WsA$`3Yn;V|Kexs-$ zXJcu0xS5RfMOr#qLt_JoWCyQw<I#B(^Vf$2_^~h##mmFv6dB7|ZXHpx$jFlG9uWn~ z%Bpy-NfEJ1c*d0J8ClTL(Hve01y%w0k(HIEq@wgdI3L~!3|kPsAv_SR?afCrfAoAC z(D~yrUtUs3OiXm8r{nNj%Zu~Uk{7^uT&gTR{O+6AuYqK4ZZ0n$&xXdjS8v}!OF0=V zD;kROvf!LT?{Kom(%G#o%qS@-t!%8)(o<k;!sBfSNXW@K6;OUY6+iRm@XWVy!8_k( zzavvnP+&bx1AX1Vys@cKUQvEqTNC`Q_GT<2pqHB~Y=IiO54I&980OLIq+;MFzx(k$ z6C;Bk(&KQmnu=n8nURqJ*ocdXvM@Krx&$vjCj)cEP)LC9Fnjw42gCfp$S~+U))BeA zxds%2oJsU7m-YqosL+C|iRD+f?(bonZ{v8Hkd%a7(l?<zG%g#4`-V$cZdk3RstnjO zGSVMjSONP93_~2}iS+2|ZVwL)#_w(Hj@0>MGym?x2k>mb_AkG&z`-XZB-GMU_Yd&t z>T3V^>F213PyzyifxfQ8jUPR{zp}h=_;d{Pc7Zer2?;tnJLVVW&dg5X@iu3P$*Fi% z2}mfg&#OeqIKCKG>=?Pqh56Zt(4ePJpI}{5flBV>XTxx2WowmJm_If-2B*o#(+z9O z?X3;0bJP#&JTQM_{Q6Hn|IE$FVTofI?~CWpZf$Qd(4VuhwhRvo=EJc*4n%crWf|8k z#l#Qj{Pn2`QBmRS^kl3ppFiEXy}imSq)5oDC~O+(m)R9waAjiY&SB<%I7>vqB^{hn zQQAJu!pbf#ie)J!oN?Hzz1<ybLzoUJ87cwd<L7;Cs2|`t($61tO)7qTa^%x7nE(0b zU&>1h0X%1W+v5Z8;$V07>{)SfF%vVBsF;Y+(Q6+*eth!$Da5J}`eBut!{x#Kdoc8a zB?f!!?&VfnU$efkMvLRV2Po!=$Y}Vrh{<WvlVkDaZRqjn{1M(}X<?R(j1146#xWO( z99^9XiVIh^*1`Vaxz%S+XS<=k4%RgV7@d!ej~=}-1gF5<)EH~{uwS5u!aiqarGrDB zz_u9e7xdi};UHwDCgJjR&Ytdt73HHhXN-*vMT7+Kd>uCzI~fBXsJ*aBxPN9>P<Hn> z*rVpppC+U^FBg$nUD7rc5Ef5LN^<;LRr`2*oIZV8R7}L&(kwD6e0E_bHYyC)I;rC` zf6N#b+T4Hq(A?DMhf_Z&%F8}``sB!um>L_NI(-U4Fi=Yfy7LNhSJ#(OX87H^#>QF- z3UV$^_Hi8N<p5oC{CqrMR#jD%Ow5cUBEtr*_C9>{059{Tw1PU6w2WBSjEIYge4cwi ze-7(PG<1V6xk?-{EGRQ{e-aYnl0&muP!=LPosfu7Qu3mUn`>ER+3Mz6Rb?5R>nv=V 
zxwW|t=ZJvd?7h3U55J3Wa)6Kl!#vo+<<(`>%3>9z_wTXaumXC7w6tVqUJfG@BLIwb zWrU8F#{c(U|K;xDgzL9sV%VXHv4Y|vLP7#11v#w!5egS48!IO#6_3&x9^H#J@jmGt zA-TOv+rKUw_x5o4pHpX<g;k<*>hhb$ItC|9jrC8RI&~c74Dj^^-~nSe3-*q7x%oNw zA3cait%s$-dQT_#y`SeEFf+b-^$M$+eP(`Q^q4n{efs=q@0Bi7Gb0#lz!(JjA^Ur~ zzWHN1uU@^peziY1z!wfaoOkG<np*0{CWe8*0bs`V(2YUrS2tFpqQgPDNk~bwG}Hi+ z?vA#_`Pr@Y)%CR%2;}>^J2Fy}WMxpd^a0?NRTSZeJ0qNmON-YwR-Zk8MoCE_bx{Jm zw2Py?n~Nh<PlR(wfDb<(Hz5f*ji3(oIVL4}$p9ZuFE?jzcNb3tRA)b=CzwMf7L?9| z0R~%(^gy7NR5&|g)>a@r-I*BanOK-zj>_A>4nWL>TJxx?+|${%wXwdjz6uA#+{6e1 zA7&ONEp1J(pbgCpYv5k)-8F&8k%-XF+7jBp$DuuR=RjX17;FMULQnuFds~Dvj0_I@ z6?O)-&{9YsCN6qt<SORxkHkbrfc1qguOKI5X=VaH9Mc$f-pk#Ul7d`VL?}8wHaIj0 zed7W;_{pQg*Pv&??lsrfNL~;Jt9j<k83}O_A1`;zuf`k!q^BGDx1(a2!iuuN(P0Q< z|Nr*h0xYhqTN8fn%s+GQ?eFW3A_)+PK@uWF(BSSAMd1`sKmmmoRRx6<?ry=|-95Ow z1qqsjxLZd$>2%MmQ->sUe`&kj-~4muvRO|(=N$Ig$KLhsckMd0mnG2xo)NU8qNHGs zHwI5(-fah}vjgl7r@?oU|E!1@+aaMnf`&3y;gp#A;4DxeflhYT-X5;9(&C_P&dK7! z(#kWcMg}M5`};dF7>#}V_nj3uLvwK|EzBJs9fG@o-jkWBiK?<9I0Wz^unOoptPTdk zBEJCN=9c<*Kl~6C9t`{dw-Na2<BdWV;5mY>6TEQV-oXa?IBQG7*8egqxIhJgvwvXR z1|62Xy1Y<WQV7wgxWq-c^Kj-65CJOAojn5r666Z_Ag~|w`=`&GQd3hw0B>djO01}) zaBY3%`;TJg@zW<AU2ULtw6rvi96bUY2!`b7L8r8ZCD_8r0aOeJ(!yu}_H0Pj35l_T z!+qbrdiDPOI}i^bkd7ZecI3z*&I1QHI1lV$W#6%TFALj#4j#TkB3Rxt7a@9r8@^*V z%Z^>U_w3!n!ph3V&JGSMC?o`rRY_U#^yyPan6pd(JU$t$iinEXQ^>#mz73#MOyZ*0 zMKP!kp&2?8FWeALE-pdA3u@}B#(1NU@Q{w~4(K0m-Mib`-fCc^54Q`c!@N@T8X>xu zlb7Y=<Aq3;X*`<-!RA38B)nBo3JweI8|;1d62W}$K(`0OLs~|P4~bZz2G3T57#6N8 zFeE6yq|iUeA0i;QUN|CpW5F5WY{kSco;rOJDjz`n_Dk39LPjt@G6=9w%grG<I|_>m zA3Aav{>ydX09+tEEoc*N=Dz*=kDoeyR!HbD@3H+SWcQxele7$TyVPc6<H5e4Yc~r! zD?0}Z$3bxwqp<X<^s3?28+UKqSOo|HRh>O207u-<yb0(e#+&zqV?Q@HH=LoevLfKe z$i%?SomO00w6ccG1l80~13Yl%%xONpqY%@5GPqI5wJXVcc({s*ikv=u5^m`~R*;vM zjf@We{g_8+{MEM;Q=@qWIbPnLX6B}9YRbSuX9P~e86H1z93UhpbU{{5MpH{2&K3Xz zEaB_tU0hl?J2(AiOMe7MynbW7x~3v1BoJheuAYvHsxmxBNht}`<Q3$A+o2JhF4oXk z9~eG1F1oYp@}ox&(eC>Bw=ZiOYG`g$6TG3MoYGlQ*?ou4e8YZ_h5HoyQK7@a7(N-x zT?d7BaPq^(9c)~jJUjpuLF>czI8pK7CeDP)<q7j_eehoD>orWkN7hP*#+Ymy=c4 zG&u!%O>K<5i*r%cuf2N!m=~9n(9zYw>FFvcDu`Yb6%iAWl#&F90^ot<U1%<OMFrFI zv&eMZPoGZAOeCcylN{{S)m0@VCBT;@B_-6<RWRC`SS-fCP+#9rPY3!uZLO`Qjny)@ zFbB*tdH%(#x8E%;FXZIsxO=&S#MK2Qhs9`NG&Qs|5YtrG&{D^n;X}hiY8&fwiwZn^ z7>0ObX<2DNvapD-1l(9f1qEd#RSh+DO${X#Ww_W+7cPt3gw1QuE-LID85)_Mtf;RG zi;gBb*lFwHl+{$F<zz*~FP;|`I(_aO+)O@xetv<o!qT!L(lUq7h_dr5vkK}f*~NOM zb>oPB9DKrT2hZ-|KB=r@5s^`qRXzOt<*PSuUxTy&p$qp1#421qxV)6KB=X!PB>)3( z17RaPcR+=O6~V)co?n<ZJ3sU4^(!>4T)V!Scq!i6)*33n9fO*m4Dc2a8D3RW31ki= zxwQhiU};5&iiwz*82;gp%nT3EUAw;4*i;uA8|}$(cW|;N+E~GJF*C;lrGvqOg{3(N zFfVV9w2Tzs^P9Ie{&D6gP&l{m-s<XZPf1Pk3-F=PT<jc3&=j0D(+HN<mLxmmB_`+& z$|_1GCPzVBeG;f>PXXue>FG+awpG<JJ1(NUn@50yUyA2~+Ho1P(`w|syb?Qk&h9*L zYA4r;qx`2%oxdO=At|e@qHApI?&q74n-ias4Dp(&B>_(`HzAngKN``F<Qp7V)7-58 ztMeWxC@GMf9NawU&QuqIl_j9dz}V2p#0bvZ!^<PLFmH5n?B@OZXsQLE`tIJY<g{c@ zZ%>i~iC{@EBbb6&ldP$36dylthL0C4IFcPvbNaXd^pcsAIXN@&{M&EeeE0sz^JkN@ zQ&rU!Q87`_6q!bLfS%jY9=yrcfn@7o=jP#-o}D=`GSojZR8U$BoKJFcG$okp85-d9 zb@hx4%&iD^&Y)4qmLwZf3&Pe5#}mxZYcHv;>K_{!yD|&NuWqc5OHQKuFl?P1jLb|m zuvkTPbtwf!Q7P#QViyI)CB+q$g=OSVh^uKDSn~>Dk*}*9;^;|j=59$x1l2iDOItYm z#pbu<*AAn3Ghe=XJ->LRw7fVnD%``%9j?@dWMgG*2@f4W4hA&n=xh)8Nli-zNxXi2 z?b9soz?L8wKsNXX_yRU4G%{kYF7QdEx%m0}<QL|)ceLac<OT%#eX2l3tI^0H`3j2* zmRA=4Xwnln?xQCU0fdds^*MRj35jvx5up%W!cl^P{9!pUF%J5I*0!d_rMVX`U;I+@ zaNHN)zJxkm-IuGX%X4zG0RNYg;-NusWLSnqYU`>7hI;SZyYsWT>tPjcMR#v!OKW{r zUV*)5tO}mKUl7YDV{t~Ed=}#_Y#7KTh}&~qcK0cz-6s{!N+`&wX<&`<1bYVx!#g28 zqqe1~yskDSD>FJFAu1t0G9Iz$g!riVc<_bXlH#7x5ko_LQ0%%`3{D#Z9E>brG5R<x zgmtWJtU#?kf&Sn-IR*JK@o_<6!2!X6p%G!1Qj@A`tH-A&H}2jAlHbe;_~OOQySGOs 
zMr!J7(z7yR<71+tBjVy?VY#xpqV;l1OIuTQZB<EmX=z0%e10<M&Vb}^+}?QcdNcFL zqi4_7ZmbWD3^cVil~-35mKGt&r3&-G%W?|xiYrP%P%W&kEZ?{>J~P$O+MJP-6%iBV z8|csQ_YDXQj!BG9&&|m!$WKg5jk%P7Tz))q)o|@#=(V?YwGU5EqH~fDjE^;UwwF{@ zretRYL`J%JF>G8YrnYu^W&}-rLlvy9G8QMMtSO@H92lDs5|hSrUT2Re9z#i{UurcZ zCrIPS!O3N*m4o%2(_0zu)}33RF<M$1K(}OMr6naNCSHnzM}H|fF(WG-HeSBme06Ez z>9Z$4(;?4ZJfE4Jg#XsnSJyJFj%l#a*48vRJ+^*r71YS(_LjPj|N3NYmz$<$#vlKV z5tuKJ*uVSk(UV8ubW<}EAhWxBI{>=yIXKh{&BAe^<@dk*)61=w|Ki1qyZ7!uQ{euf zf@WqXX0A-mU!4O~-+%bv)$7+^^cpw;xcsNj9<N`&5uVjTU}W+rI`C`I#Z1CQOd>84 z;*Tjh?iJA5En>J^$nb)yu@2tU!kI$z4+^`KTu@chJv0a;*4ES2)N%P|*4)|A(cd>V zHyaZdBP}g;>h!6@M~@&1n<)#?PtbCD2718};dL!dgX1F|y<IKst&MFhZJn2aVCR+= z0q|&MOf<0$3j4bc9{^l|sE0-e`iJ_4M+ax-W>&AS-D3Wr@A}OfYd5YjZT;itI&8f2 z;64PfTNCksw!i=A(T&?Tm)2JoR<6!1EzB&=&n#S-nV-FKbzy0J4bAeicJt=k@)De0 zTW5PsQ$uw_U0rh{;JIs{A1?nga_#NNCAYVIy!LkHwf7EAPHas(GBP~{$ZzcE$SEt2 zN=fsLjG_7k+PZp}IXLTET5B1bh-w&V*hSa2kB!W&N?<AMyz-oKwz~98r>I&=WMzKC zcy`^yz~su$81Kz@ZviAA1Autv=Ha<df`N=#Ub(t)>*mvEPrv`tY?&ZSUcGq@zVP7D z12B-gs6BZ6@bS~f5bgmm;1gPARzUyz;L+m;4<0{)jqtBOndRq)AASVTg5UwU1!^A* zmO;t<2bzVZK79B9t-XU7_5FKj;rI7H1x=IG*oNcUp1hi#5>`o~=J6K^3FmeFS<dV3 z6vgipF*~kquS>8X(%jucBcs!^ifZe6Mn?hFqq8%kv(w0v{l^8MxXJPS@-kZ|M-7aY zypn=~lA^qlBIsd7rX&VY6dn@=5L#GUUAlgqnOh3Um<f0TN%JZF@n-JhPw4_*ynOup z`GY4&rhC{9t1n)^{z-1Bw{L$Xnc9{#nn>osN6d7w%)ErmEXj`_-FkrJ2S-!6!I`Yx zx&@~+zq&HDI6pZ*_sKT%$HA3;vMV#pYoGaYKEc`Pw*LO&x`w2@!qB8tkI+abZ+|N) zl2ILFj2FgO*#?zeS%1(yvBs}KJs^r_Ii<m^<fQMOX&+V<k>6L`I<tQB{x5y?1J9cI z+~2`-hlM}%=>N->&hb^R<jdy`0(ewhFIrv_F^>~5ixn}A-Y;UjN6=uGurZq|Rm+56 zL8ejzLP9U4WR+F4^z<UR^O%`*wkA|UGp3;#H~PoN3d+h{y*$l{md0kL#^##_k*}GR zg^L?4IU~Jupm*`w`r5~|up2*3{QX7PBWcurhV!2$Q+xG>nM3W(+b;qhc1L@SpJYjX ziW~^dj|?Y(0v^Hq&6_J58;DC@TL<J{Sy`T5TtqS>Gt*^mW<3A6{N<#v%>NEdPBe6M z<X6`u<m3g#C((n$?LB=g$TR~YNnPJaSl1;ir|Z^}Z?kI#x#URl)+whH9oSAN^Qcjb zy>e|sD$**4-oAae4b;D|tloMQkkKV=8Of>O0%#XDjTXX3!NOq~63baE!2aKq=`wm& zcsmCNhM#YA9FSFIb1U@Vn~!_$%GRv+pKNGqqOtvQYIassd|X&`R9H+jSU6%aum!f) zHZ_b)O)wK8Utj%c>cGu}$V}Kj{Au>UkI3JW2l@4zFQ&`<m?HM|Cwgb53*T(y(evjh z+L<H|F1DFQ3hdgQJ1F2+Z+-^+?DEp|;sOfzUrK=cv#g+>l1z0B4#FJ+u82y@gd0fp z54NRynK_bmEr@bh0}&Jd;^xUm-@f&UuiPhXtwt`w(5nuKnX;ZzKdH+w^vtD3Ro{K| zd>g2LVOhF<j}%g=?UKr&=yJ|5R0tn=!8jaDK-Z7$oDRVL|5Nrjp-nchv;ukK78D+l zlAc>x-PVVQ_Fu}9iLQcQAm7gP_qO+SxA*rTpMBl1+C4bXH#RypJ2QQCae<i!oS7wd zYg*)cfIUze07Wzp_%EfrfBMULz(3)fDT+St1(F6@1G~?pcLeYcAApzLxO?xjs@cNu z%sk-Cgvj&rplW_I(fLpB1C!)CJ3$a8=NE+}CWEkXWcXWA+>C7<bns^9Gzgl`se@N; z+<o?1n-sw*ZG&;G)ON2HGzjLEFk?Nfc>?EU=$YdZS#|5d(`~H&RprAEKN?X}b!Zs} zRH!GgK0+oD=Z!+o83dm<3Ogu8WId(vuWAf-jE6R!Kyaow`ve5UCZ-gYK%5Wq^@{+Q zyt+8MvNC_|8W=2~2_-)Y00aiJ%gd|x?yugtcV%T|Ze<z4{IzR<v9*8527dFwgFA@M zc?{r0vuAFVI{%3Of3%-e+Z6Cm?|-Iu06e1M5lM6J{wGNT4u}AL9RWO&0eop`3P^ro zesu22@4)#_mb-?BD_dG~$}8iubNyozTmwVxJQ(Iq6n&ztrlIjsO)@#6dST=7^xFNi zTC@{tG@MthvSXfro_}Cw2cNM1-t)Q=RuKjsIbMnN@7|+f>9$_~3yN!4sVp&?597(L z;woqye%2sJKtBjRPh-6~rEULB%kOU(UnK)`6OuiN&hQM2ip|U^tf}iAANyj0`-bL* zq@+au06(zgl%%$<j@k9Ksih^f0xXo4muPCLTM;cM7Z<Lqu7Yy-m>>DG$l0oMn6Tee z_y}B|J^M`i&%cEK7caM>eN)CT{pAzAqYXmffa;m843A10IG=@egyF$Mr%+XM<+oGb zZ`}x=L(@}DJv|V;rWKV$re-k0qscyj)--p#y^}7%N>0c4n66K9S>MK^mu0QvTr$>D zrqMV?oftkw)*`%l{JLjEG5blieX=%cPKmno%%sxZZLI!PrLAw~oSyGlg8)`JXF$7v ze&A`{fKxa>Fn(SCf5!&=3pPX$=U`xAY3V|v_y<EDonBPh)ZP6vRWq?LKR7YY&wpai zo;|yng@u)ci;GiQTC%yV6-^*Jb#+ld;LO+Ge7$4G4jWr*Ap9#UOF*Y6+cQPZ#?LZ? 
zf7Cfl*gt-B|H)(K(9I_2-)9=?0?%K){_evM-+lk#0g(B}e?9y-KEqTpkMDg%J4)|A z3tBfdJd@#>G9FPiQw#IxSPeKVnhg4PYMbui;i^_3`KrX+e7I>y^y=kn;X*UCvB#R4 zi)fiDk>Xp1mL9%*>ll!`U&>m;rC6I@#gFy14JvIMTI;-W%hW!M?Sv|~I!)av(SV*= z*FC)r*T1B^|L}vndB|CVz}+$qyjb5edI2W^?K(auw7vN;UVC){{zWfhzrGK~#LU#* z$==h)J0d1NE4QqnsegQYE7hZzN0YI#u<QlQ@7%c)mf@3+kGG+@5zsO+GkxgrAprc> zUw^HQ#jM_Y0JsAEu&}xcNJ9wr`t_fP9Hya!k02k7Z9k6cpti~U&z~=@E=Na2h+h=7 zv9@~s{SRoTu!;81-2U(%>3#jyt<RDMH2nG+Q^qgNENv#cN5`out1HaRO=V=I+k3j; zVt<A0q4MzbWOH{nO7hX^S-#Qn6u%%edeyVC(K0mV*K}|Qt(;iCyK?v0MLo|WN@Sd8 zjjB_@Sv~)R(&2`owX)8|f$23tDLh0~$9286$jNxGoVnFo+lc*33N1MQs5XO5-g%#z z`zf3+Ki2!WHshF<C!eM}x3>3J_-GdMSa~CBePq^jH(EeQNMcHMSw&-K*T~Gwr>-2u z{88ScfOcSdDJe;Hb=Cd*_XF(%>_tU|fhPg>G#W+V%qdAp@w&!306f_I`ugIvYas7I z)o(m}1fO7lKmg=D!1K#jcb+^&Q3?v530lN}z@NW;`||z!C$C;Tdhz1nv*%|8PVd~c z^KXCuJ62ok#~=R}03KEB4_~}|{@wd0uirpu@CoWjYmXk?eDDw@`MXb_UAuD^QCZN} zfEV1oyK?(BNG~SfXW_JqON%(UIKSEP4F?BD&(Ofuyyvh0t8>?`ZQa;4G+5Qr21-65 zCol9;3Pi6CUVfGoS0jjC%`9ZG#z%F0(yIn<J^41bc927wC}9$*$EXuEiPm&TsP0>? z?p-QxpL_TILqkg+*BQ-SqWE)00UFMioWsgK&3?IE{!Hl}nLDIT=Tf44qu|1)?R`wk zgICkzs0RIry6a&TDxYbDfODP<DOA(Q#Kg|g-oqPIJp^TiHTCTS{RrmgP|VK_jE^Ch z-?Mwyu3h%_c8}h^x3eb!)$iSl(0*uG2!LmBVyt_xzi(t1Dg%GEb+k3NwX}D4EZw}- z-qRTo5e8ZUfCu2ee)nEhRyyp}+|mTd-?)zst=t2kK6&%*%GIkm*_m;%QLSw)w;tT@ z9~yuj1nFFM?bOszyLS8L!pbtx`?ELSb@p^avuUZx>$mURfBq81{OrPfPk(Puf8W}z zThogRaFnvLl8uLtC$7w9W@iKh_`#N@)+UI=kp$mU6JcSYXy?Vl!(CQh+SYk_aB32b z*{7G6Qc{z>eHiH(X>fs4OIHUcC+a&o3To<73X3CB(|x1jKoi=yc_JilX^oJ)I#Gd? zII?i_?(;X=qzEo)8!Wv_lUjNV!|+SH+&H{m+CD!pa}AAHgTgb}`4m{BtVB$MRU8r$ zi@LTE`<E0Ivp^Pk=dTo8II+IG8g56_-43bJ4yjUj6rE1syj6WG3_@x!<|G`!%8KF! z-E?SDYI<>5RZHstbF3cKHUMK@CeH)rt!;?QckZ^dw{acd0;-2Crl$DyhmVDXF0iw+ zv9hx2>+69$K6B<28yoB4BZq^61K|^vFL$=ze)giMxbWQBGkf+TJMP=ZZe>M4;R^f= z3ub0`z#h2LuHCy19XgnvmV6LN4!CzO*xtSD``Dp<6B8ql_j<ZI`}XgHYS8tZI>ld9 zTyXE%b7;ZC-HnZn4gO_sPdax_0JN5lj&?{$5VXd^!U9*oOz^i~-#~Be{sRRCITmJt z_E}k3_A}ox2G^Lse!YKuR8CH2FY`R%|Kj3eoxMFhV`HHGvdStz1O~+?p+l6GF0RHT z2dtTeBF>Oi-HjewbK~L5_Oa!I3ig6HUp?<SIh*v0_~3%pE4BSAWgQDQ?tOl_MNQk8 zjbDvZohoG+q3x2~Kee)r*gszqvKn?t*?p}*-enMSSe1H6g~Fpu=23D!px|&w+1Wm} zDZYBfJu*|%5UAeC&V%6{85@_GlUq>@!7Ms%2{K?S&jaQcH*Nq2o<4mNAP4lTs-^;% zhgFa|An7$V)vrH#dWN|QP(Q$Z02TmF{li0b^|k0KQzq>}orvP%3|Ixix3;!|*3gY# zfAbAQ5T&I>0ClK{pb`+fXE!JuU0oehQ<EJ#H*3O1XpozSyQ8P;@$0uCp~0{b8b5G= z3o5|*7#bOvo0)#~x4$8k(Xs~-X(?&RQc@D%eDlp-X5(-m^yum7sle*O!h*2x=6UWx z+L4eHZyOlOuc}GTFA7gi^NEb52857MC2xr+`IG9_e0o7;trJgPzw?YKW0$a0cghDb zFK8HQ?wwyhxLVP**gU+t^(~Yu3#&&2al3^K`E<M#Y-7wB*$<w+*aq#NEU#X_KBY$a zO4^RgJcfr!?>q{Q2juO!<VgFZt&M0Y5oHsEsAhFDdtCxDfCJAF7%VBTu(ZChYj_x_ z{<EI{h<TvFx%G7sVWC~j0vdkh-hBaq)2ItzFq(jQbQR!)LK|pybY}YEMKKic%1R2} z-d-%MEU-HS!l1xQN{Rq@s3QoRIgQcMAX-^K5E>p90#py1p*3kKiLA_Y2E&tC6C8oX z#>PriL>R&L&Yk*ty3gNz2n`K}RpxQ`poqoeO+d6OD9HPRun7%5D{uyJlU+OEv}+q` zEiDPa@UR6K-ra*991;ZAOCs5z=P55Q3t9;N1r@+4{6j;sE2?5Mvjbui>A~TS3_s`` zjcn|J<P~tnUn{%f-7{vdJ%s301m}KIgAPhw#W9axn~_jD)G)YO*12$X<L9~NJ-idy zc@<b?tcCHx^48H_an;+P{qx1aJ!!YF**W_Zetkbq8Jh!gqyutx0DCY#O*d(J0l#ye zAi)=ds(Mc!Q1u{IGfT^B+Aa@Fj!z=mX7niUk<XaFaRV?fCMNO`^Af*;dDu%Ei|HR5 z>Fn!4xfdi&dvDLv@4g4LLmlV~TH9N-v@~E}SUrF4ENJ^@@4kmP7o`5i{fAHu9W2tt z>A>aa>Ed7+Y65!SeE1<aI1nX#P&jv=y+AR~l<2!;Wu)sH>j%b0pjW#2?B)8S$4j^F zxVq7pT)*q&$rG#h?&lTcv9htkI(XK=#2BbDFcA?Uq;KNb-_qF*ex$6dh@P7f-XyK0 z9ONG(Jle%K(ALe%%*h3mJjMu=ybTw|FQvTq;mfxPg&lC8mF+V1z3T)GLgg$YAbPFp zS*q(_dGp=RUyOYA;@dM9jaUS<4`SS<EyLw)qMLhXwn6(R%ayCwj!N6eIwl(jmLFC^ zNFHH(CFg@GE+BOd=m+l6p#KfyEv{#yYi<El@9Z1s6BQSmnUhyl-PqZM#CntCTlG9( zes+CbNbmxRc^Mfg(8ixI59nQg^aNRDY6%eb;2_Jl@5Cp>p+W^{6J(5wi!(3ZQB-C` zMMa{!11*C%x%cAR<r_CvZ*E+>eS3ao89g+#jKyNUeg8pNNDx|vRdqELaE+|2j88;8 
zy)X|^EvjEY8jZ}%EL=xq*wEwziDV1C%)vtkkzc<ugVy86kIk$sXTa_sG2hYKJ+-t1 zLhi`XBXBYt9Qz3bbFz!GxcEiXO>_<PVzcuhbcNo~&chq-6+-fOGdUfT-6}LQkBqUU zJ6CT$ku>x^rb^TEs#SH$KaTNok8ebWC}-C0fARa)l#B}2lWHt7HYao$5`-`l_l$dw zo^9jyPnPJ^T8D_5!mg{Dq-ZXA2Y@<6vWL}Okue+%I{P`?-gCMvDpVCiGh<sjBF&xR z9}L}hVorWRbuB2I!3kyz`$^B=yft}s@x+PaC=@XmEuhe?n7{V$$){By$*VVS0qlMK zd{71k;O{_$|BhYEh!Wa#r@NxSW@Be7Eh}EXb9eFDI@rp_#_ZA}dT12$0CG@1yO}M+ z>f<->b91wxr+^9o(c!6ya3<!VO>jwIY;agOw1$Qna=e{8pc*�mMfT%<}=}Ge2UU z=O9mKZ};rVN<&ivFg!%rpU)Es5Ri^WO9va0ltvE<bMW%DqPpSjoNxr9x}E`-s?&L+ z;OfqqN8i4^RNT2=(n{Gr8-&eite*lgvZCv1b?*{rn|JSjaV{5aJu2%N%zjnMX+0k? zvk=$FvTe}*$@1>Q56Lx?_KBC3+;R`8xgJ)d0or*r-TAaU__5vx#0e~CFuMhfPO1_? z)k9Eb=gx2siGWT#rLefF^>TY(-&V}8+`U&-Ta6CUBU`Mk)*d|kjQK6v29-LCH*bEz zJOGKEogG?3?dZ`XM~)tjhz#dDh6oryV^((N>h0SLYpcjh_!~DS=dOIlyf)_XcL?Uu zmSOe5tM9UNK4Tty6XZ9Fc`-53xog*^mX~vKvsqaYR0G;$Vxv>jlA%7dfbx81b_UxP zo`)?BEsY$U9B^sSBQQ@7vBS(?eCwMSdq>5SeFKs4NhcQrB1zi>FM_f7pUQMfSTQjC z^#?DM%>((>-9XhS*k&A5bE79THlifI{@_<0P)T_UE5GU<X(EpnUD!BKk`Uh7Kf8_F zKUq?0$Bx=0?<Xd6>IL#?&;jkoFkUCL87Fame5y2-b2__(jrNL}D(d15h_(c0s-u^$ zS9o+}YDR8lb$MeGJbe`Nwzk&y-@G?4)Q9E0$m9tu8JX$8<N)&Bn>@b-^E(mR2h7i` zuEZzCBjfpdc7v*K?P!CZVq|&>eamZhdHK{y{#{7(yDTj&?!A1yc<tKN>(}R3SKz<L zj&16BELQvF`w!ycVu-ZawM$l3`oZh(5-%m7UlTx>3^E3SP!#i`qQc;45N<n@olq?V zVa46o-(}|_bJW0=O>L8zw&@W(+}(o%v#YB^Q<LCjPyxhJNp*EqYg=YX*`?fq2xP1# z26{js1Pd}1RE>_gg|e>E4rQvSabWY{;)54&l1e&3I;uM6>3CEh)$mlZjV)-I1(FAQ z`R2P{c{<;I`-=af(cW`9`&C?yV?6=(7G9a#p#76&dhO2NjU&HOblIsy<;5^gV7*UZ zeNO85p2GQ^(F^3fXtqn(c*jLcK3NlO6H_A_J0g|t;uqu_9haC}kX2fKgo$}T6G&W! zw--A*8vq_~2<jT31yI?dZGOV@DCPm-NDRw;0BK~;ZjdFQoB?umjddNpUC?oW{D9Vg zT{$^91A_wEyE|&@s;{gpU%jz$=JY9KT%9?dGdeSEVPOu<g4p2U;TfKsu(TwgkOsL9 z#|;SZ`x)jPo$S%L1AOGp%Qx;Gbo9L;z&xBoVM!tIIUE;67oG0fH#$6j{Tl3x9zsJ? zy`ZKpqqHI@HlF6=M-PdB7~hHls>V^*f(W7OSxq8~3OyjX9vFW9#v^HCA5iiJKJ^l2 zalBfde(CMVa81X;$kjW)`R1gFg*W>#rM)sX2Q*yI==sQ7hF`n&U>mo8vS_=cu$(j8 zEo8D!%H|Z#_q47**crWmbA};&$}W4)<5;DL?9#U9R7`N@1XFt_TQ@ISU??b@`0QNh z+>!A*mc9G-v7^I)08zkjenB2+U_kFy%x}i9TQEO%1<}=Vax&lS_y&djQQjjbPo6-( zV}cO4q`U-LM!#bMZvrY75fK8wFWtHWAtZ=u7Uu9H_!`ZX3fdbTgg<=b5GN-GvxC^_ z<LkZt@R47D?-tCjfir?|+RcoPp-m9^Aj!}m7cf7$u+TR;g62>|!s|VIPMzY1%K^VY z18_JQ2zyWPpMbqio;pbl47GOk0PUwwv;}5R&@uW(iK6WgJ2<!T=+$?gu@xNBHmVLe z+VrZ!YVKHST1or-N0R^TpG4&p)UzE^+9_hPPnB{+i!Pw=8=6|b4cb3h#;>e$OIiP2 z!e)nr4Tr1)pdEd?7%+d{D3nW_uv^50Tgi!A(Md?d0%wj0dy+fDBP=Q?E|G_a8-2BG z7xKjoGNvIZDPCAyh`x>pp8#OM^VeU0t){Me{qa*|^_$IAP(JAQxqTx;$|_3x_U(g4 zb}$Rnk&%&_zjh5gA|TKYsGCX2JHP(sYY-TK$t!DX8Ce<NaXXoM2I_}|246}-a-t!P zz~9cD92^|_`g&7W7eT-S^3i4m1<ym96Z3OZSFb85DWH7;Y=`gxP97GJ!O8J4=m;Pl z0bhpy!}-Hy_K%MaOwZs=@u+9*L<$^b2Z$dc)eTf1BIEJq%DRU8Rh;>Bd@I@}?>~Q? 
zQ#*7-*%A5Tl~H>N>nm&+kWe~YKd@Tbu{gAF`}aJZ4<A09l`>^Lt;wq3z@<h#uI(vj z5q9nNHr4;xV(u9AwYc>GHToVIdu}C)pmF$lqp%Cc;X<a-XZ8G91r0dm>`rL89o29X z#Srx@tRPZz@bZIr)`(!Pt*c{TYy@EN@N}<jsF}ZUV`_Qx{doW<!`susk^ssF1U9$; z=pSU&0$BwKG4}Bl&G{P}W#y#~4t7>VO9+P{u&r;ZADcto=>>?kb#+i_6!@2wl|^uH z&?xieb|BuC_BIDcJ1Z+oh$b4E8=&K8@96@eZfRvfBH32gRxMn=F*bK)a&e)urV{oA z{Sz1*0D=lV=;Y!eoU?@m!O_vav%lBR-v=3lBs&dGj57yz*3vW50Qn%3fZ^K*2W#3c zSF~P^NlVu?G|<r2R@Kx{$6`&aZ3twlp^crknT493!69|$e^a2jMw9^Hr&jNZ>3bbg zbcWDX&N}s=I^89@u3>1cs{3kf|MIKve(%d;W<)=BK7~C}Rvao69(C7aSg-ipmTl1f z$+CX^_Aylomm2+;He;W>lYm~J5I$1KBvJ?;C2AgjRMBa#D4tiHc3zKh8tWm0BN|v) zgM4*l_%R}5qcXD7ODjs7Tib_6hG%ET=6>?FE#MqD6CjQJkYsjdc5Q8O<Hq8~2E>)4 zv(sB^LItP^C`G@|-TL}2pcH9@X<J`tN4KN9p!E^=L2H8RF|j@R*{$HXH)dB?w!Q-l z2l}}C0<;V#23tP;U<9-o`U-vD<WzNATS0YQdQoXYPF`3_hEGg_TX2M<Pau)zZbEX@ zF}Kt-Fg&Ma^Dpu=HDXxf;PS1fuj~WzIAv_r9CNYs%ERjJ>W+yyjWf0VE2Zu8Q>*uW z|I>*{C}iVP+%1aVr%XPe;=-%tZt9x64cb3nvWr^xDUe0*p(k{_xfESQOruc~GmjHC z4rdcK;*hmHhx3xa2MX%@$QzK1ZAnbn`+I~&h9oAVgZ8zT+mX3J{^3XBzxdPXxhuc< zgS%h({pslMtNF7EzqcmywW7hPsg|DJissh*>e{rTlDN#A(8OfVuqb4phWYgq-p)za z!dlb7NL<_ezbnv04SX_dN9Q*l$L6<lE7*&hL>l@woYM{9$1=k5`aso`buKoKtiAv6 z2j+NbZ0%>`SKBRQz^drDU)h;Y%l(wLd;i${HfaBRF?NhOj`LL@#UD_i9m6ohEfPh| z<3!BkC5S0}O3r%(4GyWgNSX#JT1QHo`fK8eCM0_+D&5&90EK;Weo<~^HSl5g@bIty zlJ0l9!`LrQIB@0X?aci~Yro=RqhD$Y{ZZJ!<V0IvUqw@MUR6zMQAuoOPDo;kcLXvr zxj9f{@1$pC197Vi*5qH6T@P!}Bl9{(uHNmRy>U*5&Zj~FRU>VAi9?ZW!pN`cLE_f( z&c%&~f9!>^t1H*J&T8+upvR)%z^>?YNS(%`O7)8=+D7c3FE?)86VmlovQLz@irO#l zbWT4=+$tH6FJYMkpPXW5EEmlLaGt89C{4#Wc{3j^ycP5NS-O)q!uFv_sfoD-ITclv z%`KPv`k9m3UHJmYe>xk|*zf*@_2^$`V^@CLM|#J{n!39ynp<)!s#6MzVluLW6OtJb zG1Pz%dk=2_yeR^BTP)rT;?_MX)LjZL)bO&N*_$^Xe{0Ri;F7h|rj@EX<#8yGB?#eh zXz1F$FpPw*f9#jtODi|H1TcX4-SYOV@(v(xxK$`xgn(_t{w1ZVaX`!{z}&y|qInpw z(|LnXDeF{m%VcSrw2Nl3`$1AlS&A6?>bN8t(9^Z-!nDlnjBH5I#RKm_NE;Y;DK;}Z zy|}EfrmnuD17L$F1cYFJ%A%j=$@%%@A!ENArJuCTADfZm*Jr9>e)dm{U+(XRh_$$` zA*-wc962H-Js>s_{F>qy1d_(Wg=S*sh`dE+j911Pa;rM-Q*i~rcTR6C-F`}oC^@9$ zr0S5PNh#)5riqvY`=+(m4Xgmew@+OA{`X~#`HAezuVXu|yi3@4m%PJXd3#PJrvpml zGa4?hU%lBz>|ats5-Kpx$+ltD7mNZq6r9BgiPF~TQp8kQyKFJzFcu+0PB}X{ixBI8 ze4>95jvS|DW@A9Ku^>|&ynNjP!~A06BU3Yy@(O_-t6Ezx_w^x!Klc-y|I$=Be~6`j zF&y|k0_IEg(Pq0xMjAW2K+k}HM<Z6~zkH+O=phkgp8%3O1039#<Nz8TB#jczV80sW zD@9j&RC&kL#>(Aieo1u)<n1mRh3R?K9#nBXiSu!ZuCMIATGO}OKE3hc&41*FetLKh zc3$~i;)LCDc6(&)I29dPWJre;?VoJFRR0&17caimbByu2)M6D_b_(YM@vM|pimYvh zv`xCaL#_bEV~?QzAr%*WT9R8#B_*;FPfx-S?DQ>(7G#>8JA>*U42n1OQc6O0E|6qN zePcssCn)@(>8W2q<JcdWOlIuQk@+u$!vm8Dy+b?#^qyT_nOsl=eHL5@Tn;TT6!e>o zn<s%xH6c0R2t+L-6GfaMyE^rM$hqi~<1bHK2ZoQxZat*zbRNsl^{nPqcR#4^X5n8_ z&@v0ECZ}#9D7Iwd=AA$KIFiyT*!flW3TW?;w%H|PyGxeDCTIV535&x@_RpTb+=lC4 zReFYIOeo2hD#i>w^A2ga^QgGW*=5U;GUZ5_ijMgwlwI}+8J*Yjw(`pj&S>#VX|nOl z!CE-z6K%|#D7JJjvTq>B095Bd=m&&Z+1%RP-Hk|{-xEvD{l}&F&ujkbxX`D0IKdw~ zhlUzEJ3+nYSJeQ$C+6fwrDnh-Fv6ojz(W^i?doCfL@}}jh9_zn8p~si_Nlx6s~lO! 
zF}C>f{MFk}GiwHqsZ&mBx*Pg7h!}=(sZe!j>FG7&0Qkm{b-&mO&?oy&smrL5T-;+T zYdh{g{Oy^pv-9h@1+)-hbJ6@8z&sQ7JEVx;Tr@wbX!rEV_N&EzWr@qIv-HZ1FB{b& zryNkB3FrnWI^_fM<?V8m9P^LJ*|SMlh@1G+qRNxY`=j!@oI~=pOo@O8Ge<JO!;#@n z4-WT_O#q#9DYpO!zo@nz6mrMF06Ks5&tO0HyMx(Z9IKqW@_TmqG_Nc=E(B<YJ_!5~ zrT3ijDhOBOvU0+bQbDAF=TezNG(;-h+|k9z#ts-BV`L(OHQBA^_OG&JJ?Hq;>WQA| zjokX-vpV!+YOea;buyNh_RBjd+b4$R_SW{VAi}11>FLWijV*n8rk<PvTKf*mupgD@ zzJQa{up`(81cYYf6gD(p9vqpNTU)=`**#iO(?R!6lv1??hG#pWvQxzPD@m)brL4XI zv`bojC1C-W7gVwPaeG$czq<HF7uyGxL>CXKI3(_qcNW0;D?1e^*ykuZ<jUEmb6hlM zleCmF4Gc_gDZ4zAUNuY!&B2=6XqyorzO$sb+Ix6Y149@QF~JGRF&SAP6>`ceOB<SM zFSmn)=pP>+nVH^%-$y8p{?qK!6eXW}83gSkBh5WM5OkE(H$e1(&^v^yD7{C-xCVtf z`vllA1w7u~$-tU~HM7t(G?dXX`*$VRy$UY+E{X9a!-Mm;YrAL9V(G`#+(6PuStPQ_ zIf~*#{4+YMdX}oXua<Q#tloM0X{_ev?fY?;igocGJm+-){QC|{!6(}>MYdDw90HgF z=dtXkHQD%8*!dLLjw!L6*4inG|4P#GZxR-$eI)_QmMrIRp1w)j5dCMAz_@a1L}g6z zunaMJzk>5|O;06<JdigkPK9#TsXWrQd&SMgO#=K=8#^b~Dm&)Fvs#SpTs4gF`b1kG zd=S7u_-;XA-U#xOqSLd0MYEZlU)j>u*wx)NG&lgXzd5#ZW%L)+$mUGfzd4fg3;TYA zI&&`Bsmb24QE)nlGC*+^*4Be`Pc1BgSQW$!xTkkytXojHi(e2z?=*LUGw2z6X2fcy zrfYa!oAfs&+9@o<$~Qm0ba;5-c6QC6xS{u19UnbLorqERJ_TnH(@@vA2J|g5Ao<nX zPrf)fdHe2zgp^V{a=5gr?Sb<;fN&sxHh}sG71k4~EGO0WT+sXaqWQle#BPZiU>;mx z?>QYShma2+e%Qw6KdF#?v*<CkKB?^p?B$(~X?m%Ui-7QvPsjX&GNj#Nrswn+;n{7# zX1&ula_h$kWFHkhBOP-~yuCA!B#F)d!UQ1sM8-l$7?Y8mlvkKhQU=Poj7j@#eSJW= z5JGNc{@)%&|5Db2&tF>2&7y-vXd}bZlhCuY4-9~#RkyamaSN(zL2;!N7RP7jf&dFi zO!ke6a}SNA_y>b}2XD1N=-v4vy&Eavj1Fp$|C>DRw6=$RSVdg%!1UUK$egwlnp8eD znt^xSd0YUyypseW!a1t0<nnw?-*Rck!orP*zh^Z5+Vxwh8I^dV-+38x&a>LU`Jmy! z3)qgUu%1+BJ*|b<8O&Y*%(06msTmd9u>7Z%ckkZ&M3;u;bTg9M6l~+TlqiQ(TooPj zRGbS{$;GNppzR%Y2^*iqc@{N|-F^0YX8l3y$eL$FhPt7dmWdf)1_<AZ>INc#>=Wo7 z5&_TxfB~r{WamMIn^RF$R9jcx)Le79y{V_CV{ou{Yz)|VcxL(|MSsfgfPS6&Io-pj z)DNRuzPCj~gOQn;!Ko?0ImmTj^``D_h$i4DK<sct@VI2AZjVSw3yM#IQ$nPBKnV0o z5QBp-gLB@bcf7f}uD&RixI>k`LxCb=7G&X9lv6)Gy>_1-Q*l_y@vJrj=U&B+VX(<N zNn1rxV;hRwt^nXc(o8I)FV*}3)%^iUw0f<zeK4n>-Y+=aCol!EfMkYWQe<piZA16N zN88hM|DPx~Z$BV<q^DPpd#79m*mEkm@Tj<|k&D&HMewOkDdAV4>=HH+GVo3<@4NH# z6%hW!>ivj}N>u}MRXsxpY>h|`W=<|36@Ws4Na-OF!1(~Sh?I1oS%4pa5Y&EQO&u^W zgp&;&9W6b*?F0RQ*xvCm&_4iR@B%>c7rxb<oq-Mlm>i;G069n(kW47f>)P8vu0xxk zrgAH5GE2*W*b{T|V>5HYQ_|rC;55JuX#vPkq@9O1oT0gsiwVgAw3&{XrIwMgimu^t zE$9EP;Krp)RU<{4`V^#94h=2bBzUFsDA<b{1RMF*pVtilNdtg)iK#Dcn`6F3wlH?} z&Ng!XQ<s6!IZL<Hr1BA9dnNk>ZZ%h4b$3<Ad<{woz+Q`1aURRqEn;$9(={lq@%n=o z6D#+6XKyB#bQ;>ylyC-`pztki@OF+CWGcu8Kt9<w5Oh8xJUSpYF*GSPDlHSh51I!8 zNf6i|djO>n@K!W6gA3HQcYyqE>h6M&6MO-*(B-~<Ky@2fUteo)Z*zBdV`o=GM<*yH zs9(|CQp&_Rv;(bWl$56y7GKIOh|A1DWsHAp0)!ZVcB+5yC$tmDR5Vt_nOk8@Ox5)b zrF6_W)v141qzPa>)Ewe0{flb5uXInYt67I}%iBwuM(cP~^J{s5q>;Ccb&0MoY?(y? 
z-!XaphaZ0Yqa?pw{%<SuSJ&;mGZRXO0<(HFnFE;n<(y<JlQFK9>Mq5av~poxKNb;V zPC1g9NBZC#@-2MF)Q#G{Wm;&4nh{Y+M<0L${Apt62!SDyO1E?OcJdAYMFH4@$PsV| zXaweu&CG^~67UZ|1^x%s4QeE>ss<EzVRaoKxwx*OxW2K3X~=S219TAi)wO`|+=^-d zIq-Q}Q3-S(iGcI$T!@{+Q_?;|Jv7oaC={Ypa6J;;%L@6$2gTIh+0e!gja4=D4Ha;B zUQP1<RCL>~OclomsyW8{B{zbA=h#`_;pcF}Yx(jXUb$~*5<bde=SGm_d$Q4QZd z-!^vbALpytF8@i(gGWz8E>%V6^~976V<~ATbiDT?W0(ONOw4P!RVolujwm=mR4b_K z6`9kze*ZZTe#_{3YUPNN-z9aty)w=KkgsQH4Qkoc0f}&JTs`bPHo@-}9PSw!=@S*_ zADbAQkOce>E&yEsND=4-K$8GhU&<>4cK|$pGFXL;0B`UVkmwL2!!EER#LLhK;2awC z42uFm1&4tC#lg!Df(vU`4>&sLc4Lx*0nrvmAZnYKY8n{H;0U~$PP>$->?(AAZ4Xg= z5Gk;zu4k@eeASAP!K><W9_NdtR|*@1!nN~ayfmCs0PvOFSE~^Xzc_mJ_OHy{v0eUi zhP3a}3tJ<y+Y?Gh%>7C(nuqOEaN<#-%2}rZ;cL<=K<5DAvtKmlkhQV)$!;HA0;w}F zf4gaTEwN<KoEo8GWTU8KpkZi?HMi8avIWSQI+6i?L>e7f7r58iCjc}*EijZ0E)W(4 z%nUK6Uraoxpuo6GAi@F3K-!-yH~}mP{uL0L0M(!}^cr4Fkb`^zoCBqUfU)=Vf%p`T zVgXQhasi=bXpIm%*3?4F$OKg~{92A*E4#5M(Rejn&**xq*hWWWwT&&^2}y4f)%QB2 zM3%F<r0rUMSk;Y9&f$V_kcoF;WI;awzN%-5IdKO%JJYtF`G2s~v<$@+^yD_plB4TE z-XGTVV3&1}G>g-AtJ0vBY16CJobq`Uopy@i&*-=ZrZg;UJX*c`3{-A)&r)=LkC|(n zlCh&a)=*8)P}|fTf?QD15IF+#gQ&KmxPj&;xijoNd>k2m-~s?z5Z-`Z*T7JKG5Eqi z8f*tQfjShwAhJ&YbR59v(1QTZfzqK(2ql2k@gxT$8#_HK8$`6@&4Jh<N|n&DI;=_g zo08iedGa9@7hX;Jd3|4UaM9$--R9vXEm9<?cL8k%sCNaMG){RZ9(6Z4n^@bB%JkYv z^qbb|-lg@s+aoIfkC)Qg-sqh6!nQf@<TjjZrhvXbi;VqA3<I=1ARpse0kkiK^JNn= zW0xkX5W~_c`#|bUt=(@MT`%ich{)}+@JdrQrO9HAm2vu-K>HuTZ$xr{SQ79F__uU% z1?3N*wPkt%1e*Y3U~DjeG;|8+C$h!E+s=bQqBFo>pg!yc`vT6*oyefo!J$B|>si|1 z2v*=_7-JI<?Mk}FLRdQvb=R*HXuD*cxa94*6rGQ2(JkE5TZb0Mm+yMURSD{Pf_hiB z%hL0z6~OxLmLeU|^3-xkr6)AyH6amedFLVk{@&A9+YJ9-p^Qu}rj>LTG|$GB4qFA3 ziJ69RD!Cj`q=@5V0Q121SdSW2$6S6@D(gjaE;(C0a(rp)1jM#u%lB$}m$K@of-*a; zee<+z{N)YoWU!`6I74-PBhb|#djS8y#vnh8ZSC=PPJmnR0?;JpPA&vzDuCI7Ok*0d zOhB+rfqy|A=q#YV36swah$M&|0OwdU3uso$&{$p1P(jyJ6iejSbYxMc{JR`^x0Efb zlr@XA4KO^G6jjzdHo9~>G_yq(?|)dCd;#aDL$8vyNIalGW>;{&U>vMZ&kE1$1GlZ| zT`uidY#dy@^>F+B#{VJ(;k3t3o)^^g7c@`jG|o_C8-M^$>H4zDIUZDYm9t650_@$Z z0r@KS*(cR#EMjH{l^k(Su{m|a(`)y~mhP5bo{K9U49w_sjj1#9%2u-umNTNr>RKvb z4Ha>QYWl{Yv;nI+rsg<v3xKX3ID)0MzLkwW(H1p>j|<=+@GtNbsHS6P0lUDC+Quf( zgFrh<dM47kgbUjC2Q{cWm8gH0ar|1GxKq?@mx$?JNg}Tb*@PTl-99<Ce8)GjTGGV# zprR9q87)eQrb{uX_q{UqeA)~ZheZ4E>ZFRXI;Nfh0pEA!*4ua6Lp1;8%7+gh7FTao zUY;txJP&wqil|X@N;--2;ZUORs=3SCq-(oZ>v+}x@|7KO&S^6^B&|54i6XilMDMJE z#?hgLTa5$D@x}d~iA}a473RLhri?s8Y9ioY+JqsYPm#pgN$U{gbnpr~$O{0fddBJo z#u^630A@`?fV7Dg@`-FwH!xAvH&NC#R@BAI;s_UYY|mpId9|oqD$X1Vj;zwQyTmMZ z3mNY{ug7v0w@1*BOUBy3J}R@UyRmP+uz8Hg$PmJL@T$=y%whrRDh|1P>hAlH!5L3Q zQoL1QSybUbStmm8$nUs^x9R=AdU^ZyyRMPN^7grs_J#P8VaLc?RfkJQF`oMsoR6xz z%UC3V)B)t{c+{wqizG}U4l6jZTr@u*M^d&70oh#IHqk!5R?$8alyRBhSE%Qjp+im6 zc1gxKC#gGLQnHPawG6vx;xDA{bynB?w2qqqmUc$R?UXj{B!-HZHtnb;<%kC5kUE81 zjdDPR0>@>Ov11drVih!EIjh5ZT8o`ujh$be^(^+dEK$cYFgm@yb#S4oYnBpTB5Ue@ zM8)~Ix|=j10q0(=WShaO?#?FX2(hXJA;QAH1jGz@G>UdmH8bn?-+%vMTfJ`=z`(}c z$1TH1zh82B9&lpjTO?-_dsNGVP1fO{imSB6C6GD*d!|(@lQIRg-1muFu!s{5tCAHh zL*1gv^6Q721{U)g#(a_*%)N8e>|>=Y!o~1GVkUtXnI>ZBFJ$0%2InoH>wN;_c~ryg zuo{h9*@Z*SVV|@Ot2lvG)O4?qA>f?-q&oX?Wp+MAwi7B`=W$}nB%(`nT47u7^!28p z<)HK?j9t`mb;=<{r?XgZW&127{f9%&QBX3{cHC-iVrF4_^lXpB=KSVaKzn7^VsYDi zegE>c`_Hz~zFoG${{8pgUs=1`G`t4L2az9;*=6oqBySsc42#Sbby$@yVHO9<0Wh!Y zRSTv?DV8t}KdwUIkg#NvwB%8AkTmwS^hys(Ys{`4Dr_E4DerTSts;14V(elh@qvPR z-X}5chg2z?3Xc2b?AfGk_ec_VUbNULYPw6<WRI{hyNEHTsPPdAGcgrA4FkF@H6|*h zs-ktgp?@K{yw5%$PmvIGQp@$AqT_KjR~gGh9rr3Y8ChcLQ8jl~8GGP%aYBTyTb65F zV@lO{Mb}~lg7!I}_pz&Y-XbaKw*kLhzEIx1|NhGL2Q_`mfc(Oi+33On=cqa@a>^M! 
zKW-Hor=rU-O;0I8f)=$*&#M*)7QnAbDUl~83+wtHQE=ptBJKnH%i9THTmhnH^b~Sf ziC278KuTk9dUI$-3s`V!V`$o@g{0I6Ce`>SRE4J2hi5cK=d{G-x5pH82BbDP1{dl& z$4MFc@&lzSIUQ4@3F!yQS|!8b^cb~@c3CGe3=VlRqQNkp(pJ$BQ23{JWY$dq)T@{s z7HIh4h1<_weYgEsx65yh>iqcl#p{8&o3;HbRXxaDMnIeHiOoh{`O;RA$F;pU6v+n^ zT?BCc@-}H8^0nP60sIC&^?C^TmnaaEMfHPDsM2`k?7;>0OA!yq*&R}LI-x;5i=~Sh zFl6w43WPuv%V1SvD42>>h!P=K2JbIs=zRg_2}}+M=aIKRr09HH&FuotPuB7hhFXgA ztkHI>QgzA~H4F#2#V+f>t>!9d9IWY_Y!_M;lG6))Q)&A=oGl1=<mH*swTI7MeHy3Q zF5Bhzlozkw&Ru`lHg*mD-4HNmO7(a^MyE}1ImD;u4gC1DJP)e7^QgL=)b<uLiI%s? z&>(~OsR8^O_%`S<y8uwyiZ*GoR+pp*@e*dS7fm7sb^V3(0)+Geh4g~p6IR8H!{A>~ zLB{fuf=!y5W3CpZ6z5T`<6f=dQY>eaeqJ{S#1(WjJZf%y7*BDtPy_cI*SLnrg8t;n z(WLT`*rI{hqQSK4iQ2xU@uj;@UcUMGdgON5E`OBs|JRQ{{<!h*MaSfIfPF<5GT#eW zQQKT>$q+rE$<(J%%^^|PIEYW%i(8e(r9?fb>c)>@2<nGOn8$+xQFkfUa;?BIqX5t( z`rb&7uxZ|PunK*I9y36I{z8LNs$`cfX&!$;FPKlm^MDeCLxIe#>Utc@5Hk(Mkdujl z<q&9q>!nqXhh%rT#nm}QRFflX7)fmfO_R$vAN>brUfM3(<(JBXXRijYYygomrSc*m zzit2->d9-Gjx8DXNxf_xRDq+VD_BPh83&%gG7hQJ52(<%m8sk+uFw}8(eMBm^K1K@ z((ylq^+ikvK7DvKJb5)e!Be=EX$O?4T*?%L%(cA)je`_zWA)v$9U^LivU`#$#sKFr zg?%3J^#q@M9cl`Ol49VIO$w<<DDIoMy0J~{+vT4m^&!Yzz5S$rb`$(1?F$eyqT&Zs zH1I#@>eTA-=%PVTLA2Ng+u(9jMu9FZUCZf`3Mp2e7%6KJAx#LAHV>5{gv(h)D%r+p zI9`GZc<%z+;7Ygnrhu&O$ijh`;=#zg9>1g}T0{lWI}hs^FJ}=Zh7S_K2TNK;U@6J& zaW%Ph!;9DNZ7cTe@_(kh|Ng_qgXg1*x7$Y7%R3fQtHwifdI9JDX_o`i+k>;aq6_*G z%ZAdb$1-Xs@|$O%I{*WxC}^3@Z$>P$b|R&6G_7hhGN&srrNtwr+999-PfOOY3Y9YS zIg6q5s#18A$lS^<+-kJLTJC4`d}J)c%{;Rb3Og6CZU2IAyZryj|1aOXTfX_Yb$B_w zdIT`<98qQFTd3=rq2iDrZxbzP5iVgKB4!dOZ0LW{$X{6B=bVn`8EtofG?%<Rhb)OX zP*2z;V!B%x48Kd%{F{rGyQOV86daGL(=O;TFr+AINMTX^z|yrlti1z#ZB3Wp9o^Wr zZQHh!8{4*R+qP}n=8bNg+}QTydAobwerKMZ>EHLw(cV?Han?GuYt^Y;|Fymc+j6K3 zJKwiewt76jc1|V_qu=(SRx&+I?<`&o9X7ZP_;Za;xJIcE1&wY16GVFqUZD}R<p^+| zpw{p2&{yuz+021MD$@CKu%%$8U}ek3XsvCJ(eX06I_#fzTU57cztrCX@dOaOV=;$* zWIkT<7+ke(JN%~IFz&QUy=c&Gnf$(J{*j<d{el^MU3GypwlQ*aaxm7n{%g(Fzygwi zo&%pA|F1PBHa2?3e`d4(dp0*WKAp6&jj59vJ_9}5ccvmfowB=~F+QD|oPmY0p%Xrx zlCy!+KXN3j^-aGEewrKowKW?HKApI+xv7~GBm*luKAn)Qm92x4oxY(l{&&3Qzm6ke z<7Dh$XKSVJWQ_0R;QYO-l8nBiB|ZZK{l9G>Xzt`FZ|opsYi(z1V{GFD`F$8sbF1&Y zzL$UPCTwhIYh;Yi%M1DMV!5qfc+1q{{B*(T(>z*wBUWGbCH2kUO4OiHAQXv@WdWBM z1U5y)UyO>9_J*Y>*Cf__PZ;ZPU4Uey*K_9b-*C`#B@)7bfM&y{C{)WRD(=H@Iz7v3 zd|H3&f;IRJ1pYfRGW6-PbG!L0t9wPq@?!hL#&u2GC3VyIpNo_puczLx*{|o^ud}bW z*9W$3m=)W%-0d&N9Md+RyN#Q*o-5y%Bi?1c&&M;{-rVig)@>O%F0I~=oZp`x?|*tB zT%S{Z>wju(m*#HA9i`W9*ON)te!GjSuzmSh+sK1ExB~G_&#`Me+}QqHd<@t<ob9cW z^YY#s$-Pf~nXRzD-md6ncyIr@Y}`{dwW)ryY}5SGeT<|0ja$3#=<(frp6-82L};E3 zR({voPZe%=eD8;s$4`k6D$kr3YrO6cr43j)79RaC*tgu4ug1+)JHszWmae;*A}*_` z_wAFBU4liPeW5W&Sl7mz)RavcoqZ;?+n|GT&d&oJZw`;y=N4PO_sk2>LG$MvEE4pw zC+@FG!cb4>ke9(2IJ{Veh!hU=3S<CTsCI^$8r~e<TWsj)!u*0DVPJ*St?S*5Sk-fU zSY3xTn`1!A5IuAsx(&CT5c&FDbd+~;<a1r^wvwHEFndT5MD4q~zN?#$xs4NFbADOA zRxIdTUkq6`yxxsxp3&SJt(t}CL7~SjZ%jS~lvpmP5DGzVbr>(_z5Q|=xV>F;=npYo z%)tB@IvLsb(;!|5;nYaHe7F#|^#xmYuh|ojri0l}a19^T+h&p<euq*ZHGGRo&E^kj zI%BtN^df~2y~ukLz*auDTyS3=8yiDjoNNb?)_=;<fb;C@cH~0MuQp9@cotCtv4>8^ zPa<}g8b5bV_fLrMw7u1e%QAtJLHr^^b8N70*0&wlZID88%~%_?H0z<z9h_7zoQ`O# z0JOP#?hl&1PT)5tobI_^5mSP7wIV}wUqz@v{dVyVW?EVy5TWmti_d`7$+>yho<_3r z(*g-_+c>fesF&aMrY(QOhBBZF&h)8LzIIR;RNlktKiP}f@@1rayr^Z}uw;SxwWE9o z>CsZXSzquwDN)*LpkJM<9-Yyg4LK*JHZd@hkz=$`)Q*B7Qbe9qO4`_f1MPsF4abNC zp^F*z!U_DYrOn_d_DhPd1TOFTnQ6SY^T^QAshxgP4pcNs*)8Eo?ahO(I&pXP;NKI# zKSPyrKqSO|+9JUQp^OW0@73Klf;4Ck2GD1}v04-2h@Pyy`-nq!VC1%UeovOmBLS8} zQEd4$4x7;O*&W9pqG#8DxcdO!o((qGuO^cg#}u|naQ;JPf^$F=6)DqQ8#7RV1LVi} zu24*>Drkp*W)3{2j{2ObX83yk4h6|H>Vzg%4yzLbsOmZvD|i0pE9=cm>R#J*w~x*O zk9zK6dK=##=*3ns%h06xVU&`5%O6P*6#h+0Gj<b@s}XeUMxF6r8Li=VBhI^%=f@0t 
z;Uwx);}j5FP~OjN>Q;FGP48`^76Dbn*(u`uG;1j5BAsn^4_DQ_hnFdV`H=2z!AFb- zaE=%1@mO!k1ee~+5a&BO?->35O<F@@FvZ55F%ZNYH|lF+lj-Q@F(GHWNlZ=DhEZ=o zVi25_0N^KW*Ubfmu65Syq!R>uC*rta<o?BQ$iqPIHs&3kDU!;KA9~_qlt$px(ZrtK z6HJ2w6rp-)+ms^ydj^7(GPk-Ri2Oin0xbUJ;z+I-aA8VO(G>o~5^vD>{5RkvUTc*2 zeG#T1!Q`@obXb$Bfi>{#$<%~u1*<dcX(V*k!kF%^;UEi%3DYW!AC_wAc&*70rfFMb zQcU>J=#p`d_#Wh?APWgu0)mls>~qkGpsPvzA;9bN*{Rjg*5vRb9YnY#0ebStG|;AJ zq&2%A&VCF^;r$J~>3nH+xeeJdpIJ;v*q{tvh7pl!UxKbJvHYs<Lue<QnzQEKKSL}G zCNT6iQN#ULD5qbFOF<g=aA3bPBluma2-xyCp#2oYe&Qy-dr^bPN7d<PPDn2{@C=Z{ z!}rM?Re!mF!Wijq%p3RLRt2VTm@t6c9_fxaYyOD?U{pTIx%>Xh<t<qWq38!M?!ODy zBPm8-19!(-A7RHF%HtjpSx3CowY-{{7wN^#iN;RKFW0y0F@oyY7iVq`|3w{2b`CB0 z3th0Fe|~tX4xfZZgbPN!<ap4QE}p3^>~8Ly;c~{oGUv0|^}<}B1){yqQ7cSu9cHlG z&>W)@f(GG`OC)eZI4WY0Dt6?D*C}sNkbp}zVVRk5wE!zJ$z7<|Lc?@6!EW%e8pda* zzU6R~Mwsf#J8vtLnC~ZvkrM)22@m)$2STvaN75nAoqAPc967kSKc8t#MWV^SrV55h zZBy&eGVDN`f!P5Qet?1Dn=^M+HZNdGrwB;^hb=V(e!F^5ng`BAgwu*w?ds5q|FY@L z&{OvoaS76>x<;X67|~1H=Rks{)JOv=B+Og9p9W#q&^29svvLY5+9oAdG-|p`n8yby zyl(-(knNrk1kC1aJ|j**4I6>2h!T`iK&7D3uFjgdy_aVBbeimsV{+2Z6xSE>lmKLP zI)IfmKp@K1SHq1pW#^2?6Gxm}hez?;IO%@a2s#%YO6;D)l>YE1f0>}m977IP-I(US zCQMm2#~+Cjee6U(`5viQ7Z6`Pl$7!|u3^yE8{g%H2#KJ3tG9+xHy$SRUkU^?WjC^8 zjwS)J0%BcYUk~8JMiMVOp>9D_$71;9Di4P`(Qh1w3=@*9Mt*$Sv^Pc;zQ6)j#c7r` z9FwM9W+|ci42o*h{!kjI0}9gMJBu<2?R|_qr;~gnHa*Xj-8Uag`3vMoWeNfyO~<MK z9h`Hxk&z@60kRIWQn(si+@tT&22(cWxAkx&ELFj93$?7tlmPyMpF*nRVZ%rAE;EbY zSi{sfFwB0_(5;;TXYf|gAWM-xJ^)Vu<)XPp5Zuugh!e5*w4}nX{g5^uY{!r=9EA!$ z5(L5PGPNsPXo*dCQ@}(>zp82R%EAl!g}Td~993#wRlWHeSSlb>>+IhS5xOSHko*H2 z2WEp#tzk@YdU{~mOMW{0E7J!lC;T8F3D+f<VdT0HGbk{bgMLhD?ye<2Fj;G9@=1<E z)vO>JUUx(+#aIJm`a6q<X|ib~%pELX%~MV_D#(#&V1UN^nFIxqAnG@hnC2b<za3iB zlDyMON_{R65~EKBi`@$N6&||8?ew5Z`AGQ(xZt=`_k~h45rc5<i29CQH&Mmsg2PuO z+8KKdTkk`o$cL+4@#ixvF51Ns#5IxZ3InjyP#6~<LIwi*QE2eCVdK=mf*XB@Ev1T~ zLy+Bn1<Gl}0gU5f*F4%6>?=blf(fb?P7n(IrDj1QAx%aOBEZ^gEGZRyRgsLsj1i{% zQfeQl#D(A>cf|eTbktoLODDgsDO3TmuvO3<zS<wehTWMWpRHa!hvXC>TRbXtK5sz- zH&_TInTAr!o`)q{e;E#HzE;>F(gHD{VI%{|!&8{It%KhRUy=x%(50ppw=4}_&x}sU zBdSb7Ojiv8=l}^i*=`YX2Up4>aKTeNC=jbGPETu>5wV*siOZ*b%9@ZHtY?V&O++I} zNn!H2rvBkS4N@_~p6w2&Kp&Azrzh)966a5hZ(;%VLGeHu%~wcX)3SpK>~OZw*Nk3S z%C#XeM*;>QM<AR02`#}JztUD6=i~IVE=duv1X8w<LcDO%LH_m8!3%T>j!J@hcW3u= zDki3wWD<k@i&%l2D#VI?hab1+u1=7&g)l}B9;Ekf{&yz8$xsL`-7iTq6bUF9EJ9<> zPiiJ;uVmr=SkKg7iKrQAOy}|z2N%T^5bHsT9K>p9L$bfN(wJ8BGRxF)PTOxq{4>cL zoZt_g9qHYA8rcVq(0Jr$aybZtRw?}1R)e$BLG5Tqas43)25btFQ@-nf9q5GTF3n6d z3c4GHyE=N-;N~TDepy77A$xiAB~y5ueP)_QjkXu)Cp?*g*#e>5UIs!8Y<IX4*txgJ z;YyuxDC0XEcy2#D$tK6cn-}{GXZ*Wsy#Pe^gkUwBW_v7gOTv<aV2(PXiH|@^l>x^( z7VxWqM~Q&f7*(1>$c>Vf#Sl$*2KntB=L24P!D$j29f+M@i78E{Wi(b$t6;C5$RTcK z6rj*nm0N`$lt|>HV7x|jcGBU%t-(VMg|)t`anB6{&+aBkP%CtQC9+4Op}2FL?0Kjj z0J}yYrdM_T@A?P~Pb4g)L+1h>?HlKE#h|Ravlw<EF@9}((iVl00nF4NHM804fT1xX z%AhGh=;q-&Y!+3jG#0N*<RnFxF@O4yR6b`EDXkzMO*~S=h?Njv3dQSbLslYWJSM}e zdYOpgiWEd{kw^i8q!%2WoJKGp6^RqP`BW@Y&Q73Hw>6?g)DabWr{i*s=`s`Y*dIj_ zW>Jw8NFkwCxS$;Hse)d;CrH2_>SYg7OzUqUP04>itl5LW(2e22mUUes%l{y2p_y&O zY~j7{hhKvtm5*0=S+!|ehgT3&mlD0~o-bMyiJYbuN`dO&O7kMbd~m|Qq(voYRfY^5 zstm!Bw~{5qoHVmdK%&As1KD*Sz~Iaze#sdDIIAqk2yDPC5nU!i0g2mSo=XNKDiDRi zX4?W`J<r$+3NBe@40t9!@F8H-EHYH@&+F`#pV?_c2|=?<Jc>HRFa@U^Xx1xn*Q*;U z6rDL-V&hA5M#|hrhla7tWYoY>ihK)FiJ=40m~>C)F$7D^y2YZX*h^C}Wo~F9Vb7l@ zcN9(3uecdsqfS6bZK)G+LV|v*6&ZzRMrL3LTNeJ}McQru_Bw0CL!t3DSjlZ_W~NP) z2*Uyq4slQ@=qGp+sePyqB6JGn5fkgRqRyt>GWT%IEMpUDfUTP1!pKT%)Io6lX-h|3 zYpBktLWkJH{D}QzvaZQCI1fp<E3l$VvF9PSa}48XW6ta(I4?ubzaMso(YD|sr1H31 zb<VyPwYd9guv<k1!q*a4aKfY-f-Fd8%*dwUGY3Uj!RUX&yy=?{qR4*jmsUTpRbS}_ zl}SgGH+P?m-|t|UqMuoJH*sCJkg?eu)QCyD3_-{IBTyK2Sp+=-mdIy4h2&8`a+^OS 
z57Jr{q=FD!(Ud93x=et&AMe~DnsT-{sloWf!aQ@at*klfwxi7(n3a6|0%c_){JTp| zSz%%XAsvj%fPRo+g#;-RyY7@*4mPhqp~RgOEYFX;T3CdRjB7jzvj<QEElUS!8?k_u za^p0S6%uoDgP@JQ@;JU&j>EdAv5idlONIbwj`!IEG`j$DV|glLP{0HK4kY6aT6;2O z7gTy+B;iq35V8qQ#XwfP8i>NvT}Yr5XjoM!%3hMYF$618y`iz`Bv`MwqjQNhUL8!! zxj=(Vq~aYOItrLV`lsHuoc3)9M{C>`NSdJD&q(o4g!K%>F@0;kGsyl(T4u%j;3$kj z1v|2#NF$Fv)+G6gGDfck80#o-dObZ_aL#d2#j5QSIyH(Ri-9O<vgUkgFRV0LM7x;} zcxZ%}U-<Tr;m;8eZi|GDeU%X7_6<{dFpoeT)vkiG3la!goc$0TEPP!uQCuqO5CDY) zsD(oK&E$}8sZf5hFyhPDXuV9s%siZLhBI{;BFwnd`;_q{ipq(uBi>f}V7t}#{=};$ zgYIZB)(NlW3MzS9)l_W|px1I%*AwJ!WdOy$TBzNkK*f{>n0WS5_7~8YSM%Z3$szvO zU^qyLATpWH0Bl~kg(O%MC|v-mD9JCQ#efVJ<_$b1u#;)=*8RcZFQtkF&v#NX+ls1C zIP&@(%vFZt+>Ae;(HQJP<Af2Os5dmDh>u$B*|pj?R9BQij!I4=2Z1FcPG&JY%~HPg z7zpVpGDZWhz+%gA-6tiSTGq@>2u{of#_vPDVpe2Y7)?o)G5=$weJI)2f){fp3mK+- z#!TE0GDP*3l8c4D$QJz^Q%|`q?N@@)+fGa91*M?Dy%Ih686Oy|f3q3wQbvxeuMS24 z9I?=_zPSTRMj!ov4*n1j`BHfTr;r_JVlh2|q5`5ZHfBG9;kY1zd5y}?P~|KO*Rc5p zZ(q<-2laBZACd!Zs*lBo5id-~@{Yx;)jKu~c&qb6U<>mf#S8DJ!30IIRDns{tI!T= zMmrP}dhwSxR6}7MVd!ewc{t<sb({1Hh0Jv%m+@yL0RrpQ<R7GDKOW`@TGA>Znmd#; zFjrB&^%$!}k6$@`ScZ^cu9nO6uFL{9qX&gUGE~Dn`J+-Ii<E6lKCQ@RCBhbdo>R?< zvL8%Z^0vN7s~M`Uurc6FqfW7%k*&QuT}@`r#-Z?@-!hsEZ}%7(8WUX>3M?O0j%v_s zzcAK^oH0a-S&013=vgsQNHnJSuq#a7&<4^zVDgOepeCf8cSSmHo|ddYAYFQ&GI(af zjpFS$ExOkZm4$rm+-CaWGXctcmZF$ssy!f39U)1vKyWnS8M!{W2$=e*dewmWIW#at zJP{c)0whwxdgzRg5{(9i5|Za2j!Y^hLtoSzvSVooW10C%0%+ih7f0DTiGUh__h|Ai zJE#y5Oi5ivnfsjV)$YzjVKROwapSZL4Zd+Po3NOau*ITy4z_{$FhCIP(Sjk1D2%nX zy@5_@KO3T~gC|0@@h~7rU$&aP80yOhy=_5OK)%490SxKEShRG|_s|7s%}+>BOaN%( z1(K|rYTT*lis<=}w=^af%#10sG;En+qoNGsPSD?26*Gj1t?1KeHDkz(t*O^Bs>ujC z)+`B2HfG8qqso`?Qe|c^&&!oXNet*AlD$N*^&KE>h~1R9#d>I6^#ivIMjlv12&LC# zD>jaBWiEr+%*?Z<v@^vzFe$A>6ab8mjjXB!tWG8zoqA*;kP=+-<wh9rW6NVu4OWQJ z$h~cn`YrT31+}Cm?%HPhhM^rd&U-TyLREViOw(nG9}LuZY=+@CvL=*sA(3>fJBL`X z_8ItwP5G>(3&n~X9yBEy#4XnDmWC`@`fIyH@(`~YN33nDndAut6B5u$wEI~e*=foQ zrr`Z~gsU-4;_$G={c*3c{*>~RHBp&aP@!eCorDZ}^FPYQ;HndqB8rR`A)9|rX*mtM zG}+`Q``;x|#t^Rn(PjMd@3xfN7u(q{V^%Z{1RZGVZ~DF<Ldz8nQBre8;$P5?EqS~_ zI8(?K_e?kJ{PH!<5rBy@9-|~#A?gz${8;_P;q#jnQ?Z=rmsy)V&Xl%|MWL4_=q0Iv zi!Bso3*v7vX1D<<dDV>yAi1K@%UI6bPGBeQR5%Ut8V%ZYZcv5nac`n#NR+4A9NxkF zFr(58@@A;&dt+tv(UkQ~vGH*lB@lq7#tB$!poi|^F!vU^hL@)jx!pN&iuL#z&{$Ql zRFF=$Klb^&A1Akm&Kz&woV+J&eQ|vT=-NY%J|Et{UQl)wJ|1-6H)f%D01$8O^z-!q zX8erMIcMUc2s29O8PBk7QzW5L5O24Nnz^NEtXz8l!-OkJo%UjLKbNm`nkQMlf|FLN z6MVcbW2uV=P+7R-x3MTwGw-H$fHAZWal&p$T;omgPkQyYgvA2-S0oAce+c151L|(- z(XYEv3+0U&m#mBfy0G8m1B%ayRd^AUtk`qOXT_Fx0ADB}OqVaYxHIU^TCV(LMZffp z&vY<Ir#MDTG)uc~@uwMF#(;|XSZdaaym47#%NfSX&5$%zcD5H)fjNUH9PR94$o3Rz z4X^y9gEIbP$t_)@{WH5$lu&Qb*@D!MUkOn%7HCeogrG5TxT+;3&|;K=WANnP^HPZc zt&99Z!8*y&nl;gm=v6~<69C$oTsFXJUt<g$C>6w%Irh`Cri^o^m}H_v)=oTvSg2=# zD)3N-D(--qc_o4F*6DX(7D`%-p1gi0bKTV7PC}F_?EHy+&EbeciPEjNg%-)56>z17 zpX{b?p;gfPRVrE8)+rhojL;+;I+6qPjOgH<yzz=l%YM6wV2Z)hNT94S>8D+oQI$X0 z(2Mew`j=B(TY4CuC66;14anaeH82L$2*sLHlJ`hY-Bbq5R*jy=1|f;vGIk;he>b5$ zR)7Y|+q*1LFU^SVO}cX~Pq0&_@rT;j+S>!C>zEOiy{oZu*ylu`|ItX$yRL}IBrQ9K zou=ygzDpFC=DLRQhn&}$=F8))AAsl^Ra6MCx4&r%>OT$#rXWko24<djS#FZ4QnU?V zusFcRb`JSdi3PoK=?zHCxrZ1>ib`{gLt~2lCg}(f1961g_?h@v2g~gbm{)Qi9zH8y zw6HKJJ}qo(S#*P3xZRaNHHZz3MU-`1)@48tSc_DP!>)37d_pcnSbLWc3QzW<3<P2w zZyT@t*4`sKrTLU;=~=1$6h#bw88DKeRcnZL6-taZ{A;iSroRr<O(0%|MW;?pY2P8Y zeZsO>%m)_|9iXS6U;dZ~q1?!`-g;m~gDwBpYOL3oxir4EjR^)@kUlKdO)zX*0p!J| ztnxPB?~s!1uZIu5uiCGV%%5i-uU9_$wqEPH)6r-jv)x#96Ii*jKe;}CWBK$v4+dvx zB0HimW+7{7{o%Uj7&L2dhX5~Hal2JC6>Fv+Z!Zo5p5%{Pc4H)n&xb*W_V^hI`O@z; zaU|kf6XHH6=pA*3#wf|`oDqvXE%bBderEi4A70kh8ynu|D2*+WEIOAGHNqk*=Q+{z zfXxb!V+sx}y_vz}*e848M8nOfgHFz5`X|G+f9z6u65w{4{}gw+C|gZF>REccreqxW 
zwv;im5e;cq<<M1|;OBhq`<kLO*-r{!UizP<1l?#R%E5YZj)e4xj7C<z=tnX^7J`d2 zZtjX?O^x>JS~S7tvrjnYY)MxwY#l@-JW9x=6QC<P0%y+YdyyhizHfLR<3}50t#!vL zl>H$x?)V&ojmK5A9vD-17^?FT=>x-}%OS*Z<G5XWx?%FKiSI=U^femm-pta;g>IAN zv-oCiW>M)IXa}f$)z`OJq31yFm7cw75wkn97%Hj^ohvWmGT37fgo_+->AN9{SHA2U zKSsBLCS&`QLJ9V&uyMy@rbUekZ?F%f9BTY6g477Wrc?rqXj4>K3K|M^o)g~&U31@e z1HZaHtO-Za{V`IXRn9|EUHY3+7OYesc~H*yh@=IAlX=1#4$&@y^MfWjT-WEY>pjdI zWUB{$tY$&V_OGhm2Jy_gs0n7K)UI60={{@*dmt^%B7sUD51qUc%XX0rY=pdpvei`* zYKAOqSz)UE5$T75f|5m@YonFjzu|-}IBmLygd$J1ZyALNZlG3v;A}<KLuzxV0bf4p z{;-srcF2*{5Zr2$WV?1MQg~o~#Sh;U7+*9K0NfeeT_#Hjl&#<af3N_!Lk*^$FAKCP z;-9rlc?M_|OiXGS_bR5#*J)@wJb;C0b=dIoeQAEH9DasQrvp5&YTnySo#Kqpv}7<# zAoGMdGgKuzhDSfrm^nxhMRBBH9nd2ygAoR3q%g+Styb5BDw4XCIjVcg_OtOaDGLqs zO6&Q3xWc#E`9fH*C>5`%HNyA%Mmjv(YzWy&dr(V*VJ^<CdhDPdFKA0xSsCKBSh%q{ zDzZb7EhZ2;hfB1h>zAemD22$kt8NL95LnFqcc`UpZm%Gz+TLE*eIz07;}mn{!{a%W zr{fY&G3by0*#dJt7mhXncYplpFG$MsHLi>bEBv4`A!=O>ePmb?trH@qXNPL^QTx>I zfsbx^D34v|eP$snx)L`xhU&qG&On@%Ae0j|nzFLnj3rq=XRAG$Wf^kNtF#b=H(0+N z0~Y%!uoMc^!T>ZtrYJq$+C9A~SBw)cS}-z-G_OH^jb@`VVTh`6iKO+r1vox(TQAAt z?8-*-u@i%uTggl{`{yEq&_4gBIvG1O(y}Fh<tX;R-N1ROVZ@XP25hoX59D`N@b7tw zxkVpYw&m!e5XPK><dhy1*_1C1Vk7tRS*6=J=bpK0`p!N4GyI|9;z2>yNYTh!yo((v zIyh1w8zlJJ@$g)rf%iy@-eXkO$Qi8To0c>X#%VxlAu4-Q)j`n^;=JjE<qB`Sj7<xp zC$6Elf24TvXfVg9>Fc0dIbw<v#|NSVb@Z*2)9(TWhzo##<u*XfmBXu=6{bW)NFt|k z1WJEqnpFg5O@#`upR?pDu9eEifzT>v4Hj<Wgs3`q=GP$DQ|}WJftuMxZ0Wi%>sOI- zPF@5Q_s5i#`19g)7mgzBj+Ca#UCoazIdvuog9URxs##X0IeibT_g9Ki@BtQ7C3j0_ zznq0(AP}W#<64BusTgh?pEWG8?)nR=61);9bn=?IVU`A{l6frBbAE>ZqEF-lX^he5 zIZu_`qKf~;FDWN>0?w?)LN&ce3iYEbEN=ZR&P4TS9h@BTmKX7SZ4&5%j7!#cotFz! zVk~T>*oJf;Lb@c+JX0@I>K7h#>tq`U&et&+>3MRhuBUDXpUQ+5wCaR{SC!&f-iXAb zZHaE)hOrbz8$M)(Y{e1(tJ1}_eicrMMuSyzK5%4Gib_hU{{iarl7MhxmS&y%COIe+ zV`7#Kp1AHl*mmz@kR_!)i9II8OgJK(N55<#rybams4Az<FM(!Ox}LqR-<(Xg3V#wm z7}+mje+xCJQ0JqLVN@VtYf|Vj!woAt96}zX6lh!G$V~^d*<*yP0yGXa=K21yn8Eh5 znKnsOVV@FA*Ht=n$216Gl-0~zssf00V}M@28TYQRO+G3feb-1G+4PUP*z#NX9ie1& zMavu>@}Pt8(^h<4NVc`SmQ%*Is1Fl5I-!u9GMSN~^~j|2^V1dJjJz`^R{Hix8H}Yj zlkv1{{GJvIEHItDboj8Gt^?x-yenPzr@!|V5_U#<x?fk{#N5aGBwkj<pGw)8VbJjn zc<7ZJWDuu`Ym;O}*meGsvh1zWiOcRMUW!`2-sh3nP!)qN9o}@Go5k5lGT!$|U0#{5 z(<{_Vh@Wy_*m#$Bu1a4hx;Aa9rS}+rbZISXhjs6_idQZ-806AQ^;;|h0q4dg*fd4( zJ9XbL;B@XdO!*K{1g)I+tmZ<0vFjpYU}D}MHDL~gK6aljru9(quDHqcE*$@o0_zKZ z=a#A0y<utJ0lw5HSIMGpAiT^DM$gy0-ZvXl<|_OxhNC<b^5>q;^=Y29HJkwb(%6`8 z^-2Hj>fSVcj7AUaU`D?fC}Wfv`Gc<XDQ3%qet<4w^Od6}oZe3~n?>%o11o4OZ$@Zs zSS%Hr>qQl>ncrHoe{R?O5(CEr587@Gnq8Rp&5^HWw@dBJI5zshu>}K0MXyjANyo@^ zHnCfC50G%C?vGi##q+1PQ_nOhx4Yo>mx8VrhpRu6w|^o}<hXt_YE8dy@4_0l1(LJ| zea*0~zi@s{CYaXks=N}mg4Y@h(4}w>-miAw%-kRDX{v1VV_?5m*+m{+>fjtC+Mmy> zwuZ}nF8X%f55fgm$JE0Anp|Tk4cJ@2;!Eik?~hpdAhs-qIv?&s_(?GVm1Y?yDH2^+ zrs$g=`-cTFJF%vHK_4r=X@mhRQ_0uvqMqQ_p{g{0SfKr+9;J_enp7+iXbU>@tj|m4 zJ{zCnjl21<5`@Mrc&RVDcUJe=ti^B>n7%2;5jYil5usbvm^hV|@ipP6FK)9$a88+% zlmL4^S<Re3CcFIz-RT3|#1k*WF?Uz|-TCpW-V5>XYE>k;uK-XX8`cgRSdMSb*qhmo zp40IcPVa;KH4OJ0nx%0}RO;PWkRFov7o%gd5ds)TEcW>sWf9ZKM6G1$oawan0_Fzv zn4<liaetz|xPKL+E7k~+4VtN2N<hD99it4z_)|ck{lGcDZr5#CoJ$;uU_REk{^W6Z z8fVB*K9^WBKKto}v}nIsL4i+cydaCSN?`@GnFMbRogv-ALH5UU7@)~&gMln>CfE1v zeTxUuj|OD346s>jXzyv~{YBoxL>5CGr#w@rjD8{t@$}64J*ldq29PKDC|U*ON(fra zQrJHL(dgipC81MybnDSUsWe!Y3kpnL=t7MEU=45~k+!@$TE=9gGH(7kyEKOnM~lBx z2%4UWF{c~kTv!PZ9T>+~0UjDir^yY{Xv1A|^HXjx!xd;_bsTHptLCqlak^g=sN0M_ z(7~`1Z@|<l7s}f<bTB}d*lxh=n!3?xi5`gd!$MnerIp4m)JxfAg+`X29Zk(VWPU|> z%}3DJoO!)3Dayhwpqmpc9FV%W8#h#B9ZNQh?&EobO!i<afbD>mvn=Ed4y%UL8+-Lm zoXDF&GdmQ@ff1rgg4uo{b0#Dw6&Ss=>g@W5RcCc%tVHw!-A8E!lMp2J@w?esp{IzV zt+X|#Sw6(mvm8n^Lg>>nD%3L>1$Ce}MowI`*1at>pa=T<s#Uzk+tf&o=Jy^Av-x0$ 
zbXLdKa)Y2C))^Resw3D~E*bA)+4%^MN5~bAc~lz~|H;+apOd7p>E+_4H%d^*(lO-_ zhuxo-c&Q`k4iCmQcw2D5C+__a$I(hl7o>&AC9*tgetN3m%jMnK>YNq#qo5I_JVnkI zMAIzg1iiLt<4ff)NSK4yM6xNfp9I(gk=Kup<>3*^Wml&Uf*TY*Aa@xI#Y!O-`X2u7 znr(3m33nNTk$ZnhqJbb2Rna3ZJJ<GArHCR@wp7i)>=4B1&IzJ-YEmdEChtQd;}cgT zLGfNPK+nDQg)hu{+suy*0#rO66SmrVGh6ICgfj5Rkl2Jgh2?A&Z0Tl%uJybd{_cP2 zmE@`AK|I&@-x;HdcHh}+Ek4%zF3_PhUk9ZIkvOZQh7c@=f6uXYlkAzXpg}?Vos}x+ zOb7*^%AhE_vd@O6X7ih@q{fyyw?re%-y@i{-W$+ivPlMd<nxLf^d)@g$T`D45GPbY zkc(_;@YT*km+Hkn`Y>l#C%rH_J2Fd6ajXA`Wtg^b7}SJ?^^gTiXys>o$XrSRZ#QxD zjPzSWU=bl(pG{?Sg{dJuwCT+<S+dQ_A0|R%vSFgTP1!4)o;!A!@Se}EUJ=TDVx8(e zP!J?L?o@yT8oNyuB?@KVLi{HCgkb>)PkNxhl7>-3Vq#p8Rm_rQ05=3G>jM%3E^ECJ z1HYV(YS6(Z8R@Ity)ZPD3^46#2?e+HT=IZLoQd=iCa;K(v)L1${AMVeb%U^XKmaP| zb%&|f3U03K><8#{Cx>JqbZl_k-4uP9ooO*nxXmzxBUTxO??~B`r38~}ohEM(Mp3gI zhl?<mUL&Z3<Hfv2OK``Ot!Tf%^q0%BJR0uQ_I;9MIK-tZP&FM~SnqmKGz9ri+P6QS zS!okC1z?v-rc3%ZPo-0klG!zkrRGbg8NVZk7-&P97m<o5S&O9#AylffKJ!kKO;4wO z<SrU@h%=DzxpP*iF-o@AB>XNAfK2yQkjUYta#nFBW-!?u{9URUv*jh9<7~v}AVvl> zgnQU_>-qyjlAR<jUceM-)K)uA*7)FAX5SnYJOn-Qv0IKYboHlQ&V&nM%eA}?&j)wg z>&;Cp@7_KFg@1pggLpPAp;ve^vH#4ng}jukhbE*LCOkCfvcW6vE4O}5`c{p997e(j zIG+oZbp}ml22UN;T9nmbYM8a{cl*3|g+b`?*r2;r>|cj27)q1*lExabU^hlq%S*s7 zX&U<TC9FXS1aKS*Pu4+|b%9nczp!r1{NSEP<t@A%0gj_37zSg1I+fokbp^$oZS17@ zpk6y4ez!zi>gC{qW^g{KUsKwmB<|gm8{olQiLwN__hpjoX~It~#&ifCG1cc;-xp`Y z$UYwcm|UV-&Llr7cmN)e&P@i_5n$II#(|P@u1w_Z?YTOXbVr;j0B4@X-xh<~k(}Kd zjkG$oJR3$~X;U4<6ntkDJ_yZ}wNMyXITXpmz(a)|%h?Fb))$*#;#uNNc<)(^FFs*5 z>pPC1rb4Hl9v;KKm?*bdp5=*!46G=QNm=g5O(tLeCu3<lYO6dLL0#~e;mOVh`Pdeq z-*OAmn2&K?S5SSR)8V3|yd;2Afixnc-Q3x)2@vd_Z_6B0!_8d&ic59Vm<qO%d)56` zMaL;>V6Ez`0;TB(Iju4zUL~4*&7SSIBA&xqXbRtyegS0BnGX&lrXgDcKDtbd=mS=U ztf{iT+hMUXxdV-js|#1@3JgRg<Nb>q79HhOXa3PHBFjhM7_Phf#(+DPRXG+vKkc_( z3}JOQ&hko}18)QlCsvziJr}<SWWA=7z<vt)mQu`YyTwXYuHQ%bGb^<&t>24@JM@q# zlNI;tD_lz~6LQxU#@6FB+NkY2x>%ru4t$0zKd=UZDPYT@twiWgTp30lpFFM{8Ruer zn2AtuRf6c6nbRqu6h;)SMWgj*O8;?evADjnSjolF1ANNCGjRF2)#{%}RZT{#&(mnB zpn`=4RT)@c3J|7J{U7kyMfM-<ULIF_Y0|hoA7eQWEDw`CH58!$=(p02tY6G`v%6?q zzuC29I(o8jJ0YL2bNn}t4xR8`J8^t^sQ={NT`U?-Ut4JtSI>VM+DrZN!>aqOUs$4e zx~dMyjG?UPbZ;mBv2sE4L<*f-p`PHzp2a~~!IzmD1h(67Hq31FlR=#s1}nG(c1ndq zi;iQk?gs4mRE|;F*e<-lNzXZm4_~xxQQ(LocSbpXPrdTwq^jUK(1FMpfoP|0ttCvf z(pRYsxBG!#He(UTysbQ#-|awtAWMEgg|l#(N~QqRowS@2-lPtrSDnsGir-6;^hkAM z(kK_fAy7+7Ls)j<v0@{1jrU@jo`(YDo!>57uZ+^tUog%nItTJ0dzFPz2n6%=p1h@0 z)Y3YTk8d~|(?kSF$0cBHjB=58J{iL&?ul`YdXxQi$RQX`Vom(oaniipO*WzEP%0@U zSXff2foD+O06Mh9(#$9JnbYz)>t=%^6jX_V+0*7^jU0RU&XWq<AWQZ)v<Rv0wIK*R z(k8D%gJlG<u%ML0wWtmr4It+<yHF<4=`5dEB1!IiuiIsB(UfgP(<vjF>VgrAY~hwW zhoUgbiL#&v#GfGkKT+N__KFLbkUSSgrUHnn>osG_6Cw0iG9Kb!KYErYQmny*B!IB8 zjKFc1r_Y4;@&Zc2ug=Y&&MmXlju51Ok$#GMpp^2v>EHfg-oQ@c4X1D@@MpPK!fmvQ zU<N;45zHsn$#t1e{-Nd3ciKPTGjF3;3NFyF=3z^yj1<Dq!KJ<n+ji5XEB-uV+2fmY zrqzT&6<raLfYjMV=&Wp|uNMV-i+*;F%#`Q0xdFE|92^3{FN1&;iLtIof3+a2@<R^! 
zr@Eg361G8W1pnuUBt<+Y61Ju3sYad9$$q}-AMOEiuyyJ<2M(}June9*WRrdLM7k#p z9x3q_f*h(^ON#XyQ(0S0=dDV2W?R!NewBqZ5aK^3QC0n7tY8AAa0@3|u{6Syegm3p zL{yX-t|VBr;lG5xUp%Y7ei#5$)@Rrmkj$nOB$#UEaw+lKFsutx*0#5J@ZJby;)|v^ zq{>}hz^IvlnE?dX!X&`3#;Mf=oJ80epAa{|G?5eCwh|tnm#nrmdS~^M63`dg-QlYK zOt`R8-Y&!=1rgF=D21Z@@fjyYot@Y32~ch^?TNi+3FEf<*gS}2{%R!N3&9`Qkj<hL z-_wSu*PxPTJJjvIuNHri+TVG8`O7gRhE=Re4Z~`3OA|xmTFjQG3gk;T8-5}UB3kKV zJE)9f(zZ5Py}oBbxPfK;)01^ILx}L>GOmNE{CgCC?>%I9-r#jK)`1LXB;u|^8OK&f z*v8r+K!cyHes6@a&T@!X+sYgPV)b<o3s3bG9xT_@CcaR=0SV#^DQ%h{oHpu`Jo&l# z8eViIZ}Ba3C8ubszezrqV%jWmCWE*v6^t!G$4!J|L~Sx*A=v~Trn_QUk*T7@BEW>& zF&7O1Mf91?Nwe#PFiu}9IUOI)^n*j)*m-X$IbX5M-P@$5rq($=uWmi;PDO}Vttn$D zc+<!&3Q-qGcQinSFpLv5gKSswq}~MzKdv%wy>1Pb`f;8b<~fsY;wi^5iBSf%4B6HN z<@K&TKB>9qM;Vbx7i$`{Sm}I`{a5zrVcH;?JFZAxp%Ww!eKCx`k^sgGl^=!#I!MWH zlVLznH6v<MDb&bg(w(TfDE~o?pD7P^Yz*b(!1C_)_;u~uOxx;FVpYhdo@I*JC26W9 z0Llg+romfR1VD0EG8l!!D0z(;Du%r#0rPBQl#_lWsO8TZnlz?)Wdo9y8Ymzm*gZ$; zMZNrmlJ+GuZXVEh{_%F}FmX>Y+a7u8C;r<aBM|xa3zFi9*Nl$*UFg6`jvDAJ11mXG zc#l!j&&OX)1uteIPXZZim<j>&VU(KM!VpfFl`p)*LX99Zec1L$?uNs5$@kIKL9-W0 z_S2qbU&dR{P1wZFPhd>eSq3M=$jVdDR1Aq48G#Tpf$P<e>t_2H)_+Pkwfi>{BhpD% z6ka3dPld<R8r+?er@7~Dozo<5WjsPPS2Q$In)fVBex&P46`0hwPvt$<@?NsVoC;$o zWQb*UQ-*T*5=uu`X`fsmWov|P<`0nq?TM>8;gSiVd{2X{BDO0+I8}B*dSbm|H1Ru0 z+1M!UR*(J#6)%q{QI6mlx=RjX9F}aV-HQ)Ld8I_JGwA*+fsBaqhw-g5jmQU3dCAN8 zkn%W;D?Ec7PbxAk(wy$3s>LCvif(Q>QPM|-Acp29Y0mpCTOIgZA$I(-Aod5)<ZjHP z`;m@Lr-^XPyw2P4bS!Q6<7e^8HlAl5JF^l4&@!>C)wFHgn6kP2qmk<NXi*V<Lb0t9 z_|F$8YoH(*{2KIioh9)`gPuOl)?s9z(J*4LjlGk002Q4eGgCLiY@6~OR(?ELg|MWM z8yGg#+R!U6FR#O0JMXjnmSp^KR8ZpZ-A(f9fQ$WG`jNeGZXWYft_|Ae8|H0vqe@bZ zbqiI#VmYy1wpW=HFLD1>Yp%pKOi13Dyx+GI*Kl9Y8?!zS58v<Q*^=De&(}uXr(WL& z^{>fm#|~N7wA%e#-O=qu-#6W{-aVedkI^$byhr85hwZOvzV}eNPn*t_-q+4&+!48_ zhx?O}r}t&kUP!!yl@l0W)}D<MtUO)8=Z~|Or<*mON6Xi2FBD!z_W`%iXYO;$cgn8Z z6U>|xUxlwjw(ZaV9U}Qxq~!mU(kY+(`oYA(T=;i%&tJsOKR6#IdY1o0_b~o@_P@|Q zO#cSm{I}>HMi$n8(mj92_WTFQ^VgZaQJ?>n<iY=Yk_YlXlRU!%Vz7Sr@Bn>ZUyG8q zrwH^6=*F%PWTZ$n$FqRiAIE0>fHq;D<(b;bM(LVO<kNh$XKaHNZ?aoP`x=n<0f&9< zRBu_L(yRyTJ%6W~zr_7ZHEeAEq@I}nPA~sXEB&|h!p!(jwXlA#|L@S#e<<d!Q~iHb z%tgOH!1rSi{~>>S2Sf$%uf+fIm<2dx+mZY`vHz0hFR_`K{-@agz^(qu`_I*Zf#Ki2 z;D1Z({}b#*Cv5CuZfGp#pzr>7$k%^eGa&zK#w$x(99aR`Z??mDowA)GrBW@Zm?9kc z$3vZ}CVY@lN^oTjX-Wy|h9R8@rl_2VwYW&}qoJ_soKTW7974aOu^PI#aY+D#Wewj; zB3Ja|>XZ$W<K>iW*42FHj8_)V({D!;VSoVqCZgYj{OG;-_l}VeeZ+0><q0<Vv2U5w z0KjCz9Q=oJ`N5bABJ<UG#|gBDv?8fa6s{B);;ByMjPidyog`8d{0pQyk~)*1i=;Z5 zmcNV4D;54Kx>Wow{7%Hvk^rA1&|0%ZeD7W$6&#h|@9@3sD3a2oId(czv{TbpCBdjm zt5PNDFw>e>ahi8t6Lqan{f?FZ!MSj;U86`Kq|c0XU$g<^v0$xRp)u>%_Xa%|Z(|z} z!4qh(o)Q@2#8>kD(TP@Xfj>ENZM|J(^y`WigTM77X{3fHl-FuwMoo2b2$-qo@{HMJ z5N<RTi>v0M)ySoB{k*<spR3>^6g{Mn<Y;sHOLm4`M@Egr8gV8Qxou2GLHoNi3aIz6 zex!rUOP9#rb_5M2F^#r-*rqiXM;)uI9ZRLnwK+vw-hsWcX#cw)ZCKyIDNzR!;znr< zDTam*(kQr9;aH;<poRXIwcOyMb)>)hQTn=1I4HOFC2=1`N=vnbUsdM%xO^+7yfayq z0TuaD=*03g3O8BXBmlyc)zwtIuDd5l?J+R@w_iR_Hq1Kbc2u7eY@`j#h?!<2!emk; zJxe~|jS@SNj&TETM@nDe!r1PflHhM=%FnFvRCv2+)w=P{Sjes%C<e7Gk9U!oJY_9x z$zNo3g!Q&wZ3%SjzXZ6~PDXx!%NR8tKfK%r%V?W*tSESFMH~7sO>+hqc@jaEirJ!l zJbGk%@eXk?1bP3I5hoh{p3BX_eCHoH4c-ZD4HR*^I2|>`Z5D965n(+YM%NHJbj5U{ zJ?l%jbnX&MS=WsY8OHtSTG><RiwmZ_nP}|^<%&O(_UPH#YHG@dV%fN+VYHTE@0`yv zhm+kR433MK`vZD@QN#24d88NWJ=^2=Wrlb;`s#Cz2@~i0`mpGV+UQ7YmI7zmDaKfg zDu#M-?Pj12m*Rw7=i0n8m6~d3+`E9Xx@~&AHOC~DU=t3VzK#zRymRGhE@}wRB}LKE zGzmdPmaVB>OsGL}jN#%B=*>Y&R(QIk30TyMwJWux9&!7!jojhtJK3?+>@sG*b!6@& zZKQ~%W4|MvgmO*j4EXql{GYG)Tn_jk%!T(oOT78uO~ebkara~8AWft*3v=@~w!X0} zO|#$v{)>kWJdo^M?D%1IMqqka@E`*Jc-?H@V6FU^CHk8t>>n-DKa3hZJ<C5EvA?rv z4F7Lz+P^em|8;})FHq{gn6kGoUn5`uaDXqszmw-bED^&$Ei}X5St5r2N0#XSw>TI5 
z41XD&fxHJfYe>Glf3rP1mY7KZe=FaAm>foqfBstk(J}cKlk*pg{B0BeZ;0=-=7s}y zD}wjv&ncw{SroB0-CGkW`-$>412M^$VY>T-1hYHBMCIb!CywYdYI>qe#U{!kV*``f zdw}Rm(Ol5Sr<2i>Ckgd;HplnXaS2|G>C(=~lhP9tkDFT^>CQKaq3t&u{pd(Ng!)p5 zsjZj<;WS_$QtPR6tK{dw;mymv!NJkS%9BgPSu=t^c}?lE)w=+g)3SvJ?^*rYHAR_S zc1)=Ya-C-<!>%ieGPO20zvQC*ni2TJ_D4L+=ac*+^?qoKcu-}J3oZ7S=2Jw%f=dWa zjIA1Sa_+$7cLz`)nYnFhvYHCYe)U7Y<9zvKzu$X%oK3DEm8Pn{+C$SJO-LA)tk4m_ z;F%>1LojEzOdb*orVBc<NdThg${{B5TMyQhti+nRay&jq5<J>g|CGlw2j?5f%*%uC zddXx<yhRA*lTXjcYdV4X^Q6h*81*o{aWdr1(*5H6dX~AKd6B-_T8-u_bhNd(kKxaj z`ZTq*-;Qy`fS5SgZn&0rzYqE_=G54Ivb8kaln<5o8pb!na3Z)6RF>g}!!j=~n8snp zol{TddkP3TQj%8><hBSLkWtMdGq40m4A<zYsM-goTkq}n*yz}=(x*+=AFl4XrDLxS zNDbJPS?yW@WWxy|S)m?2_gZ%b$e;+`Kl0NE;1#~=PGukz762|y1{}uj7mSjnbQ-F4 z+RttfLxyghS~Er%rNLwP1>qlEr^M*_ru)HJEu#9!dxft4*5&KyKUCg+jYs0{=+i^8 zZZfy?NmIQ`BNPGQazd!KV>E6eJ8!@&a^4I04B%STHL872!3SnPG2JgR(e$=k*EhER zphrV6u+wpIs^aC0qj^1gY1->v+_gWa=r$S+Nn;h8)X7n#Qx6<gS%K)pAm~zTbn0op z`i^CBH#3!C*=@imA;=Hfmu!t#<LM{T^=G1v<|+7SMwDh?b(D9;zSvZMbD%~Y%<qa{ zAybv2ZPcoqWmb;^-7#v>#-kQq5&u_*3=4h+*OY_fRVE;O9vBI0si1t5C}<rx!zOt# z!Win33t6#qqmHW%Dj%E|s;reLdVWz>wR~p1hJ4+h5i1Y2*II<(mgS=Qj7X8Rt~0Rm z<g8SCr!-0HY3k^$9LXVjY6-ES)MT%)8?r-yu<oEj_^BTWHexUEo;TGsxUaKtaH*|q zzV6QMuJxX6-CUk+Za1fUgKL|E55v=g_YW^GB;GN|$DNt2t*$GkPAXu1W&o(T;nK*4 z!n(hUNPhLIk6$2bkMNSreGZbiGSav<KvD$eK&L3z#_KC;P8n!&1I%|3micl{wvHZ{ zoHGr)(bG{+M~?BoQn^pX8%Jc99GaT0;MLHU$XM#nA{@bKMyG6w7}T$C)*6bTiJR29 zjWSad%DHrI#InJqvR#!lX5O8YZgg(f*v!t9u5>=39pTj;e|~M~?XviIH@S8`rNY^z z$GW017+~+u7F=W5K}uu^dU6jTg&F|qqn(^k?8VfDPoGGya8`tD%PBJbQevXcX>vd> zFp(@@>Y1=M2U>?nT)#pqiuUF;-*5n}dJMW7T7l;ex+W~wbkQ<~7$UUkqSi}cIwlz) zf&-wV+PkIPf=`gDlGR`p*2-N)^j{7x6BN;UVhj>K-peP@p@F9+g#XCa#qT*W7KaR0 z`@w}V+U~D(14eVzr5f<wZ|W<ag1T0@Buve(@5R76VN{}Y4Ffo_m$SUZor2|NLixMz zJ!$9Sl~S8%HlJ0%kx2vRZBXimHBdlI<o&+rXggt{;SaCdSit;o*UYv=-zRW^7tmU8 zm)lyv-8t9ZxSJ1s2;etr-QSCl7&dvb6$(^*xlTB+kX~zc+TUI;HNzb2nDwoN?zb=n zixQwP@WhrS>47IH_k8%>Sio+N0^HD*Bf0&+l;Ek445UEfw?x=3K>-FLzzYaUXwC4g ze>E;t5D3!=Va5c}GY`$`UX&_})euB*Zyc*dA?A>Mb((M%_dLXi%EKqwzIXs?HXT%+ zAWl)3Nc=oOgi1&NiFSaeG)hp~Qm%1LOtynwyHx9S<JLi*=jt$e82)*(xKK`5df$e( z2;0AVk!yaQ20V#ytY#fg`v~fsO<Nrj#tEsV<Wd)>UE^BS)%`nMn<}={N7Wl=&bsCJ zkwFO|G>y=XYNlb3&`vihd1xC%Mc@1(wS1Z%W*7LEq6A$FP&Zq1-~w`n(nj06a$bX8 zl1Md93IZm~I=+sZrSKorob5x_@oq|M1OA(GWhB~<^+W1?R~I|~_Q&=n9CU-zQX*cc zHR={gzWvQrD71E?lZ{Ki0_F|rb@Bs=Vx?Yg>TSo*lctOCMDUA(LC#|9garf~s?&07 z#dTB$6Y-5tew+4G%klBjxDGAK9y~_cIz~#*SL~-xE`WdlTogcbKpnUjRIN!3C6ae` zVx@qJln$Uh#A(>^8PC)WNsm>c#$r8df#C~Eyv>+jNV^pCCGHr=wNaVbLwH68j3YF` z0+1brVW^34#c(%(4Q#kqMdbcZg!Tm)ttD2M8EHdR3)$!`@Dm7*$FQoa=rMPNv1%Xr z6KS|AA^KzSjf8sWhzFkXLrEz56j72+YG=iSeFuAH=oA4!Fw)X=%{ykmqI<ED|AVo2 z0IsEJ`*-7H$F_EC+qP}n+_83S+uE_SW81cEJ6XX$&-1?DIp3)|Rp(Sq^~~L~x>uuW zrsvn)*9~2%g(w;j<Vb?k8WEKMKhx)&4tD8pcu+ewr^d)sZ5c2NPnyhLMeN9eT{v%1 zkA9OkTNDs7T1eSUseCt!J62e(rG%8rsK11}r<TV6BdSqn3Ex^0X46R&7h>)_LR_8Z zBMNmrPk3I~7hK*?!)FZN7#0YHq5V@QeN!2q`+T4op78m?VN(nuxrH|@nPP+QZ}JbC zM4HDUR--(WixF0$gi2MjX*EeT<NkFLX%~`6IFbs&Cdm{HQq2p^(&K&bSc&=<MUqJK zv-)$SWRqKQNp$@L8n%SWkQFjb?_iTU-UL;|Op-``p?VEb8PH&=@8kz$qg2q^ah21N z07&z4D3^4^AcaUmd|&<QRk0HkOWozPhq%(HT1QfmEXvyogQ7^A;Kf0O86kDM;lVh! 
zSacbrZv3|u6pVgFN}DoNG!#{QEJ=#Xz}V1Q^HMvJ`&x)ip+e`5e7$`zBGyaS3Rgde zkS6r<%GniO)s?tXj2?pvd=z1PRUl;oB;HHM-hAjC8wyt^!A}*w>v4J_i(rQnHDFG^ z<p$^PcW!70u1}Eql48tXl?cA3$6Yg;B%@jM$NNybIEf3|D95s3Js?JteD%g2sUv&K zVV4HTxv|xI2##yuh$rr7gG^n8onNT@FgsR2gh%ePs6MN)N~;Vg{1pEAIhDz|*-tP( zHYr$b&)15HynZC{(Lb<6_051-XAbJY14)|v(vF9y3tO=O?zj}U{GAqkpUsKaCVb~j zM@KVX82Q!N><g2uMU&;(|7PeBwl9QkNZuVK2!snWA!2``)6;53e0FRe7zuMwJfzYy ziAX}j*iE+E(EwAK;>7lbqMHZXHwQdcB_Z}BsQyokQ!UL2cHoU_-yo1bf>#CAj<7wZ z$n|aaq@2`G3R!)sa%MkZ@?RZr&g|}-q!4mGApUYrlTBoq5mXI#wp4sEia>Pb14h-J z<dCBxJXvgLWt<$LHddI~I!3n3R`a+;zf{OAm2d5C6fu+3Y}1TUXBq>{)ZY|3aew-G z#ih(2X?lIyes!qUrtEOnaQzvGevdYv7T=*`H0Z6e@LOdrk2bMFLomxx>bjr6DY?0v zVXKcCw}-W5uGpdml&Wwi$N%d5VlvlHZA-1tX7}NRr&cV2wUsY^h7dJHp~mo{^0~{; z2RT6Rx-5IYOS-3}HZ~JqA}niRiFqyYmB)xoWtCF9;V=_Sl-f|JzCl4Q-r<&;CsPo7 zUm6z>Dh{57n>~ikkd9j+Ts7lov8XQ6$s>Z;44ly%J=fOIL+@(5B2BH>_yp)Z%b7$_ zv56Il)HVNZId!#<?6e)tL(yHM*hKVkDVqNae^CMQmK>!-lK=NK2Ev9ag%?J){AVay zMv_#BRW50?4H8|8J|h_K)!CoM+G-WpST^LR1DEe{nV>rzTi?=;)IY=lBFfCaMwDT1 zHKJ(>f#}+bf25P6Aqas#M2$NM2USR>wLLD60rdr&lmmI96e?S5-IL|(=VgQShY)E1 zKbZ6*<*N5pKKulY6PB*&D?kABKR|0%z{(2co_?#aNGCFvKob41wa5iJhxdFy;Fytn zE+NFAL%bgO6lwLetTW;8T7(rrp?{r_IhKXsD!1#K<DJoZ9Hv4mw<Foc_|ielRsM;1 zRG>s=a;jDo0;b?PrdmCsh?5na`fJ}1`rYAwEDT%eIpgx^x7@Sc_87k`iLI!Axht@` z$RB>o(E%bMT(ATq+7Ny7DcDyWZ)c*>cuL@74N2&Z_u*1+Wh2#%)P~|zq0$bSrwwd6 zEAaqWbQwN!V|U0h;Uw!S_DDO-KMN0o`QJ3ZqRNOtGwia+E@Ps7HN1)phg(Uhv1gXL zm3m<6uuy-Wr6ohw=II*Nb{CMercp`|7~4yMFnhmnNTzE7IzC*#y9-Xw5I_ZQX3Q7W zP$CTEiTWx0twsLEO!j?bXv25~K-h_9$#2^+wxuW92syz9c@`X)ZZ0L6*!|+3&44!n zQg>|9<$~GQngMkUx#m<FgHuA}ea|zoz4o{c{2|`egU57IrQ|$`e~Fp`P;9~!7>cGe zX9s~%|1-o0h?d_)#6zG~t(m1^u1cCXvxN8uS4+qVIwl!u{Td-U(^m9fNnRBD%CL6& zlQ6Gvp38W?YVV*$73OgRwjv4Sb33K3b)_x_GD<&$x&s)ZmLl)wl6QJN$p@E{`4}s# zrqdpwe%x4c)yxavgQs3KM`NsvK&&C-(O;W6r7JP2BQL)VjBi6Axs*EM7?<|*Imo?* zWdiTud^8(?#ri|<9a`N-ZT+4!iNY0@YKRkdje}io)AYo2q8~Y0LLOudpN$3KD{M4I z+)YZZ5e%$v83O5B)i^hL*yTNv<{=viYTa4_6bFlO7bQwCsm#6{K$9t-5ZEbhGCGy< zD~-@shhk5;nZ`mNkk;hpei<x5!DVP)R?2M`jiwGkk@Q3!pH#4H>qQ23Nj4OiJ_QX% zh1G_d9y95597rB#O6u-)j*S&mPDEpbRxtU|r6nvTuaB%eS+bVp@nj5Q$jsh<0R@1$ z=l;J55C0eYi-Yrj2`T>@{q-O9SK(i-mx7_wH-n3ik@>%LE+tcEdsio8Q|E7B7sr3T z+=cDGkzRH#|HS;u3WKKq_WEvVC}i(WsQphWc21^m;5n1-Kg6#8A&~u>_J7o81t)uB zWm6YIZ9;kl5wUO7n5p|WVeDV9n9zUE!v8%>66*dx^!0y~1Yu`J=>Lc$e0+rTqV6u@ z$}a!V#s0Y`?#%RE_W!Ed;?B(9#(yU##GM%l{}o)6zRUG3KYUl>UnyLSg^q!c>A%o3 zF~)D>ztt3D`o8`jfn1FFALGASiLrhg|H=1T0Q$cnY5x>J(w2$!`-lG({=Y4@Z|k4N z{+2wxt?vRc{kLVmEtYTV-#ounaNF<VG5zPSzw`WGP4{o)|1{nIP}-RPKQu2fdnem} zD(PhVZ|5?8PtJVz5A?tN!^p(R{#`L9=I^HcU){^}|5KQW>Xe?l<ftxQU8a!Z#!=kM zv{{C-g0)Pk;)rhK_G{P!A>HkND%Hla+m(A?)7C~n9xyiC_4|7D`+UFV|62a~&_A61 zdavPc{<zowa{i(&;rIJ0;%O86;wwzLsvmAfxLEmmBk;TO`^#PBJAIDxy6w{E_@%iy z<*pw6=_3Z<F^+v!Bv_sOdgQOZHd-q8Gt>HNyVk}qHR_6exfZWfyinxX?!>P%;a_W7 z`K{sjP4?Z}TiJV1p5$-XRk4nwjkrgf|Gds!wI8t@Kjd8{cEg?4K6)O{<wQR4AufC$ z!8=}WW<rm-_DaXP>q=iGygtu<>1?cVBjwj$TW+R`xjxqfz4NVNzoP$3yZ@%N%~VZ4 zNqhgil$-3}$7zq%ojRU!>*bOAl@9i+i>E1SdmP}6HQjI#o_xzQ#;4zVo$S-4^x^Hr zR{eXs0ssKtt^vGJ<(O}7*SP1nZtH(90&42szVTlB{0O;j{_w``+J1UufA`()UQWra zx!&IQJ_9^+UHKK!?t2T5!yn_k@l3HlA9Hpe@$nxoZ{!kiY_Bad-*`W3X&S%1KE3Ow z`2m98HJJssa)-S){8pTTJ8GWOb5lnF&m~`abMI-OcU@cI+w%bZPw#{+_9N#xfbNS= zr~bsXO2s!(n9^s*X*}2F*jg4Fc-TDY@e{qe=D85nrXvRA<vDqdV%q)7=b;daC5L_J zkOB<HEoxa247<kZwS^$qVE-}to-dmc;yd<O5fS^^wdU4z|97`r)aCvB5T~II<JI(w zRO`m)vr^1m-R+HIC*zR$6Bl5JRv*ur@9&#=V+%^rb^JOV>s@(<l$ZURn6qW3)bf_) zMfjsz#+T0_{(=nw@mQi&r)?`3>qcNkGEZU0Ro)-K7{62hE6H@;&~Ucz!5i_HH=khD zbu$BUjr<+-dS9N_=D5oN(jjtlxS|8!X!5$ymeYoqShk|?!rr3|b>RBRmYUeKp*EI^ zwdFR!V%n(VIu>)Vt+_zehR62w9$RCL-(xN9B+hetbFs(1Z^O|BUQ^lVaeFGT4aRzt 
z*$`mU$~wf>%Dx|by5hpNWxy7eY%JyZ+IgKaTP(uXy1`L9YnG;rN%_W$)yFI>yJieF ze0}TSt&}(ePuq4X{Cs_VC(E6{HkZIkdwuWVecQ^F&Mxx%<{FuO8n9A>4uY=PkFIUg z5C`q#RmWjlX2Va*^&%`LF&5#FuGsVKa*xAYTNYsgnRHVAb&BQ0rrD_1YTL#QN1Q>= z2HQploXur@sSPoh=EY80G`qq>X6FlUrb6hZs)XOGi4!sf9W@B&>rImp=giC$-?@)4 zWjb_ze8xXNs<MwO5#)L|ji63cv#iDP6!5tE+N?MD-f{Bz9-x_a)(|$kccFFXUOx~o z6SRJe@Mg(7(auJdx_z$3Vn4SmTT^*aU6g!0%TKCXG9fnERrGH!b}e4j5M7fm`rKD~ z8LBq>0X`jMb92_HPGa>QpO&9M%hsZEa~rHN*M^@nUjR`L`{qeoitOe!`b|WvbK7I= zQrD%E_76T&zC&V9<K_$1ZV!00+p}|BvmY^cFJjy52_I`MHUys43pUZr<>p}Aa%zp* z(Fr=~lB^T<XHwYPkqay?tW@jk>~*arY4(l@I)>9uy^*z}fX?^TRu<NHU8E6DH&$b* z6J;l9s%g9<ncx<xPL9^$YVSjM=ZcP+9fhOZXu`G%FYUdiPPb0@V?uq0;^Ka@;3+J_ z@$t2vA1Z79yUT~ZKe6K)vyA|YSK*tQnfsUgMV$yu3pqxhf0lE+j9*Pho`U%23bN~( z^8;+_r>zZ0ufUmhDlRQqYRTo(1$<q)JAlQV7Oe$(FF%YHuHB05Lin?2tf$-}X^h9% zGqbL(-3dv}@jiSd!~i$h^!EKmMb}UFzH0p?veh5hU6V(h<NZ{VUX%#2Xa`mE2J=T% z*lqT&9yY{&qO6D3G}ni-A9csP(}s4(s(jWP10KB^^y}7Ve4^uQK89zhs4Uj2Ax6(X zbeuZd*3(JPOLIN64Lbqp%|vH>>(YygU-@m-SN7d3cvW;~?c0~B)rFt!KH!cQ6?f^% zO;e|n^v3c<S4}Rn4Zlmx4}n7q%nv=yuQ~mGbNp^`^?Rn&YF3nUoUR~Ts+~QV6{Km4 z+S`LNl2u3a;%JK=CtRYKB7DM?TXZeI*n=j(zbzN*D95U?TKDcKbQP})32i_h{Dr~y z^oR2YqiI)RiWs8v5sa;ejys_0qL7b01HW=QOU<>hg@fZqCa!I6K^5H8@t?=@{IT!A zIzQTHL0t;ROYfD-s=*<dbyCu;CNn3wX2$oL{8Z}i_GRDM)#=gNbw<#Y<@4o8@9dSP zZ=+wc#Hb5K@$YxH=YA|Zmq)7HH9w1WHHW%qoR$zsrgoNYZbWTrn_2#j&B)!WzOH53 zxAfzs%PZJlmcH9_%`ruG+tA4we)>BE7cl#JUYQQ_B*7A+aq1V!ruHCSpj32L{BPAB z`h8m+?-uc)m)naCcN|+Q)Gj?FR!%h^<yxJ8?7`M&w{fnX(VKF+-HtEzny}tyFFz6u zzkEcOUBoTzo7Q1(lrJH<H<$3dmecTEF}-@_^t+9xY<+v85~VMeJA?>*D|dhputf2f zaIJO2%BQ)(W=o9T_$H6=yca3edzu$KreVz1nv_p0<bvf5chYg708Gqjyz@;&o4+&e zkUlJBM6YmSs2HLP109cO^(ux7LB-7(Jkj=ZIU`!iU8|D&6!jC<tqM13Zy85&ci%$* zW?w$M{hwK*Al%!vZw}u53EnH-95rqhF@wyN)0>(-`*=<3p@X599t_ZWI@h(^+xlKl zhCVqhgZI<zD|+*n{dXM<qGZ+fD}0a@9|R&?hj?V|O`X5KtlTy8PPVW0b4zc}=}7%l zM)9rb_&VglUR-mYZt5wr2fcHf^JnOYTJSBZ^)Iw`y@H0DJ<VJ;MV>ty-f5Z#?xD*- zO+674RKY-zdJ;}Oijn7upzR4GmEKZa7#+Xt?hS&JAXr;qU|BVOkI)MTAc2Wlf9-Mo zD#z**l0xs5Rh$8{ldAY>Ry;9#`BQ`xKQ8Q?5^UxPHZzP}x@2$mQu3M0mTU3X^6^cf zS3`!~B2{Cb!fx0P`LdOxfST1Z$HRe{&~>?EktAUc_3e{-rIOc3K{LzAsKUT?A+;@J z-_e}#`=MFVDnPcC{P=DI-8Chj4)1Y7tJ@(m50A4yk6>~n$mmVzgYJv)-Pb$QSsr)V zU+NfpJyR^yuKu*<gIHes$$&7ot6sOH`-h=n*YXdDU8y6af>X_yKkGSn#Ua$69r~*l z`V{o^s~Emm>nu2~J%0;Y743pwH?`NsKPEwR$W>V0T`^koyVNm)cYKObv532p(Y^zR z1IylETCd<3*yZx~0CDXz6DDE7x?ReHIS6F}(+bm<c2R_-=4~dX$u_2;W4Ln_&jdTr z6wVXe)MQJB6jEoWnR)CSiW)Ztwu(D){J1ivjSZ%%jA_*4RNOZF<uP-w&FKgu+J`>t zY(X2Yx54nq)H?}<ORlpkgIvL+`va=dn9(G)WXRY;X7?}G!9wes30b`|dz8x<apf4J zL)OfDFvsb(XJ6NH+#Ip-=;`vqn-eWb#Y%m5;gW=cn<SCMf|kJjVD%-d$h~*HW;s$( z$9`JsES$+cD#T^>_z7C+ZI=16gVc>)OC0<+tD`?)1n~E*lF{zSb*l%@);a8{Vws!~ z(dQEI+wEg6RFqiiN4D#08VR<+VJeAJj*45mH>GCXFc@B`{0HR%r6d#)`12XM6%%e* zc8*NX{YlCXh{oGyQ#YuQJkk~oIm;#IF6Bgv$0bz3r?b^%O1l<YD&@MFVf(RdAaS(d z><hYWbhbz3UFVl>?!{7tzT7b$#a$(7>St_9%WS*ts=0>N=gb#@y7n{dlhw8$zD;`R z^cDGpb<<DzrM2?2-@$a-&w+UA1K#i^EdRpD+M0ZxDti=S$gE|Bltlu7f)ZMn(~sLD zL&}9~i05^H@O7=3mS7<Sj^q_RO=+i}-K|$(e?q>mN~=7(b!ai)CiUNm<D#05EBhvd z!%1(H`16hvF7=R?&{|=4m$7h9cuy!v6$h7DhvfBWn*Q2%oQ|9K)`V2fLoiOfS$d^b zuG~cNi-#Rx1#DvX6ydTI#@`$$n;)#&sE*~L_C^#K`|AT70-5Wfi=q2Cf3(drPNy@e zxcaa9AK~PEPhmeyR^`>EMs3nQY97M}dHQ`z_-m|9r*aOP`NXd#KXspPuiGYuK7VSO ztjA7GoK6zTb4i8`$eabI#hJ>98=E!GX>HsUan;L|5p&axCow6;)9NJ)o1tbfj#iT= zkYvuDq<a9z8N6aa{lMPzmr!^awec}I0+;Z7M(56rDh|4qRwge>Z>;D)A7f&*Kgzyi z6L`RHcC?T8|FAwne+L|!cEEeG+Updyex!H45Deo|zMo$QMj1=#$LA8p610j+^dOk* zU?@3I2jdY!^2Y^X`OIr;iVI;6MZ`hU>Ie}Pq1+h2m8GRY+vIYeBgW{5iAPFE3h`)t z1Oyey+vAUnLDOB+W>1I|6Ve2gN(7_KnZ{uV86>URAQ1^mM6rc+`4X}b3psd3BWB~r zmjZKEUWSb*kkWz{k^8MLJ_-eeiKtE`N~i{+PWmTh2iaT(GjQs|7O?mdN|Wj#Y<FMr 
zPZFdrH}4&TbMC#$T;0ANg>_^rfIW3d#a=gjc3k^U;1P(N1yZn1baU<BFVf+~wNiEw zj0P$E#f{%wTM8do?2<EaJ4afripE#OnEdj;V8fagM#3j*M`JsFR~s>psomOobXZys zi=ze)2zyXN4MS|X4SZPI@-pU|&VA(84$}DH)AQIZ6=sJ8MipIJGl!7Cv8}j$i27*w zDTv;#e*+W0*2ZCr*U<_2aXsDOLH82jr;imDq)J~DJLOmqE1Sdb1c-ND!R4NvBl~Xg z_Yt=wZbOOa(+*;1X8oVSfDy_ue<bLog&*q)G8ze^q+{vb&3H$!n5iaR%BF(A4a~2f zoW7|BJS@*k1h2;&!~<l<K;u^KUE(2*KU=?WLu1%lk(!bZv&383vgGfk9kJQhHElgU zMp(yK)MToA-L0PA;`eXcmSS}r`9|G%_yDn41-Lo62e{MA<~&<|)g@o=dH_y>f?@H` zSMQJSgVXP$b*p~wb!_@089B#|_WOzb5^wEJ@a^N^pwai>llbc$--d89)^nOc|7_Fu z0QUfStL6Y{N?q-P^Yi|rweOv-;cJAnGM4Gu-FxDn0r;zU*Z!;TlJ+V8_VqtU_qNy! zB;>t&h-;Tt(ZU-X!MgW(!JSFBO`1NO+FeK2uDYb+U5S#k7T-mU5q%jC8m5<fcb7sP zb_DBlYeLpvR=~XvAT#?R>WrN$cb;{VZH^VA!ltFhh;Cc!`=gDq^}+cszbis<C%DaE zYBJT*3+mr(wBPNlFcuycs)}YbaukguP2@^KvKng*OyD=ZaIAVCpz&Qh0Box!Ck<DS zY3Ic;rmDY?4Bob^8e@OOYrXJh=PD)aN9$`ka&58Heor<Z&uH{76}b{~bk;%GYMVvp zsHCP`_ug62_^za=6&7<gR_werNpA^FE<Mqlde}@ywr9}Ph%tOLbznF4RIPAbi;s|8 zVAl_L)(&J8WXAoi7{;!tJ-_|}s}KNc$L5`fucAe@5U&$wYE*{Fv8&Yzx;aI<fY+Eq zzYmpe!@uD+a2JPHQyzgzQ_smoeQknLZEwrHd2UuFT!<tLSHo%4*KZ2ycy0M-kn`f; z)1OYC-5c^T&AkEhio-@gH@?e@z#LmB_vAe0!`tlb)9+L7i>lj2i(=95!})RYr?Li* zw<%fY>QMCIIER4YD;e+_UZw2BeY`0}+U&g5N%+sD_0MKyrKYch*m^W?{$`9;zr&}6 z=1T+~D)(){>SL#eJbc?qNY!pG_C;S_q(AJwOvB0J?nrN~ECea+Z%OxehJLdEPCtRq z_tNuL`+L=8D#Py)Q}^zN4Hn>UN3t{aky|O=0c=g<TgBnymm5ba!Ut%BcmFN{<~3wB z%RB5Q|I7#M=Jd>x8|Sq3YAT&~k8@_34|{G7Ms2Q~r(Tb*cbO;JE?}V@I6oI74xeG9 zQr+{{P?hN%pfnM>zx!>#8-JmzCTZHd***<kWI!mzvLj!#YS0|H=(r4_S9C~wF+OK) zu=>W#GxTYuM6@NRHz#aa(B((q`vvrLf;+}}L(_S4)3AC$*A**T{LI=`d-<iM-xp7E z(tQ9`2Y8~nhPOL^fAGt^63416Y52@2SrbJH1;~1~kZe`*2HS74Fq3#FQ}P;Yuh8(8 z^0AqA)w-T{wmPJkx*c4taF}W<h?Q|@aQ^x(1$mQ0hO6fT%#0+oD2bxQF`!sF9^9J~ zx6%RRMjsCdJ!+5Fr4(>~X8@;mohLLQ641=N2@qpNPmfPw<wQc}P`RV%gD7IckGxPL zYk^-BD(paI_LZ;G=oAmil<*L>A$@W1d<n?eS4ficQ_v!kh7<B!IIPH;NQHasrTz*K zJtj<_e;%2cZ=QjJ)0-jEMr))bq~uqBFbh3I*CwzF5V1G-iDIq&1(En(({W*K;2e=q z4O5Ev)6Kt|UdWM4f&|qAy}t;=Ft><L2e)zqap!aL*O|&##tVRTBNWiPE^73r5vn#4 z`FCdw(0!Sdf|pE5IU8ufm79o3Xl6agWOu<&Zch%Xl}Pyps~e&)-mt#b7HYB`?${V~ zkwf2G;jvc%m5AX&AlombW<nPTyV_#p!AIUKNQVG((jf306KY_7Q%vZHMEYXZVLe?0 z8Y7@nh4e|bo&G2Z$Xp!RbO*OUxHSe~TTFejjh!Fi4O2ZZC^Juwhokz9AUmfZGPH!7 z80PK0C~+znJiUROy7F@va?Q}DK--X!8A0M{_?4Gxh_2jP`A7k@8t-5!y3k{+wF-9> z%@|sSATjx++&36fV?YkGJi7Kqq(HUokk0xcwdQb*d2l_*PkN<4+7V;W2%dJj<VX=) zhi}_lvP03{EG919KzZPQKbF)8_7cURpEWQ^(I<f=$oR+&9e`&5tx93wA}>NwgKYrL zHpaoqdgMx&fGz@sGz?O4n~KfQfB2J_YLFq3g5|s}mo*lHu;kOC<%O$^*fYV3=oOw| zxr4@s-hrmk{dS`9CkuKP3+Qb+)>O0PA&hK^1)m8T%LLBgU@)1k$X%1e`Qzy${AK+# zeS|iDJE_4M1E}Mxhh}COmpl=pE8+#e%Td@9Lf$E`V{U+A(y@_lBiL_gd`f%l6C{h2 zK3uFZHhZ1|3a>1KuMnf)(N}h**Bbz9CjJOt77WYUo?R?T(kGPopd@A^?k2==GV+tS zh<!%^D69Ms2*5XXMVG4>o~jwN`?xI(y7b=oWOo?aZaO~$ye{1KV#jEda+KSfDN!%d z7hgvs-u^5rf(}!uiXxM2UxZLy!SXsI1<NWHqxRSKs$Rg3h%U!il_pjv8bCIl_Ms1f zc3IWqOkQbt2QR2;U{ons;{vtXESLi(RR2+pqc8KurO^(ZwKW&Y?C6Q+-(!2M+5=t^ zT2T`SRM}P5Xv?IQhc5nE5Y)6UCpX_j?g(7euno%yy%*NvY7K3?z478>hMiK+u^dzx zv`3}EG4Z>q(`ATjU2i#Uq3UA%2G?divTjDBOq-^%pSsk2{j4Yt-wx;3prfo%G11+o z(%zn>7IbO9vOrURfl?BHTQz!itK7d<)S?_@7#&r?a_<M6dKC@KG*qk?$bqXi_d3T? 
z+0ci`m^FdCf2*DJY0yN4UPLg|8CyyuxUq_j$M1R9+%K?7>+I=_YELwDyf|B}N?#Lt z#Cb=^aPQxHe;QzSNot)Ks~R!-pWv2UE4Pf2iB;))aBthDwI&eMbuINi7*p=K#1lDx zPFuv3;C>H}|B#6;FR@!~Q{+zb-ZnD7O_^&on1PS1J-%ysQENNnO{*SycFpFxr_8L7 zhZF-wnnG<%Kgm-ItL|MvG}`X7woBrjKg13hqoyTVltrE8;Ix{^%kCPoqVo;;6H(rj zk~NzgCfFXf6x+II-B^6Y@7-9I&)?VBmU6bUBWkjc%Y6{8EwnmYD1O|rpIw)pdV8v^ z2lyB{D+~53`z*ThRs1@;#Hk|BVi$iE>yMZbHLJ~CWImg|&x}~r^#69AE&|>x{}q*z z6To}x+P&MK3^six=08)FC42z+d>@mtWtz^GA&=>FI=RxWW6HDp=kJ$~P^xuJ*8NoK z)w0_3+=m+AFR!)Sjx!~NX@iN6<81LuUA!g@5*>Do%_MtD+f(_Zm}fq!LQ`_3%XD~m zL<$2AXUJ^`@2#e2rOlt5xFwV&G&RO&@>izqmYO>{c@_;yj6qO*of#C)l;lK!yT|R; zNrv5rNKtUB$KHZeM#rr6V3@d<Anhv{XR!LHaTm!ik1cwnY0c!6Ck4XY-V~F}{ge$3 z>DQo1bq{4skH3l0rD`EY{Z$IMv`RTZ)pP3d8gA2m;Fvi@0v^BPB|T3L-i!KkG0oJb zYPyS@4uAMa@$=RlGvT|Yser1T32W7%CDtWi{*Ws>KkW8EPAupOXp^fSf(HI}y(_|9 zt{=P_)g{z)wG5AfCik;H=i&W0Rxj2z86>X(alZhS1nb-cX=A`0)$T0?CwS0cbRq3t z?rS3c7`@cq$}GlrDC^zV{HHm+a#$L)rqlos#d;V=ai1sJDx2km6l`dH><F9Wx~hRi z(KuDT^qg_geEiXG%8a7FAYw+=m19}0HeyE$%}>y2Fo}-5UQMbUHN?w63RRWqG=2Uz z&sqaPX*`8d&u%6g*GqBH*V701rbc8kcKmk{$NK;#kHKww{^fPw7*KJKgIoX>QqTHO z#9igCN7LvzH4!1ybs>`RPodx&9_5igXFkthuyF^t$I{2|sT0TK5g>g0qiAVA7D<p4 zY&)>s(z+-9jA5S)$dvAteb2z`Fs$AYvIbU>E8$snBF+Ed(K^0wxU0ZkqA!E_+a=yp z3%f*(#fn}A4M&(mL}mgO6}|k`@fPaua7wNMul3{>hb`;>$n?95*fk$`BYjeB7lzRZ zWFYa6#rg7Vsh|4Jt)h^P!WZ>I#6|am2iU<E(32nV2a>TJU>K;4_%GtBY*{eWz2P*; zP;GEp#idb^k_x&bVxa|Vb&H=O$RxMIU_7$qI@#|sN=+ZytKl*GTL2LubX3hg%w0EZ zZCL3VI^PJ8^+JpYLX^M`iLgN&FdOINex#g$&f*NLv8c`&37y*eOpz4|-YJ|;Eva}| z#Wn08HZlEbX%HFILITw=yh?C-^0b+vtP;l4E%BFvGL=JyKeL(+t0=B-68C;5WGKPG z!)^z^j_YTB3~bBw1k`QsBqYS~TX0kj#;{ytbSW#SCj}%K16BtV6%Mv#!mZ|`DgQz- zuzIyqBXXHOC|b#!W_2!twz~;7d1_0+LSA`ixFW`pH5bfI{hmjt<7A)KTd~KUr+M_~ zK;C=!sp^Jx&|UzkO*6(VO=Jwf^Z{Sg=h+K*tcaMu6*c+!(!=uCi(LaOSgsE2#j*4% zkh-aKtL*S2O-0%LdUFIf2CelO*ma8X<2XgQf!C|kd-WK|tuSXDld)yrPPtN~QC_fS z{Hz>2xw~>|T{pK^xJqEfUaJsz4@(FZx#H}`jmk<^U=Xu1miltvd_Ce+O95{1`A*p= zL-!~{u^<l-8!MI$u)Hipv-n|F;@D6*Y}YVfF|NQ?LGyK+t5$eSZfkJFZ_2?P2G&8i zk%$IeD--@d66`#lr9J&lq}+qsyMLjSi?$6GkkFt+9J9CbbZJbA*!!^D?4wqXVn{gI zv0V9n0aAU3Wrc~49d!;m%!@v-b}ct<XR+-0`8K-tV$RW&IU-sOjmWwY|C}I2=9W8f z5+JFFrb``#(MB}qQhn>L1vAcurOdz2ZH;Bd^3=~ZSj7%*(AQ5yexAc>Z^Y%u#2=1+ z1E#}_zHl?%-HH;8fG>BBc#7V<_24FdEznvwB@R)|Ghni=zzL&$+NojcqhpGws)nsC z%vto8DqxYtA~}2buUoV$JW>IM)t|#Amd8foo1`fZk)&!20c8xCN&0R;#F+Oh0=`b( zg9ppXz+3Uv6OBgE^RTX`L!3<MB0kn~w1XK!;q9}jCT7-#er}5mY9`j5DW6;(JH17Y zFs?5|s+4u^UH&(=zC1Q5CRVxnw@bU`Grh$Er)r?NZHl9uzgw*B7z*`3+_Fx9(xUZ? zY!saE-{GY;5sqGAG|Af-)ntw;6t)(kxB3zfoOl%H==q(0%@M=}%j=P?08f-XR7fSd z%J71h3mmrfP_HN(^j(BpJ(9FK%Ek12E<+=sIE2WGJWUNu&|o`^i`yu|YQUOk#%lDH z#W~HZlFrDGY>KrIGeC|7{30T6|KKJ8!7;!C%>Lxchg?^4zpy3RFTE@2QxA~{yNjm? zIpZ9VeR#rAD)+JjN~}(w_Z$f<kdtbtHssU>8@6|tUV#9K<juuFzsR|SLa?N5M4lm? 
ztos4jWO_0oTFRrUZ-pp%(QP5YS>8eKomqJ@EkYmcTil$(6KTdu^0gz0NJ=ZCpt`?D z7DvTfm>0U4zS|&dw#nBy4|~ZUst=75{5m6OCQl6h+4Ys2D<yIGiKcw&nh!s>Jo~tG zxR~44nNfgy2G&34d4PKk@}<bW;(L(GHJ~8QAvAG0!tWaW{2p&znb={hYibN@U~d-9 z7^Etcoo^%&MVWzn#-(aLa({WsqLi0uJ~$lj95&i4FgDYTdHOgSrTMo8{x<rSy(<@s zS_QSi%7&id8gXA0KL7T$f{1hw6Ww^^w;THrlo&i3YlP+E45@i=S`!r{CaAnuZgt`% zkW^S1jI|P0!&&^D9&<;%k%_jQJ>K5qKu;QqLR*MyD3!kfxDs+u&~yVE5ijhHlJo_W z$~`P2gPK4YCMY7@IMZSfKo$Py=1+*BHi5w&|0)hknqRcY=!SLZZ77U&heA<7gFR9w zdn~~oaA-WHJ8xhLnDJgj2bHimEJ9u|ORr^pr7i=FDw@M<p2_WXKH(AjYvR(k3$L5^ zY-i2n7HVf7YRbH>zR=OUquips@LIhfdL>6wbIJ0fxe00Cfl{WtuR^gAWW9l7WyzcT z&q`4JTCIi>L<P!uP*VoGQ1WYcoSt1@VoiCYJ2r!UcjY?BON0DM@^H$&Nhc}dh4mwU z!pgbo4#UcKAR@WF=cKYymuuM=DvYinoO-UfAJRlMPh=_TopK6iMVhM7Y0HuV)T5q# za?gpex>6I<i5+zEZNJK<%yUTGy6<?xuip<`(Wtpbw-z&Q;d7wA7e~XXS1U}vi|M8t zMWQ8>o7<E<*X3Zwjljyk9OIc1N2+U>^R^z7G7K@7FH6d5kNIgx@peAyKtFaAzMC{G zf20`E7BA6WciNFofy*+M1{4pL^k9Z|xE_{3TbGlJDxE?vcN9LDtii1~r0EZLQArO> z-wMf@UCZbAKCyf7tD3oDnrQn0Q8sj<X-AYCn05S>8ppvmSi-<!7>B`7a5)fwGV?VA zSC&7)IxH!-zM}k!IRh0@2-^2YfmNB~%NsT9Qe0hQK0~M#tOnp~R!w;uAQ0*Kik||D z!d8Ie#D@A3qU={-<3OZyZ1Rjf##}wydty(QdKX?>;33c$Tfp-Yf7p5W7n#RI(p*e< zajvW)#usi(zhQb>HJORP{#r`n=$#T;KW?8oLqXB-;Q4gRrebnHuc^MKvD3ZRyp5pZ zAuj&B_P{%;4h&w%CRofbaeN5RQSjhOo)EvMI8(&OFMm5JWgJ$Ovqur^odfV~Df;+p z>{Ht>;m{yhQ1JTGcIrR!atM|c;-igo_0Ua*fhH<2T=aAen7qdv1!gJeTvBZC_E$Ug za4We&b|fAbrk5lcc^Ha`J62rEFe=0x-2)fO@|w@zK;z=y46^hDXUk8&*0M?$uj3-` zoLrzrSmGOm{LVHfPT#jpWd_gVI2V?0{y9>vJ0fiAIn^aeDWJU}pY6G4aZIF=!z}~- z0dl}tC4{s0HgL|%wdeM8sU`z3%ZGZ&gkCtZk~U3S=@1wufBr<-gs|<NUCX!=^F%@k z5h;)nSwQx6gJ}W-e)5D(*+c;m1AO{d-CL-8_XJPG&=Zkjk~9MNvTTMv_7D2ti!i0$ zVy6WmCDFKc?QgHximICv8{Ic!0%`{`*p8Wofx?xSCs|4X-1AJ%<@_~jldTGtF_}$) z1pxRj{p;nzkwVA<h<J36c@`}VN~OtQm56<9wPnRQ4VSq(h7VLM-biJ^*+4ryrW`qE ziq4rQR1_V^<MOa$%_rzze8inO4XYBx*|vMWs$%S2Vebn%Hj75P6L%E&Xtua3sRb$G zf{&W`#fzqa8AQsfBZ|(OG&2AZ;+jHuFM`+zm6EW#ymx)owMlqed7d4-&Dc9toqY<E zVBwgAD=oA?EokzbV>&C3ZYubyH4>AGU(mZ9g`YMk5x3b@!gU31F8nx=TGm;UVn~&s zIp+{-^axNs_VV(I^Q)yIy6#Pk@l9RCwvppvud6R>-bh_kf4Io5pGs;PETPTxDnZ&U zLHu^^8g|Ro&TWQ?8hoZU^d_9F?>z#o)Sb4h&ObcnWw|EULfN)7iBJLQoKNkSK%3{F z+tzDJ+CDX_W=7iK%uQGb;yvreP3-fR_`jIsDHE%k2xY2m(FGsEkkxk(+d4D5gdPPI zS~p#Ep{@n>UFQ?Sh}7TwB395?49dc{3JUB9qZJ9OWWf5V>fYEI6#M^B#zHl+nSu3t zmg>%OfdFJ<AHg@v5xAV82!GCMRZ+%Qql@rtMi634S5fWV0+It3&4Fgzv)8Cz?x_BN zbU11h4;gfUo8p}fD+#JopAa#b2~j0<Hr0~nO{ne=7K2r|8Q>13D~)<b0)<ShMTk)x znzihQRj)tlS{P)v2Aik^moJ+dWHQ_wn3O|L(EJjwd{II*4_OzNN89Cyf0PPbP@TI& z12-5IjF(Tw-jnr&nM7$2riHcle%>-V#<8Jk%q(9rh^X}Vc8KOmyR^wVl_f+`OoqHa zKW=GK&HMxB4xs{Bt;}1CIzJ)K&}k4M{zTygk<m309=;kC3?bRpdZd&BHB2XqD8SYc zj!1zL;uONCxK=-RVlco~wv}UMt<)%SHC#{9sc=CjFk#gbG;eYX6d{Yrbt1jg9u1mO z+xX@=Oi=%)OXWv%3F7$m1hUCcaqP+J57yQ@io;qynCL&)4*vb_F67Kx{mnl-EJjkt zWQEyO@v#X5oF?NvRWS%j%*>&ENKGDyBDsxwUnims1Y0dZf3+u@1For7CDz*;c(mi- zb|6fL3VFQ5P<JcyvGtQlN{4fnT<T^rfzVnRC<B2~WZv_<`4N++@iD<r{jl&UvWZo6 z#zBSV^nh_$!gGwI^ls3{hdq3=p%7<)q2XkAG}U%Q7$|1oC=`fc4Xc5a7~<m3sRIdL zhe}G(c7o1xDj(Qb5~rU`b<W{YP*kSeBtgYI^wtcNb6hl+KyW@pAPobdhZGxsJ9m%F zjiR+z_IZOJU<{&nQDY(v94($Mg9`LtM+Bi^Q2vIEh@2TQjUK_pFzb=@KN53S14+mW zC<k(LzaUoFz|lack#V5yn8Cu1qk7`H#>_$hI)GxW1xW-7v&(<ZB9(`bWDgDp`jHPD zca0A`CPNoBLplvK<uhoaq%R{^Rw72j@uR4$0Y$^W|9r}1ZENC9#K|giJ)VWNA)tix z`14Cr*%(+l4@LQ|_A4$UMCeEotWVO63Cr9G9_aB3DEUi-hg#+k66m!8+rufz`VTM< z(G3~URfZ){P9>57Wrk&*{PF_Gi>@+^EHxMh_@}6<xxjGc;iONsFguiyn<+BHY4Y_9 z`^X%^LAbh^(M}nqrC?ge9af;rcv+o#ey`_`#O^`B6lwx1b7epmcIMt3ywtDLrTuBN z>kmg>ta1l#EfF0T&c+p1RCf)N$@W6rB+m$$-ZNpTg*FAk2?;IG6<p3oCj_iMIhDt( z{0LDB_c5#P$#N{!Ayy}$h<!v=<guQ=$ch`Zc`NFrR7^3VCQyS*x}aX3<k-DVHiCAP zDvFeN>W~UMV>>Ze3TLdf#>WJ$!BKS*?^H<<9GKLcXyPB`jaxxR!Ai(d-BTkm2-gES 
zemiM8;UjS44ov-3uYFYXrV7-|O>Q^#G{iVeM@}@tL(NEPMwvhi9I+e1Lmi4`kElqH z_1(hBMgW?R^U)P{s@2OjcC(<#Cv&tYL?q_Me2c_Pg~~amAm9pt1NX`nN>(+-F?r3( z@^G0sA&v*H0olkfq(o~0nTLV$TwEjyX7Ql038Y>oP|4wV&Ez84_%LAZ&wC#tD-uYN z?|qA$+kdqEeCY{Agv|HFuWsKolSw!Um3%okWOHs`A%ey~Y{#|%m`H}mY_8|eAI2_0 zuRCJ>j}x&5DjN57*hkvOo`}0B<t~*GXG5=^HUL9a)v|X?Rbn;O_-s8o=r+^$ehFrQ z6R0rcD{-Nuyw#+iQY#RmVaQFAb>&n<o{fHvU8Us_s<vx2R8u8Q6qQbi1QKBJ8zNIB zsL#R`1w!oL%u1TzB~(-@uF#CgYhxoiJD=2{saY-5O=Yd-H~!K0Y&OqMO<o9_&^ut6 zz62LbKMNzWdU<P8vOhjcxci|tv5Zu++5};*<XLB8S3if#IWALcLcMr75_&&|%a4eT z87mI1kTr&*m_1}RYPRT~2|D$!_|-^5KW57hlvuiJ2{z*+WuoJS(6T+rz6pcci@1ll z<rfn=Isx_AaQz|{aL1Y{6#ma@qKCUY`D#1Tk)ps?^ReuvOAW*Ti?DArpYl2&s9f>N zZ*tk}E0ycKJbS5W6I|-Gz|t>z{d@No2K3lm73aZZp3}?Py|iRf5oe_2Sw1;h$-?Wp zB~qij?l!z&VFK!yWnq-c*c^@=J=$na0_k|tki=5P%8t1T{3kbmrGgmtz1(>BQsZ6R zMuZK+s!#a`Q9bA`4*hC<&aoQibNAV^Na*(Qc<tZox~0dbE~_QL*qnfBRcH@G>6QuH zMNo2dR7P8DtCzN_C}dNYM+1jg5wAtUyxIvJIaU7Y<g!H1bCEF!uRipF&(NV;Xjjky z@cU0Vop>b%26d`lkzYc@xDQQHVpA<b`=SikleiMd;uJ3y#B=mi$o|Kb0>GHjmFYXs z(PXzIgC(b!5a*hwPeK)fKH30jL|n<&N0At^3-Bti>6ub0ZJU({{C3itTde0>Th66l z$LFW*5K`>-Q_)l}nvZu=RnCf^9nL7afCWqkrn=!m(umu3sQ09qKQCe7;$<kZHkF|R z7tD?YJWftRcu3~>S;XTY%`Xnbr={V%B6qd7(7vJSC0sHc6~$UtA#sMK2Z|$zY=dLj zWTAVJwF)U%6uzoU(J0%U!Un<{X;5B8_(ml4x*MtJf{6XJYpY67d3`r1HHh!t$${|n zQ+(peFATyB`N|a^bbY>URO)QfI80=Lc5y~AMv;34!UkS29G;<MqVqJneFCho+SS3| zZkrmS61^;oF(-x<x&92Hq4Sq=1VUj^7#4fu5&Gw+%8|qNFKi)D-2+jgzH%vm?Fu0f zO|<D@f(dDRl<i;Y{^~M{{1|*+tMwsVzZ0-O4}{@xUtimDP%4AGr@nD%=RdJ^KB4_N zy7pwzvlYK&jwA5Tqcc?JQww<5SeJJL_pcctD4A?05IHglMyvN=2Cz!~Fv#plAcR#X zeZ-q?M!b+2QVT8W%+KrY=%3ZT23EX%<9l=nfA?)&9Pj&8`7v!~33#vjG55RBaS{3x zcFXU*wXb+lWkM=^<5mlz#}E5ek8AL@(q)GX9vdy{)N^t;<M}(9wk*o&ae5+`(X)s; zE#m@hazfzn<)7@P&(8Vnk(l!dUbhpC$9p1VpNg`U3g!Y_T9tYyR^sNIY<_SyjTs44 zQJ-XDNrRiykTe!$l(5s39eD(-O0`fqTiosrQZ(FnJE+qR4;4m3tIi0JkbvPHe2jq% ze^_x3v&K%rpSSt6yrw{knz}*j?tUdYeWOvM+s8uC-Ix(EZUf-X75P>dF&MQC4HWk& zEa=NI&EbJ{9(zOwlj4@M|IU`%OYPDr|KJZ&a!10<Khk6Mqxh)#$uJD~s5?Zcx;J0K zWlZ`8wcno2Dj|VE@uieMNU%j~9YYcx{249g;%SO#+OQj`?Psa0Ut>(KbYp)%qeaZq z4yL<-$Y>BCwwMB`AoO~SbkNgP#!Q;Idk*-05^!+50EZxivBA<BbP1z6m<T4k<bvbV zpX>-RA#C(C%x8pE2kVr?kl~KP5GcwiGwIFFQHgX#$Yg%te-;o?A90U+KAV_Z9<;N3 zUCziF2(c8-7MBtZxm0a9d5*EJt=xF@n0WXic!5a&Q^muDgzan*1XkP&Rh=Wbqli1t z3JI2_6ynMPyqLSv<AWVbWNtLHkOw&pR?R&D1OhT%aL`(-`3?22?yEg}x}58Q{TDJM z1lli2!R+xrB`bv-oHei`0m_TmgDogpD-DCZ$6jjY(~^$Uj-BQn=U5jiS2*l!&F2?X zNaeteA!O6PdqWGEv!p!a->dZ~s;z<#E?A!G{3m;i)prqd4(hj-IXMuD!R0~eqKiMe zxAN;1#V$?C@~=tkfx-Ci{ALiRoQhAmlZ&OiZrV2{*~#ISio+9I;LQPT6Mxf4Tl`$O znFHgFxT?7$2IbM%;)$#MG4t&=Kuv_aRxe{a!Jy-ak9OvO9u4W<V2z&4O^GNXkpWm~ zP{VTrFT2x}oiZYxy1k@`;^`}em_HdO$pgb_@w4MODMjl(&Gm|{2icaMNEoqbvJzf} zW*gjt={VSGdXx$!!$vZ1sdg_Te_1d3D{J{uu`&qAsBgjx^cOCPP}UJaWCxe5QNA7( zM_@h(Atp1dWx@Oj))U!N6|x2s>{xmhqPIkI;Y6-M_9T=fu39UrnV}S1vw6i)aYXi{ zmwe1;PK-!PF*gqO6UHIGrK-BpYAhcYDiPA7fI^N(zVQWo5G7enNcOAhtL3rU5Lt{G zs7M;5Bda0{cos-y_^O`aFcP!R!N!R`4A;x)``?sh4yL|$Hd@ln?3OY@`20p5ECK_5 zw!@&mycoAVt!hisabN^jG&GOE<0YrV!n6@&R6+zq6m;Q~Tk3R}Jl&L%feyZGsuiU5 zm07)St`%a|v<_LLk1n}&r$z~E`G!xIZwO8>^Q=r9duRW`>vbr$K(P!aBVtxSe_ijc z<Q>J$PaTblmM5#-NT07<7kM*l$Bbeiulj->oXD~Ltzp~*W7Nr~>-IxAx;USV5tY`j zZpFZ?cNhyR@+tw~-$ZdIQfqLyZ@@Xuuf@BQsEd|%9Q;Y`G>;B2I_YP}6DTD_L#a9{ znOnCH0D_>ztQ(|hk@f_zHj>?;Go?bS=CO%dHUOE*>t-U32B3G4Tc3T(&f4*AV~m@l zb9!;%j6bdjbbSjs&mEP4@TGys?aOY~&EHF)0zXPM?Q`tm%0ZwuT#;vgU4{KzW}ZDW ziif=7GKv-74~>6gtP3T0Gn*=o91!yxhzrQz-Js_t34ei0{FyLxDk&!ep}49#Lj&hD zLP^iI%UzNwrpV4TC6EkOEvL#8aHfnh8eKQeWw7gd3@<!2w!P9+4i?fn4?MmPOKRXl z8_t>O)R+wXhd#UPB*H?wg1r*UcX@eU1ln0ob0kCKcW*m-zV44?S&MzQ0fAzO4Bk`H znF~!&DpZ;Y(T%^4g+`mo1%@`4Wj#iwdy~jigjwx%TYK%a1CcT^mCZtC=V5wI^&m-k 
zHE}0gdw3!YBvqp29mGrSEaF1#XoL8oDsMq_ZsHxSvxMwc6CS1g$%Ua@8(vI>W?9}8 z<FWzUtG{_rspl)vhXh}fP4|s3A%mNt_X$NPr#HQdMpZB=pc9W{66#Vgft7==1zOV+ zy3rPU*>%(#euYf`id@f&lXb+2kehYm4Zz0DCe4!C)p?I&+W%Wyi>>32?p4-1G61J_ z5j>F^d1I>Tx38($(Tx^j0{p990oKN=lL;RWOh{fjo@gt#5VKob4DlRKPF0ylt6-&% zbvZ8b?*WiqZL)i#sAEuQVJRMdHn?;r;!r^$^MI{4k>(&=Y)EZLu%m<O5ZI~^upCSm zj?%1J?7Z;=q@z=RtgL5nWkg1zMJ=a#sm&aVa{+54#^g>k5rolhAsUB&0CM__E5bdf zbOejBqHrS!-f0==8zoGH%0O5^8fJIqo?H6CQ{0HN>WXCaP#RKgUtCxYnh?>Dw$9B8 z%4p-GP;W|AGyndEzb<1{xUp%9WuK{<2fWUa9@YZ`wsD7@qgLQr!8WWl<|rf8@_;*B zvpP-fjY;M`sXjgU-K4)cp~O_<M+`?R#Bz4}*w58komLlTPWq!arr-EOdG$<l7K~$x zOqCYWa=k-rT_!VcV`=ohflh~KvWvr*VLF^eodc;l#QOE>Oqz&!5L8A@DM`_rN;hxX z3Xd?X<4*Upp-R5IQuK#@-+RO!5cESK1H}PhM$ty_M%ue7w7$)Q*G(^**z`(@c)6Er zl^%hfn^g|x2zyY|QuYKmom+hPHa{%5oDXi%C)_gg&CNv>e??9TKKcq<zW15YhE@!~ z|2avaC<%X@5(PF|;-<&$B!2M`0OF(7!m!=R1QxjO+Qo<(3O1f<DQr)Q6}Fi5c0Q0n zTLeB9K)BeE;;6=X?~T;UcWn-avynNA_T}vmvQC0|$P3TrRJI_ot{;!G%8O`5;P#tB z+DCXqqYI2?^>RQ~mHlA_a}LXp?rS=&YfY?>t|%G^LMplK@chVf5)6a&433uH@JP>F zGpt!54W~IzYI3=r&dQ6a&1sF2Ik>&=x2q(J+l%Mig?-;GVamHo(4#|jb7bW_a4!Ps ze@+u1nOji*w{`=p<sRC5mp5Y1z?1i{+8)XDGi-sBn;<kk=p(V0%I>b8K%C(J0hB;% zzv=?r14gM)9GrVY=@NA9Rjz?YYZT*m+a1LkPNhv5M|(W8buFlLF({(r!F61WaJ0ue zYNYKq?s*1s8});r8d%g8i$-!mdnLOEjIixWmlnW+6xzB&>>XRS)N+n34q%cgI_G}9 zZTEx}4aTy}Hi(7<dQ|Jf)3-3i{IO200{~oCg*8C*ltXh2uCgm)QOxNpAFlj}{ut#H zlslyI9V1~LVh+1Gp$j8aKaZIr!lxjWf2orIF$1#mK@6h8V$1~94a0a-nXiYqmBI*M zp~d>8X-%^dbN8Kz6;(7(ChPkMh_E2GAlCasB|X_6p32}rH#0|l2Dgq#(57o1%vgDe z>NTxm6&Cd(R?714;5V=r8fg{)A?P7#Zkc^J6bcs*1`sh4{l*Ku%oRl{P;m*g?@-j@ z4jo7cb(KeadrD3%8VI|ZeEas;DW>q@Jj1CBkJ|#t20M`4gqIkQ1Itsfh2+SM6yXO7 zL;&@n*b$ts6d$_)Bpxldm_})YnE|JC#0aM;<-ZSsULgdk?MyVEe=U-DXkis`JXEpL z1_xx>ja>GAcG^QTyQlDVXgS6DbZ9wcd>vX&nFsHvKLnjhjJ%`tP<6@#Q5W@Nxg7Re zSvc?Lewf3=sP>NPH%jkFG_%jBUGFHre-nx7GwR>y`;Gik?J8LPTwkhB^HPPkpTFJT z9o;YKZa2o>`-#S%=hs9tJL^;SaR_nSoY$eZZR(I}w6$%PhmN)x;cR`1Jq~@WYH1D` zY_q<z<*EHRM6JfCR@=+1c9`(;+b_6VCt|zi{Pyy-XK;A!)Frs2_gNiwJkg{c?mQ!4 z&F>Sh?IF)UB(L_4>dSxkj;w#Kas1@>Z~gmR|2{XqvwZZieSE($Un1E)MIYOrK>0g* zd-QfAs~t~-`W=0=wa<L^t?N>?lJzGZdzLGOatMNz@_3(qfBFu;pR;ePNJot!|5_1I z?&Ug^d(Z39>pn#)&*$t*cGmspzed+~-d{Ib-|{qyu+God`?-UF-LKU!)qmfi!KH0z z@8lVFAC_-;VAy9k{IAW2hMOP#6cD~c;}U#^=d;5A^4B}O*Z0>$dOhZ8to9AhMJjjg zka`on<a~yNvkH)rB|f6p8(IPuA1+_U^?F5oIHXujL}{}i1E_o!e#B{CRbYgyX^L?O zQJKJ*RvQ51MMY$?JZhfI<@GyKIvN$ftI-1%NeRqDag@BN&V4FTkSyOcvL!RUMoNJe z-*B>FZI+Au>z-w&3Kp)qdrpr~s`)2SJUNBvms*ET&&H@TFB}UsCWVipKBn+~q;k+K zEu*8GPE`^;QLQ+*o@(fgc6Yf3Z&FKtdg;6ftdW7#;zG(%WZKBKiI*F}P7!-g53}VS za*U!DM~*RDV@JQL*!MA8*{VOQ8DhP81;BN-u(8*`-J!XZm^I&8UKOIs3h<SpLI{)| z7V8(~-?cj7<l<ncyBGnO|E^Vvea}jDAc&e5qxkYuSqY$@$_fPiR0ietQ(03}zbfl_ z>SrEaFQ`+=A#g9GSOZqSUu!NkU7VqOaAmfhDprR6T}HiA6dN-t)pA5$`D!Kc-(;vM zBv`5{wkU=aO0~K<h>g|t)e?8#4OUC+Kpa7o%}X(!N*G&DuvB4eDTa!?I#LupIdh$= zT>j9ES2g2Rt$0<-*@1K_d+dJu%6s^;YIllajbi6xZ}FuxqZNV}@@XxF*WJ=$xnj$K zz^~;%VbBDdr5NT_vllwNhP81LHoZ`vv8@KOgC;ix)v<b^PQBb(U_NM8^x&7#bEwMg zA<bnKy}kz92uW;Ne=PqUp&MLN^39VZDx@ABM?7`LD6<aq2&YCFMah92p{<)rrrv@4 zPmI1OzZH!$vQH`wM%qWt1frW&W&qTtGAbSnsInP6dacIT%O9O4Fw76_p;kzCUbPW2 z{M)Uq|1*%t{-#mk|FweX(<Gx=^NTYYudT0!4%RrMSsRSENcyq=Wdn|B(`wtk*4{$% z?ePR;rJGgS^5+wz^)#Vq$NaQWdd<&UjUViD_uELK-EY$ft-jMLqRQTVM*SPrZzLPF z*;Aj-S*jX8ShI>c-=-FNC+7;Jd`{DdDqFv$Z*-2P>s&$YZ&Qo<HN-fh{EZGL!Frl= z*l2u5hh3G8=6k;R{hB>-yOw!=<86H+ZFqdN+UMid)p0)_VO5=_F;Yt}mPbR5r}0Bo zji&Kh*ZHzu>uDUmYCWNnj^;~GJ))lEDVeRR4Na$J{r+>9y4(FSdg~pX8pvi%8S;6T z*Qu6l>E#b^)|8<?Z~rikOQW5#%a-4MIL%ryq#rNmeM^7UB<=Cr^_|nYrOL}`@ogUN z3}>{+;>?)WUI`ET!)Y|uJt6q_$f3Ep5mL6$2y5P;osvZvnzdH~sf?CpvJsKYDAH7A zZ#w%n(psd6Qm8aE9!pF#S)(YVJGWed2`?_jMOqzY04evI`B9@8in1rvUYe~%S{}og 
zw?KOo<^ChDc)t1b`PrGEwdWONb5VNWQe2@i+ziLf(+G0;k=u{lc|My%{bLLH=SXwt zhF_>TB&wf9s+rNRMQR$Q^hjOP2?;;cc=RMi{R6<U)ip0tyKPmuNX^p-d_OhRD^UG7 z`uBRMfEKBPl*=Vmp5pVQB2qLTb6Azni`0Ee-?O<^?kEp9)IW>OwS>Qg%c>&xMP^1p zZpqvz0CtU)?wmunrFnCp{(1e_>uI3ALWA{yJ}~Ehv7x&;uY=5uaBbHL>02=cA$_a% z>+p0{-VRUqh|Y_!LRE=)gw_v_5DJjQBZKlJAvl*)cgVMM`1=~c4_-=Vq7KTkQz*&z z1kBrSea3$46aCiXnXSck3Ea4jq%`K-6^e+U>{gApZ!NY%ArPNc32+nD1@SqTfId-G z7_{ihAvmFWGrnIXx==%<oLLE#Y>UJmgJ2YKW?<?q!yi=AZ(!nDO;HPoC`@~+DQcZ< zbyg36FhJ+6Y6$2eX;t*^QKSl$;nGha;2t5`q)Z7`zhF#=R0&JRlA9iC5~gXU9;${w zb!~McDDZEr8?l-qkXcJ<5sH-GsGM>r#^(ABtu@pcuJ5lY`>=gyFfuCrlo4#4i+H1m zTanrLrMoo6@$Vn~ZhL~(+WZ)|W=Ip8<W)y^?32y_lSw^D;VdJ3&lRjm;ggsb0pl=# za-{?<jCD;yQwjJsDICk;SPsV=m}gSvF|;uVTtXvo2T5YZ)W!bci;kN~Hzk0LS0u0x zVB$qKK}i7qEGCvl;6<FV;`zSFUvY=j-W^hTH`yL1GqZohR0+2DKqhSxkZ;N~d5Vv; z(8iYu1l<KTP2=7KmwjM<kc0%R3Tu4*vMa76B>V^YL}%!SD+yS?#op8FzQAL;0?VmM zsO@BrT<eg_P5q-!J*_JPo2qi%6ld(Ndxuk5yKB;yEBOVBGw`tH&^bGaR0HmS65~Go z4+pY;x0FDMTzX(&E$bs-mALo9Ufk%ZdNKZCrOn}1C~XcaT`eOJ##g%uP+=;I4yM(f z1JSB@ox!u)$JgxK!U^}+3f+<?nvnk8Q3L0yC{p3;7^CXVgh==18^$*aqf(P?ggixu z3AzH}K$(aR?#nfaI54_%kL@U%5X8atru0E~TR3ZO@MsTWzM~jFZ0+j5$F6iFkPb=^ z1hGbOL~(^vX>Q<Xjo*!s3c10&DzyZ`4Yu-+qx|koRLD7VW}c!?PcGPHR3<fm-8i>^ z3`i$$PA*Q#Z`l(m$P%99Lgpo~;3zBxq=RxgpefS}1;WDodFGNbsWQ?Egha`xG6wTs z;0aQ)40}N`Ub4cB9{F@_Q}H#Z&?Q_91vZgNGiZZ<*9clBmn4Yhj2O6v<xHO39%*u0 zWXVO@glUFQkYN<T&<s|P;oq&H1B?hh2Q)*Cbhk#J=-;*0D^QFraM`M+1j|_!Ctyr8 zMh7V5kRCGHVEDoci7$Wb3Yaf;LI@nNnn<K%9!zOjWalQ~F^d~4F2siJiNy^m1=7W2 zJx?0Ae@XD5Ue<^wi<o$lNWYK)RJ80f!`DvC25Ogt;ifVPxGIAto(kZ+>QTjV;CcvU z4g!#(#$|=7hQX}o>`aNuT}1_pprkZVX!_=076-KOOc$__GyTKVCWEcgke^ZX*RU{X zlu_>c!p^6-X!jdak}`<%YpP*c$rQzUp~Z%RqZGAIG~_HZsm~Pyg>_cSt-5Fz!(OgN zkKNylNbY(?g0s^OC{3sGtfbI%?u?dx(}pNL7=lIcIk$<+*f&^^q`Ya4lxQ}Vb(1T; zsJ}^J&QjIdF3QM6k}{KsOi4<LvbPG-koyKHcA8MtOel!5Q9W5GBMD;Pr=r-yQL&{_ zTtU?BLtOdd?`qb$sO%uFSy>L;pn~Ff8ZB=--YZ|4v9x&-s|2I4)kI2uWoz&TTid7d z*R8Z1=7#ccm`F;zVb%#Mgc;jr=4@B;3@b%!z@6<T3m2VwY+~WGmFTD_(-wY67dgWZ zRb==ECvcmcQ?ywF1AI8V!Dj6cI+KO4)VFw{tC}5k4@R&xu;CpsF%1d-VT-~)g(rEJ zC?<*xmG~y|yBC_uouhXUXZ$W0n@!BTD)hF~HMdB@+>l8%kq?KRTJ)^YVo&J41|sFD zj(a!X0=jxM>-2~3wySYob)_Ns_FL_o8|Wb@^kKtf*lQ5|2b(o349o*)CL?$oI~!UX zM36SrtL07(hlXk$wQfWMzNCJOg@gd;)wrGF3r%YFj8g{9#mp8dLO52G*yt!&3Uj_H zN^D|p%Kby^Hlxcx^BP?w+Fb5ixX=Vsj0Y8z9a}$Fl-P()9@%}hi2)Q~)eFrx^-^jN z%Q=W$iBL|4AnvPzI2OBnYCZ>1-yKw66#Kk_*y7O=#XE>?M6rfbZGVYk4XsF(B@bc_ zH&|A<!4jXw_K5hb)B+U`Ha0nJS&!Y{g8G(X4XRNoR;b=SMSa+B;oi>*t6Ob;K+2)F zKLQS>dZ<`gcg(B0%7qq7%D-b03ci4bN!9ul0iAPy?T`*>|ApyYtr=1y3<ztXS}P@N z<&BM5_j9_itdxmI-=eBEPOfrm3w>JEQ~(RM)~%2|Ayc;t)I7{oYhv5hx_urrq!NOv zxrb0oxtcqo+pzh%xISPkba9mxgf6aCYQ#6MZ8B|r<=>S?a`jbH`I@?#s6MTJ8B%UA zx9+WwX}N_SufoAV#}+A7*psVKKNl(F8cf7i1LzGcUu93C<!iBBqr>>z+k}5tn-|e_ zq2;Tfc-XCqHFYh3@m<*X1oCSD7pmu&PcyaNZonzl$sVtzwhKL8%`*3?qUPTSL5h1C zu}$$$!-OywAXQ=<VT}kq0b9kqiClxPk<spB&=xhvq+~)ZX{ej+jql#UBiNdO?!z5@ z4TW6se(&hxC<fPn7Tu@vegC*=J&L3VR%O9{o`3%s96q0D@ImnJGaV4SJmLJ1skc3P zQaDc;fO?H&Yi}>d{eaO^5-hzwo_{`xp1qdxL5}Yu`2fdNMsbL`RaS6#x>asrDJ4ZK zvhl(Tn;|r^g*&c%l0||?S#C=4l5B7swOUFB(GglBS-FKGG0BQb(w>9KiYyenNz)MT zdy6`4P1F`D8SqL`p)^mpC|hv#L3@o4;XdqhTazY(Tcy3DCWBiEGM)@>rKnXhc#M8C z8FWe4kqmC7YfA=kQe-R{>`C{KwqtpMXUWPB6jMzGGE;m__k04Y$%xnUy)5FRU|f27 z!YHts$`ez)l7WBpw3G3uE2Nj~E!Zf{_h>rb9rhMatS_bV;JlLo`t=4<dLz9l-S0J@ zH>3Nwzk7Q|`5S$oo8x*%)gm+}Vx3SP;1vq}eG6x#H*<e)>+aVH%e{ZQ-%;z|sD7jG zXdU0^J6hBAjLLkkrL=F9-%(!QsK0lV%Qv#SJaC)$Z+eoa3PNw@8KrlmH~5U&^Gp?L zI?s78!DsOH^w9&g*Rqli&cbDp<!QS1s_wtml<i{j!J4$SpQd=BNn8q@O^^Il1fC{w zq3K)>d6FalHo03ra{up};^m*u|D(y?PgA^qH0}Fo(#I&G?&tpLWc!b%fo$4$(|>&A zh_-5M+GtG)?<h}nqK!`EBj?!kFf_?)LghlP-)?)oO%;iDi~ZOixyB}qp~<6~MB@7T 
zqW@@}gInY02h9p=st2p%G>s%$Oid*N8ZOhMIZY`Go9xZdlvFFav58`nvBl5JQp4wE zLU4YFE1ymLYW~c?(^PfIUPsWm)4fc}FgDJYJZX_8?P-lM>>TXbB>h@Bf8-mmiRacS zp~>f6z9yijDc%ZAL#?T3kW*|jI_Hnvzow+u(<G43%bL2f>0#CTWmDK1-<J$HAltNS zgr=YUXFcT^WgX}Gwj6DZO(Wg(G<4uN)DG4`Y-UxFq@(NRX`1)<9ri!30xtE_#i!Qo z4DWCF7%uaxbvr}<>}f7?hW+3kHGH!g)(!6+KKsJ`;aScw`|y5-%e%$PE+U4}_)>Ea z@g8#yfJEW@oZaAvi76Uzj~)>K%oebQ1|9SlYT{t}oxnm9Z)S=JD-<dbs6u0z@eh|2 zD&a~FFxe6-;fezYpYC-)h-r}$qH74=##9*Kg0h%E-;iox`AkNlbTj7$J{M^Z-~+Wd zCf=Yy?~vTPiPKBQ$<l}*K+;9)XrkG%YR>RdT43nbq5<KEbSmgbQg0+HNyds*nd$_q zxDuhrLuvZ}Nu_q12^^=mx{ySjF%_m16Y3F-m9jE<`{vxPgD(Kxm<-~H-n$}6!CO#T z?H;UUe3{>`V}t_dl&uZrB-~)IJTc@aEtT!+Fk2;@yGdg4W3^%h@x9eS11FWOFwU>4 zgwi)<<B^`tHBdU{8fx7#62F%A57lf!5FlxsTc}7=tp)0+vL&k>j1aq+WaD9UFL7ih zy0aB~`Kf^6vhu{t6ndValYz<w0P#|FJJ>x`NP$-Cd4*0xR?C=ssAYuvt?Dk%)|M1{ z7?l*7@|GML9$9Il&pIn@^sIx5A<(eBSl^|>ydZ!KrG_3SR_T~RiP8&#xK3OHz_nr& z6^Mz9AohT#P}XpG{ev|-M3_T$kQ56sam8AmWabnB+)&U5gdN40P!J&spbu3^g8afV zj$TDkR$$6d(IoIARS7wYolrg1P`={zq$hDyU@?RXm92y*uo%Vqi8@t8wN&NyAeJ1( zMskyC+#la$sNz_`QN^Ap#_uXG$cX11PSr|08o%3S5PSUHp2Mk>QRdP3yT;Gr;)vHg zYB<%1-<2OkF+NEp5h=D$yiUv-FGY1rK^#G}*A4OhmepIziOk<Uqu8U!K0~L36kT9f zi${Mx90*ygbd_QYr;_T!(XpZ!e`v%H_sJWq)^yw>%ROk78mVonHBv$}e6bUpCL#1y zt)bzKGWTFX(&Qg8{0-GtO;0v1I_OCSxS>U>y%el&$8bHGfeLvQLb^(XbeTqiDDKzn zs&B-7!8n5ucMilir3L(#X<+v&iuGFWp=`YbL<ks%i#1L!VH5F!DyyCcd?bY$XqEk; z_}I3#7>7lhTh$z*>eseZ7zs0!>gHIRn}^rs+eROnjjQewjI<&W0sLD4M(}Hk1ZB8F z&erKI5oQQjmhmnC?k83TFdT@p!HsQMP@s{OeFdadHg5MjFi8|xZqAVY1R;T@S1cG% z`+NsLm7<eCR4GIW)<S_>0q<9^5=60zSWZ%d3^Kzi&O2u!tq6^LLr*P26zL$0!2c*? z8{Dso?9lZ<PwU5S2P>r&1p+wP1NW<y5W+H7wHsn=Jr-w4M13X76R+cXuo6t=CkPLk z=%-Sd9Nfd^y(k_Zha@X4w53s?hb3_;T^RvZxf~dK#6jPR<$bYT$`L2U^tou)>$0Ef zmoJ6zc=~5pedr&^{()S-lN}Bn`|F7|oMG{y|61`F#7i1<|7*KnSuNYYk^jxg$%V0c zcZk171T@A(<kHKRBD-EO@hH-3M=U{{D~jP%1i!S(sO1rH<`%#&CT2fX|I?5U-}l+r zV|+4xDU21umqHdav<+8q5&R5_Jpve|&?K-hid+I1r9h_>6}KL-Fhf|F%!c#C!i<Q8 zQB2pnHy9MCM6geu0o{{Zfb-<c*6z){XyLOE%{2T@s)TwWmTCAM%#E$)nzPLqeh08A z<(ApNL}ANCbPuAINA$@c#L7wmz%oPglgiHaou~w1*{W}2hG#)PlNsO-f<Dy|U}Aty zh~2C@)2bm>*vfuHu}1L@V)GAQXFi9cIf#O&5X_YcG7?(_pOt0~UWx$?!etO0utM;> z2@TPMDM=vMwpx&Bms=H9cV#|YtYP;d=c<<#RQHda>$U{kJ1%b};2x(hs;SP<iN(_> zu)ka6^s-`U7Hj{g=UW`;EY_~xlj?f9MTg?;8bhP%8;xFJrc4(bTq}5pV5t>Tvv^x^ zN)~S`$ce4zw)iQEr?VGtzs<u#@pg(En~Eb*3~EiN8Ddo=d3BucjxpC;1@kQCHq!JN zi>q6U#njG@W5v{{fnBjec7^JEwOU71)~ctyGshA&0s!vD(Cr-sL?l3H{0v3D)`ZXN zl&uLL+nu8`lT}{B+F9asK_>%|xVU#_BC428oUEQ^6?3ElUz&>!_P5_fx6T&WVys%6 z{ThR{vzCiih+Wvz>y*@^k7De!b|&2l4IEFc?e(qG2e?tZcH!@l&`^?P4yr#ufp)*x z?qM+ex<XN@l{g>;b{7U?t$5<bYU-h+L;gF{Tfxyt#zThWPyd{hCtVZDPy+Fo)t$t& z1<POZVyh1;aOtrc1}jFjGRS$?z*A*qH^^Yt9*Tj~GzEDI#b#kkD>jQfg&59|yp&Uh z+@+D;Hd^Vdb~3`;6bhIUBJ(YjRgDlfu}UetK@0SCR{jmVL4|6O<5+?8)++y2to&P@ zk@-%-q!1>O+l83YIdf$S02OIz-K<wr`k+JU3nl;^Zw0<R)Ip+RNs+Dz$12nm>N&I0 zQ4uz}Au|+QYTLxTQe=-6TAi>ep>zPmx6~$5=K`ozRI(P92UuY%5-|d(!6_3!4fSd{ zf+%_jFI%}SuWFJkTWgR{(t3m={Z|=OIb!DTqOZBtejz=i1zV8cyctxnQZNDmLMFmg zK_%%<sG_qN3zP7qQWJQFghF@d5U&LLa8pkDifTmkewYm6nHw}Em;e$UZomn>LyYB! 
zgIbiGj~5l8_5As|BgPIuQqAak+(W~k5PLBNU#R?(@2@3vIF8#yVfmX(L9smD)7Y81 zCDa~Z-rXK$`T*`q`v7;hE$@>+eE@g$1KbH{wO)0Z=6v1bIbLAitS>On*rg6I?|zb- z5A?2o(7UY*)5uTUWB>m;@sS65CwsN`7oyich+cm+M{47V<VasXpfqD4+t+&~;{`VG zS}l*tJ21PnmL-=TU|wktlWHpE1;drM1a`03>w)N5IN<y(f1tVbp5=rP^c%*MVZZx~ z$~U^NXQXRBK*?FY(fy8OwLhcpsGxGsNN4WP#`FBGedlZ6uL(T)jJ|8B=^d5tN=tjM zs(eTD_g%r_p3!$^fgjE&eV;R^;W*f!_itm_nT|S24?FR)t9U?o=WX@+!YK8(-rjk2 zV42S>K2v)CbN_2=39(2W4ylC<hg=@*PBi;Phk(eQkT0E!e}$^=t!4sc|CVbgsK2cm zAKJo-^)orELh=4R0u{wpR6@dxz*$A20J&Yg<9sb3XSN(SC%IjnZbDrj((iS%fG;*T zi%36lR1xWS+kS@UR~+egJ#H0{&}QKh=_j{}NFW8&I?Hg=`Uo(Nw(X?AFt>_C5pt`D z{8I#vtP`@PyT?B)*cTWrl%=F4ySD~7&}M-*x~+ol!(h>$8{JT0`DZ7mRhuP>iOvIH zRdUjR`V-?7AofR0bm0&{JECs{Cf0Z~(cf!rMX^T_JY1DnKoe{HZuBG@JxPgoI8_<p zXsJ<*KLqwDg-|@lAdiluA2x4)5OOd(O*eq6^B~Duy1uB4;AB>nvLdRG7FO*M1y~DE ze_KoL?IWQ6iXvnzZEuVb8Oo5TJz0Zw0@a!oLn{Gwh#*>$>4id|l&m_f29898#!`v} zX$OS}fRUjPsau3X1d7;Dhz)B=C`6<Mq#G-B$=<vwb%`Q(<dakAu41h#c1PW!;9ZG) zeYK3B`zm$KHZdwth4*&kYl$JIDC_gM{;JPYx6Mz!o{&;4k)JQfdHu-aN3NfIJt3t^ zU8wr99CclkQ#{|j;`vlz7)T4+k6hyYstD~LGIiU}^_e{4`EvV_bI_eN^ZJ^S%z;m^ zQkQStTvLtr8+3b7omJQyDs{bTH-Y-L=N;q~Wv#y(oDb#ad}T#h^CQUhNAB_Z_Fh{P z_vZ8?*B@EWgY*CX?*HBU!}I9<NB((!T2Fkws`d+X8TviiGIxAGDkV(O_NgS)P$6Qb z?WFgQ^`#sDQOD9Jxkr6NRJYCFJL;>F0%t7OujxCif0>)q52hbso5)0^ev$E7>6}Xc zk^{BWpwHwK)CLrO4Hd4oA5?m%nh!mqtWhDFWZoj*V5oK^I*fz1vI;;&l&lia{9zS= z$u)4));R)-JM1^@XFoEz{>UWssuAVhLNdv&H=yQCo)NWwvrX7P?R9>}_GH_G*Uui@ z`OL4lC#OFsC4Khb^|J@_9W!sU_i*V5*mE{VSh-=<<D5fxd?%<7e|d~6k@A|ue8-yO zVDLG6GR`L*+HpQB8^@Zqy73bC=uSH*$30w?0!~+d)_D8T0jvZVJ?`<N#67;RPrhEA zL=M^hV*~UQ)|lx$+60=z^=A*SKYKViXtjsaW>9T-y`v5f{)zU)pFMy!g>(RA+rvo{ z)IRy!!%OsEf=%+HqXa9cB_;@h&0>3ciF;&;{z{a~kL-K;^&j{AADibNHp7gv>0s`; zCE6gV|Hv!a4AQwl9-n-k-;d3s=O5&s@0TB&sQjF-{(Qgw*f90y`}N00t3NhX`?0yC zgZX^g&+}_DK3~oE^0SARpFO-p&gvj*59fT*`YYVSdz^o=u_-Gr&G|lm=D+j-*T?#? 
z!B>szO?x``%=-@O*Y@;EeHHbAQbl1;)%<7Wq4Z^=4>N<$VS9R;(I;YMo90*l+0*;a zp59p*)uaBEz8G;o0lUK4<Ej6vGbmi|p3nV5^J}HxQ-(cO^Nl{^mK!S(%_rp^E4lsb zssDM8eNu;8!<N@7_-i!N#j_@B`%n~nj$)4@P9-1w4o64)u9_tv^r4Ok5Y;xpqv}e* zhNC@-Gl*)XFhh``c;ZJ<l;T#ALM{+ZL6#Yvq$umRsPOJjew~mgw;y?^F;NI`l|d5s zD#UBpP@+85=LiZ;-;X5mWzZ>|;us4Guk;?Al0pMvPoUDHLMt=~wn8hdzW#M-k6Oys z*U5&>C>tK-RZbjTzIv(cRbi#p>l@;)<NIt&KCIu#$X2i4Y0yl)ie@5M7KI6*t!cxJ z_d7lqO?*B$-6&J_+gc4TqOSO2Yl{=9YGSnbB7OiUg_)tEi4%9aAaBi87+KWB2v~tE zTy^kv3nP=7cq>{OWx-O}-gF6(%_Z1&umqyKuXnA$-d}IT^5!Q#5ypxHmRv!KcM!Gd z+=-}=a5R2*rXXsA!|y7FgsYMc0ug=>_h|YHIXBH9e`4a}FOEPuigJ;jAFual)fPpD z8{gvm>u8L0vJX-wPu0Kl%sYJB#hizJGI5Gio}p<pOUjw5Gfc=@YBp0|<!JaU)0!W2 z=CrJlo>k$M9?jdtbd7SICZ^Khr`^r!ZxhpHZE@=JKu&{J-QH<kDqziNnzO9Q>TRHx zx8duuCPwq=IdIzSti8j&qhe@x@fy5Mk<=sLH0D`3U#At4S^=E)Gb^=kl%G~U>z+@0 zB=P*equUDC4!i$pnKbv6pEf{KZ9tz<x9)rz<v*vjs#yEqQTs;e9rbaY#!Kpw{EqH- z)cQBl(Q1U8>l?Lir1Ke_zFN+nzkMy8rnPjkk+pPcPgn4F^!-};j;8W7hE?a^XLNt} zc7Jz%zpaqQc`i1p&$Ak0fth-P!<zS=&e-#L;$yxIX@}bQ+It&%YR0K@M)^JZjN0{% z=KD7_i+V=o8?|rr&MUjpXJltKKK}Oo?fYE%K9|ZjYR^1Xr}bx~o~zGDS5=<g&CjT$ zZ`7XIu5RhisOE3f-%+y`#ZP;M_iyW2Gd4G#(Rfyr&7t+pt954G{f+9=Udm?Y8QBv$ zqxr7d`)xj@dH;-3e%HBl{?ltCzgj^1$QhOVYK!u=s}jWQw5!spEyjT&INYOY+PsP- zQ%yITPEr$-qo|aEDTknBOd~mZ47c>@p;RB`Un~V}##b6|F!*%1Iy=v)w10Kue~tz% z+cMi{y3Rz?#cm!MFzwg%=zv8_!X87J&!R6zxnqyjp!!<O#q$1Br_b+lEhRtAEzYHC zl08Sa$~xCP_hp^i8|9nu*7aBqWYll;b%Q=5TS`)B&qxbMx$%+SZq*~dYJ13DwAViJ z+kI^0SLF{G5w;BsJ*#KbuJf{WRp({vTFxsTwSJ@VyyDT`ZE3XkeGKhrMg%nUgzOa$ zO$vL(qraWYhMa)&495G0hfR<5sEO0VY|MT=qpcE(aV|(DoM$RUaC(^SXSzp6d9|m9 znfjSOZ%gx~m~znDFK1aNYfx%S-59~nQh(mLy?r}N=~mCH_j?<!m`3$dQSWIPe@s;^ z^<$}OsUK5!nkjrgOZ^zH(+pO-0+|`jl!KOt5k1Wj%-K2g5pVinGjXxaOkoq6b-~n~ z@^`=nEh7MZN>-=cYm>jDOGLi7NMz&<w8mMgxAPU1X%cJqg)6esRt^L0eD2S&F|p9^ zF)aiytlYt6+pmz;6V~|;wUO+N??>gmw(z57!#j{yZwW{Bswl?qY6?RfcR1CE-yJ=Q z@rP#o@EqSYKi#MJ-1PdG<^S>b?i}3`dt)W~qXB1LKey%$zy-HQ?}flPt&H9SE<U(X z+A0`8E-U(r(j+Y=!3uaN%->`{S?j8`H~;mymRcwI&*yO7?r`GTq=V=Zw)RioHGpAK z^mXfs_iDZ{o<%KO4d-6@Y^5=RFP`~gkINR2f+f0$Im~)X@J6{Yp7O*NS_r-dm+~Gz z@{fyYu?<|GTnKa+v&cv&0tjM`qPwTpGpFGVNny><GmWD3KTb6oFYAPjMhq(XdXSq> zF-JdZ(&_$jyMnU#|1c>MK*N8{hUkcBnV6{n0S1$LL_ZWMPjp&Q6SD4<I!io`9Um?q zj_aspLW-BsI{NNl{Q8|8%h-fy1<01HMi54-pBebb=W_1J>lmBtOEd#3{G5TTeHtBm zWAn4_>D~zrwr58JnfIIlhczPs+YuA{sDW<nh&76L5YY?G#1c-0o(CqjD5fCx_`8m^ zJL1i221@FPg|`n_lpap=>hUAucZGDCe9pi@9r5N%d2vv7sv$OezCo0dO(XW|v>d>O zJFOUS`MfHOB~L2`%(N>InVwet*Jx%`k5(-Ohp^(61;DyAV9{c8J0QEJF}(T>j=)zE z7HqT)<Y|9J3xCambN-R*i2f)dTZcS5whj{v5*_tSY<{XjwO|*FIcB0i8Fdd}e~ufa z1=kUKJc{-mi)z6{_9C00cfF2S;!*!yYj825=oe`)&g`e^<ZC=4zT4OV;D{-R*>A8! 
zrD38!I(6YdN3#;zejO2470R5C*u$C87|z6-qT@KPBi48{es~VvenU4D6O*56u*~_0 zm2ZI>Mnr?qF!ctf@^!?_nFCm+<`or=(1vI_sIt(CdDU6Dtl3V5BNPQ*sFF;0p~@K* zzd^w^(4S5ykBkpDmMSX(1`2I;ru0(fOKuZNmE~(w9resy)l_9G-ft(`@={=h+DV1m zc-?{_pf8}Zrg+^WfIQTg6GZ0n)S$bjP*HAi$Z-S6W*VRRhxwdVn>?uYkv)_q2d%p_ zIn|(|;N6$Ak@{zD+yHJ&!*O(O`n3g@(6g$SG&w*QM+^=lQoN?vOVsnluWE&S^bzl{ z3t*~5fY%Yd7t-x;vB%~C+U$rmilK*V{pQAH+YLJS%I;lZ_hDRLk*Aeur@5_qSy&CG zYr3PTGvC1@#;Ih_qoOCr%Mf(v4lZy-QBMirKQ-m834Nmk-E-@)RwD0~lFT_Y0|w4c zfiQmym*VtzDd#B8JopAz4G>qw^BvuAt_rojhL7zmZUtOdVK+=Oh5Q{TaM{BFj9h7a zfXs!eaTI_;4FK9HKj2H_lF^QZv#c!xf*6IMJH5~v10W8^x->U{xRVVJ!1b|eFp)yH z1e>Bhua3R+V@<WNera4~@-Ubccl$OmL&pO%2vamBp<`$0qhdyEcHJ&k3Mg}NR<IRm zW7mRd1Q(53)afs-Y2+A%8n{8{L<V8g-Yx3(?)s{SYK0@turFXs?UiK_ipP1;#C60O zj-u(AiSfH|B}|;*R4aZLdy0wiyK7PGcjl?kwcv<@chuuP*ttv6M!JqT_^dkY{~paP z6DuFP7foC<_U1AXZ(lTXFcM7ce7|0sdg<-c>s0Y=4cjs3#bx26O~JHor#=}*3nsA& zrr2X!^;d~4x7pWMJl?}^P$HUY%SFZ8mjjE9J{B9BFT8x=%;-MM=v>Ueg{LPu;8@X5 z`S+zh*GB%>KF>uZ;<RlFi?Qv!Ky~QJJ6E(lY8o>o+=ZTAG-l|?*b_i-pfkQPt@JIZ zEIw)#&N2FNvkR?-J4V=ZDW(*@t1!z5+P*u}hPDPqT|_NC!Y;#zO&RnSwPAH9eqGV& zTP>8eFXQXH8CwR)rXo+n3&|8}9&D-{82uD^MxnkU&*08mR`c)AN^MEqV^Vl*<V92! z$OROa1|RDJdFyq;)C1L6W3s+_S?lgfU<LRQgc%q;AhHJb&8Zsc%=jNFsD(TLiGzZL zpm<PbkVVn+rz(1p$yefp)g)fFJH*mK8GOVV#Sp?|0kny@s?dI2*F5pq)mIb6XljWG z(I}ELANvG>i0GC1%2E*fiemr7>x4hU(G<=shG>fMcSSahs}dD8j@I~Hv077X;ReM9 z=%qkQY=AggMid(v{Y=D@)Q3HOC8H5+fOck@vNx#%wSLK8qO0-(7Fop;i%z$24;sl6 z>(}TG7j5R@1WiXwd{&?=Ibx4uiK0I%iAV;S7{7~kq~C=(DiKwjy5=5lp$9Kuv=PO^ zr!m^iX9fOBJxTB~9P#F3r;;(zr>N0r2G0fjCVJiAaTfXqgeJ?_mR%v2s3(oCw9sP$ zr>a~A5Sl3AH~=D3;V{%lYTPs}biXRSJc{=qNv?(NSKQjL^&7v*;I@ceV@8@4*$E)& zm@{<Eo<=wuEcro8zOZZdG&<T4Q3t_?OdV_@+UW68X6%|Rt7*{ApCtH@i3OU}X?seF zqMIMIWd3u?Wr(FGe_>jI7qGtx-!PmZYKUFL?_;Du13priO~WW^zbhR^BmAj{A(juf zqsJZCx%&zX+kG{fU>$Gul6Z4+^=R&OGZp<*N}ISY>9f!V=|*DXtt*Yh#`|23Ku^$R z3#0v{j5}}W-DPyUF6lj3%3Y3vJ5^mRI9z#j@dkK`skcE3y|>Uxxwp`htcz2i<{)|r z!9EQgg(K!5q9>?{EnJmbMo09ABbID@I5rjOvYPQj-CMHRvkF`~y@!^!_9Q(*)}#B< zhxcH+o|n>Ucx4SA4k7$5pTq05Y;Jgxu*;2bL%5s8?WZy>3O|*yf<AMGQiwjUhS9D2 zY)`?q6~+i!%9fSyjYAwhH`ZWa78*GyEnCj6w1yfYV3n0his@l=`hLyYI?@QI$}Cps zB1p(zqwyg&1-BiJ*tnt*iq-=`D0+yw0Y5Eb2WRBg<3er!P`^ez5QT-@SkS{n@J7%r z<i4bZ5I@$VM*Mgv95wM?2180i<t>gV7*<Uf-V2ITq#F@GK4PaLt)9I+qW`Yy*4m0< zc*WIOLXPM+s1JJ$&#*Gu9xkI6PA!D}hSi7pEVc5t9-gG{JoeZNUT9~nmR@)D$)M$c zAgB~LiqE^^<sKK2#zwHY&%sJ)sc6KWyVXGlC5$rfuH1r~AyOUnhMrN-w@?cq1}v2Y zEPHPOLocmcywQyGYQZieaEnjEE?kWpDZPfsbgv$_)~N(+LC<61T0Gy?4SwZ6R6<!# z7huTCYkCg#GU}#W*dRFoqk8JZI(6Y&ym?BGP^PZHJlZD<+|n2OB?G(DR)F?21Hm=& z%K``_id^7?M4$_ZQv>+!I!vxW>WR3ww9vu*z?nomd(ceHK!4?-6F%U;*4uTUgUtY@ z{o$8A5JT~@7K4V0RrWv;n*k8-cVv)7(btxHU5B~$fE?9F8W+@S38KKMc!<K<z$U2c z;;UULysc3=;YJSVAsI|aSPZpF#;#y^IaeqKfvZt;{W1fZPPlPQDFMoXk{UD2PD7MJ z+v@dT+Z+1^p{ez@+-@7n^!f7BHCHzQhb?Ts1vR_I{YJ8dVO$LULp>z31fGTV8LeH= zO+$74?jj9K&|r@o%-0MnSoN!_`JzrNfpSFKEyWy)eTQmG@k6>n{E!{>TQKsK6mB1y zT6(SE!~3eB^z}3<Zg8?jbf{$3tt&x}xx2LcLSkrGC-6Hw4Nof!<)<L(X}AkKO22|w z;?WevJBY^B(8FxU^e&;QQ+qtpH^aJ2yQZd1BP-|n2NjVFjE`9T?;|Gel0Y2D-e?{l z9oCC4hiie6kOsRr%iaLuK$|uCKx)M!OC$3G5GQT)Na@H7XtW9{TR>{+0goVy0(b;_ zLYO{kuL!F^oYo$1(3@$T;{iRSw~CC`5ra9}oIrJ0v+#}y!ZwJ4%cGUJW)n?Bfi>K- zA4W8RnV6Z-vxx~kr2H+kRCOW_J5^^Q(WV~spH=`YMk5UwL3#L=I$E?CrD^&MOAV8u zMH8{PiFiHB6qCWk64o@<r7dy%kSBYuS06wRB_@iu@*w4cX;x~QK#*XkDlI5l%p9xE z@8RL#%*OxZ#&!**@b(5Fnr4L<KP=?|v({K5Mli)o3Fb7#KoKy&BK1sBDtO~F7~vwh zIn=O|*w*0-rv<>p*>>*m_PJ$sA`a{gB9@|8hLs}qpW@~KTR=Mr7S6c8ZC0le80XG$ z6TZ1cqeSy;;MmV6OEkv_X06!*SVx9J9^uS08Z2<)MzkaT-^Q-wO0wJr-oF)NK|k(` z8S-Pw|Em}PmxvlkPnT|@GV?Dah>bv?>3y48Mlk87PTdO@*&d`9#G;UEi4}YSGsqnB zCT#WJafhian#n8xSH?(8il1U`+DrB{*OESk<H=-|$1r5ACO+Nsam+0xyUAd-l-ud` 
zlho^i+w&BG%oB@(rCE5cGO1aV8qf-s&bi=L#6(K7ib2{?@?*fP-AcdauBW8Z`aWKC z;rm;4@_&nL(uw(xNrA@J*SPVi&nKcKS<A+HEAhcS7M}7XF-_F(8s<lfg+)~(y+!%7 zd)6BBU%QD4pQ{t<Yz-MfIS-H#lo$zVOX(933It!-n=}Q+7NWF`ZY`*3Hx@BOke5Qb z2T3dxk3dE=Ssp`DO}h5WKq*;$;Yiezk!^vZGEQ*`Q*y1N^qB^O9NXgaN?htPJzxM) zKprLzC1${g;ndcAQy;&baT<eUjncV*nHpU(KRKPCvLRxro|HUb03wldJ1LTdWKPaI zs;|S#4(4;(BvkLroQT31O%VE&9s`ll-V0W-%!hg+_S+=sxP_|&M4k>qz=-TiQht~5 z6*)CY8ldnF{WN?0$blaV9J)k6fYC?rcF!o^(f;&9{Am9T&gveWt01O-M)i*Fr|o{) z%6CTb{0%VzD_pejkB?Bsv`5uClwuUm2nAcYS(K;j@<$8dc(efVfLv^pcfq6uY4)h@ zLf3{w$LxWY<w9Grr9~R70JCAPtSGZ#hOFIHwUW(xqAga&X@M4#vglcqQ?Pi-DGNo@ zBQF}Q{nQFu6vZluD`v=wP|=0StB^hZ(Y$ldE^0-)r+nK&d68^2e1g6sD_&}g$coo^ zBc9?_>lGFG19MeT<85dIHJ{q|nh&yojjTq#8cu-CBw}b@8hbU!^5qbBY_BzklZm5} zjpB|-ntA>Qn2hq>Ex=DY8Rw2<DHxeK8cJ=wEcpG3AMl8joz@k_*ka)%<q->~kP2eq zulNB$Sr+=H7Q_GaO^Z0A*EjuOR#0M}JEN83O{@~k-IQ~}=bL`!UskcHsjZi}jHDsp zGLkb2;d`CY589NS(IVZLpiN0(z-g#S!V^#We6T^-`>aJyBrG|;iTYHV`l*D^)TbKJ zqytQS22r1UUSZ9sZW<iIZ6wxFf6c)Q<c^b7WqrJ!?g5@0Rl*=amJ-OKpdk(eKO~K) z$?=KdiLJVWv+#2gbplhDbWal4Ea=+jUVqTUoc*6v=Ru2;Mnt@Xaxr71dA1j1#f*JK zmKH)e<RqbDiX5yNg&C|gt8>tk!x&ZrmUzjS>-r>~{NM)FLQYZ@i>JS0v%@Oni%TZ% z;;YK-@w#=(>%x;QuoqZcW=H4Qa9_8UenD(UGN-W@x;Wn)(<)qJ^C4csA_$v~yfH*# z)JtgWQdCN={koM24Kb|wd@`^EcB5S4QjJ#7tOpKxb4IL4ezPrn)oz<cBgp|U|Dvh8 zQG@~LNroO~4naS6vWR_5?+B9g$k3xsBcGcy#C$ZSrMX3o3VFIXMk8;*B*}BffgSjE zw2`!?IFTdo92xMWR_4Ri`?gT(Q)E=9-%5i(CnGxm<(;uR*+srJ+0k5e6SV3k1V&3W zVfglrlZj;og~W_}+i(ELl`29*<&fAgm_+)m#FK`Ba#kgusU74D00=pSeV8bwHY7Hs z4H=~NbCwVWpz=}J^i<Ou1)q9E2|o-C5||Jsw*<O?(xhOFvc#_J_Zz~>JH0e8k0G8_ z=JLGAe!t;;PyqmJ;ujwsgr2vH_vlb@7}i1IdESs9u!J332B(tM(TOqx9e^3Kat@t9 zg6jyC100(l@-y@Z2O_4PrCDLVY%6+wLsJ<f8HR*RV1|?GAA<4L6?hFQi34@8B1n(| zhmB%>c1SJ?i=&A!ND@WYx|}@W9FC5fx*k)ZJdagjmQZV#vLZXQW8D`e4vo-bjY_`w zXd99Q0g`lI6C_cKancI5%GljbYJ8OBRa0r!q>ji*3biH?o92WEiJi}^qy#1l1$R(y zN$U%3PI0@c=NnZ|Ka8YUvYcr-DG^XsUPh|fK!G(Q2Om4IU1Vey6Pk>3kR@^em|BKd zf75?7{E_TU1H9!1bitDMvH)T?hFu)L8jB#mi~EvV#@-ZwUP{YabdaWXK)mFxb=J^_ zF+q^zHWXmEb?&p8wE<C)3K+v<X^aPB73NbL`250uX^fcy6R2t*)W&!&sLo(O@Td*$ z0B9`@gV1oNXoDST5QH=~?)vhgq#MkYqNM17L+08v?^=4%kZlJ*lNK|#F{NI(s0JW2 z5!C?XY@=kMQ56D=q$yx1WRIfyp>jckc-KY}G=h<pu`emgjIslQuW+&}sW<9DF4go# zttu%9yJYAaS*asx<M^w+FQt|n8HA+Na#)Z>ia)4__O#fo6mU><7E2~AfL<l~=eTkd z>T@y)$;BU;1(tOaib*;V(drwiU*;MV37Q(V>1%)Kq&x<>(@r)evywqB)di7<t5J<C zN=61nCSYqrTqPJVBeRlHkyE%5XxwbT&Fw{3zf|EI(zu4@%FS~NxA-xVN`!};{2J0L zcNW%a;~_}&(=3d}09{;M-sNx;6**+A%>GU@Hy}~r;w?GNoR-2za7BR+zkoo*P_~w{ zC&eFJiC(l4y=bjDj8TF~GW_5sD27zoW=3#UxV6aT)C6K}h3UjsmZaAON0DLPih{r; z#vgOUxcFc3<cdHt0?V(B{{&6<IV^zy*CkHj8<wP3xHf_@@W#vN`@Cd<0rR@xf*Cwt zlfwrglUxc2Q}k)o`x<(-z-$Pa?<;&8W{}><oQ)=)2^F>?WKVNtFm2<xK#~kOY(WFN zUhLJxp=7yfmY+L><ZKK}!Yq6s#^kS0jTznxT0W<^i^Nw$b3un@iVkJbL0P8PsS|J) zarq{14V(67av72%Vfj=!M8=U<=0h#Xsx{u2or`M;PI`3lmkb&t2;>!=><{y%8W-0V zt^|@^R0i)sVMoig%nF84xjspdGlqaesE2QG2#G-)If>ek6{wagNjDJ#X&`}Yi7P_@ zEvWVsxe8QLhBG}XMMcIOsR_%}lRCdbEeDY!Q5F3{>)sgPLAxp$7zP<dRWj;lm6mxZ zkz{~}s<2f?zPYlqQ>q@W91C-DGFiELxL$0V?~p@2r9!tV;XEh1kSWTgl0e{b*F<xl zf|7|}u;$9ji`z4TEJ4Y{Q%6B(Ol&QKr=6Qd8Yu`RmDhJlJ4%pGAUj$0)4|JKwSbZ> zWFsqAdDgDK9s1j$za`{BBZpjS>Tjo%gw`p~+_<tl+<^7xQ`fk^vjFK~+MBLEdqD6G zQ)A3}3RuHdp@8+Y3Ms)ur7|wyJhzv?ISsQRut7y&V^fQsV5%{kp;b9mwnZDaDvj)+ zPQw+DHm8I-VE$ll(r;Dz@rGahdQk=??LMZu+i*_@cSWeu7#$Fu;@+b_ExZ3{!p`A@ z^uQUu-D9wEmj$@j+*JXG6;t~c5!#)(>!%L!+YqHaXwOP2TQv8diee1{Rq1?dxxU>6 zqjS%~mbk{&!V{f)5Oyi=vL1pxYJcO-JqJ;iDs)l$KlWu^hT`JY@r6I#M@Z1XVfiiY z|FL=>s`bZl_=r;cMZf@SioQXy%ODNaal%a)131zq1Oi}mcZ!+D^bvZ2)tpjF6nfSV z$-eGgvahN5dyyZtp~v|*liH+SUSCsJkccu)#mbLms0=FYM2C1LCzHqw7>&;U`z1d= z&gV~;MJMDGGU=C>MaP#)T|d>|E(1@<qGWq7=;wd@3MDC7l<a`(=3~e~Deg;tjw)%c 
(GIT binary patch data — base85-encoded payload lines, not human-readable)
z8OMlsFZ0umNIkHih+bO?5f3)EixuVjENbag^Ox^ybX8Ly2&XT>aBpm%vuCb^LhmHx zVFmrXIQYFe1iIClzgWYjeAger3K?to2^AZRWp8%3{)33zCaCUn65RmU0U*?m`05TS z&zHysAuP#sD^UUAyEM&tRu!s+T>9>pEmkipN_!T=A<7s@k4G`4R$u<GoWLWjq5 z$5)5nM3hD9mta=6T+}23nHUX5kCG5*HFI%kZPCdT;F38}zmSED>PXNn4_I*<1|fJ| zK&)=bHa!*tE6#^{0@HW8huVod3#oL)p{My!PjHzn(W$~vF}h*pbR~^xhtHikTHJf1 z7ccDT@6WCSire4cdIy3J3Zk1<B^O8ea8%0	V#A58kZPAZJ#Cf3$r={EaahHd8jp zYD0aUWtq<h23bO5l`-xp$?(I2yvT*U(uSS2gr23S|6}nASw5JO+(He)7uH*z=&v_r zs$MBWYa>i+Hub%_=a#p=<_w^P9DiUS@O`MZe-CnOdwCyzXWner>v(E*_u~2K1I2Pd zsA93NS9+59bti$;5tGpxoe8vL`X>w;yTgF#AtUY0gY38iJbO<dds@3dI71|lveFWz zX{#PCQm5S9OICDhTai5v7GZqZEYl$?OCgPow28`V*GvN+=wn+hy;<AKrp89c=l89n z(Y@H`Ux<rYK9)B&zs+1$`AvoM%#Zj6=`lpjufUTeJ4}Bgp&KZX5j`NT+}_|W-r#as zLxz=Q2;~yWA&;RbmJTu5J09YaWVW|DKWsN@ZZf^=JO`7Ap!>z+_d#dfp9`2?G;O-9 zF|As(*`=M;88nW9l;$sjIqe;}_xF+qf`x;*tDB33iT$55M^hUF04o<M3+bOTHZCp} z)_(=F{2k2CPs%KB;b7@zMG9aAfC5!Xnbp0WEJ&HPl}v3c%-l$s)!a?p{tA(?H?ago zXjz+sQnPY!k}^wMSX)}TApl63#T@M%UDTXR%q&Pj-kSehMNPrP)s_^%_J;uwDQfNJ zs%+sR=4kKa=wRXCM#@dfENN}$X5j)l+L^dnh+CLBnp+?U3L^Y1g4araj}!`!6b-EF z{e4!}@fe*2fNS9iPfdkcbvOxb@Oo&~4ek)}_C3o$-8@6Do@QLA`h=^u>{)Tc{Fg4m zZO~p<3;lDpq&(;DO2>aHnm^+HDH<-Wf0YaC-(ml)T-g30!v9veu(STFSU5rF|LcO` z`M)oiv+h7J(AG<O_jPj%N(1~4@jq;nV8>irvj0i!KhpdWo1N{y#Af@$%b&1+7n}W0 z9RWSL|G#20i(7!a99l@Zn0Wu+RfDr`z#j>F^6!-F5rp#pA?TJZehL`)e~|Alw*Sb& z%JVPz{;383E*~5B|ABn}>+1ghTfFD@0CPw%II#EsB+Xw;{*eaoFXd(XqhbH!(_-WN zKad8%#qn>o6r_p&R!jezJTeX-1$A=#qo1U1F76=h|6l7d!oTY=>A&moS+;>RmI@Yd zvejaRu7x(WTsx$QHWCZ^u0~T2EyO%Ew7iNcwHRmBj9CI-Qc1#ITB7LQOk8tHEJYm^ zy<65o8&}$*I0)XhO6Vz>FZO<E%z@4Id`vO>V!Ca@H(TIg&GnNwSP*Ib$2D?c+)mP4 z*J$`Iie|L$Ne*A*pR*XiAgRT9g!h!bhT_jiOjqU~CNb?XNu;^adeY)ar@4J+RsQqm zCX<#F_)V@gr7Z=iP_DIp9u#F=uJR}9Ton`vA`+P>A&-)n>^Y)9*}ut!#v}#0fR3$& za(ax1ZYQcv+9sNmcs1!2nv|_p`qLV2)9%ZXo@JUK^gBH7%-Lp@Dw&uGJHc(?DzeXv zy-}I&B(UolX)4jdF(^tP*mNZ|IL=L|81&Qjq1J|Ubl}o{v%-Agf(cK!={04bN+4X& zZgoOibG8qXt>gTJ{ZlXMU>X5m)mf9d$B&iM+Kyj*-_F8u!wM)5*2mu!C%6q2v?=W| zCbF=ahYVE=9E?lAjqg?t46y{6lewD@kPv0YKg{nn>rW*x#H$;`(;M)uk1>63C0v|! zUi+5bZ{p&XY={7Vr8a~a$0&qg9@?aGs9O!z$g*Is)H`b*9q4_KvEm;I!*6g-(M6lu zSSbUn$XXdzZo*e~r>-!ir<n^MnIFgIr*0kvLm#ud7)vzr{tQ)p2+0Dnvwr5nuW@g| z@joKOT(ymwXuu@@LWOB;D+IY(>?F}TZ0hGq7a(2`|Ffew6a<cY<V>U|+5S*zl;}=? z<;jC>TFr5P6P+bc+Q{|gow|m+*3q{)iJ5yrgn#*H02)rg{Kw(l(`~4NfmQ3GiqA%@ znLpb&Z;<)tj|e4Fjvrp{eR6yS`*;8$ep(9BAN%ozJbBn}0)xll+K}uaqONDhW5)Qc zf(}=s?8hUR>%#jk*p3V)1IXu2J>uzVe&QlT@ZWoucN7E=!>X?*+kZy%B%MgV|J>AM zX(@zl`(s(xd^ywE{VT^5QBI>c95F^7G}82}?&r(LflkDi9G|uGOzH2qOOLsq*mz%7 z`Xv{&2M3z6Rd~~n@rL3w@pOu+*MlASR7afJmZ#n6wKc;NUPRP&95WK_c}59D>xr06 z4E+(&+{>5paKZ%6X$ud=Dap#R9W9;W!cD)#nax5Yt@qM#qA{n8px~4(UudUvNIRFV z=Ji+JC=R9Nl(GX?u=rC9Fr(@Zfd__3-}R6((Gu&vYW=?Db0LLdFSzZP6Z{HSPcgHd za69xJs-9|MW@`G%F(96!eiBY3aCXl{0D+s2n>3=v9M+fs4NBy%|EtX_f<@@bkN=@T z{;OsBs|jOa;rLgR{Li=VpC<VquNVn0Hz_r@KTQp@nF+|JFB=ETpY2am^3U!J+rPh` z{};J9|EFB+|9-E3_I5xo>c3uZill6$%p!j}A`XAu`X6sARVmT`@BWEjk`kz0BOL7f zK`B}RZ9%1asn`gs1#@Oh6tjNkIMP^QTMSn>(XmXFlasSC=0Hg-8o?y80BF^omf;P9 zMJ0kjMGrDHAU~wRg9=JZIOAe#JC9fi=DJ|(eDB=+ENnnypXmbx_wL0RexqsQ6YM_b zQVxfK5}~iu{zQ?8>ycsSY<}h!x|^M~{p;@C067Q)0z(5hP%*ejn0xQlwf)>^jj6<2 zu{(ojn%t~Vm;~6->a+YIn0xOWriXCFdZo~hg+-k}PE13qL%okuYiG_Lyqgx;#+sT$ zO|{vjW(h)t-=j_WL37`=P-WEs_dL6b7BU&;kn{;5HF<68)(iPazFV)W^6}{>V#PqX z3ytipYv#W}*Sxpuzdg6=Zv=?(bU>207tzRm!IPQ!&~?&l5c|5eC90P0Y6P#M%#rgf z>C?V)vKTFOM_KNM@tMt8>qCwB1B(X?t`AcC3%tG`s{SJw1wL@j9g6sCtNmNoI3|5M zYG~^Ca)nqbpY{?DuTF>|oMo-(2E&Sejfj}FqnWjVW!3k7sHSF`D|x<apW2lRo)r;? 
zn7ApK8-&ue<H{QLWSNJl_52*KkG-FLay&)aOKtpuCqk#*3Zpty;)`xF1=6p%r$Fn4 zZX@*4^f`V8)S9hKn{#w%;2%|vSk5$IxHTrc4A_|r)moEpPU$mo?ANAbj`r_h8}@U% zIa#qB+5*WU{l(G4-~%DlXROS>CZ@D;1}*iR)~51e2!1Nuq{_2jJRpwRJ@Z-@VUG)K z42F|`qk2zm+NFG|e6g)9!Y#AL=}RfD5PBAK&Ka9;#<nk_V(t+2{#ZDlNNN3+Pd%#j zy_$Dk+wQxBi_0bTm4H#&S{iMPHP=i<*7x0GhToFD%Zi!A%T?pmoQp2@IffZ)NLAw` znHt;DS}*zKz3&`+okH)h<2B$`|3O**(uzOI%EiL@FRf$$yThSJ$}A%$CTikpVNMG8 zORH2#4gM<``#+QYdoPRq?|#XD=<ENtKEk+xZ8cGo`|E@eKq9aQ{2PF4lRW`7l0+nm z%GnlMLggWt!9dUB_Ih4J|6$S7K9^05_<UIkl|Z!!s^eZKP3-(&C5teZBrU+eU*EUq zT{Bw9dU%2fu(Wso=_!71`TDAa+1mTj<?H>!p4h;^TGw37gQD?GC1K>T?v*7xpo-`0 zqrbQ7^W}d3dO568>gvT6U!YZ`VBel$6iyfJ*iX*urw8)4@;fHx&}*9<`qAE!(ZK+p zsQ2q-SN^*d1bHKHm|@P;9SDE7c~ymwJX5;iJugGu(!F~2JQi>l{-H+J8(zGh-Iz83 zAy>86*Tp5JnNFihQ(Z#8-h~(}o^Fo5=lM5i`)ltAEpM6Z{)l#dI#!DbLxqU9z^h+7 zdFQ1=?<q9C*@1)bYvra142atwzMi)&<g5Dm+K+Kia{dl;RU2|iovJMW6-52M{k9|5 zyG??YVMJQLCwTiDYQ(tU^S-)QaGkaHhB|ui@UVQC*<4>4{~j^y_khtU=5>9a-GKG7 z5p^IAyQiHszMMaDf_%(z%=zJ1)1sE9E~12IhlGb9?L-EnUT&wHDl(2d%C_P0j_tl6 zAQK#YV4tiO*(m@<y|WnW8EzEHsj-TZuL-i_CAI}Bq;6mT2BkSiYN$TU^!a1QCip%S zjkf~jGts3Tem7sIPnjPIs-<u=mbj$g4pJAP&4>lbc0UvlwxV_c#fycJ4T^?A*=Rr6 zJ0v}S0|SM^TlW)inyhdd<<SnGOjX;AtjVf8g7~U-*M?nX@R00yhT5AA5&>zTx44ah zIW42Kf>6YxYn@&6+B>wdEk9RjR36XON$ZqszJ7)~92S$bt#05nXnH?EO!Cw44Qn_Y zS@vSPdxigrFy~nH(q7l+G_H8F3s$+CMzZ1)tL#02+@hnJRChK?4pFQNtu#5$O(kaV z;@xH#f}X(PY{^n%u!R!Ao>{yEwCc)uNmnDUbNpAd0mE_UiQhG<Bf$n)@L-#P`Re5q zdijh2yLkJAF^mft>KBwQA=+6Q0MblLtCqG0Ht^DfU}_EQzG-|UV>rMpodHD+bhXk< zQG1x#0ms>mjn+t^B<}S@Wn?>kd1P=ytje9km=I~yktxE^K6}XUs&`^{n|n-w=ohm7 z`TqQJ3}-^xBgUOka1z%09BwdE=3m@UdJsnwW8q_iMj`8B@4t7>pARMlpH81wv%)<m z9ZA;KeP5QxqhXsol^k=|89M`R4?n*VVjU_?cKG=}--5rdc7XLFN?%H|s8pc44$C<W z1(bX8N_g5UQcvwMJFb2AI8WiH5z{7DwShNIe_^XEL`4;5tA@!8cxLZta_k)+V}`pB zW!^9Rzzg}5*8y5tMkhL__U1=;yd&{XpX=8j7pY1lLSFal<D-bZfQuoz0Fxmpt51Yd zRy8bidxSTMVUss+;rk+v)|(eY56^b|Urac?({@NQr(g(iB^V|$kbON4<KXmdc+rB0 zuAC$(MBPvcx_ob&;L#ZCw09uP33gWTwrB90rB~1yy)wbUOG;_-d3T2SB_VhaeyK@9 zp+ZA6FW{DbOtp>)VHe{piu2<62wJSzv%hplx~QUiaho=tT3oVYjB=N$HVZE>BWSQi zaG(HQh43J0sliuvG`O$p_u>xCAqKs)_r~BhEmv|MoouW6>1E2QbxdYHkCH>`QDS$N zC#M&q6VgrT{nVC`{0+2@3`-%eY`;7l#WdOr#WK1`YJQhV%*6!UkB`bNiKxi}Mls;s zMJ&?y%alF98DQevB~{W-eA%(K!V#0=iHkR5$?1t=Fn`GC@}s3}{2ol9M{|ZS^arRO z*H=LLtP%vcpzLxi6%Q>7x4P+s5xT!HJG?&}m*)=%$Yxza4PWX3FuhSTmX-``+*%yH zx-wG-GSW}p)vy#GH|xaHhd0tyRm-ShxB8^bBpSY`T-US>Rl%7SO9e<ls)IoGa01ag zq;-^X?5e?eq%`nwxLGv3;ekbZTM-m(19%@{a&fOJ%1@E6Ge|FuzQE%IHmN2ZK{zs} z7e1-kjO(AyICxB7=pz?&lzq$Y@F6^MhEHUY+VaaMwb;*;;Vah|mn%K)i4RJF(?=CO zEx;&wC<r$4W)*&zpo@J#yTyU?;BptcxSOq$mP_GHC}JBpAeo_0ytxIjHLDHn7(r?w zI1>>*u-3yHKC+Q&>2Oc<8(_HsD|4rdrpA07f3W_}LuR;LuQhEyTc6d@cmga4W*M!T zA(Kn3YjLcvVZ*b_Dn&r6_8Up@?LHtwx+6IBo1aMR2JTbX<0n~1aGnBe1_I*=IB?E0 zGn8X7q>BdgPpbsRC=Y;qFGNcBz8vFI=pVo>t}eToTA-a;bKP7>ifQut0d0Y}s4Tok zdjBZbY}Jzner7dwl>#z=WG>cm-SZXN-$L@^n&oWlrCgE2T6fzoJQc11UqT$Qp$IEN zds9JnG)&eW^rj!4eRgDDyF9!1=K!g2I7Bw0-Nmu!0+p!)C1aX6u3=n-ehsji`w5q) zr2IPE<kr90!BT1KUDk1>Y`H54<YXyAJNC${2#BUhZ#$z~(QC<EcVeH?3)9qjtR-D} zW*>-A<=lg5Q@gR4f0^tNMRR8+f())9F0u)q%<qo@#^VJ%h&$F|H(qzNFgWT9(*fqo z-S+1@ISv)24BH%(Zp-g4CPVZRp1827EeqsBjZXLxIwr4ZCsC#{@mQq<i2Ce@2ozf0 zpn)NQKV}E&7G<wkZj=kMGmxOJY+Ws4%IH>dQ4#pLe_E^`ciN)R@HlYi{_M_^WA&xS zH21+-c{SJesrBT}R{9}_ar`9+9=b7RHbgP(QyivZ{dTs~gmL%nBf}`|wp~OE0L_($ zIvd%U`geevA~%6)9M<r%YmdqxTn8l~&%mb&+7{QIWi;1%k$PTGG5~dU3|$&#+%mdr z&+sLlkfPbX7c}1Q6sHLova<Rlt0LAo0OHT<jSf%$%)^8Qo7;nsf^o=DRs;{eh})~R z4>i8BWYm7d2~-Fz)BJBtqV&_pqogd`?d?+OKNl@!)td!n4NNC3>PO3O+$@L*&t!?r zeb_uz`6aj$NRYkH14CVHoy5i}9j@0>nx3Ni9<?PSd({kwfVj;QT)gDEP{q@D;v>;4 ztORmt>8F83jiKv<kBV+3`7MN>5b-HfyJB=bWnDkWj>U)ydbCj0o?d_FYak<zB*v9? 
zy!)}w#H$~H5;xnlpVG&qVMemk1flGO*y9-p|FHrNg$c#X*L4m;jOHYwMQ3n5zagwp z3SeoVZLhJ`cqhORkrrKuIJP{3+*y$Y+e|kEuJR%Ma62L1Uh5SB!kceuD~0PDYRGOP z<`D{X2jXvGhs-035;%>Z=+;OcEfq?0Ud~;$5*jBxh7<6CC#bOW<HD9`CRA_bm(r|s zvnEEfRLtv$)MwKcIr*WFt#u24v^Pysa`prFJU8^QDQwUFKGVINIxjn71>;DJF}KU+ zl5aoNxJUcxqsNu)i5X6tNJ3P&a(~^3bRH)agha6d`s#y2bNYpC%k0VtcBcDOgo&TV zl+NL5CuOpiPFMy;GYZ7d$(&QJWI2*qudR20Eo5(=Am=d}hy&IhAxj4_G@FPd{ZpSZ z5ea9X4TvSrO?%SOqk;ryCEaFdYD;bmt*!SBZvp9tA+o`S=Y59X4b<Suq`O@SkHt>z z`WHq@vb|h&2p~IiwOrOyQ4lmX)JAF%<?C>>7Iv?%R2AVi)bI<hycC;LFpB}&gX1d8 zAFlJ4^A18zRIOmk)Sc1t4BaiXPa#_j4i|S}5tG$v0cUNTYE+`jv)F;qll6|uYiBBF z>AvZ?rc9+6G_oCysS!xOq%sgxKZv+QR}V(qgqfC`#25J&%fhLv$1>eP4Ja1u`}40b z#iX?L84jQIMDXudNegH-@$xODFDWy0Q~<*0%~FEowN6f5N|qe_Lv5ZHxiQ7B-e)uN z{Wz;H&N7jG_vEL-rare(hoA$BA3T}>w;ShS%~`0>IiK5p<Nj=FBm1<m`MHfi^y16` zuBd4hx{)v&KB|C2|LDvC;<J+z)#2Rg`T*zxAT1QtbM#cPT$zB{6EQ?ZiG#9`k|BX0 z4zWhE`Anml5*iBaTAJ+oqapYB6qyVnW-l(Eq>qwLa3^lds(^J=#D00+n(~8n)g?Wn z!?fjbGrx1MS_igu+-j7>hYxjx&ZrVi$k%HcV|!6ukNVZF!D9hQJPg?*rwYcbqY_wM z&S<~Yme-+(CjD9Cq8ZX&c?U9rMG0#^7MT<WeV~|FdW-5(-4+{z+<{HhX8?H-#17o+ z>b5}p&BZFxkL|w-O=asn`SqiIhP8||rP-}FH)4fp@fmwEjxGEhYEmWn3>C}9Pnsi; zyY{_gjDpFiZ_`1MC9HifgWtyXgVCxE*(5YZYRy;xDw;0uF_LUwig@#&##=$Y&jBi) z62_&yzkCw0a))`dGaj)q#fT>4`3<}gI(2oLU2Dnu$Z$W!#!eo=l{Q|rC>ZYy!K4MN ze<PMNfO5CuL&;R)6*QN;lO=WY9U2SwsYq2LLzyq;9s4!?nZHp;1vks^GZs4bZRSTQ zzVdo#{_E_@b~C6D9+ybq3^|wF57{)q8je+c-ktgOUtJ4?uPpI;wYGs>_ALl2Xs8D^ z#8k?49Z67TVJAg#+U!1c;2m#PlIrPjUrcgMJG-^G`%n^!GZ7u37EJJWhU%nBqo)<* zTDa?ClovUVDrP(&M2ytrQZ=pRG{cRpet|Tw;NxgUS(SEOpJNLWMlu$a3kp8eebdJ< z(OSgQ{fPV!72|NIq8#)Nv~W_IwWE1Ar8pz-cz+)Kk&OKOunszuaG&bpQaxugnvAxj z%rBf_pjB7OD2-Ikk}oilkwUpPRcHJH(S86>GtF(7?ww9(b;`>2GEK0S=ti6zsy}#2 z9O|oMk+>V>L$MJdpGp$1K)XBj>h|x;QHj6FYcViTpJQ9DmnX)zaO{^v8mez6ZUz(E zDcfCP`Ip$mBq!x_cla9PirHPwrJw^}uoD+nHu|~5DspI%Se5bJ-$jY#pczlUZj)}h z{3`$H`Bi4@Dli-|j~|fo;Gf$>)YTwE82@A=IC+vUY}9_vK5=t`<e;5IO1IJA`n*4a z8#(#ia=o&qZpIL4EtEOFx#wy{<)?kjGXyxa4_`=G9`v$Et)XkXDHMQ>vB1pxv<>Bj z^~gNtBI~pn>r_#xD$kkmG8Z5?%2wrjo6EcW<RqnUE6Azi>eO6A7u)&sT|NFepX#wL z40u`|r~HV`05zEzKkjbkP;7I6(1Nm^bCww&%W<{A-T9{Spjm<57ke^AbSZj*Kdg~H zTCB3poRB^V)E@fkCL@TJjr3IayN>xrlMo!fz4u}sozB^@-GhfeXYwGB@^hA(K|*MS ziZO%g+$Ss8W!sP0&8dxW<Z%e8_Zl59!X?LD24!oIFTc4QH&G391YohHKmCrJjPQFI zOpCj)+?Hb-%B?EcR>QDpfpxChb_30#H1cIL@CoirOIu4nNn7LgZ=(1%*B@U*G}}9= zG$FKT{$v@pC3R!&{o>EmDFS9mXoLXtA#?QHY&JN3Uy3mX7PYQqf8#NmGzkF15+s9& zbO26L1EoUE)bfcgg%oNbf%LVR4@cQQze5J?8L(AYLGrJI2C!Rtmda%Id7bI7!VOl@ z7p}gn;N(WUYGOuv<4AC8yJAW6eN!l>m-HzFTTk^P_i=C+NRB|q67-~xSev`3#Ll{u zou#=$o?X4(P#qRD6hdR$7zxXVIrPu0dE>x@Cto?O{>6<Fyx=M9_EnbsF+DyY^Q9HQ zpI~<$DZ<wd;1S&{067K&B>3tUS0*7WA8|ET<i`bR?6&3UZglj(21Z|mUq(ltetXR7 zp5@I+i-n%4NfKp)VgB?fkSQ!LY*OJlJDvhbDg8j&t4~*7U2ECl@sjMK+?N5&7iq}b zFRtAZxtD|(P?@xH|Jf+iOPr4qD@b|&QzK202t7<!Ebdo~&Xh{X_FuDsI<X>9coilr z;U6Hn*01oxWfVt|+Fy;5tD?6QEn$m(W~U8{(BK4+hTDhqG|?KUaD{)@{)~okyze>h z^}`nOg#nBqoZITNE~V6Wz>FX|WIfMsNR4c*j0jX|51DsAVzR4Bd@(5W5pxJw7l%0> z^OHoE-yaPTfQ}zN9**w1cDchnp!UJ79a89;6Y?|v$loOrTPpB|Vv$$>AmUju<KQOq z%({+L%2JxWmVjdkwyCF9ZS~{Rxrzlx2v4+Eo(;EhDi5Tnu;@@(UG^T|hYE+ed=+Bx z1p)<~xMpy`?zt93!m4d8-&U4&M%;INBZhpz9uw@{H4#I8W=swA&5+{ipwHg4#~T;? 
z{HQ8ZR^S8P>3&Ft$nz5gJ)e3uNdSz=Vpte?RSI#!YxQ$63!!iY?z~dYWH+Cvj^ek# zlJ$-Dn;?Km89iU5F1>S7td$q)<vY+PR`L<)t^}LqU?)BsenP0R`Ah*Szp(>`vg7vb zg+;<PNB@sti7R|EX@U2z?Vd~4$_0lMG{4hK8{xuf_mSx-_9K;Wn*CTz-Z!Mot*G$b zDzGLASR<bRM0^pB7NYH}qV$+9uw(a|(jAOMECiNY<Eh&>mY6vRXkzkn-~k!5t-qSY z!gvJH2y-1-oS(__CSLGcCNV#g5`~XK0iI+nU|$@VAn3kPigu$s%A+~$aL-CT`p#H5 ze4zW?5yAY)W&JlyFvp8OjCsd>R8PlPz=z#d(dFRmHw&1)xj1;cyv;mK9ozk=YRHX) zlgp3q#pWn42h2ew)VN_<$kpq1YIYmYR6}45a)$10R?JY>9mg=~1*UCMlNwyO6Jv>D z)|NVj0rm`8T;8H9;MGvjjlOp(d4*d9zN1L}2IeHMX<)#Fm_}=8uzG|wTi7*B27<sQ z6fg`dZc(LN4<IH(oBD>~2DlE3_L7j}iUaUHl`x}M*r?dp!PgRJ^LIOV2L{ptd8fM2 zp{eetoo8)nRQ8ld^x)P|&ZqHgQIV!nd3}td`=pOvt*&LsYPG(kg{z}!5NLWiLfo|( zv`#I`#R-^NM`BoB{EMAm@|2UAibrJ3F6Fyd8P=#Q9oYd5pY>!9U3@6us}AVr<|57_ zCQ%2zefr`-8QtKQ5%9D9_oYDN4aQ6lfeW<j07Axvz6oS0XdheXbPgrTcL^2j>C@t@ z%8#va4Mom(3ibQKCb8Di1Ebs`nwHG7gYbMnr?mvem3;Rn;}}!BYiI#FKK7*(Nym~O z<QRI6XX}rPFtV~#)3YNlS*^y5At+gA|80dV$h^HDzAna)?b&WRHBrKMa86W+5nu3j zHoz}#%Gzs5a5wt`=w_5q(k4upXkcyX^YI;4=S7^q;xS!Nx~4LfJ?A(;6NAAjhzW`r z#@<#TcWFHr3Wn!d%uY2liZsbjV~Qspv@T<HYZ7h-|3Y#5Eoer5)m8~B`}(6$a!fA@ zA4??mktmr7eK@Diu}R>;%hddND!J_OMgp<u<Sx-zuo9g>T}!-XTaw{>?m}w-C2WBN zAzGG)y!@iAsJH4!>0-tc#VU%r5uHEO?#%TW%;QbF{6+_IEmIkjOR<MZT3K{Yx%4{t zIim2EApH@O`U>5^mZ0ax<7YJAIrnbnM?DwV?$`D)T^?mO5pNOsX2@mO8WIXR-j4gb zu3*91Am}TDUmPemegTd*`gNrKLN^LJQ+VY+@{#lF{%aQu>Itex_rR$KXUD1ctNKR- zQ;FHX2vBNLdvrd;nZkrs=o{Matz$(7<l5mH@(gfSJ!J08nJuIgRaJ$8!_=S~cG#Q) zC2sY}Nw|JOY*k~z%_-dbZ3uBR`nMk&e>w%j_9t5`XANg(?410z#OZ}bh;*W1pJnH9 zhFkI$5+%+GcM7DL*F2DeUGvns;eFhk41($YZGTmEhH<QB9w;b^ra;hv)WoZC(szJ` z8A?R@9kH{X_*Po7gCp)Y1*Q8oSEVU_5HO!<tG$(c<!Gn7o@CrYaMR5&1=s0(Xvw_> zdsy#LplUak?m+y7RVvLjDJv<J>jqY0tR1?0A$<9D<MUwBvnuR4WqmGYxPLVaMaLq# zNHxC3$==HsYy#mV3PH!i!Be)TfXB+?gt?{oZ}H4LWw1V*+{%F)u%pj8Yn{7X#BG`* zu;d~D66l{weQp_=DILW0J~Hz3Nd#bQ(Lp&>n-cfK?GF;h$Sj_IP%Mq^@@W`oj~3#N z^$EWSJHPYNys_0L4=-LB9J+4|@2ZIh8}>7i=RHQX5T;)=Cd1o6+SEYy6$8`=I&y`n z){wg_V}jUpousUX(h6bLAIa0M8>)AN`V~K!<H*?PyCypRzOq2DBl*TQq-E4jOD$;T zuGkK){$Kl=lrncMXU{|=7jPi5X;R~_h_yYpvHv3ek$OKSW0lzWxV0T~H74<5LovG8 zuEQkk;W~|}BofmVb6AaJJCS=gO&jz1U{k)_F1DK$w|5dYwp%o~3Yv0&yuI2WjU)o& zV}#Y4`!v4z(yk-+{2FY^2XQ+vX+=T|9DVgw!F3n5rSr<dc6sWyJ_>3WMpP!o%<cM0 z#SY&xTYIYhu~EeD1FIW9e{rpFg2Ahf#Wc+AeJGuOLC_y-utFQR%?kHLHj5~uPdso) zDzLACghA2TvR7l=Ro-gU%fS*_(cG$9*`kZz9TthOb2`XcZhXul9LDA-L^I->SXP|3 z-C<@f^>1R7>B*=su397o^9@$SCE6HLP|9izmo^-u-eDj*H04_yq~38M5xF)ML|=q$ z4sfer{#^1|**f~t=23{~grL~zV`lUhBl!EWLL%5Gz~C3uUV1w^sX(Qfou17cbdN-$ zTa>4wYl-*lPN0djXC9nk4!W+{yi^z3nW_N%k|Nx78UW*a%RtK*EM}`}M-nyERfO(? 
zhG6QSlZq1kgK~Svm)k=D#$-oZCIn}0a8Y}sUlhGv1LfU$`6}!6<N<P1q{qxqz|OC< z_ZDv&gdHC{G74aS1YiO=MdR7?CPsH>z56|Qe3_IEuE2{}mwM~RvY+_O9wyflgx^p( zIx>3xla1|<N$ih}jgy;;{a@Czf8w(KWe`{X3vZ`v;_?SC2VnafkELqi>geubX5k7l z!TpW36LSRNbR69Ng#59=sUk3gR%5m{5q0z;)%%l+i-#375s}s4-%aU%<^3m$NZG~F zOwGcL6lCC27MBDW<So2F$iKe;e;_;fUq`XOk20hN{|ztnhg~sO0Kz|55fmh4{+Av9 zPn?u1D~R{Mp@mYe01%IVSOl>GGO&aACjFC0lA8&{<zE(g5R8ZmL~#5Kx|8Gp5r3Ke zK~OYFRuBRB`&yE0e~7=W_>uq+@i(ALl9LI<&Oak~KoMO3ycj!(_zM#GkNf`<<M@XK z8GBY9QUJ%_7Y9*X2mtoK!HXb@8^rhDh&>s5(4zqS%^*mOf5RvImHnTE`cI_6KUw+T z0@D4zL9_mT+W#{s>u;<j$KMJ5M~oK2f5uw=H~Z_EpF%Z}mIqOn-oe~6h0c5cRlwJc zWL-uTa*4!v4mg=!NJ|XT*_ar4KNQ;UdK7vuNkgrkGYG6K#_oK9t1iZ#AI0$Ck+=wH z3$!x}i@NaKj!&|GJghvoqnNHiz^z3`hd-RRZ8n@_|6DY*J==VB@LV?VNL#n~*CDmT z_o4HB^8GRI{p9`m_Z`<J@}lE&-sZb&u4S|T&Fa;1$3?)?f#AH*+x>}SXWr&g)24zF zpMK|S?%Lby%YG-k=VR)c$(#OWN#16{K}PjvEwy|#$PI6q<J0T%YCh`j1yn#ru2b{g z>gL<*ebDyaWM_qvuiwr<-fh~`WSR5jW?3iTrRDwn$Bw3@L*=t=v);SWeFEJYarG}( z9}q14_}80El-@~iIS38>rorz`>UZ~a|0WYg|2g-`p5*7N+A4|?hk(gD%5z@h`;YY{ zC$o1~j`o|0LO#2(m(8PrZL(Q`Ut&Y9D4svA(o)wM4S%s|Ux(~|=Y88H^5gNDd~9?S zddWJ2>9u~$C7{F|df<OAClCLO6!z2`he{H!5|zq>TZRRuh}Z(CsuIi<ye347E%^E^ zL>y8jZR2u#HD2?S6vfD8+2IhJF3cF$pLx}5D@?g|8yEY92J6(wpt*SKE37ku1ct%Q zP1nWM>(uH|fVHrqP!j=CUI3mV7fI*pqrhO^m44MsY_Hh;h9AC=3U)jnVi>I`zYemm z`_8ZLM5sI4xJa*3g80E-<Cql`Uyefr;l<OUNxq_nd9BPia{EpmLDlb0zQO7GYhE`{ zLId~YpsIvs)f%kt(hY~MxmY9$;5)H)Mj-9{ulZ2leO6cde0jNcqwV)g(;@SnYqpfa ztuNLsuLNeXg9-bNhL57Q=YG6x9sfG|NMhipT~wL{`2`9X9iHnzc(t<W!tH<=mS@HJ zLtn2J5!b~{^UUplsRGP^zvFhd!S@Jlb;RwK?>9zjsF8kjn9*+u1{mNr$?imB6Fdge zt$NW3qz(<g0N2An_Sf`aGU8^QY*U8$7vphT==g8|uINOUCf)B=TGR4d6q5&MDMz8q z)Ym8N>?@9JSYQkG<c^!Z=GDp!@TgdAqmE^1s&a5bZ!+wZis6%~m4XthgQ`Ic@<&yy zQMHuSRaBT(l*vfEXej2meqW+sP%UkM!*-ri{Y~caVv?0Ecs-4U6r0ut9CM*(RLg0M zOlfK8^)yVlsf7bAtos(OmJ1~#rR9i;5`sM}#=lc@{R67bpf`vm=b6)<oJjI$`Ndxv zp%pK$r46(QVLm0K61HmN{xC{X<J->!;V@&TI*jc*xRxB)-fnG$^aQqub+S`vg%RE! zNgT{9Zv*^b5gsV$;cc<FG)<UR5xrbAd_$cnOTEaIuUoW~<2WOF1i74U02s{`0#5#~ z>%Td#p3-)jFMs+Q&Ist_&1N(U?IX=LN!f;{O!s3Kf3<~9iJ}dxSDSDeL0F1n<~DCj ze9vr(bQ*Bq9z8t->_k%Pj1AMm^C9{@HtX2sgVnz@584D(P~@aa3o|ZbpGvefJKbGW zcJ7_027g8H_6j{<-9>dh(@7+F{z7){HxGZhW%z>E9ayj5Cxu*O(G~|!!E>dvJT#ht zYaJJMvYo<K&tL}nI3@+pTMh<y)O^|St-!O!euZj;Oz23OID#gy2o<Xz;>E$b^>eDM z`j1y*X(>8$IEGk?&we9py&|;X#_5}M5`jCWqI3$^MqwDj5X&MQf#0PuJ@HT@)M8?3 z11V&lkw}HF;3$2U=}5bxEW<)+6nmKoMm2-0(73-akZXTin&3{SWVRQ__x6m0nn_L? z*I<3M)y^Pk`T}p6zCkU=Mv8<hn{ZF+LsJ4ZlawtY8ePLZg_I1llp-7kxiX!TR*7U! 
zgEr9mk+?X>SQ(2E$?}A%YWvk42vCdct`p1<N_Wbu%ZYo-W=kQ20r;9lMQgu{dN#%j zYrgb-IO5ftwD!{qvoRgPGhWAu3|yoee<~`0suLnY0R=`0dsL8d<?|o`Riw0tzr6S| zKq<%6m}HH}&(;a_(4e7pDI8S3d%z%@o2*V-bYE8lr}BIPKwTdg4Y=#=CxEf4ALZVF zrn33Tmcy$8(WC>nQ9ER%SgKHOIBTPv*uw?9qoQjl=Gy0%vht&S`FTHZQwb||ZTk%1 zxOOF2Tcf=*gj1g)i7w!Ze(Rp@AFCmyWR&1T)+s*hb!1LtYmT^?It83hxY*{tHF%y` zi!{Qw)VS(L7_T7r{xq}3D~D%9-{X@AUKNjt>ZOk#fc8BWEDRCx$RRJa60a2D#G<?j z_noO5&mr3mJ=Df~YcsL!kI{|LTznC1LX--4qcnFz=PDL}TW}$VO}nS+<K3#&v>;MK zP1t`+XDgKavM~0opUN?<=0m0vOaml0coH-$EU7hndwIhQzI>{f3}nPyT`<UnmfAFA z76z(*qGo%me&V}BXQr`^pM*z<Zp9@wGho0t{TB}=5}j^3L;-pJ?Cm%dx2}=p(zBgg z2nYg3p=w@#o-|DgQE=M`hNt**LKHlQx8Z~$2`6Fzr7T8NP6daS(V#MW;`&ye<IQcf zJAuv3Bum;v?6V9wr`s-yqAB{vJQHo=cuQ{HL=tI?(G@i8&#Om2?^Z)j#ru+fPT|YH z2GTr@FlP;6g=(&j^Iwvu&RdfX#7N$^;U0lzfUby0FYU?7`B_u}OpJ%O1>wV@n4fFy zk##Kk$phzt!7aJXo!Db3A?zSHXSi2_ga|REOOF^DKj;tu7Ce<vaYni=60nfNzG%}N z9@p;-QAf^jz*g{DW%tLW8<g70Xg<Q=n77=O1RKIY)dfsqk0SXUVoe!-IglEkX3OcC zj;C9II?xz{2TM0}>wbab?f=0_8IF!ngI_LQ2`BB-b?<<$n7U@)AB{r)t-p~$@za<H z=?qXM&2_Kt^~*Lp2XLrvY#0*xSAE~LlPPcLMo2G5p$RFNKoH%mwQdON!3LBYh2OZW z%EGU(W&uLium~cRG9V?o=w+$)g(H&8y0;~GG6Jw-T)MpAjAf?gd@EOzK~U3p`U-^} z!qPrx%_Yj{(-#Ec9-dvRUbm(Qwgh8iSc5sB;jhK<-P9vtD5#{%V*CgiBlrny<g8vG zTe`PrF*H7BHRD&x!*Fdogu35b61GyDK?>b%Mg5F9j55|PHYnDq$GT-Sn2g8}!`*D6 zqL}ct>nUtg_uvwN>P(a`OtNxsGvpMwqoGpQBEW(@kA$rb965ivz#tDIZ-%aL+Ik8o z-Yv<1q04&u#5_2(iex7X-+udFNZ87e+84rK0kgAC@ni}0l-uH9+>EpqMSBRr;6Pek z!Dd3DDik<#yi7#;IA(OkU%w%8e-MEu@Nw%M{1WXd#V$k+sT5BV3tiB*p_GxQrhyXS zY_O1(3%w}&g3XQ>q5f3j9IVEN?xJ+S|KxV?vpk+zc|}jG41Q+g+t0|Q?hr1*wp8UD zoysXpw;;u$LAldu8%EUL0z}z#>}u|O0>#?%NEqwof>wz}_#Rzz1q1<sg8WTG(k8Ux zWXPm;ZS914dAM43Tyg<PbxI26N+@s_1ennlo3I<y5)P3wfudfKcy(!(|A)DE3Klhp z+Vr+<+qP}nwr!nl+qP}nwrzW#ZS!Q$pGjuspG+zjsY))suc|M5RadXotNVFhI)}{2 zgB&S5e%(v9#Jms#W3*qV8Dwec-?!UorvjJ3sunnNJrR@`qf!|R<UPp}0!azYtiZmg zp2%YbiYXg94$wiJu2zQHF>A|tcBGcbzyK78zb`3aB>587+H2x{UDWH7l>kej<eDfY zik6%dK3<%>!KUG<C20=!4}PX&V@pV<FggB_C{j>|+HmX(;PpP%3z4-F#Tp=h^gS+Y zX8~LchvLz%N?D*vLd#+knR5NmFvEDKhz!JerL88RWu`O#Rj@j~E~$jv3|8VK(Lf)T zTis1(UeC`e*TlW-cn}T9qG)tMIB|7maPMv67(7GgRhZ4=Bnn=q3}9am$<6?Cpc}&r zfFc~UD@;lKT>}nalU@h3voWX`?wB5$7}-NRH#7z1k=4c=6)iW+5%G>$=~}hA-e5ln z<cb!HMDmB3h_P@z5z63yeMC>z>P^F#zThG91`tShIG;bgIcB*MzWy`_Lgq{g)v#-K z#F4ZnE;|Y3Y9g8W3Z_;Yac*J*e;9d|3VM%Ir#pq-E8AEN)AnRiJUnte<7X6JCZp4X zIQW;E(`H%6U<bDe^%;ns;AKSv3h!3CSNcPXMokGNXvO3tpA0@2J?B!|8vZfuy=UY- z*dYyWgBhqo@oX}daE+Jy8)g8&p%sMXT~n~#5Q*u9jE#KaTF9$=?^>Z0oc(we%ONZ- zpzBE1syI4`l?GZnm$L~N7CWi}mKuy<8L`i9Rjo#A^|4GrT6`0G+K;UIGpj^p1NCC& znHEl>j0jsK(Lfiv7AfmF6>ihVOdMaVDEfd*1`sT>=<MP$iV3AelIX*)YL$9*0h6|; z6(g#Nq}VqTpJz&+^?SkhBAPgdhO9^i1--@%?L<Hw{NXc63h~?^cbsb8@Bn2_0SdX{ z2m(t#jt^JfeS@L^O5RF4*M!x|_cVa80Zpcmp!mLS*Sv|KD6T0jcGI&^yd)YmLnE9D z-N~KqO@#IALU2QeM%bnT6*gQIimhNHM}#$HVV{UhjeiAl;4z5Fl|}NNI|^`BRhSvn zh*c`KLW~L$zr`}20!Uma28+YK3&Qp{^DsE1bcHGKjpW#ukV(7PSaTr1t4CpWza2Fc z-682L`UKM)f@-kEpwz>lez-_%_GFozKiw5M>j(n|);fzx3s*VnGgvj29z<)(BZJo% zJT3bHo3ipSUDceWv6+;kV1dF}EXlC)eqw_r5izZ`UepB{=A}+_41ooOkuiKl<R2gM zK?fofx(P3(*5^<akGX|~E^!hpD`W)ZagmU}(0!Edi6MyaCA4R3ocEe0yKd|J^Er#G zU6>J$TB;ip8=Xlf;oXltJxQIhCYLHbQZLI3&a2s`Hv7;56w!g;nm*;Br}+Lkth1dZ zi;K{LEQ7#N_#<ZfqMNYl%R%*Dj*aN0gAb#FYHASv*7(8;X0=chA#zhDb}ip|XrfA{ zfD4u#zXA{?jywPKhQZy2DtG8CdgA=~rxbz#C*xGZtop~vyZXh<otEGxEV>m)dKS<i z5x5mmj7T_Q-_2Cg7sIGMfzW&?TQ!hMA_ygO=3v`$L7D;lzfLh!b0x`*rWaP0SxfEZ zEzu91?LNS46cg8|Ym*VbB{@~4nF*v!2p%KGai$G2lx*ClOI`)|f+D3dPjZNY0Lpq% zF$M~r=@jfCKrM_MJ(PXqB6jM%%Vc(F?8QBzF3#G^#8L$=+o9GT3el=8A<#VEn<rRK zA=K8&bmowtC&42~<|B;mRO$hk%;0F^i<}TtGrX#ioJ0)}rI&}WU>VSensBtE6i-tq zc9dpgQ}acLK}l!VGFyTsxQ=U~7P)BU7XnN)Fr|#T!JfSCLnvok{4Pkkkb!!X#1G<T 
zCepZ}E&mnNKolK|(o;w@W|5);`EZnpXFprALS;FVcO$HAGz5cz0UZR_gqTwG-UYn| z<*?OYv<!Jmfs8kHIvtY3>^A}oV(cn`BUHp&B&7Qik#m0)<b-46v;pi3P-o2_p}9p# zL>;aH$WB)NZrNyVRZU2MB0{txVS*M4sLwQLe>qr*6&&<F<`EWNu219H`b<$4Jenh_ z1X3lHq(7rRHiqB_HBSK~>t;h9=&-hl9~FwK`MWjL?T}!1@-}yq6z=5!C9AD8?$Kc4 z%7e_jhp9)4=q&362$~d-r*@c5(xOPrma_mm*Y2T-R)xyffU3$0E9kKxLq++6FNqxF zI(+q~xB_L=aS#PA$`-rPm5OKH+acWLxUMY(3z<zJZnQ3#5lIHavq}VLHD2B8{loRe znG|RgwDOSHvJ&K0BQvZOyDvddUZUf)2#T!sjCcLgB5CC<JVX#A?BD{vG;0>c=0!17 z)R_yQYaPQWepY-~v)L%H6|)u+#!#VZ4^-T&48`^sf3XZy+S6APO+NQq!>*}>jGmMk zAg=hq;R0GL=$12c|M=-)2EvmFj~H4yp=S0o4C)aK15qqjBytHmfF+eM5GpAmnc`p# zAR12yAzId|s)wm$Tm1=NX!P*|D|6DUumF`BbXR*RIf;B{K38xqS+Ci*Ys6n)7zSTl z_%2!ex(p#Kj-w7r=2?ev(lFVloHR(dxu+fu?+nM#(9Or4XsF*|SS(_xC%s9yA`KMW ztfBZOBL{t6AZ$&qf^6wj$;4Vm{f%RsG6O-?%t<+7rlm$6^Op)M_^bgmF6nSB%M@sp zWHuT5xI%id-CCqQ!UC6u3)KL)jMPIzvrY?ieNj{3l~%n{2NQcoPlkr<yq#0glYn&$ zIsU;p3Jey895i?VnmqN8#Zgh5F9lPmG>b6Bqsg06lCW58$w_y(g0UTxW8l;k)p2cT z1>c%X{sJ9&p<ss05mm_Sq&wy3HXVldKDCuX-TY3*$r~Z+Lbj5)RGK3oP(2Z8iC{<! z(G`Uug($e@rDpY@<zE<J$OK|?7DOoI#Lci-Uu9Y?Ol4%RAzaxsEXMxmPZZ~}P^NOr zwM4L>HE+)HO;SM(0H3jx0}e1@V%XC9%yN%;x!Z&N$)Xg3Fp{PjH(CPI5_S=BX%VX> ziCi2b%MpNJy0b-NRxwyxT}LCmv;lS`IVUf~8q*O#kp3JEM{%_GZwC9q?7#xSLnBzS z<M9}o;NR8-Sgkq~7#0AG=^|<NeGT4pOl8bM=w~{!8&>ADMLLe`h)Hp#X&2ZwcI7Nl zQX9q$dhIw0Q(M|ytXc}9o-J$QvYmyB=$Oh4f^@kB?AuCJaWW%Ds8k;@Ttg>FJ5mo7 zUWoyEcf;TVqlqUrF=E*r`I?<Ge7W0D4hzeiIo)iD9&BnGF(m-gOB0(KA)AXCXO{ta zD3m0(LWKz?!uZNKbfXPY3`$?Slwm8weqkM%nTM{0p>bH}z3bsDrEv9OCi6_W(l;Xw zKD%+my_^}<d}tIs+x`hQoMR?|adQD1*<y*(mM3kg7D=nEhqW<lw&BJBu>$0W))`y- zdKN`u;iM#tGTl+OXHL2bqdCMtKGAw?vjhTM$w2%^TmY2<Rc&-uHgs4yT^A9f!2)Rc zIDAc#a%8dT5>$)&w2sS&TeDq3O2A_>RV>LG5PjxqK##Tjk@)^mIg65M5ZGYz?@ieH zLRf|332IvIXu>=Cxizm32v;hF(xLg5gMWe6UqoQy%$H~>Hpqq~NPo5f350?cr8I08 zh84CJ&nx9UQ!$w3Nd_qz;1Vk(xx$18%vl~lDn3n<LMZNNjB?g1_Y1hmM^!GP{3fFg zy?ZoaN4)#!SyGkhcBe0Jf2`<qqx@OwhQ2siLv$5Ga~uNPCP_q~=?Ox%TA1M{c&x+4 z?va)0Bpweg+!6x;Mszk+Y*pmTz3(G_pO>k<;Vb8dPZys_dp|tiLHdrcv!Ca$fA6RV zir>%rUt4p~yZ}fK4u%B=0JHuk7+kaQ(L|YL3rtry_Nh`Zs7MdH#VtHCv^IZw0mDTq z%UllQ@_trs^;)J_|Ai#4)g=0Q-^9_B45G1eE9_xYrDZ)%?*n7%p5TVxlm1CCC%EV{ z+!YZI99WYgJOUNQiviT%F<{tqrxDH{H!WS80CeNHF94L77q9dtEM0TtR>+R4=mfr2 zMx3cwcJpA=pR-<5XT!MhNyu_C%Ah<)O0r15YYm_sTET>l{a$X-iMn@NX3rhL&dZcC zS8;U|Q-!^PEE?<TX3X&tZHuV-p@%m8Va+SspgW!0FHUSQ>S{$EAgF>Y9S^c3TSnBH zJXzO~7Hl<1#Wi{j=zXukgwaQNr(~PrY|EbPK=Q67y$=NIN+};?bF4K54w4RL&Kg&@ zt}W-<FCm>Qm2;4YBoXdiqz*cfrH((QVOdM0e{k6j%0^9(HBd0jVyT}V+E0vDgIl<8 ztUVcZDph{)vC<(uU4u|=ROc{v537bbs#eX;u}#&&WP%~()RP)qV8Vdt;!99kUhzLj z0#^!|K?Y-s&A9Bwimp;;$0#mP9#~2H)7s1QCUu_0WJK}htc5wKK_uRinsP{X>8?6x zv2OA<J_JSLp1B`YwB3yUQVABM;OMqYvpg$yIOV~$GRZ-eE)ZsC@8}4ep=Uu<{-wdj z>6jadajKPQa90_dMOOY7Zic%1SA8QW{m%yGDFvS^?LW`Y0RZA(3kb!C2FLsM;DPf9 zU`q1j9AK6Ox0PnuY9;$XMyq2S9M{m(Dr}gw8y`Rtu0y1FGBnzATv~ISPbp`}SjaQH zrk|vjdN`f{!2Hsuh=@6b;>E=wi5U@l>*9NqqP^}!>LDB$Y~t+m@@^x-pgQC_Tn^R8 z^9u@LqPoY#Fa&bYauCQ({5|}N2S?AG)Rs%;<u~PyOH^@!6~HLQHl1O*b!c(Eh<`(! 
zumkmA?t%%jta|kt%12Il9h252;=XuL7y!M60}AKFh!rMY4Yq@8TI>a@>v7)WmNEpo zc4nCD!G>_y_aSiYg;3W!aw>cL+o7d<|DM13|JD8b&Qibf{J8ZswD;cBpNT>Lp6kJ; zpTy3SQ|JEK#`f)f8w$zRMsY@E%0|)AIpu!h9J1)>fP^SsbAM1W7jI#h=qL#Xo)U;( zac3e-D1gO)@l=n3dLM9~JQMY+4fU88@`-*#XOd!Z&5XmD5mujlnw{A0$IssVWXJy* zqqRqt!{An?L0n?v`b#`BXtxIBoQg}wU|}>h{=-o;*?2$Zq?bFD@xyo*5Vu^B47isb zFwK)8#$H>1c9oH!Efo*3CvD1NLQB?NHGJDH^s|unwV@<Ku1*Qe$Dm$D*n@7S5@HbV zOvHf1WMbooaV8sVCA2i_?x959+~l~aLmOf__lj%Do_yQN-bqZ#tBg`M3AUyuc;%XL z7$qw0_lf^KakfR?R)4NaH4rN6LBKiGbY4y8i8=j<skRWAF*qW&5=s(3f!BSgAFl9` z^i{0LP^-1+!y=PX<UU0)M_}P@@q0UtevH;%bN7H9_7~`@%BxQ!a&CVPQ&o+zYwcY^ z7H1riXo(X(;~-S&){kQgbZj?xDz0BSjPS4;2X7*FM$Dw>9_LuvsW!kWSc4FJS~bvw zE>(@Su(3$*E$MUUPu|x-(7*0)TcR<H0L-*E)xTkAZUfD!i#BR6yr@_F#4>^*DZJs0 zC+IgJ1;LY@e>UfF8ayqW<Z1>%*R!GI2G-Rcf_dlMG=#EJ>(*}M^`Cb_Jdu~@kilfm zhc7-z<hn@*w?aR|*z2o^v_ltnZLl;>MF*gvq2<u#+v((v?z!L!FPrb6pefQETgPBS z8fjFXx!O<+klUSVAyzJWzOChFoO0!~gm&Ad*za756`xr?2qJa_CzdP(0r!UvR>)I> z<SKa~o~-~L(L!hz%7bi*1?H?%-vHW#l9F2|yi4c{^cvewj^QBNoVL9E-djGaM&4jD z=mC#yS`K&8rnw@utr;y6$-Q8&jMd1`5iriQW{*?FP@O5+1`Wu|VMPF%C{6M7Yc#c? zi=}U5&+1=u{O!EW%EN-Z(|fm1*7(=E-iZpAq!YAtM)|kzWg>DchEZH}hjg?U=i}XL z#*YW^gLg$#R3JY}M4DQnqdJw?V}oFFxy3rWSGBdkC`GGq4kbZC;jjk2pqKY}yo06d z`uhGHAq(@Ir&_9<oX?}ao|k%w!-NLP6<Qj&akc|^1Q5*pgQEJo!JS!YLl9gpOrx)5 zhyq8db3x4f=2U|*=9u=|_~=o9_B?PsViCrsFLifktQmUl3c_6rM!nFWEiZq_T$b~9 zwK=3+k);5;O%Fx<g!A7wVs)GbPo+dF3PcBFjyB+{J2aU7gL&ai2To3z?mZ-+)nZaE z0$DvFnY{V92+vPp?=4l5Q`KZSeqmJmAeE)z_*QHb)*sMZFYAC#R=y0d63r2G5VSx& zf|NSRh(kW+iSoq;@vHxsSNxrAUx6V8Y04!;LFGxAL-p<?KKi7PUABk&*DFuM(6v`! zRv;`wA~@I<IR<5yZ>cj?4_6vwi<Ce&0f8GV=m~krXPnv=C6jGp$C?(>G#w~CRP}JW zCO8ICf-i%pLh+N2sd;hi;!oJasWcxxE!H><Lp@9zXKYE*#9&O2o}rCO#$%8mNg)uh z{1%9%N<?*w;<TtJY19m^V3~TBMP*R-beJH=U)DUOjWUIJ5IU9Yp`tz9P&L=Cf?7mJ znj<1&FbjvsU41te!)h|Fsq4U!f!OlW06yHFqA}!y(Xur8+lBFEm##z+@DQFC4eRQ3 zm*2+vK$RFJKVV^XN{>v=`&Bq5B5}Gdo>iE<s`1wORpT<-L4c4N;Rm5&7oWL1R#~7L zx#toC*H6SMLlQqoQ>-EH-!!RR>V#DRDS7b=2o?=i>X{`nXwdTT_|4CFGqtl#2nwVJ zKBT`JQ$XM3+;V=KeB4-)<Kb&1c4SA8GNt*JSq53stN1W&Q|%zQ|IW$D{-&hqd+B%b zt4`{`s7)$*S1VoRk4nDSm+I$lnMz}}6F^nURh|iaC|}<hR^ygxHQKZk07oUKs-~6& z9HYH03yLIVYu9`1P=G-*C1u;;OXweg@AbU|TT>a5I$}}IMj*j^4#*X8Ie;&Vsd4G8 z3bwG(_a1g{b1~a1o+f=WaooUd3pc9L6rhb`RwCnQQyQ?qkEl4EKpm$R>RRK<&jhwR zVur2*Gz~Z9`<+_N;`rOmm?f!lObcb`E1!5^8HF;*Y3DCj1H`#A!ff78_*B}boRy4y zX{C+s1Vmr&`mb$ADx2KWu|$ME>mjP!OKb|uwf*wqvi8OOSTHe(MHEyiOpI-3W?er& ze*ms1x^m-W9?n$2S^Kh>F3TsL=&-?qGC0acPAcd-F+maj(D(cV_}n7nWM*XeclS@u ze}7HkXIGw9$<2;{O>7~+tmUGBxJ=%er6|E|3S5-u?3PX5^t|#>*75hfjmCwk8g=XO zW%%AN%}tT>eNE}}$^N^%MZ1Aim;Z-@fAjc9`5&sjUAtP@6XvNtops%a{?l&B+RYZD ze0rH-t920I{DdUCwkScD{?|3U-Xo_uKO(A-jq9P!eAp_7J`yGt*3(%t)^ONM&*f5j zFE!tqyKLX$`KmN{f5aD$Y=iziYsWtDjUk0<HbW!PO-=|#f%e^z#kdN0(Y83Q%5doE z6aAmp1-7;bLW~<zQ~LE+!_V6%^Nev?1MuTn!xEs(F&30>`m)#9T~CHV`pBIR&e{kD zf3X}^`E4gQusFWVu)6R#YW6?Z)qEEI8!Z8O-3!Z%oX@=I2es%9;Xe0ge%d{5b+Z#V z7{}*UjF^>u!s(=)qcb@q?k&AQB3b&NbNEYtUq3Is(xu%WLpt6IyWgE|Pp2MEqb}sR zx0!TizV;5_OxuG<+k*eivTwd~{hLZOuRl=zAZmlCGa95%<r#Wf@426SIyux<-4npX z`Koq^I=RuqJx+4`yP(z<A^)@F*Yz|6A8Z?2hp;-e!CDr0xQNZ4+9NR#x%N$BT>|}g zq#se8auPb-I$laNrl?%WuORM}6)7jFwqwx{JE3`$5j;!T&*8d(aP>q@Mj$-Mams+o zHy~X)ju@;J17^<mJ?n^_U+Lb%azq(Y>j9$7kHaUs=W5Psq#4}MobwEVnxmM=y?R`N zTF3N`=*JJQMKUC}+(lZDqkz12J^+iuag_e@8GiDWkMW$RJK^!~`G>(f$##tzvi!e5 zFkw5kPCGcxU;h>#76%3{=T+RkXN5aho_Ta@)7a>=$MIkTWS@Ub&MhX0;GA(df3K*D znJ*^mq{`;aXKWX-wqVAU93M>wk_;sRs+s;^j}qIVo4cn54w%<7$x=?d1{OIE{{1B` z9>U|@;z@-HuqO<s&Lh&fLWc{u#Zw44E+=Kg1}q8-eajMrSY1_%Dq$=n`Eu!v=@*Z4 zzTd(D&DI-@<oL3<f3Nqgp2@x&Q7kgS=Wt+rW?YY!_)d~ojrCme&7m^~h$$s9vKkJh ztBV^!UKOJ06jiDq>9ES+P61*tAZ|*-rXT4yV}jFYv8@*snY}TDn*hKY;X|YB`Sx{8 
z$;o8h19A^&PoB?~wo?h4-$<}#8s**C2$7taCe{I-8_8xUjMC{M{^S*;-eW~5(#7dH z*CJFe+^yjDyerbQn|xz{<0RdKYg8>(v~THQf-SS(gF7_$V9*gi6CZ_#wc*Jqk6&w+ zamWdeuDm&$TX@Q@MtUzqGSpsqe=RG?!7XA~5-uK-xp|m2R%V||HI5zO`+!XK;wXae zgH^CD<`0dig*KRa_f1|XSirD270H7Wp-F+;|3l$QOi3;@d2iD@2nesq?#x_^8~}QX z)(IgcOzszOcd)@o6+>TZZ%Ma)PGDd?k!*rAq+?QSU^WTvM0Jjuyl$)iTx`S$3h>jY z{7A5?l^QGPI~!s5#R=`IiL2uQK}D)JGU?Jpw6k6|J-~MG6`6>XFB$i&F)lgH(>t1% zqO|Md=3y{NRLs^h=ahguT9ACNC+vv`!7=(=bRr<>`<B4fNzV|XgUlngzG``Wt>w?- z+u!Y)lklgc6{5OCDG)-}F5?2bvuo#16DUlaN6<#LD|eg%I0R8JOo-#<70%<(WC(^I z5;>-D8w$ftB@zB@e|OKdI)_HMjl;@+y(iN`l8dPtkd*(`^;4saCRVXl%f#vw!tKco zW^id%EG?nvM<?f(P$EV3SvJDRyYoXR%KqFbhzkZ(Iv*FY+5NOw>OX-t^30Unfx3j_ zY7=VhVS=gidK~$!zYI$AHS!_f8U`Ls(ZqTl9d(vo>iibz(Od3<(}GD|RntNVS0aA3 zYu%-KXRT;a(YLeHgj|WB5z-iy<kpVZ@ipwW$xCbPY4S?7!UH@**cyBQou-;)VMc#$ zdBENyhR<9x9fNSg6ot6Sr-wcqJoTyH9b-;%5A-sMVsfIgHI#M-&R9q2iblZ9SlLck zv4z*v6GG=x3;B9TVrFGN8-t38*!%6OVk*sz8DPxsSIATB)=rs;P{>D!A9v(#aeE&* zU?X~ey8A?_j!5)s4#7Z>9eC0J5@{WFRFx@J{E7&g9TP_cA-x!Yf=V05j7doFMAxxO zR{-1*scnx*3At?zMveS)J8Qs(nq_5f4-Ui8RWreLYa|ujH}fb0mvARDMwz`Mzpv&l zd<$BjaW{>^zW@QKT{oSkKPq{+b8?<xGF+TegfVa+@D5T9W%p;qx!`xg5zp9U73D=N zrpgGXHoD9{Ax)y^I8T;fZ@fp*h9*k*OqSu#soKz2!3|f+vppLhHI96fWjQ5eD$%r^ z+}IxbP_={vE;{yp-q`38cLd><OJ_>^cP?d8kyALdOr;mfW|&GyNsV-&EQ`q`l5NG) zgb}MW+1~hO$Y*BKzVnt$IwcrM`8~KQHJGG2Y7@5$1)(zh6eV+cs9jZENf^xzhPKPJ zV|Tq3a$QZBoW#k2hVf3?AO3)1N^y|JCkUD&kJ;<S%b6a($sSpvL4;x?z4XX4g{`YQ z<W9OFwcaV{@qY8Pf85{4@f{u^QU(lEIZ5Qu5qU?XkOa)GSSd)$d1^z6V<EtBtr&ga zeef9OX6)7m#A7CoLh!p$+h)>cW%AZjZ$#S+rG?wt|L)I+x0pnpFOB-UB?0vqLSeL7 z@9Aujiw@)Db$o;ZQs!am@8ONgAb=Cl_;OBaY>RaA1x5Aamd8)Ls-F=Ri13`PA+VT7 zGid@YX=|vK?Bf?D#|^p#2nVGS((lLDv_lKY1KKiHrSV_pJOIy@%2cH&eeYA`uao}r zvF5`FNNK)rhJLtPCXNLFz!Z`-@@56mA%h4=^zO2F&Hx9FuufD|^W~zSpKmo`Wcw1- zfw&8-0rr?Q&J-Lz=wvl%6*;hq%R8DN<`Dbq2*K#)Y(*l#Dq+Z;MxLq+*sdnv_I@~o zlW&rrB8P9{{0WJ3+4St@+KOEU2KbCe;$l1&`PLU!vT$O!X61RK_gVY{KTKsEXl)AM zg!Lig#uxir6yv*q{wuA><G!Z#-N6k(E+<P;3Q_<rg)&G?4)a&5lOQ;~znM9<mWQR{ z1CRQ?DGhur@3!Z&nx0F{$X3l&6<XULYDRTfqFOBFjw8o^O(K`A$Q+?L;~L1Ss{jI4 zTuZJNVr+#3$rrpFMO$_Apwnt?Y99s%PanSO4=@n5tj{V190uy8-olGRWVWy1Iebsg zy%A3wn@Su(LHf2qEKyAl?#f!c6JI1Q7k0Z?1Gj)ERD-sQ;87~Zu5#>Lht*nkp8t2n z8yk&2o&UR;2h6ZJvklM32YhQBGfMX^=I+Z3`k4I}hIo*q9zv#^0I(LKIbiFOy=0g= zo-7luZ$5XftZNAY>|_{(8evTB?B%p@Dif;ClF4QZRlo#}czl0(oYd0TF#*-k6@<e4 zdW|}Ab+gI(+YEXdm{5^Xbtd*dMM!h$fo}wyV#n_eZ_nGqbQ!$f@A2Gc*5|3-TFNj0 zj0YKKwtp;-a|h_$+Z;NwoxRz3T~Mz$xdA(8CocFOUAVrzG^crw*GtATcQ(2tH48t+ zj?%0C*!A0nMWsrY>zaTpm?}yxPYw#-YuB`|WH5P^nu+cl*_>3B{8?$i;0KLYBP=HB zjG8R4*de8G)2f_0^qfQW_u%K3@=P+O4iSYe2Cl*U1Y+$=f@hR@vnmCLnpGDU)rD_C zPQ<2&#QXIdt>I!-e#-55J<kGinM=5q?G+&c?#Bv)*$RWITtzF?vV~wCWEETpX7!kT zn)DXZ0^U+&XKGthCV7ZXK|0b}B65o_m0MvOeAhD!yp$kc0uDI_<y6)ILh&XsxlqqJ z>#R(|AXt}A6s=`q*0w?X{3AJ7W}-lPZh`aTR7-pdDVV<TuS^>>I~*UwP9gA;8xnWU zQ<fF(a*4$!(#feIB2vnYyh92`Fkz+E7QS(BT-I;d_gkD{V9Jy%UUnB76gVS~Uepjq z*>c-3qGbAa#vlmDJA6)!){!J4Lei3VVtV+rfLt>i!db+ZbNu2-q<IT{?l*nK)Ap6k zmrUepizcjcMY|rHN+PHiDng!+r@;cJ(LS|~N{d)fyw@h?f=Ftcwc{$2p$yovo)X}o zy(^Qcw&22&K-k$P5O^yySHg$+fu#|*e=VT@T4!sVA<C?h{YZGCmI=5UKAf^_;Ux1# zP&yR`us$i{HQ7Y6K%B1$6_DuVxh<rC>Uj2F4h;G({7N4}2sUnb+7qcDhcb3@YaYP0 z-*@Xvyv<to`sH5fG-Fc7R7NHucXboFs@NDBM8iE`T>V91&UfG0g5Mnp3567pMZ}K6 z+*D$?T@+CTl}Az6^fyArF=~qx`1zKiOyEMsu{OWdsu#XEDo{J+8KeN;q=|Rp1m6MA z<UJ*y>SrL<zi9MKO|TN;RMS~jYS^02-fjNdru=BJJHzT<RYVIZ0Xl`I<{xVV8zhZa zG}(r&6_LCRXtotuS!TSJXw^>e9`SYkrup%01W?tG>0m@Ums*%;uARrNEMUjDDMD4( z(dx-}FPKFjmhO}$e{&71VF7Ld5K;%52+J0)Q5$#>>0o+6(g@p3LHy80bpE$=y}ijN zdw`6Pp~&G8Pfb1X+D2us2%ii@SdXy`nhNwMUYaH+f4~c%!fM6~XTuuSef_0n2-)(( zM4}H;AgD2iRXL%z9m$|kHQ#=?$KyyN;W}-g>+j8~b7(A^c(n$m&D5?orq-RfJ#RJ0 
zKam`S$#lpV<?p@Va?UCHx)jZZ-bIl{*3BO;w)IS5qVJpdPUec=F8<+H=)r=~$5@;b zIqqoWW2XwPy`G4jty7?u0DHsXC{w-lFrTiCB_ibd$00Vp+6Mx7-XFV!BEv>x$SdUZ z8Nvv<=o^ZZx0X8uv9<iA&#<-J;@yE}g*?g`i=^31lJYcg_C!5*QO;3~sl>$;GX&V4 z$`vK%%2KO9GaBbSbVO9KH+C28?suYiL!Fci0(kRpPEAwS!{w9$rEU)&v)bA^*M$7~ z&G1K6VG@nz%;At76ZdE&eIWg@Kvkk}F0@SY1F4G!H)w+Rs{GCR4LF*Y1sd45Ec(gU zT<2saS+sH#dpFdN$Bu;LmR`_uVzX|xbQtlng<{8lIb$d3L+BoOqWMKGP(TbNumQ?~ zn6uRWn35PErQ2pBfMOaZH0IK1QRig)(e=>*Lt5&o&kpR26%@b<9*zX{9ed1sn$hCb zDCS<}N;#$JYNY@wMj+-PySIcu^0%^>MI)&BO_{33eP)3R?Bi5Z{-kIXZ(7>4=K1A= zQr23iAfq_FXPU)*0!31erL^vzF!%up4x6y?ud#cc`56}hd!nO|1&)hS5=eJU&H~*S zz{$>97_5V9xzqSB(KB!7|C$TmEks`hGug2e0~f-nv~@)wU9hU&`9_4BKxX@K9FaYY zM;ua~Vrqitu9F>Sye$5i?!GnSkhs2rGuvhxU5ucpOv6w!CTV2`LCywk*1T+59AVm? zmU8J1>?B2IkgX|xL@r#4Or$q@xTehT%s;rMOFhVXhH0;9X{ELtTA6`n=t~!xHFQkp zztr*Fu*Y7CU@B&cXZ28parzO-#8m5ETqEabh3ym!lK~w{sJY;g3#0x%gRCZTC`P<g zaYKG(yJa%-zewHMD(lgVS%pqeK$5IL^a?wm05J_uG1u)QfTy}uX3!h*SWP4+rUEs6 zaHSRf2C681pBPq|fc=BOD9@XQLWexBKc!}M!lkO8S3#WoohgK=y+fA!^}t>aF<*p} zup)%>4K#HS`{Hq?r`KgB61$-Hc|H?I*YomIvbKlsmCwPV%m}nXB4;yWA3v^QsqkW= zwl`K>OpsV&uMDC74s8n*EK5*}v8lH#(PY%y&($`90yGv*0={*4(E*^U7i?kfZk%IR zvCk%eFQ*uu9C{DSu2vUz>+S7*a^T=|RnVG3Fo6a}5^=CYQ4@H5^uREB7{SA9dC9#+ z*K*IYhhb7hrnPCM#$O^Y-pBqSo9ZnQux`tpw1EZ1H=DoxFnI_6?`><&_xbtvTbV1( z>-+g=(tqvqd)E9nb?4kE_b0vXC{KTEZ^`ddf4uLIcj$ZU$^rjHW$AhE-wgj(82yi3 z*IM64*Bjob{Oj}6#pvtTig_Ot{_)xctRGwN)&+LHzR=tE)%)xHhVP5@M~*itACt$R zd)OP#U+XWb?z{`E+*Cise<$pFKmRjC@;@Ra|92z3im88~%$zJm{}+VTe-JzW1>^aJ zu2}z@@bZ7)K>wNUVfnAr(f=d5=YK6D{a?|Y|AXZD|E+5Je@yqk#NfYEP5%!t>OaSY z_5UUL@c$H;|BqDD|6?})Ws3iuYWjZ=VE@y!|986W|Cwq!)9*t2KSlIeS^fvE?tck; z{b#!CzbWbeuNAoe_JFGBY;^vbncv4b&bGg1`*d;#a)bPVu!Fq0xq%Bo`~BYx(7O8v z0}KRl4}*qVljq!gs;UYlDX7l(d_LXm263L2t5llRXtk^|@4fr(Wjfza(UYGPXD25) zDalPra#H>egT8LxM_<#cpYtcDxt5F<YGm4;qZ3(SPE~cI2X3TD&qQf2W$F)R>)MlL zy(xPPN?T0(^PFpJ?XC_tzvq|P>#;d}exL8-ldZN}e&3I$$B6+RO23W9dQa+>{E=<i z_rO~GB%nZ{#RQqf1d+)Ik<AbpG{K5<(Xz{_O4M<C6vqK3y;VNZ4N~E?vU!^JMZU^e z+6GdBCh9^5f`&s+w{LgTH67bMrJa<KsUhp2rL9NpxYyjipNpk3ecyLXUHM7L_q3GH zxR=PuEgGD{H<N3L_h(4X<p_>t$aYt`d(0f9meNF5)OkpSh8T9iQ^G>}kQ0|u6}S_2 zSrym06+O+sO&8mi=MS#s)GO;d9A#{<RK}KCzYC>3BiA`0)mI)CJA6It+`cAH^V^N8 zKjh*+#a|PqzRzkdQYDReiJAIw;fks0H$;p@G=ObnMa4D2t#O^U(Z&{*-uEdFQ_QqO zS2e`gF~ZiWywD-G%p0&r6RK-RleHtsFvF>Eqp5l!XLnL!*D>3r6NN7sk+tugKPROH zEv*O7FA1>j!J9nIV7dVHVRQ;hT@Z7hrLE}p*F3&2A4_$Ieue+YPvYp~^13Caty5X= z$dTCyja13Z;z><ZOHo;pSD)(dXQe1FfK%*xd-WEY(luBQb2iP;G)n)bQyJn7*(1!B zAWxTKj#uOLS0j#>qxF|xj^$sD6`#&k<BeBh4j1C}SKv-pVN;o45L#lEon{*T)sGCf zhnl*TuEM0fO0U7!2{-_F3lo1s+wUv=Q_$;I;bmiN_qZ^{ek+&GPw8i)x#7Eypdmnh z<rJzO7q9#X69<=69FuG7Vl!(BP-p7GT;{P!-XpxlEI&;Xs!M&BT}fZlNMEEykR}(L zuUObFTAHjI5NI7jRH(Gl1gC4=x6EbwiY2-xrCF93)%t#jhWQnGkE3NceJOaR@^!BI zt-%T`DqF<5-FQu8E&TY=h)q`Mg&yFI0osqqs;Aolf_HH_c{kY8gKjTB&QJNzWW(#H zvetPTC_|Vy>`iYx`>PPRuQV<~T+H1}+vChe?lD>1sS1opd6^_qnHyv|OO{%tpF(h( zNqV3}XQW3AQ?X)Sg~m9I-ZF>QFoVu4mB=b_xC}eC;*0CRo6B%e?B2+10_n5@y@sje z`r$YyQWYUZt2c)0X9lY`CNjJ-YL#=|n!}aIKxBU&4v`@`zBQKpi56=5a=IG3?mQE? 
z3V0G87?9^8ueZg?{C4E(mHh0c_{$9Y@vp15Vq3Z*Mg>iv!lI-KKL<g{g%bUwP1$Pf zhRAY6*4QEiA>~QWI!hoaO;hqt<x2Nuvd$#Emf77!Ff+B+(s;X)H21<pM`O9Wll2G0 zIaoL*m@W(CugOeWOL#IPeAwAx=zvfs;5HWDe5f3tAnCP;+d{0$cI08;Z>0^A*4fuC zSs3UX(W;-Mr%+XDPwX8PEzIZwD7FGXC2H?>ZNM3(QdOUM7Z@<OYB~QEI)J(^?C<Ar zx_VSrCpUW}$!TVJo|DifU92kDSpHebNivG|;udY2;yN2t32aQ035eo^2SeqX$kt)C z`@Ao2^7p~LS@;5#$I?VcBbiqO7N|uhQJ08%(0Qxrsy3y0xBN1@`ZlNbHp{ZB6F`1) zYmDAi-Zh%0O;_6jFFk0jii->itBySN1*Diw3k-!~eHiNeV`-u*QLmm5>DPnXV660* z)+{mqSgZDA3F6E8lYNR4EpIKVRPm`mO~~mgq>`wyTWM49ec-If#Locp^6lZ9{gy8~ zd*1K%;pO{tV4<V>VRrd&3ez-a19fL3h1X-n=USc2Jhe5q$2;b%iT~f%RR3|Put`h4 zV{yG>u&#!w<QB?&?h*^6)}%Rt9O12C)iJS6Vs&u2(qlC$v!s#2U2)Q%BL#6P4bct} zRTQ?kX1!l`oLnjDtNX}W21jXC2I#bTc%)TWlzGVgl?U{BXe1U0R0arC_L#&bSp^q~ zRRI%>Pt?s&bq^92kz?1B<v13djH|5mcBT%%9l|EFdE!>B4Z2*}m87pi+=zD8ySaix zV&g2}^`hauGQrV;aC!bzHAKm}OKQs;3q&p4T)&d`Z!!zM;7)>{HM3UzK2AsOf1xKc zS8V}^9YC29RGc6c&D0^;*d^WEF^OxOs3b+1^jT&A@npuR)_+q_Q<gTJOfOf1c7pKz z?w(wYHv9vXGt)h)MHMtvUv1Wg-Ndfbe+@(P9Z@>6xqA}pie+<C^^>}d3CqL)TB@nb zI@@ZswQL;aKgXrco<oMa(RjNo^hnj29II}Fr+d*^KIPUQqJwcGiittra6sprC(C&a zSMJVMVNVvIx$Q9sO^|#BnB-RZG#5B^hj_*J%H~Pxr^yZz)t#g@^z8MP-cAl$CR)Or zx43nSrWl~o-;N<Jz?CF85yI1#B)Fii#D&HIX7Q<D*aJf$_gl(QdV*z*qk$)boIMH7 z0cOBY>TbF@o4m1Cs^lm8q&s}h0?M;zE*?LE(QzkAbSaJz0C-XY!CMl^8Rm*_z^9$h zK^!l@oUcOe&cmQE#K+gkG(b_ylAohzZJ*?9I?36p2peU9rIxHiyU1G~Gs|p<uLSNU zPI${tdIm<zy;dT!O6Dgzf<)7pq97!!t?>0Mdzen}HA(8OB}q+0`YYu6D;z_`Z^3<u z^S_&ec@v6>JEEaWa00ae2l9_6tN#uboK6>94p$uvmmNT)=r3hx?_{VW50o6tR-Me$ zp%WP4P?=+wU1b;}>Re#$;Uw?is<WwVvjGoW9Om@Qh|h_i+MliJGe6Imsy`B;v=AUQ z7$`m^2JQ&6S%J5fd#(&Hh!ll`3;`c&hv-I9cBpOef*+?f&n&yi^>{v?2B-Q3J^US^ zHnf!9zy2#4ri_m3+iu|}HF%nwenP1jrA&0nAD*6r)<lwAPnhDAAMaeEdT+D_dCUrd z%oKsd7KO$bmC*W|z-JWPCIp7rRa|BR**b^WfUf;vo3se_Np5w1la_ehK3yUDIgX~@ zBAeO*6Ap?p5s|(4XpP`7iTH4}M1O@x<lK36IqAD7HWeqinyNaXt+}NzIjXZcEw;Lk zS8xRK;P%SQ<jl_I(9-74(C82_Ucn&sDIJ6|IXW;mxvjN0E3+4<F}J19C-ldx(T84Q zso-U71KJPh?u6}xDsfB@wffl&&=>V<E}V?*D2j^}7kL9^7jl%Qk~CKYCOO5g`6Wn; zP%@!mhGbo|l&h4+XcnltC>pGa%Z&Of4GPOI#b4Lw^T){bXLn?RHZ@%D1K&vPAFbVE zkN<%t9IMcNykKa&Vrsl*ZoF(pylzIkDB!dH6Ez5qRoUB91WC1JG7pDKkm$5=$#vo} zZeoh=VhfM6OAquL%wYNo&~dgDYPJ-|GO&e-E=qNg^JQHe-A>G$UGky>`N>M?H3H)# zQhn8TzrDBGV657LrpB7;%HYQQ!pz{z)a=&S2!B5@3k)XaH9j&vMjA$TI#OmfT52L< z0u(E<mAe($7rGvdoCI9B^g~SSYi#%nM2rJ0jN{XD)AJLPv*7s_r=`|9CFYjSvU9h= zd5sOeIkFa_K9|N?A4|7`{q;bTHaGoP>TT#*vn~T9F;W{DS_4NNX8EC73GRGhtWCQ? 
zifqxoEa5Gs9VR><J_|mMuj7@9=SIqPSDgI#`zM+sr!^2;%5Ct>tV0Fzg0!Mz)l@mx zR@W#=nY!AV>nm*Z^YdQ&@_drA!YzKHr5!q1-OV6{iNtUvgl>?(ZivA6TXrFKoo{%t zZDFEwCKZwJ5U=)Loz6^6yzYqfC=XF<IX2Oxe>)T&s5yo#gk}mP$Fh75d$8V6^(B#E z_<W<NMrQ~2+gp2B@Q26<&`^S9#y#gdWeBO}Ny__5TBOyLG<GHxmUg9<R+ZLPrS?{( z)+SY!Hg%@fBv$6Wu0@irUW%GdYO+2`Rtoet5eMN2In^vN%OWWOhG%>S^<e+1-pH_) z(wy4ny7U$+V8*u6I@b<!18c(z!g$`UJ4WILkrjdnfte(g8ErW(RA$}9R`JWo*;3Uz zWrxX!sf&=i_5v$*&en>X=_N>RpCw`f=xNGaV-*6EMAe0-oz2e3p0C%dsq3w`fKyR} z@*t?GIh7tQ5SivWP(l6zr|2KVCJNIv7~47>-CG@>Js}e-X}cYSx=3!oG&I1pQ%gOc zyGxVCSt3seUa#Pourh0b{68Wn1Xwv4NoqP!PpRx|YkD^p*H>4)ZnpcQF;&P%Yh`6( zW~KJq*&U9Lr!gBF*iGz;@^-V)yV;nzxmcO$7~PIVuOH8`0RgAEdT%zG#>#@Swq};1 zO1^|PY#fZJn3z!zaDVRZ+ngR58viITH*}Vp)0<-&AuHvo@h-A;^L4u5kLT(1;3sU+ zm7}{$jVB9jYmKsj*RMTUsyNk{sKOH^xu(@i`IqAu1%Cvbly~PwFWw;g{^U+?`CAei z#j7m~?AwNChPTxBDDQM>tTZX`w==an^!eCKQ58q3Oh-n`=LIXnwgTTJNoqi0g;WtR zJ$pwzeMh?bK$+SeUYKOdM{LYfbVy)zVQ~}U#Pj7EChTh%H9ay(Pf1^5U2$=7gV({u z#sKabJ2xFYIwwIte#Wg|(wsuF^s6lUot@^>3(%iq<JnzZ0sj<$pQx>ppreSJANNB9 zMu$5>!n8a;usXXfu(YKvw52kNYJsAeuEpr)^>sa;zZad^*Us8(grqJ`63;?mJz9a3 zdoKIW1yGr#icuYn)*@G5>tdwuIA2UEDQy^dTFy!Go!yRgN3&{RY4Kf;q{#Bj@Ogjz zYG?k|`GBkuua{Pz<6>qz?R1Utf-`Z_<Vr(j_uXm?74eWs>6oNxSb$YT74eK|TuQTq znsasQOx>eYUx|E7V~f<wtPB(cWNc)_)J4re{vf7c5xsn@klB7>f9&9N&BtQZ#}r*} zs{Z+2P|o|*#>NS&SKgij{Yh3<1U)5ZqMhPrLqtR}40{5fD>ApGHa93Sh!q)DY1Prz z^Sk@IyIb|^Q<>5l8;KR_O>ZJZXy-h@ql&vxezW5+z!ip;vT~p5D&rh$PiK)4JADNy zY3W`6_ykhir>MC0NK~5Oa34Lkbo%^>4{m0s`bsD8wDx=YBKG>#9j&)yDfzX2Q7kNU zhRn*uFXLL|uCTNCiLJ3`Yw#x|EaM_f`qG4R&5UX_5-aHB6J1ScVM}<HTUa5?P$i8? z3nL#JJ+;Hz$En#`AISY2Wc%oQ+!Kl|A<k#@^P}-s?<Wq>wEBA)7z=jFPsa_<6Zc0X zI?1oTr3<~?$K&_tm<14*-gp{7N`MdIXY$kPCBqsOd8SFI%w}5LZH!pn6DbZ`XNOYK zO&;f)?kM14x1=I<;%ef(6SEP6occpY%2NsuEh{X|UkfI$-(!od*X07r`q+)ho3g`K znJY=`uLTHs)LZR?7m^%Qt*Ua>@_yB{#YIQi9Epe0XtyUxn0{=_N#YW|ff9~k5)KOU z_8chiSVMeeLv*Qol-XzvUMo90T{|sL>mBp7b{>nXeRm%Ns;`6alvXeiF$D~a02*`# zxB)Le6Kt`@Nw7S*qdd1MC%C6GF-XqR<>P%au+x(kr`LUrrK{K&$4_b`M`Z-KUgfb; z<+&=kajH1(k*oZ?@=6^++XHq`pI~k`mNJ2y{ex!eXd0BO-`4gIZ1vBd+#WkzEzsLe zc%E!iD^ij$v+&=uNxdLUOm&p9$oulg&~PE0;I{#mC_EA-I}~VeaY9u}SD3m;u((Ba zDrRD+I<hC)f`=}ht3B<UPFFhEIR_uY!7BVyoctWkoc#1Zv`{}+*1jX<a^K(W@>|MJ z)f!u52H`T4w9G41ocj$#i)h=G#`YN2R)z<eOMJBzzpuR~D$*8FF=e@Nc6iXNP|T64 z9CA7hMTc5*Sz#$xb;s%VfEu}FKK&h8nOT>I@Z3g``f{KSCI;X4^YR@Q1O<Pr9jul# z<pF^)d<E9(Q2DMn!O=0MM!)9~7+!6%24$jf&qsYdS{}*Z;^6r52pujhGfiD>V~3fM zbB^D0aE>TFRwpb%4%rG`-7D$PA?=-{tdnG~<>mM|ACuLzB)#4<!@)=X@5;gdp4qq^ zJSxZsWC>%uZ+lOFV;f41pNyKFfNp}8WN>wBZg@d+WL;;9ufE*F`{X8RCTtF$#|v3` zUXt+0J9BQoTP#1)ye84Bg{bf$I}sj($23hQ+-j=pbmm)%?37(T{NpO^Vk0Fi&aH1^ z(fi}+;_`2fPe|Q`b~8*(f$}|So5UKVg_)Wd6J>|^u`Zr|N8N48ObxnB-J!D>7GeIi z(P`ivOjNYY6bNJp0_veMfQ_ez<I&H{^8{sFKMES6Fp;OF5uXp_l0#Ow^v#6rjbzQi zJ8uoJv2v92Qk3)2zv!5EuHx^HnvS|W@753CqM{hxoshK4MCiyNWv2LqhS*<r+AcTK zES0KM&Jdxhjz^>K9}37Gcf)Vp^9}ARjOWT9?<v|wB`Sg%r3H{um_}rfx00jAYjmF5 z;(E3Ksp8ia2=>DU_O^O;h0vJ^l3YNfb|k_YXJv({u1C8UIdwGT+!pPlh^{w*+FgO7 zfUb1i-R)C!q!cAh-=d3$gS`b$$mnY2===EdGW?aTFM?JS&-do-z&-dO*t+04-VE3G z6uVp*FttV|a98l-ws{tdgH^9~_g=(q&IJa}7zlr0tKiK9(VG(1)*F)(nGj+vAvHGs zad^%9>u27VBs+A5(nnZCBQ#MXwc7$Tezpu%6#p9EDh-&Fq~Ruup3vZWb}V$gSDxQE zQ{HTX5A}xD9LD}a&slazZJ1JWqbp5(<+(u8+-qOjYOb}$t=><OD6<<RJhL$ld4PuV zTls-K>~gw4kIrPJ2Ok_LxHvibUf+-zT%4wnP#BAj7YYy=I1Eq>6PE7sSN+z#+;zU? 
zUhYUn+A{853~-{hP&vCBbLKxEsCyvYkKjCCYr40OSLyISnM|2Ma!|m^lDfuPik2c) zrQ+_*yiEPe8>&OgznP4s?`Jcn#On$^cLep1v*l+V8jD%Hv$BB7L}&64ahx4WNOxJZ zu9MM;gNScliT%Mfco#Xtw{Ci#hR<(1&A)*qW#@5I?_C9Im+1u*Yxg$>yev6Lzg1^S z^R|MZ2^6+?;$(*;CfA0i=bOk#pjVxhC9o42I(q%$5A6HZYioY#u^N<wCIZt$0u&b2 zCMiLp7EqIyu#@K_c35bsoe|^v<G<whfed0HXg-&f+=7D4)~SvZ)`bjCUq(o}fPz#z zJ*`tUuopZ_)wN7;^e{7f(PLK#ERZ}@He<ynkcu={st|F(Ba`H|KQockeR%1Pnscn* z_q$QZxj!HM^ftE!cHs-|t|ccQnFaQUDMU)M6(TcDp@)S>phxi+F=3P`L7TfCfgglr z#kwUhxH(_m1285js;y5iyX~AoQjmDW5Ib!|(py(kZL3X@RvRPH{bxzgS(Th+Z)Z=z z$GQ=;xYf6~VW}1ac|)0<oJ<YxTTa~P$NVnZ4sRZ+X-G~`fyhezXY1}C=UeO897ID- zzre!im6ttOF?-$`G}lPk9L+7+trFq2229|bIR^Z$-V7~yhuJcB#mF@CCrCw~`-aC$ zDPRvxt)s)qLyW9L*%MXoZ_@{%IXB7qY~=w^I!Tw^21kR;4$D<+Q*(EHVHX?Hm#nR^ z)7SvO2NS}X(c*?f@Z^V-+@jve+Sti)c7i!J6P$G*U2u1VsF<kGg5kH1e6*-AH;w}F zb4>W)NcsqWYUSIsUlcPxe{AAo{>Km&q*Zhvf)cMFflaTsQ;p5@MX#4K29s6TBZk<m zz!YIIm#=QD<c9uCi||B&<V0n#7!4w!S5d&j?I<|I-ciK!2vXf|<6)^R&x?%h^=Ngv zlaX);LU9S@J70|r6<-Z?ZZh`%2HUz7F>QFJaBiFcWhNUZe!RcEcu?AvcP$jo>vXTO zv!6$#{kzDx^Yn+rMhYbR@`0_vSCdz9MEyvrPSq_MR(_A?{O(^Gox!#J_2sa;@*jCe zTVW|dM+qe=19-)n+?M|h>Od90t&W|STU>oiVNZB&hoXG~rHnHHzk#4Zw6s&Hq<sO9 ze@@wtLnqiOyf!SqU&=fhMzRyS{-PJBZtd3eEn<K#@0z<{9?PZ~q8Hc#B1SFioKtsg zc;T*3`eiZW$kS5Jie}DEzJ4Hp5U~!7-N4KrIF5=Qc`a#rd12%Jc3aPYnw|&B<O`c> zx@CqJ4aSvSPpcX39-f|BoSpn~>2~a!F!nbZc&z(;aNf@F)MWdWE2RyMsYNAW$?5wh zN7EK*6;ke(*5Vvpb<rY<^nwi`zk#S(iljsF1=~ER)QU)6!w84iMzf$w6|0z&yT$$m z%e~zfVBqV~ackqXRc()4W{n_C-$ntwNNQ=v@SL{c`8$zC1Dc2|3OQ#L6!5;lKtW9{ zM0^JgJeKqbHXDz6{L25#ePgO;*OoW$-wKY&f$;V$yDFosqltfURLM|s^=L)M_?5Al zsfEwv<DYNv4cp($UOQlc=Ekvg5X?li_Wpsg#^$u*Qt0FEXp^IFYp-GLY2=gb6kTKD zm&d0UbXLTiOxRq^GE>4fAD9<5Pp4M&5jBf(h;0%uibRAIpD^~9DzD%E*gUj~S>_KS zZyr?6tRAT0(<tkZPbKXXmeYE3^KnYmupzR5T+UU~%F#6-Fd{Xxtf2{&qB#_?ej8fi z4>0HF@F@Sv?Txj&TdA1D4vQj-f-5q;BdKaMwQ8igYhrlj%ca}-FZCD>YWx_lfBhUa zKe_Ab)yn1;P<seoJi?-Ey#0*q98@h3Fb0_h6sozTuxt395j3W`U=PfTT4jk@XA7F9 zfZ9viB$|cRuxbY8Rh&$={|n`6uAweJEjA3Uw2y2A<#aQt2go~@YWX(O${@_VGuIwG zD{h~0jjbb=braKdboLJjOUWpztq0(zP?iSYe3)Z0hNV-?qcVpR^K<YOYj-wRZf_z( z(y4hhhy)BZ5b4pSAow?O8pgUtW@lFxrWWTX_NUO#&*7ScaKxa1Z&v~Pn;nB!QQp1~ zg*{sBeGKg!<V}%^jwzaMX;PLkv<mKL`3?E>BE_t8ge@~ftulG^V`!CqR1jG*4r%PF zJ{#NjPH6ipl$ATXnB-*3&{}!tEGlj_PuB}>0~^n%h!~h$Zy#Pyth~;m5x}KpZ|&w5 zoRE@NbqQl>#(#S<@E~Jj*oFfD3{VG#r_gnJabbFCab|h>-rj?r!RsLSL`?FG@~)n# zZ5egrnYH7sS7)HX?8@Tw62QLzAKB;s4-9ExgJ5zRyHQgVlJX0K;*$}<;a2Wm+7{N5 zhTbwZ3G(&{{CdH}VwU8h7NVA!!WQX*W@*CaX$)!s>^i~9u33y4LAnm{C%pa3%d^+- z8&Nl`TY|^7kqtwiLSDTnoq!fe%QTV;Hi2o46KfAa<R#5x7?m8%oLxP`qq55@I|c`T ztB@XN=D_5)oG+p4^39dyxz&}MD=X`FZ$S-?%3&5ZIK!hYX&xC?IFMC8QQAH^IJvmE zu{yiD0w2WG2{io2$tI0|9l9P!)PK8&FtpWobQM%zip$Omh)r}32sLv-Xqs7z>UxQq zMv0q7FerPTKCj0p=LR}2V44i|yiq*4oCmjln7CCUsjPcear+5%|JpJ*y^Xo#3a%!- zW|&huM8)%xrcVQvl(U*c!ouynuJJ8XzhW{OCmkC{L{La<R&Hxw->=vG=jO(aH?0?^ z1)86onEy=sXI#;c1L1S4%X4ch3+ro(n7Xk(zrMb-wYl?r@8<G~j;S93ivr~Z8}GEs zIZe}fO;c@G=ipP&Y;J88wlK4_1Y4V0T%7o_Eamt~)X!HLzKw`cj{~Elm)b6)=3!>$ z_(a9oA^lC9TvUuKc{Duv^+UO}{D~!O2+r%B*N+C^`HT|;Op};3f=HwhyoQksn!!R^ z{y%;Ecmm$Ps%$-YiUNLcrRnM_JgPZoNLb~7$g`>VaH)HPs~KB;h)iy#Qgl}|aR3`1 znU)E090YXxf#+A#V1DB~_A&IQCiZDxUIs}A&SzIw=hs#NT}%bYF?D%+Yh`;IDm>q~ zdw1{E)5`iz1uZuKOjJK8IJY;iX}aw4bpQC`^4121e2{(ECWic}-(q-h*?Y{#d#_)w zYH7<VuZ&91!d$B|vUgO}H|JDwWmWg3k#{>SU_^bvj^8xtyipvFVJxp<EVaBBi41~8 zJA_ag;Tn*8!rs56eE9fNLqF;-)PTr$j%_(cUgAc(8gbJkVhQV{(w^mePfIV)aA*Zc z=vdo(dj}__7Ng@<tcjYV_59E)DEZy@Fsq>IEAwk>py~@~Ne8f3wzg0eIKQ>MdTV>_ z_HC%^w{Lx}cWys=wzv24X-ISq4WAC7pplMOPHOFVUh`CA|J=;V>dMvzYzj?2M)t9g zf3Wi&=Q`K;msdImuU0g*q!*WAW+qs9c%xPq>YC8YInpV(lU}qt!>hxh;sdmE>qT-w z)r}yz=tL;xLL%>VP8RXt;m!$xe}1|B@Y()EG6-FTjicpV$~BPn<QME!?P3A=?s4b{ 
zwOncr7Ot+|5iuDh<#3|ENyQ(Q&GF-@=mc8ZKYR7HmCwSyxq(6*E#^>Bgx^8cjXQS^ z*3G+j;c@FOy576{^4-hXl~pAz1R<+3yP8)>eqUkh&GL?!5!mRh&E+rT&+ThIdSpRg zAUKHq5vl%PpUi*0R@K_uTU1*QX3sAs!O=h1%++1b+FD4}j6~9jT*~nW0V6UY6CV9& zPTdF&op27FFjkGAGh()<C0+g@X6GAGbVA^to}T>FKD2_l>Ix!n9a<%9nxY%nf?De* z<J8bMJ+b~Mw{c9+AXwGR9&CJcW)7Ix8#izMlgSVi?epkBb#W2xWYDyK5q1pltG7@x zzEAkpy?glU#=U#@cXyt>c@Yww3xZE3X5kXwnBRJ{sBNZWWNCf_wz-9h1ps@D&qtF# zf!F68*`49};0=_q2Pr_$I*zjS_A_>LQ8Tt6le8revpy|i0Z}%Sk{73LIGa`ot7b4f z(kUUo6S4k>h&7Y6)9#b!Cm8;jW%c$>S;s8q3RHH(n1ErVvd1Mgj~X%wJBN^>TRShC zhgS8x@<8NmJ-vbxQwwYA2GJ7?e&4L!@dNTc^+n;qa?GLSeV=x;uz!pC_Jaq<tRVQC zckXB#`w(!biJOEcR*!<<S9Q;hEv~QM-d@_+fPbOFuyP#ij}UzHW9Q%?_`T$U;^2g2 zL{ONOySJf@ow%0OccL~w2$`HduLn}kt{uvv8N{p}z@#1kk3@2w-wT?bfq*u(_JqSf zv$PK_SNAS94z7U6>wD*mn5XFkqMSUvyvx;@&5@=1sZ~R4nm!ulcAnu;sf8uzTn!G3 ziw&Fcua{|mzUR#10?7O4Saof6e*IT7vrjuV)E{m?e02NK5%m@{d+^}llPAyKymWAj zA>vS_l5%iQZYk-Uuj-z^KD)kt=ho`>CR*^<FoKVcZ5MC_wnt|;QFAXRF12^$R#w9} zh>VJJ^bIt1LTH+rF(^BKFJO9_SC^1ihe^ekSv`<Z)t^Dtmq7(dsp|h<a^7co^!Rjx zPdNNDMZO6Ar)1hkHlhmq1dZd>yzAAy>!_q$oI;BqJb&9ezM+Xo7t^-2^Fl_YXIHnj zkIYQ}*38V;mtyDk<sF@Y{4DREoebKsqs{qOs*ZvA&ZEar@9pe-Q|}*me&_LHRDJXq z)&BA0w;$iPbPlTOd65bl3TXR<7xq_lEwo-+oLJsiy|o43dmx{QviApO<{VmVbyNkZ z#{{!s$?2#$38)!pcJc-mr{z6Q@f#7aDpFsxXHfH}S3%M#dDAL-(#pG^Rtx%1)u8{Q z77|s^c|zl#Q!aJSR`o2PjLq=6vO}treW7*$%E_OTaU5N|v-$9ObYZW6mYcDIlUI0j zUS-X|=;&9W>$ht+=8iHppMk%*yfnYMx_Cg|F|<QWdQjW<QAdNldw=h9eTb<~o_wx* zyN{mizq^O7yZ3f>?>^pv&+ff^@$u7-1!YYPLi!}artV42#h2&GJLj%UuAu_tEsWr! zQaUqBOPCx!mi?c-d8E`sFx%YITX?B1A-5nfF3H(H#LU@E*TRBN%jrLqLrx2tl8alP zQ}Llu@T8V^r<QZ0mT@7M_xKNk`2TAV&t;wb?Aggesy`uJ^E9_{qUqXd+wgi?&9Inx zvO2Os!?%(0qNB2P)PrYl24}W4T+=1>Y#e<2K)dQXI>+YbzP?g<+@*QrU#DnL^1i;l zw7K~O_+QP;LE7)_JO&Nl=ls#rCr|-$czpci$<EU!yU(8OJbSu>dL8rlWWNoDeE9fX zwCunB@OEiqQ(Dyt!bvsPw8Hk;^3M6Tp{2!57+<!&<nUKv-!RyZV`F|C?`o`ZRJwks zs-+cz*~qj^ka}wmq@k^Yl7Z=I^}zqD5&6AZFb$e^Dp^-b85c?^2XZOLGlsEbR_Vm% zDa9xE!};GYEratVmr>Dc`{<^zUx}oBfu?VhtX&?dgiU(omHSU$Csz!L>U$g5IfBTi z6_@r64}TT9{w9Zm30g5J8q^ilRR|;xD*Qh1x6#0V)^@aRKZJ_bbD$bUIHo>-1{Ih; zSTX#g;D53Qz2LzfhWtI0=Xvya_r<f_=TA*-LkM}axYUs$`F((WegDEF+UH=@9_8(q zmoTaO<NmZSLjYRpr<%HZK<X273;bgfQLf&_O~=ffU)$mT(Tx5NohS-<cPeQo3ZPxm zo&sIzv_p6hMSO@ttC+f9e*W}t#@Nf3FJ@<^n(C{Y>Z;+%(<giX%AI-hE~owm0N*^c zR?;>tW|pAhS*z{e%&O!qr0u)*;Mw@{eM4lvl%9>fw-1PXd1EswKRI`JJ{XouIpf&o zWubvz`aFdI?cp(5cZ{Femv=PnUp&l{&#DeE1JX~>ki+AX7cX$tz30z&pFQ920w~!> znV&tBHF~@QgT>E3e<-SGq2bd$D`03GP*Tz{5AJSgb{%E!(P}@3o~VObx;{6DYY7Q1 z+XLA53=LH_w`P`Bgr{V<g+y3;BK2+Ul?_Y?bVL5rIGIQ*lvLW4L<T`B>rO7`Nha?_ zqZ*(P)L@j<XO}(z;nlwf>i6#6wy`un&&y3sO+`jVN=`vWK|xMVPDV*d!Ntk0sjgaI zQ}O5T)!6(N#@JlGu^E=rb-_9dT#c%GHKC}bdsO-E>-SZiH)YHMbgk^%Ln4w3id*~o zen)<C{~iKUiwh7W&aSS^eFgkyOSApp{z2d|b{(`D!z~8jgJ3^>_3FvXS5IHOgo^nu zs$$-K`Rdn}&p(sDZ~M_cf9KVYZ)X-)WK^99IMk#pql+%jUFuux8eLh~+*;Y%0JX<B zoT;y8<{W1^?f5+EPJj*Q#j|nQdC16E2cG~FM^`NqGa&=SDXSb-qYxSnF)|hba#kS< zR$&S@VM;b({);M7$~v;DdU5ede|OsFXJ=%jF0!#QD=Emjxj3dI#x>X1z>CX^^KH%b znW;%gPd7t79bRrOE>4cZ+^j$ILR9tTiE4;mudM?1IvxcIh)NK7KJ5@HN&BvmCD3_f zQoWp^os9=7Ctq6Mcy(eNS1R&IS2Km4SAe?Ay1ur64q8`l-P(^>Q40TXKTzGd|6~`f z?O&t*5)JvQSI<A!*DqdwTj52_yQm)V;>DW}AKw1->CK0aP+$G{;nn*eU%vbC`MY<| z-n@PE^x5u<=g;4~j!r6KzhF)$?ckQwRN1{y-@iCGvxfHeE0_c(rkfvU;^_3s%1y9F zYpc_%EB!adE?w@-Dz6GnN%shkw(>ygS=%WYnhUuX3v1a?QjpnLm{^z^nwuD4s+p0V zwS_4U7Y92R5A5^b7kHo=RGo>5p{=Rzi0LjLel#-BJ$?FgNTA=Jvu`_3UZj*?#YC?u zRYSt2iKrB%R~@;yjjUPtoxN9sGq?3z<8-X-5W(RIxdqKVy_j3U<9D-Iy8|X}y}7b7 zzq$&p2Ib*VGi3G^{sAWCj&bi`LeN3z(}Ti}k@0T`fBE|DtGB;acm*EeMfAIGp1pk8 z(a~02Syo+HT2)b6Szc0E0kyQethlVKsQ>D~!za(c?Y#f#{mkMrzoa<?wi<|x{MH#D zzhiV|etmsk?QzYT`=+y(mX)Msq-A8LLrqCd35$;M^7pq#AWZG-%$;3LoZZxnObmRI 
zRZJ0_?5tQ%ikq`NAtil5-IZT{`CDTD>eWj+I$FN-JizlYKjMLKK6Ca=Zg%>guzN$3 zE4lS!z<k&EmQ!S{xK%bpuhQ08r-e*?5^8o|y(?{-QnK(gb3yn<$LCa3^&d!4j{g>& z$A~;;YQf_A`Zq!AzJ~|kA7X$9KZDN3{0jVwmxopTAnk|i>v!*W_V$<<8Hk99z(#&m zDJjmJIU^@4{p!O{(8caNdbIQ6*}dHz)EaYEC2mch_{x#$-o>_|<(n&O^J^;*&ti-{ zo}USwdT9IxBO?O|3Gv~tJA3wwkf^8)(qGfe(!|x*)ZU8&2lF1T4yOpo)l85tUP93E z_gg0}c~OFsgYCC|N=sdhoPvCDY5sqE_ez^aD!b<)fQ72&nxo)arQz4arRqx|Y1hy@ zz4_>QXjZeDnJsux^qk>V)SThpeggs2(hqPo%gd;#l540$&C1pm#?}0qrTL=pU}+wq zY2OF_=>ZROm}<-;rtZIjsc(P$_;`1hhKAbT$LraXy|tC4{rdLJYZ>W_!oq_0_x85$ z-@Er1HN@P1viI`co0#NcDqbxzF$<^I`l_DAx+{y<XIJOeKF>_Rw`^*0p?7eAn~Srz ztNr%&=C5D3y8QO-8+lnNE?ypMFF!piTWxD687)gTR;In3N84NLP`5VLFcp4*t(Dms zA~I$n)v22kKYjX}Nqt}UWdee;SpE5@pMJ{EPWN(m-QLD>m*(fso`N`7n3??ezxk7& zzj~W>X{7zeX4mAc^x9D|ixgF4gQ`y>`2|NIy`ZhVS2u6(xkMD`THAYsMkeK>qF1n~ zOSf;&uCIe@o!{D8*xm+`58Zik;pX}p#?xXdv|rfTTE2CA<<8y3%?(ia+dI3DU%k2i z<SEoAZ-3l-{q8<0o%q?(pu$yf*&jZFr+xp^ryoE4{P95j>F1x{efS7$!xK<JtQ8u( zdIv2&?LB)6%qPV~AO88O%8LAay!}IiH<#vb-G2bzj=mP~@ZHCEeb+`g#f;CK*EbKT zsOVmRc&Be-Wp-t00fO$^x92xEr<Ruv`y6-?nCIo;26PX9orbD13kSQAy|b>Bt-6`@ zd09;&A|g}@16@%6qANThA|fOqr(zIL=<e;s+$r>LR1EcXn3)){zGZ7=0b?rxApyt& z*54&BE6vW%wz#tJ?nf+tY<FnzinqI~nzG`>3*vC%pu!VPb=4q$pULsj4<9~Y>1CgO z9vbMYEG;ZA&aWsbs46Q=Pfd66iSi3ej7-Qf_ec=8DNwRY<rI=7r(tAdVi6M+QP<Sd zFmttV^9qPf%&)G!HZ@&TT&S+D#?N;i&a8;2u%V$reN#P9y$=8Y-*u%YJtHkPJ{D?B zTuf|SOngFId}4fJVnRVd?!v|fC_KEPs2~r!4O^0tk@EBNS>4=x{{F{D=%hLT|7dr& zvHns)UN*Ys6z1>O?COfr_03ISzO}ibs2~?=aX}vD399Djl$R84Y^=Zk_!D%NohN&^ zm{(U(f+L)nn|<Z#m5n>MA3i~4?@`0e?jCfO(aFj4B5DMz3Mx*?In7g-`j;zPuPUo3 ztEeb>c)G!_o?Kd--P}Al#-W+-=jGwT$~>GMT6%gV9X(A`3oSEC5mf^c9_jB0DgWQ^ z&-~~Aocf<L#NU0cB>y-~L_k4*j$bjmu=*P|_I7tl3v<m)jAW%H#YBbqczGnmMP;Ne zni(6G6y`pD^bjj-@Tund8aV0S)Uu+yy6SQ;o6sgLIbK0dMpzKeEsx|yaTiCshYx<w z;&gU)R=^(XTlDmFT-=-rit^B@+gq^Q$t@u^5+dK?lES<9zxq3sxmjt<%#5&QH5H}c z0N;$%B>1s#9AKaQyge1=Wm#F6X=$ll9PGZ?;JbHk85rn!xw)V>$w*7W$d0a(@JpGQ z=&8>!F>&(K($ZR+8<!VlU#cj|PER({*P*4Q;o{+m&CbuSs^;eAq^Cb;YGhEDn^|5` zke-sDuBuE$MJX;JHoLZlIrX6YR#ujzWTbL3QeVCTRpA^-NlH*tQz<IRLx97>%?V>% zDSFQX{g9#}La-elr01&-A9tTWd-3+&<n$yJ6_vP{h@1>&S0ynOcAk=w!o$rKV1}(h z1KF<|D9A~(vN9uF95K29-*xKL59!GXSRwrWhaaeEs5v;;uiqHnefjeK&d!rJ@22Nw zB_%J=&{AKJ5OHwyr8uukE^g@%U02<=6rEmaXXgMe1x^7R%#hIF#ap*9SC&VnCWM3p zV3(j`rA13a?Yr;4Cm|(eU}OZT7rrP(Nku6mEhTmF0vKR;#8g=sY4G{PWK`@I^*ur| z4;lk~9W6Ms!a@R;rY4DTQ8^hYz%4w+$3)mznu`bv($UcXv0Iz#-+XmuY7#Lqv9#m` z^sz!!S#*VC1sfFLKMy#;2mQP~3iGnciu01=qg9m@D5)rXyxe~)^8hxS(8I4kd-?>9 zjHsxHfsr2Ik(HKwxJ_q!Gbt%aK%npR0aJ(ca65JC6j1v7*)v>U`smR^M>`w%tcj7q z!Pnosc>{`<7#nr?Bifqk|NW1DFfr0kjgNfu*3|eY6(xnRhzJui6Dupzom<=AGyv}k z$IaQr3Dh3N{H<FCh6b!GOozYa!To!%SKojCz3JCGhA9rV)`Uca-MwAU-@beK?)}J( z5$JA93$uq?@}K9iFgLw9e-l>&2OBF>6C==S7G@@J6wpI&V>6J$*9M?pLu25fqoeiV zlcz5~e5|f22X{e#j;^Zg^9qi>{!vyT{j=O!+Fm&&T?_4FTlZhQefH!r3=?oJjEoHC zHa1{%yFNY!ol#put+uKRYrbN52=#T&_BP;Wv@NU!#Bm0Co(M+*5=uTLo8XkHk3anc zuYiC<M*vg+ay+~Z_V)n5;EJ0YE**RwItZM45WT}6kq{U8umApU5QoEWfuf#1eL6WI z_V~bK7)ywWJp9)B+6rj6oV*Oe-320gteah3UW9{V?_kqE(DUy7JJjft6c1EDICh+G z?`m%*A|^66)c<vVUcUzIf}el5F^rYu<fH?A*zYd{!`}}-{J_XS571(50H^XC{khhz zj?tMZ(0M~612$IX!_9Vg9<j5rxH{S6+Gps?5)xt%NIrl2_Qs77km$vQxx>xiv}$Uo z)zx2u220o)K$llnQyCc_0l^0iJYwEOV=X{lSX{VzYrC_%6P|+rzI^%BEU(?YXR6xn zM660;Mj_yH$~)&qmhQvr{yv_-VNq%E^vd$Ju~8T@lH#NBe_d-+9h`9`U41@LF)kd; zySq4?AtvXRvxv@ZdiMdvJdA_q&e6?GjURd5VK{*OCm|sjywZEXGem(t0FZ~<gIf?3 zIFD;<DvI)S=V*7I9QU62j~+gN@j4${$`QppIhmTeN^E>I6*X0ILJU?yU_T9w^jq7T z!P4KqcNe+|E~kXGq$I?gK7A@TEB#33;S@i@VFf=ckDF^pE6!W#Zf&fCfIGW7cMtYM zxN2aikCl1U1u(qRNFdOlrK1I@2UkBlJcNsRLH_gLJ)&YFY3XQ|7qO(aoa}8Ox+y3w z1P6jubH0MGu@(-jf1qFA;1zIOV3u&TL<VLOvMBIr`z2M6lw6)|A6`Stg`u#p(9q=M 
z!0-@^-N#`bB34luIWB$yE>3nFxk|?$2uK)2wR{uGpT7Aq$j=-0aAR%d_@`b-kRMF! zPhaw!c$mjuuJ-p36BDJUCPDOcoc1eA^DxSQjNs6DGHq>*oZKw5IACG^?b|mD4CkOt zZT%(KcQ+SD*xK`F-~O512lwv6@B{-N1Sy9%azR`aRPhyhPy^=ShhvTApjca&vM@84 z85`jGIpCC-m>4Wfjj_H9=2l)ouDrH(Y<|wb04MVWxtTDapXcRn#ddiP1{%=7grxYz z^_79ES8y^fzz3N5`gz08Sy@^*+`!qv4y<)zasmS*y|sne)q%d<T`YgpW^HAur?V|Q zBoI!kos)e`Y!vj|uFI|8G#eeh2FDPN@TH3Uyy6zXo>ta5Ag8;$bG~JGy{f8;oPs>3 zu%NTQ54?@7rNz|5jZZ&)#QJsM10%zNiAjigg+<tTd2uoCiE#SvG$AFA93rA{a^*gp z7l@k<H;a#ngyXECrt%5xe-4(#`B|VPHYO5-J;a9KKd~|op@-xJaTtHA%Zsr>2p=>v zGg{qP{qYzVm>?X6?`^8D#>Kp@u2x}j0WRiWy?O-!qlSiBMOFFR_wRYQIF%IS4mSe` z&k_*O(9wzt3m$&AsjixWf}*ar3YETo{R&qGBQh)qqPf$A<S<fiZ>?h+GGb4kIdhJI z{><4kO?6m07cUPNAOHD;^tAqw>-q+IxR_^QVS*Tsn1lodYiu(#rs0UWAe{QIUg_@X zz{5Nsjr8%v#XQ9C0)qT;39%6F!7zIE?Ae^m)WgjTbal?0IRnH(ENNqJV`FQHb+=WO z6$uClDXFQzUHts>&q?W(6x<qwf<_i0<wck0N?OLbxVWwDtg0L8dAPZt&%#;5nm;kr z(>Zne6bT729As_*AvPW!T+Dm9Iseb8vm_jnuHl6V=|%97w&sR!N6b$@LC^tv4re3< zn>Q8*AO!fg?`(h3d1??nthSbrh=_)k2BIXaI|KeuMn<Z)uWRq=-XFlco_29bA#jM* z+dvnCqo<~(Qd&_uH9ZcS%FDuHlcXdin3x%5WTkLj)C0^c%#D{;7x$h#fgg@_@YPaR zIZZ%JM8`);PTA2`f4G63wkEJ4D=!DQdbzt|J;E$(EdF7kE!|yudb(KQ0Sg2h5tk6> z;9$qPw`(iQ@H0)!jA|R}+S{9O*>x22!h*iOUU+m~P>>&dV|_zydPWM^NUTR#Q(YB$ zgq(t$l8Pe2(_L9bNlsP@>(?<bF-l5H!f?>tff@yF-+7>><3Y%(ET9({S2<GBIuVmm zoR*VQT3zWA=*Py!a%+1FOQJwSjf9Mhg@c2Mot;lajDz<)9_Ih?!&xF$3FojPZGBTl zMtWQ!p|p%7VDEe=M+YUy-v`E>y84<oKfcH4JP>ubnV6_BGYb=J2L|zv*zPL@PLiMR ze0f#b_U+AMGT&H-i+NHE^VBrhO(4eGsHrKJRhBlO<eGf^#xPdqSy)+ARF#;S7{7_U zFolnoM^;9vr?2z&o!c}xnAg+RI73K6$|OQTNqxDc7AqbUl$6S9D&!OtE{^tC4;>Q= zv!}mbZELHpt~OTY0cnWJ0UjP+9;_(~I4tlp4Gi^4E6bak>hLfxd_3k`+gmED$|zA; z0_@C#zG>-b8Jii0goi?mps%BSxEXX$Hg+~86(w*!2c4?9Z731F46~wJYTbBM&thT2 zKuJw?L~IlX2iq-dX(DY+b#h7yZhn5K;<9o)0)n`h_jU*7&k!(++65O}kdy&$j`dhU zxGgU)3z~^FEl^Qf2s<1R6$Vlt7af7i+lYw@v9hu#D=RQEGGOHg8gOxQ=H}-tt<3*1 zI$v4@aV~UZ7=<D3I9S0$5)u+<Xs817x!LKk&sej*QWquI+1ZSY^jVm(GY^{*5fLh{ zDqmchL+Ly=Z=<87ewK)onn#hGlD4!k8*6Tvl8VaE%oLoMD-PxvnVAqsud2od9URP~ zP7?*WimI}xs0db_$28E@*C{M1YHX~<&%Do3%*VyY^j_(@a;1xk3cIQR^K|FvtnI7= zf&;2*Yc5`taImq$#ylGrH<zKQ5uCE-`mb_;F$qQFTx!fph`^klvX1$(_VKjb0(N%R z?X3-5$52#KQbtKx)7VT%0(Y*`&H2021ZSB<EPb=jpXXPUm%)_=(bQA}$AwjeTAS*D zd3O(&8xtc@5n;GuSU{SSlO1#hbpG|LmxmjG3+3YCOwUNYIXiXa<e#IMC)Lr_DlIPo zs{mpSSaNc4u(5%puwour*;#pcxy{T?z}qAz#zDWp>c@b2_#k*u@Gw}JhX@7sIXf?V zV)6zq=5@8y35dzh32GCQQ>P|kX&@tY@f`g*TNh_qI$9S;I~>e2x_Ek2)YrrBg7F;} z(%QP(Qc@T3F|VgxP+Zv1aOnu0_eavw;bC4NJ~4Lm#?Z>zG7XLtB1C`m4D{Z<-o<6b z^J^=hnReEeSaqI{pU=j@j)at?z2#e9U0v5r%%(zi!Nwt`zU1;;QR|eFvg-QkB39<9 zXlNAF)D0}Htvrwtva)!X|Nac&83tif?{sb+UNsd(T+AC7=)zCH>TO_1f*p2nwC(Hf zjtC3E#XKO*&CO+PV}XZx04zB*X=-}xn9SqxHrhIxpyBh&bB)dQUOrw%CWbnC+Msfu ztDd&Lp`N3QV`WWcN>V%{13lJV0_I`pad5C<#l<{y3QmsH^pug2tGLd-j+PoB2?d>? 
z4k0O3MoJtOq(y|l(>i&$gD_$}!XOvGyt9XUMO`g0&yI_Ea*%Odh>3794@V3RLVi&} zLnDg$<=@XdKQKQzHTLlFBRtGAFwzHw2DY>{KYjgLL0<Me54V?_D_8}DlLJ&Y7e^m2 zcRD&6Fk>L~u$>prv0Mt2l~X@UBSb7@Y7tgd&^BArHgWsG4z{34jhc>DU02V_72zBh zCaIu+n|UX&^Jf@@j671gczD%uFc01a#&-rB%(tSMx3acq?`#VX3&z7d4-dDUy){1O zIXM!O6UHY;kDTy!1oPx%Iy#!=m8G)_(>=YNNht}w{yyGF4=+sh@q|aDuUA}rEcDZi zv?K-`%u8Mr=LY5-ZCP+L&%w!_kQ9IQY9H8FJj~PZsh=gL$V`pL8lnR8E?%CvnMZjW zXVCe&Iu!G`ybU>-o`IeW`tmxi1{j@hY&=Tm{k)H49;9h^cL#O#8QWC}4D-QZ!R?*x zyU(8o1^U6sWn^N2_SDo=00|hmvjha+eg7SpOrXB&atqcIEiNiZ&&Wi_uX%=BTi!k~ zxn`{N^3D0J$A_3#rJ<wMG%&Dn_dtY1NGU4fz7Fejnt%j!Uf(T+iyJ5N;I)oq-qOmf zrL8F}BoH6-yxgFf5D(&E9{3p>A3i4Yc>FvtUr|+l|IvfTdpom>H-|^A4h;<qT^|@4 z>PJ0ZAGkg|2w(JQcQ-FP1L9d+I*(x<m(D{24i0t*==ukGXz?&lN<qo3e1?cTGc5rt z^C060r1v=#^R~Ded}dw;x3@tvFDom3w6{41^Uwglx0#w5-`Uy0m7zs5&wye6&VzgR zcOKU^HF&~+Yig{ZD9_5qLW*ALV{c>G+tvON_r0=FsSj2rS8vaY$~J^d@|<ek2~{IG z4HH);HV&Er(0Lj<I!yyZ8+T7cXrzpi5+^$wu4__f2uV-V3+cHg;b9)7^TtOpZ)w)t z))e|jFh4#q{Lkt<hWW~}?yk$7ms{Ig8X@RF*E*;z=*O;(HjpW3UtEw4%)i3pZSd+m zI|o~IY}A$hZv4!Xb1IxBB*V=-lba9nSj>Z;XJz>vI*(55kR6}-BmMm3^cXOYD~kD= z`TKVt-tX$`_6zU@28c<B;dp|{d9wHGW%tLA9v;3XxF5?Pke-(sFBw_+6R;}s>iCBj z_Gi?Nw_RI)_v42H%+qQa7}~mfx}lg?#?L%>8w~S!{Jh~&I&X1+d91lg4D<F5*35Xl zO+r$_@tJR`!*@-pth@yJ7sS^vm{L(u9;}p<6eJ|Xm{s8z=JD%13tpW+ig|Tlo|Ikg zG!b^@#g1ZL>}%%n`1vEe%@>_-{DYVW(ZKbqzheI3{g?0FXJw|rF=u6EE-n1_Zt(rR zUF__vjkQ&H`g=)L<-kx6D?2X{y%deCGcx^hTJ3m6=giZWZ@+jOIxRy(TMsYyuqYJs zxaQlqI-VsWJIg4l=bDU%dGg~izwd4EFb_I^q@N#~7&)5xWfb#Rrv!Y-&!h4-Kzo3% z_ulT#%JRZ~-P~B4nz#WcJwGSoNaneYV*WTfPee*V!X|wb^FHTr_<8YTGEaeKUiOb+ z{zz|wW_}m1pa0DKgWbp5x3)oQRF$y+*4Eqr67<8VAFxh3+VAhhl@(3xZRgK(S=jgy zG09O$*?Xk6r_|i2>bi-AdF3OS7tu%H)p@ex>U>y8&_A#9$6+2b-v&4Hs6o7>tQc2T z0d@d5j|>lf`r<jp&%gR7n2(8z2Igt-dK+LKub*df|E%+PnE#x&Vf+0t>=)*b$W;m* zsq+bmaZ_k-gV)dh8pGyhr^7cMo}~TY{ylh|jGP>XW2~$A_w%&Hm6eU{tq|=#et0J? 
zxty3qfmGbmCb9;w@1DG+qNGSgL#<_KWOtO#d+amsmW)4d^M!fa<LEq|ybT`a#eR#< zZ=f@8wN+*Km@h5D#XROZtd*5n|6p%MD$cykAENWRXm4{Q^KQP#bM)tqqw~KzhNbvJ zz0EP0N9lZ9N89taZ=D_O;8-1QW@%<ZL`)<uAr1&Zz=dlN{LFk?D|($2YR*z|MH>~j z#@X|_ddU3rx+%|)Q~@DjRc$T1<LbPqk!Kp7{QQAjrR`BVf1DWh5AZfGUOcC!p-#p& zf9$|@Se)~1?%djfp%&Uy)>NXdN&Q~tg@yRfpXWg{-vv64b$>9-6aN7^f7CT8^5c7( zTD<cr1@X_TJjgt**`x=&O&geba5ZK|hFE@@m<aTAIeA%99L$UF>wMKd^PkrSZr^zz zBxz2-p{DJfms&R!Tik1A=WONTf}i=%I?p6(=ACf_^Z4>Mb)T7UZ43{^>21W0%G;oI zp8c52zkmN8z9=-vA6t04M2eYjgVuShYnSHdW*{)v)6*`iEK5v?J<`wP%2fg({QT#m zG2RC6H7R0NDKzu({}l7!ZFKN>n`6bW`Nahoo&P78=Z{Y~F7s_2t-!pJqP(4z#o=Z= z+?=dztQuMx5Q4R0n;HJApRcNHXl>!){%Sq!&fZgLCHu2nn(_`w@fD--rGwrPNv=Vm zN5-%O%wkr4SzO#aYQH;INkV+=yv-3Y>@od36yVSt+l2GSj~+ryq^_=-SC9)&^02Kd zX=$h>CMGs9HLR#AFU-%TqNd!$wz3$Lw{di`V_{}`@%%Z~QwD^@#>Wf}_Tck2Xy$Rn zuwT5*v0_-=qy79**I|nbj+}eo=Z}P+v%Gjj4EuQZ2yYV<`dR0d73FO#%?~$&c$}Mu z+r-=ycD@Ba^OY4HS9;I$^32apA8c^%!D9{)141@sS%;*Uvg-*IBYugwQn;_fqL?Qp zBV>`V56<To5R#F)@H?2#&PX99A-1uzZfZR`KaXbq54a}v3wnN+jI`w8*FXOB6YPV6 zqFiQn2B^p3SsYMO5@UgR8(XW&nu><HY7oGYp+T(EX?S?JU~B?A@#A|ei&>bN8KtBp z0f1}Q`f%y|v6)96GjD^3d3_@TIXM~J$xJfcqnH=qKQAHzF{~H(c^EPAFh4N`I{z5Y zeGeF!K<7i+yV@~2Z-Y(e1qIHtbFkYvIl!=sb?v}@3|mxD-Zy+*K!9&@?Av9<H|JJZ zQGc>oN6sNBs`y%5`E@x}bygNeJUUOrdch^4_=1!yAGVq8tILZ(4oD&}?__6lxPc!M zwQkYT*`fK!TqP=o{bT(6FQ|Kc=z{!Wed~gR7#u?w2kGhQUcbchU3%J@;OAZ4U8*lt z-JG4GprlAk#^O4Z<YnpU&*|uB!ln+tJ2^fIokdGaBPlsyczA#o@7x1Yc3Jc_srcW) zJZ8R)3tn#n#~kpJla~_^;OFMzINV@ndIHrc40Q903hL`?&`Z3rFfS~~&&9<Fr~4co zEmk{sV7|?bM~@!j%2gWb=~7WsA$`3Yn;V|Kexs-$XJcu0xS5RfMOr#qLt_JoWCyQw z<I#B(^Vf$2_^~h##mmFv6dB7|ZXHpx$jFlG9uWn~%Bpy-NfEJ1c*d0J8ClTL(Hve0 z1y%w0k(HIEq@wgdI3L~!3|kPsAv_SR?afCrfAoAC(D~yrUtUs3OiXm8r{nNj%Zu~U zk{7^uT&gTR{O+6AuYqK4ZZ0n$&xXdjS8v}!OF0=VD;kROvf!LT?{Kom(%G#o%qS@- zt!%8)(o<k;!sBfSNXW@K6;OUY6+iRm@XWVy!8_k(zavvnP+&bx1AX1Vys@cKUQvEq zTNC`Q_GT<2pqHB~Y=IiO54I&980OLIq+;MFzx(k$6C;Bk(&KQmnu=n8nURqJ*ocdX zvM@Krx&$vjCj)cEP)LC9Fnjw42gCfp$S~+U))BeAxds%2oJsU7m-YqosL+C|iRD+f z?(bonZ{v8Hkd%a7(l?<zG%g#4`-V$cZdk3RstnjOGSVMjSONP93_~2}iS+2|ZVwL) z#_w(Hj@0>MGym?x2k>mb_AkG&z`-XZB-GMU_Yd&t>T3V^>F213PyzyifxfQ8jUPR{ zzp}h=_;d{Pc7Zer2?;tnJLVVW&dg5X@iu3P$*Fi%2}mfg&#OeqIKCKG>=?Pqh56Zt z(4ePJpI}{5flBV>XTxx2WowmJm_If-2B*o#(+z9O?X3;0bJP#&JTQM_{Q6Hn|IE$F zVTofI?~CWpZf$Qd(4VuhwhRvo=EJc*4n%crWf|8k#l#Qj{Pn2`QBmRS^kl3ppFiEX zy}imSq)5oDC~O+(m)R9waAjiY&SB<%I7>vqB^{hnQQAJu!pbf#ie)J!oN?Hzz1<yb zLzoUJ87cwd<L7;Cs2|`t($61tO)7qTa^%x7nE(0bU&>1h0X%1W+v5Z8;$V07>{)Sf zF%vVBsF;Y+(Q6+*eth!$Da5J}`eBut!{x#Kdoc8aB?f!!?&VfnU$efkMvLRV2Po!= z$Y}Vrh{<WvlVkDaZRqjn{1M(}X<?R(j1146#xWO(99^9XiVIh^*1`Vaxz%S+XS<=k z4%RgV7@d!ej~=}-1gF5<)EH~{uwS5u!aiqarGrDBz_u9e7xdi};UHwDCgJjR&Ytdt z73HHhXN-*vMT7+Kd>uCzI~fBXsJ*aBxPN9>P<Hn>*rVpppC+U^FBg$nUD7rc5Ef5L zN^<;LRr`2*oIZV8R7}L&(kwD6e0E_bHYyC)I;rC`f6N#b+T4Hq(A?DMhf_Z&%F8}` z`sB!um>L_NI(-U4Fi=Yfy7LNhSJ#(OX87H^#>QF-3UV$^_Hi8N<p5oC{CqrMR#jD% zOw5cUBEtr*_C9>{059{Tw1PU6w2WBSjEIYge4cwie-7(PG<1V6xk?-{EGRQ{e-aYn zl0&muP!=LPosfu7Qu3mUn`>ER+3Mz6Rb?5R>nv=VxwW|t=ZJvd?7h3U55J3Wa)6Kl z!#vo+<<(`>%3>9z_wTXaumXC7w6tVqUJfG@BLIwbWrU8F#{c(U|K;xDgzL9sV%VXH zv4Y|vLP7#11v#w!5egS48!IO#6_3&x9^H#J@jmGtA-TOv+rKUw_x5o4pHpX<g;k<* z>hhb$ItC|9jrC8RI&~c74Dj^^-~nSe3-*q7x%oNwA3cait%s$-dQT_#y`SeEFf+b- z^$M$+eP(`Q^q4n{efs=q@0Bi7Gb0#lz!(JjA^Ur~zWHN1uU@^peziY1z!wfaoOkG< znp*0{CWe8*0bs`V(2YUrS2tFpqQgPDNk~bwG}Hi+?vA#_`Pr@Y)%CR%2;}>^J2Fy} zWMxpd^a0?NRTSZeJ0qNmON-YwR-Zk8MoCE_bx{Jmw2Py?n~Nh<PlR(wfDb<(Hz5f* zji3(oIVL4}$p9ZuFE?jzcNb3tRA)b=CzwMf7L?9|0R~%(^gy7NR5&|g)>a@r-I*Ba znOK-zj>_A>4nWL>TJxx?+|${%wXwdjz6uA#+{6e1A7&ONEp1J(pbgCpYv5k)-8F&8 zk%-XF+7jBp$DuuR=RjX17;FMULQnuFds~Dvj0_I@6?O)-&{9YsCN6qt<SORxkHkbr 
zfc1qguOKI5X=VaH9Mc$f-pk#Ul7d`VL?}8wHaIj0ed7W;_{pQg*Pv&??lsrfNL~;J zt9j<k83}O_A1`;zuf`k!q^BGDx1(a2!iuuN(P0Q<|Nr*h0xYhqTN8fn%s+GQ?eFW3 zA_)+PK@uWF(BSSAMd1`sKmmmoRRx6<?ry=|-95Ow1qqsjxLZd$>2%MmQ->sUe`&kj z-~4muvRO|(=N$Ig$KLhsckMd0mnG2xo)NU8qNHGsHwI5(-fah}vjgl7r@?oU|E!1@ z+aaMnf`&3y;gp#A;4DxeflhYT-X5;9(&C_P&dK7!(#kWcMg}M5`};dF7>#}V_nj3u zLvwK|EzBJs9fG@o-jkWBiK?<9I0Wz^unOoptPTdkBEJCN=9c<*Kl~6C9t`{dw-Na2 z<BdWV;5mY>6TEQV-oXa?IBQG7*8egqxIhJgvwvXR1|62Xy1Y<WQV7wgxWq-c^Kj-6 z5CJOAojn5r666Z_Ag~|w`=`&GQd3hw0B>djO01})aBY3%`;TJg@zW<AU2ULtw6rvi z96bUY2!`b7L8r8ZCD_8r0aOeJ(!yu}_H0Pj35l_T!+qbrdiDPOI}i^bkd7ZecI3z* z&I1QHI1lV$W#6%TFALj#4j#TkB3Rxt7a@9r8@^*V%Z^>U_w3!n!ph3V&JGSMC?o`r zRY_U#^yyPan6pd(JU$t$iinEXQ^>#mz73#MOyZ*0MKP!kp&2?8FWeALE-pdA3u@}B z#(1NU@Q{w~4(K0m-Mib`-fCc^54Q`c!@N@T8X>xulb7Y=<Aq3;X*`<-!RA38B)nBo z3JweI8|;1d62W}$K(`0OLs~|P4~bZz2G3T57#6N8FeE6yq|iUeA0i;QUN|CpW5F5W zY{kSco;rOJDjz`n_Dk39LPjt@G6=9w%grG<I|_>mA3Aav{>ydX09+tEEoc*N=Dz*= zkDoeyR!HbD@3H+SWcQxele7$TyVPc6<H5e4Yc~r!D?0}Z$3bxwqp<X<^s3?28+UKq zSOo|HRh>O207u-<yb0(e#+&zqV?Q@HH=LoevLfKe$i%?SomO00w6ccG1l80~13Yl% z%xONpqY%@5GPqI5wJXVcc({s*ikv=u5^m`~R*;vMjf@We{g_8+{MEM;Q=@qWIbPnL zX6B}9YRbSuX9P~e86H1z93UhpbU{{5MpH{2&K3XzEaB_tU0hl?J2(AiOMe7MynbW7 zx~3v1BoJheuAYvHsxmxBNht}`<Q3$A+o2JhF4oXk9~eG1F1oYp@}ox&(eC>Bw=ZiO zYG`g$6TG3MoYGlQ*?ou4e8YZ_h5HoyQK7@a7(N-xT?d7BaPq^(9c)~jJUoYZc~6`^ zeO6FVPD#m*>=c`v6cQa}LNHTMR+N#KlU3L>IR$x5ZH&E(b5Yf=y?X$d7nhXK(bd7} z=_)8Hh+Y&G5fhP=k_3nX;DO{_XfAn01=I7h$aLIKpH9q7B&8;k9PHH9RV5@Pz?UT@ zCDhbaFxr|}EXKf4U*Awq2l_m1t*xew)iSp*2h1~h{>7`e-z_dL<mBhLd%1(e)deMo z#b{wPHMBGk(^S{cQpcO&L&HOA8|!n63OsxmhInIXS!qDBu!yh(+*m~g1!W~w4K;O5 z4J8$2xY$n@E{oiR&1=stD(oE@8kwG~sILo)jwU+TY3t&Y)l{YBWJSa;o);E6eeN9G zOg?^oeu1;X(y}7bGKbHIvhypm3hFD_#d@Z7<A{D7e8Ox8&+g$qsjOoWkx`XZJ^cLT zt2b|7gR}sl3-<@aDqKFeyp*&g^4ujQ00VFXVIw?uK!t@B!NZH5Uzj&LKlAGKD>SZL zyS|!uDc;)F8Y;jYgPNZV@D>pnUR6^GWDX>`wF0_eX+?;NiI|ue{^5_z3=hy<yS~=g zR2Lf??a6R=aIz=bSiy5KGsgp^gTaD@r8x*NFK>^uj1=JWo3}RpapovcIJfWK>gsM! 
zNlo$#@S)IL>>NnY6r48G2$t5CBs=6KCg={zDoQ3MM?qYD5~yfT0q5`O=}NG+Rn;*& zE~31fM}UK0isypbaT&AIYUI7V5<7X$?mTd6C)bIi{HIQxzaSzZDXXlaYi#W9=bMq6 z6Q7a{@tUb60Z%YDA(-Po8qtpA8yr~E+^qkr^ByQDDUh5T+&t*cR2PDkC7{c|*wDzt z2+rKY%OkfiZ*+3(=KcF<ss*6>?%uBCv}8|jPm%+PU`a3|n1Wf8tf_7kA3tw~j~6UB zk{wZV`nUk}l9`h^IWzJ6+i%}|_x{QAXOpv2Rn-+yF;UPInMQVip4-tLyvf#qWb0t( z=HZr}ojEWv)ITy*P+AO}PjYfJC7A0O8sPMG^^6S6tq69`pi#({BpXu;!qyAN6U@+S zFR8BT9~&9FG7HGBZmf?>PNMrTY@Hm8%uF?~SVeVpDFsDQDd`Jh7X`&7#TAu>W#mtY zt7#fo^9o^+ud5v5=t*tnZb?T3)j3Z~TR8j0=C|b64x@Q9U%q-hzj&pzyf`u{+{4Qq zuGEHPV`Xg#4;?@b1~lmCY!CQJO-lwzyncP{(=6`5mLM2FHuwkl0yZc#GGeYS@JXe) z`1$+f7v{EiwB!}!1_b(jsz61n(a0eA3X2PtR~G+h(i1rDqbCmmgpJMhIeFO$iE-f( zp%7idQG$Z}VL34|4*G)Dwx-3Uxfd^A{8IC9+!x=zggRZ_m#eDFb8@o)|Cf^Dp+RtD zScXPw>#7EZdhgu3^Rv0@VHIvgcW-A)YkgK;fxTy}3ZA}S5X&cHaYmhd7UM2#7|11v z+jCrY_bH{_Cl$_0D9EX4V2$wvdj|@`J0U%zwxy}Ot~MnrGddw5Dj_~H9<k_z_^9}J z@P*ux;-1kFLqmN~?7CPCP8$Opj4WU=`Zz3vb*yZxK&?K3{@^<~1^F@YaY13h0l|Tx z5n-27ld5W~$EPPZ?%oBG-^>a4;>FFow?`&MYU*p!vod1iW1^xX;^Jdrxw5*V^>Rx~ zTT^vyRY`eiX+<e~elqCJfaGu7-gxnPGxNx!XV2DdtPhP0G_^LBS63F679q){3iH9s zatiW_D@s97Ev&9A-?%Y8Gu6=AoRO0i5fkMb=+E%?4G0a6NsLd=&B-jtPfSaVxs-rh zemrv3aP45|wYPS)4^K~`bCM5?k2QC;msC}zWM>6LM!I-0Y+NX&wsv}E1WkQI6|Al@ z7AK~xDWdHh7@H9ilg4viXOAczLrJDzYBeM$NaM)C$z`dPgY})$TN&@xom-$WT3Q=G zw`63cB_$^&UW$W9e<?XJBP$&?UcTIXb!p-0vnM~(A<tespP8M6|JK!4*D|e+X|T}N z)-*Xiwtj6D)X3%bmb#Ds`ebdFo2F*QAODUKm@kjmzx(ddlSklmQ!^7Fv%7mc0J`uw zIMfTx!f~PH_rLtp%dMCH;>C-*_wGPb;QpY3W@aa5u1wBfodZ<gfB4|l>(^iO8aM&C z{HM<zuV23rp4CENWb!CF@N3Y;Ou|J>A}$i*k10Ct70}u(Vz^t#@Pewb4&KzlnL_gq z3cHkCP*u}CGzcWt*3;G0artM~+}Y95-#0cl8xt2JEiHBG^r^!~k01)0DGSk0&~kbP zdchIlbuCSU<0BotT`ldcjcqM$otJ@N=av=$@MvaCG_ei}`@0Vx09=8nheikbhx&#` z2WRGHR<Ez!V*a4-`pp|_H?A>l{p064Y`pW}J_N8^6Y+qyzyIjbjoUYu)>ju+uFfqj z%q-5&EL@qHpS^N*VQGC0&GNH$^XA<05}aOJXM0UkLv=%4U2`Mgxoe;wF8?xe?d`}V zx3_(~_IBpA_YO`@Y)v~dGCc*zZ|vyEDJzdkN%M`2q51{dx_X#7IO|(lYZ;q}Y8afo zb980V(>8eHbZk2v+g2y(*tTukPC8b{wrzK8+qSK_zjtPRSo3~=%p_UKxo7XY)~S<w zp1rG{s;4YgM3<XmqbI5)^{Fm8?=l^m+aIJ$N;M^oO?(I3T{r*r9*j+6T~E5%f&3%_ z^LGbATbo>9z#k3Wwswc}qj@vAp6@y9G$JY8E)V$bo1IxO#<-4BKKGBC+xcOnD-gyV zR83P=!SDB-*?lNP0)fEjn;L<%!{abQU+|g09gsE%OG3C5q0tYh^pIkH?-xK-R}j|Y zr<eKe^RobiOya^y=>A%EXL>e@szm<LE#{9O$1a~5Q|DJ`gV$(GJk_=}Qnf^L8=JzU z1@l1FjdRc6VjznJ4J-u>^CO9$!w|WxqqiS-%vuy;@PAS|69NN)vDXBW69duWpO6Y@ z1gH>5n(A6Q?Y>!QS}>!3f4GB5+*5ttnqKY8or3ChdVhSp22qF?d#xZ;FzEJtN!XUP z*!hvAt);N0iy!rgO*q$0bOuk*j>Zyhdiozgi7B=9rB!aWLzDoE)tH+&1-JyBN7?U( z`;vX@X;>z#a$X(z`&LhzzrOa`Lkm0mX9|^LXT*Ui*)-G%3iw8UFo)DvA(M49{AT#- zhH8ZPh$79niu$S!3a_&*fZ`YN+iDx!4M1HR2A@Z;>Gao)?LMcl5P^r2@9~tWTa}uF z@nd*UcV%q`O<feGSX8Dcbf)Mn2E$IV8U55+rVMJ8@nC(SVnwiDzZi;u(j|=#4<v2( zf76_s$(4Vx6z5@cybTY-jE!k`@lHmh44D=W9c4hq*Q6x_`9eA_(k`c5x7)S1wc*BE zTmhMnPtg@piv{1%`y6s5ugB0d;mgT&22cW5F|X(xV+r3#Jm|?-I0}DIg1(Q*3>sYA za4B6ar~^F>jci?Q5oS`Bc@tYx-tX3r!@|0WZv<&sR%hn}wdJ1&2M~i4vq1xwd_J@z ztY9P(I@{vv%Ve}A=Szup4OWnC2jX{2R3&KmE3%aLr3!z#9BqWD=cvQBx_w&U7kX+d zw>;6}X31=@N6U6V%`w@dG5W>&`VFvA#x-brzqXT3nLSnAd7z*|bL^b);#5qWXdZSl zV!f`$mMWiftj#U+>pmSVT~(F6wbh-K)umb0ISrX=A)0|a<R57nMpP0#y-$m?-pSw* z=@o$wi>Kb$k?jPi*T)Qo;_;8aBu|eNo@T|c+tvrO_s5iLX+II^o6~;7T&!jBQ0}PN z`9gkN+|51gfx^7#N&3GxROaR+NV~ux^y7n7hf9tJ<BC5bHWzuNaV5CWg*&Q)A)|ip z9gT`@cRxndY*5%#{ig4qsX&NGtIt*xC>v*dm?-+Kvh4#5j+XivXtevrqA8YLCXD5s zI8HGtLlb)RapFk}F2`PGBJ_TJD?M@M$*|Wpf_m&@pb^9;WNT?`E`^0Z>5U~sR0rx2 zZBIQsH$S_yJU<iV_-xj$@o~;@uyeEXu=uz*1f)S_rjfZ?X&!j_^U31?L|GAE7>YXm z-o`z81$Hy3BKfZ&5+F6fI;IQVz;6%<{Zltvy{;R|SNm@!cy}g(NAB-)Eqz&LU*)dB z5)XKIgpkOOF++Zm2h}<lzi26J{IS7zzA-&!g-)L;3sSS)@OjeAK<lHj338}faIL{$ zg+5AVG~%oB06jK2N!v%!a5?Xrx2gP6IUGO~z)?_gF~^2dE~TjD|3!{`9Q|t=8$5>t 
zhKEwrn6K!z#uCo;PZ)c)pnWmtJ2?11@?x|U!A!)v$0)$x&UVg@dS<^%94y-0A7lbB z0C32L@$ri1tD3DgHxm;RTNCWQk0vYsx+cF7NZ)&)NPPH|2i%TGgPCnI_jmZWUt^B) zI@*ogwO_FtfH!{^S;Fc1i$VenAspa~55k)O;%#98BkpV9>wGnK-TNw+(h(X)qowII zMzi1G^l5~u4JLfai*+Q6=96DloOi1r!_1ZMX|R9$u=d%Y*@$Y%nyFS*Z<Oty?Wb=F zl~en*b>EJniVR7>r5o{hy;Wdv+N}Ex8)>CpM$43rxIbF8M{SPhyF?@|C;-hbXVk@2 zWb-XEer<=PM+WAEn!pz^1r+{@oSdMlEIY?1d^Zey4?PMkEEM?$2-F@V6*gDLtUflC z6_GW9(k7*(np>7@A=6P{VpdmH2Rgp|C46ldSuJ<^Tf43N0<E3H_b&ZD-4pzc$<UJT zqcp_$d5GFo0RL;p|17NvE>;qUGbh9)c(+fuL|(<|utjOhZWB63cwR|{j}NJ<7(H2} z*p#uqV3a>#iPf#ME0MlWQg-;|Z?&AG#`aJ_s!7$pHuI8V@hs=>EG+)tOC+5hAMIu$ zOz#p+KK1ODvJveM6NPhI|M>_MRoNY7W?yRr0aUrn1UvkFcPuvjl{f_$eRaP5h9N&- z*+r0!_4N%bPJb1=KnW9O=_;=+Y<{w+u+%XMa57Cz`x#DW^L3<!gaBbKN=Z|*zO4#f zhKR?-ML}?9VtL!U%V-z@SIdg%gN8;*u!M4*MRaOtnmNdCfeVNLzRusHMn^M~<j31) zIA@^KdR<>v1Bk#Jja`tq*I_@t4^I98W#-sx9}ynOYXMkI4mRGz-85zb1vVwv`xA9| zPTF+1H#f_StKHplRifAOe;eMd7UB<QhgQ44SQ_hw)6=8O)2QS3v_ew|y1mb_;yU!k zGzW^*Gjs=IADiH*ZSXYDI~~Ut`2So6Q@3!-Y*NkoXKO6;b-^$96n1{!1a*3hXax0c zG9z@VZ?kxUP=H)=^HVf-y)x-~xqyC&G8p!BBM$wWSlAlfnjhvNA?K%Qsmv?$^Y*4$ zEch#ySXW<r0WopC(T19`n;0A0lGlbj!_3U$b8<rr_*mBStBM}1-0}jnf_~Q4w!?@E zLqEMe=7i%F=LN?S5k}hx#GK_ekA6(&FjUiviHU~mp*7VtdwzZ*nwQi@JWX$W-4LGo zd@fZ@CqS}(yx}MfF-+3hkavg;mr+;HOiaiC6D`-h+II~7LN4>}ZXS%C+&CofW=}P` zXK&GZh*&)r8|r77S&x>T?V+nO)w??!pI1fsrb#7@GtHvQmHIUJzJup>L-U&Irv?K~ z*@UQgMO21ex|&8t-D@)27PZ#HKc!&kIKNBTgJiZma?;<aG8qJx-doxC|E5sn6!Ww+ zKdZf%W8ABIkXrc|FFA8)3W|#xO1f%@8ksGROiYwi;|+c&Y!&#s>U#pK6I@&tKi{9h zF2OEDMMDB8K%Q7}M0X~rBqVOzhqwW}D<5xfdrekE*FuYSyx!nB`u>7|1HRt-aW%JW zUQq=Yn4BM)|NMJ%dVJpRKHn*7boo4Q_jVhO%^+m2H=6G4{Jwcqwa$kLzcB#>Di?^r z-O1M`aN4$@`OS|rajRH@P462dveyo|9Gwm)Oa+aZWq-_#t`5g{c5w#s9kboWD5)sU zHqM^aTX-lavFVxKrOxA8K!wwCwaw-V8>{-_9Hb;aF$KeeB8*54g0620ni^@i?j)^o z-H@1z_gn>}9p5W^i));4I;8<h*>EW;Tf~yHc<Gf^^_32_U5?lDm$0xD;WGB5PX3G2 zZxEcR)Xae~{z>09oiFKAf;65&*<;0g?}DBFl1m@y@gTMN{Zh!!@OGe+N1bC4?C+Ef zN2NSJ^6%f&?A8FTG9S1}2;1W&*eCSSvHZj|{GV9jzuOI7%hfthPp+|B-7O6iAdA<V zQ8T{g>d^jX_J);SwLNymSkz#C%`S~>ElrJXZnT@7i(B*9$jFeIp#D%hJ-*Mns;b-< ziyL#`BipU92vr+kx%?jYS7VD6T&rmgbqZBBbS(rM*MvC!1mLIIu9pi7@*S@4NBv{W zGw%%?+b&Pe!VMN>qvtKx8-8CXgC7_D-g16+a(>%uRa)~=u=Yvy^$F2ou@u!4rlw}# z;4ghC#Y^Pj|Kh>7l~4@~^30D12L^Ohb=Q_WJnSt|qE|~&Qxz2xyL_-tu`TQal%&+4 zC@9W&xM3{M6k(!B73X~l_5@MaT0Ka9+F3-$b#f~NByrZ;-(THxtQA7jTj0`mNhWG~ z!VTO=b!>2U>aOt%Fgn3Us|WS7tiGbD7gmZ$2dUtpphUGdBfqDjp*IL(cpTTDX>-<f zcjDNJEo>r`%^{R2x}#;u?sTg7(7^q&WIXzb*Ebfq2f68Nu4w7$prIml4sfa@SY!tn z>5@-_9aY(ckGowpH?}z7?|_zv(G?fJJch-F1k=|xGyqdu&(6;Sc=?@<&~S0jcS*y7 z0>O4sQyAQuw|$@TOIvR?R$08BNO-Q+hiy$n1$SXS;9yNn-FpdI=c+e55b(MGW~X=~ zNg%YkF|2HLUA(X4`Jkeb#yxwsbH6`b2U*TMW^xbeDQnwbuR59c4Qxh7N5g-a*vWCb z$pRWvVq#py#gQ6^`ujoEdZlk#^tJ`<F2BGaAh<A01D-TAG@e+tnIM<{_V{-Bc`M7x znljw;?tbs>8D@3y@fjKAe%6ms3xY!qQsfCIrO;Oo{e-Ga3n7_`Q`w9r&~n<}9e=7j z41#$I<kaK4=H|+Hh<e!xT%u|Eq%pyV<+$?kZ4;GQPm9)zR?4C_!Opu(*br!{k6sbb zL#fAcm00zu65KLDa#T<D6c6bykUmmy<j>u)7hhfFZ>H+@9O7eEnnx<#8l4K-#mB}? 
zO-s~LfbXN_w*A4ZPXoN|m>jkMC}6P;3K7&pMR_sk-!PRpZlY5*w#6C&PtVXc7<qwS z9{>hG_H#hk<+166T8aEpC@Xw#4^ADTzoof3aA_u5&+i^y1gWS<6yQ=Co{NIiVa7p; zr=x?fq?Cxy!(r9`+dM8KtfQdh{Pw(sIIpi38Arh0AC|iJ6$=OZZ$_H$V@nTl@@zd6 zvIUjJsG=guw)g#{uIau#(Zk8<X<6?Xu+p~n`pDx-Xe~Yt%?=l9tfpLIiUCGW_CDfI z5oB<jJeget<!j>ElDDUjNfoM`ZqNN6Dx=g|Ww$XXCI)jX%d0FsfmY4grWM}K_D8xT z8rTXkaEFc%xL4;+G;5;?>{Z0<?iI+(9NHS)9{!@=uaavq<Gi6^64$&iF;EAWK-b{C zuqxV|3fTlTxYX2z#Y+d2*&l2g8H<*xrna%W&dMFD^s6EJJKQ6;ukFzV*&t760u;yh zaLWfIV7?SqpE2Fv!x?=i_!q9ZtF!6bo+uOA&6p(2)|M`9C2cJacz<rMlw>5py_6I> zlPh|5j!08O5DA9>15_C?Bah93Dxz}28FC;|0~Ie|aA-(USp=)^X)5c@vweEo2O1te zROQIwIX8T8Uq-NzhH~URsP+duKaD-4L!u7PZX+;)PDl5n_ofyCYil!z01pLxS&C#e z_m~QpO0Eh01%cj6^&u==Pg8El(g0?NdfK-e11E5T7j1nJaDZ+<q({@Sx5;8!!xwy_ z0)FhP7=$3HJIEBPmk}?|y};P2xzuLwvVYum#Mg<s>Vd9qQW<jFcegaF>#(?<`*}au z$7j(}yUO*B=IbLMi=6@HSP16m3h-j&m*KGR1nF_gChEfsD)*Ellz${rWsw@UTBApl zj*%l-Gku`Ej-US9;{e9UKu&&#e2>x-=&>S&8+A&EdwOv$)Q0&}#>;Mc{<<5gQ5gRO zR&!mesgYG*Po2;2iq`zy?7BOQ6SMGZ=rc?n9-nq@@U-{v<aAFT+Y;1s+xvx>lM{)| zTb|P`r%RA&I_adt*rf0r_Z({IcdlCd>&4+)!LU@zRcrg~U+tdrgHcrA-u)Q;psv2I zpWA(@_a9a%Gb1uGaiQJb%J$qUf^T7EWq5W5^48C)Lc9Y)Qr@*ej+v#&@rhB0B;FzD zq)vFuA)s6HygT0VBLv?*C;5{3vFjWh{?{QbDUoN@nMV1!p2z*>wB8@@r@eI+%xHD+ zMnuDnBD#S?oBRwfvu2@pit*%%<rvBJ5pHr^s1!ogU*ZG?US!8X)bvCy2q#civ$@~! z`*hW21y;5o%GVdTC#dL{8EJ`XDw`{-A^$uR_q85*^?hA^bi{8!BR#OOsb+?97!gl& z{2stHOe}vOJ?#*Uw_N|D_?Pz!!W`B3PgE2O!?uF5sfeg(eg_y`5NF%--ufXfhpL^e zc88CbuL(9EYvZ3m!)~8%A$4&i%}^B=Hgx;w%E~`-B79wphzfM&dV)C76BD%BV#X^o z2jCILuw4^<!cg&gJ<LH%`@^x5m9%EPSA<O4v+I0Kjo`noMrKLvnep)6e}FsHM>Vv* zh%U%D!#@C}*NL4&wq~XVaIesR-MO=H;U{iVv$DkgsVm}9v*V9vec&&aoWCzuaTybc zxpj=S6A7a<Te|YQeP2rY1GAK9Nz2Sm;f#$E%!vB@$1675X8%bp<zzuKEIhrxx+1+0 z$nik=bBol6E$YNG;lXpib8T!nwbN27qYJYbY4MOuX6+HeZMqrC`U<(wQD*l2Fok=U z0P0?_dAQvEi`b_`W5S}#ovHfU^K>&>qCzwZ<WN8^d_KM9At86J1%tn1bSA=4)|^W^ z@_S8<%xLInbbFkD@eb~lv7w=hITBBEw0nS~Jby<T-N4yC$F-AFLvD#b3}AM*EW)1r zC{M;-Lf_h6owG4C6vweMizgR{;~C_i7}CT&$PtTxVfHbRoSEJ8#h9e95*Q<|$ZF0s z>JOy2nBP)RxCu$W5n-He?wa!3cP}&T!Sma*K~l@-JqmWtrqY1Uf0yz`o8Fw{DnD-U zq(x;8C{um@xuwfBig||HwVwk$rVStUIrfCF%9sToAyWg=@<_eS%-850&=DDBOMHq* zP2O-sENdfBEdW{bVB36c<bA2zg8KJ^){L<OB3iZ_MQBl2-t>6N9x=jSmba+3wfxc1 zOK)3sqGqE@O+_mjVU|>@vTBKd_v6ni-5O*vcNB-6Z$B>pST%&NDSUY{ODYya5EBk} z%o7TYC1|ATdUSTV4r^60BK3mHL4I8AJ=Dp5bn6CvG@D}Z*jI5~@5FCJ;Ej5~5$VCi zG}~siT%3{$Bmp!ukd?vKj0_8u<`eSx8~yLRfjNHb7};TT@O065nRxaVGo*}ih;;)K zdfw+5SYBd-i9TTQw-UhiAtVy(;0%FvFF9k*lIl#1ACFKBNt0l&i<^Rrlq;rq%1#9o ztnB$ed%kwm2;?O^U}<{J2rNiqF?3cq<r9#$)&x4D*99~bEr(;)eqK|MJDow?S<HZ3 z4m0XfG-6XY_<EPKrZ{nT1V=wbrcs~m!G|}4o0S)&)&P~Nj4glLnz(ovq4EMVkei#A zp0_(W7#G40yG9Ov&|;^Z_8tI$aud8g5cfRDYfux@mDT?!hDi5cy<CIJ?JmuOvJH(r zVSL7|Ts>YFB))XS?>lJe=zz*qUGA4_z|i)wn%Yj!aE#5F2d(P+;X5L5vl8C8+9ai_ z`qp=Tjlw{dtA{H17mgV+P#*0*Kfiy+9Lbg^F-x8f++c8XdE4Ze#A%^7MaCvEt@#w0 zcVjaMtf8VpSSfMGL>}ayBG6+|vC=Y?SS{KJgf^}uhx8OA0*7TrSB{Qp3#qT3MKCEZ zb<n@x*$cw7OOWIi;x6YtI(1hjT)a`PkESzEgGhk#dOhmU^Wfh0eUJRkpWSleepk_s zG2#Px!q80vE-Z;Ao|jYG(WE_!&Z}i2hl7l=`dxDRKC)*o+5N9sv#<x9=FL(o?!%%| z{2Hh>s7j=`JS53idzc5f_^E%j;#4$!gCjfyQN^`GTQGTeZ~+AN&}TTg5b~?}HopD$ zlt&=D;&9_W01M-3+c>h^Ob|zxXcu5ue#XK7!+*(Nhl`5~EC_V1g9pee)l|~zb=5f> zJy02IXK`h-aI@GL7$8gruI%jYVrOU8HvY_wgKR?>hKIg+$3DW(x|nSR^FEjw=Ku;R zJaiHF@@5JfND$v};bDqkW?ijuaP#_o;#@=#{1R5`$}OfZXe6Oz&pz5013pDq+mWE< zLki1*(0S)TRf{MXKB6`PEZ?soQVSzO`8!DdM-)?sPR(rJ)5{Tinc@DCMV_A5qr!*~ zj(4V_E@pNpR_*DN*AcByy^cqH+!nuj^NzyWOqLBg<9fNVBebdK#%h5T>@I(&P-5>f zdavny^`h{Lx#nYKY5OB^B`*ps4hBtqO3KWa3+<DQ_*?FjuJ@<9f&qVkLa)8A5P`YA z;I-<6-B7I-qkt>K?4qM1^Y+CHWXPw&7Ab{XwqTD@AW~~#C%ESC9wSgYP-+ZhWYAuQ z&2}*2pW&7mM>nJ|IFrdRYq<Eq0^SEm6ckVmK`W>G=Urbn@p}H(7!x%CEP?iGlYe6J 
zWRPBoMrVjH83o#EXE?Y6kK~2>&-uJ$v-ZNqL3#wi>^ZTaz2t6k{JUhcxh3G?np__N zmnCwI5T^1oOmDv<@-LBiI9qN{7TvPz(DCNj%MrNN9?AzSuhcCyCEh>86232UL=Mpw z8t|3Ys0{1nk{v~&=yR`e(=~hG*90_%YAPM2G`@BEO~ZoW!z#gY@qIe582z>=lcX|h zwf+TbjYeqAN{A1Dn#DkSCGZT8(o$2>$A^XC`ZQT%-VMSt7ZX!a0gKGTyJMhp0Q$Z6 z|A}}MS9^ZmDj7d!OsdH9pFFqukO~8N?f0k4px>^HunY0=@##IT@Vq2MGl-=b5~Dml zUq@C*SpK6lR#w;=*gFn9X`Ya0VZo4klD{1DiKBbBr?T+y@ZR1$mDDhWdcp2yO(7tr zg)@L!zDjB})KpZtJ|Na%Hz2YE7;$~0w@fF{L<Asuq2B|q113+gu?W)BJ2JPWW|tx` zcqUNcgd~gb%7i5Qw?|{`q3<f0ug=~yYvk7gepyO9&LXSGuY1V~UNGl0)>v`!+}Z-Q znrn5OjrQ-)yjehbl+0vi6TYg^@}1tI-+a}VRA$Jomrv}A->ZgKSdUGuk05Ms1lV4- zWRbZJFCGQ8utU4Qif7FpKQtiVIS(&n&D-!7?NJAj9%(Cy6oJcGko!`&esxuak-jy! z{>_bZjZJA<kAsn=$Ni@VfMQ^MzCQsl1_jRE3OFG&UIf*E_`&pai$3{^i)(UN1gs)b z6_J7tBd)EoD<{kl-a3T{Z+3B!w?KPIO->fIx1XWBKXfJ9N_k?365|1e=EBKw(s6V5 z&96^E^K30lgh*CZmsRPsdzjeWOdK8Z>WfjHAU;vC@Bo1@d?yEcB$*8%5Ph?=-3>j< z)7wzk!9RK6W&f~1xh~DI%mH4B68(4Ap`d`fT(w5s7zwFmHrAP$B~@S=&ak0Or46)k zIZ&jsIV~hTyN}rA*iR|9?ROGjr$qHS^3)4o(K&QRWs3VTmwS_}z1qOMs{Gy3=KHSV z+V^_g$W%nYl|O_}Q5BCGjlgBuNWJD|BjrEqb-nNB6O#%|D$eI$cB7omM1S2}B!0z_ zpNi0gWR?Ve%Ci<Vl-X)ouDY=fIOoJcq&VstLEbLLFH9sx1<dvHQZ$koTI*qAp=M^o zN4|H>bUS#Qs6^2pamKEut~NFf`$Ky}kpGf_yqK2NFNo3~l%hS;n{RLQxo8%_7Fv4d zVl4$7xsS-tLRNw2Om%#3t5rL#9Hwe&&D~&dP;ag8pp9@8kXrno`rwV&Z!kXF2g)v5 zT0$;5o(dMfWOdagL*=9cwd{in<cQ*rY=1c}FtseOGV+#~+%w#<vKgxH@7z_D)Lekw zahg~!;yKklj18P!oyA0OMY8}M=%UMerHW_Y@A(J`O5=T{4Z6tf5^4V2z5a?r+q3je z;Iph(AEo(B6(@R=N8#MrhNdGTU7&!<5{t@?V1~-R7uUoWcAwA-1#|X3f*be8MnZb! z9jKHP`jfmM=&31nJ-$uoYgcTq=Vku1ji%={s6+4d^YrAp;<W*$r-l5!DZj6*EG2s@ z@@VPfY^^arp>-m2=AZF&Ev0d8=<6fu)|A9@zvg_exc2%-56z4Syf?Fz*f9se@KK8s z%}P{;2+kXO<~;n)FmwOio8V%S3q5;mW!Lu&)RDV|^`+Uq+B%(aZ}~~9-fEiM_-7KZ zA??ZsyZN^_lj|R@n0pnXK|=XF`~k*)bSR>e`yvzjWZ_ial2cyb@Caq?;^~monqra} zZ&rH|<UhS=s5e(B03kk(z6!P`Y&QnaH#ark30H3I11k)&oc4#umlL1At$Owh3-l~u zJ{4Aa4@9)|^hA}MbX8<56(p=pWyK96-N2cd+qmS7t_#gfz#J;zmJalibr7v)NbkAD zs<$q4x*TM~rb>Q+sF2!tL<q-Q_m9+^3GFT2O$|ne`CWw6RnRQAO<7*I<qqfX^j4oZ zi)RfLNIphUt^K~pVc@XcM4fw7t(Qw`j$C87c9s<o-=M~hV=zbq&&f12rNx{Cx6dvw zq?8%v^k5;R>qNhQ*Jw}V=zq$SD2r%=;6hhPo}^2Ce@jbT(|H9Q@#DswTe<!Uc|6fG zGg9+i7xNq*PH*p^N!N0uDOZAtG-R$1LQTL%W?Jw^@<Ox&bW(x3Yte4O?Je<9FX+I~ zFWl5;uV!!%`dLkm>?AbJ7bll<A39LRkwNb|n4he5GyVy#_Pa>kl%nBi1!wes-Mo9M zyh9+eYgM7t4EDGA_eEAz?jSfp*}Kv6kne$2JMR%BPj#;BFm7y-(HLb~_b#L;sS%0w zcGmh6XJ)?XqQ7zC;*YiX%plmckT@=u8x=bXPgiSYZE<yX0hoReC3vvD-&lUm@#yR8 z0kKgJZc$F-Bw<GU{<2xQf$x*tAHA#Vu<)kVYT}AYP&_UYGCm@9<d5_{Oz?!E{t#2c z<M2*V(?4oM*p3z4S4s{Ly>7C0w41(?QIuST2Zd*G1rk(8s>Kn1!QYFUmf<S_11)me z?_bX*(XSB~6E#LW)t$iYR%+*fu7{T_SqYfr0SgX&jb7L4?+ZP}Unta!Q=`oWi<o~v zxzI8Pe?-hJohkzLe>$w0_8BpVg=M*TxT28G{k`-3kYn&lOiYag8|#Ra@H@RXK?gnz z_ehmk67FCE2r4wS7@i-Ag9V;ZGr#AQx%|S5{Fv}w_&|@O9%oHE#D3%ObO=uS`-_pP zp7eMZ?41xJ8|MHLc);*B{vH5u2!2MROzHRF9MXuYc`ADhFAR-sn)yn&07E?VI3cGV ziv-FJd=d8!5!E#p-J0_)8~k0jdy{?#VZNU3h_1S?I(xyJj;>zDmz?PM8ph&bXqP$G zJ<8vh$(Rh?i%x%T+ZPPc)m>qZ!EP&)R3a;FLK|h;JB6zOLq@T=U&QE`KAH(y(qv@y zC~|WBNEZ)u5g5klSv)~!gm94@4~mk%T<PPCC47v8%_t~8@=nY2g!Wem)1*x=rsH23 zLI%JRp}T~6K_9Ypbknru!F@2ap|ug{7||XwqBAH0q}hnMcA!P*NKr*Ii95I-{YKhO z689P8w4p*{M{M~DFPsU+XC5n$RNMqguS|FK6la)W%gIY@Nhq6*_m8p!-Z&HHs8?Q9 zo?*<d<C<5=j5?%J{*jw~yZwk7kbIiX?%4=E*HiHfHI=3uO{E@!<GV+&XuU6O9a}XS zPhdV2Pm7FsrAX3wg2v8TzHh!8ho3UPm9nV69q-fRJVCXY$fRs`W~jF#8UZLD3!Xp) zYpiapZEQx1XKkZOO??g*y-Ztuowd%CcyRU+@`Q!r0J+?*W3@#<7sb(mSdnQj8IaWz z>J`Q`y*x9x5@D)`2&J&Z#u+z?ek$wus=(72W@>)9`&e9;W3hsaEmM-UY{z|L56231 zD3f<FKRspJ2fPf$yY#HU%uu|`mW`LjYi}6QSn*s5d|akgH{74MK{O&6IL&_G_VS`W z@f3z{YqJWJn?FB{K2;3*nM90NHav_)GvF?gl;Fwd{WyW7<URzuJG^vE!n=<hS#6L| z56^g+&mT)1e=oRe&lJ9El8*3)Vi9BR9f;qRLqvFvl>uZqQMCY|drF%L&>V+*0o;C% 
z)A|BW9YL#lcomr)cV4cqtTnXU^x)=U{LmlBMbDXc)X-}C^^W}sib90}$BJpftRmE# z8?%EWLkL&|M#dyTY^zmAPc`+%e8(>fXA#+_(Hn2O4cre#mep@tF3c&ZPvbmP>lC3! zPfJfVME%8Xr>El_(R4nZ#o3vln(qkXwS>vX-TsO5wWKSM;K8N`XzZ+d1bAtGwX}Lx zSNSv*=7pYRlN6gELd|&c>XrVz&`4C&6jgSEG)igukQspd#Z<0)ckoi(>uG=ajkyk$ zB7%un#`*Iq(G8>bm4*kMa^;wk`(SLf*h!n!EU__(86HmBj;ez<QI}2n$7Emnx_JgN zQFlXO>zbFl5|}U+GE?7=Q6_8+B6P<w8er!|ONN%9;1fbZ!nCxl;B7I=n6LSZc{H*d zPi*i@QqoBv<LjKXz#;e$FvAmELD?ntuN5+c`yG6R34(WPYqU2#n5|)kl{IOW>D`TN zxeJE=jXvU*Y#`{q#j3+>+VFIEaYJxUT;pf`C|Bg3@COTbg=b26drgW9edU(Nn?>c( z(dD~CYNX380q?O3;Nu-ZvX5e&Nwit%3=i}#*m2!O3ES$6%E~L+N(@z9=F%Ef<hSJ9 zogE5BsSg>*cna;YeRj9RI?|_EQg5rHrHG02Z7m#=XHVe0gNz(m7;hX0wQPvvQW@`D zv)AC{pbY4T5-tzL5;f0Je}m?qOKrA4H*X?ZMBYfff-km023yc~iTU#l{-O<OnR=^p zODl_;Y^<Ds#vZO_CzA)vg^?ra<sPntd)Rm1U~VV+)w0p8It6>~aArXV^rfXBXE{3B z0Cn_#aIGb7#+6#~tx<YNzph!`0$EP5y-fdE78hH&>t;M~kgZYC4vYbXSe)J6cU@g4 zb6fMCHSh`ferJU%yzoPqA<xS%w?aP9F%!K4AGU5zPLKDipx`7$9>><k^2>mGZhw0s zKX(7OALvV{<+K!wPuer@Z~3o1#Q$s~aP$eZ^{=_`X~N<V34K?Fjs?o%d>T%en)u5u zp)U@zVk|t%;-dU-;^l@vf|v_84U3N-uy$8%dQH3LIULW2p^XXA`AG4dq&mA;;(kU< z0s6s-UPXp}iDM<kXC<lqRZxT+Yx^VdZsX%4*oO4W*5qrOK=fO2DQH?xb8<32NYtEk zU_NmMDC`dBU!c;`+~)vWZYAVUgoZI}b{q(Rm5_dlj&wqdE_{Is@`SQ$5<J;_CGEnc z+WmuI8TXyRd)e3==A$mEecSu67BrICjGC^@&z2tGw&|e_=;%xYC+_DeuRgf*mf3c+ z81C<n4(l;v{aSm}D#K28&)@dIcA9Ef>wV4L?Soy3si@6XHsh-bS;3yr1(~ReA}_;6 zU$6ZrNpZX0dKXCz1$JvmSVyzoZ%v{MQ`SbuzgNzLRT&*Dm(9Jzcm08zGw6ZA7QN~7 z<Z1dTbUu=9CKB47mzENNtq%#k9eD;x5^#ZfLVyA?1L_Y_Ms9p86r7=?prWj$wzZ(` z)Yes2QetFZ>t+qN;_hzxMk?wzcJ1$*^~SY1P_lqnpw-<XDjXD_meyBB)yp;FqW@TY zWgAYuLn(M+FLKjrTx=UFE&&@U7pU~B&`S{dK@uq_h%v}%tI(~pt-ZRNGNn~Gxm!4Z z#0bN9S1n0NfWON%hd{|eee|}H9qbsGVPH@@$|IpAs$hg0xn2#}5t@^xEnryu09pJ5 zk!bX+YVu($$=zb$+@xTHsPi`t)l)RK*0m9S9uQlfcmB?y<ggPU@+>S-i}fnehFo!? zZo4UMEsu{{91*=Klcm~K%ez2fM`pf}rMvF9e?Uk2`gisj{zp;;%t?h$7}`2o9KlqU zo(SFlSAh5_F>>!KpgBCrIakA<&>v~R*FXlJkuf+C9v`2MuM5Z$`>$->GR+78^lv}~ z?=ZU;v$6sZK>y<98v(e)G2^5ur+~UjLe;_<xy6n&R=^&i0V}9ZcSbBu{YrDY;5)3n z@D?B=@`YJp!0L;(#Hb&mAdP7;X+<gVb&=J(4s^%g)jLB)(ki5)8E<HXDhTj&8$aEP z?kQKIaKLe2mvWZj0TN*TZV_b+wi!m<qRou1k5lZ9UrMO~vs_Xw3x#*c`d(!3;I?dP zx-M=i!`|?cKYDeeR2>M$SLh5KwddI=t8U+dFi~lqJ<aUMQ=M_v=926o({pd=Y;Ig* zFpQ~3AO-nXIgp(BNqM=6*qK<V*r@4HdlA#2KqZh?h`azrNH?miY(cOK&2Ii5wr5v? zaj4!PjWdVczJe~e>Uw%sc6Lr~ZboKm!eSJZWTkqu8;WE9xEjH#Op=mHFfjDS=)%V* zgiQYUS%U^+`MZ|;-B2{5jwrLL4yI`tNJ}q1!WTJDa4nYksTKav=Uc$rqy3Ds?5J^; ztFF)pa~NNjO8<TCu6%Fh5l|T4673RE&0w7-3bM`Q@d-@y{3!U@qw{4)bFa31*~Nk? 
z83x+m$(jcC_x&)_m^P8AI{A!#Ful>yKt})r*+`ZvAx-RU2k(x~dw3ywt=mM?JTx6y zK|+ksAN<3@@EAIPI9{^F>Dl!T0U!#p5;THr4{Z*96gxQ)8jf;<C<yTdwFqzdv!)yp z&E3ESpQx?1I<K|6F4$tkQ3uQ!>2<Nm|7Ii762Nuip{el0nJ~it{z5xEYd~p^Uzm?| zj*Sv6U)i1yWpf<nbr5QBVQY0}2q9X9?ExO$-d$Jb?pxV;y+#^+7^<9W=$(us(0{rN zH)ZIZP{DQ9nQD#*(|(NSTFE({>^Doonxq2!Hvcxe>m}iF#VY-`#CVXHoG`tL=<(yf zBse1~3!o?aOltHO1V?(ps!fF&NhyYsQ%ISn^c+2HzTNkm3(0r<#MAOHJaL=MrL&89 z3J_OgW&v-xx&RVkZ==n3!N=Zrje&QCjdO~Mehd`O$czL3K%@ug0K}0`AOsTuYI<Wd z;K0N*KW6oiXn@@fTHFZ|=>UB#>A&$A=p8(q2e>FuA&4;lufe`?#6VapHAGJC6IY{z zAdp!MF(S*1zm_;yqdU-@>9QS3r5DuBzBY&o-bhrnrPXHDM&65&@qHC%js(Y1MQSj( z!TnA5hMlaLN`RZ>O|=SS_#RHr$ETW!%{rYgr_jdhIan>y`gK%Ev58Mf`t-0%CHp;v z{4tFZ&|RxJYQbZ`Zi61{aa>c~)2J<>4rmz*qhE)GJ6)aeOeRjAp>aW}QLcgcAGVf- zmU@VhH0}ftUlAO9b<Eafpfk)WK*yMbK#zn0F#*R^tS8M|n%mndMbr;6D!7(I%LzjJ z;|m=r1F)FO{D7k?9T(&cnhC68@vnP+2zWt@F$9r!a|FpVF^@*#Sz7otJ_4@7{Gk~G zq*LoCXWiL=fmqLqmEU5<P0@6lKP2o>7N5BbMN%5t3okW&)fb~>rgj4z;v(D(jD-XM z-Q{KxTn<p&JoC-juYR=)jc|kL4+rd%2?gguTZBsw`l&8)tWoo>6?3^M7V{MgnRg!O zS+0?L(<iM^#jLc}yqaC_aLTqW(~8c%v1wMB33Ua$OY$<)^6LwDf!gx3NL--zL8Z<4 ztpP_AZ7dgj-gwx)JD>nfq%D8l<z9Jzrk8#J9BU|!0B&G*RJ|O)8L#(vCI~PUFqbC- zP3FIN`x6QnVCSR36=~d`3|JwSOrELZu9SGqNLVNExC44Vx-xGbK6;M!vj=0FN4sQn zia$%;Kx`mO-5@epWj!beTsXKg)Ys$29@ZotGM3J^iXJ=GNvXeMQ|H#UPBNeRo4KwJ z*fXt){ksLC{(MTnfi?0G5$<b(G%^I=o1n57rIME;4a`x$3!X_=ZQ9}LFr2H0k6hN> zNGh=xjT%NLJG`X}z7zJ@i8%VJqul&>X>oRdI7G~CGVuff&j0`eiH!cN&M+qw^@9WJ zQ$a%cEWLWU{1*`Kpi`4sBe{!xd4kpF8f!bysyilFMyN=alI=sk)OiqGmTS<=JB+hh zfjI(bS+Q2Lbig(rVHm@y*PcTvKh8V@J=L1W=kCkVd*7~7NbqOx791stC|D2Kh>@*4 zj!6JtTk~q`aNl})f28S)lbtbHr-zaev>=iq(n1}m+wkNI!}pB<_q|W*u{bKj+P*5( z=b}#ZTv~jN-WKQJ|C)`)pDjI8>(5F8nR%*w?i;N?4&g71Yl|g0g<X6|XBL6@{oOl6 zB=E=P^43+>U+V^R=I4<tJ-8XQ|7xJL5l+3dV69AmpAh~DZEp9E#A*2OpW*m`V|5l- zbJg(~b)9K>p=D(>se@%_V};_64m^*RB)W<uwx~vUM&1-Ol_g&Gc4r>e22qS_KlL<k zkmC~JnuKO?-dsUbHT!OTyh^!gnCWw{ma0=7`b%|dJngPW`}fbGZxV>z#$mZ^%!LO` zR=H!=DS0DYe}LTSTm2XpfwF6IgTyfC#byP_EA`VV#iBW_p(#`{o`Y;1m&?Gs)$?%5 zRf=wQ6b%8~#EEls8O!~cY6C(FOa922Cbd31x<jbt+fZ&JNCkKC5vLQZe>DMTU{O<d zay0)e23t@p><-Zl!wp(#v5_fAM{ms95cy1yAdjRc`CNN2?Aj9)TPaU+sb8wYhh}E0 z(t>qr_^GZ0Ne#?uFrMBfKUcH&Fh`x{+`s4CeAIAkI?0G)DW=lrF6JliH8}Jzk@uUk z!pJ)A;%pF*ZMVavb+>h4*6+iyFhoKOH9HmEZ0c@aM0fkhQUxbT?uh$c`TU#mw(<2s zs}8ku_a(qxGQQ)K1q<iTD$c?Pl6C@->&n2P*}M>~Xt43y!O!dnnPy`2qwc}tKfJ=0 zBs+R*!NhrT@&8sO&XEqBZ(z7bU;*WxLuZgfIFLgCQyF3Z#t4kN2x|onMukNVg(WuC zg~9PXXiOLDX(N1(rc!O{DLj~%%DLu_f3&W!*w=hD!;Tm&Vl#14Lu<fpv<;OVb#~pg z9)CtgkorAt%4dI07=oNpBeZ#+UA@QkVatgscB<%WDDMhtQ2e?cxo!<>IsNSM^mu>j zsE!}JdumH~3VBH|7#`7736m0I%6>=Bj4kgNSN|EX0CI5dzEGMN^;4PzfrhgV=w}m& zDK^7_tZJ{Z&+>Px&hn!sIi&5Vl^&<(3Cqm8N(^yQl=0{))<?%T!F8DLF%(|As4jmt z#$*6eTz?N=Cb3`s42}HDepyO>7!G+D#|^w?j)Ebe!zPJ!MyM@@=&XiZ6$74{QCWJ? 
zS$bKIv`?+H3d78d++E!+_Ic%2DJZ%mR%d=9gRiaa8LY%0$GTO<8GRL3Q{>0cb>+ZK zeSZ)5KR9>RNLP6_mh)$zdb5F6mmOwr@6hHj-GF$Ho}&kWz2TA8Pcu|6<?$eyrQO`f zipD{*IBtMfu?kHhGeqOYbA(oHI4VL#0y>@WX8zdSc~}M{#y!rEcLL5tm(#+9Q4wwb z;Ej+Wc>rb_x#0<e@s1nhF<tJt59q#!^1x*+PPEo)i13s_P9uB;XMoCrb4?uaPUdGJ z_q&6ME5B$nDz)~pgW=fEd?{y51Fh?a$Kt%h+z0YMItvaiW5l8*4eE9#@3cQZ!-S_4 z<eBGOH!N)pO$|IBtYiNjeH>{BdYLR6I;?y9eBU*_PdCqY>ooU&eLipNX>_c!c0!Io z#S_M@np|o9w8M3Si<-c2QBS5fz<?S9VXQe<fh`uLN?}A0sQ$GZS8JTuc%a}M$NJRt zAc-hxHBpUEZA~CYb?p>eMdEI1cwzYSHXzV*i-PGYOqnVZCg%O{VztobSscYpw{eBa zvR!J$IJTClBb=$E^cMLC3llmIJXkv&8##4RdA>_$o*!{r=QKZ&Ts)Gq;`n23{=Ng7 z8uqvd{xdZvwua%E0-KvEw%3lU!>{0KtrRP5BgsW~(Pcx3z$dQU=ei2FEfT8J7LXpD zZuzaQ@2{~(C^nyQ|BwEZ)sudZ<5F%q$1!X^0b6vOlKgQd89Az7ff!BF(-j!d7`-(T zRPhw#u;*`t3ibzaBv)V`<Lf{4wL$VtAJ81tBh<WA1SO5^J)O*d!>Zd`8kw&ZhZHVA zkuSbbD2Eg;g_*JbK5qPX*fNaf3|Yv|35j{eJuHF!eg{@W!-cO@@Z%I}Op|P2_W`RZ z(p7_k2@b)7IlqrFdroy0N&W@surJAgEL8l($1s1{4OySo^=bK3ZyC3XKKpoi89WbY z%K%axDtsf_zXda{A|`+9zRsz}4>xsjqK!hK>44jR-LYE`-8F4egM0ufVo&JTR7#{o z(Z?X!(mD5oi#~`aDP+WvReVKP?mQvBsG=xrM&lb_m|#KlaeWm-%D<5+;V8uFkk<^R zCq5v@Kco?$kK<be3QxSXCoe&#<d*JGLNz1rD`#CR6kWzJ+2Eks2I<dxtB!z01C^(l z)|i^;3G=W%N*3?DnRL+Z2Zw31O<_PAa7xzb*AJhQFl)A^bo^G>UEx1Bkr>S+AKe9i zm&n+2{VO?!|9ENSMJ(^QFKy3Sc*0<bNNEAGzCkZ#ARo9d>?cee0Qt2@c8hw|)I#W6 z6w^9P84?JW&yWYu;0Mr<hrk_F)xkdlJ9_d$Lw5;*GU@3?)fh!+#|l;Zx>dW1v}TFE zx*X8|(4AR%Ep5HA<Qy90!JhG!TSG?zzYfM`3-97r@&i`#`x?(8*1C&si#p_VyI)^C z4_sI2{gUp#mtzIKT5oSehYwu*eMB*tw_(*4*0rrik@)gP7fR$Oi4|5j!{yeaw33I4 zMEFY2hGGq%&%(t24hMlMxKS-RR`oI!0A_x^ZHdR$T5P%0!uSLeun78p{R&bnH%<k% z_4E^B_$xPX?~f89957LJ=7~3s!~#DUmlzRnh-B`6UZfHmX6)8BxX2}{oJeYgC1C?B zYa8bv{Dd;q{m-3E9Sbt6fvm9x#4{J&kr4IR0<h0Ut<q!Yu%9Hltnrlt3W{jd#bB|a zvU`~k%H|PEXIhiXHmIv-F!5$@09f}{@0oMA0wIs0(op<9?rgjsY<zu6t;rTpNoCq& zXu%UrUEt9{Xf{TVn+^CRmVxzN3DiS?W2O)vg4;*%9B+I}n9u~nop>3TGI7eQjMnx# znTz$qoS%=xBRXB5g?_H6A7q!XNG$n#P|B<~0w-q+XSXN$Lh?e5E0F6uj!M4+=;?IE zPZMCMRI{<DtdNOipcB~<lZk`JHAJabhs-&317X~+LDzRATV9I&E3ZcS1OD#yjdT+9 z6q0L-K+N(Wo?|hXA(Mw9qVz#YY5vxy8ohF0YjYWhTE4cSdAhvwEAIUM8W;*XE{JMb z|GhlUgP~Y4B<$n~a{OhH3O2VM)X<R+`-U<eI#)i-Y)NsKVV27S04t$1Pn)#+P21o~ zQsl^5Wy#81Lc>@zNLsu`oC}bXTB?OfW`5v`%DXRDbWa-D7^B~?Y#h+MGJEhgrUeQ) zHlV=pVAj}t2ikRfe2;!JY<FsPzqY!#b9H%wGhX8<m>g-9o~P&S#Sz(|(pezW10~Z< zQ7ElX{ANe=)VGf_V~(+H-27QnJNd$BBe>+{R>%z7wXooNq!vChqEb4FY`D<yV$0|0 zPna1CDJ{hf6h3?Bns|W0wrm=sj*o4Xt!}UHozwBd==Xit<qS`=d&@T?$8}yq!5rgL zo?#l4U&gl}3BS7y-{6pS<cRT~Vb>n+v6k<#I4r<I%hLq1@uc7<5M)b7=xyvyun5yT z+Z~>Fn^m{zzct>22!)V+VsZXKWISE*8D6(+J8sagn{-&GUe@b0j{_f?|9ZiY`VBwu zw&Dt7Vr%T=>}X<O^PeL-LrWMYHdZ1=qW>JRaBwg(|IgFFmdgKinwOV|LE6OD%-NiX ziH#k2Qjv&3*~8w1h(TS>(9*=nnTSEj#nAb`&Pdo8m;ndWER6p%n}dyrLEOZ`%-k7< ziJg;(LD<gP&QZzUzz8@+#01#a&P2@7z=McE!q(Zu(caG5z}bYz+0g|!`pd%EN#4X! 
z*v`h@&ep`%nF#pq-xk)uX)p}G|1(d-#K_LrgovLX=KrzAeeE(+M;uK7&40SxWR1Fw zDy2d_xQHqO4dSs@RSPNDI3=W_nmnZ#W8H{B6z8{`sExR2(UXyg>WpxbG9q%Hq=`C~ zxJhx~536c{*F>J^r<Dm?7N@HT+05(Njw$a<zUK`mQxQNQQ6uRFi6B-F(SuXekKUiH zNM#AOd9m*qv;Z&)5iY?)xx5gZInmjw+>-?QLweCvXDT-;Z1Gg*GG_Vzd^<~|CIsY5 zwI_8X!4*oiH!cE)EGiWKGjyd090b0IrzZnDO`x}7iv+$sUn=BJLVzRiZ+oGX7Tt;S zxuU(gfhrkx?cYjOvUYRrSrzA5m(|~H<*L90+8^9=m)q5f#KH!wxDSQv@SbxvdgYqa z{=M&TGx4@|fsuSchHEK7G0p<Tz+WAhb(TcqLpL_tmBvfg^w@$epGia2d|~|7>r?8g z^Z&qDy06YzO$QK%Q*n8!FI$XV8`duBy7zhVFT=1x3&@T)C%<K<ICW*z$!t)jGSFH_ zbQN@L^-DnXAJ>j`(fAn>Ia`n6U?nCo7Y|#tXX0pMm33liba*x==*!yimggNd^8XGR zI65cl!u+^Z8bOVr6M!)eX;C=QtN}DLF4@Qp%-ci-cpRs#`9?tU>RkQorAle8lJKw0 zSR0jZ!I5{Ns5GRZTnHOmoJ8lPXdMS2Pgq}1#Orx@f!CaXF#@R?FAkhqmo^OFQ#{ml ztH`M)RFY(JRDCM}u=Qel(e_b8A17)*k%HK+?&1(&e$5MeJPqM4W|dyN3oe=)7rJ2$ z+tYni24872NAfpCElHi7cWVLz=aLZb>gf;!w2X1X$>Zxoh>VVT`?7-PR<w~X%OrQ8 zu@@;!iI^Sc=aXlaH~&8_reGg68FA7<904~j*86~<N$3tZ8_>wR`N=;Myyk%?>yb8- z;SBX*|E^h1b*BAD7A{<4scXBiV8VHy+$y>Y{0N|wHxq5VVBLtO{yurNw3wL*pj$Pp zY8tPmJGkVr&ERJ>i$D{g<Uqj9&TD$zybSfgeq?!WT&0VbVXeGmo3e0!tPTENRv#W} z$yDI}dxkv{ql&FjRI?dm%cD4E-?2LDLZhx47WW~ftYMcHZ^Jc?`>PS3!9dp+7RjYz zB?lvv?~1DMc#@2`Jk!q1J|@gCImT!n0&a7FnjMKDX$%3QeDzvAsaxEkbUkOV>Rxsv zHLH}>e+`W{Ne4Bu@x=dFH=#@mE*&YpK2PoKfya>uoVDPgdx1X>y7A}SZrsC28F(Z4 z)ZEPMt({*iTjMmeP{915BOeSW4<}K0tud57E)uxVfBoEU-{LMqOq2e1DE1#@`Y$|V zWMuoFAja|Eff(oi2M}WhLf-!XvHvrK{ht8~2weXYuwedIz<U4oGX?`d1HM82JA3|f zk^g1S{{qQe|BW^5jQ=03VP*cG#;^mA|4)eg|HqumJ|++V0MMWNC}#sBkPAEne(hM{ zBmw?A`~K^)|6?CB*Z=r$0k7!)z2va}XL$lHIsZ5O|8IBme}n%?t#w<S7G$4ewTo^v z1o?Tqt<{zV+ekF~j9$pMJxq-h8|-$@_J1eaZj=1S)_3;oEu>uLG!${x(IdO*>*>Ug z;}7=_;|C;P^N@rNl1tKk8K+0qZ_CbXd|tj%^QV_0_c`lIDex8;#Trkivg%!_Gzzm2 zgo%hkC9Rb&^Q)7mtH&MhiBYFVcmn$o&JO(2az>0uaJqb1YFpphy*)T)^GQ?|MmE{9 z3?*g)y4>ly+D6M&sHL8uv<cE9tDfE#QF$mo^g{j?$Qbys>fK$L)Bggg?;P1RKC{C5 z48@n+{M9D^;=V~QEVXh9OMBz?@oh0SKfYN#ttS)85%m#|#!i}Q4Lx6RtPa0@)0twB zcWUuzWhLS-u~j&VLe!l}`=?hA%7v-x4W|8a^;m$@u;=`aqjwm3m-Ij$570N|=fT&; zJHCrb<b*1ip*zqmz-eQ@w0vDcaoBPCSaMoZgK0CA!ueEX<`?8hXnV^NoQ3#)l6mWd z^9~NXkCnKvm3Og>e@1hpDt1I*$OK;t&4YhSMP{P3N-PA6Qe*nF98F<naA)|rUhZoJ z7MwE!%n&MIxl7Q2dq`gGFE<Dfo=4Y2cS^1o>inm<ixMybGJHcs*36VzIQ766WQi#7 zogAZjRaycYWO=An!sd1Lr_e+sZR;@o5y*-=D{tCR1;J0`CDA!gV@~<tj>-t`^Zzf_ z-T_FmZrd6yn_afL+-1AVwr$(CtuEWPZQJUyZR^kP+;i^xZ^Zv1-h1&PGBR`KiXD6H zwRY|`$Cz`?p-Uu`%Jz=-({<#*j<b+i15C$>8tjhKsD9xDwU3DJNNEx33<)=PT6wHN zhj2h2*Q=e_-D^@z-4Q|^?5Z6tUlMz{`nR_0NE9^ZD!gwPsfC3z`zct$6z9+LC9%*0 z6e`KH*fk?4<j%9cwzt0U$~n3Wb5f6z?d%c!f+7#Ih%#1-Ca-agaw=BUO(FYKqI$>m zVy%Ja*E+v(N;BI|UHi067;w1WRyEbu3vlAQCVT$qevJ5vK+E}6@0{bP`*Z_~4-ID! 
z05{0;Ai;>W>|hz6_cXS;lWC9HC6pXzxqt*=bPq<oa?||-1e~26J3WA1GZ2`WvY4v~ z9;|t1SYkc3Q5R8%#kU7jIy?j`B(sS#0q4kK&&XN;Q-tl|dDOr#mAf3EEO1xMSLmwJ z;;xiBKay*GW2i_RTkw;Iu&45d$QXYJIL=c&MzNMC?rP9WjOV?N4*6|n#`Snb)z`<~ z$I->Z)!WelP=a%4?Km-`W1^v=<MiG2diwP}_cxzTS5J2v_ihgjq@0&uSQr?}5LTXK zdZ+N`!gLH}dzSM$G~V}(KUdp$Rz9fljpA?dDSWuZ9y00uLn8W)m0yeB4?f=CNH)dY zL|w2QrCHi$bfY<&lkv!=j#qF=Iaxm3p02+3K9BkyU$+Nm18bKLM`NdtOPjAtpKBd# z+fy-bgly~Fp&n7bMDh)|$Ch@C<NDXS6yxuTAG-`&w5vTHxvVjF{5X2)=u$m7{FVp3 zbOI1WJWzb@Z0DM!+-%wC55L7q1BJj;C2!V*_bN3>kWA~eLOM!;EVT0xl?~2TM$z4= zSS|m${6HQ=Pg*D!-s?^g8+B$?HG3+mk;r8x(JyGnm!c#kE*Y3VvELLoqQ&_O{_BV1 zmTEMZR<4}ca_jj@Sgq}(4<4cpl_?UqWqL#B^)dxnfHx=Ca$~Nk<evu4<;J8@2<6t* zI&GvrGmmn}GVf>$Qf?Tb$hY9QFd5T@3x{nYkwN3}lDbFvYv>UB^VD*6kfPc__9w-9 zLbR9*=E1;9I(n&nL)l=V2*)n1pWP2XVy1Ml2d9I<%01}qag&0KsvT=L)inmWHN>gR z2ORLdGcEr<G(!H0TL{@(^`{8R*S1=0JtHM{(-JVo!J*9u_Ro<Ss1;r$sV`INrS%L4 zgCt%t^O%>kQ+kt9-m^wjgE(0$hG(0yBRn?W^fwudV9DjQ2YuMvJ*&2ARr6ODptU$D z&Sf-P)0o(VfnFuWE)RQ5ZkVizj5S=OA>vv!7%`s&QEI$(*$gy+MkqBJ$7HZHgz$gF zF}1JNS|9zr%pfRT4f1ppj>OL!y&Nk@kc7tHMn@Mx+8Af2#e*%(3k4n29UBdK8tV8* zH1?}~FURQi`ZMhwNdWiMm+se(xxd<kc{CHyupK{G`W@HgppAHNw_5kkvAtt%u*83c zQrT^AXxN#VgC0LI8(>0n7_sVV*pui`{UEmyRo;{3;kvnTg_+X#l#M)b(z3eYM1BCi z;yun|7W*4+8s=Yc=C{JdPr%Lg;_kINnfR&a)gmcL4^yclfJG3Dvc#>Y*~CK!UU0^Y z@f5!)fsigWzOVfofsyk7X0;{(XY!$*mR}&BzRL<|A^&cmaf|h9KL`viC0)8P7X#~K zp&g#35;Qz~N`1}`x5<C|uivD6O?MSAbfay-tY7K3J7~R`qT}pDH}IIar-4E<20FuG z*2eoT|6aN6Z@CZ`AGf#{*Bty-I8t5k;_mO?kUCbCP~&ilVViM<*pe=G(AqK}OB`Ze z7o@#wl1?^Wt3lZNQaUmDgS5~rWDwh#MY3aQCDY4=SHG-1!gNM3ad<oglrvYzHtJ(S zIh`O_LmBXmthhx!ZIC|E>NK%FnGt`8Chwm-bH2(Ud(!>XOVz@DMq*B`54B|Lcu)%c zwOxVUlLtu-bqCl}axs=CXo5&{wvzCSqb-I14#{EUg5xE8QKk2oEm~tNsA#jQSc}F` zHzD<*Ias|M)?<r&E;$?Xa!kXMFFgO)=&$z6?yxo((!&{PQUr01`==YyM<uaK_cAu_ zny^_q9@}_y>}GW{MB1>tcyOGHw_|cFj{$e8YgLWpUaUj9)~THBX`}vd@+T%DlJ9)g zE^QEyte4lmfdKqoxa?{Ep;#`V_X~dYcZNFNwzb{QOA}{fN*UWq#MtWOGaS^rZd}xL z<q7Z0FHzkfx5hthH5;%Kj6;c_6F;8_Z;L8g<6DUk%Os%ECsy8wHmGjPDq0!rCS;V& z<HwN_!%|Vpg;GkD_EIRAhzL&O6E;xp0tdq>oN?QXmWb_1A<VtX2r?F)ygHD~d~0oP zp~jN5SDEhSXh|2c;|X^2Vj_sbe8UZSdi8#PbZNI-+V~kCQ=c;!lm_czMq@ijKuqIH zSNesgTXBp8aYJNT>1|z8>p4V&t#4z?NUdjrgEKU9Nx-JwLwhir#w#+)2-@o<W6|>= zW@oT~vE9TFa1GXxB<4WQL`R1E!;2cnOsNmLQ5ELDzFG2L2rc*5;>f{W{x~inDjV1@ z-4p4)EsSo+Wm}pXf*<^G<}v0nFr;uKpRvtueN^(>e{nUicer<Sk3CoJ_pHs5o*I88 zJ=0|L;}H8WJAPu@H0%a&vi??z1i_ykmd^@X&v*KE+;bQ--R?=}UEp8|9*f3CmC`@I zlp+PnJSCUVsd{=;*I@%qN~=j7-;sy-^G&IU7>5cYdVjcEoQ@?x)L1OHqN3n9Ns_Ty z;`ZRk_e_XgorgpXRqm15r0moiu@m4u@SNK1-`xAZ&*CR(pw5+HZ)Cn!56~2JSZYfa zs>JNza<smYA}n#;M~X_i7{c52?xaf(UM8|}npO)#LaVF9&stfr#((A=O4W;%CLyCv z8+e%^qw$cnN6iy@kPZv}HEtcI%6CI?#aesvSvFFM<Xv*u!u>st_frim<vDOrln8jA zynB~hY_$E1o#?PmJ+8e0fyN*|n`3qPwNL#?n?j(0+4S|NO->lW(NW#wTyQ-Kt7T`> zimb?p0&69Kp5}_k#7}Cv7y849)RPHfqJ#SFn^?>_`3{&=3KesoWh~Z3F@&}0qpMmP zJ!sUvA`y7~b-0MQbt7`EUjvOC*1?jr6w=jmg3V=S&Q-S-7eU|$)zbL&lHe^9q)NS8 z(BVO-hvu2@b3tUNajVO*Uw30uvA;qzfaw0j+D}qh#&apG-8fDlc1GAn-3e>`^4CwW z-r-DXQM@r=lPQXW3OiHSeWSQ8p1g8#M3q8^+}yOm2VU<#j0Ai^Sc3Za;;?>6fsY{= zWbx~|SovMG)Mp#i744r@09=X4mI22lnuCG=fd42jWvW`IJmDm!a`E-M8|<g#m9yEP z0(!9^C?BlP_qu!hiJ$V{Mp1m(1)wHO`NL6Wi@%XEnWp9+PW}+p=~jzcJ|Y&=cpXiV z=y$sU5g!iB?d>5aBl6&mM-#vnpEoD=a6R?UQ`mc7u$7xd3O^4f?9vP7?L9gAD}pLN z{~pa#pkT*vZtswetkhQ0L@Q4p`eit=wh@z!=Z>i>-tZXcTJtYn?_W7L?|hW^$Rb{c zb<1acT4q-+FUAjiPC~K0XitdpMAd3ICmUziA!msRopc6Nh<H(#V2Oc{#%d?OxRM>Y z8k4j@mfUq@yPeoDl~~Lt;37?BmOG2ttI&cT3=EsM!L25+-oSVjT(`*JyomHLhmDEy zc@=HrJ~beTiF=}OcHlFR`CMa|Wm?``o3J?+pz$^uN_pvTA5pt=_}%gJ__2$9P>Fs^ z{m8@~kk@N{1^z3y(9h<;0<yH8LC{b!&Aj81jFWZpzJMRiwPSF9-yDByx(-fW{zKOT z!O!;Ogv}wF^egF4+(?WS5eQEyF&QSJSwM`ne3uT1PO*s_DP^HxgsaZU?e=sfQ#?Im 
z;wB8S{!>c5NfB|`D;8Wk-pJz$oE>cL5d#AvGpOzX2gx{k{|21K+Gq`kq4@)n6o^bh zSUij81>GPfXwWdDHycSc^2>6vc{UdF=9-;oR>g0=?FszuGp&YjH<HK~QIxsfTXnc^ zl5YgDhLBVRp|9XfBWgVi%Yko!{f23&{xrE;PVIeYQWnb(%{wss&El(CQ9&4#W7htt zbiP!n87b$fTCN|xEMb8bK^Pi%>s<qQk~fhtdy7c&4ly~uS(B4c!r1xz0S`}&6UKL0 zP_=tp)3;Z?mCpL#{|D6mFR=O#s%2uMr~kijEyMqYYyXQnmHQV@DyQ%854p*}^luEW zVC-n?>|ki@NXWp-^k1-7&=x?B+Bp65%RlH?0fr7xMaNuUz}AgW^Pi*GI2Zv5WQ^MX zVB!BTy8k-%pI-b!-O4%G8Y&t)5o!Xy3W)%yTVpo>WBV_nR^Z>Cg8%*$C)EBQl)yiS z3pz5u{KEt*08T(iC+y}Vs_66&Py0`ss3QX*!+*i^e;P#qUH#YZBFwb(gpB`CFaO|n z27vJ|#sxs#veGgTvi$2h01Zrl28RC_EC7S)KL!WDVEgv~h_C?+mj5yU*ZAM0^FJLB zw`KzLfc3xn_m9O4Af5jiY`}Q|789WDAIjvvEJlF!uRZ}tnEx?a|0klG_5Tgg{eRC9 z{kvz1()x~;fUf=flK#K2D?|OF2!4b}K)v7J3lg?x$n*@D#;(xhWGK}qGr-!PCuV)X zHep|78QMxlX_}1`Q+ze&Yy;)*GFwLb8ZZw5hrMmo@0lV}tOx7e|EK%*KkOMB+kf9h z{~E~u+(rKh0I>agivAx302l#N{omXfAk6T;(@p=Gv;SYH|BEhgC9Sn~z(%awSo`VL z=S=T*t^h7DS123ktE(%7V06IyRUeI;Ul0(W=-VK8m?e4E)w{B?K%$)TY}fn4)m9+K zQK?e7NtH&^GSl{}&vv@w?F1e9VPWQ<L<a?#ad8gH|6%lW`rP{%pMRY^ILtJqJyE03 zbnPEX3vwu{8r*UHi10{|^i-sNW3sG1T-2Gc#iX>xvOUSV#L?_%bMbwAn!X&FApmIQ z9UN}7T=V(7J=~A=aZ~!P)z^AZH{}lfrFjjgCP)Mc5SWjbnvWM64;NYwmO|$*I}t89 zn<z&cwMDh>W7JvZ6<#A1Tq&9T)jH2xK21|cO4vZ1Z%0_S=i&0{X1t<hy{)jB1dwa7 z3|!c_*NlD6?)f@hDADzK#nP4?7k^DjdXIgI7~lAXoBv{TDfapZ&9NBHz6jmwEOU#M z^`ogc!5M89TCOgdjsJ)!pDy^&saOf&kWE_2d1gsRJz(9*y6N$ab20hc@&;EC2O^oF zsoM8cVcWoYMnL(Qo7sjy2PeCy!Nc@=t-_UD^sDe=%-H8q#Yv*59zP*nH#ST@IrWN| zq2L#A3t2&7RZw$myLFVInYq_((%l3T&A@pT2~M=2r7{m}ur<>f{Qj8o%Kmuu&>y(L zWQ5^lo#5je36aa_zr|zuPiYa=ukBxl#d%H5yN^%t@NPlt+>H?0K((Q?atj?$Gw;RC z7`B((K2L88HG97K-{gm}bg?;|Vw0B1EH~sRtV9ONWF~Q>M#{x#%zu_2YHz0{C{IC? zYyg6-a}B9#EPGk&Cg^I#7pYYG_ye{`(?uwg#aN@2_`Q|LqowG*rDr3#XCsA2GnM$G z<yeFH_`PL#lNC5rMwmqASS3g4`WLzpVYV<6*OC=jH0P;R1X}*PKrf-9FX%fx#jbhX zzGa?PhBo(e6KvNqseF{aR_bd$J4kB3$<G}^RAb{5@8Mz*5(}fVt(~l<je%;6otR48 z*U7sCH<)B6e}!mM-(;54Rn^lKXb`5z1m()-w+a{k(e@9p3?|N3SZYAfw(MEtG=9bw zUXf5QNr-HI-9yLv2)V`8(4RcxKUVrUQT|k80Tq@mVEJ2rNo*;2|5lGfR_=-6?}Z88 zi_D^<-3Eq#dNzL3-_?a-D?7?Z`E9i3`Bh%+I0=#_ND}&@Gn)C4586{48!js1W}@kS zY$bF5N7SJLf>?3!XM`db*kXn>wNfty|6fMQz9OxmE)gvGvK^&ghADLBSv2}-v?j^K z772qTI5A}(oV#9}`u!re1}0-aj><5q7>h3N4zePYk&-mJqdC8(u)Cup!pb9;Ic6={ zor(2?c4iTf>7(MBW7r;OU?$HdD{*R0(m^Xg#}Pq+xlePtn;cB9ht8kLkFN?pOmOZm zoW0~*Qsptreg(+Qi!1T56BeB+(2ZM_EXS+~Erw@|%u^6i9(JuV2ax`1__JBQ)Oq$t zYn)EQ<mNPpiQ02vv{gZpYi_KqzSPaZ@(t<)A`A;cn;9AqAWvxu`@?`3db}6XC(sW3 z7n^rBL<X3j^is%WE=K8Z#9qKBhdNkFdrzx)et=_mvu>h}Tt%@hiC1KlAcGUI$Py5h zu&v9b9!IEBMQ!>`fPeq8`RqqXAKI#*pRe7~@_tE;%=ErEhl%+~R(y+Op)!Ac>3ccH z&tdc@mniEb=jp(5P{Uv8z^D%Ra8y1CtZf!sk2|tP7k6%rf~RQQ=0;j-e|Uu8L7KD@ zw27(v9XA@ztCJeHO3yMYud}MJGt4{Mfn>)wM(CVnouhsM<O$92Qv+8jILTnJYsizI zzzSJ4AW$h*2VqX$7REXfbZUuy0QL{*jT9fym?Y#LXjC6AKz&%gvrTZI=d45)%Rl6) z2soSvmlM}_Dy;Lr_8k`(`RZYvz1)4WUGrvU&iY>8J$=6S&9znDO)u_EU>Rqvp>3`u z@whK}U#gOsCO2nyc|{*L@O^(w^d5u=8a3tG7uMPbX{#BFZ=lX*FEB%EjGH3K5MA?E z9FSNgR0fqQ+*hJ8i5tk>6ehav%ZXB{3AYI;qq4>}>Hs49*%H*}w-MF!_L9o<uqm^M zKbGN9XQ6kN?l5Lye=tL#(L<rJMJF^!%Q=ZH|2D#WM_Uh3b^pmMbl`lr7|X1ccAl}? 
z%Gd_HNz`C6OVX^dMw>0Yl=zX47v9QpGn2PRVweHCS}?d>!atlBCd-$sf-GKhMs1#D zhOB{??OW9PNoK|y)K2)dV$!VJ!(q?$9dbBz-r|qk29hpD#Q|2<NFAJsQ`E^7ov^}z z_Ol?JF2e*Uj?56v@*)W>X<^;L_-r|FGmyad=E2!u%`ZSPJ=MKZSWaE}*=l9bMdUIS zknEgmkJ^^W)s;|FD4m_Go7ibcR3ZY@R7qXZ-cqfpVP!A-J}Pni7(CdC&eLJ0L#oPX zUvV8Y*^R;cF0*<U6@(XFNCN(X3qIpGUdp4tbaT86e>exrWs6B<^uxQ4QD&J}eU3wW zfJbz@WcFw6B-vhqs)MAOj;+qZ%i(U*SW~Fu2A6ih1U*dZ%K_9Wq=Fa+QdsJO7$?lR zsK6-DGyxSHTR;f(PE#prSCF(}6zF)MqX*#$&=llh%~dBygBQ+Hh3t5bWSjS~e`)5_ z>HS*}2HseaHpKxF5O-1lWK#k;{Y>E%<fP*XnEfe)<9YDSNhl1a=;$h$8aP_XpU0?a z>jxRDc5>DVqIxM%i3Q7$4)W&v^b#wQ^WQgPhdiZ+U46r)o=f2wMYCgVfx;<_kx*im z76dxxU5tkW>OX5P#Yv5Xddp;b%j`o$uOWR%az7h`c;X95+M-~Ka04`e`*IJ)D=+%< zjwbWY1}k<4OLn1>bQjVzH`7#6`igd^D-Ne>FbEBBsZ4Q7&eIH$wNA0OasO=Msj@2l zWd-d!-OK8l5}gq}vOQkbWqO=4R=p=iZ6f?prziiA5U?r8Y600?>ao;EFH{f)HUN5{ z8LS;a*`~6_19_0rIJM{^)8+Aa6qM{6cy}>Gt#2;5b9o^gs)&K-(`x1`(SP(O^^j6N zQjz$GFDx|+z2WDdTB0O}+&IS~<y(Ul=mQog6vpr$tdZypk@3xd1$;`*W$gDLo08LX z0BidoE6AlEe1itz&d+Ntz@jBuvqPJYae}L^Gta6r$B2unNK9-iI$Xs+_)~PSQmnVk zJ!0mhvXt~y7>A03Tt!)x$lBCgkQ~j?lm<uD+cPK}rT_2J)cDlq`oO~a=D_d(D1Kf) z^${(UBA~Pn7rC{iC<~J(xFMIh_dD#{w85KBLNWhYOda|c_|};9m@-LpAhqi8703tm zb2fsM^)RZF1t)nOWd};6x`HHUI2JhtAaNJ0N#GBWewuhql!UW{+HeNAsxUgdlGBvx z^Dk89s3@tL9&c9z=dZ1yF`DEsoi_plm2VoG`!2s-bp#fHojCrGIQir__3Sw5v^edw zIAP#N-3MwgT#J&Ihwz`3=6|@^odSg?4T~-l4)79^wC9_+9i6&hU*P&vmp~3OCD1Y@ z*cU;~jI>cJ3mngCVre&{XKen=+mRnGg<K*rjQ^;u`0TayQt6LTnNwF=QC{j_o1L5L zpPHE77#ZU0C1Hlc!n!0tA;3(*%uM}}o{65EfE*9Qf@0xjLH2>611BSf5GHvS9rGL$ z_5>Ae2M=fe@YwM9!05<-vcX}lu}X=xp|$APsdrLkMPQ1efvn4^w$j7gX=i)c*PzKo zH<ElEa@?p*4^8r;g$%upy#}jvPo)TNHb2IyRWC`pU`Lwhn$iXfk(Yp(0N2O<T*+fC z>9QkMcJ%cf-JZh|gf;0pXlmN73}sGI-o9d@lyjqF_-Bc#%8K(deAMIPcI)D7qN3b2 zL4vsr23gHjKZTLlU^$d_pr3Z|@6ngce4HAeutMwn1jlqLV!;6(&Fvbksj4{bA<1EG z;^tBu;&H!L7<_P3Olc_fBxv?U*(|mooq@_TV*Rk$dSSKpHm;YK*3h6gp&^igc=NPd zj#tWHQuV{6*QFFdL3qIaR4Q!Eat)0Njg4~6jdG23stir)bd7QB^c`)pL~WfU6|Lky zx~LgRu%E>2L__3M(<IFEq=c9rac$K7y~{d7gBl7mD(kC~8!W(Se-&0aH<{{K>Yk8B zbGF<t6V`|=kc0?L#HmbZO7URQYfd)`pN5VXDqbnujNXi$1l%;|Sa7m7mRyX_z_NSH zk>kOSl4j~FkQl`(PCaa_Hix!-JfDr7uf2Xd6x1p9gBzPt>Ck|Xsjq_M<<4;kyCOGG z7_Y!t*Jx>9Yk6-A7+FZ#Y$DY}Z~>>F1E(CB>v-Rs8P!h{dqDAc21SRKSpLp+6+$J% z&Pq#E(SmtMW@BB^xiY&vKks(2-WiUrK-pg@DG@O#w*8ygW`BPay|#waz$PziGaa>+ ziIts=ot}!>X<zXC_88;uf0V8BVx?}V$S-YeVlJ%ULuAFu&VYu66$ypldUM<2a8F<F zD#ujUUTR8bie-SJkgdWq&)Uh`?nE$}qt!(azd>7y;U+QqN8qo<Fe_y3%7eL*LzR&- zB5|T~O09%nDXu}#oBv^HXLi*56^hSS_T+}2Igvq}%Hr=G>#+2&rrIvW%?`Dt206Y~ z##Xx?Z>tHa!U(0wh$z{dAVv6Q(3?bYH5lyR3PQ$5ugHhb2xo5?W9z+BqfFWGwONWb zG3*X(E+X7G-faE&9W_9CI>4?!$usP8fZ8Es4MI#b@Rp%t!~VTvBJBNp?CROCBWUJc zrA6Q4qg=Y*bSF6YHs|M{UwM#+D$74HP(@7-dZ7ZM!t9~pnjY_199`y^TaxEmk{N_G zz|oCYqP25+Iv!763yy88r>)k4ljp}treUz}Ex<}W7X4=YsZ5eZsP>1eQ7SLBF;h1k zPsbG$*7Q8gXT*7ruSYtgSk$mJcrS;Nq`9Yfy*}8xy#RhbFiZIJnZ^64h{<LvZG9a7 zRIDVq!a&JwrwV;p9CTtT7HJALa0PK$9777H!ZeZkOwB4|=P=br0x#ppJoO?AJp~~d zD;WuOK_iGCm@!0nH!llxrmu*r4T84mNQ~-;yz^DXx6di%tao)xte|T7^$CdUA4Pfa zBXUNX2|iY2<RAK>50Enjrq<M^dPRCMLW4@pTADh(Hy2x56@VPuq~@3i><}+HBLN~C z$3AXlytUG+O}jqMQ1qmw+hk`c#~52$v$UAWb7*mMuiE<uu)-dB`IUR(;&{8;sF8)E z$9Doq6C34c+TRZ=0Q5!TX=hVYYtCF0kP;`KpYI5rkxo#;InPyQWA+tOWy@OUM?_S@ zNtF1Zj^Lag*=!(^*Ul@v9N)wm|0pxJM3kmX8l4hKJ~Dh{gTI4YwXxcl{oc>|)^oot z5K}~w%i`<%%TKkJ#9!Uw;w&Hr;)suy3z0k4RVXUax3;MRqt)9T039=cVpHpn{7DH3 zz<iCqnmwghA|sB~i4<9l3p)*wOS>XO;cIMAi#o|;eNydzyW1=%NgO&Gd2L3oMWZBt z(UNi(14T&-O7c}h$m(`k<LGoafwSCqV(}zx@|I_dllW->L+^K2dgBNG39M9BI&XSC zZ`k0ZrEHAAN3FNn7AHzQu;%#b6t;#MhG`TA4)^jH@cX`s<lKt*OnX1Q-V(A}dU~>Y zQkKRm`eEfH22b<mHX2-43+W*xZ!CNQ6ch<O@ECXvQFbcGY=whxaePy8W?hDVTWhSJ zoVml>>#%RLD<xK^^AcNIzCM<Z)If&H0C=^+eW}7@S$yqCe$+i%@p0*yI-I5pVz)Nl 
z)Mg}U3?=gm-PqnZFk82!<r`wz)it}z22TV0x*d@_)7XOaXQ)XS0GrhE!$ns{Dhj<W zz6}iK)AE1nVGASvz+!^|4=RkWNa_ex6$%nHt4zj92vJ4xK%aBhrggTZnbGP<g*aj7 zMcQ3Pe2A5u`86Xu`Hddp>&()#uUP5>cwBx;_$piC2u&fKg^-qbhKO>#fN2o_b^c|2 z0B<eDjlwCq+)U8Z+7%gLjVzzESU)}3Z;~%!PgM#%nS!cCt-dI?kgdGw005{F8>W+= z5#{MMIY^H!6v<D!st_WGJzr0sp@A@n2bw`Di4*Qnn1kmK&35Il@?-36BPw*;?g3$y zMk_Eza<{zHm&2tI^iFp6Pxr84l2Vh@l~y)bX;~)(UArg9k|Q;OLgdiRkd@uyc5RYg ziHcf@wi=%H_p{L%4GWU1T~qA5<lpCZez#19t&ov{-e3!uqdkAOb=UsFsPK_dvk}sc z(fsUR-k2GjQy*H@n&7Q1b@w{FN}LLvA>j5zQJfVg+V@JI+36I?jWDfB@N6Q^zspQO z#N;+kkqWbz=s24Clps4|lMQ=2PdQyn3XOH?8JqWddpJG2$np-ZIn`{0tIAWnMQagT zfi^Qy5n-fk6Ftzz*KMo0PMWI1kgC~p6u~CSy)-xq00;y`Nlk!32P2{FDFRt}xZCf4 zJw1+5#`dD3BMTCHm>ckVLoe86gh^hFSzn1)?Y{C<ff_1CIxa*y?te!`zj79SzE!o= z<ajl|fff`*Yi|apoF%|U3@9?j#n;97@F&}3r<){G70c)&Rn%~+^?aj(ZFAKD{GLxp zA3=O)zBmuz7Aj#Ov`7u0r2G_Oy_}^i6&{0=>?Y^qIcO!{jsS=+R*0A7^K+#3bg(~p z#47tjEU^|ASgJZS+YuA{1CA|G-trhaV`!aasB#zzmz|y71^Wt-l5|bl__#P5h(rv| z7WM!Im8ZdvOkE-Lf;iq6FFUUOH~!{1=g~%lo`;yla{q}Hvfnp(UoPv%F}T>ZDmSkH zHC9dtNQMB!U28cnMyT$j(3bA#tcdtvOEHO&(YL)z9>89#o<!+^W7Hm^f?on-RT5iG zAfv~NFa>eXam|vziHT}1(irh|&c_D=m)oVe^;4ydMg%Y~=#8OlPjnn5d(`?#MOWIA zB<CJ;KO4Jki<?a~R=8AqDH5c%0tKhmMxl4naRHtm#N8H$+vD(5MrzRRuAGyDz0c(p zncnG9%1;VI;nDox#Cmpp6oW*?TYMD&zn81Vr_|H!hk>S)n<qVjur*B9*4m8e*Bjb4 zSm!+?_s5F%_5FD&;#WFjdY}vpsG_*Gp@zJ<kVUbmTO$u+@8X*Bz#?EJWA6D{k1p~& zN6a2Vb9FR-&p~H4iE~u^tu)sDXMiNu1~s^|Bud-C;LuLUC#T4E_Y$&$oc>cgHAl@G zK&ScDF(+-_uj{-j!E7-;fn#q0#(;-8>j%K=Oltfq$8Q9KBbqSYCXU6q=Hc-oG!)=j zV_^>JK!$-)JO2g$dj8y;TYR7bBc_hTI2I3sO|?!+n4kgD;3??fagP%kQfy<u@OuCK z=d~}5L;#l8X(>A|FTHu9Es13=jl+iln$|xr*+xg>NCn~v-&}bm9TFqd#8&vg844RL z2aVNG{sF8k#hEHtlz-poPmAkR1a%L7s=fLQ%jfM@Bue(zTQ8l}wVqAboSSpe;ahs1 zEpigE!gQI?R71#K{yzAA+-Y<uWm4e!mV3Y(QAwe85gcCDhgTmQKwhW$;c2UtBTxbw zp9E^Ng;;Xqe4=F;pi#6o6xDm22%AyPQSx&95Okm&PJ>r@jTf41)|WGo-p;{T_qyT0 zb#lPxq-po!uAK720VV)Nq4#*z?d@b^6^EU8z=1PDbNJlT7NU?XX9bpXsAT5XHQBWS z(WM$(z>Fy-;+D=74SAc%B3IeaB-1-sS&!SA`%^J+*RN`OyTiL^X}gjKs%`-3gVdOv z=y<$z2P~PW&1QwGMrMQUEV8b?wK}(j1MNfBQr~W<2jq<f<;Y-m#m;~DMM`c~>tJc< zU_U*^6q63g(wEA=HAGxUoNq=C;F0&|<z_}vp}r1??(9kL5sxf<8g>ezXJ-$LyiHy8 z0U2h(ec_b&dGP?nuyz$zk0+gO%4jSWLHB49mpo&n`E1^rk)kWQV-2E1xu1th{e|dI z@!j&j-Cg#B(roR8Jocegeb??5N^(5OSfBToC)*i_2B72@P(O23SkVCDNHgOxw^ulp z&B!T(Q~5Kags4-QxN)Pst%d!P&O9q22%bmV<?X%PLam<#hV4gRKdmIdGSBW<>wMIC z<cC!EC2EviqTpq>xleAsQ)u-rZ7<IT-4tDA?X3kR`0d3MsPqu!tFoJ_VzN>+wY;VD zW!-E-^SwceqL_et{B!vm8x_k6y@BI=W!7Q^$ycd5S%3BDj&oy+)T6Y2<ztlCTrNM* z82Hg$r+4V#PZVS2W2}M@i_n~BD!N*`?N-aa88hsUE+|Y#m`uxb`w;pe)geS}_D&M8 z$3)<})0lg>>+gTLXwLYhXG4x3z-Cyq8;4-P4<~8MR$yJ>XJ>0<W_>uEx?E(?Ke1uE zRla-KZB5@S8oeh`7r{rD36tuNoxYQaE})yEDO{y2@8N1Y&k;vF1i%yZ_?N0j6bjyg zZ*Z3`Mu_PQK!vV~G~`O4Riq|%41Ix5bZuhj?VQiqTkbb0TgPgdL=#GHivV|tS(uO9 zWH#@<a>WaSricB4;V?x*z3J}#%`Z(O<OK=Oo%{gBiWBScG5$U2ZCGx)lFW9!4HGlw zfAjRKgQ}piAl%npX-v-H(M)*cC8;j0EO_Q%Kgae4eq(!5hfB}@Xt)lLl_S*j^yOba zeAKw7aI$}8mRMkGS)+exevo*Hs%^NJGE#!!q%xQ(wW>AAsMya+kcz=7TcYxS%o1rI zX8nUcLMR9|hyE#rs%HCZmAf%Q{RolR7GG()62EkJf#N()mh=)edBMZ+-d<v<mZ;uO zYg&0>d=3z3B4%l*%!5wK=RmFV!O_@5q(45sq4*xZjBe`CEK40kh$M~-pl0kMDYIa1 zvM6k~G;lkbDB5_Xd&ESr_`t03{lsPAYdb$%8b3mp^Mumj?$5XC{EWK5RiZ(nhe(4! 
zEpIEVwS37&W-e~iL!6A%Fb-EYg-tK^TbEq_nI61cYXNbjoGI1(4SQ%&okz<@!wOg3 z7;6Ovs5MgtIm(q{@ya?sPB_`gw1Uq~ZIfdkT}6`YjI!YD^n?f{IYv=l1-VlqmkhD( zm1@WsQlvOpz!(asCVGQr>B_2VV@oVJU*^sd1_sB1bUIf=_`>FZJ=`v0VycQ>jDAf1 zR5Cj=vzs1dH<;^Zs|x&{ffuMMztxQt%%gGX4R?uqARQkOLz!-4u8_>o2y<on3jg{* zJXGlzM8hOh#pwQEXqM!ygrv@hV17}~i?i`8WA;w{CyA>g^}uLiR>S@H7nKc*><X+6 zD9%i27-p)LErE_~W#@E0a&M7|?d1`AW?X`*Gqj=ku&*cM4WXgoY6vJ0oE&6G$*i-7 zy}pOJy@~tl)>dG4-#Ounkknn&U3GSuHJ0;!TGM`&s%dWD#rWRCkKP_2g0<ssL^yN( zKs^WS4tYh?gge;4y+q``U6m`Hb@b(gkS#`5=({KA50Ab<gXDvJ^!((se)Q7-6Z8Hf zb2tm&3(mgrYV{~={&s|0^3KnRiH4PH9C@}%&47{SczD4a0i7<2EI)Md3FZV?R)6zo z6iY<@1JWr&%p}~&Ol_Wlg8B(+E(l29ia31Jquzs-OuD@DyQ!qZwxy&DYU*=WTYmVM zaeZ`F_JIPx7D6l)_BdrefU-RYds}O39cM>72*KbYH1o&E7+JJ0UOT?nqHjkiedCg= z6Pl84#Kbggn6wiD6pJIjS-N`UnMZiKNO*<_J%#5#4^fI^2u?b^bhyV(dG5AM$QGZ} zR%PO{N>3I0cWCGDbw>8C=(YoxYO*eqR7EbKq5FhlS<s|E4e5gY{g++Uw={}928>wb zcsQa$a$;60HkP{EH7lRTN5e5}RTw4U={Z@c-%{_UT61xryN(qTj+WH0=a!MF?RC68 z-QA5%ZTDW+M|Tl1Wdy*HAcx>M*Nqb#(hKg=iq@*fJBqVbHr8m(^bL(*ni(2enJ@Lf z4qwC<XH>Deqr>E50Ul$@I&Hy`zR3~55m+Pol+M&riu&cw%>DW(*w*wBPHF`;eV(-v z3$gPWg#sp(n933$R*2jg5)YT&C!q1g=l$UZvei>=#p4zM;$v5IdPW7vS2h(>JM<fE zD0{Ih^9-!O()13KvME+@Qf0OW!XZdYhPKBAb9(nbR$b8Z+2?*@cqk!zGS}6C5Qm&L zRaHG+Q32ML0v}~?cK5V2_Qav$-fs1Dy<S=vf0F^HVFQv<;-g@1VXHB3zqoVye%ZTQ zh6wSB^Ijzj`6$~qaI;(UoE-72(FM3P6I1~5ai(9Zg0bFJX_4(%$=+D`xhUS1i$+h# zyDw&&`Lt6WH&*{wjsCBuPK<E40Aa2mIqY5RIM4pAo<gse7ezt7GrC2;QP&u24iTL_ zkq&J8g`Wn{=Y{jxhn}*MbTf8l;9@6g8=jt!<VENO`zn>;=>?A;v(yx?o6qzWxo6b( zmX^6^ux8?rwW`g{a%k*5O<f+Ao|jM7rROcS-qo%)d>+j$&v)ixP6+Th2zj~jF{&~a zmu@=uN`~KmC&$mmmm#cRq<KZ2>f(HB@9~d0S6)(7IU#FQn41bQQuIy8)ZE<)R4~rc zXD2x3DO(eD8o3542J-hn8y(Mu9r%}`3ENJDC=LaPubpMkZ+CX5q5XY*+CFdAzJwT| zw1cAy4z<Tw;vNq$E|f0~fM2fcPoSE#ED+GEtGAFDut*Q>@_WRG2$H96_sdSeG&cA= zADfb9`n%iw1@LH&<m7|40jJ<{MOdEdaNV^(r)}Trc)#16SW8kSr2!+VVC+a&74y;6 zKcBjbNkd*eshT>ZJD9n&<n=J(*Ck{R`$R7L(#@Il?N(V@-_TUjL^PS9BkP;vTb*3e zo>|urTZ$5z57!#Mg#;#HBtpUeG?UqcLddd1$f8$?&Sf%aE+N(aA~HF~k#`c<$;r<C zSmbRCXgh-;w#CyRwMx&&Sq;!QC*NP3W#g&)qG+*~nk7_Qn#8yAaxy_kUBzLu{Yo4| zLcP2{`R#0vd}%b>=!@(EcT>?K$Jj0VvfSnQaz5PU`TP$8cSrt_1sfnsX0rQZJ9XQm zq13wA(8$~ew7D%f0vyOQYDP4X&wu3@cP(r8bqD@W9VF%N8P1sc$0sHiCwZo3!o)^W zqy}CZ17La9!~}qA!k(c5RbfKUJx@v0gHrbu8pe{?xNwztOpf`LX8iahqdxNM2u8_e zAk|O5uzrM;B^H|znkzIeA3z;vgsMDt-u*tX=IIJRQG~TTgP22s^VzQ4yam1EeH7m9 zbC}FK^v-c<to4n2dkTacsU&>KaJ@3ac}KD<4bKQ7SBVYSQFWe)p{cIf-qKlL_4Cc^ zuXVt^h(+>xFg)HNeCav}*xLy*5|T?z>SzHJU2aWYa?NMugG$Tft$|dSsm2@6#~K8L z)(i20Vuc*Cw_($B{QX#)o`MGW00rb|RG(>C5vu)0&pnSLQ%g?4@-w-+IwzLWrs9j< z%ajGJ(s{9uT(WoN%YsQywBQjUBjGb{Yf{X8yN@ULKt|t>)ts%JeLhv08f3Xx7YS_m zyqw<WkRQ>RJ#Th)6Uk?`I-TqyiT31*5*+MgZ75GOYtFKJcssg%j+f7#$TV(N=mTP7 ztTF7sn?+68<sIqBX`5URR8$7o!=fS_AK;mt1<caZ87R1X>GV9(Ra4?rL&b+u)Oq6M zmvZ=*Qux8e%g(-a9kxu}A6}dSW!XQNgu9_WON`b<S@U7u!J_w<I?tE7&PxlmG6f<D zxP3Rcy7R)tFczAd?gd1OOjTQ&V<)A)mnSDzQVfot4)L=5KYm^3WWlZ16->RVmdE(N zP8W>GU0+^!DP9w}`5fOGDtSL(E`8nZw-9W%I3bjuxdR{<jM;lI#*?ZO4Inm}skWuo z)darnNthE}{|sqzgsWlYtOQ;*0@eWVF-tk1OR>yc?jX=h4tZp6agb}N6m%O+<?c&V z`R6I!7pOeu$2)RVVBchya=#1Ee<;to3+(48(mKzmJpRg!n!0JP7cBqdo#z5HK9-Yc zkE`9;@OtC!q+jEgoKY8;sOsjVV?5P+a-y1d-eSpi6vN1x<KNTP2@qRtcl*7?&?qS7 zA-C}?FB;h8>3i+y9abir?N#mB5u1I{(-B(T@$BMq&Aa741JGBvy<La}H<pGIc>CF3 zd?h|LlyW@IbzCh~od<G26SY&Iz{w;jnTpGs{F&3`UbrYozX2#2<W@#Y)Ivy4JGRC* zIJqXh(jOo@amx2cPzE+VAyP9(@itHXrZCyHB-ybb*`6rn`2{iX<=YZz#yIQaVr@*I zoR3KRp1H+7|8(7A<Hkc{sy5ZVp0l2Rpe6n;zo6cEG0@Z&!nB!1Yk~C93fJr7cgGng z=g84fcejH3-y-*Y-4ZjoyKcZD+x80nqB}LozkYqm4FSJ>{4uvcE(h%3=JjzfHoZXJ zF_V4C?b#mafp`C6)B_&^SUa>EFM7Ui06VA^HI|_J#aUJw`=8eCreB-$iiV^yb(^~n zZdCQR=~rV74SjI0Eh_Dm+I>pPi_ikrMjNH#JS-6l^Qr<9{7=mvd%r7?U%NEu;nQ8H 
z)Kw+0FeE4;9VkbbLh6iEP6v-Dhsd7?UbpFJ0`Eyv<mW8kPm1ew!wbFri)|Atweib> zq&*Ik-eq;kRpBDeJaJYz0!B;FQl8)uebL&#$U+05H5e$d7txlKp?)G#eemA2%=P*% zyENMH1w|}7=3)!87xVf;pr!JuF?<y4_|`*q?eyV?eDM4^c7Jet9%|S;mX)>^gmQ0e zU5qDs_4f1j{_g*|eyq2Me|VXaLSOd2|J6t*2&8dVlHTt5`wYbq_QMcGzItm&dSo(F zcVqW^F6s+0ivq^omCBqPCRckt8$|oGQP2%Z*egt+BDG!D=j*|Ccy%I$&cw7h{j4J6 ztbBKDf!y<ZJj?x_w&V)}Xq&rHWuBGBBsq6az2b6vviyLi@Ur7+XYJ+AThOTcPj+-y z3u2g;yCD-5ABU?OFJ`hN$NuV(ah|{K8+S0!<t~G|YgEo!iYxP_F85^OOgREQ=)CM~ z+`xV!2A$nf*YS*N4$t;A2*sLcS`N=27LQKQz@ZsFUy5?Gyxj#}YCSVtIfZ!!#48&! z7d+$ZISanf?9xDuc3!qnOK51?4batRICy2&7L#O^pc13jU?DY6;z#StBzP$(C&8}; z_B1i-KT7LKLJ<5^=Q6a%;$+vt1=zVri+X<+O;dP*c9(h}&z`Tgn(LVPW+tSirG-UA zBxLwS`1xgIWJJWp`-aA<%c?bW6>pEPmsUT}hgY?_>#&j+oclOowW7;CS@KGYJe6Lr z&YzXD*o-GZoT@JV3K9btLe2L#A5kBNZ+sBNNLZww)790J-rygmk`13bU;H}^E_jXD z12hmjfqH!2k7r{Os<OJBK@qQX<px{cuV*XCGkl3Xw(oPizQ;?kwx;N+MoL*w?2CoJ z3L<G|w9hmzu(j1!|7xtQ12v7XXO`8N9^smDT)uFh$@sya3^mxVxsQ&jLFNFVh^o?{ zFcE<cQdZ!q6d++EFfgJ6JE#!BsOZD6>PxWde??L$jC0nPd)VKK`?{_lo0=LG($?28 zQd5q$x3L%h9A;Tr>5ZI-ytkfuOe+%?lVua+!PBv!rIm^4|69p-FQD{z7;X`!Q&R=< z#EB0M6DEP=&U=YOCH`I=Pde*vq|lX_mlX|3MXsx{?dEKNt4ehzrpgqYt?Ji4+1}mk zhoGTqySW~#CJ*y%$0xVh_HBewb*WqM^#lI3LUo33Wra@1S94c*vZECLc5f&HkzniV z;dClAR!;ZxWq0YFK)3t-_O-(muyA=lJgi+s9s?ATVgP}F9z7ZI%bny!rL``(?H+da zhLEQx>QpeY;tT@Qy_1LWap_tS%Zh~x>QA;|TJ5Vzsu~z+@n@*m=DXi#>Kl|;>9B1W zu$F7`3UO;{scNa%=^2=M<!SNRO~I6i7?4~Th!^+U7;d@PN?GCIc~xasVW+eNKFo}> zJl=0kV!7Kn4YpjeI)#69hlFY<l?D5Y!%I<%RPowcqbt>4vbt0QnOYvT_MIv(kotC^ z+rX!Ix>7a!_FWs)IzY*6y4va)d)i^DAr5!tnT(#$zJSRNW9+Z>1ZMe976SWY;%{xC zd|%%#D_VL~+`#d7g&#U>0r^4zop%*&b-+tQSzbe3M?oF9sj;)NwX(AG`>c0>e5UuZ z^ZWF3a*wg6kOQn4J1pvB8H=#T(8<`x+gtb2ek%RFxFIDusnEF4*r-@gT3i}BGUo2? zxkZ#v>hfYTOItc~b1_N^&MQG#3OVzlQcX4@qk)Hr)SG`Z36N8%Y4ClvB+1FC(S6U| zd(XhP=WoaB_#|cH@|IKUDVRvz_GsAawepgzXjIjz%qmnfzpP#oEam;f!bP`>+B!p_ z{)X9+|3zfvSV`%TKukX78$D|zjN8PbZ}07mjsLtm6mo~azz>oyaC-Oy<|8sCRQZ$e zGW7i1_1g96DHRhdEG)cN#7CKuaypZPgq-^GY(RE)=!)B;@qTZuYZGq~+fjwoMmpzZ zJpOEJs5$R2gWB@mk{W5oAc`kD2Rh5c>;r;?1QZ(jcDIJh^C>E>v=n|`cVTjVaA{?6 zD-sm)9={r)1Zr$aDvnM^VD|P|lbqIFJ|Hphvc*S2Qad^`z{}qJ%h&T+B143#<Cw@V zxcn%+U`L~9cV)2XOp?faWtOY!^~Bvwr6E0y#!dbqb5~_Cb9dVZ6nJqS(T-(eqO>f* zvQ%hld>MUs=_9@9HUCD>96!%|8}jQE7|}7+Vv?lzswbe#*7xJZ%{3(?WPN+&d@Zr7 zDXHl5?P<yq>ogLeSNQz=)O@|&@r=nb_}91o86ee^dSC#Ez%w$ymaM*&XAiYgN3NKv z9xwCGNcFZ@S=i^f+WBQfHndcHK0O`P=(e<fJYC|rt5eeI;9Ju1@lb18TashZNV2LF zcV&rgXr9ZpurAZ1`S@2Qtzi)}nh-6OWuWkRzh#Jf>pmNU1T?69zLw<pjL*nCJGgFc z4LhV99nWM3u&bq}eZ2I%Cg${bWH_4_eh!<g98^85%QHlxDjNls%aXi;ixPF^R&}Ye z*<QB^p`K`L0Q4@%4h!;CYs(~CY-g%#0xh$)*ELqRAQBKa-5qw1-mr%k45%7)SJySV zojnhyH;1O+<z6<pI%0LZ+P)7c7XdQ#e?331c&V~p*5-5jmF?CDcmqnjzZXgf_&z^w ze}A4H=WKhwKv@l-$njUAVRw2!((qpLc*0DQ4+#@|y_Szhe0X(!3I?*J9NBLA0&T~1 zqu<<Yj<|ZSh;R%=&19~yA(oY%==iyn*cuqS7@GqJTy3-*v$2&Vry#uJx*?|?AMQdp zA^K*8lrzFYcEoozR3}xnr#-p@G?a*mm0&=%<-YT&xrvC0e-f1#AjzFvT@L-Fe6Ur5 zdvs1s!>sEzwloGdqy{De9tBz6CZ(gEnx1ZMt-rYv{pD-rYIYOR(ovCKzc&QCXSbKz zk0R~7MCh9{b~^`<zR^21&hf#=KB@Rc^NVh}hJvz`idsrm+VwX44puTsYLxL=45@`u zu3qvGHG#B5-^j=S83j##WpRPtvB~ZpCQN#=rn-xjMO#U$w7PnD#ub0MGOV;VFOT0_ zkw>lz5eR78xrm&Hgoc$D-9k!B%1TShOF+xZz{|iufS>=We;9&-yEDI(5U~SZGa)|y z?*3kV^RL&}*NEszAt%tcnQKekmjb^IYx@!E(e=v3S;YL~l(b%VQ(ImsA%=xLKmHMP z`O)#86jU3l%AW5R6sP1Ya?jRORaN-_lFYOW@58gCXs#9lUVh_i`GF~bpjGnR*w{U> z1z05vmCP8&lw!(T3bkepRRu*wDTTXRdtXhqmPY30UvBo{ytHe;wDga2XX0sic#z`p zNGrn9Fi9dSgZ{ujy@sOv-oBpsIR4Ij%V8Sa_OAZ<%`PqUo8LD^m!YHf4Ttr~#Z>~< zaY{PMsEBk(DEQUQ{pX;taUpFw7EUti5wFh!61CM$8Q|t*Kiyue&+lwMbD+<dC8Occ z;g=CnLz<?EJrK6X5bX#>^qJXna&b}V#Xr}4cyQACn+X$fH(R!$S^Ia4hKThC@%COq zf5&sr8&^`By7}Z}RIM=!TG0;BcGTj}mpVIKy`0ZZ9<)Pab@fzp^D=GRi|)0Z4p&<& 
[GIT binary patch data (base85-encoded) omitted: binary file contents, not reproducible as text]
zN+}0&DaSL0v1C^1#O5i*C-=ko-!CnL^Cg#2(QEtYrm<g%q<w*=Z<DND9;t*)dgYb- zPhTfj42tS|8`wF5$fp&T_6-kz6}tW=hl2@PF)13<71dP;Bo8Y5KJd5Ez<<_uv~E9y ziq><W8bvs!K7R%km_JxC{G;H1vIo83!5)VEJ(TBp^mzBhv)$)UO>IL6d9=9Hks<kg zfPH=c!X(<~VALMv?U$D@sr%#pv@b&dTI#2ox_dzC6LSmvV-r!X-o;JF%$#4_;s4Q$ z{tulf3VC-bX(tMxUDBQcUFoz#co0Q=h(fEFx?g_&^l!%4%a<=^XQrC!tDEYo;mOk{ zd;iLvdGju({ssWwJhWEQHZ5kBpyFAp?cdC*<SnG_yY}GO`0{;2WWJQ1jlH)Ih<tft zGb%qhcX&P+mP<L~*yd%SfnWMOg#hj0F<EzvpV^mpH0@tJ%#+Wm4lo1KPtcIV<C7OJ zaMiu%&v&0a-|qq_*+-e5J(M+iyaR*9&p&@Cs%W9%(>^O;XdF;d(lHP2ZfJHLW$)2y zKZl;EgIc;iH-~Ep2`<|M*!K(#RW`R~mR5wPWVnSySbHM%ZS9o}ObB#C{?j;_NGp_7 z+Lc5GK`QG`F6T)m??s~;pb*qxl+<UJJ^<m>zX$5~?%uYsG(XSFO-)TjMn+0bK}JDA zPEJlnNlC%Q$*!rcT3=J~=kL|n{1(R8T)wdxmeX~?ItyHls(Uq|sHJ;U`R?oYRh>6w z%mQ?+?A${lk_(Dk`}%%IescdF0#l0%5G2m7uFQP}{AWwE{owvV;4yX`v>L-L2H=BW zKYjJ;$;($yU%Z5h`7f$s-hKJ%*Ot#elfQ5K(LR6Y)sJsy7FT3cod`J8q%5P0F3(-+ zTkINLS=iiK+1dcL$2gp+uV>~QXF2WoJnBw>4d}(QaoKsu$XExT024=7EfX^#1H>t- z99E+c8V)fs76EcrAqrMu3N~R%HevpYDpJZivZ{J<@kxJo+UI9yWTY;#u`(+u$hx^W zrX<ES*Vn*{%Zu}E&Gnh7Nk~sOLp>c{ZZ0lPj>6omKl4IV_2r3bh+eO)0`@u{1qz5t z5P3fB5GqOgu8}3sd1O+(oS~hK2P!9DTHkneVjNd0@<>-Rg`QV{y3D%1wtx;=S8v_g zk62L(|8PH0-MRl{7p?7Iqy7>N`KwpYKiAhUUVmHRMa;XX9`WMEn-3q}{`Be1hmTNS z{rKV4`yXGv`|<g^chBCuef0F%?u+No-@J}aDq_E2PABc)mef?)y-?r3I5@M0_Vz27 z1SY1NA7|p|^vcRjutsaE)2l1}H^weq?#wE$3QbA(2#>b%K<ZiBDH)mzxfcs-*-=uE z*;trZm>Qa!7+|WIk)E}MDGwJ1I~Nb^^WPVEpc+)2iHV`DsqTpBE+2k0GSEGJ`gBO3 z-=DK@J5OGulwZX}uPIeS!lsF+6s1=kxwwt2S@@m3SA#RR^<3k0tn3iM;R(3~%{{%C zTfyUZvsk+WCT_jCvNFHA3a$p_;ZZYW_7(mCCgqNC?_fgELFm(i!j6&gZwP<+`t7T? zzgBn!9^pmwyKkPoeA&^_R$W<EU0GUHQCeAEQdt4Dw7jgitgNX2>cGP%&%o`x|LOhA z;xfOaIRv&Eh>ZN!86dx7bY*^heP8Wy&6@kBvzL~Yq-CUKWTrz+Nlgihj`H&Nw?`mM z?d;5*T}_<b)Qn6Fe3Dg65uEI-SWk+ZvppdteL>xoUw-*pV*l#ZOFBAQzVkf5^D#f- zfp9)^_DpVe`k%0SLz64H^<%(%*Z7uGWUaVWHbk$|)>)^8Onnk+c3-_KZJScE@HBHl z_(sR)R8;jJNKuae7M;h4JZ5Ua;`;hGLF>MU2jCxKfCoQ=&c^%-{EL@|RsA6ChwJNi z?|1h0m>3y|h>5^PepM+c&YU?TCoBEx!%xt~?mT+5^WxdP-5t~#b5<p8O`rJ6k?P*X zwxQ*lD{J#>D-h3Oj6I&837vXq{01W<0|^Q7;jcS;_Kc9Is14Fz)6CMu)z{SCivtJq z9<C0j2+7q<kS|_B(DC<MCoXwWf|G;ow|+`XU5%WAd~s?1e|z^zn?@?T=OKWFs^*%b z;98~O*TkjjOCf33&^x{P=y_;XvznPLcv1A6;a1d~;op7(0o2kDa5c-zsHu``s6@@m z))vOq{F<ftqVQm89-(R92ma{+4|AAm%p<1mzk;c6fBg7(cbA5S+TX|P*^|Asm8Jdq z_RVV<>5Ia`g7^3Kw(sA&_ZT(A+<&t7^4*)5<YFpbEiy3+r`Y<cp2fN=i`QpY=hi;Y zOu)BnYH^`=aDbbOv$w1L_V(tlU$?sa_U#*aSt%}F9&0Z@Ju6#nYbO~kOEy-fy`4wf zTkBA_Hr6l|eu1r(*%=}-W+Bz7n-f2M`kP68U-xAKg0oot`KO<L%Fj;sa(CU{#&VbD z=g*#kI9QmO{P@55lb^qOn{{cV{l;e3<gN7DQ89}YRb+#zPb2vSM<KnSt-V(_Z|}K8 z6zE#pdxS<N<)flku&GP8Z_lo;gKM4N+FID&29gimd2-?A`WnX5Vk)#>*xFjYb$jK` z-NnrfQ25(ByN_SJx&P!T)F*F$+<X1*J}RB~+0vlGRdCrKK7yxx|I?=*KmGjiK>g|G zpWl7>2yDX>P(iE}8oYW3Ek5l%dkV}a#YG?f`Kro_{CvFqLxVS$=5O790N;+j7Vz-h z$9H|#Mmfcd&z#pc52>i=UVwO~Z(?P3WoZF|?%TKLH#eu2mk#?JcoCTA<>3Z&4}YD8 zsxk`)yOF)Ku9dC2ne};DO(G&9R0;!KQ2(MUJRu??Bq66_5K!pu?Zw<F^lwxQ^>vt; z7_q)(Yh?jrD*+(^$O6{iB`+(@&d#>DvheOlEPrfwXz+@+yQ`YA;>8Q%aN?lC6HRs1 zAb+39@zD<-K49r(pMD-1=&LL(EHBQlC@H8aD@;#Kckzkx3rmbl$TIgx5Vt8%vP<O@ zk|n2MWMpCy6BSX{)YC9?wQ%zah)v9|uDv!jT~u7CuCB(<cOK5Hh^Vlkp+S99Jy5+5 z007^0r6)ZjEjB(DYD`>AY+OuyLR@@ed}3lkK|$`q#s(-nyrQTe54#Opl97?}^YdBV z+<gB2$4BU-IspG@cek<rQbArey5<z-@7L_=iqiGXO<=yYxuK{a7iw`q9_9(E=I4}` z6mD#+zyJ6Xbe5ebd$^caS5bl^oSB<_<?5A<JGUP`L1phz!_Dp<be7S{$@3y=1gr`w zPRTjVQ<wUeD_gHBt0=3eD0z6g!LOcNTAbb7JUGUoneXT2;lj#1oE=(vdL<n_O;Zak zGfNRw0}>wT?+7XX-|x@-=l`7gpEJbYeXb<`I88)AL4S^4F}tw(8#eZKcS;L$%}tDC zr6t8gh52}SB*aB!q%N8n8<rI2K7RBND{Szo=K2~q>EG0{qP)85axk0FCM`K$K~6?k z5Y8=+<VA58N4tj)e$V1`c6L_49_w55^mJU@oC=Ea(5c&7u-wTlAvO{s-{O+OyZ68P zJC(UvY0S)wuw^wBrQiVHjMOCfv2Yw<pZ&Z&73F1FS(s^Qsa+iGzS-crcW)UO=y|!h 
zpf|}#OTx&Gu9EOenVIOR&oMD^^3&4NTALe}7iC|nD9TPxHqzIjrKREG;fc-8&#$WH z=H{fQKWA!WP?(!pUQ&>rlAx}tOhrX0E+IC%wuU+Np!`-=mZW5)axzk1z5-R@97#z^ zP*YPWD#=5D!^6!9V_YeE&jbCCq9Q`D9Ur9Us}CP{pFex?_TA+4Bo!5vxR{8X3}#m) zF%@>6l9Iy1%@tsVtw96XuNx@HNwcytBU~IYx&hyH>eLVE$q85?{Qid@sA;G<IM}b> z7~XyP^8U`wlQ-|C=Vm1(FVN6ZUyu-SaP*}(uS+g&=@4C4-M18-UTJ6N04@bi0UXSb z(BQ>ew=h?hN2Vr(galxhpkk#(OGE9u@4qJ@C1qe_1gRIkC`Cy{DI+Z<b@2iiV0gq- zSs7{Y`NU*Y>=*StLNX5;1AQGWIJ3e+0+yyGiE&Xm87aUmJjTaF*jSp22n*8D(Ezbq zo9o|vb!KW3F)^{U<OTGxLRDFGg<}O96yQG(IKc<~ygdr@vdW6{lH;RQl@utcD15x! ze=G9<Hk{DIuRnYG1dfcTsEC1)9^jFcmVCHPXL~a#DM>(}@ALsvhxBkeb?OvQ`uy25 zTwnU=(L+Z&8~Ch=k-@>&-@JJPikBE0b@(INn(F`kkAE;R(oc<#eDl`S_$U=6g|LVS z6EhPlE7P4@+ut+*?+VAw*~JOe9>x5vTLy*(tSn52zvaRGd$3pEfB(Jd*E@zO4z|{W zM1<YFUC-aXd-?AD$c+)`Zc7WZhg<TW=dmz1y*YmqR|5wdD^n9A&}tTDCU6wcLvLd< zki*vopkG5{;Gv_V_2HAJFF$;&t||w2L4S^}s_gR$j=ug;Rw4bf+*;aRIVD{S?PFW_ zU%Y+x<S`5ra4w9D4CXdAV061aJ_empTSKk3stjwsVt5Gkb<XxS;AgZgtOdkz26~<d zM*<Q`J|&yrl&X(E{RFRofI~+BQ~+{3ybbpE0Knjin;R}2d>uLnoO%$w!yl0l7x}OM z{%;V6!*7A2o<4m#IU)A=z+)Ioh>1M>*818CXt<ob48q+7B6_TwU0q&;gJbVt(?8Jj z?)^K|=#vx=R6sa(oNw=HZzdupGB(uzb$?#J2JM2Mf4DJ>mE`241AW-<F9gHi4?q0C z$UqO!Vr>AY@*MrS)~=4xnJLhDLn8w=R_4Rac6T1Jv$423+2h)0=*tokVh~6^fBW{v zjS-OO#f7=U&ET|ZYN*xKUxEfp*cw2WS65RR86N?`2Ms)8-bG_AKwel}xO!{5v%3?X zg8;sK`PD41-Mwe3+U`WGN@7MK;B(45=SG(9!|VP&p1@&IY4P;R^0l#17&4OLqw#-T zYf~MZaV1@SK2b3)9L&4BIGiCS=a#dG&TV@40mVFwgXhlC%}k9SdEQ|-fc+;SAsM{V zd%!b9fj$6`huec&5EM9%YilZs@^t5Dcb^>hp81a+K7jE$A6v>1#XLEgnz~ACd^8m` zRdPZMRzhGu4UP0$+nd4C-@kVkx(P0)gteq3#GF2TDmN?rNao=bKf+-JKP!)$Yey^2 zTj_3Xtb>3%yE=Cd_CvU8V5pCkdDI0kywgY^(4eKG1*r#DKRi5yi+Mr*^WZ(AVj^kj zXqFeTq_&*wZ6UfTC@usCf>m?Ag0Qg`4y=EmU*F&ra9m)PaJ57RW)iX}@M-%cRgaWh zo^2mqL(7Gsu&~h3<mABc5RBc&VICq@Q5iWdegQ5{b{x4%#~%nt7(}&v6Uv{y`7y}P z8}@KxZRPl<UPzE1Ozclz@|<{>$6v1Y_Ye~krKcuA^mLr|D@*e*%7BdE(0MX#ZH=7V zEVMXaVgBvgHw+BtpiOQ4CD?a27f0CI^Jm}wncWBX?!xc{10Mt_hc|LTTohFC6?#wu z=HZ89jpm?OTbQyiGng40;QBe>l$e+pEKH5Dz6<77UO}$BwsvfO&cFaC^98w?Frc63 z<!;4xc@72|(7=SG_{H^=fvZ<=GB3afnECp7!_Qe+S~%Rm*})F1bz*V?10%h)h1u1C zzTI6cf7E7eWvQpLEj%O;POF`heN1c=^xUq?t=}{o9li#~5RUMriu}Cd7QmiX);S=j zyS#J0Wq7@+s*0R~Jg2arv%e3#jjg4{)WnTXKYhgdb>IUd!-9!Ph<SxY*m-$zG4F|R z`tCF#C662;qHuEMKAabbn+`XNkBNlitf8jz3GIIlmc{v5pd~gY5`#U&hTuQ3G7q7L z<OOjUf2+%ju|fzRG&3_=-B|tc7#Elz9ER_0s;|byysoZRVQ~R2=3l*f1p%XmhFV2c z`P=vJdAK;06yy#!0|?I&5YW)kiV6!Jez&Qvnu3C&uC@x5zJC1*R|X?8EC`~x(}d(O zQg3gqV;eGJPoFt+j)DHn*)vUbSUMLk4;LT*`GoYe{*mkY270)dXJKK27>}5Q1O{tt zGc=~*h`At~`mbK;?&-k8JRptq@x;YE#P0%v{Ba4f5bnV+diLzuoXph2%?xyP&YU>| z#6m1-V{c<)Yl(HYRh1P92ni{vslZ+Q{PWLA>6H}R8iayI79r(Dm*+}a#<{q-t?jI; z8|rzuxuDO&S;Lw?G1SvJb@~(u2{9aGZUG@S9v)oGd$~FP&#AK{9Fnf#g$e0J@R7FW zhHppAPd`D>0ecQ-Bn6u{76%{%__yzDf6;kr5IwB6mXL^uhL#4RB&<6F{!m6ns<*Fe z@9Ewjz`UMzaY-R?h}GLb7lWgxrlwL_Q93m}4x7r$!eW!8Bqf-b8D(Upa9z{`%q+}} zmsS_|o;-mcj&<<WQdc=mKuko(M@mlF(N=%Bfu6P|upuii2e^8<yJ9`UENm?PVWBPE zU3z-DSm6N+1RD{T5a-}v$GW#`E6eaRP0WmH8|&KJn{e566!XG@zP?^~bY4)9AADnd zLv4CS3fM@jM_5x`6?%l6f}E0yBEr*MSw%@sRtoFaF)%SoN=w3U(A|L=1#aJYpr+$N z$f_)$7Z_JLQqnpRlTn<OlT%t<=@aP3#>R4MdkagVKtqj$jEseYgNdD;PehD^_dFiv z|MA0FB323Kup(`JQ$|L5Tp^*fj3i+1d?-f;CCJ|g#+|zQnm0eb$LKr|b-0<Bs4z1N z6Kn?t@sHT<D+NxHpYMEmRoV9K&0{j(Sci*wQVjFdG}uib#@nc=DVJ52HlXC1eEh~R zR_0k)SyWV&n3)*AiM%j{kC#VQMyjW;^Y)$FG&q>o)7CgcNJ7daLP1G=xuq5>9u$<6 z%4#a)6cjFw_E--c6AQDazh7-@tFEp#R^|a|h{^#T9$p@-DGN9(@G}hz^-3$to15zJ zFfV*O=3CoaDyzyUQCR})%!9sZ>1Y|78Ha?2LX4oVqkXs;bWb*RHYF7$a6Si}s<~|_ z5xoqvqFZX+cva70VZ%U4O?5<U6bA>}Eo^BbZB2D@N(yd%eyHNIay$ZpxS02L2j<TZ zFpJs+7hI5(0dJ1=SV6cgFE0z4i8U=yQCbK)91#@;QXdx`fy>*7i3+i@vM4JnFfuY= z<p&yYadYP8=Pa$v|1mmWS_E+}bYvKXA?`R>!9x-f5@=|s0`s}q>9EgOv%XRnCD_^7 
zjEwYIn6Wbtn-UQbDz7SET$)4aJT`Bmqosb9h?JT~k(`pYv@jcMZkdvb%FxUdoR}*P z<{6op5J<18#s(c6%%e^d1-Xi<vZ$yCR-MN*(AC!|EGcSiti{i~&r!_B#mDqs>AG^I zi;4=nssQtJ=jg2MtOJ4rs%mR4UX*aKvBJhY8y7d1p{Ws^vgZ1)a)2=jMdVy+%u0yB zoSw3d`Lg!$wA=!AcGm5!4P3`iR8mq#Nm<j_Oh^KEuF}o<yVC?`nM5pov(KOBSCp5* zl?Kt&R0GF_RfSrc>VbK850@JgBT*4yxMEm9nv;_qbOv<(^{bbM8-NSt;^IutNWD2b zb>!rqqnIbv(bXy~F9E9nVh&hxa&fS+fuyiv9$DF0d3m|b%uT@CBqzo}zrgCpfO+^J zcv0{$Seb_i1@<{RFMDG01}^4xwbTiS$<GOD6O&V?CSqwIBX#i{{W)6~XIeU17e_lB z%rm-pdQ{Zc!|#Ie9T(Ety4q4w7x6K#r(IB7*wAq42%Yyw($e8!ULZa(cJ#*3%Gxpw zjuaw9fAkFW-oD<&WySMrE1;Qn)|Oaxo{yi;#=(w+l%&1oTVGvW*G<f(LUzH%A*R0M z@?25tl#;UQ`syN9=Ba3C6x7rWEUm3PkP@=8c$okG4B;6DVN>sPZXRAW6-8Xk8yM)q zPr&MJU`T=;c5t-q>+g;T3&F)aAkEFqWo=`Dhj{=jIW=i&dhD3Y<MB4yI+~#2^UHIM z&GlYBUPdN{I(pima-XZ7w!WdBql;r@O=U__JR<`=)?EVTVd!yiuwli;Jah_9j@0y& zk&&yo&c2S88X*Y<ouCdODOE;F92TTSguv4}dANfxVm-nj7r?x;hkHd`EiliHi+OU8 zab1Xsa4`=@3=TqmQ9(l^iuvW=&pbacKRGq_@bM!&%rh|32ZaW<v^76{{aQg@_B;=_ zmzyhC1%#6WR5uq#A1`-0IvOxzAoZ}F7tgU=3Y3*oKT9J-EM#gCR#nh8Thcah`@s&j zph=CIj#gb)&&n0y92h34pn#isC$RHp7=(;GQn`3|)p0Nn-Uh~Z1{}<{qM5g{wrKBc z3l9s%!#ocUx1GH;KIS<&5|b0gCr6K*@OA|A<YYQJn&p+HvkTKby`4!Z3BLY5-bfEG zO!e`EN2IS;Tzo9_(~Ptv1{};wUKHmB<{fQWa5K-r$)1oDfAwk~*jGHv)9|UEC8o$s zjmH|I0`o3jp17Guc^hZY`MNq3^SHbXIhme;o(%f(I<5v7oo{SBO6UE&k7ORCX?J%A zb@dtBRS699!C}Geo$b5Np9cl{!O3N0Vu1G4)KmZo7`d|q1mAuC9hgj@zUy)e))Or* zDoD@BM8~grhFe?SJ~6pwtn~8D`K`x?m{+Btqt!GpuyOZ5ghWUwD&oEl>vWoc1aw~C zErp94C-dO7j%41_%B-cWDJ&!qAM?E2pqUU4;$a^685<uyCi8gwJTPBTRet}`gU5S2 zvx_%}N3ISH4Gdi$7#iwFJzgKUK0F9t^k{cCFFOO`SzJ1gVIG&xLjw*Db_nSD2YP7n zFi%QB$*p{bh&(eb0W0$$;|QepITZ7@xEg$BUI(|gK{GEaD}A)LIR^940Kd1Hni=2O z*};{eMKjNUVgAm8d-r!9*EThH!hmaPte_~*%Em&9Ug=|RW7*r){t@@RvQeoIRwh?( z&x^`7giP|BYTgM|BRLHdS0*+NngP&x8ag^n14A2kPef>>jFJ*3I~%TRQfCNBPtyzO zxhCOZ9;NffM=@_{*4)+<`bRK7J~8~y>O6+|%ChdR%bk~7+glnT=s?#xs4eKnu8ua4 zDQI6@kPXbg!sBi5>O4CKTXbyHmHux0%#(8}oF*j0%{-Hv5As;dgP&(*`5iisPVA5! zpZO#G{N(f)Fpn#W`I`CrcOTyG>g)Ck@C62lNr>Tig2{QZ_v>Z%$B!N!z9zUI%OH@R zml`h_S@{#ND)Q?1hZpu|)Q-1ZTYmTBhXc&hY8e>Xx_i2zm{-QnJa`)n^LYHc;ZZto zae#TOxk?Q4_72v}c)d+RQo`|>Z>qz0O{%QC1o{`m*D#n;QBfYOl#~=CB*d6i;TY!e z>pTlyoj;0sbzq*9UG6jycIL&7VqWZP=JELXBfQNQop1bum<Q3o^{c;P{^9+X@7`x+ zrol01Wo0fc{Pu3}{k>i6?5vHoRe1V)Nmb>*P!B6RFA=>IjjS^={c>9Ectz*T)0c0* zcpEw`Lql5+FZZx06!W;|+qgQOB_cb^D5~e0jE8yh<1xSQZSXJ;I)9{}ADb9Cn)ziE z^H`?@e96zF@-{$wfUo!7?#{~c!hYS{Seu%-0Vh2_C*w%wxsGD~I66;6N<qRVeH8OP z=WzIW@nbSifo5Lzk752uZ-Zuj7q6fH%>0Ah$J@8IL26W$u>jWA+yD~v!>J#zPCDA} z@5PlBP3>*x&vRMW_z^M5QAyc*q_(Hj+^FihiG_LPBbgV`N8r_Yvg7J}SV+)6uk*)Y z9y8wtH}j}LyrircS5^Ub0632f4}SXMImXYw`X`u=iHip2Y4Ca*U>>iZXLA3n^LUv5 zoVQ{7{W0tp=8wo#3LUBQ35ju2Xm5kp&;J_3=4PkEHy)m({owvRc%6)#9EM}8tN8cx zw8fQ`jqR-v?LK~ZCoZ|1m_>n9+|nkp2C(m*yrrU~NJT@fWoTq~l+JtXGw+s+KX3De zdE4XYJf6G_9_Gb<i_UMLGjFw3W%!scEyBe-<~ppEm0AB_Z$>K4yv-k?^SWqnb0qU_ zzQ}X*=Z>TEzdMGd_(Q$TF_=f`d|OA`^S5uE9qr&)9d2f6W<o?vBrYKi2tmMwYY_a* zd|NAeofK-$QgKBa6}QIO^SXM-{Pemh&yZ9BAz@W*ExY6Dyr_|98lL?8fn25SQ96H| z81@hFHZNX0r>3D!#x{TKz;#%h^KI_j+Jd1L+Emt5qOM8(Ugm{`_|Ko`K{MY4I*)aK zFw7JG0Xl!wH7WArdz)Ik^C|`L&#OGhJg(WK2fR%in0atDW=4isewvsF^mI9SSyCL# zi|^}v)jso|*9LCic_1WdPQan2?VXoeHx*mlYi8$c<>G>$`OiAfBx>fJaRl@D@-}s! 
znQv_j55?(i#E#0_pmm=8n9RR_{~o?5G{_%Yc)LW3nQw#Ed8})f=I3T0FxS)5E~_j{ zOo%<w&*REf0wMhT=c6&+2JbZ~Vpb_M^YQ-_^WbfC@OYbJ#jyFs1sI+GCz$7tPdF~~ zZ5^$^ypp24ot4GmW<1=StZb|rS{e|7wPKqY{;Qv_s%&U&;o<&jJ?zfjQ)wmpvs{|; z4oUG9qw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB{X7)l&>Y)@^T&@K zLQJHtu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbgbh2Y%W_t1bIo4AK zgv7?j3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp#cti~Qc=rfz6BPPc z=am)ZZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm4iN)FHf33dq?oen z2^AxLiMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS!Jvu*+X8sSjCiM$? zewU22<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAHz>%RrtkY?Dc(`C} z0z2{Jdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTCGToz?7vMiHA_6h2 z7x;M?G4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PFu#0u=z<vx{R8ihH zd|g0*Z*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn`C8GF(lq?^%neD5~ zi$D%YA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9Uf`k|xLl_6?>FHj- z#PVHw+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7&J_?;hOG_guIbnEs zfEMrE15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky()hP^g^NR}V>ub<U zys<DZEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibwp1yvgs32!!X?D1o zjPylXI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U%lItE31<J~*c&<qi zu}XNxl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUaM>2o(d>hdD<1t@e zQb<fpbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K;-$F|{87wOrit@7H zoI>w#vd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr)w{gKc-)6rfQ&3P~ zJxv3B-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ;3vQP@jVkGgCEl4 zaI>0<Vt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC=seaDxxKjt6oZ^e z^emV51@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7Lt){9B*fTQHA6{4i z`w0v~9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO})(o**i@agJm|M=<W zsEAMk0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3XNk$FcvT5VD6r3~ zM9Db57*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T$;Z<TYs>Ae4Xkt2 z59&NHe`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t&b!}xC*Db}w59s{$ zsR>b0;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U#M8PE;oKjKRKFz|) zE-s2?DJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG|(=nL;`R8BCOA7%! 
zXM5Y@1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl!TozM^n)b^d+hGz zR$E`QzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H&z;6G7l|BQoeGKz zSGLx{{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXfXJ(~?L!Q9480{DI z-4x*<WTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~UoW>-*l_cz$1=Fgud zq&Y7aky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z4A(lT<1>HE7#7;x zfBewg)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W;-MhxdS_%qsE>8Aw z9OvZ#U32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h*i-~-mdq95<>q<0q zgD<&C95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{8Jz1ZY?`^Xxen)u zfZ*)CySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*CtBNHP4jCEy%j+Vy% z_h0|z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAzi#G8-=^Y`ty-VA_ zE*tmuaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=;j&`~EIrkquh)1o5 zrNMenC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^71o|QSd%M2*V>+*1 zy}W+4KRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsuNwhT70Fv&Gw#E6` zt@YLQwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_?Ng;Jn0=%?~qrIDp zBUDd>b4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^LnaoK&VvC4TZ{BS zpq5lPJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv2gKaO2m&8w7A7ri zO|YO1%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA}gfol`4*L~$2DQ*q zNFXLIdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XXIzBcyGzfj;0y_A~ zqr=yrXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8<vcb_|2xI^M_TB<4 zuB=-Ve(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{?!QI_GxVr@jnuNGp zM>^?r&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?kw4<V=V2(EiPhs9| z2dc9J><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhNpl!~{;=<C(Gpa@g zC+7S6J24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y=sK(p2Ero00N>`8 z`gcG45EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=11%b1FVB7{Bmb|*W zP*_q3(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4W&%pAsHAXhedYU) zV&?JFCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$XNY)97v4g{X-@bbF z{{1@;4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{=dV(9iV>io=UAy<} z-NVAl%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`rpj1raqS!?-s1KnT zIukG45Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4rQuG=jx|fre<>TXp zNS0|ln+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{bR)ZK8t}8GkD8Ho8 zKgb^<Ah=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd)Avrq=iwYk)av1*0 zb>IM8AUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{RzMpG13p*=22MfnR zaTTMm^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw&p|Y|f;K#_sz|Eaj zTw1iUhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y=|5JGmzRx<4*&g_ zM`--jw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A!OC8P@00S)H>*rlu zS~xp5{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u5u7g8&{!WBJ~l48 zv+MGsM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3!@?Lo8OvP<g?4cA z!^RzKT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI*fco>c};DMy^C{E z)vvvK0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@&^Rviw+)tlQ%uFPu zCX*cO)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ayw=f6HGkN~StGC}R zE-&Qd=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW*d>Dp!V`*7wK(eri zums##MFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^3yY2>I@oFJ;*`}? 
zrR8Kr#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?*rgh_pejI$lYzNQo z;XbLXV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@a06i@Ja<5ag%!cW zi=JPYH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}aB)PQ$x?pKVh>D4r zm>B-ykIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y2rw^ikF<;w;Pace zHvVzuC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP4$3M@CMHKgTzwL# zXiov>@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-UaB3&liKG0dPMyCX zA|WZOtfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B|Eu#JC@3kAoE+Rd z=+0CZf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2Pj63>1BqZsFe8|P zS(B`(ZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EEvr|>o6;Ux!&=i?Q zc7UGS(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0^mX-&49u+vcFv$t z$d)7<Qwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9NHLzGkb#*BPMNui~ z3t|@q#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k`^DzB<kt?Pc{5+W zdOg2*rL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;&+@_(@Gm21&ereeKgM?!cBH7(h1o z2lxUuC^Rx+t}gINrMdX|`{Wnqws*AT732m4`hBWEMXS-sAo&W53zk<F|7g+^IPRk- z4*`UY&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GMg4VXC#ih9yFJJsp^Kjf3-@b%8UEP<f zs>^e7vjG2>lH#F3aAa79Mr!M-28Md?+`IF$x$9vSZbf%*XG?2+R$hU<XRHdIzF!c_ zCu4C&oqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiAscB%1@dSGZ3d1`gJ)^dzsl2W>B`Y&J zAt5RuJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPWSPV`Z100MjU@`hQEQEEeY^*@7K7sz= zJ2?gUG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi1(M&)3Haj0&AYcpCPr%NYtpkaV&h|? zq9fwsV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1=+1!TZ`|H^@p?1!$fIY^)^4m1jSMuk zHkDUb7M2zv$)yVO!OL<A@`@`;K~OEMt}NfUF+MZZ(Au1llNAvY<s0bF@b?V}4US2S zPtVQCEXYqxOO3gdfLwk&a@BC{VCc2CcC`;rPoi^@4~&mBcea;QRi<QT1w=-=crk2T zD5kb{dS(PoeM1$jt}+%UrmQKV?Hm}J5fYQeb6#hUC>}#ereA6`BqvDY$ic~FshoXt zbYxGvZpWF}b|$v1Niwl*dt%$RCllMYHL-2mwr>Bvd)E5SS@)bj&h75iy}PRRTKnDI z^;Erj-sHfC%sDOWtHn&M941XoT)}2zV1OSP1-WiSpqMWe7ZWKBJ}}RJ-OJfx=cJxO zV2*f7mp3~tGw5q^L2Ze-GRC52dU27Ji?7MY8hw8F;4!5w_UqNyIyJq>bdc~X2KhHb z>~i<p<xHa3wv!SIDp7sACl8?Qnu~KDcB0Qd_u#4feQvBW^=o$^vi<2&P-*)!C&1K{ z!n8_yQZExsdCT{Cd*N(J?;eK}q~*PMilADr8#!y?hraPGCHJ+}^ni4qs5A*Z%5E$b z_a*xDDxG1A*6?*eSuMimd|%p+J4BfFByD+>IX+hC?~99b3-k6bW{c~<i195Bb`D%r zOj0^J=hOSrez900m~<-G8M$c(4@i$*<Q-=^nZP|#{7xOZi^^zgI_u0-fBmBr43KMc zQ)$*(1emvWL_&bJe@W<1sMpZf5HTpTLsQl2>J7usoU7LfTq}+$ChL#ABko@vcia5n zHCm`Qeoft9$1~Au9I1<Hn5tEibadmilT)<XX=&M=4$cM^-tZH*^)7dZ>$fNwx>{zQ zGD^bE!a6#Rq}#1lIfV3I%nlb9!(`3P-W`uFhmB7Nva-Kqc$k^_Ack$vkB6yfVkIT+ zqN8$OAy(Twesgi2X=z$vr-xZ$SuBe#HN{4cS4!$rop;=3I5f50OBa`DN*bH^^t-ui z{O#Evox-}BaJ2>bP6X!X286a&xd8tt4c*o@`?JG2Gr8`s*<WcyQo5b)@Lkv2GhmEy z?InC}@7FhTLr9k(jN7Q1rmBKpZ`(6_P>2Kq0nyjh0%-?Fp@cr*(*fHctq_)ka4A9~ z?@)h&i}<~s0hOJBSPvhb<~vVM0uVBZ^UEQ7t63d?vQShd@(yn>zkNG${?M2_yF?qf zLSy2ovZ<D;A(~y^5GKu^1F~+MdHfUuS;((v$*-RqPW%{x$Y~k5dB0`Wq7Z|RO6f=l z2msn%6G)B?M2o&dDxeXdLLh0XYw5K4WTt7sjQsxQ1}1Sw^>Jf*xhHo5s@LK5{{9k3 zAztLUj8M*?+x;nFQ`&6nOP02p!um)2uvcu{sdl_0Xq<K=mT<$vZy!obskJw)VxtYB z7?`Za+{h`wCGa%DemB&c>{Ca>GH#Xq;=td#a?<qqx!V>}(9t(tpd33b4h+erp-xc1 zH;Td>R9}Hi*3s~t=Bpj77UCm{Fykuht=uoT%CZ2GU&L>zt#j7{d2JZHAHb&mysU5a zI)(-d+#i39rA*$aRPT=+!GpRfYcpu-qA<mxGDV^@MQ<_~c8E>ur`9l~Q>%;x=?fJp zg8lfxPza<hX}G^9X}$ZM=F~*4{DY+^7n|dCXb5I>RJ)URB0Oc#v}o`!9WuT;Eg9%9 zq~jv(bj)$RS$$m{YN){#ka_<QT_&}d_X)YnCRg%&2uTyZm{?-~C2$e*jLtTe@QJ{K zo`{8`@B=02eV<6D!Nm=e($#|6*VE9*($yAWCS{p3u`%WSYI#2>s2%@8kd|e2a@tp0 z`o6ypF+edB*nh$2O*_mAMk1lJC9b|iMq7Ngm}pyX1=+eUe!ECjjE27~OL<qK5Y_2m zElfR29lF`&-3&k9T~o2?ffhGIW{o{kx(#ZM$sUc-C)V3%fQ>SyLEH1Wm2|@Fq3Xs1 z1r?HQ>x37lV&X`1znvcIc{#dR@s$0`+%m8B!@<%;RoP2h-AP$pnpK_Ckf{cu3FwE6 zl9pjaCDGISusH3R2pX1N7I?RK?1>%TN`QKKNM|S-d;d-H_(0)dRs_5CYk%hMh;lXU zJ0g8k+E1AC)l44BZ8cjT$oKQx+525km}fmnzqk5|oa_W?XE=mDe6Xr8$*~|@@dw1F zLeDg=1ZTQ12X!!H)UVyc5wWeVhw$ok3hT<B^nKIi2;pgUS&9OsV~qFXg+Eocyn)8i z5?=$2HlJ8D#nOv}(cEK)Nk(O8Lib)yJZZtD*ozE=p3g6(N6uUs_L>Gz_dN_Wg7}0i 
zEsc%E(6C3n(S-1-06n6u$@{0KC+Fs;C!%ccjha<H&S?&IZgw6PZ)bb|G^mU;G8Ze& zeNR6=c^rT!E8;UlVTbSQn0t@FP6ky3{}n_6q$XJVRDmn_H6o#3>PCy_Rejk?-}N}} z_IS|n-Cd5Q56jGp+!a{jJ`ax&68RBk@OSdS8hhhsErs<cYka3`(<4^sKa-__YBuZM zkDBRdy;RnL_LcK4)flYMhslgae3kB?M<&N<dng*tXT5XQ6`v{x{fGiM3QErA*igzP z6gB)m$dQkte@tP6XLG>tP>LG!72Z@^!ns6+vS$g}6>+|Sga0KjLQ4_MK)ij31pMx3 z<7}^E_PxNtqRsh6CIABfhpZnPD}TDI-fVR>F)^_*!R~!;wDPNM^c{xuxdW2KhfTV} zZHv^K*(7s+g?;%pWGk<sUCUkh7P$g=^LCIW9IrkpB)|~D06zF2ya^y)78Wq#J_bHc zm!nrbFLEjEA)z!{nvSD1d-aYVMyT3g!WX<)hq7qid6h-EHwrS$TzMV_dpGy1AN86I zsFti5YL#_HS$<i*`le9XHJ_VzZ78b9kOW-1;SX1v`35IVx?ivnR_dj+On(sfMk;ry z&GCE|iNpm3p!wyDI=Kq1zhuU)Y_asnz#LH%_`)ZF#9t8;<5U%;XZVC~h5>IOham+8 zB3}T3n*F4LrmC2ghsM%EvIbDvq?A;1%Q7uwIton8s*0)rhv(mfFZIJKWsbjVwv?Zt zwX^x&q~E5xgT62sn)AGs1{psNP&@PCe{B1mrd7hlO5$*42RjGt^a>Ztt2iDsD{b0t zKxYfjDar8hA$1m^CyNxBGWO+<@cS>ax^{FX()UWr4n6;_l5^158q7~Msoc|MUQ{fa z;ryA2#s71Wq{IEa%}j*pO`_4ej@?o=yzPFxV0QDLzd&K7?O{gNl|~>ymCH=9-Op#+ zV#7~~Q-IM&=gW5p@*S341nEd$-@xMJNB%REFkz;y^6LD?2a5_zEu#P@)8v$|;Zzo1 zds=WX5auG4G&Sp5E74_$c$}RT1h>bRw!Auxh7fSItcc!eXru&-Dc4v;CkLmP1AXVY zfC%8@^fh91I6Xmrv}J~K3Oc3N`FYuo2#nF#0f~DV^5uKy<nLEzj=l00;gP%&fYsz+ z<4xR2V<u2wQ-ZxaR)^=L{R8*vYMFkyvs0!@^iuYB-K)hy{QmU7YUc+_L+#L?Kau8X z)bYDoAt?l1UT0Ww?Ruk{{e|l3x&yHfjqudgc$#M&4rBBDQ5Qkf&D=5@R5N~A8cTeg z@JrnV9beah9qz*#fjt|{2p#HMEFK^fAQ#;H6b+p(OuC-Vpr4`)hTUC=gMY{8H%B(- zhImNG`DvOfa!Y-^yl58ke~Ts7*43Otj9;y{qNeO5#>O`1wjxh6GqZRfUlRk~m-Kuq zqX#NBJpnDCA2l^?Fyg|{kFO8eVR%KkL9s-H(bfVnr#Vd{?^D?fRrF$FqG5VyjkQf4 zA0LS3#WmrNQ|q7CgeTq~i<MIekgV^oI7)*I6SUUk?P5cv)a5ke<1)ZN%QeroZA0JS zi`?7mdt*me4$0e@6OFE!8?<gBR*!}Hx*2BH!zCv>=*kTBu6Bo~6;Zw^Qc2?sv*<FV zUJbslpxK>}+{U`e0sj*=Au3)Gl_BTO#^DjS>WtO}t+lWZDHuA=uM+k^nN9cXKW|hS z3<8U9E$n;0Qz){Fc$%A@)Sk^T?$q5$t-OsFoj5cF#l;OJT{J|E%$A17$BV1*2Hq7m z^Zi`(J%HH>&dv)TZ%<$sVCSNu!TuB=kE}SN+v8Lc5;tvw+yLI?_t)3mMk}H#p#@uB zFYs)AKS97gU(el`nrjxXr~(X3_BYMHem&XU-fy=bZxq$Kd>%KuJM~9q5VBY6jd!-b zUp%T>XG4Tvm;eHmb41|o<Z}Z!Y)jDm`rE0vRV=}V*EJH^OS@dQPP-$fg2wccALe>z zyTcp1I0N~%*-k^GR3v9BXZOktJQS4J)O62c$59QS+;OSOdSjW5RefO=Qj(vTg5h2f zMx+`+*Cz!{jWkSmg4VcpP|VqDwj9!y?}feDB~JJcr2$ImPzfqq_@a||$)#4+r4F@i zw&&BQu&@;2683~n-m~LR5S+=>jQ&yn37=M-Pw5kaG@b(4BgH(g{O!Kt3vcPMK()EO z63CCRHlUJ6oox~1=a>aYr93zM_wVG)W<RbnAGk>{+rtIe2lU~Q{P+}nR4noDt$NR; zDxJqim)Om&=6VW{g{zIoX&-ZSXg@PM!wS!uZd+q4YB1j>=LWXs#s*hc+KrBd%{gpj zWJpa=Kd9|)pC?^aRqpeJ^;z)Yt(I7X%5|_De)qe}(S>rZl{EWWg-UC>W`gxALL5H= z@Dpv9i}`u^c9*xqzES4sw|b5(=SL^udW+JLv*xRH-%pf*_wzn4IbT~j->ub3t+_~8 zyQI3hgy_&%imGu_Q!{Yz=iZc}Me?w}@!(rZs0IeP=0^kl{W_|;tBdaLc9tm7E5)g) zii(My-dHEt7IywhQfg2X6sJ7gFqWr^Fp;E+bKd#8f~c!4?j+xBEu!N(xE1`9IP2{0 zF7G&23!v#OaA`Xw6E!{H`md$h*Eu_MSNR1P9pR(Z1N&H3UeMGFDnz6MRq#+yBHNmf z-%`=g>jg2~kE+qMIcvK*aBRfpHxNo^5lR)^&@yGWJ5;=B;C@	)8E`9gWz9+;B2i zwDhpoP?0(VI93oWumg;A$)~^$E3L!ETrZm%n(gtoLCZqvii(~eLSuu2=xgfhfuXIZ zXJ`Jr{EmlcxVUFKq@jTUU^}QO46aREK96}NE!XQSES`@fJeO-jHYTEiJ23BXu%@PN zJp?VYRU7RH_}ss<QoN8P5L#RrmNz@k-<ET|QBg_bo;=#P-yW|5EvFwcxCiu<we7A} z9L;<CH=?7X;Xh4m<+xpC0SzfJF)rfbNDYI1eV}SR($~#;TY|P1pI{IWTo|VQj~W^p zk1ShEkW0V2eL8);lx1a28SZ#@zIJyFGdubCjEr(V>PD#r!664Ia)pyp=&J_5Lsg^& zlT5~`Y(x`iIqvO@J=Pus!aN3W>hWE1bEV%$K5qvs(lmb1nBc>5TzdPoips2|Me9W? zWm219=UybN3pCb6FAM0Q)Zw^Dtaw)nZkixDs3&`f2lwSmA1XNT=WN@FudMJlQFVC? 
z@-ZvTA(d>7Oa|`YW8<c#C2A?a_tJ9Pd}G$90p4~@4jTXzFj)tM2<pDPtO)dXs7f3+ z(Fq&dLbZU0M@TD-yg-jP00SWV(J$=$(0EL(M1CQZ8MeC%rw-BA+|(4XI32C$dj~Iq zR9Gkqa4reUK|yLa<DkUT(ZN?zO2p^ku<HA19+w{4o?m=+bJk3p+gpQ-BVgwTOWpH= zg@gS&J<aE#xtln7rVa|(g34k<Q4wX!>uy5VbkC0H{`lmiwC5C<X<Ku3=zb}*8Xt#d zi;Fc{T_!Qf03#=R7ammz85Ac^W*biVl6bo4<soEJi7KbteHTS#lv<<gIts<aV2)*Z znW-nxqB+yJ%-hlSK$k=VTP_A}-yRJ2;?#lm%V->X1u?5@8S)~VwpzEFzwqaa<Z8?q zZ%C-b6)#K-)cyrfHFztij5eo2HbD(4F?D9~)B$Do1Dis|qNS>?Y3Qo8a>FY5tk3!i zbI<8*eQ-uL$kiDK#ql}V^ak;tD}mK#{Nv~DguWg01J~Te$@FzslnL#6R1#)$Q<t`a zwuT41FQ-RJG6LXMLJFP16+JUcq^Ti@ghPM<s*ISP%Vt3pUNP<j*`KI^iswHtI4G$s zg4O#tnfdC`Hnrsq4G$lpa%lgQ6E?6XBiKMgIs6t_^9`P##tzaxQHN)z0cb&|qx;r# zT?2u&xe-W!hXTGNMY57}L<I~b*M$CrK<}Y?7Z$FgDKliL2Qx%H>D`He6FA0;{&nuZ zk8U@pN7KH$!D3p?7j&!we&nMVh#;vuz!a;O9xu;5&)A~5*lOpzchq{w*MYj?j;?M} z5q#5oyEvn3zp$3`aW~M*XVF}<!u5vc<1HYIoet(u0OsHV@MPqd;jr)k>2}N_>ctE! z^N=Hye;`t2ks7mFrAL&Gkt10(y{Ej2pZeWx55~wqPJWAghteJ3zAS|sc|wSLa(*V% ziuql}({^g^stc+?82=bnb4{zUfmL5mozM4@*8I)vsw<Qev*2^^BUBz9pLTZOr03xH zWLF>C64YbM>zSC76N$`Ap3^nEQ;=yY>A2n4q~Hwq3~KOaj#}Hx`N3=ckW}+!OWVwE z?e4Sv5meyb{Rn-(wyw61+ikJuFIEXNBQi2^f$i?{*6a#`PeDaRSXMgn=J(11ynRAa z-qit)>BWh$@ezn5-a+W34tUH#pjz{^GuHeq7~d{C`GWbO^9&sR#{n%Vkw@jJM%kI3 z``yNrUX<74?ivebv^sbLqTzZWUH^e~Ub?4Qlh7N*SaSJNjO5xdH#sg;3Zd!`aRLKR zvZFw1dLn0pW2noSoG<u2x+=4LD;p5yt8?6ARCLVrv_v(Pjpdc#zaEKuS`WPXJ}%xm z;@6-N?%3E=(}UTJh{rm<_uv{Pmfw&bw~5A@ul`c}&HVvkj%pkg8Og%1rJ!soA}X5K z4n`Nq+4{7*c7V&FYHOq2?(OMgg3ZU;5H(=f<^3h3E{>!bqT<YkZWmoq5hW+W*U5;e zKv$+Gh!Z_NPOB|uyga=R9&QZVIo>M_6|dLL9JshQ6gyEtYu0l~$h0-H#@E;Y{^N3Z zhSZK35AW?8xMN*peao}xyo?k4J&<~x*eQ5(da@t)68*=m8w(eH;s!M<OKenaA&;6Z ze?03wf05+eU73pWs6fn(L#(YxD5crrrSHw#Vp0^$VxlE2GdqP7HcAj9>Qj_wY?k%j zqg=}Iyk=<FpT4T{KLtP^50oFbNL}cHPCOGHJog*d`le$WEwwVbFpH5E56ML4E+O28 ztD&rqkTV@+M(;OMxHkzP?**H?^UdGzJxVktEV`WO%HQ3O*CWL$L?b{S3dp(lhnGAg z<o1<d&{vGkco@p6Q*nD<kExLv4IPbcw<FNr!M!{>ICwrw;$e<<2XK(*Z%?D^Ki%WF za&)ZEDfWW_%<Png+mRpU%GgQh|FTo(YzPU#ap=h8$-&`x0{JV3G=2wi$Rc2vbwng* zX8UwLDk-c4#>gwOl0Aj`4Jj_>rxX-!Left}7{}||#=N$jiwryPytXWm)UrAE{O!}p zG@$a|sl48*H!Hcqj~g^$QIQSGRF`*V>3oG^p6+_(Yfq19%}0HPJ?^72YQaayRFAYY zTxUJ~IdThBL`K*WAH!3V*BuZ`TM1P2K~~+_HeMQdpKCXv{(hr1V{C_rmMudOS`d~u zJ({#b4EK}eEv#uNdvNg7+fp5`Uhh;>(TYZxA=Rp=TqNLqk9wh7g-qs-<goSW;{^b- zhVV6oFOFwO#bO9z!r+d0LZGn(jZ|F@PcPPBt;&a`o^d(IkE*-|JJ=6zT%iwVQVbq? 
z%dhGj`Hcv?Q1>|^+?kkWTCJ9fQc{6V01XXfWw2Ev!+fQ=gk1gxzgsV0jNckYRwx}j zT{K<>o}I-sDWe=>ZU4BQ*I7E2r`SNEH(30Q1h9MviNqQ>LqP3w_NbGjIuqmj0~ABj z1lY^MhTsC_vMHXjV?G5dd*1i%&uujVc?ow|n(k8q3zAq2on=n>1f<PXf%fP%0S!gV zp_tY0R}|!qrx3Rm(;yc^jJgyJ*cA3YUS+H)j@<1*(T@>n)Tg`fVNKv>W%;SqK&C2V z%bzwT&hAF2yub+Lrl!TGtqu;x`Ot&T;r(y4*l8y{`v4%_1aCLQ9S`y<)c90I)nAH1 z(!Cc?m%uXH3-iD%Lt_sZ?=dSE_m_EzPaW~Qc3L_*Aahlx+r=u-w0)$ew%t7xV}0sQ ztNM2Eh6voOgf*-*N~x;8_MTm#Fp%Zwp$h(nV}=ZnN4v|*>)SR*vf)Y0l&1qX7}!|a zGC3k~obO4Iu}(~DIzi@L-v|V&FE1BXO58S)2l=ZA)L2xkv`i&d3fBXmjVs6@Jp_rs zVVTjDqhnfw>#AlDOv*~^_3yTK12JtAB)NsS%eW6u+>{9yu9fSe>CDq05}-U^4%_wI zxwm}YBEIrwHXXU&RJ3D^_&^>pbkl$fOQMO##pG5rX}6-&N~y@f0Hdsar<}gG?CEn> z-%I8U?0$!Nlhm@?kf;>D2C6lx5-BbZN%G|`<~}Zd>Tj($6-}R@aCbpeaqW<1OdcLw z0D&F!DNYWA{7Rm+Pv0HoA;^w6+?Y4O!g$Iijw~kw#KAe*8EBTDw)cDYTlCZ6;^G1e z1YK?C0eY3HD`@pPYwZv3sf@KVxw2TeS*#5V5GDeaw|921vodNLzURb2wjvC{LtnpP zAL3`8&$NJf?N5$z0EratJBfREGlUH!h_AWuFhwvkFIPFZd3`@{&Lata2rG5w6w&85 zkWjK`9qx$%XA#y`1Zdfy!cqWq?irBPA`*s=sFeWA=W~$M!iZ4*7Lxw~#niq-Gt1}X zV%Sb*sBd_Ir~BowAUv4kjj6DcnH`E%d+PXQSSv)Y{XrkM*|*NTy`Uz8Wu4BrPHyxN zZStw1N?;kg)6X%4*lU#Db81h$FzkG`=}1}H?hstblR}GwK~tZSGUNGN`*=P6hC8M6 z?Xfn$-w&YBW9K78V6HECr8;gqSfj-#-~utT;NZZ#b^Z(){GqT(N+Fje*liSm)Kbs^ zuKBau2-Fso8Uq;_w1;7%4UG7Em?g&HHR&_XL^8}OE`E@J*FF*j1yp_D^2y#==jV02 zp5GP5c(p%EfZgguR7|c6(hJeZG!Z7FKx@r32Y0}syl~$cpQmi*Zs-_Dw;-4uCpNUF z+;z5Jr)(Ct1Uy`$%LCw|SgrxWRDPQ2^=EkA1riTu^Ud*sYgR2f-Yk0=0@vz&S-<6_ zx}~PX+q+o8*G0C-0or^$zS1g{VVzvEgGeNO&K2$-&2IQr0gb`x3I{2TFP%Qq&>;BG z3a}h}?{+Lk-%ZLSsf-$}zd>3f;aW2i;{BjzG0>g~JpH7!)Rgq`p`p0mjlVE&2jH2D zh^eT6N#<c)G0@on{hqsjEFMKw9v?SK#t-QeD)RitPp#gh!a%?F+v7#xPZvhm`S|$w zKiw|yyd*@^h$ZO~BRt)ohgL{fej_wiR@fTY+x9$Z9+0P@L6CZqKOFLiqkFa{Gx6~7 zUSB<w)G&p5z;0(uAt0uN(}7&RN@~^AR8%?MAiqMdL1YOq;(AAJn2w=|2tf2gzWQJK zO&()o5u~TKWo}B$E<|APOrXLDNfzLh2}$;D4oBNU-c&MPoV;jO$*%-_GnIIpL{^Yr zc9Ru6Va{lNVa3UFYYWt9uGVrk*u6dRW&-I^GLo51_$o)rwtEVH@>N|>nIXGeJhCr* ztr%WnJv6pFfUvz1V0+q-Mda8&yXV)y4(|LYnlXEL*MNZMJUEv%Z^d7*LmfbRpsgTM z1TJTRZi`|1Rh8vN`oF;Sudkh|txM9n?Tswm?>^iC6#Z*+eF=b3C~)=`z%ilm0;mSW zH>SrM^obW-T$6)BU>1?8h!k`vaZROd8DYBc<_Sbtle4qD1=@3Ja<ZtM-8ALhfeX=Q z$|F0J7!S}i7e<cr2RCQW{OSZW*T%v`h-5`|NtI5!n~B}k#KAtdt_bB3;sX^64-f#u zcf7w#l2IQF(K|EKRo}fdwFQM8^qmJ@Hi`wxWpS2e7Vtup=(n>51qIyYsx|1wNJuTQ zu})7fssh7sh74sYtf7s|fFzYoX~BQ8dWoHne3f!qe<lI8i&d{8PCW4yokFHnCb=JS zxHrh!stn94%ib)lzi!K~e6GffOhp7-_=EWrRq?3N2%MLU)T^J@Q~p`6>wG>QnN(m> zaXx;t8)bJS`swB%@hc90SA-@cvn23Up0TK=%u>sA(T%moIU^1v#ZlJ?^l~<SW+E}l zXRe!*qLED3S_>5mF*6%F^to-K+s5NWC5nEJGj=g`v9`9~8{8d){F@B)i)mT?fGGJ! 
zDcU`?@%l=igJuzAp`~Xo)|}s-^ML#$WEF76RLl3eQn}s2VXCIq)CC3y_1f|V+5krZ zsm1@H58i<N3gf-Cuk5U)CFHE*p<wYtR##mzL{2(D%PuHijwt@n=7+;PQ}aA4BX6<E z9m6dvo1yyN_HAWx^*Purr^&AcJjdGm(f-rR)0l9sXcnLXU3hV)RQ}}iH5V>HX}qVj zP8YFNEX|*@+gE;IbNZ(PIG6S6A~m0=;zX}=DV#c5(R4(l^A%88Vo})<%uv~P;~M!w z?-F{TU{2qLapR(_C8U?%fJ{jt-^ud>9~)EG;#-A2cf@wPpXW|mX}Vtm+x1>PPL8k2 zU+Q7Ho5}AQ^Lk55Q?fQA4j11~R~zyYTE;V`{~Ax#P#WiiygZ<8PD(8GY0mYCYp=a` z(@cxNdof#y9dQs09X31CEJvn`;JmVD%)xIDG56iR3N9o$)3e7`bbeh!9lBZkx-i>Q zTcb1XDLZb_TS=1}`$z&7q+NbzH~;cta*5)Kxl<t;Ae7I=?`Ql=hax($Co;ZA7DnYI zIq3-wk5Jks{s)p;Q%o}b)oM4K{JSR&^~MSXAlTc%N5RH~?b_h!`nvin;nKCOf0;p+ z)9&EtV*KN$Rrju8zMdt_hr)8tzKE8do~V+eu8NGMg2XRVS#iS%S8!(LRxWv?s{%6< zF#B@2#eKbG9Ym{X(mQUks?Ce+PJ7wV$>JX%Dx}u#;llC1`i84dg?1NjCkLWKeb0ky z%W0NcCoQj9bB6M^d#a9|#4`u;B_ATGR)3ynGjP~!pw2$1*2yI`MXWMhImrr$uTx{k zF&LzQXJ;6i(qc}4+hvs%P|6H(y0Z|{b)et9X|yGB^gZTEl!muLaG|RtPtc{ly{09u z>b!ss`*P#XE?<2HKOF0s8L9cKiFphS{b_5b`J?4PQ>FwHVaQw;h?;<n%rx(Z<cVkt z=%501)1uvk+g;?Np4Wk)pTDllT1n?1^tGB8-cD$oD@rcoKCq{ZBZJ<tH$PtOVvGu^ z@;y)8kfPye0cZ4k*|>eIxJ4kcZBe1r4Dz%7`$<+<W-mBS*|Xk#pXZKMGv^*CPj#kj zKW1!^-VkY8`zE9)sS$zodfM_GXL_#jysu&WJnEPD^Z?kEkT@=uD-}BnPiIR-O;J@> zKA3(WC3uj&?`U53(a6ioKCw|ZZee!A1Yvso-jZ3FfzP8{l-}i4Xjo%Q6>)h5C>|FH z86Ocl^824%Oz?!kzF<?sqp%K9(<rq;Y=?60OC@`V9#>gg+6^DcNJ_4P{esiDd<m*U z)uQm<;BQ5ZOYjwd{$@Gtx6db&=$G*G@oFQUst(|GE48Cv*WFW=tQbskp9P1$TCa2E z=eeHZ4-{&~$&n_51<WW=F0_n+Z{f3xCyGG+pY~r&dyE*w!m?aET#-m;eqMRL$T4`u zCZ<M$4Yfo{_#Ivwp#ATLyQE4i3AZr*1m&7q3{MZlK>|;x8DF!?T)tt2zD#(}e4vL? z4>P9iVn1<s+6AZl{KQCAkGs9{caMpYjk5s>JYab1zxM$+1mB}kCiS~<4roNxJe1vs z<_AYN%zPxAfhHb$oZu7p1p;MzzVJKy@T%(bF3q{-b^gwq-3j0QP#+IBL>Ju`o!y`f z2NzG{3r=)=4P)_8w2SOt-O68>$(RgX3yx7YZS#ic>Mk%xU^f*>DiIadAq_HZ9l}+B zL8I85PhxaTZ_NZPX)>}p6gjy*r1N{aa17%=nLL4~gm4iY_llA~T<GJBCA^J<%_u0} zb5Bb3g!YyR)1*z$r{Z52g8RV|p*w|nLGQD4bknru!M!oGp|ug{7}4(2qthw;rP+wN zwxLDnNKr*Ih}*dze1}_)6ZaV8w4p*`hi&)@&YcLxrXMN}Ra^y1E=_mz6sMVC%gBpu zNGO|(_YN}!UO5wHsh3|=o?y(c;+j^-jM}AA{*s%0xqgf6mwcSc>Rt~y(^K&XF_oqq zNu?fy<GVw!Xt^tC8C@|MOJF__Pm73op-9qsgvQQXx@)=}gP%0Nk+P_}8SB;LJVv!1 z&!B8_VyLqv8U`pI2_8cQX{@ZTu5Uz(XKtZNO@0g&K2KSFp0-REyL0vu@`Q%q0DZZg zM`{azPKv{Qu|m@xGN4yas7Dyn^y1XuQiQ1vB80*c8)wWY`mwbAvm8%jh^guM_I+VZ zj>QTxwp2;hvJLl@Jq#<vzEs}X{N#jf5AZw~@7%o%GfnX(TRK(}uf1+WW5shR@P3h2 zReyKZ3ekXM;5hS++rx|c$WsuurOhf(X8v?P@>o9LYZ5+IQGY)g&49Z=Qj8~``|TK# zlKTMc_Ta)H3GXg;c%@!KJuLlsE^jn(>@EMcEkpRWQ99fYibagMr$2s24iVugRtAvi zNYxC0?k;H}Kyw)C0dV_1Oz8_ewg;~0;gx5!-+H>duvXJ@(}SCb@<YEP7d~a&QbVil z)j9MfC<+z$A1S5@vx-n}tj`P#4<cX@7#WiUvaM7eK33Nm^Bq0UpN40hM6bW@)N|h( zSysJnIy0xFK92EFtx<#=J}y316ZI9jo}7$vMALbD6lG<AYQ7<i)et5hb@?UE)sQYj zf(M!Iqp`E<5#Xi$*3#-;S>e-Em=k)EO;T)x2r=WytyB8@Oe0ZVU0Bft(jcYjO=bY{ z8&kRV&E8XWx4Z56C*~SdiU=lRDd+dgL|2TS7aAUP%H<<U?)}k~B1dgjv&4oZW_UPh zTdH>6L|r!NZxg+L*38q9iMr|wT2?*Xl)!|skePbFjWA(r5TQGa(f}(jS~4^T1|1U; z5~ihX25pH^#(d75&!LfJdtifKkdjUSJ-$v!^BjWr{?j}`<&>Rbe_J4fx!=H-nIL#K zH%EH@1hLgmv$7`5Ful2wEp@`sztV@_ko5=NHCwftO&OjHEvyU9ifeqY8{vxB6@F*o zF84?&YpYIirmxs^f3>JMJiK_bPmOTCA>ci72E4x^NcK{!F^M)Qo#KK120N-fFJ@bL zR#|>QTaKZs%~)KeiujV8y|qQbDDfr(8B3u(vdij{SVQ_SOX_KHuoN+ozNvv@^5_n@ zvzL)03+0XDpq34GSS;n8ZSowL7?1&dSHk6?Sfu7T?5o#|y3l5ex_%YWBJx7&5q!20 zGT4N^P0X9C_Y<v8%g|eyU0hz+U}NP3G<0({Ihx#K&JQ0-FLiSz+`+#21aUjkuau5# z)+*R>hcOG<qc1K7I?2(|`m3Y=g=;BxHLlQ-Z;8}H`f<hT8o+Xl?P>bgvZ%<)O*j3X zgKU+Gwto~z#Ny=Uw&UV5k<*g<q=8S!_cJq0;h7)840%p|sRi<$j+y8MIN7>5Io;nb z1A~$jc^q0A$}arxxc%&ieA)e8zo9RpmeEoyJZewBz2?1i6aTdd$I&Oy*1zJyrwNTi zB=lJxJQ664^R7Q;YUD4yfIdISjIr=2jf?cVj+YyH4`eRbFf2N{$J$x3?lJA0<8U|| zf;J{Z=Oe{;l<MeYiTfTt3FreSdJ!4?A&!+8pP8igTR{<Wv^7fN&Dz^ruodZtjmhU0 zf#{dwV&IgX=EOu^pr|=%|6JlUkk}p0FJGm(sn;I1%u2|<5DjC<>?i;LD<S<D9pQ)= 
zUGNMQ=mBNhD0sZ_LfVN-wG)M48TXaWd(qGo>a8xSebaNl8aSNMg!)IBpY4yo>xR2F zpuHmzoVbsxtZM((OJ>W#VyLe#I<(u2^>g(>s}wugEpN*m+i|jfwdW;grx$iPro1Lg z*^I9;co}<K7i7FPlDrffeXZubIK}m9^Gzf*1X!&lVGZrqUP}^XsIoRX{+)6Ltjfqh znQYDlzRNe<>;ZQSw&)G-M-S5vp|jyXW+EYNxoIik*!qyrTM?&_B>v~9#{?)K)1ZDJ zrR2s(LP6<D3M$H4YMb-gj;)=g#l=Q;HLkzlmfhS;Ur9xMN3Z;RGGDni`itig^R>E~ zMTG<7)6#lNsd~7Eo%J7zE^Wfdw<!hB?L@9yjEihy#U)@P<N}m_6nF|k-%BC|1~LXZ zZWg$9w6;}sQKqyACwB?^lNezb@2Djy3GjEiWD_XatB>4Nu!9`|BMb~`MtCH&L=}v1 zBi5<_+d{L_wD}AR?;r~wAQBB8m5ttv#W|Y{oEsF35Vd~BA$p3&zjUpIpZdkt=A6E= zDLHHfh&&34)nYx1wIP=ssavm0TFT-h7luWzOJ%8c)N;>J*pZp9W$CWk@9xo&KL4J6 zghffJfH|u02}A#i7Dq6Zr6)r7`{6HsLX6z=0%!_La>~*0BlJU>_c4&cXJiaYgvZCH z<Ld-^iT##0Z<wY90Q%RUg14Am3z?bz2%x|5@{9mn;+S#Ll#@VSC7~+e^qe9G8Y^Ip zQ2%9A$6F&7$3CUm9q?_|9(W6oVflj0P+;~&YhvWL5s-$on6$!__}Yl79ecW?ud40A zLTMFJ(ezie0u=;!y7ljFg?E(8kvQPEFN@hr@Bj%gKiBY52Ag!FF3~1NmxoDqhfk$c z{~0c+=J|qKWPMMvH*gy^HC<;{l_4*9$!|Tnk*f9tW6N}g4%%~Ul$AHHftaW?PabBr z<f%?LtFuXVkbiQn>8!6^Vla%Uham;|S2&QI_(^%WiP)J~so1FLP<s&nK!HjiEfaYH z3X!f=S=j<%=bK#p-fd1V1L9D<KpLhGx_ktkan<$otZZ!^U0sdL)P%(-D9K9nX4Vx) z|8g~eRhlFv6=PuNjnajUjSHDX`Thb8!t!$|^S!2MKpj?QRUJsvGLV*Dcz`c-8s}Ol z^;IkQrq4Hzw@dpGY1v-wBv)0g5o$lSCiUm%nVa&RmAijId~>vOcooC1G*OT(Cif3u zpyzx3_imj}TbetyrHf7$RLM}#dJon#u-|V78OF4UOjXIJ^aFny91L^>Fp!O8xf0UE zUbpdX>AZ&KqgT63G|fZ)AS+0S5&D6@TNoZe`xD1YHakAK+#&!(L6!rDk?o+(!4G36 z#zVqTt`P+xKA{%iEx%WnL87@CIO7wwwp8V|bkzn~3_Iw6IU&6)H2PhyM_2;5uH7{i zzBv(w``w*uhh_FF&GHNLvCgtlqU9;u@u6&t!MqGW4a{$@Ob;SNtFYa}quaUZ%G`b_ zJFQhqqYpuqaSgtaaRm5HmExuhz7Z<8%s5fa5@Fhn@?0r7#gqMHNm!LsfZyWZVt2V9 zJStzI|B@IB6q6ICR}npW_?rZ0L}dZgWS>ZleuLmhk6X2>P$MbDP;v?>)0CW{r_Htb zd~qT9j2(Me9)u-skvVsCGEV~Hs?99mEm!72!tJcJ`Of*+d#^C?F0pY=P|=To#2FcJ z;O~g^03Coh@-c*9B0x=Vlm;9anC8o@9vls@y+(^WMk4K}uOa<AHVwUvhjR}X2`U5; z>i0R&JBAnlYo&(B$$ji%ln@9qgCRy_nf}`n2Ww;-y5o;*ds4|cwUduEqJkF^RZU5i zS(TC3LPUIT`KbfJQDmVS3~o?g<DFp#YlafwI(b8_92vfw)8pZ>dVHf+=hHEy;c6CE zi?nVHRZ?vHLz4ba=!KHqu0r0ZMltA))hxB(5n!iYkM$_7vG#GqhENBzl!ei^-NKEo z)_6JtC)dz8zr-lVz&whrdA_+0VmOUE0mMfH2VWhtWeKPZvkK5LCLz!xVL*(-F%{`a z^OofFv`7*4fs6>QX47(l(0=<wM@k1Q<S^gk=t{>0dVyvDvsnCUpYHvi(P9ii<Xs&= za!t&mk$4v8e~b--t1!Q7#sJ-^wUje%Y(PV-NBQzkG2_N)x{YrVwkQiv+yx>j^=$<g znm+0a(K3@e0rqj>t_H?J0)VbEvv4kZC~ls)rmPp=n)wE}0rdNQ_KAf2Goekw#e4l! 
z=Q!5LIhXR;oD_?>^7)Kgcl1n`h~25<7N{at+F!hyoo{f;HqKLuPCl_|Rv8Jk`Mitr zGE?$v^LPQ;@-s+Wpm%{KO?fT;hZL<W=X_px*go5!08OM#Ki#Dsc|WG-J^`FxP#pf; z!04ztIe-&h&(U-sVA6j!R|cBQZ{g-U6wtuVM}sTUus7kqOe~o^UCUh|@sggfM&N!6 z)O>VhUfsR*9PDQHM>h_4$mkTmm$-shLzcKgq_fJpQxG_FaHXrS#f{#tO5CR}o^BRC zw6Bp;f5oQGu5KP@JoYtlUG1}HSQYto2}DMHNWg(L@DUO2X@WE`1l^gSvKXb3mm&4f zQojkFN>^^!;_5J*sfP_;)Lu&}u@{aQMkm|9{t<j5?6nnf@KZ;*{`TDLWDjwGnA2$D z0Ro;502&e*{aBq~jw$K}`qw4{h4NT>^m6#mA>KeICo+d~7J739tIjl5x1m+HO|Xnm zkuD_L27jpYAUH2oqnWoGXSM)i1kf^Lt!C(eWj;bNhElIQ235YDdIWf=HI2>Qm7({% zUZ#-X&)m*CNEA}A9<UK3Te%;R06sV8)Yjm>^m3!5>5Gz`Fj=PtlM%EalETwM?5SJv z<O{;~i~x5%kLs~FDnr^nD%5A9j`Unwd=6d~XW)OEjK-cUJyPpVivyT>D!uO-e!U;S zpBvW{NpcE1dy`Ht0P*{)XOKuB%KGBQMb=O28g%;mp)5VP8MWU^fV2@#owQ($jK8lC z{xNM%*SEwe_^|I`_<$pI7FcuDv1xUkDS4qKWi+Y%B`0HrqW5+@_vR$J@+7v%26#r^ z6f~7ZUiLO89@ctMj4NODG%t{&V&Uq9CUM>zK~pumE`7WTxha^bGqC2$6CL^s^<Q|} zoey?zAA?^c5IYS+a#@)3_n545N34_bM!0?exs%trQ7!^ym*jehA<*-Ua*!A5#}$eN zb6P`FsAN2Q*;+2={yD3sp_I!M-K<C&0=V&Gr|43ayHVA8gcO#%;Zsd&eRy>H5X;xW zoCc6`?xI6ZM_9iq0?vTK#;)XO{uvCmz*yLAqHBh0w2~qtQ;_zanAJh@=|DjqNe}Yb zwjkKmM<})up5zkWRQq?$j25N&U#Vdyx)LPSFe^cLdK>&)O<qGBwVJbkpK|h0!?5Wj z!;7StN}f8IAH7!L&_hMuu1^ahYPpNDKtQ%!4;I&4*MwQW4n{)}2{F`cRdlncyLb`Z z>>^4O93{EK?{?(#uFG1-)(WiJ)y~`&0k_Hc4wDuvoZl-s3&Kg-2t=+b0tRPtgSDc; z#%=~avchGWh|v$b28yD1g)K?8^;Uz3bLHayu1K6A?K@q=a1X-*$~*>7AqR0F2LYxs z!hQ|m7`NfS6gU_a7C00Z*;MBT#&)4GoqtUk;k!4MXj4z(!NgR|Hnsnyb%Djc;;SBV zz-Shmj*}W(1y-Z2uV}Be?W%E)8XiXKbH6T|`95w4a!QTR>UDbg7T1d{C#u+?qOYO6 zBd9^~<7)V-CA9hEquay%?XkToe&F`8HQ_P%Il*9PSW_ibN{lJ%4Lu{atbI)VyZ=1M z{+Zi+Nn+%8X%Yk)&RU?JO(dq+1P8LBy~;kr-=#XkkD6qkwyjoj^e0zXX3j-okdva6 zM^~{fI=&IE-F%m!;L2Hb>7yYg9gyPkbLb+8{o;E_#BcVC67qvE$b&d;;4O0y3<e!C zNvt(OZ8k(_HRLMq_t1>Y)Qis4%Y2}HY@t;cVrJy->~gltEwf5N(Iv4u^%WU-X=zJm zB?dXttu#*WEx(*3KZ33;18(a3y21a#xv@sL$g{DWJptL94YWFKF?)IjH-_l?#k=(! 
z+zIRq54FCVp?WHh1<EY$<V2J=43Ncf13ZgVXcCzr8rGk}wQ9mp5y}(L>4Z1(MsLqT z(;+eLa0a~+aK<|w=g*A_Y5NAQg$&94G1JHmj~R@&T_KO?a?ZR#_uQ5HFKTe2wN`?K zCk?V2;LADvRpy<l<A}F2J_@+s>`h$wMVnBmwU6u#N5AJuIcXYbUEMzv<sRhRlSk>y z+dGdEix$_b+nT)5e*Xv+o>Y)$o^@Hbv@tX_@VNgK`}gquP(#quWXaHe&CC1iw()JM zX{Jl3sqgdSX-iL|eT}sPau_O}FmA=<QtP`dt}9&RIEJ%&GQ9x?)F=pJ^_dE6ktkIP zBZ5HHkDa(0<HUx21*bUH$Hsd}L`kdhDtzi+1aeeYj<J;_Zl;FkhTm`e13We<m@Y$= zsWM<<-VV-J3ap>RQCxK!mZ>b;q?V0iYna-@n2JkokiW4oq4U6lwb8MWQx}%yId|mx z61R3t@e|3#BRMIKJ>=x=*|Vu(kBQ(vQFCHz7@jJyxvFA&ZoAn32&&RbvC=k@TyPUz zGK2_t<jQ%fEqC1{p*m>>>DK9z-|YPQ9DRUd^B(hi?@L)Z?gKe0;ihvK#pV;RLB}c1 z8)K4@qxunm(I`DtjscC)Q!POiPeBfQ_DZN=w;xAx3HCm=_Dx?KB+v97%|Sg}%}Yg4 z(#X!k(flW@x}Bww`D#&c!8{cC!ZU?(aM5C@8SBrZhQ9~RLugKr1?-%Vn5W!B64-CI zV1+bX_)7WTPM}6L$@+Khu^J;>G$@$h5ZsybdKt53RcDaopMeVdq72A<`EPs-^XHx5 zwJBZi<`4DeG27_V_vhz<Gym3fpsPcLZ&>@MVESeF#82IqS=IQV#!gPO5hyeraJ$c2 zb_=50#tmwacc6>d1NtSE5-CyiAyBqt)@}d17vfP08F6?8U(tm-SBNjNFcO>5_}T|1 zNDzHYU&WB}cZ5n93b8um6@%%qH^|X9X$0t__-28EV=wKA3(!fq#aomR&4{~-8JBWJ z=TS^HIH=YE`m>(OLtxTC#Yx65OilEJIaqHci#Og3I%v23gS45(P@oJr#jEsd2Tw_u z)ti$#zRT<`@E;pUjAoJ#Zh}9HW$d{A79YXCKR56qmbKrNv}MjeVz7j#Gy}c9fzPEN z@3_zG$4u=2`PB$^i#pZR0_YnQ(^^a!5(wvy;Cs-Zd(hzffNfOOfvEm%J$a$Q+k^m_ zKYxs>FbdI*6e{<0D|ZxW%@TcdIiUZdJF)Uw+IV5f+1Ja1J>f02gbW9K?vKtC+{Q2G z`7h`7Hk^j9b`{<fw#(^uy*zvDyR6XrCf$84#R`13++2$e?K}H<i()cw!Ky2)Y5y8Q z;>#VGFP5JmR#@f?lUs|_N**i};VU^Ej5UBh4Hf@A6bP!|O10=v*~63%nEw8{DIQy6 zvFTg`;~hl6BIx(|BT%u-I2Bmd(^rV$w_N|7A4;&W|9Iu82i_PG3;aM_Vz~bSlDXem zp-OD1v1{wV0+*<ABB>RYgf%d&ZH$BP1IkqQpF5j68faDxS^WzT&s=y*Ley;oz&;(Z z`V&Kk{V37-3t!nkzmP^<3>F(ItA`n(bPlm(x+S@Eow{lo6L01kfOTi}mN9!H5d0u2 z4aM*6#>VT;#@DOVl57E$RH{9S7Bt@22_7AYW^MGaQIAhz8Bph$Ks^XJVhZ*qxOo82 z_QJP>2}v;Aj+cQc6{oyRZ)vNQIbS=-{{BEbtkd~X;Om0=PIdu{#FDoQrObLQaC|y{ zdUKp7Brnvk47s-Lp!Cz9o=#`%Bmss>H4BT%3Yl02I*}bQnK)=nLzH@D(412@0LJYK zbZuL*`MJoi;&Qmp|Mzb1a0fwm0lB6K#0(GO85V;XGI<ChN-va@=1+a9kxP5FR_Ff6 zr7LTi$BSFvqK>c6{=vYb{K)3DpG#vr7>ebC!j2vwN1qm{V6$t1_3e4EuP9?7vt>if zmK3+?W;r|nuwqK{v<cgvwDm3|g$}HhmaNRhG>nA<q(!U5IRH7S#TuAo=6kNl+`BSG zx1{0qQTlbuhJMXUvwJUNS|E{QJqipDX0`QKfNlHx*T@&cR)<#CON+A`SEmO!;}xEQ z$)Q%s*`M6qI3inAItyfaAZ5BK3gxe3-<e@O^{u0fm?La!S3lO&4!%&@a4vbdWirDy zEiAZhsrmQx$dvX%Yc4dr*s?kLV`jzzN=tDAg^!-uMjoKCEsMse{e4Siv&-{q`(*4O z`fU$(Dc!^D&hpjBVU5>NFx&WqXNU&nhw%+a!p|<lS2$!HIb!@L*wy<xtff0F4hyi5 zvNVA#JSq5b1lf{ddTZNbEW$q>ZT3$)O{!b;Um9<LghI&Pu{eJr(jPDR46mBE9M<XA zOxk~?UexI{jR7akzrHY}e!=&@F1x^(*cdxHIhYt&|8rz(XbHo_#Yn_R^v@9s2L~hb zznfY9qnVeNh(X%K#>~l_h>438*r-UvpzLmELd2jhXJ~0+<V3`v<ZS5luNDbw12bTU znuYN{qdC}!7{pC1%*>r&nAkaq7=&$q**YlM85jYFh?oHD+L?$s7`PKLNZ2@;IM~_# zGH@~>a&mA6_WrPNa+Ei55Vp0pv$Zj?aUueq{nO$Xa2O23&ws{=m>Ag_n-KBy!~BOd zZmSm=I^t*wXns>|Cact~R4L`^L4{P|Xb=xIs#-`v#wo$&Rpco}7;8ohqBuY0M6Ja| z3m=U{RHucLlo648B~8?^#7&9<zFAcXyd?5OKQ52kusB|h%Vu27v`>0v@I9?Nnu-7d zh#E-ONd&RFiS8XEzx8}?K`KqK$&GzWrv-pfh;RuW$mIs(%!<xb<{T%`AJB`YI#Ib& zVT-3al`_l!^K_C(P4Lf?YD;QQf-8_}YghnwS(GdM({-r`>;!&^rzZnDNual8ivXUU zClwr(;O_wZY%7q`qC0jvQ?yezP$k2z`CXw()@H6fqvAB<yz<kvOcfYF`;B|{VyjA# zSlECS_r72a-ecBUuS|2wujdVJI^M=MAc8N@a5W_`#z~+E_|}eDYe_UVcx}B^VZ3-n zk1g2zku+Gv7s~%@ZBkuz?k^Zi*X0?jX+PpnDlSjeMYFL>{pxvb*B(#aMJQHCKH1U6 z#Fy+Or>=}TnKjB}I$F!Hu7Zw@eldvt!|IVP8b3oKXUh>Bti%N7!a<AnbR2E0vQ8|G z4$sCoeQ6us(wzNz-tPee2d6|`m~S^q!>BQI0x-tG%?ihw)qp0(MQgeKIqOJ&_oK8` zpKwTCoy+e%R4GlB5`Gowt0VHwIP%UE6^1mF^P!^)6X?7YEn@)W@n2Wt@p|r_;MK=q zj6he7CkIZAb1R0=2_EX2Rm5Z?DoHXqs=k!~*jkaDXxoUPw<EQ$NPcW*S5Yu9zUG-d zo`!G-vr;eK85hl!3*E4q?eQ)$ov);cBl(M>hNRZkt0jSfb5V$Q<zx^7TE@8k_~GR~ zSVqUZZArmnGup_9Wr91v*pn2dSj-ml<IyA2i~lbdQ;@fsj5z54j({r{>z#k#1av!` zHE6``+(gtkuX(`nT7>mP7(-p?-z%0Aohe_E`E!?8>Y7e0m@wW)*Yd7>UjiuQjYMlt 
zSXZLS-;bWn&1Pl-=vMVBn#L=C?45JjrtveIM4$;!vLWDR<}^L8p9i~P-!nbdFaL;_ zVl6*snX+)duMYfNQXd*@&QRd~eTqFCql&FjSiKQw!=pHA*S<32Orx$E8uu=wtYMoL zZ_PD^`=bG$!9dps7RkAMIU6H{?~<zEXo8HmEW_5!E+*74ImT!X0&b(9njMKDX%qpY zY~@NlsY~3xWG#E3@=kU*HM4}(ZxxL<Ne4Be;n?p;H=$Gu?hjIYU9Q^eJ&ywsIBWiW z*F1kNbi?=Aow)nqQt$@y$=T_d8(ZI4wuUKaA^*7p2R;~19!{dL8e=GZTqJOzf4y$D zZg7_%rbz!g6#EC6{)K0ZjBNh~F<@Qzf3^M>5M%#efEY6y`@cc#e}=IC8L)uB^>4s} z`Okp$_T_5~27m^9f&6#!{Bx22lIK4`GSh!24fFp(8dm0i3&RdP{+|%}|3{pQUM3I# z0MM87AZHCDkOMpfKDMoJk^uj$eE(ebKk_ki{rlJA{D0+RX8dP)0xmiK7ykdt-TW`` zKcJ=Kh}(+nHB{ZBG^&Q)|GU?9)2qqRSuJ_Ju%e9j%=lF{hHRw{@93!5taTlt7WV(J z_Ris%HO;<oG_h^lwr$(C?TKyMwrx%9OzdQ0W8$35^FHtUo^S7c&b80JzWblGYW3aK zU9}ojzwWLTUSJ~CBvs@Jet$p7461!y_>uZirZdE6bc5t#SQ*&*;#v4vP501<mMfm? zGtbP;n;mW=9>@UGBG)={df`Y+S+XPc3NIb85Y}r`GsB8!%bVM?ynN&yOU5|-oR>SI zZ-CzA$r;7nL$_1=(_ub6p?*>$SLfJ_Pp_>&PuJk*I=6fy7&`H49FrIKp<%i3KqI`< z0kY___n6kQHVA+rhVfB)M`R(eDfu1pcrf85OQSPU{|gl1HPlD%g2ty2*IDH9+PJZT zZ^D4_0b*#LUT>dy+l~n}mFUaKTLUcND6w6lDbO%l1_Fo&?xNTG3#_Zvb|{#(wOO{V zKd%8eA3^9IKe*|~jc_OT$}ZXwHjDTIS=iNfcK!9kLA9?S-*KW_vrheVoC(WX3}~$^ ztY2rY_lz&l3m@-EYy^M}oBtTb6%#r)eSw+4H0wV7^q{GBD)nYUaf@*<jp&<Xm^!6a zg?Yj0L*J0K2(om2nst-UZ7SYkj{rX_=VS{LyM-9}^bq<lgnQ#WP~eu~gCTB=x1Ujc zyqQVkLBGdbtl~zWtOM6DvRXko($f+%Mcb^QvAOokCef!c**5V>gge-RfkP0La-eyc zZ3~2@$xT?<ZA7ne0q>YQpL<xq5q76^Y0|M_;+$u@Fb*b0@Z;DBkkoX~J29|9nD-51 zPvNo4rkO@%_DSbq46TiZ4?e)-8xX?Np=~vRQ=tp8$=GT&13{p@rFe%BVV2NkvDlEz zkPyc!LJ6LOipiR7BLvHYzFl1Isf41|>LS~<cr8`7ELFlS*K5gF5RsUMHxM@$Fvwvw zgqm<DEOvKIPh-fg%3$0Su!#4&pB=fkP$<n3s-u|!wwW^)2p>1Q7!-Sp0atbOt1v#c zOL#X-V?~r_Dj6$q;a5-r7&8?dnqp{0`4L3L>c9{h2K+3E09~r^5KQ_yV+es6c3WJd zo><1B8B{Z*zx%b+Qv;V^#ONy-C31t<Fe_hx5{v<Z-57q*p(#W7ll2}2^od8=nnf!~ z`?va3dBWH?(W{k3&JOV6*na!#cX`1d^~BZ3Qnijt-B=~M{sk;ZT=DvmDADsP^v~sj z-{0+>d|jSfQ;(K28#61<7BZ(AQ#0X8m)X!VC$QeXf<rj9$Dke`O|+^lqm|xTf)5<< z3yTz}1jp*G;mxRjM(z8;%>X}E&t03cbbFj<n_p%Z2u<v`^d)5QXv%xYhHws6#v0PY z^Px9i;r=-KaXDqqoV0CgUZ7wmWITYJo-0fDIa@pPvC;MX_JJ+8-sIuOBO0uQlO)Gt z+rV25^quA4M-GJsWBcSUxeqtbo85Pv<eipG!KI0!QmwFA1@75c6RHb#0j9SwTuy9i zGGl&h&4vPewgp{L#s1A~NH^uZ_jDsND2$9&|29U1-xEt$FH61-5A1Vbjs>>6Ltfyi zwh%Wuf{O}X?JR=InUWYJs{{v!)e;4lQlm5y1;))|P=W*ROu{&YOI|_jtwh3`v2p8+ z!kdy|p;ugj##Zt5x=AEB4tYX8cip}+U~3~n^FloE#0bls_|I(_^&2oT9cIrjafQs2 z4N=vmXR`Bk9e1?sC*&+jddZdYOmM+7CNvs)-0BTm!#~K)P%u1*_q|HKC0P>+ZHtDy zP}=Cl<zq6oD5}OQz)nLysNk3t8VWp&KHXrMh>DGvQVzV0WqFo$wKa}SSU)&Odv~g+ z5nIC`@d#@Ds=KOHP3MnYNb!&BbD$BQ>2=fl$v6<Zl<I>19Mh?eS;)apsW?j&2m8S< z%>$m7KZVsG`dDq{m$NOjv$&i*r+VRrQ8*Ii5mh}k+va<Pc`EL!B@kQR7(d-C_k-y4 z1CsProMb9yj>#lblmD~JvvKY+u0!?fz%%1!Km;CCa${>=ftX<Otgy4dsu<8AV{HH@ zNAC)Diz<I|<VS(WyDTTJ+a^b94091c@>)eh;^|%CxQ=;>m(jvm;AOs7dt+@(e}fyE zp5M?xtIr0w@$VO0eoe(i1^&^15`>{^Y=A`v0M#^kFo3vNeK&vuk5qrnq+U*zMOqe8 zY=%;ZP;NeQ#J+r7QW2ACb4BL>7iGn={RSmlNaz_ee9M>h8*@>`MV>kMWO*gVuB`(3 zABAmpIf@Oh<OeRfP%gv~Bxn_){nB3KA<JN5!aP+OmlH?bXQtXVi#L@ru7Nnd9Ap=j zN`s1nQ>b~26a8OdW?GioV^{mM?5rV7*jDEGZNt_QN)Vi_b`I|)S%$e7<dX&NLk`O} z+}|rO@Z(tGEm)s?w)unM51%Hu2TNUp5@ejG-53nr6Z?wRFtnRMgQ2yAoy_dl(1}x} zuaEECKtC~BP?H5HL&j#%S*O6$5hCQrA;p_n;tIuAw4_1t)(|2#Jf{~P&VG5u%cP?W za7w4DLmMnGMPG{<`3=M4S3)Q|t#T>FH)(Y7pdm@B!z3Al#3y1sy^AA`t+M9$yqyi? 
z<#jZiie#k0gK3`=Ct@S`MOx(SI-M0I66LC(+pZyG31KUqf+A9RvQ8FfN)xMq8Jx)N za-<KHUzNbEM3i_yYN98BK;I$HXtDuo?Nicea|wnaplfeW#U0w~9HX*|kQ0D-F}ios zgMUN!-kwTZ9UZBcLPjM)sx<#t?MMe&jDIzeC?8F)S2;mfWEJ8PB(bww19X%Y<%PSP zmIntCchaC0C7+96mL|%ggT|+2oyupo>I!n!NYv)rUjZtbvXZD44GdT+-OGOff)%DF zshI!{q@dWerN_Yc^Y3^WF4IcHQ9;LK<CwU~UQC9wJ|8o_QdM+xUKq9}UtZdd+Brlw zYEGi&_nZm2%W5=(3OtvFSTs|F6?6<&8E3JD1tah|F#>nI%C8Fca%O6vR`c<X4_qWo zVcmFN4}<f_H0+JM(E@QnyOUZUCg$)&-JEvi_EK#|Pl_fPPZX$pnGbBk$8(`$G@DS5 z+emu*oqY}y)yyQ40ln513L^4&o_w{aU&LIrb@^iA#lvZwlXZ7>ATl|(^L=@Y7>|!L z1BNv^MIshx?5HMjHDV*BNpQh@hX)ZZI_ZiyB`p8WgS5jWKTc?GYIz;E&|UEImE$K{ zBq``OO+(!q#fuDdtBBlg?c?g?#0VAQ`-a$h00xd{ler*w3ysp>boUtn*v9wCa8nh; znqwZno@Yc-2j=G;lW(FEsqu1(RzqhZX}>8Rx6Ke;d+aMI>!4yAVsQKx@EmF=;`|w% z$qQ?C>AL*cY0f~cSxs3uvp%erk@R(-FJENLmsS0~5dpmZ<IyJxb$BzuMksPm>RfVW z*56p}-8cbww|M-C&W2nG)#4--+JrgFUy^Uz@%mOI+*U<Vwt%M|u0dURe5&4ey+Z@t zN-;k<C@TZqIWXB%r_~nKxrpF5imRoex7}*v3c)jX<cy(7^DwZ`@%VB}c>QAZudkir zxg93M)Mxy=HJQil^;*@8mUDP@=c+byI&?+!Qs5P5!HL0suN0vVt!YWxYTwnT(a~Zy z%^OlLtTi(YotgB@`z)7rqS&Fgt4_SaeG2^LU8NEaC$P0o-kt{l`lZt;v+cT~j)x8+ zje72O!N`u~sw`VSNJl;a=a80G|FbXf4-@z=TbYgZfAa>|{x@s+AApSfU+9axf#V;@ zgn{XA%URLH$<D>m$i(Rjzv1|YF)d{G1-aNd|9SGq=2nEH`-;tBVIXMdj<5A67aIrT z*R~W!oj(@#zi^^IdH>Ynk1r+fXlJBk;*78Lbt^3T1qhkAf8juX!9Rk3Uxog@O5p4K ze;kcJ*@c`KApf{#ieClb(}}n{izzw(L4W?Fi8(RgGyDT*`jaU7Rn@<qi!#&F<1_vd z`j0oj@J0Mxby3Ezdxk&0(VqmSKg8cYktoX-!Sqjp<BMSXgKGVw)PJH^+}w~~6=!C~ zXZ$A*2^$the8ztkC1JzFfzL?)H~+7He$|5hA8h{QWB8)}66FsUmM@C&AA<c!WBN+_ zTjO8U7Z=8Vabfu?kLh0&Gs{=$fARc_%U3?ef9GTWO8YnURWtg(mGH%r^(*b)<*|KH zf2IA!<*S~I{}T7#FY+H^{_{otjqEZ0Q)T}xWbgm%6#rcpB^d)Jt1kinR@48)om5FU zlEaio9{v=zsaCDlX=!N|Qbzv0wchX>Q~?15jF2!Qk1!&P9Gu+iiezvpR3h>5;P)ZP z)s~m6V+8&+A$S)$(02tm{w{0&BxXWg7DB1~QAYAp7o>_V3>`orwE=n+nzYvH6*rp| zxQn(}oTpRSw<B6X8MYfMxcZjV`|jK~CqG3t@nGmVOwr?61<^b9Mw5Ug*0Ia9oU@F> z`?s7cA!=xRw9wfN1Zq3zAG!F{a`7NcNhs_tC=z-?Od{%!puV!w8L9H9t>#jSpn4;S z=k`Ij!Am@s9RaQNy<Hyo8;aGEvBHea?!Sc_O5-V;8{Qi?g*Lb1b<4l@ceX2MW5zbA z{oqU#wfm{7eFn{TBzhJ+W=g*vjaqs#V$#x4dy#_`y8P>YRmV4!Yv&epvnTEc-XkdC zhdwcU1n6MBJTYzzDPg@Zb57K-(v2`>jx?F#iSa3G@^q0M;Oe?GYvPzmL%TI=?$n`^ z-<uAuJ(x3``%mDVT3kHb&xNmF@614`nSc0EzgHdb`4}3x_4FZ>S|5!)G5|DyABH|f zde65l#4%^?_!tt!K3C!t5CndBcry5i=RgHL{Jn=zo*wwH^Lr3%+uo_eM$Rra*Vy4- ztXev1%d&yP4i*h(wbq=059hZp%61$aY?U+8l+`!#Mk{<h%4H9ZOV88(wp!(zidnYf zELdDW&el&+$|dnR4kF=FgvW<jhTkHxXf{k}Cyu*pDAgo&<KWpd+c+3PNCX*vA~ZE? zjE^&GuuC9E?LQZ)Z_%4tzz<)DQ9lkGb((QxGdW!-o&S_Twa!{)9x=zxUxr&a4Jqpd zCFgq?YxX&?>n@_>8OpF7D3uj-0mEIq^}&L@Z>&JMEBU4bA*9A!IFSj7LmNmag%zK- zL;@2W5}1RwjK&F<@d;+y#0~skjNgCttv?-(m4p4C!|6YUm;cxc8UGs6{+syvf4dj{ z-6{Vkz0hU;NO6E4`P<I#@1(-id0)M5MI`QiigZ<m4tYig924s9-H<dr(@EB<ra1#P zW9=I7#h_Y=OVbbV$9TA?EDKUc#_GaYx@~{Efew79-CF7phWd2!A#}g#GAq{V`G|vE zGQ+-GlpK1mTQ}Iw7SZqAw$JltI^FA2FuR_#mpvW;4kSm~gHUiZ#zo_@(0Bnurig0t zmi-|G^@I+Iae}EX=DP*zQG7HFS|@Tr@`y6>y@H@3Vharc)%YyxeD%vDW0e|J6`N<1 zZcVng-|N<_-#<S09lGp4Z%?r}`E?q4C2`t2IjWC8E?S33eAI-GjY!5sWFhdu_zI=t zpyepY^(omv%*%=6$pS>WGNRyF@STYHpok;VK!J+$Fp)vbA${^$7DExqXi*@ImgpjI z%jXTu872XY(9>W;{5|eru(<m50VYlm8Y>qHS{4e@mlD^fYa3T!9Qo#06&7q_+8-v! 
zpwGIG{)=|=S8MpwM3`8a|J82(nuz{myP^Mkj{E<2^!|IA`Jc3#;Q=vNKYVzAzR%AE zNxM@7dIoe8HwZFPq?+RyK%Mtv^L{|vu#fUg9cANmttRp*zS=Xk!HQSeE#rMn$oqi9 zzILkDEKzCJgSDRjLNR}d`$q?CZ2#8FpSk8g>E-`rz3Tr0j{b*f?eE$6f1;R+e*ZtX z=pg<>{`L-t${)n^KmKL_PT6)O|0}WoiE%SC{afrmRr^n3|GyyI|F>njzvceV3v_=^ z@L$%?Usl(D)CA<e*6aSyfbE5kOf8O@D^8!*(c&wy#?p7v@A+GanluW8BJr^-;1Ywt zW{CI;QBl%9uoUH5#9D6&qaAMZkc{;DE?oZWj{0syLKqOxY}gcq>KR4FeHhNCXIYKE z*Iv6|4L5+mHzFfLe_wWPH=kv7FY8)eY`@#Ot?Ia@Zkqi2lG5Y-yZ3YE^C|c9?DO^I zfo&UR+3q!W`_n1Mtj+gs{bsf2>ihE%?-Jj~<C$G=?)FOSwu~H?cJF)6#>e~HuU-hZ zr<4tY5AE&J-0ird^xExuGU?i{uR2xOJ-@H6=fNFZfqYNTv2QzE-~L#54A?!K>8+CU z_SqZBy-$6fsc^X7uIOcWYyZ4#+*38Pt$wv?)B4nVjHBGZt=)I>{MzMky8j^&p>_6! zh*d0nsBnAW`#d~9en^B+dF4FY;B~(%t;5Q(@ECl;zUH=kHg2xi8+|&lblpuCaam8k zZJ&(n5-jlS3ynI#x;5UUrfkya?lY<11|5`hejMQVaCpu<wb=2!WnO>|T0G@ok)V(M z=KicC4E2Hzc^-^`!;4jjNZ~-QKn9S7YG<ga;mzT_#fFY9%r6KM23APjy53!nRXxXt z)pK06JqDx<(MR{CTX)|Hk+0uHM|mShKG)M}E7{2hbAS{<)VaIsySjOwT|fD5!7t0# ziUpnf9YdB4uXp{4=SS|1cFlbBpwQ!%4<?@iN-P&t2!$ZG28_4M-hMd_+}<ua^t%`@ zW?+5{os8_;X%H`jaB3u8K3s_V+PodR_sj`M)4|LKxTde_Z8OO?ze6dI8omXkW{Zb3 z-O*b%dXYkiUgW)TU~Au7F1Sz6_4OfdPPT(cn_p#Vz<CaJJ942GSDR)xJPRm+*h42{ zClR}gjUPLw`zJ(rIzH;fWtqUqAbyddIkwn0Yuk?Own!nl=B$m{TJ=!qj?StV&PTLW z06N?~_Xo}1C-Cdz&i7m|h$+E(+L0l8FCx^Se!F-F(=DwKh|u@S#b>}8<lH=Lzelq2 z(*g-_+c>fesh8gLr>wrkhBBZFPWP!&zI0F+R^G!J{B{tt<I6~Se^$@BVaWpXYe$*c zbJkY9S)2DeDN)*LpkJA-{xPjJ6LL;UZE9#PBgbf~s1pT4q=-DBl(fDM2igHU6OIuH zLKidajT89QN}It^92ON{2wdL`GShf(=a8YJQ#<`89jR!PvRlHF+M5U6bmQ*o!N1}( zeS|9IfJlg`*&)FOp^OP}@73Klf;8w12GD1}vf2>hh@Pyz`HDk!VC1%Ue#NxNBLS8} zQEd4&2Ak0G(H+MhqHo`TxcdO!o((qGuP&1o#}u|naQ;nZoO3`F6)Dq02QyHB1LWJ- zu24*>Drkp*Rt`L-uEwmHR`^=}4h6{+>bMqG4y!W*sOlOPD|i0p3+v5u>R#J*x3BIz zk4El7dK=#_=!I4>tI(vmVU&`5t8YmW6#h+0)Ar+#D-m?;#+~t>8Li>=BQCoW=f@0t z;UpT9V-yfvP(Dv>8rFFLO>b>KECZ^Dvs1+RX;x9rMLOH;AFir<4=+;!^C3OlgO3;w z;G8Zr;;~+n2`+t>AkKGm-!S_9o3w|-V2VvTV<3n*ZZuX$C(_X^VnWV#lbD*QjiO$G z#2`2;0l-h%uA2)A-Rf-CNXH5IPQ-D;$o-4qkcWZZY%MyxQY4id-}S}CD2>6XqlvwI z#+e2MC_?qqwkbvY_Y4IoWp4FC5cz>t1z7yc#gW`F;KG!mqAC1|C0?QN`ES5UyjLml z`y$Ljg2`nE>98hL18d;flc@>S3s$Dt(@5xSgfTtb!a?Q}6Q)!c->uZs@miB1%+j{V zq?qub(Iw*^@jb~)LFN;(1Oy}N*k_>=L06LaLx9)jvQw*}ZOGwAI*4#f0`%pPX`s!{ zNNaZAUHll7!uuO|)A`cua~rZ_KC+mSut6ETjUpn|KLy=dV)<3yhR{wpwPq}Q)Iux` z$1(IbQN#V0DW{%`OF<g=aA3b4M)12<5wPWPK>I0(so^HSc~gVPN7Wf*j!Q2z@C=Z{ z!}rM?Re!pI!WbK@&zbb!Rt2VTm@<Ie9_fv^X#I)<U{pTIx%=7~=_6SQq38!M?!ODy zBPm8-19!(-A7RfN%Ht6cSx3CswX~9%7wOH-iN;RKFW0y0IfClc7iVDs|4AK6b`CB0 z6J4;Ne{OiP4xfZZgbPNa<ap4IE}p3^>~8j);d0v1D(9ow?ZQH!1){yqNjpq`4Q8<0 z$O5Ahf(GG`OC)ezI4WY0Dt6?X_bG2tkbrA8VVSvbwE!zJ$z7=Te8W^W!EW%eI>tw* zfz@!7W|->o8*eL=*!K?-V`l`m5+3lMj)Y*TkEBDKJN2q2IC5}tzdq8KibRutP8JN4 z+NIW^W!Qr@1G57rd;<f+w_xt7Y@WxIP7#s-4qI#p{EEn~GzXlC2&Wyd+SQ>Q|7qKs zp|9a1;u@q`b&W#DFruHf&w&I@shI{;NSL>9KLx_Bsb{wGYV90Uv`tE^Xxwy}FozFR zc;5nmA=^DI2$;>;d`6sr8a4u35hW<4fJ#B5Q=K(^doRuM;XKhF$K-5~DQ+O-B>~9l zd;lwJh(MHUppF}B#?Bd!CyqF=29M&ke$xH09&|1|l-NCsDgEwG{ya{XIf@*tx<18y zO_;J|fj<%@`q+tn^0lFOO+b9*P*Tdrq=vyje{7c*A|!(DwcZ9s!(^Dye=!iyjNRCt zIhq8>8i;kCeJy|w8%ezEgt`Sy1B>COn>-xqc)v*;GE7LaI{EQw)7~gq_&f_(6{mUD za7>y`nU#d<6DX>2`$K7<E+|OD_ZgH4XrE)`S>5C#v8g$x?7q2J%AX)dDw7ZZX}Zq+ zZ{VE6jf^Cr2#|G{mBQ8F;+}nvwwSUh8#cp{uv7)ZE!486lLGkjehR5hhYjz^yUZ+p zqYaZ|z%ctwL$~&ZoWWZ`gDgb`_y9ZslnWM`L2yS~AkM@-Q<4fl_e0uvu$@A}a1<*1 zNDu_C%ha#zpd~gv%m5Q1{i>$KD+@2^=j$$aa#X2#RrTj?V5xx2Y_c~TBlJv@A^8V5 z4$KFgTf>;*^!34X7X5Vhm!}R=PWVAU60S=y!^rg@rcq!r2mP4RJlsmYVY1fJ<dYnS zs#`-gyzGcriLnOA^mi5y(`3^~SU6h3TBMw6R*)mnzyOW)GYJYJLDX+1G0i>#etmbA zmgJ3AQtD%#kQjX;SnO87ukg?{Zl?!T%2&!iz!k@Xx-XQXi5P@)NA&yXbrV&5E;xKu zqP>atu+2U+ihQ{G6@Na%!h(G)L0l8bt}p;Q4TVYZA!Hz+AB84w8#Yc2EVwa71{75c 
z9fIus3s6oY4qzM?yVlXZU|$(Z5lm3EaDq_qPjyQY328EN5CPU^6G^GytBPb4W{fc9 z=Te72B`yR<xg+jp=cDe*SUUMNEujjC`K^NP@Rj}`Htfz6`D~5qStREG+2S8k=W~`c zaD#<Vl4&Tl?0HzS^_StG7ORCFA}tUDn#MAaJUoSY+q(Fz@Fj`B30>;yaZA$R^~~sm zJfg}Z#B|jlfR2!$6YZ8EcW|XF0v9~Rg95S2;`Frk84<hLlDK?2r>qIN!TLt1UxAB} zq@=#~@udFZKMhhb$DZj9r$8T(Os6O7P7>!&jBjEA_C@hT`jM}YysB*v71-foX`mIo zyqIfCVu1t<K#o8*p$09%8^7FE9p~$;R+pp*SOO{ANFiRh;3)s{=;#eP2}dPCy}Psf zF&Pt6OfrGN{z<GrP8DL!zQd2(b5|!w+Cmtk4-e9NH@A@qa55BvOZQXK97O_328+;y z^MjfR+B;deKh`VtXCi7w8q>MF<-tX91;kpAA_uWL+K}wetu&^Uyv#BUoYVGO5&ul` z250y~7bkl6o<{b8BQzfQ>0A!NpcM*#ww2(lbWnTRAGrRI1Ov7O$thoLz#eqmYnNs^ z8U@`Q!$Si-YjE?DI=?KU%80$Z`I0F-&LJ~Rvqr}o^aGwu!F--jZZ88N2DUp~3GCcQ z<Z!vpB$V+D4m`IXo@A5b;nkacnlt{*tzG~kdt9)ZO{+bYxFuoHQ7}gX(bQKUrOJ?F z4GZ|i(6dCqdz328G2}+c+H#1dJA?f8j`IO8z2GznjSj@#uf&Wd(<&M(s8z66U*r%s zGYU{>tIEB?4@x9*LNH!4Iy>ob;MVXVhr-6d&7|jsfoFG<B&Zd-zY^KA(Ma4SPWC)h zAAnsm5YxLlf1^GE!wU%u>CmNsN9V?+Trnu??kt8~NQ_^{fwV<oWB@buTg^=N8enM5 zh%#tO5V}S94x43_DvjmKA~{LXWz4TWB$bb8MM`VPM^n$#Fk&SHm_qS-+K}Z48PAC@ z>s}_JxFQ9STO?9|AnAE0XXg<NNJZiVA3hb!l(Q4))NRdZ5e-Cz-l@1;6S~ZVJoZPC zgc(#M1yV?;WiBX3e5#-qpK%iKhkDtA6tntUNHg+p5UUO#Fm$7Muw`AB$nxLFT4-h( zF<W@=`{7riNaf=do>y#}*5DPyG^9i?yXT4)L?WlCg;Jn8xYE1{F&~`qFKJN;T9qL~ zhblv`<gH~1F(=IJ5|F6y&OmlO1~51?iJx;u0M054G6EYgOGKB5P(b3=nP-y$i3&tv zu-UdiSkE)|f`Uty7z3V&4}1w2wTg^1`tv%w<)?SrP(sk`6OW<}G0eaz2b%RuJoM{^ z3Pq<67uopIT#z#N(V=0iG8r{-lp<e)RAT5rG$%aLd5pkPvu?2{D)!P;%$OURNZ9k| z$elzJ4JvNNR;d#ZQd{aooROd(Yejy*Gb1xFge?hw@*?fFe|;R%n1@31b+D4#%-mdu zC=rGQA{^qNP|#2CCQ|3n07U2%$}=X`ds%}`r)Boxm|4a))DT-W#g&nj*0_V<`ooTn zxYkI6Q-uz(hxrlvx9OS|+u$4|;jX~49>t!g*v>JGldT1_v*4TzJ^y~#9Y)){tB}g$ zZq+&aYShB+i{Wk+6$oETT)_#GY6!9*nF%ACrtd5iVFjcA3G?Rnd=N$UYrnMmfvx&V zcc@G{qP*GrWc+?dqZEV8y1Vh~y7`RF=AcGQ+9e1&=5K+*uuCH75wJwQYbhj;29ewR zA$gECsvs4F;EHBUK{jOq)cttpj?t7e#Yqh&CzckO3vFf1QMVm!KESNxV;3mP<KbT> zIc0^ZF@$t5E(7{OhBXqTOzfI-ZaLVT0)-NHQm{Nf@=9S5Ix?=w1k4^l4YVvBq+P^3 zR?3a@cveWv$qj-I_VVM{LOBlWp5`_(;ZGR?pjqB0PtfcF$n~Ykj6nfU{5z10J7}GW zlwDBifsuqqSwYAqI2A)#@oFFnFApJsQlMc~p(qDQ?#2+TNR5WZrjua(;*QQm)_4st zZI=Q~GLecmc<3l#3TZX{Z8@FW5RTTkEs!)peYHsO4}`T0#8CqqzB9=FNLpsa``{>y zLIr!Wp-5xTKGr1piZVv;1{j+taC&`xT5!%WQN^n56FPN@A<Ka%X|m>gX>Y7FT15Nl zcX()on4kC#kl{}e5bg_vPJNXSV-5|I`Y?|`9o24vGxHJ%+MN9m9V~oZGErPA8V~@5 z1gM2V_|4>yuc=UevM}OH*l4{>!^}LKuSU~#86wQM)cch2B#O$3ZX-U{24K6@_x{8y zrh^`6Fg6J<<q9f!Th&x;5TMs`*4N|Y?qvYQKU=8Xqd>)!2AFvEQugQ3m{;=QHOL`; z*<v_Ki6AmrOap9QxQ8TI7ARc+swl}Xp~Zj<7Um5+Ca{xf^Va>s;V-3%1<!X@GT(}- zP&o452<9roacRb%%V-RCrE$gxPt+fpR>ViG_Uc;c8>%bHAV(#qk%PdJ5ht@8o?<EA zdJKg05*ej|S75PYxbBk@PAzNZCIlyD1LOClUN$c>D~zV3%9#7M+&+~2-I5n`Itv-5 zeA-;x2r@+VmXeEwzQ_*!98+JpE$wH5@#{`Y=mn*q;k^<)_!%D<tbemP?P5lb+jm`z z061cyVFL?Cl#D+50bTqdAo9iX1WqA)(8OYT0!0Nx6Ku?W1fwxQ1dAFKwNT|OOSiDO z2A}VsrH&fq=HDa-+*Kco4<nwLj^&+-SE_ex8}L@<hQQ|M-iznoPJ;=GVyOa?xL2SZ z)s1&3#`WVbZ>WaCI>OM^b@Fh=>gzV?=L?zZNG{{gNCE`bs>$C;$-X_z5wxULLNs?M zXJD?NeCaV(i5|al>aYwU!$Lim=}nmhY+4@*hh(URdE#58L>4LAsC-(H?Q(=2{2Zsc zGi5)RwB&7llXf#yU14LunP#10J0n|rce=XFtgU0=J-<~n8Q$(OGBhT-EEHHisvOmz z`F>%nFF9j~6tfWdo$-@mqL64z@nKh(ypb)WL%_rt<v~qIIq$M`-W)Aifk3+SK4tLq zxI4w`1}(bx4wa>R?d)dy;S&MMT$ZAkWU2!oP#qyju|RM%;TgFBxd@oXsYca+#W^%E zL_850GXf-1!dmFGuM&+Wh7yw3AdXBbCPQD;E3#8*2xFPWasp`JvNuQB8i{~9fX|QQ zU3O3*BAAl8j53c|*{j{1@xo;MP~yfZR~meiVm4thDPhY6@f>VJi(!Bu+M{_R7Eu@* z9S1|*)P6QZSw}B~YLj6=kiKkn2Qk#=cY3>mtblxhJwq7MgVAW|pfBhGv_=gQ6cYg2 zWS%7JrW$uLx*~cm<TZ`S6*FVfJPliB*tjUeq!V-lt74iku@!v^t!5ONu{HHNMl~5h z*M=ow(bim9<cIPlyi}Pv%+pe3Q4#}sh-5DjY<&kv8)7#lZm~XESN*^(gRv(T5kl!T z*|Mz@T$$@&HZ${#8SQkjE=)=*5d{F_V<W370jskqN2fkn2&4p;e7P|O{OHmsRD(5Q zG;(j7q(KY)PC+fHsfUiafl+A3jmzFNg;3RA2GdlT;yVL19-C44jjSo<Y)B*>>&_t- 
ztV0IAQByuE>3p%`x+hJECUJ|6hm{damci;SkvznU<`HY#N+x+i!MFsp677DLXLg!0 zgBf^#9^pz%lQ=wVaev%PtUskZWldCO7F1{%Z6_gv{@k~+QMl?vrHCSv1;}Q#Np0t0 z*CyNiWdFM)${6BhAi9j7{@qq``(iu$Wz32ufuI9T{Y_sTBD7rL5G6HdB>oxg*owyo zgfoR)anEes-Y;MC908ap<1tE-6{0>7!jIKo96rBUF%`?1eu=f&^Gs>mL=<{)oL-U| zxY+V*i$we_#xyq|C9j5Y0VG!xdKt@^`w8s$oeHO6UZY{V?hUGt1MW@KG>P(Lo8udp zA7)gVVcs-VeQ&Ic0h+Rb88$vnqXYub<QM^K4fN1G9OmA9*YMJ0BDV)8PO&~e0~)Ie zmI~78_WM4c&*Q}Q(3#WitFzCz-FIBy0lN0kqmPHT&u5ffh4%-&xAhq)9stB!dxLy^ zfN4Ktbk6CxD8h`=ImR<=yA(-i6vW%DqGoPs8f&*6z%b#8Qs=$c+>fOz-R23F&)}rx z>I7f!%UJ5-0aO+)`E4x9)Xcld9bgQdL!7W161R9W{F7dTEn%^M{$)vm{cl3J(SUlJ z`t)n=)IxcqCMC;bfUfK}`GDfHVin#5CCd(6@>#Lv9l#e#2vg;Yt{x0}GgixLtmv0M z@tKZ>=@iF^iRNk7E&eovOBhfw?~BdakvFc3Y&pYNxfzmX$}SF~Dllgdg+DsG7_z-Y zTEi<p=%7qKSaM5OX@AY^6eZLfcD5k(f9<R)84a``T}03vKU~q45@<0_!7=>p-}79F z0j-DpOu;(A(V8{hj_6%OauWdBnOru&>QG|>94HmUlsT$qRa3^fQ%o{mB5N-mK`hiW zPZfA5Llt*G&AgmIck8?nn1zxSqc3le$y_%%xRVg23Oje=P;)rqSfX_6W2sH@YZ+Xr zL5<zaJ+umXze*)5+a^U5gAtm9LsxQOju9QalQ&**amjBt5lk_73JH`oCjGPvGpbUJ z4ZSE|sedWet)++YN%A<8(UAPjNfTp0olvYfC3%nZ)Lmu3e8u=_bP$r*J!2=baH9$B zu>v$u-obT|dU0BGZ^DCfX`G!hjX%`Z&cOjVUDuqj>`k4O!yzXE{g-Bf{&hu6CTZC@ z>=aek*I}-}G`CfZU*x<lG@qWY{QyK?5k(ck>m6>|g8Glcfhow6vVocBT$h?;sub-4 z7%UI4v0XxbRboLeU-|$NbM7I=k)qNZ<ItF4ze+km#6TS3Hhv^N*1>Z71Ll?7hlkI| z7tPNPicbmKSry$N7jAbYPz_>3V-aN?mvtEu1lA(e;;^gS9iNa35!T)%gu;`3D+7U8 z!`sFyzjg4;PH8@6T6|J!KSdG4UjmF|Xw@E~U4atg4gVbMfa$LTbr*=2VbQHqSK4>X zZ6CKP7W2i0L<i_8=$AhxLMS)(s<#<f)?~~7xf1I=Y9WoUV{3}R7Gwa6brTHRRseai zDXYBAw-HjZ{rT|D_gVY-o~d@``EuoJVCTK2Hx-TcKGTgwH;$DntH$-Qf#uutG#H$v zh3tgFn1!sZ{fq0KW6-?49Rj>)+5J}0OstuHti3o4c!ED}$(@lPJ|6}h+EXnO^10uA z{7B?`O^C;=pik5t8lxn$OGYgAl#tr={q)#IA70khD;wU&4;nipS#&NXYJ>$=&U2!v z0o!FDrxYAodUL~x(GT{*@rIipj=DJ$=^qT&{;`YYNr2mF{*&D4qHHz!sAuW%T9R?# z+fpXX#x$f|l|xr;f**6aZ>x&ZWNH+^y!2|N1l?$+%E9_^PK5M`jK<dA(T`+;ECm;) z-8~e^ni?I}v}uAZW`5&Xuq9o!uyqiT@F*ddj)N}i3Y@v5??sA8eSgJ!A3ItnYppw0 zq3jQl@xbR8Y&@=_^~9LG!%&@zNFNv$T?!$N8^i6|(+iV-Nqj3(ps&$f^I?`wE_9zD zpTReGH;+nRM>|06tG>R)3OxsUtMuwskC@q+!BA0U=v;mlm%$!|AY9;pOWzGqy!y_* z{_V$B&_ryXQYgV*6*lfz%#^5c;SKhIlw*y*Wso`n*rZB;F>Q(}OF=`S?o;CHpj+<S zZs2FvyA9zFbbpN0CzbP1RM-BdlzD5_M;??jJ|by>;AEb#hC{T=;QXNR4!5;g?0Qdg zN7?FuZ!1}lvi&Qnw?RBJuIhrBDYeU&a(WM&!JbHqGf1G)$3rJC#Iju^1M4BLp=@=P zgjyl<Th^HBzeM_>prB+?XWM9H_is313r?G^A)&}q9a?_C1UFDCKXA4p>m#)})_^aa zbiZ54O*!VsY6@<(NwQr#7b!e2zu<>&3XCn73jpp6?k<s~1j<(MfInCQ+@S_j&y@vQ z7xB+nr91(&3MMACjCmK+<?A-I9Uj0!v^uVPe}8U%tsH)WPNxGrux{SlOr7M6(6VAM zPayMxIWtlvJBCL;(wsg>5k+yLU>(pWD}xaRXrwT~)vMOff+~`_lsT&Vo$Y7qZCVx@ z=$+QHak$L4()mnSupkw$r9Hy8aU&g`Z9atTtTU*s$uJw|UOjryj~BEhtgH<2QY_rq z92ME2$QBa_ox>&C(e+bH6O=;a>x;S)AR(}r{cli<+uYtkQnkIkZu>|=+{Y;v%7@3Z zD8G+Oyu_eG0%Qv;^j$gH06hHhr#>Mm&sVuJDy;E?%7m!(G!2kpNwiOhn4TP~(SJCk zegPld@=%_;F8j<vSac=s?hMs~51oNH%Rwk7>NI6#w;7AFelFH~G)pq%pjT-j2(Pey zJBBO{lVB+nsD%M&fJ{;PytRA!lWrI%-n3w36lvar{F=?iWx^0uV-iVgck^(3<aXYY z#o3jO7NaMIHMf$P>JCpuhM|4_O?5K%XryI}083HqfxCfoRKtiV;|$njKRl7&Si!%- ziR2c&XW5mbi$a)i3X)TLQe;y;JBp3m%V(8t<D7ftsvEfU@K5uHhKmOU*&szDZ}Bd4 zr0C*Efvl6@>%_xzfd<|qE%=O5*&t`Ij%`}eK$xTfrG=>MO;!g*Lx}UH6P7Ey@-jBf z|2T0Az5OM{i${YwN=;t}-O3SDoH#ZR9jI$yt(<-rC_r2Q1T41>Vxb&f)vPcnB0>^5 zg(Fa^mT6uQm^B$Hz<$n>tGHS!9|uCKoHbaujT55k(wSd_;6S}kNCawbAF-w9%4|?Y z$~kcnP~0C=R^rc#(_Q!jX?LVFRqkqTbkVspK^QEU`%&GhD$V%|SnsbCrQicBs7mgZ z&VD`%!$2TP)4{b2l~XZVKR#<%WZm@_R3&&JQ0U|}bH^+VP$lzRpy&Ju|4E<72htd0 zz;m7|xkVNKlV4I!>;#-yorP*@ffVXnSy<fKYn-X-(Hb~8;w>-Y`RWAFI~kYk_cdNF zOo`F3<zidXeF*82Jc~^IOsSuE(5(|~AUL1LWTfZGsd`>|9egU|+R&=w3f@(UXL%zM zk9H+`dFv)p7;X5F6|xma{4Yuu*9KKMC7KP^&H2EQNhvBRrTzz~Pm2P=iCJ289-HK# zP>hLLws_)t`(WF>k3m+H1|$xc6w~2|aGw3Lg`D<ai=wKWx<3V)S?PNAx;8kO>=b?_ 
zzB964!fpsPs8Hvlj$%|GVQW$7Gs6ulJ03zFq!j2_;mAz|v^ijetN=6)HRgT)WjT%Q zXFFw@sKP!en69UE=z(b%!YHeiw^#)b>&^hZb~ENvVV8VV{NqhCb!5{&>SD`pc_Tu} z_==V}Jmf(aLCsElO-Q!2yp~hOuBZ<aIy#|{oHCh_q4mhL^W(z};EcR8Csz9QNEwW! zH<R(SZ0w#E3oJ05y>$4noUQ}o8@wA`_lLjF6%uwvdb(d%-}vnN+XP-##ji@)>0!{Z zb$ICI9ApsZ@oUp$Mc6g|ld|ls((%jg-@FvHe7#R2v7su4UAnyKzBdaq6J)$^6MDQd zpQl%-mk?@lpV)YpcWz3bD0;SSs-^cBzw~IWYKQgiw~Ci9*BRu}N)1}90s&{oB-pe> z@H_S1F5q<UIL!DEPz0@A_N-?^f3oW#Vqjw4A2nePg+6wlE~NEP@h-c|^v)mulmhDu zf8&;^*Sle9-vPcfAXmwvZy>zP4o1({y52V*Rpu((5W`U(3i)+U=k|M!wKbdo{nEsQ zZsoVZ>(#wk`Y4S)*uk_xF;K=2X5@Fe(%&&#p7aBB5t}a@HR1GrqS-8R8;-1?vAh|f zwPCSTY;G4-yykwZ&HlMvbBhcd4?JkQHE8x>J~v0-wYptvr^m3-4~{JvFe-Y5(nvZ+ zrm~6Mn|pwSGxfgB;4Pg0em(U{lXAZcZhtQ5dUm|}HF5hZ@<fhngHe0xZF?8iq%Dx7 zHRyAiZS9%!b0Wd4Zdc`nuob-4aDXm_d+>gx`)2z7a8FBRn;!%Ft;#;~@KP7&AkpD` zPPH{$?qlJ5=lvjDkWEZ2{LhJ1mePQ|c`Uw^Zt?zz<#%GMVyN@sJ_I$2ai}z_I7yM{ z!ZOA0`LVxP5VI3&+UE_h;+sYoz%rG-+h5cZ{5({Z<_`;Wn9!&6^-q(EB?4_hho13$ z&fI6?Q@ru87*>MNyag})&hC@deKun`+yrJ|#&HBr#a=|{UNtICrEPLe`0*XLSt2;6 z%vnl+J)f**)*qAIVTA7V0dD*^FT*i+SNz@i@r(X5@kX^OlH6wisE{pdhb=6}S2$oF zW_x;0r=K{z5AxSA+_Pv_CNWW|ccVf2NIsv8PR+&$U>vd7=Vz2fOef>DlBKg|Q#SLM z>(HZ$4tFN~i3Z~SRg7*}BSf}nX6`8g{bqHHG8ALK0}34m&iVDaZo}eS<46SavBnH0 zj>FS9Lx%FX#FFvZPsgQ2`^^gqd`sg6SzJ^KE1=CKcys8C=;jZy-=D$&O;;KWWqC8X zzLxhbA4uODkj*o|X0V}srd;+Hcn=d<jC7sz%%C#*i73R=Gwb)Hs)`ywe#=MEDkxV% z&|;Rt{sM?b2fr){oxG!4iw;Vq!LpiHVDd&6Y6JjlfD4JV<K59VAtRM>_s`j-Iea); z*hnF0dLqW0YLIhfB|vm!99sc+Xds;;H%y}qcgxLBxxoxqppDgas)4VXyI#WSepaAv zGk!-0!%n;bQ?HyaZ(G;J09|Ce0kd!FMyDluAleTLZN-&V8okgcWtSBiS$c9ZGxwDF z8R0z_L0@y`{kEtm3p<Z)K`?(n>gr+AP?2>k+3;f@&j)0p2U`Jb2eh1JK5uYXHKg9e zyLbFV-W;0Qu}}_-5LFV)?h~0aAvvkQ__<Yg*FUT}t0QAMq95o!N;{Z@AgPbv-QF5K zMHFqhtvSu=A)cP)P@)mSfR<6Ap2;|<1H~zF{Gzq)b-n>T(Eq!7#Y?<xjpUE~-lJhQ zU+j?1>eyOt5ER5ZL*q^j1Y4^`lU*!(U*WL`x#CgJYNO&`xw`wak`%VRT-@}=2?|-d zW*p+M`*RY{bp+kv!Pthc^N#q$z3<{U+G**6v=F&OR%gw>f7kHk^6qSP&WQU_&<IkV zBIgUDX_az<UfZ_urScaf%))CS*_Js>0PKOt8^p)*@CfCyYtRS54GJHSyAFn8r4S2! 
z!QWjoElwffuA?w=Z_i0I5M-h%`ov}DI^R_(qKK5OR5LI;1aZ1^g6N%_6iSN8`_Rbv z#1%<Ud=?GSbFaU{7iPU~=EnvBDjtstTW`IZFZ3Nk8G2?&Y(k#Ga<&S#bTdNNdfg3w z*<bo4dFpu(PxbwGCa9v_cMjSMkG0?D>Cl?5gHnS?TvSp+2$sUXLLs?J_DoySprCDJ zr3$(bLcympD9SGHv*D@RZjhDK*iq+}XomTF2D8@t06I=I$v}^MTycXwhYuaOWH<!k zgenMfkxdT1*n8?xJv&4n=IrXG7e;4CW~nP~^&hbe(-sbcnzFDSvS0}<tHp=RrWEjY z6Gu-=zcvIG5wi8!Rz_Et8PP+V-7JwM+b;iNB19$|Cc4{{y~63aV}}Xv`RM8uq1-3d zt=<C#L9*vg1xTQ=-&9eeQ2t(s-{g=mECAs}4-{C^@WY6h7*}Kkvt$Xt9f8W`fP{d{ zMt{W6FQ=m#bg)TA`f7JC3{52iOs85x!F?^4JYWH5Jbi@8JL3Ip=EOI@84733FzgKw zfXZdfaq^{tn=3o}0Xp5;F<A&58yt5x#Xx3fN{kb3GYsK~RYpNh*nFatU}Ck?^cBK5 zYKG%*0p`+s1a)w%nAdm_?wGO_?I)PQQdyQ~!=3uRZ;}j$xO4@omZK}{T`!8JApc4G z_Qw+|ZNjDi>|)7ON#EwFbP7^3yQYcMT<H{JF$syG4x~j9sd$o&SgH_0r3UL0?-be8 zRO)-~f^mm90|}o8XN5YWWP44*Mu7li`gaA19BwKX6&GR#)7`<1QmvRRZ}}V-V@5|Y zGN2*c!?s(uZy1v7BysTqW=KEmbmC-94xVK8El|Nj&=ViK<rqU()a-M{T@hQZ<#l=9 zx!YcDZen@&_7N!j`zsyAvuO#v!;^{qr<W|{rDQ#|AjL4@p*fcfUvOWz4RX@AYW(9c z5=OxJT&Zj_XfiW+>Zn$utOrxWZ0x?q=e;WoLeIwry{%&ZI&{HMn#|`k)`)rgQL<WI z0)9!eP_^f<1|<-{F(^D)M^)B&TDknfx>1XRdmfe7@Nxt=j+S5;jQy!pe&^I>6brV| zlj4JVoqYJ+5^<^LgA1C$xukwAY0HwhH#2U42MZ<266D_J39{egesVEpL-2^HzE1|< zan_9;@&SO!C9362^P_?X;1TKEWpJGUb{$|GDJf^mL|$K?szXV4#Hj*s=2-mgFsPl# z*?rJRt5eIfVH6fOH9*Y3cUIto(9Bp1g@Khrkvt7ORp_x?jKS=_V-t)&NxTa0J&Ezf zC(LBgv6*Qpbn5HlG3<+pa+~K_omk4iisG1-<&NBB^7Vf(mbRm|%7YQq1&<n??5vZI zZUOo&wIGf9n$&d#)dxBsE=bBt0yq~)BQo00p8XsL!S4B*nPY0Wnaf{rscsrm!IpEc zx?ij4I7JO@R9#e{wEQ5aREET>M3b-Cv;CIEb65+_;G5DffGj)n!C}NSWoy8HED<C6 zf|Vg_sjTgGST0ZOKx5<T!Bx5e15wHN{3M4(M>*A<d$f<p@)bCS>+ZfW<c?)kj>XSU z+t808tnS8HT8?w%jlki=Y7?#J;unFe*K!uvPeI>OikWG*T+Yh%doO=trPibMdp7le z9x`LH=6-pBYl&q-?%KlGdYnT0VfThE7AT<$pCQW+tchR-*s@?J5vqnO!^q>C$CV@F zQj8BX9ty5X5M48UIw_RGh@!n<yw*(VKZY$9*H;!Rx$xrvpK|aFTz+<?S`DeH$#~^y z3M~~>u+Xq71M5=(!c40F9Ui;L;l174^J*_m8n@?tH0OckVWOvoA`}4qR@#a6llg9D z7maI!U0bH3CkwX|@;7#l|K`!5Gu}%lj&Be3uiU$f1*55JYaQb1xep@;sh@sWbsGkS zC5opj8i33g%8JhS_VVw`7c{>~p>r!V65QFdI4CRlGE;-Vb{o!ynT^#LG?-zqf=gg0 zRXDWiI0ox(z>ZJl7^O|@!wa1CU4r=VMcWnxjwo`cmGk#BDo;+T3Z4QTiA)fPcIsAJ z!bB^-E4ATvKk&<DEZ|tQl?U^?AIJ}6$q%S-7A{fA6o7h=mUF_J)?xH&(3wl|drOiY zsjg2L=OQ=;YD;Mf%g#SmtcR}hUQE&RP=LJg+h^;SQCj&6#u-QFKt5!zurLaNV4mKS zx0H%n*#z?O4QFGTiU8@l2F#99F7VDJWBA7XW?ZG-WPcfQ42F|f6~A_xuqbzzO(;5) zN=gY9mQ-rs8I(7K4lS`V_l<qxw0g?AS?34^Ria?_vOQTP#~!}(q5?O}lHGt7A=SG! 
z0)a=`<aKPYiXavil#;j>)y1O$<eXv`$|O3S;S)<F$(`$Uzw9lVw5w=3Wh7IbH)fG7 z-16X16h=8w7W9Pp6~zB5%BRLbaUK(r=fc=b08w?VW>k4RgdR)AQylDD&(e5`4VaJw z5LT8kIPTKanb2NdKuP%3xjEFiRhIe@g7i<)4{=YFQhs-X+h5G<*h#$M6pjV{EcZ&d zjn)y&;K$2?`NX=pu5-!Xv_1Py`v-jIzQW*u3pA{H+7T)vg)nq*Y3#zb-E`@RKTTWp ze9t-4Zo;67u82rL>g*zPQMNYFkAl5LKRZWe%5&dbhuazs4uRm8LBNW{SW~3Gnip32 zCWox1;b(}1ZP*&Y|M4zK5zmQ)ZDn?<StoR|pRf9hdw?8ljXKVe18fs4gXb67L?1no z-bsUJO1z~YhpP6XV*UDL)>hMbtJ0nM))b3hWg!iO__qmERlgW(m_RAq!tqut&G4iR zK-2Yzic+KH1j{!3=kT|SCykePLx9Tq40}V8nUsPAGp$@MC4O6mHDSuy_7+dx8-YxG z(KN?YxyuU}b#pLtfZ$q~1Q^yh^_qZ_2z!$g;s%%|a-!Q-!sGLjm9|EotbS4g`a=6V zTvfG%3v1==LOfCsAzg-2D9UdiaZ=RTdHr4h<(5-k*sE4B?kkVYgGd%H#^SvY{DBSG zEK2b`ZHW2}DtUH8-5&eu@fWH6o#&T7okC(*#j4aXtS7d#Ff^~l?0Bj`K83U4$I~F9 zmEO05$~Y$MYLhkUd*+23Sk^wgSXVNH2;aX>o->tyq4;}mA-i*iFF#@($#6y@?mCol z>~w`~Z5#tM`Pu6CMi}d?hIn<XEf649UiPr?RA1o1a@}m>3k@2OAkL7|rU=4mqb|vl zpPH}XMVIpyUPG62injWj<Z~&e%oC?Gh|5yJ*b;QzML0&(Clcn9P2pj>E0z?QDoQK^ zOsSo6(GXBXpV*wWx}FK+478Ke@!`zgIW$aM_7;=#6}voqOlxXtUE=fV*23;ogoxFf zGKPXTjoqUV^?>w#1gH>(aiV6B?Mj~1yF%f|RpzbLt-?}2&QZfWWzvoR&T&d<C>+ zwsS>!xoeM4YVP?~Mr7K>ng%UaI#=ZInf>E1Z4k`^S0t~{84`%T7{*^o0ArfU4?_YS zq-4W%7*JH*nA%JVHS(BrC#o*We^65`<-wkfp`098-opXEu6>(nTO&%W3fau7OfkD8 zO|=9-*$~7mc<YJ)NbX7oqi`4{uQ5Z#sMj=Lj%}23!jA;C{7F-b#w@RFK+;MR1!M%f z=SZWdm%mWbp@hcW6B^Gy-hK@x?sv?#XI}b=|F*~oM83oP|6=bggX4^nG+i-UU@<dW z%xsax3?-H<W@ct)W@ct)uvjHVi&?UmdDVTlXQq35Z_Mt@#>TxdQ55y7qN*~_dGqA= zJ}D{w^^)1)dnXonqJst&TmMq_1mS(;<m1stWB!w==!0N72abaOOel?}wg{{vPQ?@d zkZ=RUR4<+#s++-(ZPIO2RUq&z(QeYi^uuW5u@R5V=>eL>DpUV>2u*ncftD#jBRv3i zDqywhe${jr+v=d0Tf1*PAv}#@N#Qws=0s#PwcgDsX_9yP+9_4?TE;y@b4f!ZxoOAT z7&1*)D&M%SZ6f!+hX0%+`a}d<Azdt^izbB2heSH6Qv3J}HA^F8J#UZ#Y)4$xk$_Sd z{qr)&N;2C*loMrV)CcwpW@F#u<h8YuF7>EI_&E8m5@jeJA=^|CMxjY2+C4-_G#5&Y zIs<Nt@sy-AkVe-|bfRxyWyMdUgUX{{T#%XM_)^g5QKxmsRn7OgRdjR8NE6@Eg|Ic( zDY9R$Icj013-IC=gz(<L#<!#I-41nhI*di4XLMeUCS&Nk?%#`+HVHj)Ia!sMz!u14 zEhlYaN0iOv?+sNqhYJgd;)`sQU;t0>R$zfLMAcZUI`iTU`rW<UEkkHv!=Yr*Ydgnn zASyb6rY5ciS=MD+?B5Ax6+#n(ufA}o)`VPmdV20}+j^bmH75~`VnC6FZLd>R`Je4x zGyd8M<K;6u;aQ_^x?<hLGOVD`ST$D_D3TNF;dquw_7wM9w&F=x!GYtS%Kd#kc8T=y zxCZpTyZd}B0E=^a-k%$EA9{T5)IY{A9ol7GQfqc|bcZ+Rd|q@%dUp5*-iA+Y3GbEX z?lwOr1ztlK-mN>9dY(HT34X~v+}$4kdU#zh>478MTRQ&Y!`{7ijGL<~^!RrA^l-J} zeQ)uc<%!PE?AGrZ^2qzg;+3W|=NKnD*+=1HpJVg=e>9Q&y`|*;?VV29_y;5l7i+;k zv-bQ8?umtw?O#0J|BsjWAKg97|10jEf3HdK)2sYXojm_a-upWz&)<Qp{>vozf72xR zx5)WBli*(*YX2BFO#ht+@SkcD{B*AWZ^w;Q{GsY+Peo+&gYPS<MLJ1n?(IfmIT~ak zOehil!B*x=+_#3Wi|<42T2m92p4<as{HPiACY-~V0r<Lf_xNL=ZN0>HJ0oGcQu~8J zHiJ<Y)~~JoK#Nxrf=qY2gP_R}2Z2A{oM5dm{n-6RSk;WbQM5$u@B=6jZNR$8z^EaV zV2V(ti0lK7=yue}?8Oh_Xo$2$iUZep3@_qnrMgLi%F!(BABY}e6sx6*=of<L)2}bd zV(7|Rie}=B0om=d?#a$_++N3sdu~Ksn8!#)-*xtl$Ohs%qm6EhBPp+c6f>t$&$Uf_ zJAgN;Q%*K4s$3jftyN%$g5f9m@NQl^^XcAC@bPY9Qe$9c(5-R#Xw^7FWPF%NabL5{ zbGhCsb9yo?r^X6l!~{niPET}8OTSVF`-!{W?dZOWq^S>>jAmU`v@r{@jQ6TnSsag7 zcPgV8Do3LkNpXUivQwJXZbjsf+(Q8pCoi0}>QM#gX_(wmZcFvzhENtqbQIW%Dua)G zC&v?92B(2lA$B~NFL;+HI}tfThb%CrOtukA_6SK1X}s!XcKB#1{k6syU}e&DopN~* zp-{>2Pu8%%#@Anhj+K@DU;K%GB_8}^Br^Y>u&c8E2kfdc@pp3NKe2*8sK7fqI!sRg zs5cl-q%#@(QWnCYh72+oW<o|m5kUhNM9YXp69ry%9yuR0oTk`#^c=RGpEcL{t*#24 z{mH$*mxqBk*aC5|_vhjn(AM6zwzfq=3M`@QU-S4qZ1Nk3gpKTr04D?qT_7Ypv%P&Z zA};O7%Fd2~OBf?1JxEE3s;ERcJ4eaP61K8Z_*=`~o4=?wRzr(dU4vUiN4BD}5CD){ zTAZ3+V!^>po1AQ@s&Y6xyUEIeUtf>t>KcE0qYVhC7#Of9C^(frf&!}x6RBrRwZe;* zA+i-O)P@V<VH+x#doVG6lrGk@mTj%IZEYmeQ0LOoM76QZw6w~yw#m8pBkt_NaDQL; z^1{ro>uqc6<m&3>Scn{9BP(kSoH{3qe~E~IPWy9peSLf)>E>0XNHyQmlHS2H`Sx}* z5O9AiQKEdJOw*-dHEkLR1pcU6ESyWbea7_d(__ccm7$<zV6ZV}>Ail2bM^7?Xi)<p z^nGxlB_#CeeoF80&AEAcdb+te^>cdvxa-=p$aCV_I*o<Yf+zv)1s%bPGS-9q?bwc) 
z3m53e9t$W#kSrn?bn~3|*CN6Xb#o|CrBWtp)v8V$%`Ti33)|a%eSN>ZmHpPu$KQ<- zRvpw&>-T-?Q1{xEnI15<tyQ~n_-%3J;NbZAdGzGu^9#-F<ePfyaPoNc>cJG3Jh9@$ zNUoD6w26>F$87fLuTY^`czVJkZsOr;5@J8`u{N?Z*3K@q{GMx=UuasMsheJ8Uz%lJ zo4>8d^HT>5mzJfKmFEMLr^m*MB_$~$!b>3`6FYhBWf8e(BNF2TrKK{&C5Xw$w^6@d z+S#?HVp%GqQILS4Vgw_?3I-TML6N+@#dUWt{&IIC{_eNFj*>e;Pj72tl3GyEw2d6v zbeVh4&OYr!%|xpIPqOi^Irf*t<6!*PZ2T*6<R7!~Z-A8l+<oCc;F10>J@J20Py96+ z{t_bpR!{tuP4ka&^WPyo|2`P>zgLHT5_JBz^~8T6WBv;n^ZyrQ%->KCe}|0uXR7{h zl&}BP)tl|Vs=@znB4hp<4gbUH{m<ft`Ts;U$o3zo24!O1EXRLh4Zl&wg@z&v!R)~7 zgpx)R{pAMZPXr@~9QgVAi2?8IsW^169A0!jU2QsTul~GSEIDa+=vZ`AAG+ssbe#(E zKD}O10AKilhcB{VdT=0fAgQ3e-yqTgh;EP#LBr{VX2_}95e8KCmTB;G19neg^uYx! zN#dGg#n(pm@C~o&;f#=y+G8a7z8B!<&&4Dr;Ttf@GGdjaPOyAi<YIpja(g02JcLW} zh#KzbJKQWUZB<ftlOu8^LgW7$maPmBK#kCypYMzp-w-dlFsw(ie=2EjXMh{xD7qFC z9$-j<D_t;ELd~0zZ`9C`f)_3^IKInlX3y@EH)Dm9DK3wW#%WMDuN!G^M^mksHC9?V zGY=8obu`ze&swW`a7ECu=>RUxtgRhCXg)tz`h9m{Jc0?z+^*&MdK~tn>+P*uZGtf7 zVQAM+gd|luSgA~$23@A)g(Xdm9T#Yf+NQ;vBDH^W?8&c0Wf(qWS(i$SaRUw>+rOoU z!pzDluW##$#jLFFYG~+xg@YgTs%v0i@G*tD9u|Qr7fe({ybJz;7!m<TzO{i@Xb6@x z8w55V@Es^J%9T9&{X?Al%Wh!2L@Io70F#FS6V?i9RC|E}fNTdQ<SZ|i4(ksb#MVdZ zn|A`x<ko$SH+wNV8>6LTW#8PM8~5x!`25^GL&mykQR7?ulx*F~H|8{mvop_vI19$w zJ9lFXTJ4eo^QOWBW32+g`IF74C;^r73(5VZ=Vu_?kXYMmm-Kye^L>-@vm%KbJ%&J4 zwYQeaojh4P1KI{Uj5R>{_ufs-jVqw-_c-3v(Dn6=<-I!l3u}Z^{3!2~9v&Snk8}xP z;oQTVsYluTI@*{loDol2XOi@WG>H{i=p95I#xP*_Q4&Ej`ll3=B~v)P$jlQAGp3+1 z>4-E)F*qZ-?<mMC=+BJr<o`^L|0Ui2l9(J!|Atxgmy-1#WfkN98UyT;;{LZ88=p+U ze@k@#pTc|ok=FCCXMF$j=^B5!AeFUN+M7K-u43$_S|2jK+IjqWAY5RqVb9Oc5rZ&3 zpU?Z~TzvyU`T}?cAww-Fv(8^sRD=@bRi?XMZqGLYxDHB{%Z;lvnig3$pS(BI9j?Y1 zDEA996BFzeWyd7AsQ>OwPp9{dx6$eQ(XBnODeayXm9A@dUq+Zq1)zV;^EKQ(Udls> z_L;?^dVgML+y;x<3ftx=>kL=3qs`gp?tbcQ7)U7K^|rUa)^Z`>{d{{f+Q&=nvr=E{ zPTQ0_^qcO<znU-s%wK3WPI@*@WGqZ%HAosmu<S^*<Yc@YeZ&UMu8&z~kzaI$TzIKu z`djNPfB6($9XU}0ZN4p0-HyBSo2${1mer=>deYGNfJMOE+Kpz+Lw3*m@mz_n_Y=0Z z+?d2uO43WrefZegH@y5u!!z-xJ6NvyFwS|{Rwvmj?5wX%#qmz))3EY&Q5=E?#Q6+C z`;NuRi2EEe%1*!q9d-XzN2{j0XYTprQ;SPHC0wXvrlxA2W5rE<C!mnZ123yJp$=|# zPlLP3#Y%+>rPzDn>!^|UovNc`Q9VI?x^7IULUQUk2~)v0&=!h<!m7aLn0Bj315-24 ztEB637P^7cDpK4iVG9*L_#i8ml`p%aDoeX#)kBF0gUN`$l68U(FC~ARMg1-w&A(3z zuYPKO-!IN<YTmxPkNe^pxXRlIr43RWLN7np0Rwy~ZpO4Z<MqCOo~zmM$^W3-k70<( z=@g%^NM^gFL}e$|SD`SDB{x(lMrTc2ysf>OlB7NcPqOar)|qKYRb$)9S~bQ{EB=#8 z^OIn}26?Iob)p!1q>`Yw5@n<mqqp>AIQL|@@BmmzFj9^^m`~7KhCfk(OJj&dY=&KO zkpA<JZg{8--1vo51vcGjY89cD-!{l&h}a{>R!^}@Ubjz~hoyn_&CEE*g>0$-wU4Fx ziuV?>+IPxR`(Qv!tkMla3}QlIRJN6)<&+UfjgccuiQ6h=m+%^k+{CwFZQ9Gsa)zpU zh5`+u6xqOBh5T00;zVsfe~TcJe8q(ZL~V<nd2XWz9ML67^^*9A=BFJD?APEcJdK|d zCxVB{uSY6xYHZ-5G6ihE>(59mgm0efaVg3@F#SBSAbU~RbhO(b2#!z2F8jN>Fm2>U z1gJj@S3KU!s~sl5(u7Gv9(6`CU-Q9xietjWez+QIx*b}|-Xx0IS3r>{&69;I@j%RH z$j~bHQVITMmg+0g8tVFitx&e5{LLVR!7PjJXBxe6GKqQoU<qz?*(>+9C-=|(A6NRu zqhAloFsqo0&aU^e!c~xyG`gd>-zRapqryYWBNn-)EjXP>^hCC%5m6W;W1FKnZt37A zP9`dGYmU;vE5OH)z(IMBbGn=CO)iE`A1Dvc3tx?KZ~i!WDzv03V3mFIm!FkT7T_c* zI#y&Dvn*MRUJ;oO%NU-eBBtK&T4wbp|JIPWUcS(IlBhMtpkaJ@9LPfJF*nkxD8(}~ z+E!ocYH#rja|9KN4W-Qr`#FOur71L#2`S`oC%8|j9rQO2|8%e{s37^7i1SRe^6&5+ z|If}TL`r*4t3<xPLs+wJf{uJeu??wbM5Hj2Bj}F>5E@Y%=QBO75ao*6^h<xg{zbFt z*Wf<%Wno_*+k?g3k{a2mT?sB@v!krI7O6rN!TQpdaxSu8828SRR!L4%0p;KZ-_k+R z><JKPyyMy1%s1|~<P85@yEY0Rqw|^>YN;jiiF^TT(u&t6q3w5AYdEb=YTPJ2$*jD{ zs=mlD>u3j&8(SM@aFTP1{MMqaX-be9uvEcK0f$pVnS2jX$gTl}MzuT$cl10r+7Yi) zOZ>H=uvKrkc#qCFK6g)}dVdb))#8O?oC_moDWX{6HcwT^{xqnZq`p&eRq(0ru)xqq z5Buct`i<j)KQnXM=i>VQ?WJ#~t@3(merFupC~F0MeI<#{ZNcjdKw*;HoZaOab=V;A z@jBkS7c6YplxtU5YZs`kW+btOHl01k3ac?@f-FmXAy~0TY8hV{SgLqaiOwRSFMnB> z;Ib<(Mx!R$CZdAI9@D7vIXo*{lJ@i}yqeKYN`(<VWg6-0;uo}O*sX<Y%xU<qtT5<| 
zFz6gn@eMNaj^c~o4Y6L(SAzj=WUL~4PW$sQtXgTO8OyEAZJ_JK4aU=?%^EB8*)j_W zule|4t!$UTyd6@54DjWG!OarEUwNT&0?Dc<5;Z5ZW?7~v8u-~hMXhfXru>2JMDI(+ z&AL5Yc03=!`;(_Fekg5V>Ebk85M_<DL7BKkojg(TOI+w=1?dbK#vrj22Iv-llF*ap zR_%>W76aA;2z@SZo%C0H{gu*F-6}=p)m0uWmj<1GoTYxw6VA0mYs=*6imxe@$xhZy z=rkZM`2o^YNn6t1Qmv_BX(#tGB6)ZhG}wv3*I}wd4q&#cxCor+#$<hwUA~SC#1AVZ zg?z+=1UihB^8H-6JY4*;KLgKWgGFrk)vJ$Lc9CCwhD&>ZPi(Vfnyhw$VkaJ8FQukq zqciuozuh$26ymVPqg^o02$%Y}2XhRqD9(i(nmQ-W4R<OgGy*b3NQ1!P9}K(IREpLW zC}R)_J{I8MPILq^3B6x)-pSSAiMvoCH`XK7=5^>-nmKuV^BjnYKU$<swTBGCo8%AO z6i>+rEIfyva5#dnJBD&N4Z1uEfx{9TS*B2fL@!Cai=48$m9=cAWUnBumj;)dvk2~> zY`#e^u_QhHemT0&SGwQT_p8)nAuOY4dbBM-G=(_=M%==jP{*u`d7n_7tmaID+)$*q zOt!boE?DdW+M6`@tuc@<u8_1X621t}Ujwu+cW<onPk-LQMBd3@#r9yyHcXQ4T$<*3 z8UVGgXnU$+f3gOXNFR^J1h?cg%>YH~7-thNaUCDPuKb%Fyzh7?t7}pWD0X0TxTwo= zH)#a8Awg>*`dX)_a2xNxF3fHY-CXLv(8nlJ5DGB>zNZ<a9Zub*y21y&m(n;n?=0Kp zes>U<>=SVPXNdNvnbg+VAJGscOnmQFQy<CxgT&N*YJ~_Tk^_Oz)GUk!vcy{AB>UW0 zha#0L{Uz8vHW*aqu&?Y97)%jy&7T|iq`dR!_dyP2$0>jI_Ca>AGv6-_8bn)U7h0d2 zmRQXeeLm(9p1RH~yXp)x9-0yfiH+E=D#1ZAvB665-ZHmv;8A5M`I9Iv4Hu=V3V_(k z#7vkH-NA$o7vSX)7>3&adtq{Ha(#7RZgqX&*8n&{UO(*tJ&Y11CO8kJm4z4^iwC3u zkD1pC{PUFlvrc@m;7N2H#yjN3sMV+nX;c6$;P4#mmG&VUQQGPknxi>4WgT?~YJ|F? z6n7XlCDrF_T!<#2MB<-m5;c*MPLgWBG9Uq>7+;hfCjk%N&{#jIj5R%8F8WUI8$+XX z$)P&Wg!-x<bk;XrzT4`EY(iVHg2Ay0$+7C$u`+3~+G(+(pm(~rv=DgaC6BjZWR+%# zyqt~!q7w#1XYqUZ@k!dVO}q|{UGR?x{izFJdzq5xnUb9I;HHM!Xq5#HCp9tj>rp`K z#93R){e|E&WTvsNwH0r@HlC{e(W*1*YD+2${VUTmll_z9Q)|OR0==ZH2-w(Xgs6mA zDOj1QU(+)&lH*b0;Mh>jUCk+8F?A4R#SufLuA`zKqC@XtqHMn)*xlYW+}$!e2p+9* znQ1IjW3Oq=J9O$DRap|6plYD#a;q)%uy)$oob@$m@-Pf1Uj!dEYBR!;er=(^sN<}` zF5OWr!k^BMwrtf)k}24dA-<rt#zx{NWF^G&wmVgJUr9Rah>;t4dcm;cvH)XGx(J+{ zvMocMky5a$7%$~s>-a@h0#IFYdiWA~cemL(Kb@c?e?b^;W{pWvbKXy7C_Y#YqaEO@ z9rS(VF*6^x#yhmoDnH&KorXksfKPL?Mr*PvR(nY57cWV3DK5#FZz~)DqzRS`jCv9* z=e%4NN1)C?<q65p(CK<nwe~ii$H&%?z-N&muz@(Uv@5PB>L7CU{iLUblyLJ?Dcp1# z9L;hKjS7vma?Q1Jja8ZqP1<ygF`V=*ZPNs8og`JQ<V0Pxj3oFs5>DbFN}4HB)>(2Q zEce(p+Wy`}ouNSuMWE{HveX(IXxeYZW$tyBI<~rd<dK{WSFHFI5_4n`B4Y^}W4cm& zxb&LiwZi+M!?}tl>Ndk?BS#@u%^5b_thEJaqZ5ei9y63U$b%$ceFZYJc*U{1wdMNI zrnkp~k<*3ecl&}mrG7{w6B->lFbef$u)N$EE>Rbh1}dW^1gjb??F%ihO(8>bDeHCQ zns6S_6b#Um12Y}3%M-);DH3-WK99htkP?gUxh^7TL^xS#394Ffx5*srOFHMKXQ!v# z&Q@E$qAE~#mr6>07#G|8&TO;0Ifz<W!ENABkh7kO+{nbv&c;bk#p<*xczC{x_VYW) z)_Js4H&7Cku`)IjRrDsdWanf;$HtC;L3FviYO%jztap)TscSDaVKBkgM^(&L<(p;i z<ZpK*9Ldq@B8*$3FU53~97z=Vt?`Q;x_0T-OxeE5Pz8x3!6~Iy(zg^(Kk(UazqB(u za`qh6`#pPN&DV@rKUQ`A`<7K`dT3K^m(qHN+Cqc8Kr3^rZI74bI89-=@<e!~Tuz|U zmuB$G1PL`boS+IK<~z@b+qZBhFE}HsonymHxv-UKsy1<)4jdk0yjcG1pK)7i`VBYC zQsYu5IH%m)JkT|W(NUlq1`Z9oHx3E#H!m^EC*KZWS$mb|eGU(D8NM?d;SyM%o`S#U zLGP<Bl3}8Wne6q#_(z7?!6G!>U9&kj&#<;6&$J{niE2P%7%fF<=k#>k9X%Bs+Eh<j zt^_5|j*(8m;oO)*l)BIR0{v)=lYh|c{;Ec;Jk!QXU3WMhQ&e2hb2kG@@E=|bcSf?Q z;b`!m4JFC&PV#%ca&~)u>U>bPu!j@#mysXF>#g+lv4WE^Qk04VC0CuQjAgN~38~oR zDL9}NBxSKoDcp)v#OlDBW#-ObG_Ude%)_&^^K6V%L=@~4q_hQ%V7?GWP+{HtY_OR= zKU}O4wM~Yj0mBMT=M^8`$JEna)zLA+fbxqYFqcFn1;_(RX1Z|!b`+GaKSOSzfdwX3 zv?h8*deI_-%FSAuIzE?wHa03g$Jiz{M~CACdomab5nDU-@v7jjl%B8K_Hl<`BrRMe zJ4ri4+t8b)MNgc<N|<@p-rPbI_9!SU-H;T=*<M8s&mG*o5JDSUt31$uzg_yQzDPZ6 zt!rw{Sd08qViof99bhxk2}`(VdCII!-=nK+*z0_WiA%VN6JFI3ozf$k^?&5G^NTLV zHL=It$<8bgr>T%frG!up|2nWH*utw?Tkgw#>1TiLx!DwoE+Wlk^YQuS3+N^FQ#b!} z;vWrlAVAN9#2e!x5}Dvr+th*C>gD!XI%a^xq}Ct!krNR@_!z!7dq}fIgdeICE3q3D zb{e3Rc7=<5sj)^Y>ZFYEPPO~)W<94Yx$k7?xgNC=g_`_MPtIEm5-B4rB~T3|r`u(P ztJC2K$#&C;&6l*!U!E;N>Z<_?yW3sqMG%x2P^qGP+VphVu*OYK-55@QR&Tv2L7cj0 z#YN^Ax`GypWf%&H@ObC{{icfa)RN>xdpEt_0=imeYNC2Vj?OdccIhY@U-R-R3Q|`K z`8FkQG;ACk92qj;5Of7eZZgnxiHm4{Y+VVsDl53DHQG<f+TrE7-?!eC5~I_3hNG=e 
zA0t4nFH55jx?JJ5P~pBPv2vg=;+Cy+x9~t4M%M+kT^na&J)AU(n)!}lWM>qRt=rP_ z0k!DjlHFyEuK{_{j>Ma3WKK>NVjTKeo74&-L{&y8i9F3e4-Dqh3%=>$h$4N(=756? zER3s2>Iead1d5qfCS%7315n*DX56&toowiUS{<oSN1Xh~+lxrIF>=6fK)Hzzj9?!p zww_(3Qt!{@@=el5#R6Ak68R*Uyu>3|jOP(TgXFi<H><rbR?@tv++vH(ggvcY5#d%S z3Q6<zQ-l4+`9JJvN?|8b(6ngP=jG?JRo3l43)Jv6lZm(R^7NV<<hvHC<oj&^)DNVd z_xrby063&Q%|O+JaW@#O!BePa+wv!cQO>quRfbJB|IkXqB{)O*D}LIuU!~!Uj<$C9 zH}IiS(i60mme$y5Sx1Cj+eau;!!^Ppl(5avmE97yZBm{IN?Hjv8Xk5x(@_}>b5hG) zlbrmNAE&myS1bmt&=CP%5OY`~J-;_~SAN5(3Q*8;5Ydm)k@YXG0S9N)hnBU*`D;tv zJonENCPRRPydJ1Z(-OqHp6S4?&L6qqCROnsO(gl(nej+iyhbU~q2}Wq2a|7-6bBq~ zq0gr&$16!8G0r`sv!2hl$0vWXyn<?uH5(DC@|3R7Tf~=OO^sE5FjKdQ?P(L}w$)rD zO;%w_*X%g_z#+~((?9UPM2Lu#9*2MpLPp<F0<m;=v)g^YzZ<2F=|#gp5higr)93es zowLmdl{z1_I+v*0e&VYFH&BXjn2T`O{fLZu;x2r9u4=2v@oatuFDQu8UJpt+iH8p# zP-2datBd|*;aO*=8z<8g%l<^JsNq%X`9Oo%<f;4AJ#Wz7!URqNvF@TRG@>Ht5gH&# z`6(oNISW~;eELV(O-_e1u*yCi{!s7iP>+kJr^xN;5Q%vts=Fd=G3Msj03Eu`@bTRN zhn7e$1x%e$^v*Igc}&H#&Q7m_UBw6~h9+$SJlr)TVkReZJMWwK`@z>tT@j3eSpG*( zTb}-B!R8sKkw(Ox+vxdnzwsrC@0WS+&Z~#fcsRAHmrn&8CfrcaO#VpQR`Q<AFx^QZ zE!|OB;c-D0;*!H7&pT&)z5Fyi2{HqRXg$OQ--Jf1BsZGCMh@rU3Su8(o25V#64ab! zFyrc+4)=u4HcNBsCrcX*3E>_w8bdhl8MsPzXn!UZooh>xp1RMFHFnz+H=AfI@c??M z;-xnNgeO-<V7D>wK9wKT^#+&g-LJ`v)WGd+c}IIY@3V6Xz2k!vGAaYnk^Ju@dbWL3 zgT%!f0u`USm#4<N)Wh|wzNWOR2P2}W6<pTF3ee>J8GRF?^9GvtbxHf;<}?-QJ)Jo{ zKo$;MNkZE|L%~eMyjaY&k&n4|enn+q{&Oc|?Rj5~D)Kl*${s>@aWH$y!C*Cxbx``Q zJldW(KpJC>7Svf1sco;nZ!6-RQ)IJ!2Hio)_@<qjqvrK_oaS4{nzVkis`I1_x50c5 ziL>$f8}PAaef?CONsYhd1r6bF#p1`>B(S+x+}$5VhWtHh%+0{<DKIf>XWzd(ojx?@ z7VoLTiK`<skH*2_(5#XZ#cO~ycnI6O-{6J>7hCHyJ>7gHKJ}%M3c>R`E@bEBr8kea zC9%z<ad|Vr();BlTkB{Xs6yQnn5itKLt};*+lcNt!QepTptBn&+(MM4IMD=&3GNyu zwzy1&)AkUg+NlHC-mW$xP_y5kdl@V*^sGZ?T%C&cpVRYfP?AU#r^-Yo8-jQ8cOiFU zkE24UlLA&Z-29)3OA57%5b(2JJ^K(0lT?~-?>Abx0wiGxNMY7nNTk+I$6FQ~!p&EP zB6|-L;4{j(N*)hy1NXGU=<q8q@I#VK`*H@-+qszQp4RMnj`jo`HEkc=R8qd$!}+5s z_8u;~J|C?u<8qP=*na{jew}*QKoxT2EWvXRl>on8P+TYypQ$1E15L1yHgqQGDBFzZ zdCG<+SY9B?dR$lB?u$XYzE#`V?q5gA*p}SVbblWDAU9?wI2<lqgGwc6b6Dc3QCQ<R z{a97sSf1Iyh4rRrsc$#X1M$L!abPk%=M>z3C#N*6wYM;^x0@PeiB5-R>q`~f7$PYo z$v0*EJV)N0l?RTX!MyJgU)z!2ARU-{H*6I|O;7I`dYQQV`~uM|+80JmkQc|U)7`Gh z?tZV+O&x{JChQhP>YQhUJe$p5GhB4eaHv7NFHg3w++T<R6W6Wq-OYJ7FwMqJ#C;bQ z;Inc)SCZpF!TzwjIML2TJOHCGhxV4M%8pK`iVhr$zB<RXXhumHoXiJ~5TQ+G;>C{i zwifnFIq@w8BYGTcmbdrvinP8J7_=X}lUYhaWS(5J*Lkb+DGUL2C2LfiBfrRP@*Z7& zq|ob~*_@pWx+=NI*;xro3fhS)(&!;6RAo0+MQ5dGYI#ZjlykKX$@c;)iev%p@yiu# zY*Z>M^a73bkzI)qrd+1!WdGKqJI0IiqaL;WJs-2g`fTx*PT!Z|BE3V8V7wSRA9ERu z<Okhprjm=5>vpx=vk}wo$eiL-xbc*1w>Ob5avfsidhY}wXLLA$8=aZE+t1y1XHB4Q zdN%af9(;y*yHPOamtRTRauwL;1liddnOU#)$IgE;81Ff7Tq|EZZ8xSa=M7(yXp6o? 
zm5Gx796f%a5Szm=LsvXcS=_<XcA6mxzxDq@+~ZfO9$qMX1-ZssIv*~sGXN8^{G%aP z61^fdp=0PBa=dFDQ*Y~Z#?E55NyREg%Q%WiX7dMVm$<pv@MUK6_7hK>D0q74H#ja6 zbhOLv-tU4kbRwS6UwD&mVc2nF++W8&CcF&FO_q{5E;iw!NBu7Ezje?QR2GE!*eQ?7 z+uxar4nHQ<rIiH%_jYq^F2Ag8PU!IH`5pYKb3&dFX}W*+tN(S-xTAQqdv2OgU}I79 z^UQ27;S5dNU?*j`1k+J<FjIP2Yl2y+pPeujlU=Sv^%jLK!Y<V6D`U7wAX*ONeF{y@ z=KC^lW4QVO5{V6g@>C^3>GmAeX{;Ri8Cvq3yTgs0<U%cRy|31k%G}t@XQPRPt)Vgx zHYuMAt<D=)V+V=x@bHrAW9%%dsYA0Ybr3OvB*LGTxr?;SoVCfku-!u6^<cbc?TO(I z3(@=)tH$RIk4>QM^kiY|07KpbMu)dQ-?H;9@(-Re9Wo<S8WdW2TVbumV>SwFaho2} zM1+P>nEI#9z1VkEYLzTKXtCBD>Odt^y7>cs-@H1HUVx4rp}H}~5@O$xr3`w6C&m1U zeRhm!qLXEbfS1-f$1bXhG}#Gt&dKo}2}Wv^s=Nw%t3*B-YSS~-fH}BGX`+BR1og(@ z65Yau9bj!kB0O8>#uf^Jz=nK0Q$+O0VUP2x{fDs$0HYY=kn+A{8VKA@540W3^|b+j zzGUDB02DU5k%Ra&&b$!L@b+Zl!lS9vt<4mZnHu5FO`g6y-I5GdIt0?Oh*UAV-5QuC zc_|~SGb5V)Dd)#sdyqAKp-oKU=}6r(9G}*3JN!mt%_g@5Zw-zMEDgm<)v_Vfk*n;S z%17xfGPXH8z{re^2ROkRnEmqcV7??WFjx)-2SJdB4lbE?a<}{GZf0lf_O!7PkllAm z^e7^I9eG`yU1o*tw42tnTLm!5?fWyfbNjWo2ZV6t@B<0K>}P<UJx+&$5?cH<e85fu zO5e8Xxy~xaVtnuhGdt|{J?yJ{-=Kc-UOq;Ca#}yevA?ld|A85TIp`nmzOiccNF2d- z#4F0qxAF0Yr3+jIj!I4c;pVt6!Z|`ZT~t}V7!u>G@$l?^W>KgXNP>IhlLlBxcoUi0 zd;<mb<Fq_b&^{Hh1SSW)do7s^d8gNtN&9UJNg1@Xr!F>vU!uo;Vz6@#6wsLwV5@S* zst72o@^f*vwYJuAceH~M4$i}}z7CI4MET&i6Nu0Ibc8TAF1R?NE9-`jPr--EI3mKZ zIS87jt4Ex;ho%1r%lN@a<?)wAl<E+Qn?Wxf;jUAGw=EO8#XGfCg`}*~L)GpD*6CxF znX@aZZ4a)RqRTi{iAQAUCcan>JSnjuUAVvhtgHHpPN_$q8JiLxS4>1++)~xrLU*%f z>Fw~~S2RZzW(j0^PFCuN^oxnsOf2}eL&dm*1ugukMMP?Q9e+=EcVkoAjpxO|b$E0c zA!r2HJ|yl%<2aYhoSTf272t47Y1-1-3cZ=Jp%FqeLqjX`vHsisqr^NAfZZJxsu1(3 z7*kg13l4r7AAAad72-FUOfBWeZ*Hu-Pj|v?O|N0(mM~MNSxYfcTThXw5Yh>$Z2sbf zD4oG^2<d%78t(#LudZMlJ>`~suHj(bwnfJ$G`!_pCgN)Qz9S7~kG5qV0TtMqUZK*~ z#fpvqRyz<}!n9;qJ3I)-7r#TmoSyeC?>*CP3B|pct`3w0^pptzaJZxjsx1vU!sO)U zVPWKfOT)X_>fv&+FgNxr3rWWTBCRYy#o5A9W7huX+VSIY=Xwz;*fZ8^nIibLY*XLW zcEw|2*rP`GQ__snd>%dm`?V^V>s^%RIS!TWj8q<q;#_#>^hCV+qBmKOI~DL^etxU@ z`K_rFGYr9BlqXOgXB#Khqkp5P(DU(8Nm$^7VcvJdCEAMXhtAHA4jj8VGJV+7!rAP7 z4>>6YpsgvS_>tP0hX*ue5k|qTYGqh@!QJaLE!ETd17k(*3GI!AMeYf_sRVQ_pt)Hd zowKK@%iY4`?B1&Mw8h4&+QnMHy}9M#+DzOL@k<V3UT$18K-T=sRp&<8;N$ni*vZ%; zlqH-D|Bw5+SfAP(f<x}5#}oiJbd4%&Qz2%`Ph$!-H`fAH%#-x#ajseF)&!kKo`H&i z{2lN{hf`5|!G$QIwj&X$eIb$yCs~ZEt?emTKOgV5x674x5oQ?eps0d<?J>64yFJW5 zDrfqjkLPwrFil$Kh#1w?8z@ZJ<omYyJrYBN$&**RWk(PiYXTm3O-aE1?lwOme7Xa9 zg}}|vkKl1ZT%7E1*|s~SZ{O&6xn3V%Nm3!FgCMS8?nnoSd+Yu@oxF}tLs>qmn%rmD z1D;v%yBiAX5^;vUp_IMr=1ll>t1hgrX)0?X83P$8`ep={Cl<7Us~X}9ks`BUT4Ptx zprp*is044Ovg<I2S+<B-jH*$&Ec(qQ<l67V#)r5HjzU{G+1ao2{Ehx?CvYS-_!{Jv z>G`<J{u-y0yYtf=e0A?sEq2n=L~08Y1eTtT#)zrQxEwa`38TnpXE#URo$OG~45u4? 
zP@EAiD_Z24yX77iyF4CGe|330{G|eSL;0Eo?=MGTy!~}Eb<@3})T-FP(99dWxh*Ih z62v1CD3&1Lw{(cNlC}M`^(C<mn)*ACJG%bvp2gWwfu)%!p^+T9fuBwvRDnGq{!^N8 zW~jnc7&G$DQWN)}*1d;>vL!UmohKYpV!fvs-QCNo4}Uv=Q+6Im_5E2`KTOURgToBV z6B3*Md32l+qI%bP{e91hugf1*3EtuaY6ca-d$V%=3jCV?PISA^ej;z*E62I9)+gfm zJ^*^SlIStR<=hna1=+SVEF+LoHO7An-~^1OtFGDH&{<vf^~vk6wa5DtgY0>0aJWVE z*tO@svlVD4BA=Sn(UKRH)tbEElFu#xla|R}1FZ<G#vjYa9{g;r7ZQNPh}dUu!l&o> z`LZ|N2M!1{G$PRf9_ZN-t9?gKJq{yMOO7G(GkLo@#~0Ej<BDF&RD><l`Ed@Mv$qw> zg2<1wz92?Ke95?~Nip;3KAhMA8+kcYbFy;segm*H$nmhx6Iu&+I=;-H++nbKTyAYA zPy#nP9c?3scjSxW?QP|(sgE;jPO^LWJG#9O7f<dfG%lAI{bQo7Fl`~5#Y{LA92h9+ zn_Tu(RR=gjBf}kTzpy$9nWm>RQStaN=((o@Qepui5<@BKe6b1(If4r*f{+qrCm*`@ z8zyeIkB$LyoUbgR-7s$@hO1)i`EV}~QM(JBrwd)DrG;9VLJ@?#K5IPPd12z1bInaR zLO+U304>ci6Ve}x6BA1*`iJ-X_*s6h-!5{p5LW97CZ7Q1(SA=;1;g?eXMa4EE(l${ z53dZAy>77<-mi9B2sc|C5z9~9K1(h*)0ZI3dw?U|XKyr9Z9}cA33AheC?~A`0owQg zPs7ql8M15`qTy3xmU6+CVw*Z&Lt&Kc^T}P|qSVqT>Nc9l-;@9ZXQ|!hXxwMVI&xIu zpXC;EKMF9us!Y2H?dB-aI{{VizU4+vUbfc@mnVAVIfIOi<|NqRX?Hd}UAj5`tnp3G zs0&B{xH{?>P4*rg0rE~;EI1CLnb~vvdfGZ!d)nJwzi%)#3QN1ouRSP;1$24%TsU}z zmdRy%R(o{BWdG^u2&wLPaCW}n-|z#TcX?i2&BZ_($sh<lll2#$OH2->9FFlER!djs zf$h;nZWSnUvq(v$;_)ZH<#f3f&I>cH{hrfwEu$xHA!4K-T@e_ZSdm%k_m>+#7DyD9 zg-?%<(9BV~%2T*3Om-<rb|^@;BTjjEL<)HPus{YHWxf7c85JrQAlANNZL!NgUNv94 zbk~@yO?9j1t`{6=iM!4(sCSwVFtLF$X=c-!Bfqo6^L+i@al*|#d~nd+t?2f<$Zc1* z#8m#e`*V|RdxCt_otzL{y*T5A`m%ZWIx|Np4{Gn~`MNhcHAmS2%s%7wXb*75zj-w5 z`4arOcW5{M>3P3&HZxGtSb*)9U|VYJzhAkYdTP!q8j``%ZSLN?1pM4&T#hj?@W#6^ zue4Kc_bx3jLib-8X_St2w?Hb)s|twsJ2t!P{ir~B>e68RlI~2S4v@sgl%$5Xrygbr zt}|3U9z38PqI?*5+GL;$xFJtbn6Y>{Dz4KFEA;X!wn?beCMXM(ao<mRk<+CFAcUWI z;4X9dkCdRN+<!sxL2rMj2nm4IV4}vEM_*8ZAw!~h<-cs1>GfIs#0d)ohA%qg;)rq< z^ZP)drwXVsy%ucw)I)b|^$`TW@+BU+-MT&uHLM@X$yf=)xHY!U#}Pky`FeSM^pmX~ z>dh1EpQWTQmc8u0H!=u=Xq=R!w|jg)L3MzCH9%FU-WZY@p2*Z)+y0n|d`HTnf^&1B zF{6ab)t=1;(>`t#c10HT3>B(KZP)dFzqR?bJf6Z}Y*L(lQju{|zCAie>G3g^<#t0~ z@(u;E$=j$p%T8yUocoEgINO{k-=ix$>$u-qdA#-#HtbH!j_PVb3iWg|V4)G<a&hIy zN|xf<T|O|%^V5Ci4FWmaW>R;F%vnisVLj93ok#$dBQk=|%1y@(>?U9`*e-M(0$p<W zHZQ=aR>abB_!8ONJHZ2nfC4^L<)(StbNsY=rg-v-vrI_m)~3$*Mi(>Y0wLL@0UGW6 z93d94u=H!-%MS=&RM=aLlT`yt3|oUl)I3P;tj?0YNW(Y^zt3@|iPI)3uO^8=3DTU( z(jQ7tT!<Fn<RZ`OCC;0q@PlkG^gy3HoNqMOu?hgkWn^STfBcY?75pJ6C@U-bLqei& zXtcVlT0>Xq>hNM=`4!{WvQ~E;cJiE4A2+;KRJjLRUTKks^3(b0gGv^M(FB-d)t}$O zB%ddQnlI1ZV&3*21Yk-L@W^CS)zuSTkgq0E4R2fTf?G_^_>DLNbWmFXdIDZ|C!^y4 zIo-~{@F#|H{SB|Dlci*!Kthkr%M8EI;X;g!2?oGWISYn!zVKT?1U*puMDq`hw)*mK zjg?i9reV&^vKo^EJQJ?7N8S@z-!Df)4R$MTBO_|iIUuNF06J6_V(>xg3Ov;UWE?~$ zW(-h!Rbn{RpK$CyCE0(zN6;vaao3l-+g(ZcxU3$Um>3q)*Vi%AQjfH^aTb&PVq043 zjTn!(v6{R~E0YkH;}GV<*RiIjmyPcKT`6!Qq<ps@W*(|jQw4U<O#lfODv9jIe}+sW z@lqW}KJ8|x*p-==6$MR0sjIQ+>ZFecpt%+YumoiTeA_45yPJIxH2^m2t1)T{a38h; z^6PCMhL}}nx&`lFzdTg{fC5WP3_3oV+oBU4r36<yLm5bf8}GNrlOZwkx^Iu$3onGa z-7i;99WI|6m)Gs>%6Y^gaI-V|vk}mvCrf#@l{~M!(j~vy!^znYe1A`y3PDkvL1?mb zbUQXCQ~SfBVy=SrjpG-+_W1-&4V;X`158Zw_4gC?HR{jtIBl5l7Ap#hu`6n+YN<Hs z8CZJdX>r+2LDWc?&^(w(e{ME0U2}1iv%<pis>;qokLd}$S(&H#yq+D!b2oDuY<T2! 
z3KMmQL~5v2g!_xbO3{h{_-(CGmFkaKU4Q_VmOHIo$I3s*ecP~Ykdu5}shWMeE)8lO z;1t$fZS~AO?Qqpl``ZdEhWF_2pcMPjb{Bd=(}G8H0sYZ&S2i#{PtRu+Ej_BPkObSJ zw;eX0*9)!8%&2Osf8I4z6g1Rz6xBhS8apdnD=SMsPI?C@fxTy)AIEPKJIp<WToBDT zp^>kP*hD=Bjz->IUb<&?lj$$T4JpY<g+_%&hQ-1%5;Cw6(bv}xEn-B{XMZNLv}H2a z=Od*NJmXcQQ8NEjs>y|CH1H9VdkL;5eO^<kssC}ZAjQqC(S5_)d&4BK<7dn7a4&7` z{G3zkA)G+lc4yG*x%8N<WLVXz!Y)!XyQp3gB<=Op+*!Aa)+$4>{*u)}@bSm+p|bKF zp}0cy2S(O#D6g@3-_G+hhu~><2=o@AzAv;uz|^l-xYvl35S2HgvyjtMmkXEs`&2CK z(9p2nAKog|)Ki&Uq?EL8Cj)ZRL+8BijW;_hUF-PsI1Z}h)-pMdV{s=NL(O^n8MGER z7PQDf{Ybv-9M~*((^n``QgB$<tL+*dkNe2j($X)px^ok=g9}UZ8xi2pHw4v)B`~84 z(y<IWLep0dnw0cz3jPTJXD!~6Qrc0O{+@Pb?>-(6k{Lf}Iu40_gUS!m3$`?hwwDHr zPNay<mZo{So{rp1RU6XN=v)<UGq+XeGq*Rr!9nL|k!;z<$4krNElNcu#}+YuExe`| zJ>_5OnGxifZ9=~vgCaSknop1xpZEBe+4#IZy1JwU2d{1ppROcyH6<0jJ>O4SU>`^H z_xnFQ+&5orc06FQ4gU72f4E-HNZr%_?7%ZK!IvyQmuC;P(?%?r01g-VfzrJ#mgaVO zF1EfI5e+RBZ};~HHM%YBulHxTZtB$ZIs_IB0(`WZRu+`lbW-f9#a&rqYnrF>E$oYo z=-z%+Nh{bS%*MnEWf`afUe6g4Ub+uPVEzqiZ%-vT-eW-72YZ+GjbHXD2Zz9He@?a3 zwAaU;r-Ymy_Y5bq!na?>OM6wfs|rjJXex#Q<#MD?kYdDLxm8^N4x5WM5ws(XHIKj_ z6#E7FfZ8&t78_u7O@Kw#=Bmc>22?!Kx|{v>!86XUIenT&-Q`t{ZYPiZsr8}BFY=FT zJRLE*U2PwG)bo@!KEFNQ&iQGw9#>{_`&DdL2>Ja>yguei2nF6=ufD$>ALeX&J;GQH zpvntYqT_VBL(}n}@p-^aP!5R_zCV?Zg}-`sz6l3#q#W3+`+#glcVk>$t`EC-E&bpc ziUejZaUhkI9_jeHme}YUIUAXQ`k$}09CC1!B&Q(0;JKot9`0{LIU@OF1(!3!LwCe= zHB={6wWr;=smqX%kSN1}Ys-J+)AACNkdP6V=p)M?ou3W;roOdNM!0iIO~b0|HnK1R zHJ}A01|0!g+$3k9ot&C#ZmqvO7yIU8>0){r-qKN#UcWO0zhk?T+m9;av_RyOGkP_H zM98mqY?R|ofOAywjqV%6R1Fn%DGjZ(oQ%s=*fqRVr1S{$gE(>vwS2wQK3Y6^$xlN= zeH2u5g{Ap9Mu#Tb8@N!JiJIyzb~bHgt<vi1UqBavsmjpO+PpkLFC{+tE+i0;O{XGC zK2kb%ehhPIEon<FX-^?7Pkm2)eIY@?^Zs8@RJ@(}r9?;_Uo_+6;;wIQ)YpG|zP}HP z9Tjqee3-hl)V(VT>ae#TpdDN+{W%Gry_=NL>uzexD<#4-w-Y2dfGs~bOiV$uwyf;= zctmwf&Z6{aO$7iH{7Ey@GQ9RrlA?H82>At#E))hP&j}opXGTYFNX#KB;b>$>xh9oT zo>OQwYiKH{DoUx`T-*C<vb8ia*MIYJ{>n?c`21V?$9OaGHQe3F@%dzw;OSVTP*lLb z5*%N^P=9P*0ACM3GM{r;1~<K`$v!w`M7|69MC&qj)IQ^~-#fcV!aGdLL>d;64~Yal zxw?@JiW(Ktr(@$LqaE;j-y+jmUY30ZC+G3ze0_dr`-wec#xw;TmyV$94>jZ|x|lst zJ4~^T5G3!<fuYXMs=WlKnzy%(TE8>l!mp>x)--E>kJ1sde<j)6iSO@t=y~QzYEw6x zn24-3VnZ+50@;k5Cwr{3#na1q>*Pb<H&RzmH8U&I#{1K~ven^YgN=3M*4CEGF)A+A zKN4LXe<_ugc2ovrVzRrQo1N9jz)it4(k-VNla*tiUCJve+TPg?o16mHlph=%6qX2> zm>bHcW_)fQvDxvt)#cKnqSW-n>>bU)Tz9#zs;bF_hK2Ubin`rkD%*Gx5|Z*tO4Kx? z!oC9olduL_)b2p=x7d6Vp;~;x!ovzALYEu?kE{Fjy<0ML^rY6*&`bnl067!7b9Q`u z|G*@=UT~El_(}~f)rdqxJu|5m9#cE$rqjth((x#)8u9CU2w|yNDHrE*%_i2TD7t>1 zno8ifC_XeMCuf+RR@99}wzIu0;;fOPw?0PJ2CTQBW$Boi9D>k8%7Wre#7=JwX|^x| zlEsQC0s|rRQgZc+M5Tn!Ixc!BiOG%h3?a%eyQxGaM5#&X$epghu=HF9mDRD$E}sSs zmH%Mg@d+^_;edPL({VQR3xOpUQ-s+maA8yT(((Qp7FkwHkAQ)notXit)66*C^mH;d z|H{Nv*SH6HTPLKV?igeMiHo)+fW+Ac{rNA!-PDhT-^A}0s3f6Ll~H+i{`R500b$Ra z9;gxUc!r3MEG-T7izC>IOG@@^l`-*Pp{ADjo_lc%suXf-%j0x{Cjb1p!lI@7d^960 z#)JD<85Y;qx3t;m1|FV*h3oW4x|)9S@fPF5$TTY#72|ocIR8k6P99Vn3WLgmN=rMJ zrMb4r3UUEAJh?IiJ5Mn5TBQjU3|09}I;3a6@6DqYu<7&Bb-ctAOpVyQ7$_3B-lYxP z?Dlv%e;bKHIkJ|5ZwS%6&N-}O<mlq^K?Q(A<I*ajl1`gr7*p7yO$*x?u*425LFMWS z8*Jqj6x?kmlrRW*R#wwMyL{$Nb@`tgxajV81y$m=hTj^iI$Y%1@Mi{^54xRR_j^nT zsJ8Txkq8Iu?0SwJ{QOW^3Yx-I&E*Id-TJODcTQ9H>{}*{bd`{Uc2MrwFk)a|CHcuS zwi3%@1l}NOe0_z*K$Jo3c{{({KY;XZg=gT><9p(B!(_T6cMTB6A!9z(e7xHrhYZ;G z<!AGIadQ#hKlU16Nri^-zAiqm!kM;ab|UnzLz1{xW@u~c1O^yf=*<aC-<-ek%pWD7 zAt`_7bk4N7Ft?!~p<rg8|8Vlr>47@)&+%zxicH$uD+KpizF>eD*hCU|?H&1!G_7w1 zQgV&=*0xw3J(ET~?IuhL&aQeeFzM_<P}fk`%Du9?YH~FG^{YOzIKR)!{jVsL*rTDK zwt>m5MsxGhg#}%ID5+(f?<oL)5;is=Xy0ewNq<AsfbP4FnpUg3MMpyiYIXofx98Ks zx0>}Xlk(MbkxEJZAH-V@pT+)<<nw{kcmC|&p9*GIw`12$bqZL_@jC@qKH3&a_?dR( z+Ftw$O!RmY9UYvR#1x0g4WUz3?hl|typaKiQ!|s&QG{+AKRJEhD2a)&J7+z5Sw#H! 
zL1IJJ0yL4)IAbg<Xu_`@4^3Ti@E)?CVhH$sueb>{$54iNxzf_xB`rN!(b-h&g%DJQ z^aQ>{-D<97^v!PexL~%!(w77_6_O2R@_W&6(7uTuY@-SE|AZ8vjmfYOqn*+`wzjGa z@a5^?c0sxqZZ+2{jY)Ewhh*%W6DOL^Eh>O4g9Zrj@&-n@3BKQK@#X82m>ruYW7ZWm zXa2DwZl|WAhk*81ap?B1t;0a9^vTuT+(Nh2(o42&5`i-QT2FOp_Cl@_lc<$#MW`b@ z9+X#G``N<X>%BydTp#{PK%%OE%|-7GN-yh-*Cshk2qTUq(&ULcmec*jGy=rEw>7ZZ ze=N}vk0q4;Bd2?v>6$vMTYdk4jEY)RLLqqTn0Zetb;CI38@V;@#%yI3mHGQyBd6Za z_p>z)1cbv}8LKz)Wfu-ivE|hplg;<Eb?Arl{!0jOLI~;*4tqf@H03LFcX{xG?GS^g zgIqPu)OtpiRBYU>XRfWp)Ksi*(ouEY@CT|og4LbP&X(R&y&#k~kZoy<xjBdX)<e#2 zh)_7wy@7>~Lw8Aj6#!dihF<8@r)ReTpt`yS=kMN?KEz5VeS0u)(ypeUl?A`<b?t9H z^Tt+2WhD)Cyky9-6<G;h(w1nqzpKZqHd%4;yP<}Wqc4lXUpsd&db3p*x&9jfA3@;0 zAg8;$bG~JGy{f8;oPs>3u%NTQ54?@7rNz|5jZZ&)#QJsM10%zNiAjigg+<tTd2uoC ziE#SvG$AFA93rA{a^*gp7l@k<H;a#ngyXECrt%5xe-4(#`B|VPHYO5-J;a9KKd~|o zp@-xJaTtHA%Zsr>2p=>vGg{qP{qYzVm>?X6?`^8D#>Kp@u2x}j0WRiWy?O-!qlSiB zMOFFR_wRYQIF%IS4mSe`&k_*O(9wzt3m$&AsjixWf}*ar3YETo{R&qGBQh)qqPf$A z<S<fiZ>?h+GGb4kIdhJI{><4kO?6m07cUPNAOHD;^tAqw>-q+IxR_^QVS*Tsn1lod zYiu(#rs0UWAe{QIUg_@Xz{5Nsjr8%v#XQ9C0)qT;39%6F!7zIE?Ae^m)WgjTbal?0 zIRnH(ENNqJV`FQHb+=WO6$uClDXFQzUHts>&q?W(6x<qwf<_i0<wck0N?OLbxVWwD ztg0L8dAPZt&%#;5nm;kr(>Zne6bT729As_*AvPW!T+Dm9Iseb8vm_jnuHl6V=|%97 zw&sR!N6b$@LC^tv4re3<n>Q8*AO!fg?`(h3d1??nthSbrh=_)k2BIXaI|KeuMn<Z) zuWRq=-XFlco_29bA#jM*+dvnCqo<~(Qd&_uH9ZcS%FDuHlcXdin3x%5WTkLj)C0^c z%#D{;7x$h#fgg@_@YPaRIZZ%JM8`);PTA2`f4G63wkEJ4D=!DQdbzt|J;E$(EdF7k zE!|yudb(KQ0Sg2h5tk6>;9$qPw`(iQ@H0)!jA|R}+S{9O*>x22!h*iOUU+m~P>>&d zV|_zydPWM^NUTR#Q(YB$gq(t$l8Pe2(_L9bNlsP@>(?<bF-l5H!f?>tff@yF-+7>> z<3Y%(ET9({S2<GBIuVmmoR*VQT3zWA=*Py!a%+1FOQJwSjf9Mhg@c2Mot;lajDz<) z9_Ih?!&xF$3FojPZGBTlMtWQ!p|p%7VDEe=M+YUy-v`E>y84<oKfcH4JP>ubnV6_B zGYb=J2L|zv*zPL@PLiMRe0f#b_U+AMGT&H-i+NHE^VBrhO(4eGsHrKJRhBlO<eGf^ z#xPdqSy)+ARF#;S7{7_UFolnoM^;9vr?2z&o!c}xnAg+RI73K6$|OQTNqxDc7AqbU zl$6S9D&!OtE{^tC4;>Q=v!}mbZELHpt~OTY0cnWJ0UjP+9;_(~I4tlp4Gi^4E6bak z>hLfxd_3k`+gmED$|zA;0_@C#zG>-b8Jii0goi?mps%BSxEXX$Hg+~86(w*!2c4?9 zZ731F46~wJYTbBM&thT2KuJw?L~IlX2iq-dX(DY+b#h7yZhn5K;<9o)0)n`h_jU*7 z&k!(++65O}kdy&$j`dhUxGgU)3z~^FEl^Qf2s<1R6$Vlt7af7i+lYw@v9hu#D=RQE zGGOHg8gOxQ=H}-tt<3*1I$v4@aV~UZ7=<D3I9S0$5)u+<Xs817x!LKk&sej*QWquI z+1ZSY^jVm(GY^{*5fLh{DqmchL+Ly=Z=<87ewK)onn#hGlD4!k8*6Tvl8VaE%oLoM zD-PxvnVAqsud2od9URP~P7?*WimI}xs0db_$28E@*C{M1YHX~<&%Do3%*VyY^j_(@ za;1xk3cIQR^K|FvtnI7=f&;2*Yc5`taImq$#ylGrH<zKQ5uCE-`mb_;F$qQFTx!fp zh`^klvX1$(_VKjb0(N%R?X3-5$52#KQbtKx)7VT%0(Y*`&H2021ZSB<EPb=jpXXPU zm%)_=(bQA}$AwjeTAS*Dd3O(&8xtc@5n;GuSU{SSlO1#hbpG|LmxmjG3+3YCOwUNY zIXiXa<e#IMC)Lr_DlIPos{mpSSaNc4u(5%puwour*;#pcxy{T?z}qAz#zDWp>c@b2 z_#k*u@Gw}JhX@7sIXf?VV)6zq=5@8y35dzh32GCQQ>P|kX&@tY@f`g*TNh_qI$9S; zI~>e2x_Ek2)YrrBg7F;}(%QP(Qc@T3F|VgxP+Zv1aOnu0_eavw;bC4NJ~4Lm#?Z>z zG7XLtB1C`m4D{Z<-o<6b^J^=hnReEeSaqI{pU=j@j)at?z2#e9U0v5r%%(zi!Nwt` zzU1;;QR|eFvg-QkB39<9XlNAF)D0}Htvrwtva)!X|Nac&83tif?{sb+UNsd(T+AC7 z=)zCH>TO_1f*p2nwC(HfjtC3E#XKO*&CO+PV}XZx04zB*X=-}xn9SqxHrhIxpyBh& zbB)dQUOrw%CWbnC+MsfutDd&Lp`N3QV`WWcN>V%{13lJV0_I`pad5C<#l<{y3QmsH z^pug2tGLd-j+PoB2?d>?4k0O3MoJtOq(y|l(>i&$gD_$}!XOvGyt9XUMO`g0&yI_E za*%Odh>3794@V3RLVi&}LnDg$<=@XdKQKQzHTLlFBRtGAFwzHw2DY>{KYjgLL0<Me z54V?_D_8}DlLJ&Y7e^m2cRD&6Fk>L~u$>prv0Mt2l~X@UBSb7@Y7tgd&^BArHgWsG z4z{34jhc>DU02V_72zBhCaIu+n|UX&^Jf@@j671gczD%uFc01a#&-rB%(tSMx3acq z?`#VX3&z7d4-dDUy){1OIXM!O6UHY;kDTy!1oPx%Iy#!=m8G)_(>=YNNht}w{yyGF z4=+sh@q|aDuUA}rEcDZiv?K-`%u8Mr=LY5-ZCP+L&%w!_kQ9IQY9H8FJj~PZsh=gL z$V`pL8lnR8E?%CvnMZjWXVCe&Iu!G`ybU>-o`IeW`tmxi1{j@hY&=Tm{k)H49;9h^ zcL#O#8QWC}4D-QZ!R?*xyU(8o1^U6sWn^N2_SDo=00|hmvjha+eg7SpOrXB&atqcI zEiNiZ&&Wi_uX%=BTi!k~xn`{N^3D0J$A_3#rJ<wMG%&Dn_dtY1NGU4fz7Fejnt%j! 
zUf(T+iyJ5N;I)oq-qOmfrL8F}BoH6-yxgFf5D(&E9{3p>A3i4Yc>FvtUr|+l|IvfT zdpom>H-|^A4h;<qT^|@4>PJ0ZAGkg|2w(JQcQ-FP1L9d+I*(x<m(D{24i0t*==ukG zXz?&lN<qo3e1?cTGc5rt^C060r1v=#^R~Ded}dw;x3@tvFDom3w6{41^Uwglx0#w5 z-`Uy0m7zs5&wye6&VzgRcOKU^HF&~+Yig{ZD9_5qLW*ALV{c>G+tvON_r0=FsSj2r zS8vaY$~J^d@|<ek2~{IG4HH);HV&Er(0Lj<I!yyZ8+T7cXrzpi5+^$wu4__f2uV-V z3+cHg;b9)7^TtOpZ)w)t))e|jFh4#q{Lkt<hWW~}?yk$7ms{Ig8X@RF*E*;z=*O;( zHjpW3UtEw4%)i3pZSd+mI|o~IY}A$hZv4!Xb1IxBB*V=-lba9nSj>Z;XJz>vI*(55 zkR6}-BmMm3^cXOYD~kD=`TKVt-tX$`_6zU@28c<B;dp|{d9wHGW%tLA9v;3XxF5?P zke-(sFBw_+6R;}s>iCBj_Gi?Nw_RI)_v42H%+qQa7}~mfx}lg?#?L%>8w~S!{Jh~& zI&X1+d91lg4D<F5*35XlO+r$_@tJR`!*@-pth@yJ7sS^vm{L(u9;}p<6eJ|Xm{s8z z=JD%13tpW+ig|Tlo|IkgG!b^@#g1ZL>}%%n`1vEe%@>_-{DYVW(ZKbqzheI3{g?0F zXJw|rF=u6EE-n1_Zt(rRUF__vjkQ&H`g=)L<-kx6D?2X{y%deCGcx^hTJ3m6=giZW zZ@+jOIxRy(TMsYyuqYJsxaQlqI-VsWJIg4l=bDU%dGg~izwd4EFb_I^q@N#~7&)5x zWfb#Rrv!Y-&!h4-Kzo3%_ulT#%JRZ~-P~B4nz#WcJwGSoNaneYV*WTfPee*V!X|wb z^FHTr_<8YTGEaeKUiOb+{zz|wW_}m1pa0DKgWbp5x3)oQRF$y+*4Eqr67<8VAFxh3 z+VAhhl@(3xZRgK(S=jgyG09O$*?Xk6r_|i2>bi-AdF3OS7tu%H)p@ex>U>y8&_A#9 z$6+2b-v&4Hs6o7>tQc2T0d@d5j|>lf`r<jp&%gR7n2(8z2Igt-dK+LKub*df|E%+P znE#x&Vf+0t>=)*b$W;m*sq+bmaZ_k-gV)dh8pGyhr^7cMo}~TY{ylh|jGP>XW2~$A z_w%&Hm6eU{tq|=#et0J?xty3qfmGbmCb9;w@1DG+qNGSgL#<_KWOtO#d+amsmW)4d z^M!fa<LEq|ybT`a#eR#<Z=f@8wN+*Km@h5D#XROZtd*5n|6p%MD$cykAENWRXm4{Q z^KQP#bM)tqqw~KzhNbvJz0EP0N9lZ9N89taZ=D_O;8-1QW@%<ZL`)<uAr1&Zz=dlN z{LFk?D|($2YR*z|MH>~j#@X|_ddU3rx+%|)Q~@DjRc$T1<LbPqk!Kp7{QQAjrR`BV zf1DWh5AZfGUOcC!p-#p&f9$|@Se)~1?%djfp%&Uy)>NXdN&Q~tg@yRfpXWg{-vv64 zb$>9-6aN7^f7CT8^5c7(TD<cr1@X_TJjgt**`x=&O&geba5ZK|hFE@@m<aTAIeA%9 z9L$UF>wMKd^PkrSZr^zzBxz2-p{DJfms&R!Tik1A=WONTf}i=%I?p6(=ACf_^Z4>M zb)T7UZ43{^>21W0%G;oIp8c52zkmN8z9=-vA6t04M2eYjgVuShYnSHdW*{)v)6*`i zEK5v?J<`wP%2fg({QT#mG2RC6H7R0NDKzu({}l7!ZFKN>n`6bW`Nahoo&P78=Z{Y~ zF7s_2t-!pJqP(4z#o=Z=+?=dztQuMx5Q4R0n;HJApRcNHXl>!){%Sq!&fZgLCHu2n zn(_`w@fD--rGwrPNv=VmN5-%O%wkr4SzO#aYQH;INkV+=yv-3Y>@od36yVSt+l2GS zj~+ryq^_=-SC9)&^02KdX=$h>CMGs9HLR#AFU-%TqNd!$wz3$Lw{di`V_{}`@%%Z~ zQwD^@#>Wf}_Tck2Xy$RnuwT5*v0_-=qy79**I|nbj+}eo=Z}P+v%Gjj4EuQZ2yYV< z`dR0d73FO#%?~$&c$}Mu+r-=ycD@Ba^OY4HS9;I$^32apA8c^%!D9{)141@sS%;*U zvg-*IBYugwQn;_fqL?QpBV>`V56<To5R#F)@H?2#&PX99A-1uzZfZR`KaXbq54a}v z3wnN+jI`w8*FXOB6YPV6qFiQn2B^p3SsYMO5@UgR8(XW&nu><HY7oGYp+T(EX?S?J zU~B?A@#A|ei&>bN8KtBp0f1}Q`f%y|v6)96GjD^3d3_@TIXM~J$xJfcqnH=qKQAHz zF{~H(c^EPAFh4N`I{z5YeGeF!K<7i+yV@~2Z-Y(e1qIHtbFkYvIl!=sb?v}@3|mxD z-Zy+*K!9&@?Av9<H|JJZQGc>oN6sNBs`y%5`E@x}bygNeJUUOrdch^4_=1!yAGVq8 ztILZ(4oD&}?__6lxPc!MwQkYT*`fK!TqP=o{bT(6FQ|Kc=z{!Wed~gR7#u?w2kGhQ zUcbchU3%J@;OAZ4U8*lt-JG4GprlAk#^O4Z<YnpU&*|uB!ln+tJ2^fIokdGaBPlsy zczA#o@7x1Yc3Jc_srcW)JZ8R)3tn#n#~kpJla~_^;OFMzINV@ndIHrc40Q903hL`? 
z&`Z3rFfS~~&&9<Fr~4coEmk{sV7|?bM~@!j%2gWb=~7WsA$`3Yn;V|Kexs-$XJcu0 zxS5RfMOr#qLt_JoWCyQw<I#B(^Vf$2_^~h##mmFv6dB7|ZXHpx$jFlG9uWn~%Bpy- zNfEJ1c*d0J8ClTL(Hve01y%w0k(HIEq@wgdI3L~!3|kPsAv_SR?afCrfAoAC(D~yr zUtUs3OiXm8r{nNj%Zu~Uk{7^uT&gTR{O+6AuYqK4ZZ0n$&xXdjS8v}!OF0=VD;kRO zvf!LT?{Kom(%G#o%qS@-t!%8)(o<k;!sBfSNXW@K6;OUY6+iRm@XWVy!8_k(zavvn zP+&bx1AX1Vys@cKUQvEqTNC`Q_GT<2pqHB~Y=IiO54I&980OLIq+;MFzx(k$6C;Bk z(&KQmnu=n8nURqJ*ocdXvM@Krx&$vjCj)cEP)LC9Fnjw42gCfp$S~+U))BeAxds%2 zoJsU7m-YqosL+C|iRD+f?(bonZ{v8Hkd%a7(l?<zG%g#4`-V$cZdk3RstnjOGSVMj zSONP93_~2}iS+2|ZVwL)#_w(Hj@0>MGym?x2k>mb_AkG&z`-XZB-GMU_Yd&t>T3V^ z>F213PyzyifxfQ8jUPR{zp}h=_;d{Pc7Zer2?;tnJLVVW&dg5X@iu3P$*Fi%2}mfg z&#OeqIKCKG>=?Pqh56Zt(4ePJpI}{5flBV>XTxx2WowmJm_If-2B*o#(+z9O?X3;0 zbJP#&JTQM_{Q6Hn|IE$FVTofI?~CWpZf$Qd(4VuhwhRvo=EJc*4n%crWf|8k#l#Qj z{Pn2`QBmRS^kl3ppFiEXy}imSq)5oDC~O+(m)R9waAjiY&SB<%I7>vqB^{hnQQAJu z!pbf#ie)J!oN?Hzz1<ybLzoUJ87cwd<L7;Cs2|`t($61tO)7qTa^%x7nE(0bU&>1h z0X%1W+v5Z8;$V07>{)SfF%vVBsF;Y+(Q6+*eth!$Da5J}`eBut!{x#Kdoc8aB?f!! z?&VfnU$efkMvLRV2Po!=$Y}Vrh{<WvlVkDaZRqjn{1M(}X<?R(j1146#xWO(99^9X ziVIh^*1`Vaxz%S+XS<=k4%RgV7@d!ej~=}-1gF5<)EH~{uwS5u!aiqarGrDBz_u9e z7xdi};UHwDCgJjR&Ytdt73HHhXN-*vMT7+Kd>uCzI~fBXsJ*aBxPN9>P<Hn>*rVpp zpC+U^FBg$nUD7rc5Ef5LN^<;LRr`2*oIZV8R7}L&(kwD6e0E_bHYyC)I;rC`f6N#b z+T4Hq(A?DMhf_Z&%F8}``sB!um>L_NI(-U4Fi=Yfy7LNhSJ#(OX87H^#>QF-3UV$^ z_Hi8N<p5oC{CqrMR#jD%Ow5cUBEtr*_C9>{059{Tw1PU6w2WBSjEIYge4cwie-7(P zG<1V6xk?-{EGRQ{e-aYnl0&muP!=LPosfu7Qu3mUn`>ER+3Mz6Rb?5R>nv=VxwW|t z=ZJvd?7h3U55J3Wa)6Kl!#vo+<<(`>%3>9z_wTXaumXC7w6tVqUJfG@BLIwbWrU8F z#{c(U|K;xDgzL9sV%VXHv4Y|vLP7#11v#w!5egS48!IO#6_3&x9^H#J@jmGtA-TOv z+rKUw_x5o4pHpX<g;k<*>hhb$ItC|9jrC8RI&~c74Dj^^-~nSe3-*q7x%oNwA3cai zt%s$-dQT_#y`SeEFf+b-^$M$+eP(`Q^q4n{efs=q@0Bi7Gb0#lz!(JjA^Ur~zWHN1 zuU@^peziY1z!wfaoOkG<np*0{CWe8*0bs`V(2YUrS2tFpqQgPDNk~bwG}Hi+?vA#_ z`Pr@Y)%CR%2;}>^J2Fy}WMxpd^a0?NRTSZeJ0qNmON-YwR-Zk8MoCE_bx{Jmw2Py? 
zn~Nh<PlR(wfDb<(Hz5f*ji3(oIVL4}$p9ZuFE?jzcNb3tRA)b=CzwMf7L?9|0R~%( z^gy7NR5&|g)>a@r-I*BanOK-zj>_A>4nWL>TJxx?+|${%wXwdjz6uA#+{6e1A7&ON zEp1J(pbgCpYv5k)-8F&8k%-XF+7jBp$DuuR=RjX17;FMULQnuFds~Dvj0_I@6?O)- z&{9YsCN6qt<SORxkHkbrfc1qguOKI5X=VaH9Mc$f-pk#Ul7d`VL?}8wHaIj0ed7W; z_{pQg*Pv&??lsrfNL~;Jt9j<k83}O_A1`;zuf`k!q^BGDx1(a2!iuuN(P0Q<|Nr*h z0xYhqTN8fn%s+GQ?eFW3A_)+PK@uWF(BSSAMd1`sKmmmoRRx6<?ry=|-95Ow1qqsj zxLZd$>2%MmQ->sUe`&kj-~4muvRO|(=N$Ig$KLhsckMd0mnG2xo)NU8qNHGsHwI5( z-fah}vjgl7r@?oU|E!1@+aaMnf`&3y;gp#A;4DxeflhYT-X5;9(&C_P&dK7!(#kWc zMg}M5`};dF7>#}V_nj3uLvwK|EzBJs9fG@o-jkWBiK?<9I0Wz^unOoptPTdkBEJCN z=9c<*Kl~6C9t`{dw-Na2<BdWV;5mY>6TEQV-oXa?IBQG7*8egqxIhJgvwvXR1|62X zy1Y<WQV7wgxWq-c^Kj-65CJOAojn5r666Z_Ag~|w`=`&GQd3hw0B>djO01})aBY3% z`;TJg@zW<AU2ULtw6rvi96bUY2!`b7L8r8ZCD_8r0aOeJ(!yu}_H0Pj35l_T!+qbr zdiDPOI}i^bkd7ZecI3z*&I1QHI1lV$W#6%TFALj#4j#TkB3Rxt7a@9r8@^*V%Z^>U z_w3!n!ph3V&JGSMC?o`rRY_U#^yyPan6pd(JU$t$iinEXQ^>#mz73#MOyZ*0MKP!k zp&2?8FWeALE-pdA3u@}B#(1NU@Q{w~4(K0m-Mib`-fCc^54Q`c!@N@T8X>xulb7Y= z<Aq3;X*`<-!RA38B)nBo3JweI8|;1d62W}$K(`0OLs~|P4~bZz2G3T57#6N8FeE6y zq|iUeA0i;QUN|CpW5F5WY{kSco;rOJDjz`n_Dk39LPjt@G6=9w%grG<I|_>mA3Aav z{>ydX09+tEEoc*N=Dz*=kDoeyR!HbD@3H+SWcQxele7$TyVPc6<H5e4Yc~r!D?0}Z z$3bxwqp<X<^s3?28+UKqSOo|HRh>O207u-<yb0(e#+&zqV?Q@HH=LoevLfKe$i%?S zomO00w6ccG1l80~13Yl%%xONpqY%@5GPqI5wJXVcc({s*ikv=u5^m`~R*;vMjf@We z{g_8+{MEM;Q=@qWIbPnLX6B}9YRbSuX9P~e86H1z93UhpbU{{5MpH{2&K3XzEaB_t zU0hl?J2(AiOMe7MynbW7x~3v1BoJheuAYvHsxmxBNht}`<Q3$A+o2JhF4oXk9~eG1 zF1oYp@}ox&(eC>Bw=ZiOYG`g$6TG3MoYGlQ*?ou4e8YZ_h5HoyQK7@a7(N-xT?d7B zaPq^(9c)~jJUoYZc~6`^eO6FVPD#m*>=c`v6cQa}LNHTMR+N#KlU3L>IR$x5ZH&E( zb5Yf=y?X$d7nhXK(bd7}=_)8Hh+Y&G5fhP=k_3nX;DO{_XfAn01=I7h$aLIKpH9q7 zB&8;k9PHH9RV5@Pz?UT@CDhbaFxr|}EXKf4U*Awq2l_m1t*xew)iSp*2h1~h{>7`e z-z_dL<mBhLd%1(e)deMo#b{wPHMBGk(^S{cQpcO&L&HOA8|!n63OsxmhInIXS!qDB zu!yh(+*m~g1!W~w4K;O54J8$2xY$n@E{oiR&1=stD(oE@8kwG~sILo)jwU+TY3t&Y z)l{YBWJSa;o);E6eeN9GOg?^oeu1;X(y}7bGKbHIvhypm3hFD_#d@Z7<A{D7e8Ox8 z&+g$qsjOoWkx`XZJ^cLTt2b|7gR}sl3-<@aDqKFeyp*&g^4ujQ00VFXVIw?uK!t@B z!NZH5Uzj&LKlAGKD>SZLyS|!uDc;)F8Y;jYgPNZV@D>pnUR6^GWDX>`wF0_eX+?;N ziI|ue{^5_z3=hy<yS~=gR2Lf??a6R=aIz=bSiy5KGsgp^gTaD@r8x*NFK>^uj1=JW zo3}RpapovcIJfWK>gsM!Nlo$#@S)IL>>NnY6r48G2$t5CBs=6KCg={zDoQ3MM?qYD z5~yfT0q5`O=}NG+Rn;*&E~31fM}UK0isypbaT&AIYUI7V5<7X$?mTd6C)bIi{HIQx zzaSzZDXXlaYi#W9=bMq66Q7a{@tUb60Z%YDA(-Po8qtpA8yr~E+^qkr^ByQDDUh5T z+&t*cR2PDkC7{c|*wDzt2+rKY%OkfiZ*+3(=KcF<ss*6>?%uBCv}8|jPm%+PU`a3| zn1Wf8tf_7kA3tw~j~6UBk{wZV`nUk}l9`h^IWzJ6+i%}|_x{QAXOpv2Rn-+yF;UPI znMQVip4-tLyvf#qWb0t(=HZr}ojEWv)ITy*P+AO}PjYfJC7A0O8sPMG^^6S6tq69` zpi#({BpXu;!qyAN6U@+SFR8BT9~&9FG7HGBZmf?>PNMrTY@Hm8%uF?~SVeVpDFsDQ zDd`Jh7X`&7#TAu>W#mtYt7#fo^9o^+ud5v5=t*tnZb?T3)j3Z~TR8j0=C|b64x@Q9 zU%q-hzj&pzyf`u{+{4QquGEHPV`Xg#4;?@b1~lmCY!CQJO-lwzyncP{(=6`5mLM2F zHuwkl0yZc#GGeYS@JXe)`1$+f7v{EiwB!}!1_b(jsz61n(a0eA3X2PtR~G+h(i1rD zqbCmmgpJMhIeFO$iE-f(p%7idQG$Z}VL34|4*G)Dwx-3Uxfd^A{8IC9+!x=zggRZ_ zm#eDFb8@o)|Cf^Dp+RtDScXPw>#7EZdhgu3^Rv0@VHIvgcW-A)YkgK;fxTy}3ZA}S z5X&cHaYmhd7UM2#7|11v+jCrY_bH{_Cl$_0D9EX4V2$wvdj|@`J0U%zwxy}Ot~Mnr zGddw5Dj_~H9<k_z_^9}J@P*ux;-1kFLqmN~?7CPCP8$Opj4WU=`Zz3vb*yZxK&?K3 z{@^<~1^F@YaY13h0l|Tx5n-27ld5W~$EPPZ?%oBG-^>a4;>FFow?`&MYU*p!vod1i zW1^xX;^Jdrxw5*V^>Rx~TT^vyRY`eiX+<e~elqCJfaGu7-gxnPGxNx!XV2DdtPhP0 zG_^LBS63F679q){3iH9satiW_D@s97Ev&9A-?%Y8Gu6=AoRO0i5fkMb=+E%?4G0a6 zNsLd=&B-jtPfSaVxs-rhemrv3aP45|wYPS)4^K~`bCM5?k2QC;msC}zWM>6LM!I-0 zY+NX&wsv}E1WkQI6|Al@7AK~xDWdHh7@H9ilg4viXOAczLrJDzYBeM$NaM)C$z`dP zgY})$TN&@xom-$WT3Q=Gw`63cB_$^&UW$W9e<?XJBP$&?UcTIXb!p-0vnM~(A<tes zpP8M6|JK!4*D|e+X|T}N)-*Xiwtj6D)X3%bmb#Ds`ebdFo2F*QAODUKm@kjmzx(dd 
zlSklmQ!^7Fv%7mc0J`uwIMfTx!f~PH_rLtp%dMCH;>C-*_wGPb;QpY3W@aa5u1wBf zodZ<gfB4|l>(^iO8aM&C{HM<zuV23rp4CENWb!CF@N3Y;Ou|J>A}$i*k10Ct70}u( zVz^t#@Pewb4&KzlnL_gq3cHkCP*u}CGzcWt*3;G0artM~+}Y95-#0cl8xt2JEiHBG z^r^!~k01)0DGSk0&~kbPdchIlbuCSU<0BotT`ldcjcqM$otJ@N=av=$@MvaCG_ei} z`@0Vx09=8nheikbhx&#`2WRGHR<Ez!V*a4-`pp|_H?A>l{p064Y`pW}J_N8^6Y+qy zzyIjbjoUYu)>ju+uFfqj%q-5&EL@qHpS^N*VQGC0&GNH$^XA<05}aOJXM0UkLv=%4 zU2`Mgxoe;wF8?xe?d`}Vx3_(~_IBpA_YO`@Y)v~dGCc*zZ|vyEDJzdkN%M`2q51{d zx_X#7IO|(lYZ;q}Y8YwQMc1~Ejm)h|U@7dp@|<$Ey7Ww^s9H*7Wq!kWcHP9l<jT(& z@6C5_0VE&;fOzNT;ki$Ofs9#Rxw>)d=F?|SzyH!~nIKDEy?G73@Ziw{Fp#^bJ$U@^ z@zci;?g2316Iy0gK>z#T(c=dX9zTMO@UK6a<>!YVegx2h-~qV>Y99=iLCO3FnuVr5 zeE0yZy@MF_{d;KP_xC>qO_S8vhU40vyqca8R!O4f@fQgR=XL#A&g<?J#qSg`JFaf8 zORyl)+}%SXqtmmBYU_GNM*-ENvooW!)5w$k#|5Cc$?^R1GFvA{4UCq&l7fPgqP&tK z=wU^sBnD9w9uox+T3B0Mx_+IRTMEgT33vlZ^C|uDX71xp=>lK8eEj_RgC|I)d)N-E zFJ8a?Np7jPZ+|73+Lkn$Nan#u%yhBLyoAgw$&Vl1dVu5yM^m}MnXKKq1*bH>x-zvm zKRG}5$u{%H!IgfpD>KY%pZRh=!P)7y{{G^+hNQg0(4<t4&`2k5e=91IQ5|E97sgoG z29;e|f6zU##;-v=Ac|)>rNOP_r0<?-A66BS-&fo^vwrjbFMag`&zkw%-@$W-g+KJ@ z|I3!n@l~(n%jXROcvM_3T3!+{j}tMA6)}z8FJin$&|sIaF`Ft?%Y<M-rcwh!LNBFc zl~uI#^dh<Qn3;68CR9Q*rlA=(`p3r#%F0~5Jk5!g#%89*=9>nQubGvFiyJLDBfWE= zck$Z#+Q+o88$V6_{YBU#Y1Dp(^PeVDd-aByL+#DmF9IHRM|+K*WJ!LC90<*i3@3mB z9>M(0n=2a|h)Z5u2jpK_S)N{8L^2~Y(`9aEJpZ`-<)pF9{|-z}G<0<2SJx!u<ORhi z(SyV7J$)?5Gy@_@UEfGp*Ci~c>(-NRvug&q<Vf<?DW?=2*iI<(s8Ni)a&1E@(kh4E zzJ0e1)W5K--g*>}(Isse$*JH1Xcsn(7Q#ot!eJQ_%ULYI{@;}8GI~~cI|m1bpKo*= zkX2=KEA-%-k9+RQ)~xrRY-nntvHfytc2-n;Tv&8eSWGlnIASrd1-92VHH=J6FcTtQ zU;Sz7z|DlnOxQpCY4*U6$lsC&`SqJGrpx@8BKGwsdS|8!-)!X3^XDkqnIsP`wwXo> z?Ao0>DBxFbeg^#P^3wF;0t)zFN`U*bte~HgOmz$n!W{#yh)T<Z8%XsJwxxTSIg)iP zh;mp15flI7=E+CjzV(T(+$U|VMlQn8s}6~ovYt{usmn0*%%w(E-+lCa8>oL_S-O6Y z6jG_}lFFgza?UVR2p@UDI2=qs*N^R-4#58ZQ}#HaO*XK!0(s*W6dsY1o?BVn)`y7p zU&@k+u7Y16-_G>+w)b_n_xB*5eciCyJvh)eHaa#tGktY&ftd%KnI(2>TI73xJy03| zMKlliFQvSH`pbF1KjEAyiazfJk_K7>yU(O|1n>_ZfS294d+)QV*~0M5JmAcP$n*1{ zYJM}(`A_cyljJ))K@cYA7lkAygRpUA_*+rjjBOos@Mh;U2%65RgI8|cefC<L6u~KN zgK@3YcCQvR2<DYAV?C{T0_SDund1^!b?d>?ZLI!P<--p@8c|bqXc-4os3)*KLM9RC zjY7{E1fMqwJ19kDJ*DxlY7BOahc=!-aHcr>1O&t;rWBSyoDcH#ivXCsx;VSCGJowF z7%ZR(B|i!P1O~Iq%d7Y9uim+LWo2b<Wf{TzwQGQ}wSUM4e)GYDJBZGC4B$kwXKs}` z|A_y8w4YSl6!1^)f2MZ;Jfh(dNptW1CrJYihyZ>a0X&icd}(P4NPb~{bneRU!1+&> zyM~7=TUv9<E90|s{bLhc14HdR80JnCeWI<Vq47~oGC87pVdL@i+WoUyv=eGHoL8-~ zW1fJXe_&<@pRoSk^STmN5e6PPUWxVZ-lJjZwqE}WifdS@EHRo7<H@b!Drg*j)*whg zKL|cgW4$@0ZU0Tn?{64iB?EI4l0Ave@C=KJ&CDsRsp}md`(lFohUSK(q(uJ!Kd|JK zq_(b(+4Z%lr6sfiER>d)Xlklk5iKVd7p|<Xf^ztnANjM$*{XAxu-{bp2wa~%`%L@K zzl8r6FSnw7Q^qj;<rBT54MO06>Y1$!k4hRipM`aV;lV?vP*ro~w^QD4-3XsU(^E}7 zJrKR76_rG$W-!8|$v%PBG<UqclP<wZPRIC|u1|7V-^QbtWv$~}GS*V2(Ktq(7(PbU zBD{J0x@SZ&`$@HZvNmc?iMsU6q|)APto~J{t#9U>p6^+M09H9?K)Zl`;A!1}Q#d~` zeqH~6#|HchHbfBTU|?Zs=|ZFU2SXp7UR2uD-TgCFGqEr~I5E!8e`3#`J-eBOg_VVi zi&I)!vbn7lO&~jUbx}az%-7$1y<^7?8(V82{3|O<K&L3%GeyqE&oYC5)HzJpKYn!o z$z$fw%_isHXBz4P&tJX%?!yn?egEMBkom`dJ^VO6!&EYl?|no&O7A}lS~oR3li`^% z9#J(@3-jn$4LB^C4ElF!o9^M^s#YNRs>IxUxM@iA>g8+ULNm0n$C{dpXqhUJ;#-E6 z9=?3*7?8VP%38yvSestOkM*?;Ds3EE>%4Ny)IN;ugetc>P2DNcfSy^`J-rRrzofkX z@PoX0$XSEH-7*fmSl=^x0Ve_NIzA_~z4<X-dvyZ-MK5B%z7NL4%+%h=-qXiBA|^g7 zx2&P5e|&r^)uWh4ld-a}>;=s4+_@8$;ggS#x1qTa&@wVJedzEZ0Q}cqf31zhtloP7 zxB~sKu(}FJLkRZz^`D3wrlEw7ARmowKaT34w#od@pD(U1M@L17Ulg^mwtD^j4``>b ziT2Oj{_r2^ef`$0&yof-{Q4SG#xKn*Z6>=%$Ehl-E6mJIWn`t>d%ECae}(O#^6>Oz zb9XmN^3my8zR~d%zaTVv)w8nEGBoDbbZ`i*oLIiQa`)LqJ<lUbWSnP>s#C#PJ^zH# z;fA5Lvd+bU=`}$qJVaH;b-lI7$#}1vxz$_Si2X|nEja(EHiJ#xd7qm5DV#4q*88|N z<CvBwpQbyvw)a=~XcqHWc_V9mWY%;yT0lrhVoG*dMPp~z$jr>At{lbuQQo6~c3^rb 
zDM@v8)&2YT1MLIsMMZ^yCjs^}8b#pDDM?B3y2d&HJlOpD`r@@~An!rdZ#;YipJ0GM z0OUQu^UGIvo;*cS3JRYITEu|BpTB+k^8NcKuU<WR@#5jL=Vt{@@7%TXZ-4(gR$J@G zAO9Bs9#!lQU%Y(&-TNo6-#}>a3F=5|j~?B8@DL^WyHB58yK@&&S<u&j7u>zOa{D$& zFDBq;;k1iOi#WMBzuEB(2M0&b(7@Kb=db{)bJwnI-PkoWSk=-7N<JYcFZ5ChM6V8B zewGwhBZywjEM&39M|FJCs|Ig9`8Kz9kVBd%VG^mws1r4b)^tdy?pv<zT`F&%d-wiB zLrWjm8O>dy_;W@98qSxT!^%F*ez{%#Oz9q(JETtMQlfmL;KHZveN4-PSJUID2K|V- z>tPitpJ{}EbDj(-RMW`B#Lm&)!y8mR1Z9Oa_3Z=w2<GQd%+C#sk0F@fvwPRBUH0~N zkKVqwvnK)7@7;^gerQ+-fM;-Gtb4G(Z)6xM1An%4v^BT2w0Czb-MrP_(-{#F23i7u z2jIVc_g+?3I_%Zl(geugxQ`C4+ykIKdGqed)vGz#nQ^gEt!*v09^CIA8h{=I>0Ea0 z)YMSBcKhbS$}-UVvp3&$_H;wDX{pKUx9{A4{u0If?81Cce{WBJ-`cHP(~Apml(Mpt zjfamXuFPg;X9NWJ!Iq}hCWyq51m9B=VPT<Y=f%UrT~=P&)_HkwY7&jvr<a#fQj@)X z80i^laDh`xR|h92>N`3LYU)x7iz8CgeWT()6WX|WA|!8VjgY)LQGt{=vT*b6^EcY0 z2rg+GEWJvTT6zq_@JqYgIJ{okK0h#X4UJfX!ZX?V6j-FJL`;KK91;?Xy0#JfmlPGV zKo)uDuM}K3vA(<-Zb#JJ4yn=(sZw|polfAqRedWALTWJPBpkuYisA;{bZAm)dU07* zOX~o0tRB@i0ApSz&jaSIZHUWv?zXhIaUI|Ss)sG6rug-TkA;LTu(PwVva;&y>w!Ez zbLJEq8|&dChl7Fx;S-iGcedYt_M)h`@Z8xmd-ftb?%T(1Wko>Y3j7QUW@dQ69=Ou3 z-MbDQI+&i8d=N<vxOXqu-o5Pm*r9zB6C;rKdb&FM_V0sg(Dj@;#a~ohaPQf3Xu-qX zjg5^B{$+1ZI(JS0w3d#Jc1TDNw8p~10$0FH@V8&zKyU5-0|f;+7G{C=Sy@^3Gv6@= z*O<S4y?=aEPEKYo^E~1I;^JbRy*)i+W1#)A$|^tv2E`|#LzI>-uErz>teJ%(&X85z zjUHQb<KfHpvE_pb_JTNHJ?}a>oAitL;DXjGwf!q)9Sb+^eSWz`P1~7`UyW0pDrFg= z?ULL-wX%)aKVK5E8g@z9eXT&=We{>$m3l~p!lO*)QF1<@;BZLU**>-@zIw(zGE>tK zsNTuWgW(++8<&}rTTu<cEIMupGGHsu1LhYuZU6_KK7A4(2lT6|rUICURggL$=`}Ug zuRnTvhPet*Kfrwe764BD!$WoTwdg8SChb9;h~nZ5SOvnjwzh)S(2ZYz^9@80rKLpx zb*PA-5)iv*Hz*ukT^&<XlN~!ZYr;lokei3Qqo?cf>$f4H!LSh;KX8BxD!};|8X1_I znSS-Rzaf>;vIh}qDQU@4QWD>M^UYpn<8UDK=;`UH!0N)ng0S!AdG0~lk&qN`8yL#3 zs!7f-3Qtb+iHxNNgpg1rZ;2@Rlj_!ddO>Ba6Hi{h^NcBDm#|cK$_FtoXc%hlonJq= zTG6%GJiNN~EtD$@t49QJyM+w-bi5R7W6T-Z51zi*2JN3LuU@}CrAGNm+K$UShKEV- zJPM8n<n6iSNc*I%jc6$mWfO#`W_2@rT>>(I1J4l{EGe(Bw7#)xco?Yuv!4Hmd7#0$ z^>q<pp<T=Z8h+*8eF1^fs0&~)nt*w972t$I8)$ZPX8PhqF%<C1N($cIUM#FEusa08 zpukH?iU4@1BM6*1jnUE|T3J948Xgt`R1ce>HEAh{tju%<!;@JP9D&8g#!6H~7{T_= zo%(vZ&)<Cr4Go4>=5hC+h{fYgK(s3;$oqq^2@O6ga0YRcT|42lYa41UEeXKzumu?2 z-Gd$+5(L*vBH5tlDK9SzS_u9H6~HO{LqoDFs$w&<17Z{D!QqY!Kj<8dZ0v#L6>!F1 zE4$*|GiI+ngy>ZS=YCRy4oY6dF^^xHkx)9+Ft}RQxo~yk=eg!Ryc5}Z6<B4gh4I1i z*3n*Z)!U%`^TokEX}7T1Ir|iTeLqean*(yB19El%doVstH)(nSzjK}-!54$7dQTrv z^&nO=OUrB8E)PtOPa@i8^eFF<&zQe)128WpCh`&U62F3Z*h?FW=^q>E?CU|f7bH!4 zZ_m^3z6Z2J9q0>M+gr7?G+<v?J%8>jX!~dHzK1v$r2fYJhfob2EYij4z~$)a;$RtS z0(#$k_#rqr5G8z2ICr1DKrzph=(}WPr0W~&2gXLASGxJ^<@%$?OSkU0y3v?izw6}5 z6RY>`=N06!va!NCc-Fwg7^pHZ5fLGzZ{pbB(%BAvq^zuno|_TgB(0<z<R2qE+Qm1} z*3HYz$pw@=#t4+W4Hw2QrM&my%eM)I9dMtO?K1Vf>jVu#<t!s0dadeNs_S2Q^WD#1 zjC}Uu+cOu9SOl~WV%()I!{u$Fn|o)rLHj4mm8;i|O54afCL0HqA67z09$|YW=YuLP zAaxGt2kz0J{|)0Uu4kibZUI#9>>KD46&IVClUG&U*x7}|dXwW@^*msHc70t)@B)f? 
z85t?i#-A_`=v{yG1X*Qj2@v++Aj`M!#3#g|LIr3OWQ>c8GcVs!RAxj)MWVX{ErU3@ z_u|{-8#h*OZd|*4dwyjZJv6k8#bUmF|3O$t5L$*+bu|@mjjXJUPeeVvFb`2Js$W1F zjm*p}Tt{Ts(BuS(WDC8_!9xd;U%xVg*5k*I&8#eE!0sP0-_hGWwX_66?#R(2a55Ym z`w0YdvWv60_(jxBbPe=kv-2Tzh2GH4!yE1uLh^VsIUSSTDl{{XjIpIVS8qO%H1s~E zO4IYIRdvcgj`4DjZ$yVEXV&h2@%z@4j0)D1YAiA~Cv+JSgfJ8LjC+rsZR7S&mgv-4 zhlrZOuB)1)XfAmNfI397ht*w?F&qs#`#Id+bGj@lR24%rV_Q2S&7I;O4BdBPPJTgk zEhwD931$rYNzdQBHF<UM#EIi56fqbrpwO+DzxMFSr&S=yt2b`}?0x-wPzDCz??8nA zj$O=%654d9yQ08mV`nQZD_*~Ick$Xf*viJn?9w87XcY4Ra!@|InJvTW<2UbfbF-kQ zfC>Q7;i-vmCg!0{a7kcna9B9BhK3q)yq!Cs8aMX=#77X!^8w~FKVqKeAWvs+_w33_ zLsJ7VJVe=_&l3p{kd8)62OE-<Mh^;e@ba~yy5a4da0H^do&lGt(|M!d>du)*-@d(6 z+__)UO4&Xegw1KJp8_$mqU&mP?-FR6ckh33E*EV*D(e}{epSk8Js&Z%5ZB1EZP5P7 z^6tYA$u*PqiI<h!au2Dw9#*3P+IcnI`LsOvvEB#72`pzYy9JF-suDrfLr`Yt&TtQj zfKEK6u(+!Aa(iFjR?M&5y;oIRjSkTxTdb|t9z6Vv`7PQ8l{$+zZ+^l&0EwNQ9a=-} z=+PrbjvkJP4CgzB2pB+PR(9s<?b{1$tH?|E8#gBBu6)M4Hs<kn2<Fk2VfDeQ@3M0~ zV;+1H<Tr|WF)`7(YuBchmveHnSy>TO1KML^qf^t8p+2;L@_c4?2HO^%hb;{)jU1dD zaB0vZFi#J$!^~fN>zf#RN5zwU1CjAbCl>=EN!tW3g0c9Y%5+LtF);k~2QQS(1Nqh6 zK-DPNW*k&=qbD>rq9nim;8z|{NqGw^zv>=oB99hb*f>y<5Z>B9yN%mFSyF1pj@l&e zCnj_11@dXo0qw^yUMI8}Cvkp!sx+2!I=h68_KKM*>f#NEwghLYqnEE&cywfHMs8(w zd1DhieH8Pyw$}IGyf-k^hvmJ<<OwVpnd!jf0P@|NJii6=I}zFk%+IW@#3#lh<N14b zgQ{=sXoH?&WO@pH%WHOd`P51NT}bo0EG;eWy?niR?b_Ar*XLJP;J?R?ZR&X}R{Q1q z58~oth_u<YOIB9;!Rzl5FD0N~6F`^@G6sTB6!W5@!r*8SZab5mP%Q*u#ogE6W#=Mu z)WDWaZIhX{=@C5K-Gc+OtE)p(li+1g0mM>Cb#+y1TV_ewrQCuDWUM9zdO#oq3o;c{ zjgGm6vaZn%WvZxgVDsSOgBNd-N;*M0sygQBcvK(N@KmymEohkqk_UVF=DS~cI^TZ# zivOa~-g7$pRa}l^JpuL>UYXmV{gY*S?atqgBfnB~*{MY3#V}4_y-#3$PU`rc!ug%i z3*@|LwoBM}$3;s%Srcs&QzIKYB9-pq7vviqmzZ0SRa$<8iFrU1NL+@u7dtx}03L7% z>KdR0P}!nwe!}x8<^kbI49k50X=KlCkR_m;0djSXbsfE3&~bqLfYyLrIXO84g96&S zJ8J8yudFOzy|HoT^eJRqojIN}Ix}ryVGhlL*x=#e8J?W5v?QRA2DuK$4G8f28Ri|G z?9sRbeB{o{H|`#E^t~a#Je)&eNg?n#92Z0vo$lH<Iy`^<8tjW6LPJx%pr$UPv?3@r zp62664~c*n---gN#!=UT2%+m)O(Kg5Js`Or7=Hf7BWYtFQ1S*o^%7=ryjq@q>FvmH zO~=B>)jPlW=A?;*H~TTAy)rfjG+fW<`N&&_U%T~S8@GS5XuG7aoHN`lWU^1n<`mBN zw5~tc8NGmWh9P{)E_=`8Sfz;U(zfSROmOA|Q+p>{H!oUXC@7rx>|E&Fk?}f~z5DjD zqr-pzQNVD1K^|yeK<`$}Z^p1&Fh6z$(baNtGT-d@28I1m-XkYZo<P52f)Kc*yaZZC zzheS#0xA{}5dy$3-MRxIB#3Gj=I|r<8qJjo+8Z5&KYZj6CnpE9gV^cg>%IQ)kzauC z7R;}KGlFp1&5VwrO%VAY$<QDdFh9Al&^J1Q=1@bz>pgo;o#Kbf0lz>4a5xzVdr$D6 zfW1zhI!O%-wRZIY?Wa$)1!hptG5SV{qU{hnIJfcW)pwq;6&%twst!5Y^s2*Z?pSJC zN&EaqlK<_WMCBCJvmH~~DPpoum2yOjE}-ulnp(dN+CN#wudH%QS^r(aW`~3ghpYpj z9eukPFn``CluMkjTf~H0$%$LhNl3#2XO0Mak~_mAEGj52k%xyHeYI>C^2H4@rXeXQ zURYd+zK#f=0ARrL*I$3FrmlMZ@l#~=o6S{FKIr$keIrB4DoXqI?Sn>kFbmX?k&&9e zb`3ltAkYt}n@Pz#zy9WH5Ey{TD{E^RSsCDQJDGY0>W72|UrIu9q9KjI-_D&J931-k zdQ(>yLBIp@(Pjk&&qJFN^K(;IuPP}ipnU;shwuST9u|<n$?-Ah2p}E-Uxxp~`NL)Q zkB<&a&)`k*sAug&3LIqzh#w-=4OAZ@<MHOox`z8zocVNoE7~USKYyK5J9I?Z5&7bk zQF{vOD{L5$P&!;cuv*%&IJ9v4_dJ~sA3mIwGG#rj$*SPMrA9rj?I~vwcJ1~y)&JRI z?ils8xb*=w`W_j3ZY7GKark+ounWfFLZ;DY_54`{4LIfOPH4Fu)o>HV5cMpqAX0Pi z@`HHRh+wX*t7Bkn1Yq#+bgylwnZI#kYI*bhc>pKF+tb350LljhHn;%jA7s@6Sp^9( z_VE?X`5PN$<)sb|c2-172!|oCt#7Iyn?v5|1&FqFbx>&(_?MNHMR0J?DD&lZAl{bt zHU~#LD=SNgCK{R>pyO!o=>nl{X=Ono*;dz9EnL4bHg{!maiOxN67~iC6Brx-f(kw8 z<l-WnvxNo0(b2xMzt_*-2N{GUI}J{ZGY59o(lgQk`5=>k;oAoXYuYYXv|f%$OV>3t z(9qUa)znbOVoj`V2xO|Ejh(icg_@qhA$8|}Q=qv<lmOtTR_}`GdmU1AhR{{cI`yDB z-6guNVQ8(Y`)Y0f@~iKD@5^ImL_c;ug*{SM94ZtZb=PB9ulU@SZP5P7vVQ&cF;xne z8vU3yW1qZ}fL@>wK2pdeQV1U<Y94=7(P^(Jo>!fAUXO7a>mh?98dzC_e05~_F(PB5 zGP2W4D@vPN+lNPnXJ^Oee)6_0;2by;AdUQxWOinDZEbPm#^S~X#FeA7(_3po1*i!q zMZeG8`uZ-Q6lsKMTVH5Lx1+nD^%3_$Yl7=Bu|4|Pt>CyfW>;6Xz5@&g`ndZ7v<xQ( zTR#0@1hg6Y3Vq+?RCQZhL3LevQE5U>URX+oPfUVaaD<~zAd%*7LUPnGx70K+Jf~&z 
zFY+`sVp!wg@~x+@>;v*RWo*?PbFuWw!|Lwpj)^&qGqwFIrS0=mtM`8Y(}_taWaCrZ zEsEc#Og^CE!mH(O>YBU_+CN{ii(2<7kVWvJCv?2I6kSA2qfrwxj}tZyXA?H!khMLB z^OC>^3hMjF8<31`Nle)LdxS=YBqpPS_O+MWk-0(s;YZ@X_|xgRE5G@JyI=YJ>FDpP z`LhbYw<hzoqQR-DmY&{<=GOe`+O(pQxXhf;#AMH~C}g09`Slat&Pmt8TGPNtT-*G= zE6_v@d@^fC=Qke5=C^Yz*o&J)8u~Vz(+%LqGQ#ruK-H9WE;f&>z5nnB=6Gpr?Pudx z+bv|ks_3|1*_ltv{gk$Q|JeLCX#adMc8ocW^Hm_lA5ftk!!X1x5=G79M9kwQh$(zZ z&U*w64yn3Gng%LbM@pOeYvPF}Bzr3=-PtDqg?(~<QEp{5@L~7x@UQ=p?svMw*e^~v zaOLOi%>71dzv5z}Uup{dQP{xbL|b29MN@NLRZVJ9No;0LNMee21Tr$YIZ$Koq-SLV zajOj0<X@Ft4{OjP^EyYa-tC{gaZZQMr$PZ$BW-z!Ly>I4$gk=_;@0xc#f^u5?1izb zE7!TsYVWzA$D-iCuIO|~oyMa|^@}OmM(m$2H*Ve3y#;VvO|~s8vtv7km?@SiW@ct) ziXAgEGcz-D95XYs9Wyht6I0C0@RZz{d2^?}_t%4a>#vbiI_hp6X|K~~ckf<%cQ@+H z&F5OV#K!Oyx-&;vKJhh5>{vRzG{|q6`o`3l<B+ylrTOGCgY0fh1T^jFS5IGB6WDrg zb0kW_TA(=4Rg%VOQl1{u5>~Vxz=wP|ac7p!UW0B9bW99Xy;sHD2M5zz+o;kt?5RqX z;KKD8YXeZ@u~8W2e33nnYyj;P;I10f8wfuZcqr$z;ArR0YqOR!*a&<q#)r1z8)gfW zN;!A!$YV)ix9v<1Ryye;11o)wQ`V)Z*qR~feIM2@Z_6(cfi}%5<m!RGR(-Fe1*LZU zW8~dyUDvs8Sk<#`0rC__%66lMW*PMn#x+j@ijrT#u^tbbNpPlS8;*PG$BrW{#i#lq z&IH79v0NxvnYcQd%c~13JM$oP1IQr*b$v#1zwHk{JnRq}bm10!s~;!Gi2J!{QmW^D zD;KGAdKMDe&|FDaRt}EGK}5;}WJP&S|A7e^KhP6otiK=HE@B+1I)H6o#(An_2i5H& zYeT*6Eg3=1k-w9F7@H?SvG=(!EEV#puwn6iIiR;mPV?#Y&M4|3?0BrofUB|{WNxLj z_iDR&$dVR8NbWG<&{pYmEQ=oNC<=a{q@NgW)SJhQ1m{4@?0*+Fv+zq1wEj<<CBsj8 z3_>AU4la%e<Rf3tTpyHZydooG1OECNpb~z&=Q?=rv;GfaC8qdGIKPi&>Kb%+H-v$F zcc__fGs+x3p#?q+c=tTudr~*k#%*GvI9zS~lfJ%U#GenkJoA1W5TY1<1H^Md;H{<Z z0B}B%M17dh?ZVlm5>a(mb{m`<7+E**mT&@5JhV7Lzue|OD%<gdUD<_IRvmY$&o-^` zc3k`z_t^>Yc6UW`)_&0X5x8#e>|uDqj*kDuP&@?f<eO!e@*8FnCSB*eL*zy4oIbjm zGu%GJMR}r1c)3+@y-aJnP$gi%ASU~j5FOJ?Jzhhal(hDPoLmp`@ik2thGF_wu7E=V zgmAWNMM*(t+E_ygF9RVHGV<q~UnM#MKbHv7q>Ya!;~waOdLa^EJA}Bwud}qY(=_EF zy)ZOkH4$m((XKP1GRXX-SqM3{V1;RjQAIKd+c<B0hFT61e$vTl!UV?*S@Yx{JANFU zx+&jNap5aIHQv@yoMMD8{ZwR4MBZrlbMGtPBYXS|<<f)79h~V|Y~wPiL7P-^-zSqd zmv<4plDCstU2DNdIx6nL#?s`&DU<^UJXeTj%~!?EBg;mk@r=9TY2nciWQkh0u-IQ0 zuNp5$-%prcNSW1MjDA;VKR~q_%Or1fq^q?74gr++`43<Mzbvn<tgT0hf89itns^y3 zxSzCmJ#3yTa%2Baz!egV1M=l|?5oZLI>`2R#0reNNkLvcfo>s8<C8<ZQ(=Z$s9-X4 zY@AVpsN0gZ*D|~>gA9%Lm(TO7a!eM`F(pc}=B>Dotf5%Jb|vyoroVo%`~=(r{> z!A+4p$(D>3$7!w^P+4%D@;#rVRn}b{wLsM)>p4t6<92hS-g4!KZfY{~m73mN58syc z`xu3dme*a6MA6~S6BXgf=e#?BCg<FRxZFLlPsF>58CtHBPz%ktpUoXf7=6mSY|Rw9 zY>*D~g<%q7?Cy=*mP10ki;)3*b)aYhz;+ckenhh$>;`c9+)V27-L?fR>)@4Twq1HS zKQLEObJ9YZhVa5ZqZHg_UQ)uU{;aj{iB}ZJ_uE%Y6Ji#oTwj~+9~wZ!`e<NC6u`1v zv3FZlYsj;IKX(|G^($)aal4N5+Q7W>al?r*Ipubgi(-{5c<*-MwhGu&=<@5=C|eYb zmwRDWCb;?&;%GHN(tf9J!fZA15;SC>@eUd*v(87nv{Vg^uH|JOb%j}hJK03V2B=^Y zuAEw>zI!T(vZ{jePOy3@buUsquvARtnkPGt&p*0a??o|JVUmS0kxJM}P7_=(x*w>x z(8-tf$vJmMmJ1y;nN1Sv6B*wlNZU}faVKcANWUBZp1x|DfdcHT%Wq!sa8-g3!a`yA z{%)87`wI}=euN6N^P)LjQ$XMW0Rcf;+D71}7<u&T?C~rb={I+5$P;4XagfK?F>#KK z|JrYgE3k~bL#(eEI*9WLa)|+odt+m`J3Wx4Zi<;Xahl=Dg><n4j`oo@?1Hp6;Ht@@ z&16#l*Wmmb|BUz-lG<U8@E=0YOq^xz$)&AT$xgK88*Yzg<$HT4Pj)HcP8T1!_niRG zPl%G=$yOOe8kG+5z*8aiYmSRpmhV-T9?+JeDQYqoRw%;XBxf#dK427kk%EmTQ}5em zbxN!vznCO;H`|*F8%bYOBQUsk`Cr+|$dQI{$Ffn%2H7u^aL+V)^pE$;fIlnYa*-`i za_#lhsYjk@vP7Og3Tps8k-Pcttp)TpU@sGLXX|`L>e4cGmS+~0=GR%6*#Y%k9E}b} z*O+rdd(w+t9PwB1Pu_u?4z$Z9!y7dUww$4i{C4OI3jvODG}L}-=zR#yMJ|Tr8uHB% zI>>@&%r5>+2iP9QeddLQ7OvVE*KDLK6x6*VpoLf*U0t`GoyW7AbMC(26Yz+B4OO`3 zg)%{zm0xUzzNTRWK7f?1lbzk|=`<iPQIX5OxxVzo?~2pc7U;w3`}huh0kxExZ2ner z>gh4}p^LE3It)kmqo(c|2R>CuEE0kD(!f4nNvv1h0Yd|C$qDT7?$>BD_mbEM-}5-R z!RG+R{B`}p{cEi4Wvg!Ej#)PQqd{0h0(2f?d<Uua4yIU=un9m9B=A9aKu{bjA?|CU zYN~=F^hisj#FLem7k>+~ptaHK=0}k?#f5-L9rf|?+yD_%;@;VWDbT|12)=nLO^x5} z;7ct8><Z8@22J+;0q_#iw^88^NKyIsFaho`HVym->kq^oxD?xwh~}|x8QdrJjUirY 
zBAOT7*DC=-nT@FF%DgP;elF{7nt-<U1W3Xjj?&7VOHY|id$Ylwo~V#66Xw^I8;ug| zB-h+cH*ANAx|QyS?CtOHOVMT3S;{6n6+uhbW7=S2H4&dmu+dkmpNo=R&NiNeQ-VRe zwZyNYS^jKJBo9&6M905U&V*MP?k|<iKEZc>hx@JH4TB|W-Rsuf_(kAoDBVOjxHTs& zISgAD8g?`M5SqyE81>-e2e2t{U$Bx-hWi468A=K&${MO0bD9n<9VJCY2Da5MmIzC( zuEvkVB0eK$zTRISIo5lN=8*C<I-5j<0^-urzL!vRa|}7@-V~l%hkn{3=RdX;K5sTG zw2l#%fDf1RR}#$k;D^1IL=Fg`4{+GXcWG~Ft?VREZWc=F6!If7z%blaO;qCJ?Qs6~ zQOQni_@bN@VjmP?pjSQ2C7~grV1OIGS_#+^n31N=qnm#Qn|}e5sCTbu@S-ou-k@V& zC!>d|@ih$AQ8cvFwi3GQ6<eKkeEUYuX2S<`&o5Gq@hH-SUUHypIWKN5jf<Ec5;-rC zrPx-@IsU+k!gwxAbJlisjgI`<clZ(-DX9YCpu!^rYZ)buXe>($ME4c+6aPhs()|Ev z3{7;*{^CpEi#+G8CxcH<ADHkSAD@P&1LP(4UD~){nBoKIo`dsWVs_4d{pyDZo{E=i z0N@bEjFl#z0IjPeP$`s=U1(2b0oo(fZwb}m(tydKM`>moa*Mh9y&2e$eE!!E(Cmwr zgot;;VD)LyX$8q~HQ|-pb~O8Mm0JS^(kjFv8INfBDv0lC)<|3nuE>`na3FCX7QQXM z2S`Bpx`dU`S!Wn@iZs$Y-%PODzbd8pO>;;!&E;RB=z5SoL0Yq@YCE~840^toeAlfV z@!9U<=n{>-z2+<ndBw$J046Hcox6$6rxZt=m6=3a==AJ!8mn{XXbeNjA!vTyWj16- zUSe)eAS(ki1q&q&YBy3k47ddH63_!sfPDU$nI!;zuF=K!+4}I*KNi&!tbS^@)0^K3 zS4~I9!p6qI#l^rxRY;7CoU~YHdQEYpkE0%<!YDDZ2m?cBgeG)!Ou#78#}Ygc%h$Qo z=bWq_bx4`{bAOtKp0xD*&HDn!F^>5XAJzPKx;%4uKd4_K%-gCQ<tod*gxHO)N~Mb) zxhh{-xcTMBHAOjvRnl3eiGXb~xV?Y^J)iSPy0l(xsIF8OPdb=TB}2gL+?mrLQlEA+ z4XG0tDw7Ur`_t>~^|bgfPz+=_;?u+)x9~1$JO}5ZRyvK;O@q@>6ePq5d?BCB^!H)? z2;(H19PXSi5dk7#O94YDwy>s<dokl<!J!|{k@%rrVdmePlT?*Lqq*ui;R9QmD|4DV zYXZ%N?6n{qkssz8e9zay%>f+eZt4o}90|gFuZ}fCzxFE4@CxxT&#;iA<tp3qd{`fa zd+3MhpW9fT8bFLvVYz;fZtJQobNQz1xLPHRJ_u9FG4Mpn=I=XMf}1?>M4;e2?MN{L z#IzmZI#Y6tBNb(eUy)RJzsbAF>U=`5U$#v9CNUZyCMQIzBC>zemxy3MVFnt>-Vqz5 zg5gMyS+uB7A}d9cvkNFw6(6Ce&9-{KaUgq-9=MzDh9+#1I<<E&P5@%7Ow8V!FVBI6 z*;;Ax9P_Y#Kf}N~#m4!CioOq8I3qI_@)?O1pal>|Ie_9%0I2GWP(gwM(|j1!f}#L6 z=V-A9$i%(0)x>?HQ?OfjIM)af-~vz~zOVh?N0I#DEmV=%IS-r-;sd~@F~or88L8$t zSi@Ve?dh^@iN(j1j^0*C3ZBRm)y0)2l?I;k;c?%~4(&hgM--^S;Rg0JT<NznXDR{C zlh#$sP~La3yWiYajjh*cy*dQfpUuE)5ZA7vN{WrWNYbW<oG97;P{<wmQUtzjF+<6} z57@5LVcw5zsJR`sCeQ*eVWRhGGjpY>F`UZ8$<a5=D>lg1GmT_vnro_s8cO4g2lE!j z!B@j<UIYz>nfYkw6A@{VF`&i}7z%Zyxr?*Ao27s~V8i??->BKasNcP!BWD2Svl*{( zw54MMJi#+Tvsip<?ymjr(W3Ri<X!B+a*Rx)khvD-1V@J;RT!Vuqe1S}8uDpZ77!8Z zUbZAEX4nu#v;I!P=EM9QXTET9U2Fb{y0_YVl+47ozg=vYi=LqXAE2|;B#grjhLdZy zG3&vndafR!AN_iVbv!=rNMM6t;aWGvDV8~6*12pZJK1cuY%cTC4gITg_>akhW|%@| zYD;eQjwb|VYo|#?NAH+4i_G|%JnjW~nMwK8IXr(&`DtVh@T-90#@uGVJ+c<2V;)aD zZ0{{_fI9Mqul8cMyf4Fj4<C*t44WS(C_1WE4&aE_y+0KInDCp)k%1-koxdP~0TJvx zRJg+RKga!+2qlxIYB<X!9x~!rKe}ClMn2jyk8WN%_O{bIBkOzHq%?{o#V%l0(8VrL z8O*Y7WFMW_I5N~$V@Ix6B(5_S4mS#J+E$4v-(pf`RyGbYZ+jX!&URQcEed@*`641; zBoH9#d4L2z)xqlN0<VlvnG8}sl_K}fP(JY=N>{Ag;A+twsf7-m)SOEyu@(&LM<v-k zrt?1$e76y{_f`9F{_eiX(GF@CDZ9bQ9Skx903s6UeVHBM4#;Zzdsim{1ag_Wb+UPn zp`O4e#=j0_&wtP1uRQv)vIYBj%LvN=75PN6bwE&!3(;w@3eB|5@M|+DMgZ+=jKwq! 
zXq%4^jKP#M_W_l6hwlFFs*R&FSEcCPkEh8*_|unj_7VkT%)2avC>Cz}M1a?gS=Cj9 zH=UeFY1+ahM@;6)fh0r?sKl_eU^~hdJo)_4p9X-d?pw7O9F;*$ZxzZT5eHfh4IX<> zvm?m9MuX8ibN7_m!y<o1t_rWKdduhC_s53Sg_7(-PF}=Q^Pu+ot$P5-7io2J;VkQ` zaSlF3vL{OmX+r6{>@RJAQ!CA1E#v1SfPX-p-T5wI@_i^tC_Z3cjS1fL^XQbC)}*|^ zqB5G)&Z48CLg8~8o?BBQO<5vKME!eu?qoET1#Z?>M=s_%5sWh*wKPw#{UV{N_(pN= zY<^=^+fH4)a=A&k$s>rSieFl^Cu){>njJT`PcH*+L{QuHgK}AzbJv*6a{J5^@&>rR z0J&d}wIduKm7SC7BnH8c*UP{jC~ud^=1r;fjbW1T>||>=oO)+1?go=jleM!Vs6HZ$ z9XLjnFkOv&u0u>_${jjXr__CqZWnC+IFMZrR>oPl$L;{{TltaQzo4NrDT;R*gC!sa zehYX`caBzEXkZN1)*Zbv@M$W5pG(sH(@bk1{K_p1OEFhcu}_NKvwCK;(wt>V=r3&v zqAIxMKs=pwUXDi3LADz8nZCR1T+~o(8p*IiDTd;^4#r#06$JDU;ivP%{O}si!YnYb zO_$w;RhLyE=C|FE5F`Q&RT~xUEXq!9Bv;$;Vg(0D&akU(`P}o;meJLGi#F9G*9E|3 z62AR}852861$%xNQR_$HvvU7|>6{>qD2UOE{+FyUnMOkNz0Ur^NNyo>qAi`3K*Aik zxV~kHBjg>&b2!c+ctEN9z#;Si4)g%PSVqXVJ`CeB%u<1kUSXb1VS(lIT>t0~SWG9& zNdtVhhGI?12|T#y@|niAK5A!p>@%LKL3@lQv8h<8ffdkhv~}feH8!2qZjnPn$USc7 zrPCy1`e28Yh%KInr%$opvE@V*+f{VGC~xzBArm|sI%^JT`t{P~?)G%sRvFiSdD{|y z8+0G9H#nrO5+WtWkoAO~8B^Lesz%~B2exzMI#-+!K_X3rNX1?Q8fOE=6dMu1mNi#c zr+GU+PxGQC+NEu&7VoF$2+7PkOAN4+m2hb*)<(rOAhenOpvymV`n>p3ADsb6b`~8x zNn|}C2@X$XJt_XQ8w$M}%L%$<_WVKMgGLE82B=N?=*;>YWxej|5npwpzUq9vp}uXV zRv2WY=j`Zovdt;ANdBNrWO3*t-2c$rn!!v6wy#}bnDM>rbmG%KY)vW1sPE~5>_c#6 z4tJJkVLG}4t=_Ds(P4wx-94~ANYg9crDN~*(N=#?gTw^YLwPhnW??%!ysW;TG?o+K zQK&+dzz9{pb{D2m9g2!r7LQINw4OV1c@&ZXjd6uD;2DoI*5NRBY*0Ym(|<0Y|H%(C z?UViio#B=X^gd1Ykr((+H|5@wYMdyI<shL6y>IpJ%h>%?<{YbH3AZv|@;RUEjGTEz z8c``V_wDpYNOGkd)%7&au5Su+cC)WPMQY92IgJpC6xFHO7(G#wyo3l%D9AI;IIo#o z>l^F2Ut7lX?LF^(;rB3F)VEvp^m@B&c$#dS?$m1Rd40Lt)cMl3%G?e;1QSOPyKHo- zL1KgJf)Fu=;iQ&CtA_zI0!CkTqyk?kLXk|5$X6-29b0XfP`{(#7|VRya4m@>X)#ub zPigs4j^fNArh>@TSpQg`<igM2eS?hQG(?#q6E6B`_jozq>Q4NFi+24Ig?X#gl3`3W zLt7|AQSk-JJ0=EnuJ;hFG%TMe3rcgH+H-seTiPdifpT%kj*6o<*||UMSX8k`h4Jqw z*|ERqA1bi8e8%?Ja<&r;tkg)h&@_;ocNJOGhw{JW$iAy7bJ-xG_|*i~rPV3F(ed^= zas$KSHR}7^le~P;1GZnxNn<~P&BJGnj#HF7${-_0A?T0MAU#=z0gKUHB|#BK_6h#z zkwC$ACzj|G;(2uSovtQWuJJXRy;_*6rwYHMfvvlPsVKaft+|2eN?}m`9L%Trdotyq z!i5kMX3_onzTKulG)L%sR(5F2L(V}7?59hJ0xAxCrM!2)U`EtQd$+H#8p55wkTD=2 zx-sT{r_Y-CJdG@W4;rv9$bik2rQ&0l-fsu3PHKBKy{I*f+C&{b-{1Ei`L$$#TpcPr zLz<%e8K+_6qS_BLpW_A_I@r;MVbHW7ZC@{0&48B;>y%*6AQ!Pa>_ZAUa)QWBfNb%M z>(23as9Px%q@iVeMQ6?&0iKA02yA-8b8omne)LgY6@Bv5aFx&xglf=dbjAl>VEgZ+ z5n=b^n)vb$JT=Epz$fGuE<XgThhLRXJC`XsjbO4Mz_j$!9(7mjfhG-<|H`z)R7a1W zh4)f2d*aTdfpy*4O`C2A0b#%?TA^Lty-UQb+L+MtSz>j5|FVuuZz6f)$}d_ZW6RN3 zwEzD3zMdPYwC$?6_3PX%22)sa6Ugfua9;xUjC;>|z|aPeUkPV5tNomk4}0;!xCT>( z2+HXt=o&om8a(LQe+%_<e`N2Lj=aFYWxT&kdb&X+MgiKsLd8$*ifu(|lLT*VHrPIN zM`mtwYfmgWyE=J@JG{l_;35ClossGM%ebXnzop#o^@m|Aodp*KZF1V35BKgn&danu ziC1rnF?_Gh7v~~_J5Ih{BAASu@M;RHnwG=JJUPR2Me^f>3QO#va;p&<NdpDKJjI6t zF?z6vA>yfn0pJQQ6btqh-3)nvDU!zx@tA6}4X0{2ufUH?{JyV(0g9!DDWGjVeFW%I z<$AAtKLiQ+ja3}F<BcLSz3-1r2=m)THuXI!P>BgKbZP0I=MYg&Ahy7gumVkM8)YMS zfic$pePvTd0!*r)t1JO=j0KlOz%FY5_TjKadNd98twe_<zOr9l0hO8<JT^>LHzQ)n zEK>1Qb5h9~W#tqm-t;*D>&oIObLN6C=te{uhS$rLh1-pV=etsKk{L{5iRK7e;8;Tk zWK;l}mBGz=9X^q{f2~J6<p5xxA;|0F#SP>)PkeK@;CTJ5I2pJSaq`oQ=GGdS<JH}7 zBsYXZS{*O>KF+Amq$lvmOu0W`l$p=@4i4uIFAj1A<OS-NpjWr-l|=n$X|zUv#lum2 z&cdRwKp~WYO<+YzA`Be;B0{-5V9KuT59fLYzPcsZbYJLOemd0Sm-^%TQ2WQO{7>q_ zP}5vUM_6<wD4&9nK75CfQWw>w7(TUQX>sa}SUj_$x;?q{DQtgx?Hvf%&x>eU6<r+V z!cZ(55OQz_+kZ7nftXnhsB6oGfBY~SJX1QzXij#SVUo=SfG8q2O&hlnrLJ=(DzIm+ zFlS~gqM|S8CoWte%m&CwEmXrLF<x^-<Xn|1x+V^-jnJ-{*Y~QQnp}GtQiB$9tos1R zg;{0w=5N#X{5JeXx7n`I`Oxg-%F*EtNq>f?V6>-Ee3YK^BNk|bN@IpX3tBSW_yf7+ zfzR}ij@ssaX7oO`m5VQPN;^*obr^@d+!CpNs|FTAm(<*IMnrO3ffWZDUQFpM?Excw zKDoKLp2AD_Oam8)Y|ElDXnWpN+356m+xj)S8};-Pelf${<jVZfz<!lmpZ}ZTFRnqV 
z4}yjlVDX}z`i}@GT5^Q=cknCMS6GWzSZrnx!KG<@S$I<K#}H+UhiI*A4zLK)+gt7K zwi`ch(!PCp3Lp?b@ruFegUYx)<<UQD+O%JzT{UX6OgX95Y8(YA&7TBa%G>+i$0cVt zBWptkM|&eZtKXk&^v&TI>Dhtwz~7&k*x2Y9|B=lA+EV#XHa9nrPTI)Y#L*PU$iNE9 zR0Pr~yV)86>D1)(&5aBkfpkhv`i_5cB&_sIKn1F1hQGhf!V07lH!?FZb%bL8(h1pE z+Sn`E>KPaTfB!V-SCnM*94tUzWBZS!pqZnCypg?-jg_s9wUM<WkR3=TYG&zZWDh!8 z>Ny$-8yVOb8o}}M!u<_`>xxSJz8sD`TK}7{RmJBDt;WU%A!W3?jn&#aXay8V7$TzZ zY@+Z`a#(VYGt%Dq5Q&7_ox5Gqvkebf`*7en5o|jaxQ_x7f4e1rA~TUT3z1aL5F`1a z6KZKYwicL>Y7e~%O=?rcl8e<6(n)g+{{5ls^FFPh4BLeTQcYvZb;q}72VX@t@gRh0 z9MOX*1<@<^IwSvi*5T9CtfTbZ>*uU9A!-C5X2{eU3bhTw;5Q((TpT1*B09Shx`d7p zlZe_aw70BudWt+olc|&<w9deXW81*Xpn0CtR{y4&u68%znqq}yj4)$^>z#0IQ5<DM z?MvOd(E3K4b_s|DZc+Y<6H~7`$eAE&v!Jbcguu2hdK5HlOurh1QS@uTsIj%`BnvlW zaqoIr%R7W?>k@jsGj@>R7Mkc)mjpQ+y0=E2<YV+Fq8ef5tjK<)3t`GEX)?uMhKH<4 z6Ztk^%d66?3ByLUEtag`#&;cjpS3>P!Z;$iE<kKmeI)q!HVb-mP4<MC_=W9%_o#+A z9>yfMoY;j|?WVCs10Vo=v2`iZI-xc`3_EhiMUyIaJCh_sqVOXllOcyc1SseLf82!d zbRvfyUqj+qca87XakjHL$Mo;9YG|o0$_Dh?nbjUuS#k#49A7>v+i-BOl}$=hR$Ry% zEb(<J7vI>=KTP;pYm}@jW?GN1;BpNftsbJ6NCG)_B9M}WNBUX%pTje0){JOJ58ACL zRVB4!k=ZkvIT(UT1sN7l>Knd{j4*#;mq3gB`H-i!L2qn^+<ziQecQ9&X2S86$>Bul zctHZgGIN=Ez!WcM5oz`?xVQ_NobPeC!E48+BcG0^FWtJQNLJ7Z=HuL@7cSy;T`Bri zA(YZPAywwQ(F{0z+5jRc+_>y{()U5Z0a=)fn4C!IZ!pI7+z@}EhktbIw;r-`u>Vgj z{H+_mGyjto{?^$4axLWezt=*USU1a2J*>eOvbfMtWI@;;us=eHqk+Glu>L?;0pvi? z5gAgTy{l~By?k)N`Dmr#sI?OGvRHJ|YSXr8tK4`0*4A+($o=qq@oDmjcXIzp7F-t& zFbhZp>!yZG3jkgr8-j(?3Qm)fw<7d@)?K2+(+T)_1gj4rU`Z6$7%RRyw1aPWNegF$ zl++p{$-|$ApF10qn1pXYFH4VAlsd*ly}-fxB<S`)intG#;t@62_I-b|w76O6i<=zK z6^O?BAuLN-l@B#SXKt=NUVKBm;LNZV$^N0JwUrL8pRM3hOsJPG39fkFR0%a_TE1Rg zT?$^P$l%vaW+Q9XcX=~rIGMuI=xCg}FXlBvt*vOE%Vvy~mQTz>gnl}jYtepPt+;bV z(6(utT%2BA{dK4D_*m}8??Qh78<e?S&Gq3|m}tk#OQ+fxLCjtMPd{OzROw))5^+j& znW87AG&NS-Nn_L&O~w?d-K$@oyh`MT;r*61sZ<zO5D>9FTe>KW%*^uowys!=%KEN` zhW-~g_(9J)1_lOi6R7K95vX#(zyiXb5O0Vf5pbki8+iGK;7Kz<;Bx`|lfuIs$;13_ z;+*e)2F6RI!WRZGco;BXEu%)Y<|(L>{D2KP$%&=GI);PXdP{xrPM9>gc3<VrTFA=6 zXlh&DHMi%)J-H1&J$28Jv2IvUr;eYHty!jKOoKc*@yv^}ps&7hH@2YCD#|l&$lo*8 z%oCV9+>DCiQz<=@++BQpoP_HaYkBUFzHMy0ZBTwxByyw0;H#+g)>OHXCuyZa+dzl4 zR#jfxxvIKyRc%=t#hd8Ayu7l!{lfah9N`o{%srutM?=LWT||&SdpB$9Q8KrVHX;jW z#Ff^bB)uU`WJMBs16hMHsQU9>5<w$cPl~~kA)Hot`T>>^Q^1&bNE%QGL67b`40s09 z%it&dE5-bK2g%03_7BBmWcmxmWMuwtP)t^a{|Cj4zmY4|!wP;QhiGeSGdVi`QfEAt z&R{U0EQmt^6=X2TfQ*77j0PcqmJy34GI`N{;JjCNlw#x2wcm1j(pclSvLbkLoPB#M z4-2`s1#qzU=iutq(%QDRwnah;EF$Y!^;jD;S(`+{M)pO36NG}v6BL@>-aZ%-mv&@k zWyQcHh>?=+BO^mqR3e+1C1Yd?TVBpz)3o>IEvSxF*W~`9&Z(j;TUMU0sw%g*Ffq5t zgoB$lK3-Q*;c#+t_4O<K`g%l1$LPxoRX{*lZ?8pO-jVzPG<Z#za4mhR6<)Lq&{jNO z3oeL@r9W@>&cyg$x=`0zwz=B2xt>J*3x~D_s*Po)rPWt!n{Q{w;?6E~x3~FEPmH`e z-nO<*uC88=`N$DAva;5b6Q?BcPZ1F?X~!3rm%k1r-MlIk$>*Ay(%ZPkUtX?y18#pw z6e%AnQ+B9ZO_@ecPKs76<j<yEKVtfR*JZ`gk@-YLM`vTq)P4B~=j!9((WC|-@V#@P zA|UYTd`R!|{dV>6@Nji?<mdGIcGIzAk>kX%brcJw30VZz4K{=oWvmOe=Gcmv4HxLg z8mn4@AX)IS&&_krUlWKQ>gJI5nOqsztXY{joRvQ#7Phnf{QP=-Elb_W!`q1xRvFYo z<;Op<uXE|jNUJ)sty#UiPrb0bxA*Juarp2M1Zet4uBo>+JC{edE^J}Z12ayH<T`Oe zi!c#P%;tCfWpXqNPfvKnO*~u;0_+FS1QS;J>Y2r+wb{D4`G%$GnyCfW#Tmxcx$CkV zzb~qT#U*JarMargQzIjVl9Hby!i%Ax65F}$Wf3{3A`;^Sq@^;%B?w7Kw^2Ww+u1dz zVp%GqeIf!!#Rx`xClFu^4Ndg&64%+eFyQV+$nUqlj*>k_OKWRllA4#-u#FtraGrh3 z$~xsk!9cA47uxto$A0&SZ1k-E&_<@eGFF-XYm8MoVI$BM%tm7NdTv0v{~H_i>CMLw z0ssqm1N#d&e}w!Ehv9#k-b}x{;(zV*nd$!z+7@QU|7m%%f<FI~?fw4@=j1yB7ytn1 z$-a@Zg5%2ueFQzWEN~J5e}(Ul*uSwba{QyVnEwhN%l`?!|Mk$~{~6x@XKnov@?UC; z`LFIh=6{EK?~joGf`;X<`ZbpSnttuSxo&<7`LAgHs%}8T(*Fwc`M(*>Zz2B`&0h@$ zSpI7U1OLrv{%%73Q!B^v+phVI@Sle^ER6pSb@N-uf2kYhzd{3QhX42PGzK=7e`>Tr z+{gc_(U!0VQ6jcBzZnsrqrDS|Z~0&MN^t+ooB;osIXTJH5=T=&^POxpTA^&ANG?+g 
zET9NOgSx5ytbrV8m>g7A`6;;&W7U981V>a(#7bPG;MPF+^OR7cG7`#nNh38Zaic>2 zcNUd=4+&gRw@YKzOb(}GvYBVoZ4;iEJa=mj#=-!9U_J2~5kFQJ@Y*5bT{lTHa!I^( zPRvsVH2{K4n1g>;E++_QMr67o`yifnmsTXjk-~)nTRg?Fgi-$Y(@`QN-Y-|GHL)!b zAz!MsejZe0R;KW~=u{C@2uj4!l0y87r?p}U2c4cP6%-loXAe5I=1XbN95@~++N$Y& zCdIB!E&oi~YN|P{;yCTJEb3DF88qMc9p}u+W~CybkRCJcb^hvm_ZcglQuRsS?k9w) zIBOgKaGn7DmE?eEN4`SPTN`GLIdF8~+-kGjaN&#=o4@HLaiEeXgx7L)LhbWx9|Tk9 z=@GMWFVbKNE?4DAlc96n%5hESPp;gP5Uk)l(*5=EH`xhxZ5cIEs}B<yXw5^~3R>2> zMPRx&EBo4LymSfd&HD)O662WjyUm(YvD7ijS}{~wT<c@BC9QaivvzB_sr`EPjtSau z?=F;vP@`%1;0%MB6b{s@0FCquR&u?wRuO(~`)MoQVbI)KrzG7J$&D2fzU3J!!}3iy z@=j#s`c&j|AtUqS=-gz@qX3jK%d@dK9XAiissjjmP-LqI8&0)T3x@YEJk(W-@QDUg zqNGo#x)yv8tA(~At;70W4wOE^c`+TGg+ZX{NcXI9R0P|Y6*_TFxM(gM==xPGw^tDv zJjIP{NpED;L^U>^&GB^X3j*BBzXqUSWen>MZXT|KWVB3M7Zuz$q71y5#yR~BJ&56o z#B4BMZr#6n^7e5s1bV5;h!gkY@VRg>U-<=$!?qz<frnqtjz^AhoBAKDhFgt?($$9a zoiY8=n)D%>J9dtttnR>q3+29bDeKJh`3R%Do?ztx?*g1iz4d5nGBM#px2RiIH(XA) zbIM_v!vESR4EyoJHz<VZS#^)|`++X_=dbQ-r|IG)SWEX=#!Q^gEB&I2YJ&q!nF^e# zhuA~WpRvCbRILYCb19D4wk=OPQK@|niG3DO{$i6BXT>p!D_D<Dr>E@=kL*;o^bI4J z=aeFUf1H%CG}Fe!HabK<DcWEb3SqsMk`<XQaRd>gboopzu~Xcxc=cO<#g*((%GY9M z-xW0OL@m_t`UBs6?f4Q6gmmP%+8ou#Yc6{rBy-+%=NxYiY(2@$cI@>~31t1JiJ7VC z3mcypmikFp0l(Q@dmcD;E_PsOwIPfyE{JLQ^Sao)z+HrzB>oGt^H0n4rwOB{XZeR= z#`0H&8SB4;VfIJJe`%7L{;IoV`giCq{|NanXqf*B4fDSPnm<DR3mT@sGBcU}9n8!> zLjDUH=D$M2{O^F~kC6X@<`3cYJMW(jGtemVf8Uh(|7&>mN63Fg^Y>xFe?s%OA;4d_ zm;RHHG_I-PfYE~DwOEzfp5LybCx2AgP8vH}NcnC!end(e&!8LEGN8tV>W#O^q!Zh0 zF3-ja=sIq_Oh1TIQdOiebEkK|dahRu_7;VsqZcZQwDD_u&+~rYmWA8vHZHg>213S% zH;Z9m@SQa3?J&aUs$KEK!N{ubZhQmP(f2=|_l}=mPS$_5ZSR41{R|zj{7R4oSp{}2 z%abO``<yk{Wp42!mOOeTl{r%jQLd}w+S}DTs_CPKA0<^cP~j;tvNz_F#faZ|0un{) z0g{vh$GB7-7%H$gfSO8)k2KGtoiIg}{D_IJ%1ufY)%YF-=N|R-&0D<p+kO-8ll9bd zZMTlXwvl0;pJAs*@br22g<85mZ^GrGdKmI24ty9P<cnS1Y{=K;GM8Q#KEiePlUV}l z8n2^Aw$K>TK)P?9jmWSY&#Nf`GN_1~(vo_<Q^6@yXJ;qHH47<KA*heWt`A;Oqbt$* zQJlJRW03djqaM;O7M^l%zRO(??4I(0B{XzcN<C*$SZlbZazgAd5%4-oKU#Zm)b&cf zsCj1*cZ!%&?|o6O4?k>^_y8rcGgy963vs)XvRO>i-HSuB(Fo9PBsSy<-WrANg_B3^ zKHx4i9fNb7j?~q}F&I1Yl(San&z~kenFnt+1*3>26lsgLod=ZH<P~V{*)CAe%H{-` z9(45RBXacBgKw~n21Z2k?t`;iiP|E<IVdMTR6^YYZax*|lOO@G@b$IRcCMhehNF1n zN<P<U6*j?UzeDoKz1<L;E0Mlz|MF?<{dctx$@o<i@Pas@F}P+cFxKcPXY~}fb#S6R z-RLEuA-#Ne#G-M=Jc+y*+p-P3+z4@=8m&)auuhx4x3d#vr$_Apwj23u8)e34_@@Me z(PN5U`k`@kV+tpIsbo>rMfG?T2s~}F3(=oGwW;^jVIaR8)KfiL%6tkSGR2hyG+R;= z!Sq>Jy+l8K1PrxqeAYXzWbm9>h(gm}EEMl`0Tx>WfM2s|G<s68z=R@c9SxA<2>Vvl z!UP(oxdlgCaADptAfHOD>&oC&2%xkQLWh0hC$sd4|I{~K5lonwq8mgkvJyrQ<I7@< zVAxiZ&aP&$8AG4iC4{!xfYCpRU(2DQr#LqKg=xV~xp)SO59jQ$R2WZ8awUw8XHo^} zlQa(u6pH~~h;CG*=mmf3+F_25myd^?*UiyBCfmMA(;{0@)4trdZ(V#HEgjrlt@z$o zhx1T;pj|a@4{vV9cPZYi6nk2I0Wpv&{W$ou1SYX>q85*M8`2Kn1ZL^VZc>oeC@@aZ zsL2CxxfRXrv$LO>qUNx*y>z=t?%&P5-s(MHFReu)^1oVN3+IlPW9u+GJl#I;6;~9? 
zjfx-{t=|~p@2(G9&&EA6O4OyVbd-BBDVVsBJ>CtwTI4T=GMH7Gj~(ZjSUyc|WNCMB zyxluY@z;WG{m5GgQXg9b+i$(K-qW&5Uf$>nOdE{2QYGm!nnBMQW~}Il%oAkdE@0o< z)92)oqj3Sy)a8PfgSKFa?CKiG`!Io=KyJHQ@fN;dF{p$9DkAQslW=}XdU2Q>oSz0v zqWmuaf5nR3N)h3FG_+!Kk#-woUH+j6AcL3iR(ZmrTd~j{WoX}WT116YWCjKp>d;PE zgLH|6N+{qZc&GIeOA(xaskQiF$0=wlrxp?T(7?TtW}G+>r+7HYO!mGuA?k1fB07Af zwlVt-e48=FNJM`h+)w{|{K+JQFd&UVIW$64Ul`HJ+p1_$KN6Cd$$1mbjhqo9k}!Xd z<_2q^E>5M42Q?JI`(Oqo>^sMxZ)OuwB_vTZLx7>PRb=y~aqTY;G>)k;U8SE%4Sn3q zDG4X5SZ=5S%t8X~R-}6^h|(lpKKR5yUlOHXyUIXhe)7+(x3a)bxfnP;B`9%WffenS z75uJ=(1YCJ`jL4JHPco!4D7r;mFp>kk%0YrTF@;0c&row!Zu=zL_LFssW)N(J%g3t zW?wf2s<67uPaQJwAKdMF!Nnb};affH@l%TxglIv_R`pdM)t|lXCu@eC?S`Gr#+`l8 zmJt;t(&xZ2sO({xHnz6IO)KUJL-JvvJ^0D>Drk47Kj{RUMkUSW>^qf*Hbv#waSHnu z`MWL@GB)<f6RR&G_#IuKg`DaWQ=@3>!RNB{4}JYapFvc;fI*z04=3_Uw%Hg4S7>V4 zD57iCEak_gPkTd}tQ&^XQvOAXv?j7ghm3W9Ny^9%nb4AD0I9;lp7P3TUM`Hf1@5A9 zRABxsZY>V6p<@tn@$C(xI%u8HBV+v+>;p>Eu9s1q>U#y@a8dAuq-bGvresl_)e$l@ zb{#>Oc0Px1sR5Q}+oQ^3Z<@v)5R@(?d&tkJ9tBa`^J&9`N=Imqw2-{MyUTM(SUcT4 z+$KZXQ-o)|SOKd91`~6$m9nQ|YH+qhs_*3Xx?)>I<Rzp@_^Z{e<5C#3VTFGnx}d&q zEa8q+mbg|owud;>7{7`d+L@9hIt;-Zp%)<-LmA)e<C#?3ae6qvy@=*0^LVsX={S8l z@9K1O73EVmE|Y_2&0t(rGHhd4H}*lVk?AwYl2I})lTGsij1sxi)M*5MFaj;&x4Dh( zkk*0!(_&Ec?5a}vBN@+wp^J$FF&28LRw&n{mMGj>Gnvw`YCVvVNN_AED??vRq}tj* zvtZxidvWYmZsi`~eSDPC6Tj2G9|!a8!iprTw}Fl195S|@I+<3~g)BW~C6Z=cBIl6# zkH>7l2COK0_V5n;4?$Z-+uKyCv~R-;zF81A{>VZ29HY?+J2&dUEzOHi#n@=-rf=cR z!sQ_XJ>ETWW?1K%!o8w3=Dq05lzc9RACsBrPgVyq`{I1vHjPBMQG~nob6eAk#j9pN zvSRLdsHdTPiQx$9w2EDHqt0>PZvivO9T^cZzn+e2gd-(;v+hUFekfj#`*O}2P~u-t zJqUqSR3$>Y3NC&XC%9O|VUr6Bm(*@ef}eI*`WP;9!Wb_nqTe%ts+u)$<CV@RqWmJ` zs+chW{v+Z23&0pzT&zk)M_06sUbbJ|2f>z3PE6Q5w2)_ph<-T>XgU$rZ;2d+oNWxR z*mm*`Lh<;})ytb8IlEICI{nrHGkGf+L0K}$$8iOfs`=7-zMf2T&?Swu8_`}sGxRnv zVAd6O)2Ti9$sf9r@fsyqK1QPrn0T&T%`?@lJpOZ&Xp@sXMB^l*NqG;4`YgbqZr+KS z7dV+_7~zU!QWV}IdbC&E4zL!92>2;)|B^y-!cLCq*6{l6zAtZLUP`+$O|f@lxS6O5 zJ}$=1f=Jn-1Fgapv{jZO)3i)$gQodsSQI1XAF(y_`e!Sy;-1uG4VUh~n$O=(8Kneo z>&WK<I1FQ^ODEH15t85sZ>nR&)d#v=j<aB@NC4*LqNupmO1;1gN;q!I@x!{j1xBQd zd9NM}BBwnE3^9z8Zg(%*-V^y~v<HACpQWF?E3coLrfG;G`&>pNK_l!d(`E2wI6Jxr zQP^ztNeGQR9A9hZ?IxoV+$8sz_!4*lVDjo~b%SEKW<Orbw%-&R%SJ1mMD~4UpD3xD zRCpg(p%KEgs6d$SlB$tEt^~!ibfgCZOrn4HB$bCZF@?xl>8@xbiXMNb%3APyAgPg+ z3MyZhJ2BL-3oeECL=>9Z^ki`y<D@ZhgGp{&t$aR&TB=5lIDD=ZV+wvI)O2zHZLi<^ zCM3*05f2=_^`0-tHqNrlV;b!6f|;UIYjENM6|mFsy5N0;Hei{j=L8mOt!kt_D#FOH z3aSxS0s4+1Uei$Yv*A@8A(%`j>fPt$`z9YA)|cBqXsALbe-NVKnV0W!WDlCIYaHXk z!HwXtqGlajQak<%bk2Mtcb+d7C%)@n`DkOr6#OfX2G(r>Vp(BzM869&3XNCq%zl&a z`|)vV?O6sjp+V#Tej0Q_xtW-qK<EUZuH4kYU``_$im7y58?>xWV`QDzeulG=K0bu^ z(yD{qM6ovBHdCqD*aksd3JiWd>5mh~toEPDNcbqJ`x`aR;~K0LXo#i_8;~LJcvH2T zKBGBF;3l>gLs*`BK6%X+5*Bz=mEy-q`7HG+nIycP&C3{m@9gr+TX<GnBuMNc%<e@W za5qp3M@7BHjq^)OxLWt)u3i<QiRTU0hM91j9XsO}5gCXU$`2)bw}wC0)#Cna_GS^$ z4Y3~EF9O`~pQ7D<;g{kRsO@Q&S<`CRp4F|4KM6^8Oz*orjuzj%G^-{KHntSpk=4El z*|J*JgwbLM4)`wTbzjlo?X=v6=VG)e3fDUgt?l0T$t&}NI-ereJ(QXRI>n0n6eRV^ z!t|XWP~M2t+TbQMYMV=vb;cGl?j3g3h)9<^+P2)Ws?>7Ri$<p<j$k#FT9(dCDy>Ry z*lWcVbOG6s+@PEpoo5MRjKNHP%441*<|_#+%L55+N=$146E^`tdNUHTdDnci<q3_! 
zvlg=1kGWm*mXf_{G|KH??U@^4<X6MG6WEqAt?yvWjgm5%@NRBWJX5RY!x%6=rZniy z5L#}1XMIY$F>r2~*({uRxqh-atgEsMJ<+LjNQmNdqkb#9p4&Pw|0*4~9I;XI4Dq@3 zVY|MYF*Wr2gb3u4@Dc2p`x7L*u4`gK2JNn%81RGzDh7vz_(#=|7ooY?Mx&&i)<hFm z3IzBjzBYgW_igWI`~nah&5xPbmyUfSOdC)%zBg7s-|aO8H8rDiV9=`E#EsccGUQ)` ztZW0<K8+k(7o3H4Kh$-hjIiHi#llJ8PwjvfB^JfLPhRd@G)}1?CR^D8u07KG3@MN# zF#<Q0n^`c$qAghAr;29e9ou%nyl0-~VuiGxt;WFXgdbWC4KeMp!b^?XNPp>83}yRC z?!+k2d{(EBxU;g8vwdlbFy(p9?H72v%dCi>zFjGgRng$F{^AevW_B&q*iS{tb<WPM zq*F{BKX4gpuwG(nb>R(8VTgXL*O{6QT44B!9ljTtI$ISf9K$f9<kv7`Ail8^TQ=vQ zt5Pss&S8`275&y;;oobJdBnnb&|!MdzF!zgTj+MITtz6=9<b8DAkVZRT)p0G()<n8 z>D0Z9LE$O~V;<kHjpJo1N|H9FbA}F1a~=r3r<OJpRDQ$lzmQdi|9N;Zu0*{c71HNJ zbU{*7sCC@-RTIZ_tK^3ubOf(Qgs%{~XujQxi{U_-HfifCpxBMr);i7W7!*fHFJsHF zfy|deE3vJQa>bVcB!P!0*4fr<yq>acf$}^9<4I18{e#|%gn@^?=#9NC!F<Vv*wT!m zB;Kc@G?P_0qgxS6X?)A3V4+e}$Gbx$qgRcE`<h0XS1(}faM`tAvNn$`Lt-PGwSJ1k z^w9XSaV>J?RH=a_Hm{rUL^hF;cCKNAR;rV|?7m=RbDZtO+WUZ%umgVu&yP2L?b%OX zS=PSFN2$1Tubnv>Sm>DxH)Y#?%$}G;F{A{{w*7viI*chTx;Venciw&m?-yFgBzi=& zD}$VBaMAl!<70XhsWPC!0clo2H38_VX)*X!l^SxA@U!}=4kRk~{Rteh$sFt+x4K5M z8|@@dHx8vrRP75vYK#j_GWR#G3IxT-tri7iGZSoc!nmyx6~ow77|-h-jP9@87v0lh zNmJ9jF`7}2og~EHB%(?#!;F*KVTPM3+b~8$a-HIOj8-N0pJuc}Iyr2O5~I^B8}Ya^ z4<BfDVFx2crcC06SAIR3qSTwWJ=8Nud}m<3yJJ3Se(lMx|90;^$@JjJxs%v7%DnSl zHS~zl2TeJ`b+HWguma$^aTA_YtU=lD*A7>!>M!ZWiyYtf$>Kgw&p*c+frVUNEJ2Y? z6lc(HZKcu#r)pLW#yv;owYx{Ypf18WjrUw4hq1?kK@eK0bY?y%TX8M9{CQ8hu{kF~ z+RI9oVCF~ow6+vuupmc6+^a>(hcWw1Lk_FYTN-Bz57oN67pkz-w(DJh*VWq0f|e7` z4-THuu!y;ugX|B6REqUFV$B;IOK_?>-<s0GVjA27Z_t4g<wVK_mLo*Gjd()1hKoCG z5nD|7C=#wv^}^($0mnG)hcy0YJDgU<ToRq}xcKdBR)V*Sg@d6h9>G`aCW5hqx1X-s zcTKWG<IeJ^bDu0SPUMGklPP?CMxfI&tZAryyXrZ<rvBIkzfZD^?T_ONtTqvxwL;I| zofq5@_*@NsVsErh^HAdn+v|ewM{O#w8y=6BMz8f{-6KG~Drbo}!gg5>^RpIN&Ig}` z+=-H-lO2dFw^g4YIp;?N$R-SJEBFceZ*&{d@%Vmh2R|h8EWdNS!Y=Q&psc~vgR#~z zb%XoX5mA+$!nU8GIeytKBzYt7ilgiM3%Z~6@7`o)HV%e=(EY6cOk4g(NVoi-_-%PT z``_``3`~Ec{uPZJY@F;3j2wWBtbdE$7P0|(A*~&M=ltP0;pjlC`<v+r+PDHWexJq0 z!3dhz&Zza9PWyYL`JZ$DPAtmX+ZZSrIRZ6+bn?QYppbGSS5PGRp8#^fza54Cc9Z~W z{U>keZzfO3fdTG6MB(KH(uue_iYYn%c8LBi6LVkyGW-Yo`nymRB<i1hQD$0tAmeWy z9TYq($^c6Iq0d1+QdU|9;NM-(qD-IyhQHCBqAZ{U)87;9pak3B6KtTwpJ)li-x&W9 z4F6jI2`i9So{{bM<puo{D8&N0On>`dP>Kyy_P3xuP>K~)_9ylh6xjaHSL@H|f4^FP z55Q;pKO>d@DGUe{!|~4^<-b+qe~mBsxC2UAbGa3?5ZXnI-9+<UrdKPMKNqA6to6Io z(^JGC4AA@OcN$mUKmcgdl|HCY3$m}LPbw;c3Gyma9Z%P%8vz`9#mc3|73vKOOq&nh zo9PZ0qjY4u`I(6c_KLD25*!r&DbdyLedTR*^m=e@KiQCWONC0)@pD&3h(kqH|B~xN zxO=>mhZ5BzlSSq3oc5><7KIhI&B50bT#dFCXP=wfiIc%e0zR*oo!!-@Gd}Of>#O1K z+!Q{`wbkxa4cYx`G!Ooj1PS2&f-`Z_GjYNrVZtjx(ij3I2O>p>qowFWHfVO=8MPO9 zMV3j07K^5+n`d}SCunL&fpt{5w!oSn?#?f+MvIzOn~Lj6pzYEv0%ljQG-B?ux?X?H z7U_6DU~9>ZNIaw@J;mIHkE~MT<=z{fh(FxC<CqI$pL^HrBzuAV^+Q8pyc7D=JNcR@ zHi13DT)LoL$3kVqT{anIr^$KkFa9fzRt-0goO8)X7Uy_MxX{TA4V6B>6gTyqCIwaQ zxS6d9v~jb#>fBAvK>RG3*lYgtu#xx8XGh6`+K=(+Ix(RN$*HGA40+UGO`r1eD*_v1 zTCE}tOwBwmk}gM?XnKz-h;gHYEL3>lgRGdA-~Sv|S^POt*`J8emy9@&tR1v}E-88v zwN^Nsdz%(s`Ox~hTbR?(xP5aQ_ue&dg}WYF3s4<GD?i%?JNZ=Dh-q`e?S1<=TlK>y z_l;~fhA!q?yZD$zGRrv`Dl4JB$|vJkQbUzObmqi`>*|XMNs3<(N!Fd6+S7HZYAip# zt{7ve6&|Nj>V53BL7pf;9V^5hD*xDBjxtn?(OrBvn0+{yzc*R_ai|o#FZW}23I13a zE~OzBp&543Ub^0~PI#yd+~}E988*#PY6XF&-!|YrMC=}8tE<o@r_-mz!_vU|YI>CI zOg5E|!pHK<viBCU8b8^QeXwdwtkM-i3}QllRF;*a<%AKS%E*za$ZdtJLui#rZj3ru zi|RbHl&+$dE>9hpA{&^kklQR$n5gCFZxKY4t2keWsAbVL$7ytjBeE#@r6@k4@!<ys z_H*zBp1R)Hp}@ZK^MT5X8ViJoOdiWx?Fo^E(A8rt?x#`@Og~R7sBRP%ZLJo_kG~E_ z&U-pKFm2?9_$b~CmpxufD;>tb(}aja?zM+9pK~F)3S+{=L|u(F-1aSHuM)-V%b<yr z=19VoxFF{;WT=$8$pzLJrM?$v_IHS4E0k<0QyZkvnSG_vOQSVTCNhujE5eN~dFI^q z<kagC{T~3~KpwyGVrIz_=BXF0GNtYE<eiFD5S3cKjXM6#8orH+h^mX$InZl3bs}Iy 
z;Lr{cFpQA4jyLoz3e4)vtQ)WBnggo?$`1npJn5glwQ^_g#q0Mc=kBEZW*hVDb$n5` zm{Az1EP_zdNmM^LAf@S2FUoAE){fak)QOwN(kLM*<-OQ6f+Z}ol-;V;yy}!($}d`G z@acpzD|^$(xKc|y(aE{8sCaT~`UvO-h#3Z5GzpV750|xwlray7=i-LJLb?Im8a~j5 zP60tB?L>Cbj#S)=;)4A-Sp=85zqm=Ff^&(sUz1)?tEO)we2a`jF?0b&)c`6*Z+7iq zIfqoMu$u6Kfx^~l(ENtM71%?7e{km3<EO7q&f`frW@+f1k+(~vRY9_-`>Hu4#uoN= zjBS;5&V#B0V=S5h6mlL6Dt?kSxoY0^;7?@i@_4jE=@mR_WL#L3Jwy!xwOo>2A}hjk z+q3Jgm9*Zd>YS|aov!bjX}h`rPs-cJFLh6Kj4ZcZTWB7bgV(D%CJGv_CzkdEq&8Xx z6e!rl3h4UN$-9zXuptt&q_|+mrsOSVlAz>TuI1AR{A>F+%Q+UG*Ndi6^r2Qla_fbv zBC@<v+Cllj=c8nQU>WN8^8GjOKAfD;fA8}4{rkbuxvcWe)Z)t-Wu4g--Px5rdDVUG zS0-ncww}FwjpwsJ|NP6q<c33JwV-jNuyKT4SY=7u6wsd2GznVDXAn*y??J2NBW#ka zgs4*UtQ9j!Vo>m;lXv4#M=DrFxkObIHjlJkTd3=vN-OK}imotmOI0unm(ULq)(sTU z3E<W87tjk5Gzb+i2o*PtlCw=vbV$}fWElGu+J;woBs4_ib>-BLG+deMo7lKAxdGiF zvu4mEwp!UPo=w%0{GuI+xHa(4q2eQBn*-RR#NVeu#-WH$KbBSzNw4ZFX`N^hS{<H$ zrMUejXg-9xApX~8wqL(FaXbHp<<0B2ff2a^nn)@+1gW$ug`69uygQAO*Etm=qna<9 zW&n^TV-;)cl@*oK+Hz&;)`Q)HAAj)lbz<3of>WB9d8|cHSylIJ*Ti;4?U-d~ji7N9 zxtu$_lCPvqo|a#eykjA!MgX;}E2p}*u3K7caZmfmO6T>3#Jm<gyJ%h|S2A&{?}W_0 z7c~1$*z$~o9f_nP1^65V&vVMCJz>@G=hhA7(F;3o7$Ia5eZewL);?L$IbFj&N8hK= zB(TgT;*wW#OITiSUeoyH;kC)NhjZIIU8Bn|3Yhrh2<Zn>NI4LQnA1o)3hPHg$N<hC zDhxM*h6&)cs1&_~O=FA$%7b%yAm{+i2j#Esyg9Rc8`Y~%%D+pwedmFwoXzR;`h*u9 z$)#N=<PZQnm4XMgk|!|Fr0xgIbLa$d=>~J_h4AQy3K&Po*d|y76{nQ-jLvQJPHeg- zHmf1BOan^OYlgcgw)2{&Y$NMH@X6&psO7!E#;AMMiJPU+$+?3?moke8NNZ{zUTM8D z<sVrjukA@CWJJKJPQa;ghDYxQej}j$w1hp8q$9bE3#A-_S^>eJ<jSn<!lCBOq3Xn| z;l!um2$f&cnOohNR};ad;m)h&&94_AVjL=G8>j1;V-Z~G8s8k0-5pmpRNOWL!Rg|i zz0F6@N9J$m)?YLA&1F~fA{4bC60u-W^pb+zLpEv$wy1j5!7xOlhy=p}<XeSZiYd8P z**yp3m$c2)c22!|_2z_@e}nSu#VcWHOW^J7MTb)t9e%iIe@epUl$hlyQHxU|=0Av- zoffksl&~R{a-fiNrB(4_*9qh`2oo}i5;lt#F^iG5O;mNwP<F{c#5YuQ&Vn|Mx$ zfDN-+AmAkLTq<N3N3Y;6q8I2KSzgjMR?#vX6jLH1Z%4+hLBu3S#H>ibsd<Xe=&YzE z`9&KlIVV<CS0Qa@X+tL!b7w6ZSABbgi4(%y)y>My-P*&`+TFv(!vh|n!Yk%32va8) z13M>eYezLRdj(@VX?^<(`p#ndUXrFk>W;A%{srF2ZOK(5je{#!=k5ahEBBrbOl`&# zbZR;$u_?O~i&;`jI*A!4D7#nd1hqnxAz_(GuZ&7{N?6951y@BD4ODc^SNALwG>nW) zufKhFVs-xB#mOy}h+g_1VpjhxZu7l>;aLtfLg0`=hLBN~kXfFPO_`8gnUF;R^<VTW z1RQFo`1FY+tZ9@yICO(W%wwhPlH?tdm0i-cJacrt3-}DenAHO0oJtj(N;uR5Xk=X! 
ztfO=4ua7R>jn8hB(r_o`)rRd5G0GCKtNp-ld{)AaR?%5l*HzWR+0fC=!p+Oh+sDN} zz#}va85!*#6CW6t7@U|Anw%Dvk`a*#H4|!hY9{;_UJOo14v0(gjgI$@i1rALa19J` z^!2m#^fGfn=-WAITG%NW+g#LhykO|9Y#(P6S`lA5Sl7Qayl`*r!LwUCFUOYehUc_h zFbN|Pw<Z>`IIkI~;#Q>{(4vlPxL}n{qu@;;?<sB>4?Z6>9~^#h>r{H#mF>HaPssWE z7I8Ui0ydTJgw4)ysT0vlQE{uws5;xb#w4d#R@Zg+4UF~m-?-G!lT%RdAChTq7bdUn z!YE`&#Ht8ggMeLyK*;!<q8qP%h@@?zl5?6WB16MHTi>r-&8?7K#Y@IKBDSa-BFccc z3LXV#7)=OSl?gc1Pn|b}Uc;>FB4LQovPD?BxjXpyxQB%LM8yKbk!hLnIr%AtC0XT_ z`PG+-YU@iInkpJws#;pBTHC5y+p1dHDw|s?np(;ln&4e%lU-hwR$P{pUlf~_3;iNE zA;l*u&LcF^B_J4jkhv?uz}8;H#9C6%R@&H0+cg21+)~&&-7~Q@vGx$ipWb*FlG!3^ z5JVtmNh)S7s2vKLuj$*Q_QmJZtNAIpWcp=vLU>!zH5;AV)plj-1fIWX5s?MvmCv%P z5Hd)s>3VK#-NoO2d-r8^?|eXVqp?%M1!ZR{UM&J9S=a)hpwT%+cM-EFW!DUCuUuX4 zTz%g{WNJ%p-4L&eI{}v#0hi`!UVZTDWU|g8dI&vxgpG%%Ye1lPL=+$zotcwXTnhA- zH#XN^?r81n>%Mkv;Kq%s6XVyXr$%O`Z`_=@ar5Sl+1VSjH&IV+&Wzri8J?aRnwS{8 zaijP8^^Spo=AK^oY*}MdK}~IDX+=_gA#64{F%=mZiwF*P@bR~D^Dwe^P&2W*sAn%{ z8DbMso>o25HoQK%bRWo{T)iKW+b*aVcuL5WLfl%$HV3fR^l4Obtz=aXB9nEOuu6ca z9Abp}f#s;Yo{ZA&6Mp`l%-euzA!3kL)Ii{Eh?<DCu)R~=yI9q;P(QdbxNyIHY|AR7 zid4*sj8C70Qw_EQ@RLbc3L1s0ATsrQi}d}9b$tqDZQ|)<oKK0_&?vb`8oL@hAsmoC zo?(&TSYosDGE2)#>Kf}hIy(oi4vvih-eYsK<MVTq3-kMr*y=3i2{gb|)PLvZpf5l# z=o=nx>+1vlp$Fww)+84cM`z@~2!f1=aS05y_3|}ycF{DqmeIGCF-2Mh7G>9sU7x?Z z_2~Jd=WnjgY}tjD(#j*w3L5ij1gLt{fX{&{W|l@O<HoKNtnFPGnB8;f%2HC<V0vlK zNj~TA$GnU+SQ=VB?ZuT%Jk36Q{5ZaRzovHqHHr7?YWv7WX5F|hvXDW|k4q;~!>94Q zRxqiM36q!^C6^ijt0Jk8F_$`0&LLUPuh=rY#wz@hJ~GGBE7UbGC?F;_CL=4Ov;u$x zWd@19J~auPe@1t14qa!*j#tM~b?&SGe(5RDeXfp=LszQr=mK|?Q&9!xCo(O|KQ<8& z5@Cn*2hXEtZ7XMBrC^Rk#8kFiTV8+g?B3q1yH8$~wvLJ#hI}t%LLqK*!8}zR*{JT_ zaKSo<7M0cUmUT>Xj&Ck#nU2is2}x;uu=Df;q`y&7*YhH#k#O;h$J6HZ`yU5pZdLa# zK=ju<v<4%4aBi=#X*8vRmyl7SqH_tGnjfb+(mSbcX#S3^Z#J=jAt9$aA(Q-B4t1J~ zwqi!1x;{nLk+n9F^-j^%A(>SrjhBZf$HwMv9^f3U+t|UL`@%N*zw_Thj-?}@HGh9B zfLFjHfmv$o>Vl32=07ew4?K@|M2wSvu(i7vxP5s;YgL;7kC?LNfh8ax1b<}ywtGx9 zi;Cx2AyYPGq`YGhsJ)6u4X<t#rMxGPewcAUd2Z8GO67HAeBF)NjT4ssE+sss0E|sT zOFym#PhY)l8(zC~1vT@$89kNVKB}HcBY;`~3FE%3eIA9BgRE6l>$Umov)ei@DYS|n z6tZsLNg<fzZ8;=#h}jf~7-WbA4EXheK=Z95>ue(GUE`XvYX`1P&djdNPc3|n^w;Q) z9b546jRjv^5cHR!$%)GY0}us+=K<LdPtAmA*Et~6%H31X+D<{=T*<;GG`oF#<-y&( zmyce&?HXNCvX4I{VooYz0obc~)vJ2eN!jL6$a$Vq_0{()i7mYb;jocsZpW3$6P*4Q zWpQPLo?o}O{|44~cb>g&9a;tNx(_^@WojiPhh~VjUz4yw42`UdQ$+dH`oqMcE<ycZ z8buEh88-?Agrt$HmAi*mM1)6Ztd<#)hF24M7SVZKE_H7$&s@8h2Ix3G5aHGj^p8)^ zEYDBO{}zUSbv3`^Q>cCfp1=J{e^qN6M9mQDh9#$ahDSU21X?28bu4Y<4Q)(36FNuM zwjaN^y^D%%5mA-Yvd+Z9W}-%MDyRv}mlRwpS=3O|o>kp)L-YEI+h@%EimIBgpAhx8 zD3<o&R|ZB8zy9|9hl2_a<XePRlgoJs7{u!aws5NXQOP*D$5t=jd4h<nWY-F0Q1L!1 z?Z&8zFm!Nn_VWvfOH9lwtZZ%>nVsk#z2WE)OUJK6L?;Qh2dutHaJfr-lT%!iZ(4hC z^U#f(v(ro1z0Ej2C;#hkfblb9xCe`aeFW1O$iLj*U)kIWRzD^yHy|#_EhNI$+t0+& zS;g2~%RaEEd3@#Wlg)?EA3S>#Q_x8v<9wRW;Jiklif65od$l5>@|=<n7(8W{Oo(dB zyXMXPN+#wvPl)>Km+9G+hmUr@$<loK^y%vCoqdI`>07i6t)Wx#6}L#&320_kMlvb6 zN90`|U%9XEmCdaeOt0c|Mj9cY?PBJF@C=KLOv?lbg%B0uQvhswd2!?Z_VvjrkH8cb z5kn$+X<|No3Da<!h)eFtZSF~};rTtS17lN*bJL4+<Nu6}`8uTi`mMRIpM19Z1EZsj zUEPJ3>XHkJ!&5Slk+F_`L6&Zwnr4<NrcPlHtFJs*zyA~j-#f96R?+<jenTF0KahGw zw<<+M6^mK`m7<p-V4u~URy|_qp0#!7(Fs(4yMhM(HU{|C>+2A@ngo=gB39!>HIHg) z8CPB{--^rQ!*jQceNh))$rZe*6i^pZ?Yz8wqvKMGOE0xu9vmCn4<RviYH@z^{`SQD zoL@v1qmVxMd~iF$`a!nQb;z_1zx0mOs)5VbCP46^{&w2ujvN}WR`B_wH)lEq2Qh*V z;cZZSG6=qvhqu0sor;l#Ye-7R$ok^#J-|LHzmrYF=M=voqbx$ntxDdt0?6mo3Zszs zkhV(!lap9>O&5`|b?4+l?cb=NvdyTpWFHFn7PN;q_Afc7;wxd5rS4rzBkOw6B(%P7 zrnYBV*E?6hD3VOxgG0^P$O+*V91@zG2J+rHc=g83o0x>*;ZSsPaenRYCSd92mn0}- zMaZH+B4BXAD8wPA-ao4+D7!naeyDqR5@K6SczZAoMQsY5-r3J*{`wW)1ibSHJp9*P z@AK=J5B{1mnuAUX15-&|V_H#ZL~5o-c(gq-z|`4Q!_?BsBQ(GM+Kr`q>kppA6?b!K 
z`=1sxW{`7JKvc=Pl*1#BZWM*Qr;J^4V&!l|exIdp;q3>zCouh8n4egEP}94Bu{4$4 z^V(kdR0`fQ_5~W=b?4+gG@TO1mhY9c+)zPe3L8g~%DG7yI@x-9_(a8md#`F~19B!8 z7qHU)4f*T$wxFiuRtiX25HiV;3K}ZfB_NVo0<(KVa(eR{hlZwZVlnyi|CIT`nIl3A zK6KTdp`n_#%ej>`(V01ZF$pe#p_Xo5TIN;;_8w_f*O~_xhh}d@<aG!d1fLZ)V^Q=3 zsh4#sg-1c-WR$me&5A3(9++`i&o%w=-is5S{`$Z#Z0}%hh|zd;rRDmXp>GkDB2vz& zM9s5?;-cdP<B;i%hbfhV+TMAB`XQ&JU8M{io&0>k3}ls6w)FPixH*GL2_7Q|-M1o3 z+w04>*0YLg*~N^&=aUPYsyV0l=k$c+U5PCntnVF%mT1AB!<xD|GO_vFay#hcKXhAg z_<2>A5^@TH6H>tE*m(Nr+t?d8BExbzid)9(dv6A$H=oxFI>o2YEbk`oQX%bF41&*R z7!TfF%`GRnX4E^W#mqDN1gXDx*?IP+0lh4)ad74GjZJW;^eVoRHn|YAo|ALu)$##L zlU+A#99Sl98g)k6S>D*u-Wv&iCbzP>V_*P%83=PRXzn;c<^ClnXs~i?V{&oM$uo|c z2Q|T%LEgn8q#~kdFrsiE<I+&i$P@_v<RS8LCG!vFYJRieqm9te<YawkcVP_#wnf2- zDITGbHeSBERyKO}UcM=f88t(BjU#rUWf0PyKCj26?l0$BA>~jc<y69?76_5PQ9yZi z<Aj-SaYTCK30HsF^3$iEdnPvb<JQc&F(JbUQPULtpjI{&A66C5%8v2w8*8=^m&DDY z&&VJY&72&N$iTRy!b^2M*M<&z=HIHiz&u2_{i9>L=KdsXN(7wh@;0%4Sv^t3SK~^r zRd$RGPv1P~@c)-CVC`@1>cn_ccTaI`LrOt$NKz_b4^ge2wXKewms?zYOkq!WZik|M z0;P;I0l$HuLA10}sib`Ykbh3uk3%QeD!eu<zhBBc8b-1cy8faUr*7@m^etk5FYlVW zU>?h+8KM{10wP8&>zq?}ZFu3XPx@ss<H*xe&WdKvPQHF1fDo|`jNQP@A2^PR9(gTk zdU;{v{&ri>fSR5M%H#{1YPw~H7Y)XhT~DhS?jD|=TAZEya_M&Ln=tk_8hEVxd~n{* z@YH1cl`EwUjj2T?Vae(HCP&j2X%$lLm)7DOU3JkSiu8gFA-{pBS&F1X@devFsMLx` zUc(57*haIUN)@Y^le@+K1<SqN7hvG)(Q#|zwN-78TxN|RP2WZVy+~?l$MBrC;rTm} zMFX0MEDAYi6%_Ekz(7GwEkt|=4Lp|g2sRs!di=`&%zb03XV;cD@81fJ$${|pEW0YB ztfPs4aa74ra`k9M$M}`8nW=@(<l~=j@D1DF%w9WSf#$}sbr8%%wf6pjvc~4L;!^12 z?r4*vZ)>k%?P=tb?G#;O;+My#7j#y{oJ`nU%raBLHXoQ5HczKk^bs|SafodaFp5Nk z6rV8mmnyH{{@6UUidp6lB5xj4&a57&;nOJVkWVG;6qeI^bMtXZ)vzJ5fLzX1)5_5` zATS~|v#g;Bm7+Nmv3?s`;tw$A=kO^1%I%G{yIZN4#14xhi-IdMy(6h=G_`7^x@%&1 z=F6qq`7iYt4r=@uuYdg<H9xuQ>eb5T7EpT#UOd90Y`p!9>>N}r5HJRr1{A8fq_AuF zo)I*rxnK{>i&|xgS!WBHrhwW@+9aBV*05>@=2e_bw*L#|YObL!KP@&4uC$MA1m$!y zsRzhAmumSo(#jyryffDxJS%RWagD7bmUR=;b#(R*2usN*s;vj$r%;v#-+Y*3GKQs7 z%%d`g6Z3QM6>E1kS8i`2L(-{vHHZWZH4y31r6Bk>avH|EMrLPM7N!>GC-$e%&(GnS zgmA>5fNxg;`<oquS5e-+5QRNj?R^aG9OO-rijFCoZfR1MF|-QqXZa2J^diNqbA&B3 zM6EJ;^kZn1eN+%xG7f3%sy-Xr_fBa0E0mQxyO`u;%g|bR=PW92HBZ+IZUY<7sE8Pt zTyGy<PprJoq7lHQW^e807MzfhS9J+vX~uthGVmZ{W7viR01QwEhNsYVdU0WTX>n$G z`QF}xp26!N_(V+djPkCYscji`<C(SNtygED!R*T7^b)|o03X@s{|^jlVS`|D8oN<b z6q51_gW{7B!Qoc!UfLGcl7`+gHVN|f3H*A&#A24@q86f-nZg$7f@W#L=4lLS0qi=# z%C1?A8bP`a@h80f%geLZ?;BA!tXqP|wvi1(pF&=}D4l>7Ny{{n3pRmijT37RLF6UP zV;Gej%$!|4!=tjxD?0`Uf2)ulXXe1<x12Ab>+;Q&<+;_Bn=32pcW*%rj>=&cHaNqh zEomMZRydGVKT+B~IXJnvxUo9Bx&j}>)Cn~F$H^v*e;v9WNYsD3hcL9&cXSn0Uy94l z3y4j04hS`KL1>y;i|TrbnnsD6MldLQo<6U~DCY(`FJPJs^}JC$xts^LewesbBB`u< zR&o0Yb^qEjIK7Rz<O;4Py=ItGJ4D6vlBQ1sm6WrZL&C!Cy{_>sQ@>&|87CbZM?_Fa zY*ub-U*E6S{paS!jyJ6rrv;jyotXbj`)6FykOSd!tIKn1D+}vui<r8xKEJ-cw6(eO zeDCJ+ijJut0gD3V1sm_Q%Q;Qcc}-JoSLfhU&}?pP6}B+5v;<q5T3np?vMlBJNz~6* z8NQ8(QI7+oqnFw)qvl~|=J-U#*&+Q+oLp3lEO|6M`SnA&wfu=CYzWTlo!5^B;Q5Rb z1Wc2dHG)W_5xj<x44T10TK+$M{CEQ1zp89Kc!~mkaHZ+$Dm<z=XGmD(fXK6{_;9Iv zgR2=^eTYnMrc!iQG;shMADNa3aU2A6`+?_I(_nt%JoYj4rzZAkUtR`D2hL|#SLfGO z0bNW5$T4+!duwHT8!9~CxO;c+)zixQP6aJD08CUrC^)w_uW7pM@^t_B;_}u8hJ28H z*d~Vjso!FFaM^pz$9u0|uWD(_DzA)6&%#`*GO~A6)Hml;ab;EarIB|#Enq}_!H(ZF z>AX=Kk6|pYVJx-07l{mlMLUF08sQp{d&1toq<r}JQ$s)MFVuj@caCj2Mqc7ZyBcxR zBw`8cq|%<{drwO*&v0l3Na$GGdwT~br52;(R;-DdqxJmID=7Kh_b{uV>MQeWYoO{2 zXh{dKSGKlL6*#}Oy?SeV?e=Y`>$h)xu6J%fdbYRs@@YtP4h^3Up`ekDS59i}cwX~V zWB=UD%IeD225bsVK1TMjkbkiA9_KpO_?K5Y2d`E%wWJr9VP+;+d3d8%7wVeO%Q@01 zxRYMAJHxBPqT&OzbL&NNLDh{Qx#&bF<w7Fwbxs!X;Ni{*fq#Cv{qWiTL^23ng^i=- zUCK3(_2d`qRqbK{`0jD&2(?^l4i>Ji-VrewCFO9Uze&X(md)|wspte++dq5twUy7p zzPW)y9WCZiQH0+?)r~uM4%W@Pcj0mCF1p^k`|{n(*_Bl#Ed(K}GP{~rNPb^o>&^0x 
znGx9Nt<B{x<j?JEK6+$9Um!S${Sm4DU!Tl>zE;)R+gnsy4`$CVCc)7^*v!>k&)Ql@ z)r>^aiCoI@2LU58Arl_`XinV-4xMlgoiJ97pfh5&rzKtfA!g?rQFKD!pPrul)IPL= zx#|icZyj1CY?`7Q*n(Q?C*#!6H$AcbD7SG;&>&dV%pPofbY>2i*c&%*{*%cN6z%iq zKy`5u?PSoje-U;J@T<2_GQLmv*1dc9>&Cr%_jh-my?GH5oeP3bCT8Ih-<aQev#4#R zV`OQ51Gc$^iUk0BjL%1tKY`ch9NC@W`QQzdu?HzY&pM8>_4YG%bWt<5Ad|Eu5VJll zVgXS$lad#wZaAA(2&-l=Jklv4zZ0?khln+kwA1dB=O-BcnPv6%PFcq+<_c7H!<c|! zq_W2)HIEuH2|I_7qFXyJn}=5Qyz)TgZ9Tn$6H^Oo>ITsh41V9N-SGqRKJ`W6!E(%@ z<$a%aw6K4R`u2kd$E+aun|JPL8~YG&sEM0|CsvPw;8%6ejxDaQ-`-x@*noeb!mx52 z?2iz9^ke7XAo#uHg5uzWWJFMymAkj0jh(ob)pw#cKM0weKCcH-&#oQHq8Y@j9>Am? z0FOj+p5F_apMiijwf2O=KeMzCEm!w0HV&?U$m@IOi<qbB1frZgy}ZlSnaz=<`>9n! zY??kA=60UpQK^L`=v)mBi;E4L@voO@f4=9;;sVI~=U8=Zb$<O<GqX=SHq;+(KYVoi z(Gm3)G<)#i;gcuN-n?{hiy`7rrIK=RPi`scoUiJhzdpOZe&^Qe_9j~J*D!*Qj%^ok z1-3_LI8k#iCoZ*j<W^S0IEajjbMy@~bwX&GnlUIle=lHqnpc;QSBFW(msvfKQPrP8 z)t5m9NvZ1pUvl1OdGz>ngHJg8Gey1#{ikHwM>e7g`vi^S)x7J~z3Zr?T%1CSA3T5C zJHDZbNEg$#wevzorDs>SwvWtA|JKaR*Oy}F_T?R&f&47*pPdZav7^oTSE`PI`Oc%q zQ19*Rd{gfqcz);cV^n?g7}fsq<F_B*w{#Aw>Uoh08VYFpg%|c$bS<=ATbx+lSiQ9c z-+LgRiL&<xX677PY;{xxsmBDfVae&JISHs4Xm;`j7N_MsPw^WOuqsktv}aKBr&mGJ zDS6W>deX|fpH>U{Pt~CRqZSfX(0M}RpHnV%&sOy;pp4D%y0Sy6lzpLg0LsaqlW`nf zytDc6d30f~fR>xFgOgWybY5l6!06~#q3gG6H|CBqHlKmNxx6&Ly1IBk-Z8X8OnOk; z_fbcKy?cM}bA5=ZPo8|Pd%KUG?7zE*uDkbkcJDskfzR%}eDU$qj|F8-3_|)O!lv#? z&Bd4J$~)(-Os=5<<SmTgqf$CEOG}s>K9>ETy?Lb6LonOi(_46{E+MxdFfPg2Kg7)0 zP1nMLPs`~)ltWGnnv#oKpHuOnQShXecc+$fqn2?Ym-qM&gZTez5YJ_u{OsAuLaIL@ zUGp@zaiZzkYTNL7TFtPSd9pgPLBqF^@}i@%b<~4rZw6<!HC)pr^=uq`{6M?vIy%SZ z=DxmCdEBLW<6oy}Q1ZUMzO=dd1^8dh%t6}k?K}nz-{<_%(<e{?a(I0F<jKy{C%ey{ z?L2$BgL)nF_+-BghJ5(=U9{}K{_u8bV^dny3BpM=*R;a++49c$wxOlPO&DLczU1&% zVc#&=k7Hwg9PetZaa6i~sH&wEg4xKlOptnO52T^3gOY*iY4yPWs}cFVS}+Zob}Cs{ zN*NbQDF<>X$1{epWLD|K<|)M|_rv+$FD--fC6`grYy0S?v0sU#eSxNLldN4Hsf0~> z<(2zSUnf@#it2kC*g1m8rxlm>4G(`6y8b4Ig9%zODH_xj)l~>24=VgV@VC*xf7W)i zZa;*I)^ng5ML4EDe+CtpKUgvRqu_tC2fg6I9)|oql;?T$c=yG#-RDnDZ9@oow7Arf zA^ClPeSQDJB--a-)E?#SmzOZ9`{Vw!FGB!Y>Zh8zdqC<Fa|`@q6H%_-#ZAY|oL}4F z|Iv*851l9qd3P#lCkmil(w+ic>9j+55Jh~5LaUg%Uw;1dZ^qcmmoH{#rkd)jo9e3J z$<rr$|H_?t^Dd|U1_0kYv{uqKEoPRW;#sTh-^{AyEu`(c_Tbt0@_j>OzLcJgy|)jD ze0gItDnB`Qcs>}GOF84%=4GLQU-~?S0PW#1S$B+|*_U@T?O#01lh3LSFay$0(2&F9 zlNT>=)xGD>cb`4q?*b^<N12~Jlr?(11B1oSKYu8yXrbZLJ}Y2o98glyF%Rx;Xm%ZC z@6l>Mhn}c|TDm?rhieH5F53gx_Y4hHHn(P$R)nWyxP?Smdm{C1?UfBo2y{dK(>R$( zE0k2)l|%+XD(g-z=Se2-MWY&^5Y%9l)MuAI0O8fY2kQ6k-nOwcKhMieO-)5cMoLaW zMnOSNPEJNiNx{X*uBonCUsLht@737+7RK0IzOfmW({;f*3tWw=do`h`rF&HQ?(6qe zoi}C70(7nH+(ROg3yNF&`hG`#a{nFzQ;Q1_B+jm`%zXv?XG^pF;Qm42F?Joa8pACH z;Dca4ef8?e%U4fdyo8GRFREhRefjFwmd`(vzi<1|K7Z%ck8fudS7cP32sqTFETfAq z&t2+U>>6EJ*xXv#+5ol3IGm}kXXYGdIqmp7>P~<S=*6>f*?GvwSO=c~6GvAq6Eh(L z#3`#BR-+Ia4lyzo0diI$3RYnXHepIOVg8FMQp!5As(NwpNq=|R=VxbRq%N|tGAk*_ zy16)}B*rz@*T9R*i}P*G^_i(jNKZFIJsn<dE-p@v!rZJs^Fmbh<%w#DUazeJ_BtK~ z3W!P&c|PqBDoOjUktNW1WKzAHp`DEfDkooB-*|Om99JsxNLMq3o>zdn%(}j|fDT$$ zZ{6CDSWyc9a6eGpx&LGrt?gf<{t^xOt5?rI*Vivze_P>2%)6)_@#4js4<FwC^y$rq zk5FIz_~F(2A78%v@%g)V&)&R!^z_;8i|5bZypB#PV!vQcC+*;t)KuBMP~X2eIJ1WK z_A8hKCZ?MoXX5De%F0c!Mr*6nt1JCC#x7m%%qp)6O-c6%kGAqa>RH<<8JY{Z7Yl3I zQBsiESeRIt8k(CJV5*ssp0$N34;KeJ7Z2?7-xqkG8dRN$iJ`5j?uh9wAAU44&^>+n zbV#7zpR;c}PhO;yU&TbPDOE$lrirK&rB@xfxQ(n?_?^91gEP1FT;p`C>=41>3AqK$ zJ-wJ)!Q*$cSi1uzZoRp(GQYYCt_J1dQ8Q%r75)Jx<&JUhU_#JA=+lG3j*;<i2!Hwd z?W?!HR(J&-;YIYjZ=St;+0oHfU0GILSz1+5T3KFFSpl`QysWsatf>F$z{4lc!0o*M z>HW;&GQXrb1hyK8jQrLaAiraDWqy5qU+r<tn){}+mzI^JWu#?frbA6hO$m#R^78k$ zM<7h??982AO`P4-j7$uCl2uF*ob0SvPl}tfJs~B1LEV*Ke)(Hs|LWCCIyzdu^E|-w zF+bvga6WVPOm24ipRjvFlPkIPW59gZ_?A;-t+-V-M6c4;S*L|eeG+PRU%e}Bn^Lmy 
zG;=}tM#tw=RP`T7QI7u>oyUkgW@^FW`uaCP>%NBv;2&au2S0<(#{3HWi<gI0{UGg! z>+5&#clP#}7#WC&iNHpFRVgXXoH-*WEB)%jPte8gJbJYA;@Q339n>0gRwZstpZLm= z>fXh+q2-$^Yx8R>5YJ+aJ)WNloqA~e1|uT_2?_DxuRDA8jF70P4borJ%+kcw*VNvN z0|)aSt`4UN$<<7dFJ3~>@%LLNE_qRclY{NIeo9MSjhupfacTa4d-qD4Mk>4KA%KOd z=9;76TBYIF#HH#>A!*mpJH7end1zL%nwc$lQS_YQR@9u~-+luD)Y1=dHOtGWsgi4` zM9s?97RJ^5nx*-o@L*{kp=sX-{^<b^bC_z(Bc|@Zf~jwR{P=ixmxhMg-^c6OlfAW- zrTzN$&1)Iyi^9T!_xJX;@87%k7&XM)f3o-T-J6)?Vk%xOGBFFM*!rrT#kwnt*JoGf z);`Znz_)B_aiMo`fSZf6x2yg3_U5l&x4QiH?HhSnDK1_fYcD@ND_d=ACmAhEHddy+ zok!bS>rl5g)-V-*fvuI<86q-fA=Rmy6F+_Wn@N3N_hkZtvsnH4r=Nbx&rbJpcirB` za+l`k&z^!fSeTjo_`ms+pTByWb!nvi#%9;#t@PSaF^d#cWP_?tBl!hKA-$ljy;nDH z@3}-2=vv!*ghnRiqoP-^sY|zS&#te7Yn|WPTG-wOk`LW^a^dFs8phLNDzsnN+FHJK zd*#mE#mx;+_}e?Xk6*pH|KusuCvSh;d;RV{DxLV*(xAdsaM>R|f~S4|)2AOl{rvGj z{pshQ-+lN9Y{L^!L97)Tym|*MKJ7hw3d|?PMIZk8s>+J|e7yZbgEyDvZ{2?Y-;TZ( z@bKNocYW7JImL|6oYyxGsi^2)fOw~GVr6z^X#s-n+qdU8H>Z}D4*MK<5t!%Y;RbXM zf1QS^G7AU0k-f97m94s&^?6xMA|fJG3IknG|Dr2AAtE9qA*W&xQ0VUM#oQ_MZ&VER zb(onLvA$(%WdUO=0U-g%0@mLpFDuQ?&bGL+@a{(}e{6SX@QSy)tD3Un#S7wa;-JD4 zO?A~Ef1k<m(GMR!VCiL_ejXa=t1K-nFV3$hDX1zdOixXB@rm*aON>m&GWSRjw<%Dv zOXU=jC8uFzWMUB$6;apJ(=c<jaPta?P0X*Zy*4#nR9vX8uEx)I9?q<YsIZ}-L48v_ zP`wWT0N-_`Cp{xAHa-?=Ok7NCTugjITzq1DVq!u;LGHrF1}HqdqNpGbyA4~Ck&*KA z^I6^8eE$B&N9d$F0RL!rx3T_GL0&ex<`m}d*X-(w()G<vV7|4vp{O7iYH>jx<_W6i z=aiQeZfvZ-|M(MhmYpYixR_U0QGz3!nVWs(>XnT<w;w)1W$#hL&F&s_meI+{^CD^l ztO_bl$vMqam-?40TdyjsD66O_d3d_Puby05oZZ|!IL4uw@8{*=!pc0H9a?&NB^^CY zQwuFKOA%EA5+3RA2r2*H@6Y__|D5`tGsNG0t|b3BO+-LJe~w=<yRiBjHuiRRN(*z% zO^jrvCB;OA`FMFG#6@MKE}9t|mK5ebe)JG4Z1Ab(`WiUt-_)|Ayt?XgFq_aOEjeC6 zPDWS|&MlARMR6BLyN3^c&*F4;c2>Y1>s$2nbX?q=3X1a3soPty+{rB=HWDJ=;*!F< z_rLl(mAP4I%*>3iWi=J0-~iu@)Fk+^a2#Nt{k%OD<z-n}m}zOLT^#Jb+2Ff(Zy6Zq zdAYfuH_1p#!pM%UlJHBJndqs{F)?xS)6&vfn;VxGWnZc&%1%!<($}G-rQzb?iOtT> zud3$e=A@@TXKG|nn44K%QjngKpsuP+MMWttAvU|VhB@`1{8m<$q-3OWGE!f@0#)H0 zNl8jjQ&TA_$wPp{!_5g}Tq%0b1O1SqB0{hoAEf824<C1*KYQ`^-Q@Hn6&01Zn24MV zW>+OK6?UGIlETBy6<~&~K?B*Z8z{(0v$8THTpTgF0pE4%)DP*&30NWg{)Zo^X{b3k z*stFh-hKJ<{?5*mH}9tBW+f#r(9lv}kPvZj^rbkjOD=Bd5M5W@w-lXTX=mpEE(J~j z9L$i=;Kf_FFjtmGrY3}h1Ynn-Vx>h(L+!iozb7FjWng3ksTaN|MM*^|BP}I$@d6lN zc*Im$8ENqO#AH<L7xg_tG7lO9eH|@0v%*3GmZm0&aZx!LDZni}#>Yh1SelCn3)0ci z0I^$}>)(8JW@-{KF|oAd1@y5(Rata}V+9)&;6D#I!3X`kJqq)(%8K)n<D*rT6ey`E ze7xL$EAs$0oY2FsKYRKFj*O_Nh=GwF;E|P<e7H?#dow91NkE|Q^Z`?c^l&?M>J(7= z{Mj>HU;60LLq|Is_^gSM!NJ$xym<qPmlzv$_#@hy>i_+Ze=sr9PmPa!^VZb(C>14z zu!sl~GZQN-)16z}-!uU43dhaa#R=3N#r&;X28IT#EKG;L<-z@Xuvg!I|GnwgJBBF^ zw$_A1gx$Se&)>d#`R@J5jS=W>OAE7yTk@ahu`oBiIe!yZ0|y%`QxhZ5Y8GZDa1_u( zZ(}o%!`B9&UqfTyp`)Yq;ghE?KYXmNDhGE#e~zxI?DGnazWz~GA^o%5TH0PYC0z^c zV_WxMynXiMF$@!ME{u!}<~BB9bh|!22Axq`L#?)|3~Rn(cnI}%&h|FoXS6M>1;lX% zdY%YJ0uo9-C7a-ss*gYY1h0UALq`Bq0CGIM4fgi{z~G9T8!jDu9Xbe{dJw(CACV9j z`LF-}ZxDyWZ-JtoK7BelA@=ydV;D<_i9Gz)`q~O;xSYHU!rcWTdaRpWU0#HPWA9+o zKhX2;{X5j?lN1kBKsa`sZ|`bvCL$&>Hq`%he_p=^?Sh|wxG{{C<m99Sec10W1jFAC zKm5SRKo8JjZ2+h89R0c0u8z@}DbRUCBLg;8=EKc)cOJ2`vA8<f<JxEF%Mub|5J*0M z`}W3-5s>J`g}KAc;IwLLsMXb9f(A?28bFs<S5p}o9|6G!4LoArMPn^MURYeXdTYD0 zyAz&+0KR<r)hw^wy=SW0?nJCgVn!k0bILpCMwafw>;68Tz+q8o@$|~_wXsncGLqt> z@qb-wQyrXfC0%_!Q86wY%)7fdoFOLXma~Y?ZF=_s#XO9I=g!g1OpPCT-eEX^{U;$I z8NAYaz%xXFJ^+x1+k;yW6gZD-YbuKJbmwSypB(p|`HvnxfblvXTgnl|JUN+~x=L(( zG!->fazYGNLSR1)jr3dFo59lGzjqh92`;CEwWK7(oIZUjH!J-}=HV1S!eIqJE03FN zM=Q=->27VTgMd4`I(HBDL%3>SsE?I-)CDlS(?}rDprxY)sRvg-JUoPpc|rd3;60*Z zB5CPpmKU+4ww&y3A-X9jE(8aHRdc?Au(1{ntbd?i-{2K+Tws=PwL}JH60#`pY5OHr zkCa@VZ697k%Y~t^u+Y%t<iPL{jNQj!9wJsz896R~0WMB<9Jxxz9|%YoM74Yq%AdaZ zG04vw_Hbiu<@l#wNRS^)>`!0voOqbWU#|A|5EB!nrzS!4be#4pOY<<wfQ;bKc`|Kn 
zjhx&pv^Zd4{_Wd03=HR>O>O-p*mpM<N7&l)XW#yr-3RyX!tewG9|S3fH*!H-6jbpQ zdQb!A;fG_5=Ac+xn6fZ4m>C=3`Z?f~n3xzWOpUR=3+7f{L9V>Ec5HsmzyK%n1-Y3p zpr7aEZpC(a4h9;~z=Wju#r2het5<L`FTe+w`TBXo&skYoINZS5!49l-VsZilBfYhS z+0}u*-CZny)MjmEsi(6oJR}fKtDTd5Ol%bN+^);5-!vN?z6Qq-j_{?5{Ji28z@Apt zIUuLIymP)~c)hBsikyNxr?8;2zYn~Pt)<1(#EnlseZ=~8-~%JWf{96pd4)yTd3kX$ z?}>2w?ld7Kj~pVRaB}57oEM0j4mXRBiG<^<p{DW)?SBrI#ravFB{n7!gFVEC;6Jf4 z521(T1#uXEtILbALI@u;Gc#J<SpD%B7nmR%hVN~vug1l^uC7*LaRDyoU%h$-0i%Y7 zT18d)+xPEzxHy#*<PJ9j2+tA_(9qF}3JV^7x2dk0f`X#1whEQLe*Fqp1|u>o2%@>u zgyb+%Z*Q$*8!}=~pE+}mf&R?dGfj0^Iu|bw7a#xmg!Hujk?Z;fdbpTpVPS$8kC=o6 z25W3HG^XK*xgebSuU_fy>A=H0AdU3##Kk<s?*fASaS5>y?!hp6_Uzf5%+$lp40LtQ zoH+x;LM&-xZ)0O?iFLPCl@$pH2`Q<mz+L?O^Uq1?l@#0>gn~vEA>~Dv=So_}xwyEk z?X0RB>Up@ipwGft!<s)a)YCb2`V<KXF&t!W0U<UX9$d_OxjFyOsk0;;lCI%}3F$@f zk+$ZBZ%52eKS9s|dk$wL1)Db(2OtFax9@C!(RpeRJ*>8tkcfzemIk6EtUCk#P)0_o zx36pO>E0i}yq<P(Ng;5E)!RT9gQKUWrczo_IyF5Go65_=Vw0pKC775QWn`srUDN~2 zEX<9URu}i4Jb@pMb@0_vS2;~UOhm^=N>16)R)4sGp0*~iAuBHjxO%y}Vm-nvY%Km^ zp)K8AdV0E8;Q<Q-8xfZf=ip$+y0>d9%kVQz%#3Oq>)P9!aM^Vf^TL9@zFv5AUQm!9 zd}Dn>ZF)uu*hs8LSW{gUdW4*UoRW$n!qZ(@MM+Lp3hUP~FfmF>OTuu_-GLefZr^#J zrsF}#sw|)v7*{z`(mD~7QJj{OQ(9f=6X?gr#&T<W3rnIvLyd%tjD>@PiJhHKM2v&? zJRau%@xxgnRte{@B5i$BMn-yEA)&O4Bw+7+C`Sh+$lnLXox1v(H$T3|=sXa0xS5!! zFf$7iYzGGMkJ#=j1x}Km?|gYx+4k+tV=~`Zhl_bq4D-}9*i9hD+o-82msOTFpyZl- z{Kha==2=)-R8*CinHax`yfB52mq%7cs;95>_MO`_IGESd);L2*LdqmUK}mhNr4}n5 z6qJ<8YAWOu6fTbTSPvZ&3$v%cUu|ovuC6v#<^gGl$^jl8ULLF|3pgzBGYt&&N-N8o zo9gf|FMK@aTiaVItI8-*Spw|LgT86$Xc?OshlGbhjG(WheYhEPPd0WoB^4!bJ_ntu zxos#By$rLWTWZ~SRnKB!!$3(*bwq3w2M60NY-u8GO?7fg3T}RWsN%A6JOYBanD=%E z=Fbo?i`oSjT#%FjZ;tg?LAWh1FAJK9H7!t4S_nHF5fuhf9~T{g%iD;F3bC@XC@U*4 zGBRN02O4m3bLQsfEUnD{F*;vb1aU5OWEh1Z?l@S%LlP1aXlSSc^SRmSu+LbtzET$@ z*xA{PjPzNUu`>^w5)lz9uPR?$nnUS4HgBV&rGA!(l$uA8oRYS*FdJ)bnUadi(99H^ zm@5wE8JU?7NUy5K1|1yCqfQeAxr(Z?sHg~5oyRoL)z>L3DQax2#m~IYQOw81$Mjz5 zx^kt9iVC}`0P}R`=&bFm1A+spYHKcDlyI=I!p1xs7dMxosS%vA=K8O4fH4V0<XmdZ zN{GOmp0bYlvi9+`+yZuX*6pngT*pvUQc^}qS<~1|NCJ1R(#`q1(*$RkL@a%?&!6X4 zl$XJk2GP`11IL9`g<6~Hfq8cimm3o!Q4wLdVpu?$lan2E26X=QtCxoxfD7f~;!Mv- zy*WE|<m8{Dm?zcI)haD70jmIF4p?$>aj>z0q_AQhS=m{6dAZHZO~BhEC&od)!0N|< zdH5iBQSdNWnTH4k_BlH*dt&kiF6MQ$)Cq{m&k1T1lT)WAVrd{Fb@3eiIa?QJS~^-6 zM>`zMGrD+sRMgkQ?}G6i7t-3g+EP*%@iDKbT~J)u&~WJpo%cu5(&1rVAU-j6^v2N2 z+A<A}6e2`_^bGXgzTU-U#q(<`pqX~omRNP3kDt%R!H$HKq`l=^UtL|-P0Xf3cEQFW zroQCzTv6+klCtXh>LOO=sc2{v)YJ_ut*tze60)**nE(C^;TZ;DQ}1+c9$qySMO@4q z80f-J!0K&aNP-=9aJ22~?~Vuy!Noiv&CShaZDWClc>pXqHEC*k?3m2s@iy8znxNtH z%X5v*^<F++Mka<jdfK3JpR1m>zM-C@i(_R?WlB;!BLh9wT>|D|=y7ncVa3HfbP7(6 z)bx~*k*m1QzK)g}AqfSYpbjA^RYpo27NkXlz|%T;xPvfaJ;ERtz`V1EdqrI>Fwc&Q zd2*0(U5JTrF%L%!4nlrWK|>>o`Q_iwJU=i$IW_k1@gqFUGceKzg$A~?H9vj*T0vg+ zJP)^*n=4oagp&hQHy1}AFLydR8ZcuZ^{|~6&#_zzl$BFIOCv-qWNHysRnRtD(l&AX z!49^dNsXG0R$W)m$`#=p7$&KpfSY+Iu=8gagp52=xp;WhaWD_w2F7;=9L%?(nYXgG zXzy$b4-3Y_JP!}IoxL?a<~cbMlM}`#M~|HFb_Da}WI8&U<&~wg3)4Nlok=MPzWzSm zNDnVe_3?y9q_0<8d@S_SjI<;M9L!5z6z2x!9c@`~Gta@vo{$uO^=cp3S3Jzq@Ts3A zrpQc<#~Pvn^DbVVxS2<J8)wk@x;hl|xV#NHnVx~34Epjqt_B#LZ)`kD=l#5oWFDkx zcXtPM^%>h$2@LbWVZrU4?YqyP2L<}U$z^0>fcDhXQ~(JWxw8ZW-+libm`tF)>v9X$ z6D=+(NYBVb$FF&YTU*{fF}Y@}^zzO5t;dI$SEZq&)if}$arZ!kL`W$r;=T^+beez! 
zbY9;rg^L>}^We3PWZu%stfj3fEF=&g^Ss=knGg@+VIKGy8y`L<^LYF`Fkewse*e*f z$9p@oi#LZyt_}?i3|$`>8tO+qULUwVJP2R(Xm>X+I|JfbTsn_o9+%ET0}c*$2<Z9; zdT8-5Pf9__t$c=vJTol;EAt@Z2&DHp6!W&Y8hmD62e-FDGcPMEeYCeZ2J_GWzqgs1 z8Q<C2!Ihy!GtYow{?3DY_jexGHZ^#{fNN^3peWDE#zKl->0@tW+1u6r5%;~aQK=7B zCRcCIi^?{HO!Ay+-U(GBISmt6CN>V50nm9GIyy}QLmPKbL};Xpk`gC78?I|oX9!78 z(+lajCgEWorSrx|F>h(s+}0HOM=(D=G5pW!JcjwovhJ?SotInNTN)wgK-W5`E$GLt zjy8}fXkT2A4a~p7<8AQjJUa(lbZpd>{%-utlXEJZCM3hnJd>Lb@>tA+pJ!$H9XgLr z?2sLw`6K=O<n$OYk1LA#n)&;8AKvfk>-G!q1qO&oh~apG$$7H(>t*-Hj~*VrCb%EV zAdsGy8ZQ}H`4g}z^6L197xrh=j<;P~e)r>t1I*KE85r8Sd%B^RSH{mgcpD7!c>KKK zQ95sNfO)LBN(}S%4%W<gy-h+=!tt4Js>63ps;s;O`WM94Fql$NQ68+6loTW+#F$m# z80PWoJPTf(KZ<#EV4jp+?lcj0=EaU;UhHe;@%Z^8yv-M#Z~TLp2hqUwtG{CY;r*BI z-e+Z|!7*oLWiBoJ_HOX~y<P0=tc|r*c=~%uRpr1?4=Xz_5xo?RtTQtGa$4<pMd!@Z zmv6s#8#*mRLt76o_pm4w^SI{QxH_ICB0I|{s^^-Fhk5ejF~9F^@GuWLf25xun;1Eo z`DGOISf>Ph$<L$mHb8rTulL^W&dTz_e%;(yo0_-*Cp|wW<4ESYj$-~eI!{DOLBb|| z6!SjkaQJ!gV=_;HW?uG>Vg5*OgJymgub=<S{Da-c+qbqsYE+f60M^#r021`WsUNUT zI@<5=#g!FJ?QQ4Jb6ME<5i!Y8N!fd(wx`tGsOq|jg?Z&8nHSMV;MIAu<LZ1^NYFp8 z^T%NxGv5X`^Qb|*q^uZMRsnVZIFAeue){4$#?Qa{Czy|kiw5Rt@Om3y9<QHga{sLJ zc$oj3w_*GJG3*!SkH}RD9jWsPiE&eCZ-dv*{~E*QW~ak99-gHA;Ql>$os66uhGVR& z`1kX)#g&zf?X3{)K7M#7F1ehTMS)b@(k8M7u<xF{rJ|%rMMJG+Xk>Sk&U@@L@0N@| zZ}WwD+vDgwp1chn=EZ)C&TpVIZ?#or_?Ry(!o@u1I;@qIS^r>fMk>y{%^#xkx@d26 zB=c^*$aD1Pj-&IxJBFqBL%q#0m`CY+TSwdTw{M*t?ci7)Zf0p_LPSg?E+GyGLBNG; z5d6%1TPu2<6l%^=aYY*yx5nA?x_Zd`^tvg}kW>L7VO4D{yW{G-sF7zHp8WiQT&3+% zI)9uP_7CtjFJ3&SrlC&8Hh=8Eby%G9ZSLIKf}s}LRMu3Yu1Wn~=7oj$&!6W(Gv5U| zk9B`A%oG0sI)BtPDe~icn_9f{Dh2V+t31d&uGypqyiFUJd2lsmMuu2^nwSXmbUArh zQXI^S@9TWkKJ%Z~25#SZAS7u{z@et?otIiS6<geEX6J0>;)0*~&pOW}YUZ7B1oQav zHg%twZ*2?@#p!Ltj>_Ahb)Nm0%)fvC9=<3v$RAsHyF`kaZ-dr(tZSF%=Vl-<*VEH3 zt1L@Qh&|HJ<H}V6A^iO3qcPqF?=>l6Rw*>|@&6R_;B9p9c$;Izu=&LW7@hwonCFjA zI4<*T9j(B;lA^qwmBry^JlveDY^)ku8W4iDVw)NM02M*%zN??Fs%&U&;o<&jJ?zfj zQ)wmpvs{|;4oUG9qw%GK-VsTzL7_*+umsFvR(@Gr+&pT(J6B0UeC)i<5i#sB{X7)l z&>Y)@^T&@KLQJHtu9{bn3s3T}tt)A1s3j&QHZe7<s46ea&!(cL+{CuB7?Zbgbh2Y% zW_t1bIo4AKgv7?j3=a0-^EPPaamBD-yv?y<Sly%j{886oiwlmNd*J7fgrBp#cti~Q zc=rfz6BPPc=am)ZZ7j_XH-mVbn}^%P+!S`c1wZqZ6&+W4&-3!k&rTm~aPPrm4iN)F zHf33dq?oen2^AxLiMdj^ufw94Cnh6gk+2WW=NAx?k-G3Zn9t5gAtoWVv9oS!Jvu*+ zX8sSjCiM$?ewU22<l)yp{`3><gMy-5W_AXs$KhEVP*M_Ofq5HStIC>+hPrAHz>%Rr ztkY?Dc(`C}0z2{Jdn}7tn3)-+q$L4>YuEa4>HM*oM;<e8gNJ#2BLg`(8QjTCGToz? 
z7vMiHA_6h27x;M?G4U`zF$FsR7|(qV7@0ulL)yFAF*<L9P3Hv#&a-o{+c`PFu#0u= zz<vx{R8ihHd|g0*Z*uJ0WyLq=R#;JgvROyYAt|c(T3q>cIaPI57DhZePsDn`C8GF( zlq?^%neD5~i$D%YA~5e{XLGoL9}=~0(b3tV`N&))Du(@I{QNJddwuAF{9=9Uf`k|x zLl_6?>FHj-#PVHw+M3|!UEN)(FIC-~ouZ(mNK3}zI+WyP>FLku=xD;G4!=7&J_?;h zOG_guIbnEsfEMrE15$Qb^fjsY-@!a)zKsiBZv)31@RO656A<9%=HfWqU}ky()hP^g z^NR}V>ub<Uys<DZEXdEr#R;eT933rIJ9l8d&5cKo9^%SX8tUm%QBxs(y&Ibwp1yvg zs32!!X?D1ojPylXI$A?x1Bhe?uXN+lc@*>4hXnYsFb~Dc!{Zbg%UNz6QM1U%lItE3 z1<J~*c&<qiu}XNxl<65+(9zKxUI_(O0r-)Xm8PVk^guWt-Utj^5WXQi5UuUaM>2o( zd>hdD<1t@eQb<fpbfu@`@LS7^^U{(Rz<6A$EIs`0o7b;_WNvOQFCWi_#=2K;-$F|{ z87wOrit@7HoI>w#vd7Zdtu4$bDJiXNtkTj`U~IzUZ3sxn$vG8Jem)gH^XKr)w{gKc z-)6rfQ&3P~Jxv3B-N3xDsZm~0ep_1;{I2$9EF++on=5RA8oCd*B_0^&(d(pQ;3vQP z@jVkGgCEl4aI>0<Vt|>Ekpb9<i;1!@H^sUHFFz*(bHz|dfbTGS`v(WZ{J_XC=seaD zxxKjt6oZ^e^emV51@oxTf~$$;SGVr(VViH`c$$!ugk91%p*%D$8;1LaOIU7Lt){9B z*fTQHA6{4i`w0v~9OsGj=<03{4-LleZS0QJ`C~Kx?!yQ0Y{2#}zp=o<CnO})(o**i z@agJm|M=<WsEAMk0)m0QuEUKVJ-olNym0t*4D@z^Gzkd_IyyV%7v|2)PT}!3XNk$F zcvT5VD6r3~M9Db57*^~Uxypt4*@)1fr%#_?T~dKc?&W90aAsv|l~<TQHaP~T$;Z<T zYs>Ae4Xkt259&NHe`EamPe1?6&B<YjV;S#@=g)3!Z!yrHv$3`e4-4kQu|5t&b!}xC z*Db}w59s{$sR>b0;q3HetSz5E-MPKJ$}6Nu$gC)A8tIqW6<%;<V(HFd=6^U#M8PE; zoKjKRKFz|)E-s2?DJ7h7*sHzW9c)9G4k;Nb0pjE5eQl^8;5pLIA9YPCetdG|(=nL; z`R8BCOA7%!XM5Y@1MlKsclPXAad9ydGn1&8h|$q&A3lD3^86{psu22Nm72rl!TozM z^n)b^d+hGzR$E`QzOhD&<Gu$d=84E?__c`1Y0{Hp@#Ssk@#y>!-ezfGmW+%H&z;6G z7l|BQoeGKzSGLx{{^7aRXHRFlp}r2*H3b-*kByHWy)gu*z}(arYxuBVpohXfXJ(~? zL!Q9480{DI-4x*<WTqzJ@^#Lh?u8ZQqc>-ajSWSF1n_(vHy1k@10Sfput~UoW>-*l z_cz$1=Fgudq&Y7aky%~RHWd&SPfAL1{99G~czc{aeOgpZ#N5&>GAev_VJ0>z4A(lT z<1>HE7#7;xfBewg)aZv(KPbw}K70D)$d8yB8=g9S3PLbYO9;C23UXK1mr-W;-Mhxd zS_%qsE>8Aw9OvZ#U32_=JYZH;Rh3N4j3Xk$2Cnu#eDnY>^Q5$bI+V1GSl5h*i-~-m zdq95<>q<0qgD<&C95E~?Gjx9v65^6Wvsq9UB0HUsh)`1UqKlhrS!LPk=2}%{8Jz1Z zY?`^Xxen)ufZ*)CySER&i*Ry)kO9Ly*uv%2Wz@=I6{Yv@vEQ%)dW5vJWM*CtBNHP4 zjCEy%j+Vy%_h0|z?&5^&w_{@1p^34A;vzyq0wo1Gto;!R7bhDlCnpt;(itAzi#G8- z=^Y`ty-VA_E*tmuaQUB8XPJdnqH^l;o5nf@Crpj?Pn|k-9OVq~^#<SpV>k=;j&`~E zIrkquh)1o5rNMenC-}Xe=N>RKzIycvtD1dgeq!{PH;jGy{Aur%E>klj7;C^71o|QS zd%M2*V>+*1y}W+4KRCb_4nCZB=%Jcg>c%F9fx!V_#`e&SLF!jGR->ZBLApsuNwhT7 z0Fv&Gw#E6`t@YLQwG{~D`?@<aQj=t5P`C5};FVPr;fFgToQg|}*EUw4J%2_?Ng;Jn z0=%?~qrIDpBUDd>b4Y*>KOZ+C2|10R4)r-EC3(pJA5SkgXK!~GPXttFKcpv^LnaoK z&VvC4TZ{BSpq5lPJ7U&WAU)lg80nc<m|c#_+rSP$%!OL>sH)u4*|xQ@zOlXv2gKaO z2m&8w7A7riO|YO1%?)edUhdsBfyj}F(9YTt+Q7%5J#^<lUnCf80zyJi04IA}gfol` z4*L~$2DQ*qNFXLIdT8V-=I@WhL`Q)2g)XlkCu3=50zVwn7<S&v-IbDpTv$XXIzBcy zGzfj;0y_A~qr=yrXTk0@*Vjm15C^My=FAxhaS<Occg(NG908=K8~V4SVwu8<vcb_| z2xI^M_TB<4uB=-Ve(uaabMNi%>y9D`5Qsq%B1F*O?i5Ag6i`3`g%(u>g%s{?!QI_G zxVr@jnuNGpM>^?r&#Y62By@jiyWQXXbLX;IPd(=x_SwhY_3n4=I<=Q2(E^?kw4<V= zV2(EiPhs9|2dc9J><*{Fcas0Ch#1=;p*@0zGFIV~nEK!>P#}R$cGlh=uCmhNpl!~{ z;=<C(Gpa@gC+7S6J24oIef#&F6*xn4aVjm$9UmQnyMf-5nW>4YvLZMH@FB1Y=sK(p z2Ero00N>`8`gcG45EUK_`~bHR`0C@0LKfgTg02(1aNgd*2KqQ_OTyOwGAp=11%b1F zVB7{Bmb|*WP*_q3(Wtn@MY!{D<`578D$bof0|FA{3iu$fAN2dD&zw?IQ$YZ4W&%pA zsHAXhedYU)V&?JFCmmgFpmwyhG>#lS0vrg2<mf@Cw1p+u!pQ+t3<uJ}XaM$XNY)97 zv4g{X-@bbF{{1@;4<L|^A3t{F$RW-H2RJwn>|tf!v3oBI+kOrnzC$8d-ZK{=dV(9i zV>io=UAy<}-NVAl%ErzP4l5`m1dmloS@HDgQ%9JyOaMGS8LWzkir7=gzy7`rpj1ra zqS!?-s1KnTIukG45Kb;GLBR`Z>Z-<gqmb~Bj_wZVA8*~e+uGi0V5ASX3#r4rQuG=j zx|fre<>TXpNS0|ln+Cz=K^`Q$RZ$8K3+@~2efAQ;eD6TF2g5^JMv4!KSfK{bR)ZK8 zt}8GkD8Ho8Kgb^<Ah=#QB6?%N8R2Zj#4nyYeG)1kK>YSg*X}|_Fh4Q~uusd)Avrq= ziwYk)av1*0b>IM8AUrK-6K>|d{riufI(=41=rHfG{U>Dip4XGK40F5GW@O{RzMpG1 z3p*=22MfnRaTTMm^s4l#;nf>=Z`@b~2mw`{JtqK1+|RrT=p@FQ_k?3VH#aw&p|Y|f z;K#_sz|EajTw1iUhRg)j)KCLFaOTWuKE9(6(|t0yQOLC`$$NOXiiwJxK7A5y=|5JG 
zmzRx<4*&g_M`--jw-ZyNc?CIM-kxUWrfO=+z(QvPPQw`<KXDu&Bq(%2R!&A!OC8P@ z00S)H>*rluS~xp5{boyl1V_AnW4*ejA}AyfWRI?%j*6->JV!|>3Do2j<bd0u5u7g8 z&{!WBJ~l48v+MGsM-S2N`uVpnYa424Zd4Pzp`@JBSy9=2htGV&evpOx6#G%3!@?Lo z8OvP<g?4cA!^RzKT%0^Shj@8UoIZV4P*6@u$&Tz4o17F99c4l=Q&3ivk(ZNI*fco> zc};DMy^C{E)vvvK0GJn-l+e-D!RhHLC@P3v6crH@k(81Ihyvh&<Xvbkc|`@&^Rviw z+)tlQ%uFPuCX*cO)YVlbBqhL?B_$=))KxIrnpiBxz))Y`P)`T?JZ-J5rj6Ayw=f6H zGkN~StGC}RE-&Qd=eT>hgT&PZC5OdmVKg<gG!WBN*U(bOo8d#lLuwoAbBhW*d>Dp! zV`*7wK(eriums##MFj<AB~=YIbxjQ=6=k^CPZutW+=R_*&n_zL9T^&#o~)>^3yY2> zI@oFJ;*`}?rR8Kr#4nx~7CL?I9NbJketv#|v%=D{BGNL4&xo?~E3*pfE7`?*rgh_p zejI$lYzNQo;XbLXV-b;2l~q0b{N<}RZ(oD70HF)_2gE8|KDfM;v?TJ}B_#j@a06i@ zJa<5ag%!cWi=JPYH#<M`>h&u$u3Wpmns_PR+SVE>z#W5{pA7I85gA@pQwd}aB)PQ$ zx?pKVh>D4rm>B-ykIW1Y&|SN}*4R`R8yoG(aCdOBC)!xSb1^f=1Eqt(f`z3y2rw^i zkF<;w;PaceHvVzuC{Q@J@80U_Zcj-~@(b{x&|K^sNYE6VHq!`})|MnY<RvEP4$3M@ zCMHKgTzwL#Xiov>@9F7Eu(nmzF*`1zyqiaWgI|j0g4%Hzv(swiy}S}TdCu-UaB3&l ziKG0dPMyCXA|WZOtfFgd?C$5Ak((2rk__>hsU-nVFgGEX<3AeFj^rC0Skv6B|Eu#J zC@3kAoE+Rd=+0CZf|Vtp%fQ&s$ixWF+{4Qww=i#Xa_r{)`)H~Kp!)9KuH>|2Pj63> z1BqZsFe8|PS(B`(ZWJFsZ-$Q-EI5)KQFHpZ0Q8cXlQ}sv@%-Cw-+cG}$@6EEvr|>o z6;Ux!&=i?Qc7UGS(H^|X)`4W}VCUxHmY$tCFf!CXGE`7n44hALax^8F>lqs0^mX-& z49u+vcFv$t$d)7<Qwze@3&#`8&}%QLuIe8f8M`tI$ggg!k4sLX`!H;s9E{9NHLzGk zb#*BPMNui~3t|@q#U;fRm4#*GPl&5&8d&oRVUe$^9OCFnZRT!CM+DV5PfJ@k`^DzB z<kt?Pc{5+WdOg2*rL?>_GAi7|%N?%NhGb)9Z3zz@Kn?~p=;&+@_(@Gm21&ereeKgM z?!cBH7(h1o2lxUuC^Rx+t}gINrMdX|`{Wnqws*AT732m4`hBWEMXS-sAo&W53zk<F z|7g+^IPRk-4*`UY&Gk8X*$Ihp;Sr$_UBXd<g8X4QF)<GMg4VXC#ih9yFJJsp^Kjf3 z-@b%8UEP<fs>^e7vjG2>lH#F3aAa79Mr!M-28Md?+`IF$x$9vSZbf%*XG?2+R$hU< zXRHdIzF!c_Cu4C&oqQJKE^HXcC5YQ|Tz2;<rQIhL&PpiAscB%1@dSGZ3d1`gJ)^dz zsl2W>B`Y&JAt5RuJ~AG$=!E#F_;~Pz+>+v+(Gf#KeNgPWSPV`Z100MjU@`hQEQEEe zY^*@7K7sz=J2?gUG4XLhVZi~xfuRv$mr|3eYOBYmCpYfi1(M&)3Haj0&AYcpCPr%N zYtpkaV&h|?q9fwsV_~_nx}x=ROG{f*b!}Bid1+}yDSUo1=+1!TZ`|H^@p?1!$fIY^ z)^4m1jSMukHkDUb7M2zv$)yVO!OL<A@`@`;K~OEMt}NfUF+MZZ(Au1llNAvY<s0bF z@b?V}4US2SPtVQCEXYqxOO3gdfLwk&a@BC{VCc2CcC`;rPoi^@4~&mBcea;QRi<QT z1w=-=crk2TD5kb{dS(PoeM1$jt}+%UrmQKV?Hm}J5fYQeb6#hUC>}#ereA6`BqvDY z$ic~Fsg;BEozq(x@7A4LpfOrn8$h>YWThn~CnjEsgGYZUIWZ$E9X4LR+<bLu;pwv{ zKhq)4UOb<forM3^)mPUtt&VB1(AL&8IX$+1Z57nW<@T1kkN^5)ZI_#-X2u`?juDtI zkJ!Ka?$MJ+;B-?n6Cks@dpiKS@Hsft3(dlDq2>3#{L{;=m;d6$i@W#kKvUrUpn_&* zCuXip&R?AaRNsI2;MMEbU-TL{0l569&mON|zY(6*LSSU_C_3<K(8Wx`MNA?t65@|3 zI_?$F+AU(ZTgdQ&s<95<)WVrU^A8HUlw43%(>*i@B-Yl`)zoqMXV%==(b3;GHa8m+ z7b7h#b?Wq~!$*%G3Y#el(NEBFdIoyI5#e<$O@rej9lc#G?X8V%Ep45bfnevB76I^R zW=u4(4hs9b4<7(rfvAT@2l|KlhDQfy=4Mu}uiaw)pzr$48*4YNF>U?h=Q?b>^WZ)N zuv-)HfVRK?=+TYaH<#8|7gny$EiKF}&d)4dnVFxxa&=*8eGSd>vv%|5-0~8fUR!5- zO;ba4LtR~SBjCAfpdT*(GIH(h$R)S8eZ2N|=C$_@PEKr1J2Emo1;}sg=*TH6k4j1N zjf|oC1=_lLm^nD>TUu)wn}}){Y1l>AwvUa>tx8}i?7Z@va<;nkOsA+?N@Qhz!+3Vx z#K7dr&lvB`cW(hCAOnDS=jP$LPlADrSzfuiaqH&OXHUQX(rlR^OJ2Qs4ZiT;(E~7$ zyQn>Q{P6M9#}MuTFyIqfW>!G|`{2>z2M-=Uf{pO6Kbhs{haY|f(1PFrxdmz;43<I3 z{0EwarapZ50Ij`)81?;oXyNzwKLt&b)YyjO+Mc|co)T6`qUP}z2?^(Q{aMcI?i9uE z6frxlZm&zQAky63LnEWpvx;i#dPYY9)uXdBqqEb<ll{j9pt#BL{PHqeCr1s8mb{XJ zf|8=Vk|O9~MW!SMQ4}5%1rS<TTV1+-otaw-$(RXv14;8K{qbh*<4@@VU%Y(${P}|? 
zNTz$(4y!L-zy3*Xskd)`C7IfmG@3}}!AHz=vCO=L%q+=|AKiL@<OfGnxxtyN-MR&* zG{3qswKzXHKljNt^T)xJezGew%xj<daz4S?>9+p<;<|>Uyu#3=RFBX|CvSf%Dw0th zV~iKZSlI@ZU0Hw7J+a2GK|LUfXE~+8t>mQdo@pOe6_MXp+&Z&<^ZqY=^#jkE`P|>Z zbBBdL^yvS~md^22ujI?;4FY&nTrXN)5;2bxF^d&3jovR}yhqSrm#{ILDpkvbU_qu* z142SCrDT;=wDj~Mx$~Hrbhai`LNlhJ88`aJ#|p~IT)jNaiI&D@rpD%*29d9sm4%BN zEjc5-bD($e+WOkZw6GgLP5k{u*duAweund(CR2O$hM7a{&D$>m9(G52jh|#meu^9j z&5sNxfC3)D{LPyy8ykpAURwv`Us+k6UR*>nBQw)wZe~3Hxcud$vCRJtOina(bmUjp zB;@1;#V65&!|gqNEXXtiB1v7}NLbe;ET`+%lW((Y2D#)&^42M*6dl-3DD$XMjJ<Mg zLn_iLhu*$@w++<4u&myC6p+y+Z5hd_-~wnDHjNg-N5R5j84}A`EWrNXl<6{hR(LxH z2Zo<-bR3XXWpgX^;G2(o?#kAz_n&NNYNE0Ia%y%~RD4`mbW~VOG*~!dF|Y-;*EThb zOieHoB41zqY3jhugvd<TKm2L-z>mn^k_Y+qn=huz{Foy4^(T5~rVHO}<k9ozDB77M z4=%QuMhfiOojWMtS8sj>{Ot15^x^^v_+LtZ`?IW|pOQ><3=YB_1Fncl%Y++9^$)hC zdzm?sbuEZ;SOXCg|KjG!N8i5niLcxzZLLNw!qBS@iJ7vVQa`E7F!ao&MpfT^^n4qr ze_>g=evcGVsqK==q3CkXFjNR1dBHdwOhDI<?VJw4{{K_<IH658u(Ses;}#Sik&>QU zS>4u$i1uH~l8LT@Um)Mk^!K*+b+`BTAfJ8Ru-ZL1&^I<ZHajzYb#Z~22b`HDc57PX zdw@Mq8URH!5BM*oynp)3dB8v6oGFSv?*)<uS_8Y!q;~}H4<CS+-MD-2v#Qy`@XS2m z%!J7E^Pp;eGtv1^?*o(MJ3B!TCg&H0BqoEfab);gQQVAe9dz(!=QIeK&Z&b}ZrpwL zTALKXDQ$yst<-j}7BmRvl`vyHt$70HW$2mX5?OWY!P9N5{#E6}4?h}FQ*~$=2UMsh zus%X25$BCU&lv=tHwrr_MPxmt@vmwOc8rHMo<MM>IQs+y#3rT`mOz{j^7V@Vn7q0; zyRtHW?HU*?pa~^E3IGHKv&+k?_wKLWxp!q{Wo~5|!ThyrfU&iI$OeA%!Gk-9&Up;r zM6+jZl{){3|9`ZfRNEBrPw#)GcK|%1;Sou5@BSxA0}hA)ejNckk^y{aX$nYwVSaS( z%J0DWPnNrehbvoJbIL2@vvd7p6I=sB?K~LfP85Bjt)`*zQB5*AqIzND@$}mLvs$zh zYBZczt+HdDfS!L~W(S|J{@(Mt5>^og9ywl#_3z%JVd=JB{|kz1Sg9;Anh)d2t>P+Z z9DddyNI*XbK2Kx4Ii+p?P0R0Z7+)m=a}$z1iO%p0i;B(6DXgjM9UuE*g8PQ%hNPrK z{{TO*<dmegu8!IDwW*~gv;r)YmX~O1s#_5)Cl?p4tgeD`_?RF0v&h-1bC|H-RQL#7 zpFR6b`_I3G{}(T}qJ2}wF#Y8dy`v36;DG9xtqhM!8aSVYb%f!;L#I$xbLF>F-f!Iq zpF`7AO+7sjy`~kFM5bmi!lTJPfz~v4yuFhy!AefY_?WIwa#`QTqnBl^<6JV<Ql`;3 zMx7WwM%E&{dHlL(L^1nGwSBTSYEFr|^vtBv-fgV@Ri&+O=A54IS%UypIcGq-fPUa< z-GEa#KQMk>|9{5@{0lZj5a(cEVQJ|?qxc6yADv!Q+SJ|sGgULOFh4jk&d+~h&z?QI znT3Uwg^P<*T3WKXtrblmJ9TwYK;X>R-+aAe#||4?Yask9D@#D9DBCkd&c@F&gMZXH zOxQnubpOd?=FrV1=ig@<>H^PSz5edQ58r+N;Q^5O$A3NiI6lKvGLP?lL_13FKMPtn zH9V8ynKB+xHB$@o=vWOnESe1ZcWRsN;o+)QAo;4q+<drcNc8IEYvDpOw6VvUnu}<e zDv{z_hL#?_eCrsHyI;y$!=+f8Ud4~~wGAq599rwVa?8{{jO~Ofw>nMTDbawQS=T+i z4cEV<y#Mfnym`o3gTUP~4!l_3GkO6h0qr_IC$zo!F<yIh0{%rWV!yr*#>C9j-pSt6 z$2%e>J}bAZp{aj-d@I$Xm`9Vbvasw0%<tT}6PDqVkB_&Zxe?GZGBbVX@F4*F*I$3F zjm50qdjPls{jjjQ3P?i;_WJdoh#aP&gpVK}jcq@U>Y%pC{Lh~+t}aJMMTlP%wXwE( z{rwMUr?83k&)oj-AL)Jl)~(Ny1~mNo8dJtE%`9yuyGO^VDyu8Z%uQuvrQ3VD;9`G; z?V<AU^kj2)H%ju+=~=$f@f5!xG<wyuve7a$=GSy^2(6r0zPobw*+o6iBT8hPXN{^; z!C5{3gwo-Lp|!Hk#ewNHK`A^$RmXL`waCeMubjEnTib~JOA0MG|EM;DP2PE*n)@l7 zFF)4%xHjXMmM5R4JGZv?SNLca^H_N!Ykg$abT?W+NJwHzc3DMZXV=Kg%%`p##r#p; zqkwi`dMPPMb#>MK`}YIw1MEdbg@Gpl_B0wr;LIsWN%6YIIsiP_{QCOhwQC^nLDg?O zd<36hfItA`J;3wJS9hK~MNtY0p9xyTfWV)>ef#qL`zNnnJ$mut;j`yw1y1kWwexR( z|2tM&>&GAe7XThr><?eOeE!}0C$HZ?Xz&T@NNbNC-F)y6CHcEgpIy6i7g1Tz*MJw? zy}NSzHb^ff;Ai2qi%W|*xj4Vs@eKzDN6*l}*1YGi0IPG?u5I1eH8fb&(gsRCAtx{N zQVK+`4qkqi6jvjNUd=3IvBpPreA251Z$0@ow|0<2nkZossmG`jHHp@ANT}{xuI^nb zZ=ZYj{zF4cAJ-YpU84ANMgbbmmz=}OKFxl)UH(kz9+^9&PUljhe52sPr|o@A%Y#?b z<ERGxh`Q@x6)K-;gn)CN3@KF8$i&3X(cZ%wR6PV`g*El<1N{i*=TOYg4UCT=nBTK| z*REam_I8inzPGa{0oCu_i_m^(SO|b;aAK@`u)l9)7%BsQwso{Mx3#o)cP!n!)!x$? 
z5fKJj0)PkLzkc^#R#rOf)!fnq$lth+4z1h+pgwu??#k7xIoX+Uu~DsUEw>)r?;jd~ z9t7!JcJ0*EP`h^f=EBM{(EGDD-*xtML$hhA$?Lc8+<*QO#r*8Td{2LGPk-Oqty|NJ z3viUOvXYI5k0-9oW@cvu1o*+0rq(8i#E}HwQxjofp=jsD!^2%xUfR}qd2nhHjoGJ{ zmr_!by?q$z8EJ5VQ%hF|CnxGVItps)QVNSBQqz5-;y@GHxOpNZZ)uH?ygE^VlsK|* z^X~IE+N20BX&WrPN|Rc848!nCyWBXuUfMoCFmnx!ScAed+4&S$q^v|tgH;?75{tUF z5&M@E6|+DVdFQVbTsX15yc%vt)ZGrL(hjLocodyZ;Jj6RD-1$vFy<s2!ODu_2HkXM zQfhi}SyfBx0CTJ!)iwZQUM9~2=B;gr%XjX!w6}2`-~y_LEvBaU^@opzgf6hNv$3+W z>g(%)JU(;g6dN1s;UkBGf&<|bmM?d<-+uO@sJQUl*)x0gB0KKe$8KdsK;a7f3=3vv zc)%XG(yraR4jnp}o|b$NNe;MoFWBC_?EBcEeG?NSkoS7JI{WtTgKE(AoI1r{R9tZH z*>h;Y!`+RIjSc=~Z%;aRP5`u)j*fOnND#Ef!omVqz)bMBU*AA)?fwG=1vwUGf%aKh zS@tvEF$UL|zka=cd{j<OW-s$R;s4^|Vx7G`J!4~_{j$m`Km-QGC!s@>mM*TwBnPaS zg(A+7Ro#sqTXW;#%l5J5g9`S7IA1;QIysy4i}>Jz)+@FBD`g!EH|~9YxkXLenT=nK zQ=KYh8KLcx+&{Iljo3e560#b0N!fj^K;C5#a#)pmNQJ_qOy*H?KA_-mNZHvwwkf`P z#yv7q(-5fM$<Bk}9T^*!nUh;l4Z$orZV57AE6)Sw7dLJI2cABC5+DcktE#2~n1@x6 zIw0vaHPx>_dU}Sr3Q#}5eE=2!PW{6}b@jFADpMxyL7j-=;tW^?!nd}zg4WQDUw`ur zL=dH=MF4fEh@cV>yJt5j99>-<Q&W>2J2z{>Mre?mhr6Sv>+$QiA)&#r5gI>mfD0<X z`4}1*n46h?^|!wvmC>>X5osxD$x>1h-+c4UUS{KPAoS?z>8Zf#!oq^E@8)^#LE4d! z6mJ_C%CD+P&Myj2PV<S3r3QqMP$h4PDEX7>)_i(FWvvrWUcd8<DPxzgRCme;F)wHs zYVMt1Ke$@awb(qoy7euTD+{Yf1aZ5C4Ec1t6l`P68QBk>zSsuspDeFlzdof#`AXW3 z%RGjMN$)%gjtAuJx#URuq^*r;DG_B8gs5h9GkaYEGJpfm5g05fuduYfv1@o3sQ$B_ z|A={@!MXKy5n-WS%mNyI<=%Y(fzzl9U@)41d2|)vghCr=c64U?;zcnO@XAUG-rim; ztSqoQ1j3-eOG=6Wc&H-?oH>or(jZz{KoA-p76Mcco1rynDT%DibOytdSrZ(A#m2@; zR74oT_RgL9db-cweFzN=hE?Wq_n?Ty<4r)cD=5hOgRlt=J}Ynrag$v;;k0WTYAr1Z z!0@mI7~b839vl(`*GnSVpyw$sFAG`-{sk4lDf~l2vMZ`$GqVF?6Y0U>jtoEO9F1)3 zf#el%#$PMD;@vZ5uRVn5RRrgLQiBdkUd1twUz?FoI@B<@TGqL6b>ru`<~_U<*?ARM zWvqqq!SdG8UUAjip#Ag3!98iWu-Q5L6n=d_P8pj6a-;)tb^v=YK20}idI7(4o*=;& zgQ|K@A5irmRx?Y>YuYXkOpZ??+Gg}9?~%`#zi|UFFD54P5%Utif_d0W8;j{58|m!p zLAe(sO?z+8)9=0qv_l=}3tHP-wX`%~UsyeV?ks5gXYam;I2WY;#{Gv-4IM1f#p%H1 z=;`8M8EOK0-+cHXI5-d`d{8)dpS?ga&y?u9WM!o58|w$gMxa-^`RwKTqsL3P?zp<q zm|VZ><jE7O_wMHv<gv1`!a8`?z{D7+GB6PlA*65O*x%CG4t}JptcaeQ5#A)Nq#WcQ zBRty0H_+D2%go6Ilsv`=l)McW#xJG3_u<R8356YSpOx)0^}Xu^4MOECBOrRM>RGDm zUwQN0&tHsu_Tt+!7mZj1v=3t3r7gqdZK9idXSPB6C(D(q*N#ft$T}t)2bLdJLP#EA zdnM<CDlQ;(4(JE&(V+hg<1Madqib#fRPXE?=o1weo0*eWRo&Rxg~WQ3<6HGSV19Oe zT}bc(ig_6sDbU8BFc0WmfAj=dWoii!_TV7Px9`L!#Gyh3XcJ_Ni;FWa-%(U%L`6lS zy8|tQIJx)Y+vOWKR&Q=xyM23pWf?s*w2Z}KzJ32eSV$0BhE;Vn6>yELtc*`YJ-sjw zQ7x)pKpKtA%q(0-WZ2N;1c_t|z0AQw2a#XDGK1FR$B)gdEN8&(A2Hw2+dZ|k1VZk} z(Iap&931-z1aq>Bv$*(0)J=2^^kTE~A#{b_(9Xjf?iE7vcr!U2lieybGmnh1r8`$| zK9Mx^KBh|3^Qu*K%0G_ra*uCBhbU*(?tk(7)|8A2){|;1GBzi484`pr6ZedJkDhJg z_D`1R)LMs#n!>KDnxtqhc?W<xM6!p~U6C;y4LbWd+}?A#EGkqLLo;JrJ0i`U;vWp% zcVbR{L3J%CoWTiZ4EssX-@G+>b@9ZB<0up{7%iaCt(d>|@X4oDAjzvYZvpIm{d`ac z2H@{Ng#V6R%!m@&bf>$bz-D7-D=jNtzjJr-+B(?E#>VW@B6?^P^8j*CKD(JM!|LNV z?{jmrpr?Qe0MX&8iEt+7p-pf}U~F($IJAa_8gjgyJD?gj_W{I55X|!d<}*KHp64J> zXK(lH%1T31128;9*`Lo72@sHuMoR}9l9Wad3Ul!CwW7M=?VNA~qPm^|m#Wiwqu}b! 
znMdEgy;R(}U(!n1J{yG1X{?_DF|wlTYIW}tXq$KMe{n7sZ9OXM8O(lF%4t0xF|!cY z$g*wF{>k$0!w<<dllF<1mE3X<skt6jqXF7^HQo8NJovHR2gC_1XE3`3jZUf(LDfT0 zX6Mdu4~c+IJf*O>s`YYvU*A^DuiU*?Ra=b?(IZ=|t=1kq{EYc6+6I+6i#Kn6!aM+p zot+(8L+$9%BS(%Nj))BBJBA1tKx0;R=IZU+3u~*$OZXc%Cg-kv#=JJ>@plO3(UxKL z!K?4Gb3S7pd=un1ig__H(Yb5ark0m;a<f@k5mW=(V`8II(~_Y+w1Dz_W_AYK7M_PK z4K0lvoE&gz&?7KU53$3{UwrGE7<)&>lYIk`@ku8a10qS=1TTWI_@ByjN?0*4{PhPf zl+6SA)!jhVDA;BkRCA*zG&Z6nzy9D?9#Bbn3oF0s9%&+v7G2mlP?8Yd+CRIE+do-S zYR8V+B=09CbLs{1Y0v@f$1q+ev>7LHetfDlmUBA0gpKx!nJVhy4T!b`XR4!@uUB|< zWNJokWp#OD6Fhws^R~9u_usrXFw}?Ty~yMVEE$>Uz~lh(-J3kW1@k)*+6T<ftggf- z#v|kTdv=4WZ|!J<o?>Kr3Vq9Kc6s^KN&a0(^Sdl9E$+R1y?E`~)$7;iS6AS_$Bu34 zc`R1@<@*of;$n!j*|kenR{Fu~?-DO1pkEU}m<%!of>0FmqN2j!Xb^5Ylbuj41YyP9 z*WYF5B6HNhmQ8JwnYQT>Jlx%b1GB5ELsOIBWl#acQb~1nRcl*jN!g{`f(T@+CI)&y zAOs6C6;zFmxrMT>(GF#*sBvKP;NpW9Z<0znK{~2B=IMA;AJy<wvW+chnFW#ud->+O zUwJy;e*235qS4-SI{Q^zj$=Im_7+~5+o1iEWqR$--;E=`Qgqp=MCHXWPGG%HV0}*N z_@2V~ozV;AylA#d*m%c9OFmf>Z4*-?8#^ME?&25Z8y%OJTaZ;+euRm6Kodw@hPM|x zI~xEVa0u!epaoFbqHTV{^C;#4;YbY2eE?}>&u)+<pqv46b&Yi$y<O08fc${gfL%E` zIRk?N+Pgby>#DD;EML8`apv?XWL%v&o-;Z#ZDC;!&4SqA;o%vcoUpVcppXW+4#y1$ z@cS9&9i8maxC4CT&dWFM9(44*A;3JGLt#lF@Hre8L>Haz+BZ5pfBhQliylHlQ@x<3 zE~B&}C^nwv<3|sPfEeG30;<MQ*MbP4>sd`AiwZp;xgHpP{>CF|V;@lR20rx?W^ue) zo_^`=$Z$=^!pPM-zxn2*iG?@&F{QmSHU~6Z&*=HcTZUh|^<W#ff3j%1q_CVb+%06X zPs-*L&iAygKiC<)fOCc+e9A6+&*NC7i0sm~=TuB^<^)rFCtEi!T3{$BocQcq=-iR< zI+nfr_OYYGfB;dzaDG7^Xkb9^R?Kh4uv;)cb_LPZa&j`??Dz(S{ZZZ{Cr_S0zhi<B zxTL%UT1LNP0&fB;77-Bwz%Sjp10f`cY8K}3BlsH4l?vJ$9fUu8<Paw(2eX6N>Er9Y z{_v4sfbSN}uYogyaN5m`j-gEu`5?*AAQvz{xv<bTI)dg<L&EDldrqC=hsy!KKm%|% z83=n%@SlLaPM$hR4Ggt*^#JXsPqYPQP|z{@Mv0>B5IZ=x@#xiep0O1i(l)9NIokB9 z!)oqWYFbJA{6~`i?Vm*D6x6dFQ`#wFvQL$AM2jw<?;Dz0zYW?yS;nuda!XnNUBYIE zgbjzR1E3v!yBIKk-YAqyoUmKOgj>mpThU2K!vbfH2z!z{!y_yzC@ztQhZ}vhY!~vy z4Kk)7DJfo9T!_Ao2%i98!1LE%f32pjdj0WJWc8cPRZu?Y_qlx|L&_>j`}XaFMs_d@ z)RB>qn!k1pJR%^_52%|-$veOP=4%ibfXORsYZ+M?;Bh;ddIsu;ga%(qLUN)Zjlkc| zog5q-`uciPR~JFR1M<;k1qIJTn-lYMQ&+DlDJh_R0c?lx0Ztwkkip6EG3W>&9sys5 z|HJviW%iGc4ouJBP4TE_?L-P3We12KBGnC4A0p%N=E}N;`&FFzbbKq?ChtFgol`q> zMA;Gf;+0W*3hOIu7?4mpTtBc{+OasaaQpW>oev*AoRu<VJ*~;A;J~FuJ+AF3XAySo z_BPf3*<$V(^|iS50X6y_8GCLeilA}$d84ok#^FMy(P#DiSp^L^<?K#qxgFJT6U7kq zEUX|>bMW$mc-DwuuC1$MU~B|n@bGl6ZK#>Qabs$E^Zj`MC&Sy*!jb^W2Lv{_0O%iN z)dE=s2{HEZ70vk@8)fCC4i0u!L`w*VA+W7)svny}-suI1wsmz-X%zUEm6b(saL_38 z<#r(6mi9IWM>{JkONb^Knj4_wXz%F)p>AnqK_b~!*H$fDzcDs<WpZ($vZfOD1^p8k z8~}m}J?P}(BAl~@1;NqLzO%pA&))|bgd{r+PK+}LcGl7}(g67&lYrsd2M24~E?2Z( zj!8?`H8jxB)>hTjP{(3TtZfKns-caYwwZ;Rp1~n?=YLb6xki)#;HOsais^eDQgnvU zRn9u~pgP?px~^ept*ZNKZU6GC?|$#gV`fA@c0PqYQdS%)6drZgV_2{F+?H+7{>id_ z{q`|c3YQxFm^Nddypw=lpb$P%$Rtt-A0=uYe^k+FuPB~ZopxT2aT@C(gCiPPS%Z9a zWcV>6W1}*%(@QH#n_Jt5M}}u-$L4<Wwk_ZsI1?a^{E%dJW_E3DapT6~#s<WdqqEan zYeEI62`ELs&)xd^E}#@?glSt}Xh*lByP)+E_d#od>oKuC`q{1ExHo23SGK+b3<vtS z`vSBKCk9(S{a^&N8Ttx+-{e$vTU$YOU3yVzLQY;-N`_BNf?IHeqfa1_=59iA)G@cz zG%!4;W%DocG&N#a<KXhGr?2b-@;GH|)f{uN^vc8P?&^+-IgK;5{VS#I^HZz$e*e>n zNhoCFQ`{|z-=|DIpyI--<!<VlybanvU$To@_bHG?@S!Jkytx!zL`<Vm6Elw!HV$VK zHsX-AJ%{s>zy}KI`^X!RjBQCw*!z2gMusFNql5Ofm)nuKLH^-K;=lOQ>A5Sv`GdP( z`TgnW@2mN<3ct4|^R=SEsi~Hp-iqed{Oa1YqLR4GoY2H%&#)+DpoaPN6W-2A*TP!U zz(`!%{J$&EL=Aj0Ye(ld9>?alb1T@3n?xG=Hk{K9;Kwq;^7=s4lyxpPkF34_@CW92 zX>9Fh<5$})WWcKExL?_sPs{z3wtN5B{5EL+d@**6IgayHAjKb0p&i38#4Qp<&ErJO z<0XhGd`ixH1Pu<Ux=5M^Dq2TMoBC_wi6$g_D=OXDCjf<ga(+>6Wi{|&_wewq|B~)^ zy2IEnPB?Jo=k3h>Mr*&~VxwPb3jI;oz~n?*UtdL2b6!<VYEemSW==?AigyGuGPyZW zWACJAWdm`m4A$gdm0b^O&?ECYN3P!OpS^KTht8)$0aYVyd5J@jY{JN|>Otbx^3KJL zhkxvav8yZBxz1|uxuD0Q;J~ivbV!}Xqe}IQDcVNtpD#CV-4oLFR<ci&wu;&>?{rQ- 
zNZcwJkS}4G1fQH@W-J%Y1aO|Jq$o|tIC(Q4ExZ-;`&qh^H^TOzNvVms1vwQ}mCY@e z`}&!a+Fkhq$bUK;(%A3*h4tuPXJc1>+edoG$C|pkE1Fw!E2>ipiefUdgA<Y&5i!(& z5PJ`A0K6#zcv~#q4C2;3D%4#HF4XX{p4po>AAf7j$l#K-)25ZGI^}UFkR=G=acJn; zzA%i0u7B*8-AgMsxCAhO`Q7sNtnv;ZZ@5(`T7-aY#Qr6vs&PQfD8Ssm^rCqfu+w>i zP$}zFam!?Bo3x8&vHL+%N?D2+`s%nO8qm|V?83Cn?2K$k(8UAqK}Z`IcPTbAJH5E9 zu%@oQqXS@rC<KIHf6Aht=gIl`<RN3f8>OGL%^#bQ<JV`ZVSe^cj9>2WhlsVft|6<e z0vtIaB|RWE5&W9s7X*^V!i8pH=ZL&TW{g+H8FH&S?o)9Ez;{k>EZu%eizqpy<fQ75 zqe&^|R;G!V1pB77*A1)y!?#ad`~LT3jrob}%&%iRuDnawc$d7xUU_>?C8q;Q<TDyB zuV1~{M(kfwLJ}%4&dIi6)fbEcITW1535n9y=~Bd0S-Wg8<1iK>Lryt6Ig1eMfPA8V z5sn<EWoBbQw6P#l9lU(q0>k`b;v-WtlJW|H9;;egFZcB!gg^Haod42PIe&<ye=!{R zJp$%S_0eX#M@AYuyFkx?fJY-%=)ZiU;^-j}WS;<%I|CftnB)K&9wdzt&S1Y9<ts&3 zdQ^GG)W*u)XMRa_2juN88inb3)gDxFJ&E&iiLS5gzFO0_+&;bW;>~~Lhkkl^4|ZPp zUE+k@a&~)U?Kl-3S!76u6z!jEzf}Jhl@~9*)pLyTy3}G7Sau5M1M#esRf?=_hO|w( zyhE-4#$%75{vj0?eOi)ROeH0<5>HRU5bX3Vi56s<ojZf-9}J2&^ioPfb}o=)Nqu8O zXD2B9q3Nk#LF3pTnM`Kv&yo2rg~J1r2)#o*0`#6;UYT4_1br4<2wV;=FckEgjhiQd zOf?}n;0Q!5BNIiOA-g*Df5^G$ljAQ>TnC1a$ZkEP>~tQ>(Dkh5Rd+wA?q=a%QqVFB zswSsyA}F?G<K~?|`Z$u(D%kl|_X=q5kha++W4lY1#3pC|cL|HbO7_p5zuboFUsZaB zW=tr_mny~#J@XD}xbvvE%GqVhk}~B;nTn42CzM_G2pOH%^S1KK4bEusOKGz4%fVVW z=o4+soG7+*FS2hS$N*I5K<EdAS=rp$+}({xo!=8n&i%)w_|I$p>bTISc{srzJBNlE zJ3B$W=U3GLy(i}6N2O-KB{0IHLBK;7X6@=>?nE)N1%@YT85+xDjrOU#{;M2W$1%3} z^8D4?Pcv%<kEv5mYPuWxHi#I8aj8&sXzA%S;{f=^k#)b=3eYF}PN~bNkzCwkD{DLM zKm6^PuCw#&xdpTkVRO;^8^Amh_B*7A-&{05s%ZE0$@Z(oe`Sfwth4mWjV~M3BBvZs zp$X^)C_3c>^5yMvlpOPq$l0?=ScseW)1u0g%lo79x|~DuwM>bC2Qx=9z{8Q@PY(|F zk4*rbb1Am~2*0Sd9u#uNzyLab_0M2G_Pc}GUmUBPyYhQ>`82OAIxYlghdv1W5vBK> z@+t^d<FazXl2SpWf#*`0Lo`Gx-Q3Z|$i@yB9%E!8gEiT$=Jv0$WIgBj)ar?z>5bg_ z;j=pQV`{GY-gPpTm-fp$DcdK8=JwY1uOPyvcj@WNH;pZQdZwP70$TeH%dj7n=e~fG z)377h1_Xp=<P<hEUmhHpm|I)F+1Wi>QPV;9PLxu$1%_ukp|VrN_$x`PucfTM0klh6 zeI;Q5m={#B`*C|#;=j81Mi<)$mP8j1sW>F=lXn)t`71jWDA?yHI^@dPrgL00XOpy) zG7St&Zz;PxlU_AU3C+Qp+i05+AilGtxY~PoQv*X75i!9D$uSvOAQf`TD@z-iYA?5g zgy<h1ADNlngx^Ofj{ei^(-bA2dKm=mBO}c{JrH!1)Hgu%fzUgIt0=uk#JC29I{O6J zF$Fx{-pRn4gf+9!G&GdaG5dEV*S!iZ`Yws_CBuXBw`;p+&tmDv)!abRNLeJZ$vKMR zL;N#3t9q8Iy04aXF09^p`f04@=I#4&mx^`q9z5rD0Q~z7O2H@FF-5jh>Kp=?1Lv{q zr#0F5RoM9y*p4Z&oYvYYivLQ|@^2CrsC^{?%a$zXaGt(N+YtR{l)$)hYD8sB@vsar zdcT76aZOJphdhusDo%xR)~P(wwtL0R#Z3bIQyV)c)+#&Z!n0b8?OZjC@cKksAbb$O zK=^J!VcrPxlcLkJfJL*JoL||}*4WkEH8eN?w7)sFb7k}w)X3&c*S|TE^9%caggSFB z*{R9iu~Be3h%!KN71q{+bWbfTfmjv947jIvWUO0IxQky9Lhm$pf-~qDduGIHrlxCn zUYqncCE6)0!^$^5zI1qa;dXY-ptzy;SsfoeMxBUJ_&x<^5z|oDxCZnsG9dZY+fTkY zIC=Z-gM^e)J94<Rs_lXEI)HE>e>Q;n2^H28sw^kf_FT~W`l9*2A;fNp8ekq=VDC8{ zD~FH|AAZ=z=Rc{CeY5B>wLYos2<+vZj%j+Skc)uukx$3`gEFMuVy5Tx7~$D%z-GPE zH*)L82xK1>JtG}+OT4`^kR*xD0Kx<y`9#J-NEnlmos?IYQBnrVxr|BsZGC+}xe!8b zW&YnDMgLOPgU?@D&CQ~NMQ9_#)05D%v=0n`qgA)I!Ep<!Ye8|P6c)#4=YjwWNlf;Q zigOQ*r1%GedIxW{K<M51BfT3b;fxMykpG)J?X<RseON_Y@xb)jgUFn=6Pi>$HJX8U z-FaL9yS$SGA;LMTuH^E3P2X~9$HKymhreeu{@V3hsTq}cqThKLbI!Bc!1<uz!3)@q ztFWF_XFaWj*cr@T0nD+BCaD<}+pzqnmUr*o`$U(9<#aQW+Z1f$xRfY|R9qDu^HiJ* zRmsJwPN3}_b_pAw#d#Jrjop3rdS?AW>&TjCM25PdnU;weU<L@^is}X;f$S6L9ufi2 z0)PRjCS>P9gqu@QRa9G7-qc)kxxJ~Ur(<xicWeyUcz9;|BSnA8?|^=t`Z?Xhr_>Ll zTfVnNLxYi-nZc<kz&XfuVD+Z%Zipt}C_wCRMDVy|rf!c&NehZkf>T1Idq4>EN)UsC zFoSd6q<6fzx~{$`mbgQezC(c`V-{rLSCmsfKD~CI9#e5x$?>c<1Lt1Fk72OMJ4stb zQDYm5+pYlMLDEbtqc7F`0oDBhNwj*cwS6$Bpx!Sy-6t>wv4CWTUs7ajUTs77!$;fG zbpM|yH*Y^6dZedUk9((F2H0~dx$vmCs*#J;$VKp}PATD6q3jYi5i;;jE$_SY^c4{P z#OnQsj7n7lb5%V<2yBf=4rWd+AQgZ@fk^2g5y1HXwuqE;pjm()fDqJvVND${Focs0 z9UUz_z3l`2fY{#gG0;B%VDJJ!@)y3<ot=RW0+<}4V*oiw7m!RS&g<ISL9Roapr&#w zYcfmAf!Gss@?$e|!c)@W1mHBl4QT<$P^6uQH=LollZy$-0koNpnWdJIv5KzYaV_Wn 
zuHeR{OjRRAoB9-_RSpd;+$4CV^C;Mh8U!2p)}Pl607(OYcZsPlZkuDiMYb?@_0BeO z{!^EM(K$=E)THteV0$I|1a37~UUhd>$9xS+3BX>9R&gH7*ezmmT+=lut?~MU7ZWS@ zdS`DYmvkE1(v)xpnxOD4ZSZ!E7Gx^O20%X9HxP6_BRo1FHZe3QH7YF=zz>=S0!a|q zAbS9%5b#zsHG>P(ws(O1ZtCuWkQ00Xw9w_gen52_SYKajZ*OyVcVlN)Lq{hlC8%G~ z+)~QKIkW?<Wt5bs78YO1Er`p^L1m18YyyNBfOe{X@F%nr$W$~|#hF`SOib1F4W)F< zIn}9uSELDGJ=7fHEd7gWyRURluB%yxam(9Fnnvq*RP$?jfuxbQjdh8xFKn4b0pBrs z{f8fZ{G%klUH)$?^H<mHy)zR^hXS*DG?@dK`{kTuER!*=mFh0VnzV9ZT|X8PV@^4e znMeBI9P%xE$JCA5zGYfyhMEykNk<=m1pH}Y=LmrzkxI97_jd9Q07U`VgUAtZ2xtW6 zkIl@6h!XG*Kn4B>)eUMSuc`(Vcwu!NAi21%p}4-WglWifT?2Fw`PH?6@Z5@O06Fk^ zT2TpfABlkT>|BVQ!&A~eLp?OoH7FFKRB$~K-OCF3#RtXI-r3N`4vkec^bHkocwSBN z|5S9_uS^xk2dX*7`z1GmfX}HLmd5)ZQFhUEDbgSpaLPOHQ*@CfMl+JyN>L5pKHoNW z?H}i>*)IP{%Y#QxLoQWC=k>&t3}Y#2Cv?2`BV(8W8cfV<xm79<Q;sM&K~yWK>lK;P zx_<vT5Pr+(dTQl}liwwEyuC8c0FbX|X$@-G)B%ZbZCpL<JvPDb798#w8tD@i=O3FG zoR9?k4=w;*07wz&1wfMkS6|921a|;De==BwjR0@(6p-i;Bf~DRBgD(l2;dwV^bCsv zK?R3^{>8z|4}uG8R}VNk=yqe0g8|VNM<8mOm}(jr$>0dQnohfvsO&0qer*p?d=M$H zsIF(OV|>+$k-@9#avtZ4rB@0Ygu=D+V!Sk*QvmRl-B+s-4Zk>g_4cpK-LYN%bB46< z(hFN7vfC3%N6h_7E}Dn!Q*h!@qRLsP0^w`YDnRD|;j>>f=a99r_Q`G^T>_~yFn_yg zcrCGH(43ULW02@jw=LSXZQHhOcki}sYqxFNwr$(yZriqAf2Ynn@71mI=l;sdOe(24 zl9jAErm*r2jSI_+@(j$QSX-A`8$id>xe@?<M6htx(c4ykjbRpkT81PzIs`P(DHz5Q zJsF;|yuLPRf&svBfsI@m4nP{nZ&ZX#faN@<M=V{LxIizUtlumazxtO)|5ub4LqG*L zN5Fg&^JoO_l_jCcF%VUzPtBNLcWOPwygTbJ5$jpGCMIs&98J3gDQSna{K8cvlG4~w zbfxK|z8oz(vlrkH7w%?YEXWVgQ(+d)=>X2fz1WiT=3BSa1T&2Kc*r)LP<SD@jlc4! zpXw6F61nJFxsaD)u~@m3b?<?i?HaK^d)fwG!a`%squKodqhjkatK{qxn{JhrP+!Qi zq98k~u(^a2psg^EzzOsaSk_X|=6_7y&V0%1g@fs{3k0Bvu<fV2+Na>h_&UIkWev{m z&-EJ})gTYxjMH~A7YH!pzmP8rLF%`BM*{v!u=7%5i!>cf`>zp7CC}A!RY|^OCT!w) z-2YlWy0Y&c-g=Jq^M?~#$9tr-N+e}&fHq)dZlIYgavtP(F6^9{>YH&Bj~kMYnJeep z#ZR4^#8f}AsS6w1r&-SfEu1%pY*|(%em(q=QD2fUz)ida_y?MRP4qzzCdkZ2sbm!h zg9}t20_QT-TXxtw^cU)3V^{UJQp#+_<A%}64(}NPANc)tB94CQNVkx$t<DahNAP*g zCZ2#GnE<~;B7+}`Gt?=0!|>qdOrT%^bDv%w-zDe=(9Cr9Sl)7fzCg`|#>Os$+O7$P z5i-J+RL6*rIyaolYAuR+r*U@MZ;Sv+cC6Jr?QffpP_)t18_y9{$aBvC&p$1b3l9~j zeec&PM7Z<!OOBGo<Sa+5gosuiCqw|>+lzlTVSe=Tqh#nxlAY06W=E3Yv_O->(?cAn z+Hn+$!VZi89{QfuW3g06wS82nE<~N^IJI~ky(}(3hFXj!Uo1US8_r7un7FIGADXN` zk6<s2>q?|Jgk8Le=azrJ-#>jL1pHApS9h**ep<Icb0o)dbRcF_e(M1;Mpz9p0(G+f zzJj=?G<iLciL<a_Bw@G!C+f`5=4z93>N>Luf~zVh(ub?g#)>7Moj4w?Nwk$otdUKy z3_K|)sw+Hf9nRb=jiP8bzUt{-fG4HGwFxZ}Jb419f9!koajN8Jp=K|DTdU7>=&sbQ zakRUi>_5IneuzN#nnvYw(3c+3S>#VxW)zID{Q%_8-Ww)3@l;%s8zo19F1IQH->9C~ z$(PM(3{An4aUA68Ib8-9tzJe`u2Xb#BB}9UrcRxs%b6c0)EePZm<z_vHL3JrQ5`}o z-$(MA04upljyas5{c7+y0*ae^lB4<N(O3gxp?3*x>2FcWN{mbaJNsfbM#$y@1-PX= z$rd_-pf{euS<ASS%Y0KEJ~gx2l$Wei!_IUiiE5$NgK+e=_&8g<M%n8%7lvN)3XsDv zX{EwTq#4Uzx|yE6HegUgMLurNiz4c|N^$@JcifIvHr+ObS$>WtLgDez{@AJN=1}$U zz`NT=lqot%afLtZDHPmRv`=mpS#|!oa9;tqPsVkev0&yPspcpOC+ffxxv2^mna>Z_ ziUyv%8~(}(mu(?LJ?<GUiQ*BqB-+*62qMgvj~`l>yg)c~zJ=l%g9fPZ961LY!2%lr zFqIYdYYIoZ54ToiXHZ;bS6pFLTN<9+hd_6+o;AYtXfD&Hn!$mJsak019HMcB#=POJ z9d$%&6`zZf9@+TqM%!4`S#Q@<=MgnFhA`l9TQN^EWe9jq1=sF%e*F>Gk0~#z)TOGg zp|U5SK`wMNcGDKxdiK@p>GAR0Srb2e|J<JN9Q>MKFgm8G8Y(T$nDc>}6<g6csZQd* z1bBGizEqYNNg_i8N6k_HYiARPE49D?u4`|w&GYrB&GR8AIi&CYDLcu?7nWUgl^o$9 zFXz@(YKV?+hUqllr!Ts3QCt0Lipc~>aTOcAN@BYr35iH!yDB3)3IjWe<NAfnQ6Ly- z)FiRq2)We|mBo;=a?n#VGFvY?TQB>G=DCeVag>RHtGma=KEJ{$1xc64>fBdk_^quY zlZ6oQM7P>Fv%m6shU^5QzT$ULKhO&@1mn&U;i|yOeDU&Iz1cvk+YY_2Z)9tfc2J^M z&(Q<V-tbt9#0>eb%4DGI%3fYXWz#Te92daf5>@I%CeWsTFX38sVaRZm38=KfTLlyM z7onM8Xb)H;UI|!J-A+rFM#VG(!?%KlWd7*sWQM2o#=CA{C$xDN-arQ)DuY*bSkYSR z!NM~JxlOQ@9R8|H&b4ubyIEgFTptc5u6&{`$W+=V4u%sX1=7x%23j|dPbK+Bd5>gK zI!g{N6NI9rjp}wLA2cLip~5qY3QP;G|150{O$|IBtz(CdKaVv8{+g^BI&6A*|J*l! 
z%(l$;=(G%cf4%JJX>@L~bb*b5$K%JXn_O#=*kQZDL{6c(s3+4Ipn*>SGSptELYIhA zrZB+q*9h&!)fp!?9V$A<u{<|FO2JE6P1WF1S>wr5-Z;fp6S<ojUK*0z`3HDzlQUk2 zs!(P@#e5uHt{2(7NFcfCHmy-wc1W)o$JQ}+hB20w-629UGoo_C0(a1|l2H{`6u5Nd z`x3Ty&GHe*$0Il^O+Mun95}H4!JHJqeWBvO)G$0(WOY-+{JZPwAQV)im13oBB(>}= zx@rg-@XVR_QeWw|O+<Ot3fQaDqp;om^F8qd&gwns_c@TVemVenQpQE=IDyH_Z;Ogm zS}@5dD^DpDfYvNCTZsmN)>kV@8Bb0IeesU3Xnz<-bPfDDxe2MS4On3Mh~lUo{>MvI zK+4G8)5%;6THW5#$b6$DxM&HSZ26U3CAefI)Qm;!q-p4=bri)JtcZ;R4E>yIR1)*! z9=MpA6IZzq@(g@JlXP(J5u-W6RfC)n2F`=2pr0XUL2VvE;q}*GUy%h|s!YR0Gk@I+ z-kjC-Zv9ekowSQS|9pKNzVL6){B?Dx@{VbX31nV}PmAfkEvUthHg|KNjDw@-fY^WE zvsn<_H*Zk^e*U_MJt5vwDG(Axp91B|7Tgan`$3<j5#h(yag|)T@&$P#iz6`^jBkCQ zf&@?}^;Hci(jrvDkO<YmZs<)<y#Y@kW#AxA;#>KPPQA3JuYhLcSMHHQG$S6W=3Oh5 zTqe+2VZhsm=`Q-JkAIT}s?M^k(KS&M7NNbBEk1a%Xd&DWkJ9IxLw|L^D&3&lJbFn& zuic)}@m*tch5gz>U@((<au*OQm9^&_Dm{Vyd~M=^ujqUz>&RYuMq>_7Y5n#32ELX9 zeqz6}oicU;C~QQqSv07n7D3!0nbxDr5`nsW1wR4>Jpu(k2J9lM4Mz>`>M010+$RLc zW@H%EpcSK>C{`cnR_`g%m?iq?vO^4^I<xRt+InHgJ2WZ)zu>I4g^UG!A5P2{-N&yL z_^%c8H=T!X^c3F}cgpMbyuErJx~|jtCO!PD#`1r+-Q9|g9=iB>i=s2_K&vZmYFm#Z z@aB&%l`2dVDz0&a$!|t#C65$~@RpsA#2P@Hhf1W41_CL%QLZ>v_c0a%%#plrOT^Y$ zY`fG!c?aP!3;2Bt1u9h-r~bC}^cAE}lOKHWLkbr5pQ=9h#F<23h8>Pe4EH}mF!#GC zR*elcc55GA<`h*)B(}nkwE0bIn`FoT0yowD53;Effo8Q}wblUfOvU#^1iiKZnCIhG z88Nh&&ywBNxGMgI#nkHJ(3s#keN1rWi|}Q0ZOP^TsA}fWaprFUFdnQvvKH?6gP%lY z!1=u0S$RBIdHa>yk}bfK%C#p@f~K0gL81duY>b|^8gYp%0~-D&P>lebFa~?$-93Ti zdf{3^g(Mj6#>+yLOHf>Awsq9YUTz-ck~|TP>2!Y;`MM&1l3qb0Fc<8DtFYYipPnzB z-<=i+DhM{Mfo<+ODvSBk(dtZ|B|uTC<zP@+Ari_$B(lLL69!Fch*GVOm~-d`K)K%l zZSG37zLxk^U5^d;r|tKTb>a0Ek!gy6&U3?GV9=W(l7+w{^@B@mis@61Upuh2y9`FI z-q=t-U)}qbbp3n}js%_*Mz(H>txj^IDOHXLJ9z@0d|RXfFKh-jb{0UtBTa@ZRE#oN zlHX^V<#7W5mr|IgPuq#nG`bQMJF-+;vM`lWGZYUKmuwK`0mw_Q)IlXPJ#t3oKU65W zCyo7^p!;XpG^lxP_UL6y^IOQV5ebSLz1HR@z^?Q2XZ(kLr%S8nt<A-qv)dDd;RZ+1 z<XEfhA|ro4j=&C?)&i04w`9605{31t@BEma`p!vK%n7EAn;%PR7jGy{IH!XA8mVE2 z76weO^wMW$WJ+hT4JQgtY{eqoDHB5xg{6dn;#c27GxsmqmP2jS`MIOI-ShWn_iXYg z`r`n4HPh4V!Sdb6ag)bTAlLYedz2bU$oLL0L9EB{9R^WHo)Gs1dgJi{WAy=p-2ynI zBAq`6M;dktPOfZ>&c^N(13#my!{KGGMQw-fN8=+9Ul7qd7HbGJ^ZA<B@TPUg@gLo$ zNvCz{RfA5;<nN^UuO0f-AK1b7HCHGTTVp3@M-u~^|BUPmEuom$SqK;i{xf1`XJ=sg zA2A#Ie~Wo|2<T-@Y|Wg_37FV8euYW|^eP_qCIs~A@`jcsM$QEE$}Wb^|0R*MF);f* z@W;aVKWDSD5ztGRSeThRLopK23)@-SIV#&57?}|Kc5D96Rg`57oU90#*#CQ1$imr4 z!NgJ6&c@!(*2LDCfP;Wu%);8)#PN4)ZQyJoVq#=xYy!o{2ld}7xNi&(h{O32AOQ6J z{47e@og*?ZVwku=l9M6VoX!I3e4U#21KNgtmuKpz7^iDBQB3pKUa${Ve8_DZA80~7 z1|0RZQ-5TM$*>)6^!zW-{Fl1_frg#^e{f-E{9kZkXZpXuh4EM6|H0+=7XJ$}lD5BE z+uQv|X#!_QmtRQ#U#$M~RDPlRKe2-P-?1Y2|6_IKEnAEG#}&6v>tyMJL}S^P%r}2K zQIl4YNHji{6+&_l*bIqaF)B*N8;-JEi$v=)VXVV#0g8!1--X+M(^21zSQrx$h8>5p zP(7olxDV6${35IIW#gj@&hQ@)#J|YM(3k7Zo#u<I?p0l@tDP@fw{;!Y)Gd?$Z<EsF z_0sz@`}3OnbMf=>{=~imyK485yYu6eW7g*Luz9!MbL0DV!ne%-{d{59o4d2tx+5#k zt=;>T^Y8oX^SBq%?KS0}!MFBKY3@$kNqX&0J-JNnZyCJ`ySK0P%{=(S8xY_09Q(GT z&7JSX=YYMV+1@I7FYo=4+{e_n*$Ri-or+$@&-S0|#(gz2+v*RiHmx7M=Qyf=c(n&k zp1;49^Mh~62(633%HP@it;*v;;QjRW{4E(m{Ws^$2EY4Dc@s{amDk`0?jyJ5r*Uh| z-ss1Pwd-M~h}(MVbLVVik8qLqKzPgv&aLq-HD!xd_kdabKIpKV>-!Mbo6~dlwZ)GA zGxG{;(Bd@*n-pX0h3BV|DD*E($lG8HJbtWVL<%QH1qy&1bUR~B4POr5Jq}ECVSYi7 z2(V)6_U+zgtlA|3oSx&l?I|Eth(3l7{igeFh(i4y2I?mT%B7x8Tgh%dxC4|ZlFq|J z-_70E+~%3Dg@7D?D>h87FQyzje(&Zh?`ZCwcFjWcpz!myHx|DlYAiQ&2&E8@2CSFM z{y{k|{Qe#W%$GPHR$zV%y{z2lc@Q6@NNOa0K75G##)2J(*X$Wc)8Xtlgr<+$eKRSj z-;p#(4gaEYv&B=I?$|v$gJ>aSFUtM|u(i)UH~f$1=H`$W7yDtP&2d>8aGpcmu6(G) z&6e36?;>g-&d}NTS;XE_<M;0Q!5J~Wj<<SoStf8Yh+kxAjxEmJ#*QP0EpkY%Ia{N) zRy{O^qqEwT^9fxQfDTX3<6*Pc8N%j-^CR~=QcAF%c4Ua&yC@Bq-yZ(qOiL>y63nAY z@ddC31rIO#%Scv!S|A}_8)udw&GM)Iv=wM<C?kf@OrILndk3Xq<s+QIi-Wiwe@4pJ 
zn|jtAYZka)JL+%qB5k$1jRn8666Ng%hPAos(HX7TkV`TeQ$urEc_v#WohVphC6r0! zq|HruunxG{aLh;$`j}xa+`!+rv>5``VM*zo(Dl<GGmY<l9t9>kwbO6Pk(yRHyCpoS zy?M}0H}0Vx;x{n<J5)IbL{j{Z9Wq=H>bNk^e%*Z|NQ2H`07Lc%n+*}J*xCB0j|5Z) zW^Q}uZxCS~DX=`MQp@o;Ttdrtcbq_ozI_AI-V;Q7Huzw_x@=k;bJ!N)C8+EK*MJxr za;Aq4R-hm!2<Z5pa7?NiScjli4g!|0#+;c}_(uLNCFwNUgcf!Vn=>Pr+6FcoPyW_B z+ud90e%o!gkM07mM($#I8~-uPVyn1SXwv*JYDvBoXi@~Ff0Ocz{RGrn1U-jwXZ%k_ zYq<T0%iiSWDdT=Psm9beB_ubr_iLMmbsj*|XWOV{Kov=Lii7~|I_jlpXPf=gO?B_l zbxL49l!trp3DY6G)0IX%_D3?|wf8dQ<*x20X1{-v_K-Miu}NnPBnju8#`@S~I)+6| z$i-d~a}$kG)CZ6_Bv&N>#97;Ib3vh7oy`W>1R?*K1YQ`0e=$7DFwm#1MaSP1DV4@A zeF<?YV+fjPlE2;)%!7iIq55e%RHFX-hC)=b_j(~n0zm76tp4Q^$ZnYMVaid_l>Q`= zA20+0cMzmr>r@1N5oRI56mo;~*pq63H3%HZG(_qJYcm{ar1Ul-SRQWSAPb2J)2d8g zR_f{at;vvPY1`z|%mgqPQgP1&o)o1Z3kg|*LXmYGb1;cuYe@njz#H?~snswx6bK_7 z#CRnE`U)ttFlHBIHG5w!evHcD{SAES{Au>N4cRf@S<Fc|V2oZy5s~UYLT)Xw0&1T_ z=x1D7vliZeLM#m@F!i_4!u?mNrr(N7K^pjR;eLe?0<Kkr?0K9pev0CM@RC2hXh0OA z>I^a`WELBE2PhB_`eaY4e_X*}jSV*EP5SSv0#i6m8A0w(^hR8?j^hBBRL*i9eoawt zsY*yCKLiQ?J@_6eafTZB2e$eMdzMgMkBG=RlBKTYwamOoFCH#*4l)7xzCF(oG^f5e z3k!rFno#mf7@-vmp@RPT;i)<TQd&`NSdEg?K|A_*=C-hhxl6|D8Aq#}?`F3v3&9r1 z_Bto+F#QeK!EPf9%t}aF#3OFez)g{;h(YSu5m2vlzM>#O*KDFPbCGI6HWbo_P_KoC z>1@Ki;8S(X?@j}&;V8{8wbf6)R%mhGZ&G7tMD`M1h!sa7@YH9rA+FteH4|KU__*Wm zH0C0)<dvy{VKTeaI`j;Cux4NmzywfmZ~_aKuFB>GESVHxN#L-hhQQwv7|Qd&nMm;3 z@oHTi+VMZOy&3u%-lDESnpL-`^o%3=X$PFhFjSgpK!rqki;vSF9GZG&YaiCmK}9=c zBud6j*9r3kK!uMj0GM*!GeUsbT+J6G320#>a1~KP(u!!5v^v#UGxv`&tl!R){c+6B z2AL8D!ha<J*_;pI<O~ssa}Ct-V$C?X;_)SrCN~gJ|8AajKWzqGiVP)o&tb`Y`BS`2 z&}WXJ1gmXM^V|}pEL#waM2S6jVx0XlI2(czYe!Pj-X=AS2KwWBe2^g#^dI#$uo@=A zME*;GfMy)V_AJq)K-NHP3mh8({5Z%GWoI-k=o;9JD{cz#XcPS=aVW4M$?6oR=S}-# z<lzgf;8k4aS;H}DI%QUpYOi2u#_dm~fx2KI4ZgFelQ7<=D08~WC*srd%-MbOu~aJ{ zC#qAB0BO3;{httA!;MU&p@>j*Sd}8x5E7n!&$d`{DgSJSBjKnEhFfUlOs5117W@=b zosJs5lJ{6x{l*%m#(`lEnuhM}4Y`81g9cfP3<v;t1E>})G=tzzwn3apyr-oUR}Mnj zcyXLU!f+KU{74anZp+kf>|i9fJj?(Sq5P_*B`OQA7#8ZTcXQNe_|)|0@8GC`%xtp% zIY#K2CPN7fa2}cuI=6-~$LZ^X>n!={9;{9urkn|YfF#_OV1-fWLC&DUW)Au>r+K)Q zfMT)L(&m$%hN@dbHN5YNT8Xm-$o6*@57TDTN?JHt!dax8YgSMo)4~Fc_cIF#Aw$-0 zB{9!E1O68JrX&5Nlal^kAR@t-3>Lo^^ea4ajoa-(llGDJ4{*ixpy>;xY$5^S+7<I1 zyKSP5&xJs!O0+le8n!urK~)G>zY)l1TwJt|C5&q#-4g-eprte^K7tAa^rO_|Ys0~< zfrBvqZRbiILystT@D7yIhzl6U&7pO2Ak<feS_B(ZEs`J{yrOPNDk(!w0V2rOY$7Ec zd{dE(%7PiD@>c2)sLYM%D1XB9=6up!8B4FQp(R`axv*W(9lq8d#E#RMqL8goJ%{WZ zAXhvpeK~JQ3qM#0EtQ5^%aMmISAQK2X0cw_A=&~tplK`%#mifmx1&qYicpdWoY1AN z9=9w5QO|-w#4DykN<v=^0_X?@HrZ|&@&I4TDtN_PJSZ5eBEdjspAoT_ErrLgbIz8K z8?0}H_M0h=EG>=2@0NNja2}*;jx*aGPKhxhmCiukog^WU7~jMS?1SowJeseVysm8z z9oXSwX`mIox|C~6YJm(4K!Hd;`3FXlFMhSHI?l)WPhFA{U<s65Bc(*)qNBq5v!fT- z6g;&g&ED?b_f$+wG3g{G#}A1j1$Bru$F2Zg&qJLMSqo8&J_1PZ!~DNYfU}_xJo*(W zb5u!aS!^N`u5TJ<7_VfJ{@A~%D~V_sY0Q@jmWNlx6_6W2N}MF>=tFWV+iA>ed6{Jz zxaaNnqW+l_4bBKhE=~;YJ&ha#C+NHiGr62ZL2H!$>}$bU>0tJBqj>&Mgaft($tl0@ zfIZm6-#yxyXjBY$Ob-o=tii2on*6eeDkF~a=4<BgIETzM%^Dpqux|u%Me_wB`TY#U z7`X0mW$;UH(WBKmlTfBlc!=D71kx?erw=cV8Ls$Gw|YUy><OW2cCGeUl9q%eN1+@I zBvT*3lqy5c4Q$|dL(dXHuQBR0$B;W^Ys(?p?hK0i2d*dl^n&vwbb1hbzY;UrOsi<@ zpjM$?ebFPl%qT$N?JD;QKWNd&NuhYn=<KAUfqTQJ97-DlH<O+_M&7+G(x6tD{z??j zMk5KAIJwJEeE<&4KrFB7{D1Wkn17M6k&j#ocy;bv%9Vn$9xh@ygvAAP9LQP}M+UG` zL2G8SHvmIpMpVF3f-o$?ciAng)Mzc=mncY!u49h-kX65Dl&Gwso=rVd!$_17VGAYd z=|WZ`WIZRttb3V><BAkT?~%y>f@Bt)oSjE7p_E7xy!llvQ!dV6Qg<|?MKzEVd#B@a zP3SWd@;IJF6K2tn70IBWSGl1b38;hKy(dT^p6cZeQ_Sk`q0A^iA=e#1VCl#3;mW$M zQ4~PQTWDt+v0C^Z`w`Zm$rR!h-_~rKHV_oWHKfI^yXT7*MI)zagj1k9xYN9du%4U= zuIbPSTUDS!hblv`6|CimuqMsz5|F9!FF^J@1~9oYN#1fs04^#EG6Ea0O2n3lQ9<H1 zS>}=fi3`MFaoD#(*e)~ngMv$znF3x(4t)rjw2F*0`tv%w6=rtZP(#q|6HlU!FwG#S 
z2AcItJoM{^3dLrQme~2zT#z#lFkoP<GMO}Sl_NibRAcBtG$%dMd5yqRv+l7eEB4b= z%~%?mNICN7DV)R-4Jz)&*J%<EQ(NjposnUlYeh#9SWp-l!<I#U_>lM7f2;8t^HOSl z3|8`(nVahnC&IEqhC?0}3i%1$Md}<GfC!&Md&b0it!l9Aw9Gx7vdG$o8sey>xH7TP z8Fvuge%sNL)Ea4UsnR3$usq|um~LpX56(jo?Fp{xQSN(+@1DXs*;=qT3(d<i2pohx zV74u|3adWvRb6tdM=kEX8}3z6gYdV+6`V1vg`fzLn=r9!`piKSRWSLVv26L~gD7#_ z`lZzmY}Z%1Lub+x=gmDP6ZAV8r5I$^JxtuzEo5vp2Q^~REkn|?fCh@dEsJ7Az!Cdw zq>w%vMD7TL<U!e}fm9GdD48(_*^~*=^y6PTMpMleCpDOySz2T+wv{zU-FLKk1G7<# zU!kr}g#RwdsVYp3A!UN`7%>hrtdXH)V>g_0%faUrDV2GWf)xZ%)(VR-Q1DD9VfO)Q zVC3kb>>?JhQ|_E6vO;3c?htiwR-eZg%W>KEHFr>mR%8i*=J;Mc!LkdWHkYR|1_eC{ z9zZf4V00!^_P}HYMiQRogrJ(>RSo4Ns(~o~dI$@a0u8GPM>$CGG=^YDYBV%9odxR` zcXTeX#cP0TyA)`Wi&lIhz(fI4%KXvak=MBo;cSiD21ygr{}U<kjkuA4G-hDKe*x7W zNynn}7#xLJsAx|<6lv_)$Cji}QO4xe0BaKk!Jw~C2f;Nirc||aMz2meWH}HeL*AS( z<At3@hh#tVg#d#XvqIni75*9l>Apzh)K>{P?$9u$5Bm(%QSBx)yC8|E&D9Ut!OGty z8^x`v0SQn@h*l^}&`bgKkqYf62P?6RgWk(L%)-m{VKh^hA<BYBb3heOs-%+WHsWn< z0KQlK=ufg{I_QB8Ym@L^uBe)~T}|Bv33e-QeLF$nUItLS(n8}N1tzXMz|6a!a<G8T zvX+maK>>Mei|HsWio|R&1F&`F9+F^LpnL_Ws;sb#9s@F1m^bj8z(KCfS9gpnP)Z#O zk?*W*z8zJec;fXhn7a(urI}zpqcPZ())_NAQGaMgi2$woZ`WGiP+d_51sVmdJS4WP z1i9t#G;8_xb0E}T(J@*CMOHh;+dgTL)UswCA_x+8Z~-5hRr4aV!e}b$jCs)2_Mv27 zOFpcbEEL%C8FL9Es1UV#DsEPWB0G#rEPa)>w3P(okKLBgD=HzwM`Z?x3x04o|7LT# zrHmXmUtP=qcoN}Z0}Dsgj6Q||U4kJXily=dE@6AH#9{_QB}F6?9ISpsqj4cbiyGBG zp(<IHZejBc-o9X^jvD3Wpi%?wYR|<-5pT?=3Qomq)w{M0_-pe+;0yC##S5S3!GuMz z)PYGnYcP)L#=Ddg`tjFy)I(t%VHoN<dAQ^Cbz2Mzg)DWX*YOvm0fHOV6klZIpilFJ zEoqgI%^fNkSZk=iIL0b75L8Ydl_6$WsOK_&s<493=tJX@4%M(sf>ugqk+F{{q!rn& zM%W?DbE!L1^@Gbu-Pbp1H$&GIHU?a1)+x0!vA1`ptIN*WIu<?(SVfcL@13H+U}4BX zgXg2kQxBRS6vp~cFoj672va;5zbYjPi^UWlb%iMy*+Mx4OkPkO)`XPvt;*!h(~%bl zrpp{q1<y>lQ-1uT!|>Xrwp6H{+e$xrB}ARiQWBR+bpQmaBO)yp42~wcpfI2i1=l#& zs2Z@igaL+(Cnje>ghEc(2%Yg!rq#q$M*cg9E1QbN*cbJI;#3;KRA#Z702a9F#aXsN zDyR<NJ(|470VYfgTT+)%<}oLCv$s1@m`o5#(m3r(OJGvWE+Q^1V!0@hgJWnh3=l+j zvS7q225Y0^V5png&yFPL_!qI-WEc>nFI(L~9PRCk!LA@HAYX9b5SHw4ELtY$m%0F} z`2z)p1ps5RK$>+|jW-ot5j`LBk;d$bl`&<Wh9f&{T$Ewb3HA@WVumQO6=NE`W(<X? 
zHT5<|Eg4bQhBaZy)?7t&ROK2$y38E*b-A)Ai4h}2s+Sn9z5}EUshbL~SRcKse&C+b z*b|!=vGkUF)z%5V%ylrEg=N-^Zl+ilHl>xA5`gKskxh+|&DoT*Q=dEpN|IZl+!zyK zY<Ud2!5S$VrMFGWpoL+#pq9+kL&w~}D753wWq*cJxN1LxdAdyLi;)JO-6;G{&Xj5{ zB$A$O_Xr!#A%noEDW8pOp;&3tleR>Yq{YU=%7`_~V118R0rFk*gsp8YlOmyDLJ~%q z?jXxEJ57br45B}eXf38m0s*eLKkhx&pGtwMCMq)vI<$<glZa7&9<*!>zB*AkqR3<s zs`<~9w)3!SlWl&o|3ea049O}Gea4D^x0U>X`0ha&i;_to*g#W%)9-``Emu53P0bmJ ze?vdD;`IjMN}*8NH`}!L%h$X_1SZaSj*?=7tWSjWWAm3l$Zu9k#dc;`W^4AmP~I^S zgIStjkfH%Dwp5ZUh`-02;Q^%L(=aZ8;*P>7W4&-cgPV9z<uc4`G;G(sLlbtuyNjA3 zRhepY`~>&Iib^xgo1w1njg>V(S1~ZdA;4{vL<E`|CuFOE8G3}r+F$4zUY<(i@!-NO z))!zzXH&&iMLys8I^g$yp4=I_aJv6+_MWiw#q$}UZx22Be){}*L)}ySdeZycoQ38E zK)Sa#$kzv$@iWHYnu&`d$|#*@y1=nZk%B=*y5BBp=8>VbcIyEQ6R9Y5-jB`wUcS+7 zo@D(APFk%_@bS8ir70djW93%Z!KO;he3;q=#?(2&4Z9<Ci#H=U>owRG5fA8Jl_ER< z6~>DO)Z5Z$*l?#2&Kol+Sse#-<+#fSl$aB*@FFZ(b>LRWiY@N|zEVb<E?;u>VAPwn zTK&U@aqS(S>1dcvd5V;1o_5>fPdm7b2_5sb)T|wO=eoq6GmM>^A!VlG;vl99djVNE z+S$dJ{a3U#yz-kK+T@!xw{)HEcy_laq292w1-YM~60&40(1L6UQFG#GO<P*9#W)4m z@WsF9tr8PP59N)LZIZJ!YoZ;=tA_M00IV~)Y=F(7#soM}I*2)Q?2lDV8P{$x=|qW~ zy+j0waL)pD;E^nK+#wCiY6AVe^S{6>)U+6V1%phMx~ak4geWz*`7?)_qY=jv<$G^S zZPMda2<3)99A@sJRWJuts#)1KDVmr}Fr=KiQUmi$7!aL&@k&d}etU`FO2N~}U~Dnz z=UrG)m4DbVit?5Fms8zZdYE3NPBWPdDL$PvF$dI%#G6x+_sP!PRR_%1j9<qFp-9{_ zb|VY_HK9LOfCVZzxGvEw&4}$!dT=dIa8RWQgxcCUH~^>XniG|Ms<UxA<V0W`YbNO5 zR>WkIm0iM3Q+NG(&jQoj)-jJM_*`gzJU{vYh<{T!6~pTt?%IO-Ps4#J$&<2yS>|1r zn`En$>;f1q4{>l@LXIo3VOFob0ZF*_k>beEXisrz&2T=XoFHQ$Pw*PQ6QAqgc>DqL zN*=?*XBCPT76v7zMeMAK?obMMx)P`dabU2Cvrfyp3<(2kk!x`|R3A>yD1?b>9}+?l z$U)0MAUE)L@XPNVJhM}p&zYBAmD|rz#R--HBN<z@hv?Rz#reX220LK;>%iOv<7HWO z>(rGG9CO<ztct~b@SrdNdJ6g#PKgoAjsMo$46JIh=dY~AdW~7g5a`&NVzLJrz+vA7 z!?hJaU2Vy!?C}2!DcSjX`r`ko{rSrLbK&`Z<6~gwwV^j1js7*;jZHs+oh$c;`}-fZ zPtWUMaF!N|6Dm^{injJK_ao<^d3!q~MA54Iy^@)DGsAd$aTxHVK-{uB6JdNlEC!6{ zpGc^;e)owJQQw*nk2xXls0VZ=DHfNESe$9$KQoUr<Nx~bv$j9j@xMoD?U3a#xRq%T z7umQjiKho_SAm>TaOoJ#4JXIGISMBl?nWJTb0*Wj8E^e#m&%g>chdZ)c+$n#Yx2=9 z(&M$H;vjaUO<0U+$+{|sZrX&t=W{>Tm1M~OPy+KY{3#{uMmJRn){k={VnAXtw)Vw1 zkqxpGTAXqBP$F+?blA|Q4Yrtl!L?vdx@lqWASUHiMk$>DTh$f3a7o{f6qWY<!2cRQ z*(7hRJ5{CX50UjC;2dl`t)lb9oO-}in~z8z7#3R&A&DEu>)O`~Q+QAOEK+2s(cJK6 zkx4FepQM;2Fn2ePO5a33MC+@*y~hr{1p2J}+p8WiyE}`is>ay4`X(WZGX_bt$O)gm z7ov3I%drVMx*aqb+ov2#xL<{XHy$%BW?XoOb13at<8K+HP6$4w8emM9qQ+X#P^kNw z_%Y~~`?(kR)AeOTG>YMmnfj`F8H(oG-;}aot@g}|dcjXDBN&{_8`f}yejS`2G|}O< zF^5y{Y3?XjJpj6v1tr(Nrgk61JL{@0l$lbydM&T_v=!`$yfljpCUZJ;_D&+#MLMt< z@)62jS4pH5vaoH9rG6~h4-E}1hc?$nCwFki1y^w1bPEMdk?PPg3LD%&qw>VnilUF) z=2!!<eAfMCB|q($Bc~~}-6q9;>s+Mx#PUuMz9l%mXf6o2JGi$@o)Rcm!3*(Z3Gjdx zOfz2=Xk8>QYnAc}&?=Oe)H3c>OrNjY&~|hP2ifYl>E-*@{82gl3X@I`cxc_czm+=0 z6`^ItXr4g+7xuzPjr<e=<3w}jFhva2iIQzVpS%oK1fY@91W&J8Lkqe{`dap+?j_sL z*2}akG|(%p=ikvP|61o8QNf~gyq5L||Gzt#@NDxT6la}5ZB53xIQQzY!+!jrZ4ng} z$oFEA#^$KV4kh-OK$skEv5u}4Eln^=(ccp0k{}^)SpA>SOFKMXLDIFoy>17{!aS!b z7Ai-lbEq$;C4a?XLIUIpEc9JD+W<WL38sIbs4myJGb*eJg35$x^fV1n;7GO4h?!p< zt1(6$Qh$k$ZUt!1J(mL(VQl&mcX!6>!KcnZ+|?k|Gj-asvipoBIX@TcecEMN3b32B z5X28Szg<IChbizBO0>cNbU@}PeZJa#{V6xhGcP)Da>_KXK>^KX<1!J*s&UDrjfVwz zehND;sp9O)MvJjC!<u`kOm&CXBE!%=|E4-wdvvn0C4l89j=;UZdFo-LlnF*0@=;He zPd13(_|4p+uPnQA3^7O(E+GmkPs(hnH%IZ2M}@4?9o);mx#|WkJpwZVq2UriK{m+I zDBFCC9VxoF(jc3p1Um5u++cx^$cx@%)HWy?Y~x#2w2&rgKxrYW`%~3H(U1~+=|ts< zAAC$r3!`Ulq4&qqeE76jV>AqPFs+<1#fjqs(Sf=K)+*@_fr2CjK)~{wAQmd&Rn3Z1 zqN1dc)3}1Ae=^N00<)$<1vxHRbCuRh72-hXRI&yOcW^`0Tsre>5FKa^h={?=?IX7J zTv-gN$hao20*d=%%1ZqCaJvgfk@rSQQ{`{w$CjKs6GXs+d7jm+s?wZ)iS_<UF-m^G zf~w?hne4ZVFib?^G#xz4P<d6O&C`p9CAK|(AvMBxLd8x#Gk2`g05x*YMFy_#@D+wc zevrl(1K!J2scq``6#*%E@iPb(byn)>MKWm6vaq;~k2q7clMM(8q<cQ3%k@d1FLG`< 
z-wi%)EXlF3)nZ$+14x;YJc~^IOz9PTnAXWQ5Zs?ra<a?hRK34?9sH^j+AwMpie6Pp z7kML+&vqqxd7CEEm~8}56>=3P0`JOKw+2<XC7KP^&H2EQNhzu+rT&L#uS<d=iCJ28 z9$OS(&`gP0w)hfy2jDxs&p}pH2BZ#Hlr!N-@Sgp0g<SUFOJZtVx+{XsZ1g?*UH`b4 z?G%p_znD0#;r<CXsM6%4jbT<G<7iRpv%n9lI37VArWEK{;mS`3v^ijgtN}C*HRkyq zTh8G4*-o1#s&Y&TrRymld0-iaFv)4<EmZ-;x--IT+>Luz*d?D7kA7;Vj%@iyU2XfV z{)<pHzM*3Y4|&o>{9`AvAuQKgUdttGSJa0E6P-{<L6ywJ*m`2x`Tgw%a6!?T6DxCn zq5{s^o5^%uHvUM54IY@zQ9687PTzqEir_}y{q664gN&1rp6=JxH!=71If<WDaa<`k zGYmGqi2$>jg973_aci2a1h*k@R+hb8I&t0o!be%l-}^ce8>(v9rOTJ@bGJA<NzV5< zsmCY#bAE$%4f#j@2M7Q9!A<!GRnN9ft@IJ|SdY%Cc3AImyLk0_lTkjc)S$&G5O8i> zl3h!bpi}Sj3SRet(~KVxRmj?9-+C@|g+mVs6ASC{qzP*%^tt<dF|CK1Z`ED4cj0tJ z8oV$3lSj5*?~b*77x>zMLN$w_f#^Cr7$aZn_P~5hg}d;dIIhZ2$nhh++siy#Yd9gs zwTTJ++Ka)*&7)cR7_C0|;fz5sP{t?=$`^g<OU$+>!vKB6);niSID?;9Hmm$UM>eon zzKqb?uvlt#x2q~XbHDXw|J<(mB}UFCUi7^hbo(&xyAxlnZr9qGaU6`pQ%gq7ieBL~ z(vFeoY!dh89w3oSJ<wVF#mkqE^S^1*?hnE3Zv|a%jyK1X_s5ZE^4$NJw5LCJ_TWs~ z0!dqgerDJ=-nf1y6U^%NRNskOA!-c==u>zGAJ@9?W*(3BwN!TmFmXPs>?4n^b#V_9 z9WLk9TEpeP7kxV)2jPQkVrmgqCf8X@1NIlN`BS<j`Xg4qNUVyXFNgaO|4>dqr&+~G ziAEQeDf#Bd9<w54C)Ttt7+}XYjWB{|D*M`B)f289smTb01v*UXQ~CI(Nyid{wP3)^ z`n+Wxu=6Y3c~}f9Lu%ebl=^abXLVo9S`IgX8<=sPKu~iO5xG~5Nl<H>+!B5J;x$VK z=ae~13v%R>*Ub52aX5_7pFhD*yznue@^r;NT%Nw`zmfc_RzsHm2>=teW$Uno<NVFP z_GYnX;Bs2Q?R`?Xh2@z;w=#)|N_`j$(nt3GVRC9VMg-@K#kstoDq=pHsFf<6Gn=+q zz}kcvQ*wAP=}$C}@ULQW!yX~FMK^O#3FtSgW0IvDe+ejb7`PPB>$(q%bB!Yv%Eulz zm^=+n;|dwd=N3;U;5eU<5$iWEDDWwb7h-i$Ev$esm*mT#H=<uS%>H@}12kQ0FqGrV z<o<Q<TRxF}HK3ShfY0K<cu%_=Eb<*CvKr|+=b1rg^b=D`q-WOeOIH;&fV?O~(J87_ zLegQC!W{!dV?bP&gibxsZ$t;B(qdaJC^CCt2sZ+NH^7HP+VSmbn~;;qy8Gws(H=dW zEdEO&Y<eZZnr@JHWg|p#WEx)scxoV<rZ7yS3wO)SPr1ViSEP&8b*e$An!jDf?S50F zX*2%90LMwZ16QwHC~w=;#ROYozXP{#>c*fWej+{y3vI=dQ69U}DCLk79$9{MGBfv- zU5W6Tk6@^|@cLX*l7m~oupnGGBy;sJX{g9Lm1-D0!1o53?7>k4-vujYUC0|8Rtu>& z@#>v8Q!s~NaV(SvCqk0~xBEfiN=QyBFn(**-SZEt&g#fmjpzq@jM5G!B~0oQaJRR{ zND)I{ZEH@mdWvUYJ(6sMG@xTrtY<b3>OgghoVaSO`&eke2=w<=uXvBQt&tkd?>!l2 z_rVG2td6ba0YOEoGc@khK(w`5GTFnn_YoP7kS`wdtTrk>&ec7blcKck<>p~9PEgF! 
zHRF_kJD8Vzt0U|V55_V4Sa2jD>HU(x)lN$nqJzvOwz_D3d8y&g<=frvoR#pSq!pq% zN68mL*DB=#yR~iOPZcOgm_yJ)wk>m*1lR{rFo=)k<rU85&|nCH9~3#Ha2*WAP9YKg zrN6soTbx3|UB_VMKi`sQA<4y5^-0Pub$rz*qli_k)H1L-gmAlaf*72e6ibRJ`q0Vw zC6q`}y_XCza&LVR3bQ`8@?(Pll}^V*thYbR7yFK&4Lvg?x1i49xLSo;x|v{V|2_== zuD|q4^3?MnU+eoHOwh!-9~`t7pKE;==+T>RgHnS?T~t#;2$#cuW6#~CdS)zXQPKZp zr3$$aK_jFxD#@)Lu;Z)S{v$7`v7^Z?(G2tV3}&nM26UWkl7$)hzTp9T3m-ah$#4k7 z4OJB4CZ8I7xA)YeeshRE%GuLRFO1HP%u-j{?muB2rYjr<Gi7BvV#O9-{SzNDmr}sj zO%gpL^U)AkM8w`_TNzzpX2bwvcDGEPY`c2QOoT!{O#HAVcZ1vWzyTZH^WD`eN_9Y@ zTfGkkf^5%|3Xni+zon{7sp4Bm(BzOXEC~6R0VuGfVbq9(1W$Agt7I9#9g*7Rkd%<y zMt{W6FQ=m#Y_LgI=4Nj{3|%z?T&G%6(S0MAB481BB7KC}E8^>7_RJ^085(!PFzgc$ zfZAolaq7K-hdVp_2`1gyF<BS`2Lf*|#Xxp<TAT}hD-7|3O;+)@gZN}A;pBR!=?A27 z)GX)GBJ8!-2-@IyF`w}g{3%r{`U<$ga#@yV!-M*PPm(OBgiHmRmZK}%LocePkic2{ z&i5-DUBZ?i+)~MON#EAFObT)`ho*`2eCagPZ$}kF9Vm+;GKnM`@l;{NN)5JGzG?F5 z>C~^>MdJ<$MpAwct_pP~srH(Le+7b2>As4RIXu)ZsxBmqrh9|`O0{CPy%cg>jF}w8 z$$^IOj@s_sKry8_NaNxK&5%d!bmHVp4qs&tEYKiAFcP1;<(Wd){@CYCxFWUOD(Lcl z@wC0)-No|lA0Sft_g6YfWYZCOg(s8v&n#OiNXvO@L5X7_z;G=azT>^~804gH*Z9X_ zCX7JvyHeX^&}L@v)={rVSr4X$+1UMlpZ9Mti9DYh^tOxr>o9~uX*1u_*di9}$H;5> z2nD3fLjSylH7J7sjzi<iIjXTO(8=c)){R*lKJu!5gqI`2bG8J-VjfJV3OJ{(qFS(z zofRL}>*OQsl}Jdx9bVB6&L{P2$yk=eeVXwAJXt7Hm7w&#O_IM%_{qnZ4Iv<<`n($W z;%*u{<O2XxNLI_6=0^n&ARy7Z%i=i!>^Z<XQc=y7iGF;%R)>=9N>B&j&a?X4VbVBJ zaCoDWRi~C`!zwOqX@HnP?5-gMp_{Q4iU6yGB6}Kosxn}^7=zpS;t)=}N`8p!zl!t6 zC(LI3_E*$W?9|uCXFL!W<1x>(I<u686T>wv%N@DP<nRAxDs4w=RRAZf3m!8%+ufuX z+XnPoZb2UNF{$ecst<HNT9i_d0&p&nL1MC>yI7e3!Rh(^Gso2Mu#~^!QQtMDg0JS@ zbbnOQbBP(+sJW;@YxzM<s}4z2i6!51Wc#g3<ggW*AvC350a<qDL%@n_%GE%OE|Vbn zfR~|Ysc!6bSgua)!r<WP!B@Hg15wL*uTa2Ypq}f_KifxS`3RoEcX!_z^2D;K#1iDE z{nL*js_w>JUX63)i@@c=ZWF8L77&H1*K!s-NWs`vj+t$@T+Pb$`zn8BqtT=Ddo%Tb z88TzG=6QdIZ;53_>DtEJex61jwfn>n50unJ$dD5N)<iS|Y+1CE4E=*A%f#!G$DJeV zQcM6l5elJ37+o`SJ|&#OgsQ!0ywObMKaL|F*H;!RwK#f6Ks9&)p)j{r{Rg?K$$0H` z8a)+EsL-$~1N%o2(oDMl3jwFd;j7)t^JYIy2CwI9Ea!>!X|kt=G86#gUdD;-hvi{* z51soThqi1-PZnM$)C*3I|JKQoGyZ!gu1^omaqh#_qS5rNwGK)3{I`*V^ok#L-9LlE z5~cGs4L}x56(#3KdxfvnE7}(_nA{4D1b2=sPO1w2%+w(8y@rcn7UMsR8Z5Bb!6k50 zs+`*NoP%|D;HT&EOfn|+;RVk6E<yYRVr`3pCzQD}D*5{wm1k#F1+RgQ#3qQuyLIa= zVPciO%58YvPXcloi?|kT<-r2(hYAB(3InQKh0D~k1z;Xz<y;7+b(p;x^ybn6UQ%Qy zYMYbBxrmN|+R~aLatqHDo1yD`SJMo<lpvo1_SyPnR95~%amLX(P*2%wtW3fnSm%!v zEu~^sHi7*7!`WD-qCmQ?0dr$ii+uCRm_BhYOzSjT9PdMp!SIsn61Ps17Uk}82}MWJ zNh!f1Qpydyg9?T)p(R%4KC!P{R<Buio1CFw%9JdBZO_&zaE2fLQbQPK$^C;7CDXe# z0zp9D;&W`UiXagYl9s#`)5WI+<eKIX&Llpc<rhyR&7JRczwRxXva4u1XChZyFlLo2 z-1gv95<xvv5%PpQ4iY$y@~&}ETEK$hy)rfvL{i(R8B>`EVZfI4lmG|qS)NF-0T-48 z!p<^=z+0Za5Z=!VC<(v0G>5*l%2Gc;lvyGBmheO^6>vAWKW5p)N#YBqbS&^^eN@J4 zw2okbI9(OWC(+GyolgeU_Ut?FAMlyC)h~q*Y*_cSBT_*QVeH`6*n?}k>(Y~Wow4fi z&AHHS!laI_h)6*0>>_eeu{O|;g1g7KxI|&jbKly8-yRMQffSHM#E!(=P-3`Q5K#q{ zNBN`SXNZhr*cu`5{Ut>i&xMR*Wp=JvCwz90uXfBcKmoo%6X(bYz6GAadrUsr$3U!i z*5H{EZz;s7roE(8zd4n)-E`Ti{9wL4&FWWKNDC<eI*F#{7h??@D2-P*(Tc4Zp7amU zbTgu&)Mz!qvW?&^{PXHn<NeDJpt3&0-jH-Qr69pfE0<eYz?N}CgsQf^#gp$&Fq1$m z%`sK}`U+Ov9NZirxE3}6mMu=bCg3c>-sFs=0k(;P_`a3s^s;2Ft<gKHpNx>9(Eb5W z?N7p$waQK*J{gFxE@LS)73g=IG);D1|6hP|%jv&3>sGMtYtPMt$QJL$61|WDfeqQL z%JDsINcs(`d3HnH9tZ02SE>D-m)9#!Au(*?RqB}5liOODnz!P1yj37SBH0KNX^_#% zUpql%oRfC7$r|-N3nC4y8{dD~)-r^NzOLgsn9F}z{Qb|6y?Mj;(O5@v+>wZf4i#KG zT@hOw#{f+M_WJ!1raG%3J{@ZdM98)GeQbQScLeZUH{19^g9c>C3*@wE!f?8%Yl`I8 z=34}@)x5=z(AAux?fxc(T*_(l#F-3|vQ%*P1YLJg&Jp#=goR{N1laD1WhLf{63YNn z8mC-zL{za?c4w`wH=;NL?c{U<c(X4~4HK9BrR032E)Q?hnwnad_`JG}um@FP67{Bx zq2MiJ_b4PiAidE5RiZF1v<&h+sk3@lXo9%Pyp6hbIGX2q8rauN`iYkurz9p>v@#Ss zSJd~1_V}dc9?&vk(=N6&81d5iB8Q*s(WA6MbPqhyyh3LvAckUCe`P_;8EQXFNeqyZ 
zf2PBLV(P{;X3}Vpr)0ZPby5C<ntxKB?AaO1DS#C`90=;#cbIoHqQt9E%>I@sWtXI> zl>n$1f|vzw-w*=H-^gMX4x{EZW~dtVng-0Xk5Ntfk)oBqYHHD%<&_OcS!tqzjNtT~ zXcYAd6iPXi(7JoV;QPnhZ@|XA#O!$HrJwolh>k$!J1j^^Al))K33OoqCpl?iunw%| zOyNIAO~0Q0G!?v=i@peEuwy9(%!g5F>4-o&V^zNK4GTAd%=F<nAbS`M+b2IpR|m~r zB{@w0HUBZ$er>`bad`n}w#hO)8%9x?f}v(i)XWHkoC(~he%>%Yz_d9o;nL~fN{mP+ zTUC6Im_HX8PiydSNuK7JyLU;Gx|j6~)mqimOljV?GzCr9lP)l=Z=cG0uI0OCk2x2? zRLl_1?4}Ck^d*vsuF^TXLeAC<-O3*#1KO8RbH*bVM*R(gtRk^5LOfS-MSfwsVKVhQ zOWE8k?be81fsR)|k}OC38@fjUViJ~YrqfFRPj#crpgZWXl0Z&O1!{8dLM!?OR9^Bn zKBO`Z>xRH6&zp)uhdie@sb+b^rK*=(PMq|WA%v;5MV9k<&t3;HSBMk8EQIp~G`Sb^ z>~W&2+i5BiGq3w`Ivq>b{rp|Lx`Y2WkAp>-5ono2&U)G|ZcN2O;n`SiXSAq@Afeb! z8RE|yv<*;@EI|#%hVGI?qhU`USL-ke&}bM5_~!muJAkTgkhz(=QMPUQE}H<poMKo~ z$Q>-ZT5af!mzUSkp1t=)eoHdJI2ss9_}&&pb->lZJ;TU;I1jJIIrk=A^Bv31|HIu| z$HldD3!4Ojy9EspG`K_K?(XjH?(Xgu+}$O(1b4S!!D-xGhn(}Cd(V5nbMJiL%=|SC z-FsKn-c@U@>Z<Br_j+n!8kUo)FPkg#6v~Knu|7&Ady4w~wBkru!iM3V$XUG{IY)fE zUzzm2xp{xiPZni&y*}3K+;#cfsJ)GzJG4r>e5>Bh))`!%^?A}6>e}M!eHlEm#lKaY zy;*-7=XnmHeYI{|?0Rgw#~YBjySY9XxO<*A>4L%ESv-LEVd-2sz{%0!zkfM;xVu>L zzO{JF^hD=oaO-gmx#v8#c&2F2KETdO_K|zrWnF*$Um=ozM@s&uly>Rp8#p5yQ~p1o zJ%128ztJ9gMmpxdQBQ3DMEB7BAJ9Ea41a@r-p~Jg?CEz?|A6xR?_f_S-F_hNk6wbC z-1Uu*N*{hF{>x($<dAhk{GT-ct<7)EnHc{QYWgSGi}n8m_WGZoCR#xwXEOsM5qmv1 z0^0u-ZThb)(|;NO=I`9szq3pkTB2xjXugxJM#~f}<jLi#fraE@Xy7+B${I+4hRH$Y zRiw#9pH>WLg|LNXgsen`3U3Vrm8S#}6%moU#f?-kMU9I5p)IO-9uhdBZhwwhGdi4( zNoSl*w@r9vaNVss7z={<6EqO567php5nMY&K>r|aMk<ZB&W(9Wrvd>Z6J+DvmB|gl zo)Ma^%-N5p*`*OmaU^#k#}Z9(EM<`W{pTo_67QEM(VEzn2v;D{+A#lKWL7TsyXaK@ zz3@E|M?(U35Km*p9R7ZLo<vY&yr2F1v9&-#gL>cbNZwXePniU(=39j_Nvo;mw36er z(~_`DneuzI1Zeh|ll3b3F9Lc@IM)R$@a{8KI%VpUzCWJersAw^{KL5d^p}$Zq8)jP z-e1}<YRw5o2hOe5D-0LTXs~#jUJ?hYxI(xsS0+@IXZyeyJ5P_8jC&CWQ*bz{PMQpz z>z9vfJGVLVPC_t)^GSef<8RUv0BtE%5-XI6bhPFnZ8@#?4_zO0Z<c}DXxy|3fMy^Z ztk^il{BE=6R4i4DqE-y07RTBcO=&Cc;;h|j-nV`|d&dNA80ZUyA=GGU9vH)*Cb@m} zYLG^{1uL1}S*r*?H(=_rcNhex)+zB1^5n)!G2e>x<zd+-Y*{C=3Vq72b0H)1<LI1Z z&7&a5W0q%QaXM}uAFKDl=-&6`^I*lUaccSGeSnL)Vi7*^9hESN6jj%P2W+LtR;YDY z-^+o*M=(F8qq8XJeKX~ImN-iMO^iyNI42x57dCYLYUbOkh;*)!M%JV^vKqo#8_(u= zTEGGy=hDFdIFyuO{r=6vb&!;nY3rh#`>!YiZ^m(Ue?t!<m|_tdjF(&YOi%7UHu^v> z6)91o{&$~cHl{1TfN`icI4jWb%h~bBF-}we{grU5@le{jkiIj<1FcCP!ntGT7>b$> zOqfv4TbJ_Ad>=eW#kB-04_Ft1iEp<aO-&{yJm?nnOX`M8X?9Mz%u}B;8wH{8P_n_{ zrf1bX&hH1hV4pMHS5MPKOEG`mXBjiHKQH$SFRBg>G-b%Ke>=n)idM!_E394%u;!2- zv29zLcA`{O4vBr{Q&h7_jk97K#o=%GOsl8u4U6Pd{xkbiFxM%00dSn;OIe1EiEVU< zep0l-EI8a+F9iz{ZQ=;Rr?RCp)x=IwyONdc{>m%qp_I%LCf{W=&O|NL@P>U~pmuzz z23#6aTwSip<28pp!AGY2>&`juT&M=(na$Yiq0)~Hq!Tk!(-$^AG0Y8<P<(!~yY^f# z01f~_XpJGHE)LR1zCZpi)-P}t!6%9SX$t#2GyO4n)6p^i=Op%<BmJHGcVd;5<<D96 z|0S{dZ&TQRKVkj%pz70`k0BTc6v*3$f70i-kbmg&pDPjTKdnT7|BqIp|JUlAbkqM{ zbb4}bWUOF#a{gR<HY~6cLH<d<Kg9m69|PN8y+zOXx6b;rb7bGyVFv<QSv~vT@Gk@4 zH)#95J_^`4y(iyMVhJ-t2LcU(KeOroTs;~8IKZ&{X_tSQhwaaWOY8TF@h{5xeU<-z z+#da(Zy|c7-y+Ks(6js@0Sx^gJ@lSt`;*CKVEMEB@6G<H#9!~pe^c{47@nCPzl|$_ z#&5}3*%;pUfn(76OI`Z+PY!?S`>$sI&_EvM->d)49shgv0(y>mmNq88?fBcA|6xXD zBYOuk8*2hudK#v`8HNGy_m23B^9KB8!ryKGXPW+9XC|iq4Z^t61JYAbY5C4!;zZ#r zQBo{DRgxi4k{|Q{X^0nw<QfH)AbtuAG*=wC8wMg#x7#0v0yMmb_p1QaMtFF6$<ZmW zTw`txW3-B}Dxx`g%yY7Z(eM1UbN%kMbDfp(?%Lyw?X5W50}=`ZQ<umVDeZen;mPLU zTRn)zF9?DKwQ^-wNH3rV>}WP9lmW1DT*L9p%ZV@u^dJ=Tm9;9P3iid~g{$8@NO#?X zgSzn>NcA#^c&}?z{KHROx+@vI7l{g!37rQ0n_SRQAJ9asnoeLaNxk|n)$>=Ziu#KL zjPDiJ`6h;+dD&u|&bE?;9J0J#fDWpYeeT(fB%>;}>+iqUc`+@hG*G9{I$Nkx^*)+` z*l{%?>mKP*f)e(;-3U}AeziT$0@a2ra80NvgpqJ`4^XD}AFfb$*ZN^zPsaA+z+S*! 
z++4kN@M!eh#F#G!@%in=@U1KBtD~O=$1OhtJ_rWyF)U}Np<4s<E&NdgP1sA&D>x*+ z1n$LTge^=XKiP5rnVu{(V!a-`W%n87Ijd<1nH~ol%%~pqarY(c4#_fvLR><|6ht-u zhfV#nZikvIOk{b=c4%M@UgKaRGR}TCyiyIEw7p5oTd0?S`LI4QFN|WZAYj4^3?C&6 z;ge?1r7E7hpx@#q?&qkhMqZS}9#Kr#Sv%<`+p6kre9=Z(^rw%WNd983KxSJ+PVg?$ zm3`dS_|we^7zO>9zH=H^5-d<z*h+vo+bUGd20IPP=h`@xA(M;<AErmG>%pIR17@-x z1#%&jx6CJgXq+Q^Lc9?x3~6E)B($(-ZvgvoYx4m>CQOk0a}J0iUdGjrI7qdF8+2ab zE7)A9FCVZXdu{M0TG1f39*YT;fg5*y{Hw$f*I$T+h-bu|6S7MMM-QkS5nudJn=PM{ zZDs5Rn|bWAZGC_u=j9KRLS9rH${6YR8lP9tInbXN^$+<vF{2hU5Ecbss}6;m!L0+v zMJq)vo-mL7i#Gv+8^n&?Hk+vTjGmA@NT^u65OFXJDXYB72?0BpY<#~+&SpuM`Lhdv z03KHKvyKqR&NTc<THtyNe*;x3vpe8XdJK&N#e?Y9sRC#EnqY6}u(=iDTYyU^SYvOm z>mZmdRL}j1KhF>R&Fat$f1RYCVF`xuoy0La2`CI%k^~CFoOS*cUz*}QL?PY2af<g8 zpqgAUxJ5l@d5U$CtbbvEVw_iCyqSValejYFVNagOPdA11ipI;Q-MZs?<aqq@0`?O5 zn)y1U2H@~!YzATV$LW^>Cfk;`Ft3iUe(1ZRK2^vPc}#t<<WA|z^hx9C>l*dxfBU*0 z<PrE37!o)cI4OvvWLE>w&Y<Nc5bld1mQEuWPqI#C5p5D5G>Uz>bg=zcxJ_5puoxm8 zLblPgV|4%Rc=z}k1WV>F?sH#Xi|&`EglFNBfHqTb_yqd7?w5cH!g7Kzk6=xI3EPaz zYXOv<ySTM;Lh%{p*4V1O@g4S8tj_d`jddWKJKpV#&CmBZtX+s_jW9QhgbW`gu5^(( zrBBX3tpzBm!gTo$3I#b6tTplZM*HeB#7_rn2Ug3nQx%|m%E@p{C=NuwhOSF~ipS2$ za-5YJBG3aLLsx1Ja-6AM)!5+_{}nMr>tx7HO~#mu!%Zd=KY4(LDPR*&eDBtCHk2(I z8;QP0^!DM6oHagO!s<H6M@}$g<EwZN8|HXs(7}#&;;RD_6wv{SYotfmGkSD<`n}t# zXU4AiNWzNjr>p>J>yOnTdbWuiFtiCTtB}M{A@}o!^ewc?vuhvMBRn1gR`qfNWuji# zA4M*i+-Mhs-Vh3FbsQA(u7>j4adt0B6Mq?n#^NDtJjCP;{Dfslu9L+(<MWo{Un|N< zT+G|UhF?rLlhwBtNE8=ki7dpbiA6|i&-+}7XMQs^HJm&0C4dqFo{E-|nuY>*s*9jY zN86gN*v2`VSGkC%7kE=pTv$ZhrX?*Bzh?Zs#5iF(XOX;)7)DeGw1xR~+1Lg!g}Vp# zicEB=y0-2QPu+o?)~jt<*5NGqbEN|QN}GcrkO=si5|?{FG?f%5c%#fKq+QiCa0Z`U ze(OPGco+ylwXStya~(`;x=XK~ifD>Xr0mJf*{iXgrgH+HU~D7R>^%Q`o3>}({!6c; zYD9-wBqaql#L1oNuolhvaRmVth?Iw3Qg6JW2UCY^f-ZflvKYAbYR(cwm!dSwz`^Ow zFCgU<KDqcU1&36r{+Y3N1OF2X<o7SKg=?f#%0VP2EWeZ+aer=cWq7oDSqKKDj`Q_e zz8xnY!0S~N#;x_D^gStuWLiwNRViO=EtCaIb$BQ=-(stjrzowgu)6iOkuhDL-qbeK zB3JtXXDXKWZq?gwl!Nmq@yU9MCX-Qg=5T%BEzGnx;3;4AULQ_i)8Yt*PxQVu54aGs zu{ko_k`phwu8^_bdu@)O`4~tz9qMs3$Y{<UAh@k9-tO$;DO1}qA*PRo`FH<pqXUX( zE6)Vi%dhoO+M=)!y@?tI)VL&Ln9OVp1|`-SN@F!lQuONGn-RMzyRyXRGFK=vH<g2_ zad7ZZ8J2q6Jl**k-s#*wE+;1oMtm#uMywe6wha@Y=Oe?03m5EQkEtW7Br2M*jZ~?f zk<v!NFY#<uhQSz!IKaAL76o>N?2sKJ=OjV=k+Bx#7a;eG_Fd0dA`#LEB1^Ct;}P51 zYywlzjpgw{i@ke2{?c8^-Y!Yd?vJEwHK(8Edx&tTgKxPW((PT=@m<zEa}YLC9uA-@ zt;36RTQ5c`k0^yTm1TYsxnU?$n+@7KeFrKK_2L0v#!NP4<^XQag7I<a-#=|t!k3Q| zZJFgy0H5%)(89b%CuZu(i%E*G@G#pOx%bl^nP(Xr8~}RUyho%mRyK=Tvnmnp#T^|B zXO&hq);3{%EECrrN<L3wk4#>z9hF5bA$f7Ci^++$Ckw)weVz<RElE8!oa3q}<7>hp z=>j1hb9&<11TWvNq$FZ|e~5^7<>T%XD+7doP)C3siLhoWI#JAOJA==UqOYMF%uApf zv{L4vj2_Z{yQV!?^Gv3aggQTuNa1*q5cSRO9?dpB^$txJdP_OixDK#^lbRxj*qe~Z z%aX-VSU@??IBviuQJk0bvRKARYRXIjr0xsyA?EmNefOB|Ri%Z=IYWAa{=9wBBhxZz zW$a+yI_H%&UR0XL+0}DMHHcr^=a76uTa(0|;l~}6rzn4jMiQa`bB$ccO;YG5+oeTh zX~<!c?IO|zZvjcG#$~%&Ql~@-vD(AqBcrx%Fkg@Nb)<uAP_f-@q<P#WCatbA8^>ma z=kZHwjIxqn?H7q6MJhQlML;SB00~1B+5HF^ycx=i*sw5h&NrGv&|>HdX1>NXTY4pn zn&7Fwy4{06c(FM(<jwdictL4RgDi?rX+wdd*m-6}q!o2C4j7O<`)4jbFk-{YBo@jm zi-v#MO@KlT@*3$b)EC3~syw*GUo?pxI6n&f@&>9=Ba?=lv!q6B4;KK-e;K!f4^=#k zXM<3<IDHeyf+Amq!zX-5It!7RGketLo6|ue`kXC5DxFdZ^=L`A?-;I)&qsL3`$bT% zMj(&{k1@{$dV+=_e8-m(6HYmHL;dEgq%6JWTlUv_LPZG52BNnf(p(+e4)<Vc^kkL? 
z%RE{2_^@(n;+>upj=XhaOTw9&<FSQ(Ht(6TO%HJdgmIcT@&fR1sePbkh+P6gBr!FU zDqXBsu-5=;@FIK)9H<SZQO<*Wp<}sin_lsS+cxY#QzSz!1Eyep0V*2B(Z>!s(131J z5QD%zeuU^I2izEVf9XYV2+bBhMr=YseEC4jELQO(#Z$twoS_VkG6CG!s;L$^Qu{zp zN5Rbps9gbeAtXl_Z^zb;{15@45=ePaneKfR1gN~CHbsH0QV@biRS7CT_|2K4*kb6s zRuX@dVi@u%mA`?P1CHB+e*uB>r!Z8SAQEi7b_wmTaC=obk)df9y|P*`QW9P)I^^U# zFeE&KOHi3yRZmMGDxPoUXZOFbRO)M=4C=}fR$fQ1*QT8hbL!jhqae2+^>xlQVtHl= zAn~9`;3YpNiEC-s7Y>cbbF&Fc#Id40G3_v`hR@A8B{RR`II!$Oakl^}Kkap6-_QWH zyoV1!6#a6kItrocX1=V9YB@*SKa(z5pn7U&PCuwB7v6s(Vq@)?*+&rG&kQN<d${dU zy}87cqM=dL-tTmL08LoG)YYU&L#iQQii?l$`A|YfJe|!K5g{HTTaiZ$`HYqKzRz9| zn7H`+78+5U?2p}YPu_1k3-yC<^o?8RI7cq+ma8`E0OOu4B?UaQQg734uPc?VUa5)- z&bN^m5j@gI{MYN7+UieTwz3^|-QzrtZ#P9IdmTJ)6J(t;wravz8<3ZIMYIz9?0m%S z1t3}?go5w~eNLdnlzr5boMMX~G)CQS9ER{6HfO*aeeIt&VLbawup+#kf9Q-f*B@_G zt*>`DbTy*Spg-~7BRLts93QTJLB-Ku4f9>WS|CN$KT)C<`b67vrZ4q^toF;uW!kqx zzH(Q&yxqKxBYw4PUemf&MP}rkGQM2k<N{7?qyKQ5Nse7>pV%29j}`3{EXY$-P*Rse zPd^3@X-N!f%G$MJUoKoelN#1be98crlUO{U-($T>BR^}`lr*BOgJ^^Oq73^4@A}%* zm4>(W60(0c&D#0!8S7eQ%OOjYqmlYCrsFd{zS};-u0r0ljwm%bfd=$lzf9NnWB21N zj_JwQ%kd+)0Qbl$!$d1CPZaEa$~r1rcCLAH&Osb_#cG)tN>aJ_8Kds#weV=5C;R95 z2!CEPmRxtW<O0iBA-T8$Cx&nmZ4&+!0q=Z%a*7(Kv`Yp7?sznFc$g1-!hVUF-zVwa z1pO44!Ee{P<a8xP-;{(?W$Wszc0|uS=MK@KNt&RAP#MP&#L&^>)%}d?kich6D2ct( zW6H^bs;zV0R&7ZiCsL!WjoPN>J0cpN6(u;@2K$<Zl?lOU1T_K8JOw4716N6)=!CVi znSL3B)GiO-Ip+qm6b0F6KA;H%3NYF3;x=h$umx7UiYu-H7UDDH3by9)fz0r)_W@kX z9#n^U1R7eNg$A!e0XlN)uArCTG{F^pLW1kat=?t*l*;)CSYcOE{#?0ZN)+UDA!FO) z<9kMXF9T5KIvmsQ5H`7%P0i5_#7v~Ry~PkUcF8vrxk%xD!;xvq@n9A(A?A_Di38d2 zp52`l%9HKm&(6QJt6dCn6gw2orsf-4G3RZo_->f6B1RfGns3|s0@JH<XM@CVW*(%9 zy!pOJ$KZ7~CWVV(-{pbVq1zK4Ch^)$7i=G9lnR)*X`ZfkJ&L{JBjTPBLPjdoku|_! z$9E;OzCEXRJ)6G?D5Y7O^6DIZ(5%La*E1=x<ms9ecCZ4oDz|b`b4y6GY%C>ZChr%f z9yWIHXZbWAg5=AwloDm!9jPcD9jlNVS&-pbE00Ud5@EF^EQh9{z-0)YD;RY}r9opb zM#_YnVpo|xXeN(*91OF=aors#g>8q4V_r7c(=1)zAo_J31kzj@^^x2g-{h*F+33fa z1woMGZI#xQ2(=dd@oDt_ZMt(@8AdPc$ewkK^`};0JZ;XC{_g@~ou|Nq7}Q&qsBfR< z7S<w<UJ*5$P#HG$_dnq)$eE4NX%>?;13JvVr+u!HGzlNoQ}`&8JRW!8NV;GnL@epf z5nY#dqnS*;F|Cm~k{O+7oPr4a_VHB`9;=vbB@!^4ni@X4MAT8T9Bo9Ng<GvkAYV)I z<K9I>DTc2jvh27ESx*qgP3R=*+p?GT3!L2yW{+C*e(JY57`s|<3%8gZYk4V?pWQWZ z5FRyX;32b{4G%56_#bB`d4(d-M80FmWx71~2l+(2a(13w*ERAqB*);=`RqQ7Pa<7K z>`I(sahyLEPQ&F|oWH*BxcdZ|v_6CPlBRmvl}pp*L6SnWKlqEAor#F33XFY<dyMx< z76v~vP?HJ46x#k9iLGAu4_=+=vxUxvK-#!b&kTA0Qyk|{dc`qV@b9}hJ&?HF9kJER zWhbo2IHDCN74x%+NjM<MA5S{i>TB2fpBe_j!C*b@xK@HIn>RM9l``|ga*QTsyh>W_ zpDVwti;jv}OB*RM6SWPctH+98DXdGpFeOPuO;Km=n#5rq-bqOWq_on4(-IWZoEWK7 z?LL^|O%8I|2OA-|B^Wiz51;D#t7^`X{pkD}oK|bHpiOZ&8}rCWOj<5|GCBo(!AkvH zeDE`$HAv<b|9to=slKY~U@LlJRANZN{b}c^=)}5$21o7sa2tL5sA+KH06)JvZe0Vm zc$I4}PK|ljXxwQ@06(kc+V$$Nem0T#3h8ZQIEYBRI7#v#U0Ekx>sj|I%7*6kYqSrX zn-}6`>&G^J67odXZptKE++DQPP~352Ve*M3(3=a~4)dT`6RyBg?V=JdETObZ=7J)O zU4ajQa-~EY)t^(CN!m|{b5%O_0g#ZX4R7T{QF&QYy?he>UfYLbBA)x#Pf!o=^del| zugX4i_%Cg`D@|v`uovIk@t!mlZK-K~B4g*2z&tvYvAU_tTg*fmI>rB#V7~Hf1&bxs zWkSkXlFw2a7H)EzqF0#6gwWT`$P2vQzCpVd#LvCrFEYF&&UPS+Woe&Gh=zJ7oz-%r zjBuV{r4|ws;MwNf>lSZDs;J=26)!Ou(^BW#hH8k0PQqOrLK`0YmYaACxXP<PI&D;0 zzRY5qU9u6mws$?_S-3`T^;TO{t}8S6wZvoV<-K$@Xle=E(y7F`B0MZ9t)+cH@rJtg z(|09d(o@G9o-sOj39eZ6!>kK^uy66qw8@I09T}g#dRY2EWFj0hpZ$5qQh3dG=UA6c z#iZfLQCOsCP%H!0D$9^O2WMmRdIF+8U;{=6z7Xiq&l|RsjsRcQH7@f~dmY=a{_(*4 zG2W5pMW_|KpT)Oy&w?3zP}7=UC*q-PO;umtZ^AmkI873IUh9{@tl&Mi%7&C0^mbr# zjc?d6^kWNVcV1%6XPL0gy=zEIOs+IjP=4u-*Nuv&XzAv9Bf3-uic+23G@}(SvXNK2 z%kR7Jr+lT+sC(UhM|<4fM>Pw{r5@=v*|K-fMFVZ5YVbu3axavOkYi@@`0GAiRx62( z`#d&ytQ(Jnmn~{alHEt6B9j>xRP|u<p%+j)WH|SmDO0P4w2$QnRGVJgb;_jQHL_|m z8bZCOAJmf6dJ_&4_DI7lyaMZ&6BsN3Vw5Rq!(n(hh2H+19}Jj4HH!v`#2YQAS~!h6 
zw!SMDp@@#lR7!qMai>=jl`>kG0ok8lnj-bpuNM*(8>&PDK}EuZWb}nBnVjn@^OmMv z4~Cv+aI|<G>m$e0%>`~8+MT5(D@)nzTS|}=b{7ff$%TYz+&z>>%k(tn)zW3^Wxs;& zqqCX`=f+A1l93`n&x_QFVZ&Ek_zI$w5BnNK95zLJZ$w>`TX%Zd9-X#l2<<E+W3}!5 zVlddtlHb`mmDmpUer{`fYqSu4jW@E1&&NtXYY8A;cLly6ayOzFnX~Di@On%8ResT8 zTQtv3xy%`|pzlUw>e_9k{>08zsMH~*md{NEo42NSR<0Nau}l!LCpk;ZHB80ZyViY; zU~{ui(iE~N5P+d&FW$+_)-F#lzY0P+FDT_?X5<uUG(A1ld;=PO#{8kjO=^>=KfoVg zqRzN<xjn7w|AB42_AuLp=MDcPkkf197`g8GQK+rB(%RJ3f7RGyJ4t4pQ*;2|-6%zG zqV9GdqsjW0XYroKs_&7f!c{3}%^Pt+Zh!Bt+7i17feCqr>DKJ3X89L=PV0DFQV(^V z;cTW=$4s#X4l(YF1N($SCkLk<UIt#3I7N}*17VUdn);=j#T9h00#~$Q%b6uEMH|nR zIbBx`=1DqQ`ZJcRJ*Z*MQ-u!xA2<N9x(#!~a6flWrpg}yEf4z$6^kY7g2@bzCM0ya zGehkjd8zd?t^o7AjMl4E$n{kxyPG2iW&EhldRq0^Hr=__C6`~^9{W8D$T7GNJ1$*> z0xsEk0p4XKwA4!gqwRX0HP6W()cACqp9t^-;!drHd}qnJLjYf)LPH)j5rW0R1cb&W zer_#q?P;BE4lDW_oGz|v`i{=8e`_aCg=G08AMaO>62S(~pfnS?7hGb<`N@?bOh#<t zyQ8X0et*-Yb_}0Ap-s+37i*&a-N*-@v_)Kd_lO@AX)9yd1o8q5U;<lkD3K1Lg!U%K z-M1gx<W>f^bxa=8`39&)lNkbnSL3Jl?VM{RIs-e&It7m>q7WhTFm%*!mfOjoIL|MA z(!3v{yDXGvr%g#SC0S@S?ZO+>ZJykxJzQ{!dtJO<Zhwa1-GRkal2|Cfr#GDmw}<C1 zUNjMDW66rnNb?g+@VpRRc!CI2#G948-o7TfBvC4WI)LIt1Xbwr(T~>|lPK{Ag@|Af zEgsArtS${$rmUPSvQ47^#7I{{3oyU0qEayJc^t=6u8<phM6ypO7(2J1HMQ||+lBY$ z0$2fIgQ;h$`<{!#leQ?lI4HSlBIsQzLTzu>Uz6OMa{Ild;1v&{_Cu?_u9O~Tv=VCR z3Re#mnUO!>TJ@VyL|CV#CDRaDE2=_&zsX8AJN>}Ya@E}*no~->=b;tm#w4PbAjU;B zsFXZV&sVG<XcAs)W@bkFN=>~Z$s|pjzIM=D)-UO}0i6~%?{l1m{IImp=E8L`?N`0{ zW^A_i3-NjMSF6GHu$c~VA#JHO9PM(a?N2FKiF9g#U{zd&5t(_LwsSeMVXym}0G`1t zH?!9oc+@q=nY<Z#)CJUwffZ+j^;e6%bj5si>^@IaeZ)0D2HA%&S2#Q(CDCNg__>iq zyYw9wsu@nR%JxquBe0jSpW+s;NX-Xz{q3bO3TaY!PQ%C{0@h2Gquzny#y~Z6qGeQ} z?puibIw_K}vK<`CD4fVMY`1y91faLMZwMy}xs^LHWsC(*vkF4CBCy5gt-q!j9kdMD zDq>oVr{Hd(8yyxkhG+13@dV5<+(V=Y8@}2yO4(c^zXJ?u7%k@4c|BZO@@*X*3b2VJ zP$&^#tg1Z026(yk=xF-9lE{+hz8A;F#yFLck<Ml`T37dZ&1H6_(fAX5`r?Cd16-;L z9YvC)l?)%@!<RMgQ>RndQ;%qsZ!YZYT@|Job#iVpo1EgEbrNg^8S?t!P@t2j2)Q47 zj1<H!ulgp`jp;3g_!8^fKIf5&MYsvg%hq3Vw&lRmHpltI6nl!=&}^wN*rdpSq<~O2 zC!Vn7oNx_<3|yDbNh?r*C3%{PCMzFR!1ifK33&Z9pB(5rjBzPM@4K3Nc0HAZQ#!UZ zRmZP%?mH=myM1MPg^fa~VwRx2zN{k2HQ5d(cu0<);SbBv5jXc2379^NiSZiK0l1u+ z;Nags+(m|1#D!qgdr>hw5%24-2R|Op6ILUk6kWjHQ$Gtl6ZsdM_l-TYkU3@_bS^?y zj-0FpY_Uc2P|cJL!{=HG_0vfV;*p-a@@XgXk>T($9I|~>l~M8ndt@G?99$~x4nNfm z67m@8_O|?xRDmrC9VPNh7EP@f6%f6spp6F5E^_zc=LbY8-t+UBm^>}k<t4nthvp^^ zWu;tYm@?&9(l5ibmrN#;pk+aY1^TmlD|hk+Rwkv5*$T5{8;M%qlm*V=*!tgSXmvmL zk+MeU4>(#Rk?zTsWN&j)R{ikmFFY)^GK5V>;aKL^f2%Oz5r~gNu@BOm9#Z@%Kc|<r zDD>{I;(($e0Ta_HIUh02*Z25%_6mR@3fSEl+z&r(nY**Uu62D@@Nds9y6|RVDpVub z4vgyY03}0>iKRV8L4Ym}{6-)beWkM`0*1gG(iqd&Eb>Kl6RjQAk*FQg(gpwJ<>4Z; z;So!P5U_J-WLFd%JE{)NB?%u{`aO;OJDSDN*hMBAwJ=o_aHwypx9`U{RSO}~;6SFD z=RVnmgC_0KB^A}DuJtwnY3%{$Q*l{4V&_v#7A24^;f8cJLqk*B%AlQKG_C2XRgC9i z`*o+zgE#+hL=OC0d`H|dK;o0XLIwT&sDk988ZeDzCmd!tmQk|bKDSi)lf%;7Al5u# z)%%xjY;156TS}uUIZX(})Ld(47t0+L-4dL5)l23#E62_p7B#5p<5|1NYj^>Cvn%@1 zWSK#rEy$PNL#C{h3lMEEEc3$CM>?1c?lW`O5KJX_^8#EI{a}jYX3fF)0ixK|hqdJ_ zMb2B;Y~Fn>j1w6b`e!p(dFF`P=Lu5?6c9E;$)+4q&=4Jsu1Xe=smll~=*uu9Qf3@) zm4~NVRgd?o3mIBc^{1j~Q*1}k92qOoYZf@l^m;xCZ@~r}bGbqWS!%J<BajHxl{OvT zw-xCrH#*B)shyhHN{aLji?eHM*Q=s3a;AH^WcjUu)YKyC?3?<g?YEvYGK{+@>XeJK zM{o`Y*lyo<)kevga&1O!)Fm@|!(jaLrHh5jW7U5ZO;hLzD_N=*#RoF)5)xzA$%^+` z2F#iYP6nk4C?b+YV`3|g@tdj+E9F0eYQ=O?l4yx!z!USg@;7QX*l986UjX&OGW}1l zy6+d*NzuDUU~J2QT4_;|1QQj8xOPSOs(|F52giKR*cH>_-tEPcJyppXrxL#$k53EY zm^HX5G6o@~Y0^5Jc5~p@jt)>6uOdP+Uqt50`U&c%fmxyVsXt)LW&FXt2E_Imi)yAC zWBAh;ml;aF77#DZwg^AX6+B+MPsSG*$EcFTq90N`weEaB5Mlc=-z$M>dN8gHwY&Qs z&K`K{Um$LULPk9aHFkimmy1#PFljd*Y>ihnUX(n5fwKOQ+{pS8$(^L=o7YzNnT(H< z&-GRSYsdF<)xmhbQR4+-w?^8mIwv#wmYBrOj<=O289|+ut__tHm*X)TZC1~PhG;ER 
zJTHtzj3<iak(eJhYCj-(f*8y>Z=P@VO2uHudwvw?%fRyta59JuZ9Sz1h!uJUsmU%e zc0f1ZD+crCTQI9_w?=S6y<LY{tNf4^7LIj0^i28s6gtn}!bJ966@@}kNy^L)U((TC z(p`e(8=4IR@uX<cF!CX+QJtuEt?Ja;a)p`7ts$r|Wo+EIB8B1pFYFnQe4f)&mX~tM z`77cZ{+{e`$g@w8^Hl?<=?zkv<4^rlmG9FQV!iCA{@YCy1e=(ybU~Oqx3tsr`>Cb= zw!67(b1ko*TiVKZ`v)3n7RuvTpRCZeTe}KRWzLSHv{yN9Ro04|<5+KT*t5m}wFjMX zE9)ZM=0X`5gcBvc8|kr~qr3|(XX2fVQ4%JbQ2fJLw?R~@qRw#s_OpO*5U{*sy5`)~ zHKDt?aT9Zd^lNe6nb2OEO3eOivHViT^*YSUwIe-h3XO7x7ZoGUV^ZNut2q=_!f2`= z6_n=1oif-3gb3r+z9`k|dX_)dMU@>Yda=Vfg4vz~pq;RZ&!TI-%_nO%AJ&lCemCzH z(hu+~ZLD0OBy`pl%?;{PA%qr76ntEp@HqAH0Ist(9v`mXP49CI<{_>vGuW;7aA+9A z*-%VquU!CLbjWhoB0hCJDxkAI*Fv(QI`vlGA!5C4gf~-*d>)h%upGA~Z;;+RRXj~- zqHH&H#Jn4^IrrK1+3Eus-y0Q${>--!+ZJlbpdc-S+#(n|MG*1$(IRbI7%wg$R*ZTk zi$xV{9_FVr^nmC1RPf%b#+;hnNk3ml+0t{9bbPorPLotxeit14n;1wz-dYOXE-9J1 zMLv;r&sB!HH!+u92M($p|M|Kwgi7w|rmi9qSNH_E86P7>^O_qfe!41ywW9Y@Wc{wf z!w-=o<(sAKi?;nBQ?6bMszWA(i;<wS=mUqbWI`F@+YcTyWAu2A2AdzY^E*Wto0V48 z?+{+VAhag~*U~o>@o|)*f;gI~tGFIJggcWp^4BYX_He14a6YMici7jO*pDB+UG=3N zd@~J@<bNdfu#De0D3>v)t|*}trCCvy6%9xUQaa{>_7EL6;HX$1(H@eoH(h8#I3YJO zUMI9IL3k_;0B9emyoqaS(ZENy%*}aqLPw0!<kYIV8|j2wNz|s8QC(OmghI9uU-)hw zUImTaZwHm1ArLQZ$Dqd_r=dVevMJk2yd2J_s!j8ls3>J~x!<xr>v?aF907ra)jrh) zox4#|aT?ZNU~ddC7FiI-Cx_L5r)`FP<03NLrl!{A_;duZnhf7fXQH<8(paq08g1_2 zJ5+um4%e_HGLCRAwvV(AKgS;Et#FfMxU=6#;9y(!_^rSxdsX%Ob|MzD`dIK$dI)Oi zoe;{^Oeacv1n7rOB@zvwKVq1{Fj&BRg^K59#7I?AY9!A<$z5Sr7-9dunCqW1p`$-} zK_7Ld;iRHiXYzP)_x{VZRc60XA&ZY$Zl$7!5^*P@K&Lo9`+cx1-zD|ROk+|f$EN7x zrqe`4M)=cd6Pn55y-iW2MpzBE`_;m!ffa%cM3oKE<vazqFAF!e`a$yA2-xHHjNKZB zSau6>Bvs-Hyh|%yEZ)@TjV$C{?pFCl3bQ2ZAiY9~F_H)K7Nde78T+=W8{6g+gli@i zf|k$Ld+tzmS~R?i0;K)~0msf?>lDUV^79<p9Qo}Vd#Pc|>btu$VCu_rXQUMG0C#YA zD6D4#qMeN3BpDAxr4TlLyuVPn(L2$bJ^i@&T{ystV=E{<NOB(s7;=qht?16c7{{M@ zord%i@2|p}A9PPcZs$N@)gATaJy`ran6hqH^MF{vw`jeSEi#!}ce(=u26w6=2>2Zx z^FUDx?%RvbS^QsIO^4*{#cg)t-a_*w=|>VP!y1^PD6S|kP0XRC2zapbYEQP>0+lp| zm^RH4%N3SPoKm-k(nz<50TbPK(@7sZz)R^~^*>fg6$M?aISM7GrWf^YRDLj2Dhd@w zX|-O?I%^8l;BQ10*MFp85cUx47A$-6Xq?uNEA5#6F&DjRA;Djt`TkAn2zz;b=tmqc z&0Kh|75PtrQ9PJg#?EL`1ohHRXM134WNAF=?zC$^7Eq4{ZX&yHzJ-w+HkEy<4!Wfe zq1vW)7`G^pq0SRxbaV>{7wpg7NEAYYrVvLt=a<18-SmeeO;D>Y4$l*KB1=b$J2zH~ z2SkUnJJydiPfh1pCug~pgqtzZPlCE7&j~T|7CcHfYIOI7AwAKfC+E1m|1#$<-rSqH z%s6@_`eMzALf%IoT1>@lq|teqDplq=polzK>nQBOK$RV*Xca?H!X*fRwm-g%8`F0n zc771FH;=1uFx!od-=T`fbS@0j9hVXPQiewzdV;zy0JxOC4yjjwG)(y5^S(1_a6K%8 z|1=|0(4mg{6hDiL&-6<SqQ#!@A|pOIyPr<Xx8=>;DdOsUUVMf?+;m5O{VAN!;R-4F zw2a|(6_`?_(4r_mL&s8;zRNP`W!NyJImVA*898D)vI`|TWh@RnL!fqNi!J8F(zw$S ze*s_)mjkb8ck)~3@l^XSzBct`%ikQ+4RnB13hhn~p<|b;ileqgV5e^orh3n(pr;t` ze1hlnG;AN`e?6DKYe+>LFN>V;Qg(VO8V5c(`_x)sxU5YY^o)MX1Ws{%FhkFn{na(o zyA!KL7Typn(0avLJbN;?_?2T9%8I=^f};>yk1S?47I|N~D){(g)8=YQ7%P#Q>2v~d zXnEbD(<8>cIBa27z(;?e{cv%&1E1tCgsfTai>?a0V27^P*I!4*-zYm%)-JkAGs(HU zZDgXgZ$6qOoog6U?T+ZI(xn6o4)P~?J_3217Kfjvwv($KLWFe;opW|wADHg3uU@a! 
zzID@*Jv`f+_H2W^;R*(_=D^0dAL|T*DZ<`N`Q8|haooQJVkYYh)+$_-47~1sTlXCV z<qTKDaRY-e@Sxr!*{YW<jM6};f7q?9rnBkysJj~2x>;($seL9!thLsD62Qy<0M~bx za8CcsU!(x$6`HjxY=?jTa^Jxpe4cwBi<zW6^x+|9#C2CdE8V-{0~}-&YdT!f+1Oo~ zBA0cl)k8mat<lP_Cd7tp8FNv0#oiKHJsUz9@DFkd(@kmqn_|(;cq=8rS<HR~P+}lN z!Hd!vOl$y0HH%SrHzJjW)hTw1F~d*Qe8(twUb=%_FO1qc!W?vY#U*(SfN6QRFCcu^ zftNW*;kh5hbKzcwu}gwB=|F|~K1T-E?(as0K+_?Ub{l4>aA`uqPle(=Is$B6FN1SG zwPGM_+QGy`xSq@U9soFvH@f*kPr2$U>Cn0@gOi=2GwF1RMnz(<a^?kywpu1ICzJdx z+H+!~R%X@e)8_9S5il{Uf!8sMjP($fm}N6uq2of-%m!&OA|!Y?=NlJ$9_$G-TgBwg z&dUm!!{lIN#rq0?EppB?a;tEeAjh9ppUI*V_WN@ScPQJJX{gt73K<(3<Rz}Y&t>et z;Iy?RXU)tAsHasICaJSaOQe{VvSexgI9t4bf$8!g@_|+RlHwau(js^hD@z0IHYApZ z^b$un>3mCQZ5CIY-88}o62pIHD?_t~$GNZ@dA_r`Ia+#~h@x8*ExV&oobbXrM;Jo5 z(}FrJHE=2=EK68$DLP>~DH{YANRzcz^)W`|d?RLu>Z(1`#N#`KZwek@>w%4y|1ieY z`%=)*zV9lZj9+YM@1Ypj4uqV9%{t@C=d{_wJ<sTqi^07R<V403IhhEX0{c>L5@A9p zecEpF9SAY0U%lWIfLI^<ixE~PEsb%0b|b2$E%L_P1?#kX6!&gU*ThLWFm^jD7Dywb zm*sp2;(TZ)<1@)1!xQ7&wt9c^H~>r@uiMVzcCCjyp<&J<7w#`fbbgjQ{7LI{oWyRO z%pNTMIiMgKr8Hml@M|plWnpyATpWp0ftd4BxVE1d%MiO-vQVlB;s?W%{#k+quC|go z$sM&33!|huCYm(fFiAPp9+s-;1u1Od!=7RjzBNgk`%7Y<z<p9lc-U7xV#~^a*5O{w z%{pBz!Eh<K6o{jqnR_$%5a^a-WQ|@&hrk~@!BBSfMI7=!--4wHmDXHVGkY$7es*DH z@i$N^ET3WTlh@XGv5DUkT(%&ab2=F?UAt!{iyMK@eBVeXa9&LHM0XxR*{LwdWlqW* ziYI*m6YmF1ZNot8xmQBF>G&|;rKZ%4*d^yEHjcD?%Z~22;KIbNGAql%7qbStp<BqM z<^ZZ*yGuyf;Q_wFXFrx`vI{$JE8IFQ&x~GJZ>x;;%?@KtcDJmkQ9Z0^2XB~z<z`1< zamT>t)5}Fg{M#pws`<jGgK!<n_a*GY)i!i8AD&O{!c>4W&|JX08ev!h?!d)Jl;=bm zw}pKG72n|cHj=)M1N==So2}QsJ570javKL>l&%7~gM`|p#7cmv6=zK#OZve7uBDn6 z-xBWVxwpstjjp7^(X84sG`J>I3I)&dnjrgVFOdds$lI#nLwZzLReP?RfEtE9?GZz5 zS2IbgCM)7)p<~=j7!7(lwQG#_m1eCziT4ksS#_`7Ce91B)FBmKEr5x<R<nK2TySr^ zA}Rv23ah>bpG>;8PJp*ndo4bTa*c3w8^gJHdLA(ZA@<<lZoD0ye7J%s9L-?qMwB~I zvp$;#_>~V>azbt3KR^$E_?Z6zJ<tQ_nErCqX8`=2;`q~JUf9OkQP9Z2z~0Q((Z-&D zme0Z9HzB}A_Z~+}K+pEi5Mu<a4DUYkzc2e;M$PcsgZ@2gn27@n+dDh(y9)k4yzL37 zndt!p)Qk+w1oU(O1_HKsTHxL7{`UpadRFg|wnRmQ<rNeu`RvW~EUCrbo$1XCq!eLj z|2*TfHnB7!p#ELCf}@d@^1GY-yOX@0p_#P_0V5*;t(2bYpTW=Q>DZXwBc%OHHs<%% z{%GPoo|zE?%%4v8^8e<B{}%$H@g5KjK)}lU&IJ75JQ)FOe_ZqXddBz0|8_kL^Ly;I zztVr5|8@K;|L;fU_ulw>`u!Z{kMz4y|4aTKhW_0qGu=PS$nqXK?qB!9{61%)r~l)g z|D~S)LBHR!{_&)v`$Lz%WdAFl;a_t7mf`m~;9dW}%Kke3w#gsmtbjl3_?NuD^Zq;8 z{|6cWw8#G-^Z#(~UpD;zO&#=f?;WG{*P#6mnD#e$^`B$*Z-D_7--FowS-`L7VDx*i z{kH?{zl`=jCj|L_1r_`awET(VyoUy~cW@Li)w3sHd>@UHdjCo=(9y%t3R%BL3j94L z-v^|Dyp5Hf^?&62ClkZ~_$TTW@Gt81FZ6|xj`er_{}71q_wfCXK!gqL(4LC(F97$6 zmL~?ryNOPc)L}!?(T||SNDz4~Bcz1sR49f_LkNWBU#oH{zUJjsT2yYx_g8*|!kk)| zldpsgTBuwuYXBEoQS_!Rq%hp`dZ%CL_s`z6*MVNYR$GAHXWkPJr|VZ-7<z%R(aT67 z6>JhFv}fLJ7MQqhuwRa|>W!U_J9oQuz|Qpo`#xNy(5kX;U%atFep%uby<hVmU7T=v z-P%(@w#Vf&UAPRoGM?mm=&5dpqRxVu#H>R?Xihoo3Ve~O(W$jPI4sZf%YlA6squNd zG;%Q3aqXW;M^?YYJ#M1T^t!Qs#dz$1($a=~s--lh&AdyQ0aws{18wc{Tq>K_mwyDg zr44zh995an@Yb__142>HBg<de!VxXwEfWR`UBA6=o=d%nv;{85UzfKm!sHqV_N91~ zZViw2@<EOO6GkMDq|k^SjkbwaJSW_+tgft;RBNT$XU*3y;1CA_(F`{Yi>3X5>iTWv z?D&lp1NKQ4YN6&F4d#LzP4Md*R>j~xOVd}M$gE<GOK{CqHC5#0;H<%jsjGdXYZz_> z7b_7DImkzNn4GwMJ;%?F$jiQhOsLVT!8CzFzS}%Kxt@?^0x_V}T;T{mzGLL#OmXiF zc0U96g&kq<SXP~29WB_{A*J|7r#|2NKPQ=fdJOD>X3m41LN{fz9-SOoPwgUm`}jz{ zJo-Yq4pst{&sKvw^d@jG@G4V+ziLP8j^yQkCg)59`)dY5<@IBwAflZb9uQ-v%MZJ6 zT1gfXFG>W`7DKFe7MMF}i=;Cc<y*{|WixpeQl9rU;!|G86gMYk%ojU7WpB?{kjq|< zs^DVoQ0F|Z8ppise8gHB+)17|o@oA(Pf<a)jydaFG&GC#G>eiP*zTp+MjK-c*Ra>v z?18BFDTb*zumK+>nn~XfF7y0MvFC2do39j}@$JE=SnaDX^RIw}WY?o>4NcS4)7f^I zi<@Dt?4_imLqx0M#JxC>Jq#Gs{;%F3S+Dfc+gyTVkS&7`CJzewVyCvhpfSaA@$<8D zbo3z`n1WR{@i*?yb%vqr$sgz+VDE@t&0g<jEKP4nOvzipxFTKy)?B}4MbZwyUO2ge zb>i@ZB5nxWW3q)vl1L1ttnyof-#NT$surD>-=_!O^qg$fu12oDuAbSy4I_?JizN8r 
zE%2S33B~Y^-sT1DgQ;@!`E-LW)SxC~MsJ(ltE*c7aM>6<XL)8i^=-oK@#)Uq_Sy2; zR`}RW7!MVeV6Tc|NYWDi9Lgz)&|@F^oas3Z-fT}Z4!@Vy{K*<?&K#@>D|u}-`+?3U zc2a-`5#<!M5o#sSBuFK2JCEjtSNKw+82_yF-~kX~-?Vo+dNSs;^A_pp+e>%Nk7e$^ zJQ-|6zwyMK(|odTRUxn*WCzcUqXWX_K&~CBvQeH(W;1}enO)cV2G{km<C^;w<?xt5 zP8s-WNXM2U8MuqW5K}>rKFn1R&@R%sNi4msnGsXIIbf@1jn|Z>F}gPR-ND8V-_B=K z+P+wC_=-YjAmxgjdpu^7cDH&5f0JjM2h|wb21Z0kNt2=qgQNc~E?Cq=BB5YdUb$6b znW%%P3DoHo=0un_<I6mP!%?%`s<k6oDNtaiX{U*IG9|BN)fF^Rym3=w_@+5P5lSVZ zci5Km!)Ktv)M9l!^$s!CMAYE+Ia<RvxQ1*}_XiW**}gXzrz={%6>nIXPW8D9_#Fv3 z5e|AnELUK4%LmUi4^*!qFMpC(xoZ*a{tv5CSzDwZ(4RASG`{(5t@iGltipJ5ElUae z^>LHffU;-#W`Ag*Lr3h<v2*K-gXR^ATGg4BGKGEsJA~lwL|=;mUcydgmf-1xa0|$Q zswl#$gfizLc1eG_zzKtNA?V?OuN>Wg8H3h#WSp=^4AY56oJ!g!sLqRN0p{F74o@~; zJlk#Zw<7^I+#B#JhpGiEg0P;?E<pP>555lLLmW7NW(|$$y%mn7sOK#@`EdFR(tZQi zniCfqbXOC^kshA7PoRbq#NHngM*SnFH?nB$dLDuu#epE~M>2>~u-4u2)gV*jSFvkd z?+>knK8S~qm%0M4?5<lZj%aH^?^d;6%-bzN0{{&?{piwg(S(6daZx*93_+_dq~5tr z34qW}-(3?^CUy;1WUHNZ5d~QXZ-rN_oKuR}8pOQOsfT^2hD-eO;HzdD#q4xm=N%D; z(cF&goZ*M!OCwbw*sGNMuziwi1u@?G!Uksj<vE9nz4}RWY;G#X20L4UmmEK^!^gXF zsYOK<rJ{KBuuIp9iUwkA=n(%8U6V(4>A|B1Q_M<nByt;U{&FWJeftZl9%bJ%Wk@DC zs`1xk#)84>pKwjBV!CsuO|)n6ER0h6C)#KHDS<vf@zKti!wwXjTGF|>Y8Dm>WD6cT z2IQkYMzTqe`{=l^WK|E=fI&uA#(>WtgWI+OmWsBUL#iCsQBmgj9mbmja+O4zb{tGj zg8F@&-*u>$bIn1_LB2zxjJj<6So-#Xn9lqsg1;f8Vi{KkCR-U-BBq$d)C7shc-9KH z*=3|05z&RKMAx!2r@r)7opU0fX)Vb(B?tMK`D829Ijn=UF|-k44R1wm<-46iu^)|v z)R6<KgUMvt^u)Mz?it<e`LDPWKa1#(U28!hZ6aeor|v8RR{#{vUxi&*tyhsO*4f>J z*X!-n8ykKwZfM8juC>}RwY#G_ZZ(tByW+GI8VPfBf@@>tQ|DIJt+3M4hvS)P;aaGm z07*YtYp%kSK-*Y~U{pn&!G%<IJoJ9$q-eA|j1+9|V2d|R<zi^<jbPDkc2*e*51Qbd zt#%I4TmIbEdq4rwnomnteTN(Ha~BQUMbBnZUp>E1l_2!?X9nh+p)lw&Q{tzq;Jhe@ z;2xuMi*8$$C(R#2dCo?HN5Te}T+(MY81ce{jTh@sUkRpP?igdnXmF7e2D*^J$uqCx zu`_6UP7WMBq~J-A%|33R9lElt;ICFYOOc5YC*#on<h{!kn`)}p2%hJ=8{9_gZgNrI zXZUGmlg`=uD<*f8kRwhh#oCOl@>){mBGLw%YgU<a*2BXB^lM9iT0^HZ+x(LYmiJ^y zQ15c>+uPnX=hJgDd6->-$wX+>@=x{s6+4B=pY@Z7CtNDN%Undpw#f3nqJah`eqT*) zv?X1P_fI@gUI~c~TQcL}6o;cXe(pQx-*C2B%DsQaxh$G^wsXf=@$}HhO*`qGF*!x# zF?fqSJiuWqp)2XOQ!ue!Ey=P&5FPj4{MiHX6;CP?IVQ?fWY(84;rwjO1-{TiqF~LR z28WLp<gp$R-FdQ^PTp5zL!`7`=fo7r5r;Kex=+}~at+YNxbCb*XiUs<N>UxG!a{>T zuTGpX@!YvWbdt~domYptuh*9xian+7BD9q8kuX%wOM++bm#%RR3>1|%bmj`*IkMa# zF`_yi@wo_~K>QgwlE3aN?w3`GoO^USu%~?8Wx&$hEK@gNnyIrGni<S8BUpdKw;78y zK~`7Rqu4-m(M*E3ST^!3k7nMfWw=1e6$fjdN}|$H&#;+fiH_awg{GmOymx`2ka|J_ z$;6gEdOM5Jr!K6MV#T3u1+WBvW)`fnK_!h`pKB*-(DAi<i;?bi=hu34_QOo;_aqaN z89-+GGs-Ir$J>S8WT^l(&Y8yru%%A(IPz|?{+dGG2uX;U&YrRX;_wqfZzGu1CXF5F znj<DeD#Q<!&omfd(T)IIxcjzUc_oJNT`C6@pu;Ku=h^coX)Z2kLl(s%-O{@NVJ6W@ z_6+HiY$dnx))eDto^R=qh}%3}4R@VUj6ObPz+Ru7*VKg6qX)@T15ecZuM}xD8sL4L zl+IiK7XUgy#lJ~b3Uf__U==qZm2j|w#G%r#iD17i18Za}AaSeq=mSuf6WJ3-jP51_ z5JO9&+vw@WraLomK>gr)pI4MBhYV@lb70a%gEB+70|!>zo*EyTeKEZE9NbA3lchil zwN_|3S<aF1cs#r^fk;?b4i|*@<#z<r6O1ES<5N^OzI)&Rv;vwrIPN=#RO8OwyKUEb z9~2v){v1kLK|-tBWLI)K;<v*2tvsve$!r$-s@tyCz5@piFt<DZ8OZ>SI5cH1;_@Hw z^kztB{$nfV&D_sn!foNUbA(%iuotcd@&y{@jv;akaQ8I4p5Jv1Jh{m@(QVjx*dX*z zXRkN*Q+%Y;K3tFS)0=ZMLc-s_p-%vB1d&rh7xeXZE5=z4vtbE;Be{z_DD%(pSV5vd zI7PrZ!EdMt_|j72iG~3PQ+n#mA7)enz!#QD$pl4p9ZDyq6gYa^3b|b3G-T*HEgcRh z!@6KOTnz$!mXkh9z%lRxi4fBE;U;_wvO9Q18agO#|6s@Fv*thsn;F^^lugGIrYK5- z0hjgURL41QPrd&yb*uSn&%UngXJg*Dl(wx2`a^=YmEzZSh*Sz#ifK549*;Q9(V0Kd zVVVs1hou+>K&4)xv@?Iq3eXEmOX=Cts1weV>a^yio#aJJCu$%ND>F<&Vr4*6b?+RY z<pEUJO>5dr%l%*&Ms|?i7wT>|VK1~4mr086Ic#Ze51Cf#rN`*AcfzCL^&Of<rUfXM zv0nV&-j+_=@1t*HFSCXW7lw;33NMQ<O0UVUN2RfPi$2x4M4#)t#(j<FHup>Jqv4~G z<6-?}?FA2s7*V6dC^f`ifYUC4T_VF)0c?d!RYMlVH=<#0G#ZwpVW2oU9OWFPMkje< z*F+PWojACgwg9p2aABgWs{(IBZ(D#%{32P0V#vTlE#2JR49@j3awFl%K2m|o@LfA@ 
zXH*Zgc`{8&lnKt}?w;%s(@il8#Rg~pv=hkH=2tWnewY?0i?b_RT9Uqasu3_gWXRx# zRHB@*a{VAMwt*7h&r7EpNg(<7Ltk!t@Y)|djQ4uJ`rX?n&wJ{X$7Yp2|NJ>kbGmNW ze{}wmA3nU%bMS*No}cl|OHbT7dm!W4>G@B2KhR1Vw-ivPg@P8vKro6hCDybKJGeTb zI$XM|R8^~erBS}LIx18<5)M5W0zn!xDEY^v6vZRGJXM!IK>u_>?o4wt2%~PmVE3!W ztL}`kKRt-96oXZQ!|xd7SnuFRnHQOBBHV@k%Z<g}x&A91YrX3o8@;zhe(F$#7{@3_ z(;ZG;!th>95o19EUc@xa;=qGDblt}X_mL-1h%B@!p_T$v)8SrqX>3`H#DbKTV{Ou^ z6vfzp6h<i{MWDtfUZ7`EoBIVj@z9;2xA8ta6vR=NrNMPEWFdk!*LLE23Rp~cGdu!s zcArQyK?aySLdg^4EybvfP+(R-A@D0+(c)?J`|AhUsFNCd!6FtZ8YNnyM0sj@N7=ng zZruLZ_4OBc-P)?o^@|s6@^-|(_}SHOESW#|2b*)p-uq=9-xPdsL;DY|d(`^~x%&D! zKe+kk*zVUB?3{b)!~IKterH$i-=9()kM;tB*Yhc+<A+)?UO=Lx5V48pny|=&bgo0q zh){|4s9ew>E1*HmfGbLe8Tbzz-~l1c!MYa&HP>?B!0D$zU}Nx1|H2RFe??<{mVOu4 z2I5i>IhhDAl9_OjBQtjrOWcPifvqvg@}Xz(voW58Wxk1t&?}{jX0q;V>YiYnM4`Iy z_-^<G!-oBG+JOi(4EAB{Th6_CI>!;RZX14RS8jjqm%D6zb4DH7$Xz_J9Ofi}&^mx6 z0(B59OzR-nbr7Eg^P)*aVL2vXp-_iUvrvPk@!hl%|4SV(9!&rI@Y91id^*p)N!I0N z@50TvX;*GOv!(JqU`w?K`=FPdc)=d*bb)RF<@L{w0`OG_r7%JzVGGp=icU*`m377d zV}ZO-X*F);HXE-CuZms9aYK`Z7CfCyG8Ss>#=rD`IsWBTc%66fP7d5^f#-n_WKoiI zfMrpaK!3t(EEm&4F-iBrDZ+8|n2#RgV!ZB!yOdHvkV{2Q>?F%A1<Cs77Quw<!y3X^ zv)p<N&6BtbC-DdQPk3%KkNHlFEp3wCC4HiEn{}+yeZwUkl*o-zn?$4^y54)=rkoJ0 z@DD=gg+oSnHwrd4g}a-NG*O$Ro9g2<2)GUXgKWzP%*<pq82k4-_wU~z*jr$maqZfa zaqXpGwRZ3>PL}q8kVg5hs5ETBD_3M~-IxH4p5Wq~C(flRMTsNzzauj~dba1skA8sv z=fTnC(RyLuH>2@OxshZhzHjeUcic{K#i9E^H+~MdGEoVt!JfT{2ds?IG;tcwk4{Wa z%ulRRZdSxa;VXsZ$|~(9;U-P2^ebGjvZmBuq9`7BX-!RaHHwy$LeG_fx<qm?CF;~3 z5IggqTJ_W(5ZzRQiX!cMQKnB98MZ|)r37(mGNni99lA=d(<x>6=v8{Swxl%1I3!lE zFitW%lmP|n;+qbJW*ZP~dqZV5Mm9ZsR<PH8$TCx%?u-XfW&aAg2UZYRO-4^srq0Yz znPrPCYG2fw@jf!iNfJ0d$W}b51X%ck8qX#+d>=_|dvn$N1-IOFQQI#!<$j3IS~v9k zaif3mNbZk#=_RQVGlxyRcT?_pVPDJMd6zs{U-?qof*q{`xeHDI{PAO#RiD}_=|h){ zzHse8%FFZfp9yQg{wqOm@18>zmk?~*1gw3>tV<~rLxUW1fa_P6w4s|zHlv4xXSts` z_HrGL*Bpn?k&=Ivm`-<zSyIB)h*f4yG*&joG2MHSZ+d8<u%zT#_wDY7xCfmNMYrK6 z$Tss`rw4ga*zg)*o|>9Ft1?XA^{dJlE`<4rr&Q-6rMzOKT<4=y3}YNF3#4K)mUUVi zIaE4l7B$*H4UDH!49XL>J}_Bl0tT`xphMGmB@T#uqPzk+&RtPIh!039D*uVk>!#X( z?|9{`+^>#y=idMEc0A&hKjGTpFW0~F!>9i`Yw4%!AO8y>1HU}=OMJ!ekK$=N4!_xN z>phR<zP$Uz+~*r#V)6$)0`g}j(6kGB@2FK5E5jpXn?6mW)P-cIw1Uf+QdbyRDJrEY zCCH92;$vbdTvlTI*TnaqBJq7yB)-z`5?=w%eoJ%%2aH&24dEh^EXsn+%e)v0hJ!@Z zRNw@a6McTK-{a@R2p5QBw-bt>9F1eYYQ_=tV>(>}zjZiEX*2-7mD}efK%vR_pn`2w z33T}g{`%ROH?*u?HQ}1O58RU5firjibl|A*_g_BY`P^H=K3~ZNm*o!be=3)IdiJ2_ zhYTF``IDdir=}EI{TRpwI{u}hdo7<Rl*+OsA&#dVsVb!!k|jzXC5F2}n#!FYQ)3RI zh8?`}|AArk;TPK^ROqzvjAgUqPaH{qms7!CGUL8@VSS9RIQ<BhKK(9tv#{^^T=R1| z$Mckrwm}<ifi@`U4lB*taF>L8n*mLD7;GVpkg)b&)Z5Zn#q|Ok=Dt^bb@;44)$ddP zBVb0@RnLC6?%TMJP9G)hJ(Fm?hdtjjpXu_Y!2f%J|C88bg(KdGkF-|eOJomra}^aR z?hcS7DkY3TW3(0+i-A%n7cUhRj8m1#3efD(vdUJ%5$dzkwlEz`+XBFc3<pdHN9g@z zWm_e#EJ?*wtTH!E4W;H>+{@16jqJ%ni=g_{x?_w@oscHBZ`*kpYOMoFkK_}PXgC^* za-yCxlD<?~N>1{rL^9|oi6g(u6Nihvo|puC<w7!!qZ-h**My=}iN{d|2L+-p1R4i7 zwkciwQECSxWN^|vLxT7NQhx#>gvLR<ZXSe4qsd)Bmfn>+wDk|UEgie?q(5%K_@31E z_+@*R-SWy+@u3?qx%-CWXOrgVaL?hDtM=kc{_q}N)v=)SM+26(jh}q;#9O!Q&wbT4 zyAhj!kCy={>wu0p^ww@lH-tK_yM~^{9N1m;4feL*0DD_iZEq7vds|X!Z-Zgm;jM8P z4KZP}uw4Kwfw6KI+KSo{UkBFpByh!#Bf%YmgPRe@F4q_V1`CAw_ab5bqez%1EyK2S z7-2rfzt_@7@Qj!>V`m$fGuf6EE1P<<#g3$IG$q4&^W|5lX#j0(%zwtsX12i7mNAbk z5Lc5c#akV>nxeuetwW=P)``QG#+SMjC8erzN&}~#dH)Qc71C_ZV1TW%=n>0LsY=Vn zJUHgDJd-@F9^Qjfh`C6%IQXK-Pk$=#(>V8@VoSP>><YU@)OrHq>rOW_li2nph75*= zfcFO;!|?6W@;PG{SH05m%OCvm0NxthcHM|oH*o(v9qN2z@yC?6sLfLY?G(^bOUH!g zN(CgxsBuT0vRo43{%Zkt@_RyG{GQPNG;2#PyR+lI`(7cxhYtVO^Ni0QKuB~!T?}%> z_BZ!}7k<(fG_uoSG9Cp)10c{4MHPi~H$5%5bWTCwnJbzT$qG?5k>SB8AmN)m3<(BQ z@6%#@=Br{`PTOs%gIXS71&~8mmvQJ&7d7<LX{N8!C{hTRlu3-XL{@UF<XI`OQU=Ll 
zB`8Fga0IreGR66=b+58gDjNSX?XoiFXbM=5sqO|BD+HZG*a@md1|35iPM^jA_K+9J zbmWG<nr=A?Qdle^fISvr+LjY_Ab?ratI2Lm)@}{^F{zS{ST`c#lD#A%^K10=`fs2s z^|AU`7gx<E9ktFG+{OGF$JNda4p}3DoN)|sP9)>Fk&-2kcbw}~A0Q8M_euB3+qkDB z(M?=V=Kz6t1%b%A!!baR0hIL%T^C{t+zDA$R1J98=`<*wTHS4K;@(HLftNmTrx24n z@jy$}6*Xq*H)>eh2aR-M4bG5Ga6%LpkfLk3fsIZw{e_s&Dzph87Ra_;=5XMS5RG$Y zn}R@T%+CNg+`Drm3tk6wgVARJcaC`<8?I;02W)_kzI8Hwj`aK~puzXRk$aE%7~|S? zI9CPd_993As~t|29tE*Nci-6)&p2!28Am68jTz^lMh5Qg2S@v5>@IHsF9og0f&^*- z2EhKnkVYIg6DCez^8s9eFCO3z4aS#Z;l<qa?YSAkzEl6a`@Bg%=1zYznt$`uVE*u_ z7~_?P!LBQ#x*T7>!>zF(q{r7F2X#Mqbkr)2Q%D9!FDA+0^2tQvI9cHdQ6!n?VxlM% zrKdm%mWfrt<|Q!TtS}?!Y)r#3ZIafiE!WyKL6gBQU=dFTyzIX-5cq<D@O_(59o|PY zr?XUff?#9z0<{Ym(Sjq0sVB%Y9N7#=W59=B(9H%p1_jV?%fJCtIRX|sWNUN=n%}i& zbVjxY+2EjzR32gx<2@k&2iYJ!ny}-@S|TGky|8-d{)s&v084DJ1VA4JzuMs|U@(pQ zFb^QH9)l<Y>^=M%NA|sTItM6Q$KMF(YCF|t+u(D+e)&jv2RTs$y<trXyRg^rdLw~I zgy#+3s|B<O|8!uF^HnDo2m~V}R$`eGJre^~c!n@TxyYDiUh0_{xHLFDd{N}~z=OmH zm2#ZBR8xGZ7#MI=Yfv;uMJ*`Np$gjh64fKL|0NpQrM)gu#Kf8S3AdHt5?6|1QS2i= zLeZk<V!JNcqWm6j`!CcJ1kVNB7mtDBgLrCYGwY+#K=p%=*+9U3L38k}c*vW0^s^ng zJue^3?c4SnT=M=Oab)f1cmFo`K6wK##Xor^_tQUplH0obH+beRa{tL4#0@yI3u`~j z9ku-yz6a>rfr4nKRXfjI;w9sZao&rKi@m(2mx7c*fuL=-x>GXqm1UzKe+zb;9FB!C z{KG-V|66nR`__1<uQDt6&nvRF2i<GXGB<`AWy}dF1t*Tg<0inp{s*bPXZ+>&wEQFY zdhS+y?MshjFBo`p?lxhc(>-s`(id|*J<oA?(~Yxk@;PkUnUTL!_y^FA4^`oX*1eae zwxmcX*ytl#lrN)}rMJwR5Nm{ffpluP&=eS!x<I%fFgBGHrX^;imI>E#*9e=qO~SqC zA?^wEEcY&Y*MAfp4IB-Iqe2?h2*U+FE8G*jFZFJUPx@<84gO4OY;bIJRN1J+xYTrc zhB?hQGdi<mdf7#>i^>-X^L<NF*QV}_-kJJi@K33b7Q{Y~aXTX!=&X0Fp^*$9^ag8$ zVFFJ$e-$THrGkC|iE+*o76{!zLPcq*iz9MHsicHco*>1Ar%1b=q7CGsL`<>aDbg+l ztR%&Y=X?^5)wI=+ns^E%lon)$6{BA*RQ+B09e<)XzR}&x9PomD9>^fG-gw=3JzMa| z(Ml?rSFB8e37x7G``FZ!%Himc0(qO1)*Dlm{J%G>%sldw$AA4=?xpSRc+~5Z<ge)Y zblcKrfzm(7{RKz<v~bqN^L~;|Z^&GGaTlKT;Rkr`zF+2k`r+=}CwJ6kAI6!TSp8w{ z{Ty7J`)%d$5a9e#knPU{?FUggK5fO_niIQ+L}!-GmzS3D3L7$#St+ryf|{eW%h&)8 z1@t1I6#;i={x7@S;Re_q-&J1OVAB1P$_AsbbrrU7{tvrKQucYc-Y9J8d21{HN$2^| z^J7!AS<$7@mCDu5wXR##TV3}%o_2M*jyXSb8DQtcOqbU*U8YM{+z}EF`&H3R$9#mK zqWA;hP-%dwkPt;$Ab{fKjEjRn2Tr*(<$PGAB)}qNMFtgVfWMpt%tRI>%f>2}SF}}d z73IPImb1nGqaI2O-}XI6wIKFFM}jm!K~04Mho|9OQ>KoM&DcXTLhndopC2lC8<r|t zu8hkVX1a$_b%9qfTfqrhCmhO{pqJdRI<08NDEGoz25YaPY3UPg3-|+`1lON{5J@nB z!o~^WkCKi1-@4|Fx5rmayC8q!m1$R8)Gt2n&-l?>?wfG`<GBICzKOqC`|x`u$%+YA z=2qZ=H*XrMNj+C`^^I%CEL>0Rh*|m1_`iW~J%ISE$~oK|eigTx=aZF#xlD8fH&(i! zWK`M6iqVx*xE5(v$wgJSd7KIAI@A8HC<4hMkSYR|MIgaYVP9+m$s&*{0+rNj8%?1q zN2-EUaFxj+u7<?O<fyutvFV9v$;-9HjwR0d-g&{b+BJ@AT-O^{R;)^{=Qe7$IX1fP zFm9>1DS40MKG%J|(t?fNFP?HoQeh=k4Q>^x4!ik515;=oFqfm>+Q@AY5=r_U{Yop7 zI4SrADp>5educzVwA9ZrO_v54k+t1<y2-Mze_gk|M67<v3a3L8;?a`Qh%AXbM?{>g zD2HRBP#WnMwkX-%1=^?EkNUCT1hcLT9K(}vD_)K_V-Xz4cFWU`zLLHgYJ9$uLe;pM zDsHEfOsl3<bI|*$!-JqrIOV2h8$IPN61lrKyzib$c_B2g5Uj|KKf=7lZWihNHUw#a z=X-<}Cuoa+jA^``MtNJP7rEkFDh65TX)Go6g9`Dpipo@KaDzQCRftpiya7KSU_?sg zWop(7j!S=Y{jz7KOqw-3clqQ+3vT%5j~@Sey|B;q{L}4^W`^PqX0%<i{?t!i%l+#? z{JwF;9T%OuYUHQ|iNNf1<Ky#|{c`T2x7ImtzjNKi6YJ}jR1M#~=E{SsR)21f=?nmy zU>_Sxxy^D2q!hXwu{1)ZldRemvqvOe5M!9sahL;mH!h6HAG0(jAmoB{_-D~+{Yz0y zoGyxmob5c(C(3&s?CZZmhk{*tB>Sns@+g{%$t{Cv<cfH5CH%%*L~uO+{5Suy>-#9! 
zedSPJFZ#ezQ?42O4Ec4L_ft~vgZ<pV50^*t=gVtcPYTCel8%VkNnY#_6>o|ZZFACV zn-ilDq&jRxnRA(q`EkrY$xmAS%l&PB&hKCWtD-BQGLfhjMWR}y7PVI-s(itrv_+!Y zD-zYLkDBJ+io|r52E7VF2ip#0c1{{)alKiv|5((41t3hm^_97~Q}6sX_s#NG#yo%h zdwYa^r+0jmJN@{b*zq|x@$}A@cVG4j%ho_4grqf8*Wo`}scMumtKF$!1`RPY?jgak zXpA}5JtjB<U1ZL1UlcSRkRNc70*luhI2=m*8iWRYq%cw+=bI`_)i3tV73S(oe5-}k z`n5in;G;gYTL#1vnQED4`lD2zWrIu+tCZuxQxzpy7F6gJ#o=_hbg#$l_W1+BAec5y zy95-B(XH+_>DHR*12+f>gn%#P#TW$zSuXVjy*^*itt(2Y&kdm4)LpKaVR{Y2bSt_X z^a(E0031V=1TJW}T#6#g1nLxYyG;|x;Xoj4oTK2$D28+>eXv?cz?1jH=<q@))QNB3 zVJoU^I5fTo9NV67s3$mK)Vz_O_UfpjV@vfD9Yiax;6jb>>*k)h0jzFt8vFM{(X_t^ z^)0~fb%Eb&g5SH-tp<UXY-czL2Wyx!jA(jV!F6`RkzKkaSa1=D6udGU$MqiO>ehSQ zu<_J`yGw`WFn%O=?Q5S@gomm)@Wt;ZCZhd5{Z;OY7jth`N&#=~b)cK(`|tf*1^01J zIQRE|-QL0d3>>TMrr5kOrygfKrhu(97U;)Ac3agTA4Ax$k!p9fXDDvuhRQ>gp^jnB z!R|(n>h{Fk@dh_7oWMo9VCyJsmBLo0+;uqsJiUse#Z~w!jimT$sY<JHrrbmLVe&AI zKJh$xDxZ~SX)~Qu-3#zMezCknTjZSQzLLL2rdsMM_f?+t{6=Y`dN1E8zu<nAe_ei` z|AYL2^F8-x{4x2M^HX=N$g+fV6V!sAmYPgUP!?b9qCkPtH68i9Mo=|HY6%^)oD?!d zL>x#~31N;h#WDzNo2auu&x(R+f;9(%&*O4B9M~`%rrYDupwEay<8+UzVbLHSMfG@M zNC9qCIO1@`bk3{moT?}sM~KG($_L4HKI{YfjOmt6^iF)~3o&)G+NE-8C+^&RDcM2@ z0mxFt4$GKi95grsF0#}Z3VD66#9M)E(-Tfms?7$EhPt!eSpXQ}X3wD94Z;~jifC&x z8JCOFWs|(W@5ZLf{VmMYvVE@Juog41H0s!gGMJiI!AQnUNAV&VkG<ic8}~#qa(N^J zxbNB-rGc6*t1O!FfGNho>Tvo4O&+&DaFz@<YZJ!<q|snOe{dek-5E_UiJyg0NxVr@ zDMTowdjfFG6M$nBA^@FX6{c|?7;Hh%uk{$$+fg8Z3QVDp#+?2cR;MHej=+^~_w<nT z@!VZy@qxbFW^$VRB6sVR&66&|xAcrZ{Wa104W3k*!@vvqKj#*K3i=!35Evi0nP%vr z5I^D^mOWO?&f`E`mGMvK7X9D{)W1AGe~gdvXQL|ANcvf|ibJUhIl?v7j+&Z`V~DRY zGOT87P1cdES?pL;(>h?IV}12Q{vU^*cKE7_5$#G!lgyfbGW1N<p3sX``$Gq-e((FJ zN*?LQrPP8kse*C4zfElzOgU{Dg~|eD!E|j+Lx#`Pj^)p*oi4Ye=gW)IYxE8J>-yJ@ zuhV8@gA?;cT}4A+P~025w0c=JsgBk;o1J$#w>a}oVT*IS^Ghe^)C*aeUlj8+Pgp*h z#pYzWYEF^n-8r36F3?Gy*%Q3i8;wecUKD1ke^jM9D9UNovyItEWL7~kUP1M6!PNb` zt%oaksyi#_I1<fDtDqhhZF>a`)ob*173@V7MVq3clU!^$D=nH+6iW?AZBGdq>akH9 z4ou+p_Ap>zhK1Ei6Ac40T^X`9gEIkIw{z$d1Ib`{UB%1dL6MY+%_0$<%stYXZxm#H zkxnZkvhaoIWIhrbxe^Bs?UOPH24LC%1CLsRy~F<Lrk?cCqg1CKNf*<Vir25O&EHIK z#tS22nrDGlB-sc9H3u8nGPt4APFy;hF#FK&^LhP&M2Zt7CjpR-Lk$_sHO<|-c>7CZ zR-HF^$%hMY{is`STwBr}yyDPpw>~q;Py*#IMFW@ZUp8yd(nSj&OO@O-ZS=FZOjtL; z>vV)GlIj)x&T3f^Tygt2Yxep5uReb2ma~T9kE)_Z)%d#eS}&e>)>SNKvL0xf#zqZP zg4?VgV?lRS2!n-Dg3w&nUPj8w%A@tsbEC`4HkXORJWc+l@CE)0!dW@%nBmI$F9|P} zFLx|-UE#kX+*S60{$b$5&|f@%5Bxp!*OJ3!`La+<sB_hM2MEnBOSr%_NtiEuSn_ZF z8$&mIPF^G^5(OSseNm?ttT?1$L$kD2txe<YQFe_{sTM2@Oq?wG>BozsLnk^Y`Diqe zlm#>>71U9TYOAqXk9a$(%8b-{E=foi22WxuZpX(lUxu6UM9g7o#!|8c;Ivgj2@Er0 z!OV!nZb~ee5ewBd9hAn{rGENq9Aty2EFA@hO2;&wVG2^ZSV>1d;RrYp-yST4SDIPQ z2+NLw3(<;r0_;NY7)puHK#B574jhKwEH~Wmsg9L9F5AAs%Kh`_FD)Sr)9zmL+)uAu z^PI4+=ihftyz7lsxi53?{RH3l^0eCzym{!=15D>n${*vpL1u;V%tBsbgY!lgc4?Rn z87>F;$GfAN6pZp3cKRflwqIiHmvj~uHE8=KM&$?IdDZrb_GbstmF69eQFL4u9pM=f znBtidX!W!PeoTJMJ>+=8cp|LJj*z;TEaDamSL(|hZH_1P-O3(yx32s3_4;24=PbX} zwaj&+i*sQRGuGMxY#g%{s<jzyMTgOGFj-tK4Sie3C{(?|DKi;W9)b2%Xz4PrD=|&4 zqA0Q$e$Hc92{WvWjruAMO1MmFmWbqJ5nh#EBry>y4U9DGFL*A17h3>iR~GX9_kyMX zTdjNLiFEhM;>fj`sWY-i;Lkijz+4LsQ2s_{gPXd0z1|+h9oMv@<jbFZnETJl&u@GF zPi5OfH_p8EnI~>ud?&so@WMe{g4O3RS-1Vs$db!{_4a$OFg-IGaQ2CvTZN}tPpE`< zBpnTokq%+7cX0F~GF83MJ0-e+%oXM-bG)t5uCjN8cRe44j(U!IzYP2>bd>R)zpN}B zrVKkSOu1L;PbwV!{lmy$$2c;|G1@yedXYNavA}Uu{LKFiKH)U5k8^5<3wTnKOax5H zX~BAolBO$Z7>7)3n3maUwwb`3lqPHzHQkhVOs2Rfvzj8MAd|7I$s97;<EE4LxLF*J zGpYM@F2#<y+FkLobWr+4%1b;&(L{-pN*T#9SuK^?q{z_5RFlLskQ6FynAA7pW5xLH zp1y3y()OE<F!zhD-*OrqQ;H9!@)!iNjWFOk?ERM0xS{j*-}vs8i{H7a^}f1YJ+bGm zT=Uay*IxbT`bRdMdVCA!Hcmc=IKLT9+;6?{%U3^qYd^)=IFO^Iz-K<d*%T{KhN3<) zjmrvIWtujRTOuq|=4rCeo&~`=;D~i0g-W8dtaN`MeB(VC<_Efmg$72?agPt56P@gy 
z6}m7w+r2bAJ9@QvweKW388na|yBvYQBtP|JIe*l(+1P3j!{8%PRYLp7Gn8VB@~jKE z0s7ql?zq<j+!L_Cbo&!?Z5?(HnI*8><LAt(Qz~m3+8x*tE~9A&$y5W~TIWzzTZYU0 z^+ttcRn#;TF%|2BsVKvg&0tZ6BR|6zV5+ZF%%;cp9GPIONT*M(=yj24dchIKT-m0c z6-|Y#*+L>6)uJnlEM^a!c%?Y=o^d?I6e)MfzS@86{XF+2_WtQz?8K*!sXK3(v#IAp zGFcxw{kH3##?u3jci=J*_d2f1eVqH+h;84u5Z}9g#KI?;ob&(^+l04K06VNwuYz5n zy3l}-6<QwpvHq~*X@?wkR5{v1T_HY1d!#B{)=(llINcRhv5%y^9-b4Cy2XpVd5^^h zl04$bJ(vvu?ix6>fei$vqh$@75e`|DQ9_mj7zKG*tfh*@TFNQwpxQ!|<)4LMthW%1 z{er0;HYm>~MRexBd4U;Yj|W39;e9BMPGS{-_i(b0;hd%8gD1dU=r+2$vosLX#B$~{ zrk!x(HB3>FL>cTPLvcrtDY_yUOz)a?>u?%4Wo5mY7+gQNp^*l%K(JA{=BxK5%$-}d zc)~ZWxnNdg=%5Qn9z4iBv}whXhS3+ff1-|Vy=>Fz`M?wB<|cDr08f;n8obPE)il9d zt0lb`XrsKMQW7eu)l%NtL`ECpJzpE`oi5GL7HZ$9|MofiCu%FtPMlqNLFMM!t+mpS z_>k)6+R@tR_^9fs@u}5|q&e|9)vdK{wI5a<i~l3>Wu+PLi@r{>qoXS7k(e$qVrT%< zByFe*9YSEdlIyKOLNw}9N0mo))$glM)~m^2@K6970V~iNXbbSQ(B))WEfZ`3CfEYK zf-S%VTfooGQCE}+HhPsv_ie!zpoYQul<WhmT{wx#%PL-W9dv!-%Deb7SF>v(s5-_o zt}w-|tDHW~#bQ)0Cf!_2y17E>+SPF?;J_99R=}O;Hoha{dXAi=vy_goNf30~R2Xtx z5uj;(%#5UCGsKp3fx*RO7f&B?H-Eb}XvFI4Zw)%}n)W{)zv6dyymZZz^ZvN?7hgR1 z<n`BWd;XfMx6KGoP7azov$6el-1N}{7;k!@?eyZW4qp8%SM$5Bm*4u;tG}W=v;iTG z=KXo`?7hejgzpP9u=xhe$WHQuxl!Cc2hR=-3xpa1GPv_zPQb_&6(p~w>Pf|_AJULl zaF>GpOw0H!mS|bUN-sr$Lfs3KrCc)mP6^ZN6dD(2C{er=B??tB8htNK(q#K5_pr2* z2`s1>Xc*Ga?mzA)%l%vZ?f$%<_Y-f@9<Vo{4#(+Ssu)ntVZ^gB&cdL{H&%dgscl-y z^wqt?_TShxAtH<|2{St<_{L1?GZxC`BC)ai^ohRCE}aH}n5F_nMZ-~yJDs9aN;*Y7 zf*msOC89p@Is{CO)AhEc==b}~gvrn-`pgX-H*~G}*|?4?mrS~&3CzNO-jjXe;hsy$ zqZ_WBa_99uF9J{A3W#cA`DhY4U|pgNp)HuGY*w}^?Mj#OiE><#kW!{BSK5><g+qsx zyrPyVU}Z@>;S`a(0V7cmc~z8>0^+yuTlsdri$BbZUHoyLAU?()f<2zM?PW5J@9j69 z^&79!xAIJ~@kPnT7vsr1Wg?aKAwNO>ZttyZViUQbr*JV~M@fF=iZq*G0lji-M@I+$ zw}S^y`S{eS59z#u$8wYLFxF-_de<7o3rS%(UoWf|gn%pv63-J}@F48a2<O##Q_v*Z zLQRySrfV}u^gsZ((veiv%^EJ#nze}<N7JONM%to6+7xpZH0D`qrOZ3gX`?im`6Z0E zwUEdAe0)q_u4Y_JGb1M$G{Un2HIHY*uI|FHs~wQ6uis$EcFwO;He4y$P$O7zN)cpJ z1f4=y@5A<t7#dlYfW_B$<QA3>DQg_kQGd?;WBJd2_q(sJeb71f9)8xTt^3E%rThxC zz<ovYbjfThBHGrKI9;5na4yHcgp(qt6w~qTfl;*xC`Eu}&#-~jY1~yRaf>kziwqv$ z<*uZW!s8vVbqnkO3mqP}ZiXWw&kMZRsEh$ri2c+V>Q&s8>WAE4Md?WqC&ZMLlr!Q` zrP(pj(ZaWgGo%*fdVa0&pz^Btd;UG~i1@kmAMtD1=T=pL<9H&9lA_44qsVen61|cn zay*|DRIebYDiAC$W5T{eBm>7HmG8tZOA&Y$V=0&EUOdM91H(@D*bF9+mP8~8{vyK7 zXd-Y9%|#r@c%NY$*{PF^l98K90On92=A59A?)Y<j%>2H%W~gQ(Oe;>Z5vKIF<E3Dr z1~N3eil=isg#JM`i%kNw%S{|B3-L|II0cs}H*-V@I%q-<cppIjZ1hyAEy*ZyNlBAP z19v4Ek>(o2*k(uE9w%*K`BW=VA?_#UyLQG|;?GV$-F&>$$cXla?dfc@Lo4QEVLM=G zxj*8u?DfOjyxt~O(BbEuL3;DwcSJIU0kIZ42119%n7p#Xb`o$Xe&+MsV*K*Qxkqml z_MLtSx98UM%q3;l<SwRkc@q?kj9>q{M_>YqCBZfhwUc2R2HV>K1MO|Ooh)l5LEX57 zGGUAGiNH^U;<&(-3Co2xAusSC%~itL0-ipJ33?w`Hd_$x0!NedmG)nKEA31ANP8RC zwml{njIrVnSUz7I6f4LsG=V=;c2VI)14woPEoOW4M|<X`4wg!5>rgQTHd6w>M$_a@ z6tf;}066Fm)_Bd)kmQf>N0dJYj>d#{g_AK7kYfoY7>Owymne;jK59}(SWJXNhI%N8 zHz&6yNfP9kGr8HsCeM5}mP}%@U>)<>ytI)fo54yOY7*wTF)?PcFr8V<^)ZWilV&H* zT6!?KIf5hXagpA~McCsaH22h`kBcxZ8DXw>gz^$o&JmqHI8u!NMd<VVh}0*N_z=Q0 z3P8#b<v)(`U&;6Qk4aqQFKFP?MH~5q<z=eaMupR6$%<qrzIxYpC9oZ%={eFTY||&; z2Rl71`o04Ee=wdw^)Mzj1AX(>ox0bP^6F*;yB)rwqAoZufL|XQ1`e>Prc7NkJG!sp zess{2i`U#=cEcNwJhLk?>+Iz}>X<S2f_20A)V&ify==z5?R$DE$xkl7blAO5^xRK& zUVZhXhwkqAplEY_3iRT~*IOQe6Fp>`(P{jZ`^<BkJLwU5Dsr2EVAdM=0pn2ca4;X_ zW3t!j^}E65!lK`yI-I(*BFOBlAhWqNW^HNA+R}QhEsZgOR?e=VJ<O~vjagf;|Fw-! 
zO)W&^PFhSHYRsV1F#NR%LCSPtYIy~Z2g&l_)?j<EE64{qQt$IK-aOf1nuS#9|JC49 zzhiKjeGD$Xz=>U!`@06*gn)6f?_fP};R$AhopIDo5@*Sjpo4p@Fu!Ojs;o*XCmJbJ zbVjgCbr+CKrxmQAQkY@65X<d@;th{o`BCemlMJ<^X32T0p5jyYZy&XM{GjW5R+05r zEIsF*w|WY5k4EN?@s)r_2MXaOdwjt{64x=tW-iK<t0)v=C)|=6(#MGB$<xIad4ae{ zmK%&=?qUAH!BNII_c;Hk;4ER5a-orRXZ;rjmkLXjxyDlWQvclGRoJJ9g5zRtsxVc( zSihW`C(KhX*VRCjmrM{E-iiqGej<zzq~3}75)0@Qf`P>VECULelE;}-TF9nkK$qpI zNHz?RFp>-@CUMfhPe4l0<6~(!0bpkZ(w)@tWwZPci#MVu!!QdY6!?}&AjGBtAPXKz z1p+|>!!)c=_;7@cJ7YyUdotT6xu`b_lg50g;!>svQ<Teu%M_k!Cwhs;Xauw&J0%14 zb&Ezmaoex|i2c|8?e<S{-FtU#Sif`EEgN<c53am(P43S<2mba0T#6lUz4_Mfe*NYf z42K(Xi}*O;(2Yv*W!9a#(a$)`7-#U!vGy1#i&g81l0m*fCFhnbk8O_0!ve!1=LgP@ zw8$6hvjVdsi{&NyB4cS_Nu(?Gw)dmpN8z_ik9d!i9**T>{sf;k(!Rm`Fk>`-zA@7{ zs{O4bXK1FA^G9h2UGztt8ghmz4yo8sEwxo`Q~4M}V$3R}GJI-jEW06COl>F{b-mg0 zb~s(7)Rtgr4y&=Jp47XO2>tJo^J2i<=o2tE&Ip*F{BFRU4QYXxWC8QCF^xfdM#Q`r zF#m42+)hW#^o^E#ih|Ye^AhURR+?NN{B3w**gXqxJ+%1BPp+MLSAX-#HCI3T)aq3` za*KqYZ=5`NQ~rU+bEj^<U|7#7?ui5Y-+cGYH{Pc>JTJG1I}A89P!tcb?$k({)C7l< zab&G7Hv5`G<3gKDx0VVGo`y(s=}6DW$P~|%$Q;j{NNZ_Z={w@P?oY+f^)G@(H7VEA zz6=?xk0qn^nPd_9K>uU#ul~<NpGHm-7v>$_a8#3=qBqI|;sVZkM5C-OY`82}tE<h$ zm$G0)DZ`$N1tVO&!3Y-%Mz~lo!o`#+3n%z#U%TwAP|?1;naPjU=J%tl6^xfz971Ao z2+407zjoBFr1Z=P#Qzdu?K#o(eZ-&@*epcDhZI5%XT(-(Ywn-+^W2xq-oD}2D<12K zKX>)2C%3P;^6}gvA`hQ{`(tTq?xrX2{AL9A{DA|%dhMO}UbFLqZUN-H3V1Qm>(=l( z4>ow5;2Zc6{1kpZznT{nQ&wcf;V~5l;$*Bbra-DvwOPh;dCY@7q}=>J`}e&T-&dB| zN8N}_7@c8P*e<@<$JC$T9<%>@F8&cCdtxP>y+=ET=9DmZ4jHd+aI%RL*_CwmrA@$g zTulOXam!<8FKWK{lC#e}clagVQa<(Qit~m&RXL`)b!E>xc0V-dk8wMo9|mv%>sr3t zTRu!VUm003y?kEzb;_N}%@t32o~?a_b0~puFfd?T?Rx<sLZ%U748m$~mOM+DrOwi3 z>9ZV*<;BWkb+NWsU+m~ebyT`&%34MBkcydVi#9hkw`z4_bwyjn57meDd#diQy?4M9 z>eKq;l}}XdO8q+JuPR!k<wYP-1S*O^mF*=KuAxAp2vih-5}GF9F3rr8E0em)hhr%p zuk|kp(-2;HsFsa0gqlMWLzjlOhYp5BSEwwsEc8i;FALojBB7rHc6>k?Eb47}>E#BU zI%D8N;P7Dsv-xMcy#59|8tyb3Fz!FA<nj_y67@;EJs`~j6rUD@5T9Beief(6Un>ja za7D=S1RDm?+v`}|HfWcWV?usPC7~F-GZdqDg_r{wVv+AqC%Jg1R8a%B?v7>-)!-WX z7J5fbF>AM`$g2Q+v4`GN6K1c9SJpJN4(b|2ng_KFB7<lIzXAnqyO$AN%<g3n?-ZaF zpn(0ooLGg631Jtjo-0<665m)cCOg>dszSK2{F9=48yZ*$(*vUylfYp$V6$>UVIVx6 zUeRX`eA*rpW1p>A!3M&qPeYR>>85vXXTY|itjd0+3Bg;NG7Y!kF*vc@5sM(DN{V2i z9~7lt*o`|A5mcUV=yJ6h!Bv%tDyI1eDl<x`y=BknWCcqOsY$O}w+{6YNi+_W?fptf zWva428Qd_W@q0;_u+aGiEE3wh({<am*Ihk0`NLNqoOsUAn!Bf5|MQt<yS{4Cb&LJ} zy2#Bh-#>lPtJfd=0G}0IvU1+Yvl7ANpt0*Fj9FV%mOk&=1;GnvUD%k2mUz^P`g5+E zHFL{F&rv?D$p4en2oItF`omtN(kY!O8vp3B&H<n;gb~&qD&~;iP|_|H)HkQOjB-?t z9qy!#^O8JD8PzH+m)fMw5|6-s+A6h6UD6>*WK&lPb68F=Dw1fDCL6f5{jvg3nAP$P zqYi4BQT<OtJOu;Iwk@T7WHAcjAv@-O$5~|`!Rl!;j!-GzO*1&DlsD_^jo0lI?qt9o zM57}FW}`_*1z1iaF~S!#U3Pix%{TAbz1x$nDt&Z|arV5&$ec}Bx;%Hsrk)>;uMIOl zA7t5K`t4*qaW4wf(I@cWNzCJ?DfGv!dbhVB?ZFkY$FF0LUjuP&LN}p$e=-=LZbg{+ zECJ@N1l%k<-<x_8U;;eQ>!$>~g+P5FDjZ;rXMp-C4%#320PYIlz=SZ13s4s&d^}8+ zhqs2?!}%~D){{!F>QFFJV#*=qu)-@v)uHsNjzUygWwB`be5NrJ=A$SqD6C8foe>(K z!QbyYBcMZAmOzuOI~Ws%dBf>&IcN?ho%jz939m<xLpJSDU`@?BJGHAYzFC=K;lRMR zQ(d{{>)yTO@rj1kp_x}qo_y!<j)yzWTRL&@DsoTHt~&;fnLOpLTS<n7O#we)8qWv( zsQ8OQ?m$40kt&N=RFR;_0wzKQn-y1={^)@5(E%V1s_|%LBQFdVFe*1QDwXgKGoygR z*&x$`fUMgETU^+xG^nAJ#v4!-6wLZm%99Pq4+ZRfXx&iNzX8RdaOu^kN=d01G*~?k zjZvrL>7+%Tq0Gng$s&1?ay7aNUq#l+S1VVk8}J6Qp1V!DRo<xl1U;bKtv-hyQ-6+L zkanoAqhG5ZqIcE5qra-B&<V8`nxh7hU#&tZwNagjEL9OKx4%IEVreLT`wQAbn~tcV zY`IwSFJe-Z_60r0j9S_&>?jcgU84y?AElvhU_FpNkVbVhrIz-O)u>9coK#e=qNs=? 
zBxz^J3aScb3rlwuB~{@N7V31Ym&=xADQyZ-I&oyTCA0}d0LW5e#KPs;7r&?U)Ey4> zWP7sVVE2)1;lo3{5k@m}=HyM9zh1~s>N6E1+fqzF_0(hhv)tvsIFc+2rvJV-cLks7 zxp~2|scXosj6CR6ID~{3fIi%ONpTLcn_8$$0@-O{V!_vVhkee9XW8WxGGn@ZqN~Gc z4-kNQrI2Z{ebwYJ(!nOeq6_-Np|elG=q4t-%A0B-Qe_J}lYY?bfbre|;~h2~xsbKY z+E+{@i~tvUaSdOslJm`r%{xubj4=$-)S<$l!{Or4{BcVui#Hh25<7-%y--%sz>B)# z5hF^-E%1mJHAQpEZUcEZuN0LdS_$~FNvTFoI~&koX_!3RIg%SATGDuVoHoKW#ysDB zvFk$j5^1iyz`a(yMp`ZJ75BOJxc@DlQmQnw3RO8OomH+%cb#`AYII*Eua_U-?$@8f z+sHQUNqslkBkptlhJR1|Ksm-AbA9GMA%3GoH8#UXXQd(9iF-_~v(jB4+=%LQ@or?w zlAM%WNhfvZoD%22deYIEf6r>9lF9+}SHpZZ2ljeIRWnm++MLQ?sLnDkH?K1{nyRVt zK#dd~He$Y;AYPX~QD;w+Fpkj0wgBKCvAi5h9G3(|Rb}v1RKqku#*f=2AUD`8W3BnB z%NhHXDakR(bi31n<P`+T33yIAoL+~+DTCXSR%I{TPP54iyn+ap+`Q~Eb*F<>&keGi zer<#@kK0A(J*wW5h6A@cX!;T7=)_N1YHXs4m#H_ZMC~NgEM=mJmzg)3bV9*2OEU!A z%3{VGu-8+&@k!6g`OGW|jX#mi2En$0KV`FQ@PE%FFYuko%KsvfTyh#sbfqv|$F-MD znbF~h=`r$B{xI0JhmkXXr~?gf#oR!n>}z)H8&2ce8>X;X|A%%+^do6-AU<VWdp%2A zlJkdmNHP1UyD&qWPNh1u2h2fuHpr_(JEZ~ii969yvd?}~@56iV3$Xjl{NY_{jE~Xp z>9rKb8|c&C+2hWjS~r`{xWhxk{w?1|y4k!gHhY_j6edwT0hZ3rRdRS-?!|piH}myR z@7*%^tUcRv9WOpz{XU48A0IK_AXoG}@a6$B|I~-%y4|M_GFjsSUGzU7YYhCSf-ds8 zuqN_E5sByknshO{+*OxmWXZl96nVkr#;)=Z8`H2Rg)%c;_wo114>})mbqQT!m-MDf zaasOMnDZz;N7xvQhiU8Zotj+dzKCy;TC^F?`|$(n1KJCuQ~!<jhVw1sL+)MWcaA?A zM^(4`+l+I!=?XfGV#YazTrA^URf+ihjPv=T$k{pPqQbJyT`q$rox5BPqc`K+P(>GU zsm815RfQPI-kkGS9oUiVn|3Z5EbUyK=*I4`jvMrH)iqmGZm?9)4lh{ZB(aTsrfh`e zjBz)R@`=!wW6kUK7rvy(6b9%E<EU|>`+FJY{nOb3z0yyBvJ`WdYXi$F-*1<&kyxgA zQ-LfxoWYU|Oa9hMGJ1I+!@)}Tcg8aYn{VUG;PQAzv7(vY>~0o9Wg|v3#XCUNPa_>G z;wrHV-<*5!&yV+y)+TqopSv61{?UiSa-WkbocnsrfOG3l<@BE4;`3W_S!h>0H<|kf z&`22nr$8ems@KJ7Tr}izi<;=M+^(2r>9GQ>gzD1akHWzN;gCT$%vooOBeKgC#V*>2 zrO`~4ce-o4%2^Hz5E82z&|uI)(iOMg5p-8-m3pOPh(5$I*!iHTRk^D?=lNUQEuI$N zBKIQCBHvnZjbp8OjrSVgEsl-mCif=KZQcjeZQ4u5i{?J>7wTu;e>-}Nuf6$bX_2D* z9xWQ-T_asLyEs><xB7N4!QDF@r_tro4Um>#J%qd-PtvV=VaKI|I816PIK!%krjctR zeE^CYQBoIuIZC3Pq<Oas`oi*dlBt%~?6%zGQuoVl;_k%f?s4IAG%BLfQ>?eFm_9(C zsB@F_Je$vO?yfo)^bKk5h{Ub~(FmQ_L%&xCL`1(F5j0L53DNJbc87yTH-k`+x(`K) zlKT#DQj#jnR2bLp1bGt#dGjLD^T&{uKZg5C8*l#OJ&hT)yfNbhF5B(Pn1#uoE!1j8 zV8;OYcq;9jmqs>yreMa<PkScHZ}bkYZ8|StrUWgw^p%g&<z?x=cH}NUr((c$(;IRN zo;Io~B1>E)d{xhbSFXEm4Ow#PH`~u`nL=ex708}<fUcZ)yXA0qlGkP8#)I4eI@0}H zO923%UCPG2U$M>ypqf-Ebw&nf)UkLp87+@hCK|KwR5DebsZ261$8*RWd9iXWUM*j% z+>URNZ&SX;CrBhDr*O5LRx<KW<@d2fx#b1J*FZqPDs;Yh0^DgbOi_ufs!2>h0TN6< zUr1&PX=tK4+kxz_#4>%JcB-TkyE;G}3gV08VvwQ|4PUXqeYs<+6C<bPY<0Fdk2?jH z|6f5*Iaed~28_2OJP|EJdBh<$VH|~A#_Bi~8#EqR7|HITz>ze|FEe^*D4@wW3XbAY zmTXxtHx1|h!bf3NWKm(p$?j^Ll4)qh?me0ICG5TO0_{cGmG&o`R<z(O!@UgR!$mux zusQZZB=dhYb~V6JRCoOU-d=9^62e_#xCC;!z01b|xj^vvxO|bjix(3)H334*VI+Z& zjv_7S1sGe4ix!nyiGwrLVoQ{<4uf^vT%33b&IBhGYO2+$r89ov*Elk*Mwux}XYl%a zyBEYMo#`&W|L_0a|Mx!b?cUzL<Fe1DIEz<`+p}b0tugt^cGmI&V4m~)3(L~w%vvV? 
z=#a@PmU`zb;7=vGzH5^<UwmoXU0c{c4oHgiz^;oMer9`I9OtdlS++9k70RK3u<tGT z6PXXolQKW2ut$|A6`oM;RrorkAGhWTQ*tCyo{`0arQJ-rfE$h+YGA~r29j%(`9#Ja zqRsl<UFLTx7%%aaHfGWnFMp&oVJb~`-hqwXafkR3M}cfUUi$8rRR7IYE0#-^Z~yxH z`|DRPY;pFiHJ4SAX1z9ecKg#k_J)5c#fq_N#mm9b12Zgs?IQ2Y5?>eEE-k|ZmK(pj zk*;yjzn}YVsiW>vWM&Il$y{%QAEUbs)Y36<1UxSCEeSdUGyt7Qk6Hde+Xb>Z(-*Tk z>0s6=`Vnv#{t?S3bktf)KZb6?LKH-qgVtm87~;Q0ejhZ4!Cgo{3BMcqCBl~<rnyMl zEQ$0-D6<N<ALZM@*8(xrcNMq^cz~Uv2id9g5yZiCAL@TV<O9(Rx1#@h5f7nUInwvR zpNDp=U_0Oi7ND$H^J)e5oixR)tdg}djeRLuq;nRJWs~LUth=qP*3aZ$%KubaZ4TSL zw)5EyId9H7n%k6na`p-P`Mm4%p2}ZwRcFC_1?Qc;h3!R^b1ic}owvXEepi?4&5{l7 zW9kA=*jrv|E&aUg`SKdyEZ^bzKUuK5@_GNss&k7zTU=utjSv&h)$F4z;^d%e>cp7m zW{Y<qH;!O_=9pP&Hio2mUDr(!f8@^FHg3J4!%$#L&lk_Ay22JRp4lmu-m;_aJk(V{ zbI3_0q>`5~bs-<k$8~NYRZ<l#LhDQDY6{R&!gK~B6Qmldqk3whC^gfy)IzOv9j&4m zT~9aAjnqc%w3^n?TI!&6==n|5+`VIzCCk?D-Q8uIUAAp^*|u%Fx@_CFU0t?q+qR8c zz4zJY?sM-s&-2{(<2%NRj9eKrW=5`zSYxbQ^Z%OyHmXw0a?ESI)R0b#`1!K95&$MV zcu4i=#sELPerSVM#c;;{f@%aw`qEB^r&CPhqHV$g^m8yPno<ow&^9;uw7pL_Vil43 zDPWFBsFL2mq2Tn`!0>*XDE2@pIZX?0642-XSdu?2CU*S`i$$Qpt3i!kclI>GG{*E- zDiwNG9a7J{RTR{eD2iT!2$vH>*%@MjgwZhw{u-AQDZN&OcfybEuuEz|!vmlIywb;k zTMU}7!moO#*jJH<SP^PAww(c9xZ2;GF_VDHUinUqsysWnao9aGOT1wx0U*C3bTZ)F zJm_G)a@s7TDn8<R-8^VxzG}3VIq96eVme=3I!)F!#B?opCT%^I(nqL9+kl7COSaQT zZCephQ^?D`#==Evx#QLNbx5#z(ZkJub@p=1LRo}X-bxQ}N@3W9+XUkQDI>8%H;~L7 zMZ6@lPD%KplZ7e$_+5k{cDs_;UBa|?=%ZUo1{e5_@txRLYmXS}o&F=YtI}(2lhlLm zNRc$gmnAz4;Pi_Ueny`bI_ZScOf(0V_m+%K>E|T?-+uA+ikG^a-Zf;zj>jk@Rku;d z$omD%Eg^&mZY9&xZe|_f;&(My?gv7@i*|igb+Ks`h1lQl@r6Ujt>Y2tbIZL3AceXy zpML3gWwHM4bN3m`x%+XMZL=%yaSLVmptP%gC7D?FpoK>CAFsh~r{HudW&RB;DMdxy zUTFU}2yz9BG!tq%3#ge0n(|DOmgM?C6DaYOP6qB7^-rZc<!J4WxOx}^vd25f(6Tk$ zOhM8;#SFao4Q;uP$)5O4l{QOX+t5h@`mFh+Fn?=hC;4{($J?n;xV!Dz`)v#EuP<y- zOtLyb9Qg>8Q;!Ulul)D;lsp|#f{h-tk#NqO{MKp@8Og;NA%$^1Z$i=Q_)?Ms0- z0}4zHSC81#)sLR#KRz#jRRD1S#a#B8FNGG#R9r^(Wo$D?Owrr1i+$53n&(i7Eb7o5 zxeDmLubpGdt0v4aF*Cs<W*!+aiMma>8@nDa5>!igOzoL4ArKUv1U<e(5QL^7oC&v> zO89n*l|vR?Dg&lN17w|yaWvB-L;0YZf>iI2EK&9jIZ2#HrLqG{<}#-8ZJW>XoT2P4 zjhQVGrJfIY6cokpP^=SSB~~r>#g+1fh`X(aVziTu15pu8rn5$ckVja6(1deqK7w<Z zEvmCC8iH9HkmkZTNcE*=ZYOh!vnUQ0vOQq4tR(^J-)Dj4>eS=V6cV!_JZ8%IjZm7y z1V>f{{ZD*I9nBg=xk#LbEUv*>ag>HMRpP~Vb(?sGlrV<P4H9C?NkSB=Nz`%kd0u%| zskv^z%b<5c1w*Tldz`Fw#v=K*#kU?&rz>N0IqK_!`)(235m$DnCMqe#s!!)m?#gP2 z3JwNH=BM%&9`#6B+XP{r^_o{b>=tPBUUmlOo_t9KF*QgNkX2km`%0wrdX?AL14DtR z7hkM7Bn8IPvvZ_9^fb@vrpS}l!`e<yW^=F~37@+bm#N;AMbu$Hi8bIOk0L`1vilcV z?-lIq!`vxJ))|QyYd%Tk{Oz--;RAZ4^CpMZN>7aTEPCxs_93Ypyh2!g7RS)Z&ERo? z73#3|Oy3nyo554ZCRLIqp@@f5b9?so0tzPi!1!WGZq+N2OS}qlx+hr}Q|K7vBqfY* z1^KpZe{4IV!81jw2lV#sqs5G05YC|uwTs2L3yQ(NXztK4Hert&+j9H$e|%WJNhGm^ z%?`XY_r<8T8H~O0RK8D+tA*NI#17D}qD++4?VYU6I+bpQzx`}2<1TrK7vSu<TT=pc z3bl4uu}^AFWp55>>3N&dcW-QOGS_3^nx1Z_c=WBm*Dy6xc|jsGLy4zEu&4v9goO^V z4mBnxSq2ZOSM8=0RMSt}In(F?XY8Y;r*rl@(BE-8qrC4=O^Y}S?;ldTpj_7Ai1;3p z8@CgA@@qKdR-MNWSpZppAM}D0E`@SlV#F`RaltioU};Kx(EGHthUgmRRQNP2XMZJU zzXOm-AL<kqz*6;-{$>%xc(N5f)%!(8ys{UA+mH3ViUPkusg<D3l8BnP5Y0}L-j0^; zguj4513sh`INIo!f!~MwtCRY>IzTu!woXzelr$dI(YZ3nKz<hfFP=FT%f#LRKD-E& zByEp`q~e}K$9--X;nsCD$kRdRWE<4MDiv@6c={e26NFs;kFNwPFEjs+i2EBo`J0GK zM@z#%{})`7?i2I+=iz^)YyMA&xSvGYe@Ddq18Dg-A}$jX<DW!aIvP6GPb@AC)4w8d zr3Dm}m45t*#Fdc8`8x|2{1brtUz2d@K2f3niG=$}^Zd==lm-7Am6Mg`AD8>zlW>0% zUjI(M{fmVAyZ&81iLC#m$Nr|m{wG4~KWuUSqTv2j|F-`m*#1GW{f!s?UH(b5{nP&+ z=lP2!`}-WfNwk01ezyO%W&UkPNB4h`a9J4`{%rpbMA^T_`@8<5{EdYBd%S-g|4qOB zUH%~5e%AkE67E0x{C}X~{{Mzz`+I);r#bQeaQy$t$M1XfPjD_H{l8t>{|z|z|6Gp$ zO3eMFOaK1Dkp17p+&@;-=llOe%%%POn)oLl_x~d1{>$2={o8o|B;?}IvC{o{y1$6I zw2X`_pDX^qBj(1tf;r0z)L#~tv}~-;tSx0F+9Tuee8UB#NrF&O#v|gv0mGftiSF|! 
z2pYx{G=MVQ4#**4`F0*jlm%$re@MgSun&c1BQ1p3|1~!P4b@X!P~tPA1N)EkJ@=1| zbh(BEIfX*W^zsrbuP<IeDPG>ym(R^VZ8HL@_MJejC|ZyDI|BlWMZWBQ{jzn)cqdko z`JUbL6(?Tr=`rR+Q7g8km;jE)2_Ub%)!t?;=X{J>0(kJa_dPdJ*ME=vo{*>BXKyxt zVZ&MMCez`2LZI%`)`=oTnd|<|+J(*&OG0IP`+368t;lB;FE|z~K+B>Qk1OH<dK6t3 z!~~FqN@W*(S8-?P5gm#R>5axlkE#3IBuwv{Bji!o&66$im`wvNVEKo+*T?L}`S(ce z0z4nUk0*S5k}RJ9sWlAF`)a(!(KJkq{TZ+d&lzs$E-0+aZF($t9Jm-9G~iC2OK)rw zKu}(AKb*N<NrXt=9IzK08DSyw!?WYSLx(nvR?6n82dmoB2dnMU&76ySO?dXsX;*NR z1)F5Ho0%7EE_)74tNM<YOk~U+-wE26C(m7Rv@k%fZi`Dw`)K|~pm{vcoh=9weyR(< zn)SC!!t+k2OPh<pD2UA4cji^d16MWJQdIOTkQbsJDwa@(4!_c^i6AtPLLCxyu!nBF zDv%5x$wyc&!E@p+q(vw4;eFyu7`%Ji<SRU<7#$jMe+6sQh%Qo=KtJXks;8<|pf&60 z#>KdGrrkUFB#4_WqQ$SS#zd*!>~&Dkoj;rr1}^a82=2$vG?ra!9ynKvVRySwlfH#! zV`24PXgY&w{uo7I>3I>BKzU6kh!M$4SQTDY34$_`Xp@lko#h0C>4D3;ioArnh5e4k zOE;u0D}_aLW)F3KkAoU;q7F<hbC9A<UvW+7j5+WU088h^!MiL25PIY!uIF)2v(gm- z&YwiR8vY7zV+2usVW0GMp|h@WyUv>3<HDPGs{`ko(3aAr(_;o`UYxqr&m*g()V^Od zEt4<8R(TKZt3c$#Ccv^$U@A1|9eSOejEY^n7*?bd19j_=C)%75O>iAB-MK^`nLX94 zR$2i&!bfV2HBMUqyGxli*zNsKB;zFT%pyDy`cWKoKnGgF2~n<t?z=$OR0=kXO21B) zfbYRw)8D)uAg;QVakU=h3AUY0f@a~*LNpICmt2b;j4uS_*bP?Cyo(UfgpaIAx-Jlt zd{@(9#G<U2x169vSAp|v=*_(q8)1swz?}{#S->uEEDE>q<C>|Xo2{}?Bz$<&gw;;j zznEZ%*hY=J*)ldIbMR!~2@P5euIyUP{?K6XjmXM1MoSk#ne?{@Ys-`|G$L*6VKs+@ zfH>lfuM++YQ}9c)0m5PHCGSJq#=2UVrZE9<15VtW$t(pm?fcf)Gdixs+YLt-Q4t7M z#46P(d5yEgzu%>Tjsuem^o*+MYh*)svv`XQeJiXFfhK4eC27k%lsi-lJde<A-aCA* zUkU!uNnq$7uFqN*om-pKHgZ4Ba!rKUyJ{UahSw0YA(zOa`O5ePWlI9(v@6vui`v~S zJxo>gvWU9sFTl%nJ_<Sulpf&^e@!Sl|4Beg=S6u6d&SHj6QhqxEgZtC0jVgkAt^Ny z0V>Z~+Le554?sWj`Up59A$98f4r!5PBhVGU(;qSPJ)Yi{-*Ns@hmSrG5-Tn`BLQ6$ z4EVL(8z3eY9$~u;%PDQ-sA{DQSwjv<1s+Hc43ZK$-VY%9)|y`!Mk0eFERoMaS>_=& z#G1fMfslJ37_gg2b@LzzSXc8#DoGG>rZG$e!`p2Tn~6ah1DyZ8kOa|~*sefH=lwiK z+5u<UV!tri1$T`?xf&KyH<HJudpd{5&kvw4Iwm!N2_-Ppx)q>V5tU564C*^L2m|IX zwtc+MtWp4ZyP~>=#v)gZ0Emd>vPjA3WMmQIU`RIbcogN0mNI#6KNW0bY-yA^&5ivK zsURtXc#L&>GfZ-LvRNhg$-)Nr8N3bLSgR<lwT*>tG|r%vVaIDJz$r^o)sS=?=G>r- z4E1G;Na#$)n*4P5R)*R`N`h1uC9`slLB-E3*l6;Li;7Yv_Q}eYU*&G~=hf4yC^|#O z#!>;I*JesRmeF@j92lY5%#Dx2N^oQ`$=b;~6KQjY)xScE!q-FzYYrr$sa<YM(^i5} z!|=~e_D)Xk@9C_T<?VxxDr1_9rI=zIaYD$20*oOkWNYW)u*z#FF>HE$PcbUvV(9}F ze{vbA$_Bw0M=gX|JatD4y(>)aOI#UE3m&%<cHk}rD<ok65+HzAM&%JL;e|_K3Lp+0 z;JN9h4GPFK^})~xc!gkQ@4pI%ox#ZFxl!`+fjxfTK45fkXOvCZA>UM?qDmBiUU|8R z9vvNw!c~{u<y}?HI~Dzv#%_Kn5j`#4h7U_BknqC=$*}}0(Vqea_=(EOj9L3DGM>at zsqWqv3SmnCeDYsxygjY?xgCA<p$!ao4eIAu(Ag~5bbN$T{30E%Mo!K<A&C*8uooZG ztt|5q$8s1fI6)?3dkRMmV=FN;J!4J^JM(c_WK7J7e%}bkw)UIv*9(BMz3^w<vTAFa z+F4dR^6u9oEQa8ADvt93lhKEQZSdihl>1^#94Lfh(e_1+0z?ZZiqU5pOyF>#J_;zn zk*HMz@MdeZS<g}4=hpgwC>KwI#NXHkuZ&Z2>2Jw`qZ&aQG2vpYVkGcjQE(ft<nSmw zAeo@~u>Gz`S-o^{9|UHggLv78ECEEg1Z%3Jqqy4#QH^63B?Vi1QPWF?zfHg=<&3s7 z;og`I<iDD5J}I%ck7w>54h8zSiMu+};js^(*%tt%cV+AA-Kx-$JZFGz@Ksc;*ds}! 
zrAxLWN$JYF<VXML%pZiV`HH>=HGFZa2Ebb{O*IR_>n$BX+hx)1vE_pUCPdPo8BzJ& z)RoFX>$rai+!`HTw6G~x-GvqXhuR6D7I(YlPQhJdc^a3awEc7D5VO{8%#CMAJ6!t9 zQ{oi0r@?r7pk0U2cw1*u2*OHd{s!5r<Pr|^G7})q_a0v&d%_l4jKj{XQwn3~%{kyW zzDF%|YTYvLaqn)pn2{kf^P;nNFn62W8}+7Kl98)s4okAg@)1;K_dUIa4VM9nsUvO- z{D?{}p-lSy9plaJzM{uqkquMv?n4PQNZQu0$)+rsZdFf$AH7ZezMGHD^{%JNB%OZ0 z*aw=>@n?(^S^EXUqOg0FRs310k%J8I`Y2b8So|ZQM2qr@@V-J2lGVn7;}?VR(dC5V zs3R8ffr0|tb||oz)vWrKel*wPZm=?yB0Di{s4M<B%RgU+Tk49Ei?ORDaACdelCtz9 zdVTpJ<UiRa(1vEUK4e02c&2+370`8AFGB%^v;8Fpk0c?>p@|PaYtF1z-E4Z?6ov7} z%<8w-Zvs?dwE755LDlmE-Pe$2-PEU7lvna~0|<oe>EeuZt0S>}SraC8%CcQeb*l@< zD`%mPY;Ni8zqpnTVmFv%N90UQ*~96#=`pVe-aX;rF&aC|-&~11;chSaSYFVrFjM0v z*MgQZ9Foiq9h>_N_q4wOaVGB1jhAu;PAmu5wKmshSG$^z2bpi)LD{%)#n9W@pBLRq zhol{hh3*ls?ueENHc$_7Aqu05icquU1`vy@(o|X>KK35Vu*waDr{=%XF4}BlD3Oz+ zY&hy%>|jTQcjYs$R$nhOX585C4yKeEQkQ*y-WSW<K=9C4w{^Wqo$_?k&)9#YJtyQq z7;@%%359F_RsnnDY?7y*rEkV4>+dG|F8<7~xDltBxRJQ1O{!4<PRKu(gi}-_I!G6F z)k-Q>f<u8o*gzs$BEE#dH^qR-N)4KWH#A!{+Q3gC&LvuoBNMu%2(X(52TTV}7UhXa za)nnbw^kLXKqp!*ojMDBmqS*f{GiLjLLAbJzcG~_w<x#b<tYM|<Zw&irePKW&19o8 zEOk8O6uw3QJ+bO`ay8b2@W|)5dtl{=!5tyGCanfh=L-f=g9Fxo#P3Y{d{$$ut-~O? zj!R&05v#9yo8^2PXo!BuI$}`X+90Ov!^ZjL9<7+Y7E_Zg8Ck4vSwEE~U|+Ne{X73m zzJ0h0=X!6i{yvK+lW*)O9m4||1UZfY>RLmrMWC8QxM2~BML<HWsPw^0kX>bNx^bXR z<&S+kh7>)>FfFZuICA@T+tlftQ`vj+N)qDFjFUk5N>g$8`s9$i0VOSgD?X}J;&lh6 zw48xvrR95}_*XHoqC>(?Y}bMZVPOybBRyclkuzHY#kwc&DGLw-?>{$YCRRVr)YOgx zG{QDiTds~codPa$h8VRz_WHQBlU#4u&&D7$DVv_^rE;|dKAxX^3&OI_I@>ryh>Hjo zb;Vy-pVDI$YgEPy+sw=81d?*9estp)ly%0G*?~&8SVUOLlxhEJTrd*3n+6JA%BLt6 zSf&QsQ!bEYA7hF!7yFiAF4pAl-KPU~%Uwh1vj^G)6Qd_s8Ax12QpA1;z|xBhQD@T? z&DA}Y8Sdv}GoBW$lrw}SNjJ?`I#gYlX_ijw+Ee|KtJm&kGFQ``-LsFfw@QpfX%U*i z{#0tbkdfSN#=Q$&e(roalvrt}`lMOe$N2g8H5<eCTz-K$d!)u)Ix^TJm>S4#4Bk1` zEPb`9kRlvTacqX$s`b2I=I)7-caCh`sLGav$MBYNBTtNA4VUhb=9W0tDhZ1<rnyjc ziH%=TLthp(Cc_gt)eX35;QHq6oiens%M4C_TISff-cPP=Pg&&-v<EY}`Pxefp}E5u zFno#pjvq=H+Zr2#sv41}>z}V?8?O#=^RrFlXt*chVVf)TMJNr!i?yf&fMXmYvni+) zGoSHL#Xt{0QKV?p8*oAMz?WfDi}zP@*Rcxui8h{DVJ;*}o_^t^-<^SK(|uI`<h-dm zfo~&@x|9^9(^R6k+<;QALF@swl``cJ6kC&hf$hX+Vl~-A1j$JJ3^5~3D))qAgQ>`K zvrx^|J3w5J4MK`DUE7l!A@&4CZVnS6u1}S7VVmQOhwMvVV&3r*%=(a7<KD#ve}_qX zll7|140n02KJ7WKZ)upiy$EC+osB;#+!bQ8->ns(lhdj$c{R9YXWu*Jj#{sVHXlB` z=E{LZ6N>s%zVmGheA<qo$5x+%w>d+GHBHC%!pM@FLHqi!BWcU6q@y5fuA0{pQi~=p zlwv{Lkwlq*$&p!&Vf8n~emwC@vfyX-+uF;BpQ^EdC15!vCWNBVswEOr7kOjdMEN2X zQz>^&N2Gi1YiL<2yiAm^kg7!Tlju4HY)Wdvl}N}X3nt`8syg}2lI*-N)qKZ4&RE{c zqq0yTdwBKgEkQ#{>S?;hlCo$;)$GU0Pf{whF^F>9)Z~M+y%5OEvK}q;{mc96M;yBY zkTs`BW{^}G2Gjf9<hKyu$zy`F@QQkZ1N6AebSkS5#7T5}CqQ@NhrWst8=V;-6@hFO zzSgC@?iFA#dIO-_&3M@@3HclI*W+)bS2)WdU0&IRLhJR1ZPCq5f`S&|4G|ndvZ@5J zS`jV0A932QWm3DXhdXk3yFY6LTMs;(uv)V-Avq8t=Mr|c2}*zw%Jq?&2FHgN?DSTX zi52?xO)2g>Kr7CaQ_xb;uBSGxk#ofHz$=2C>c}T5G&h@lxhiAcwPmGX9_pq_MolUm zc(rWeJCB<fMln#ZETrnO)WSw!Q4BCU$X->noHPAC<+bzNw7r0Q88jdtxxCC0Xl5_@ zB@J2y9GGr;XH}gt&F~A&-PFbMqzEGShjcCR^BdoU=YoyY$Vecz^CpRVps@aa`=P`X zNKQF1bceG~+3C?!254Jidp}x}1YaO97HBsWepNc$T*hWn&cu`xiz9)@c9g>0k2nGF z>XPO$@Asm9)DkrrhB6T{nKrwcRlwEWvjCiBuJ0uEdSA(_Rr!VrR1OR|hN}1`XRNNi z6=Ayt%8fds<KL@VE~1YX&5i09tF$aJamcmK*`#T6Fkk>=s&?~bs;&-1b2M(@O1h~> zK@}zwz~yF;gZ1;E9VP2i9&x_kI=EuDKc|$u<QU=IgjSTGx)@)i(dyO6WxU2a*Pbq@ zWq7i?jP@VoV#oTRkoj`5FU<jz7Q~I(9u`=jZoC9*_q1X2S1<uXLsQMvvoQf-O*sNC z1G{f3g$US|XNrH@h5=^%PE^vYrr?0I#^b}KNUczK%xGHPF0od?UDA=&p5<AjDc#~Q zK<5zpT=X3F-u_<v+(tDqm3SgQ=j-9^91)YfnGXO%T=5JH6v+OO-EWNpts7;FcS#V2 zYeK#x!6r=1Dn^5o5}{D8Pc4Y=iK_(%<m>ZQkV-8+I{^xJzo?k$Vtp>79P9W>*i~+5 zeaRZZeA|?%5y!#9l%<Z;!Kp+!O_WAbf@vD$6zDtHVvPlY#a?|?`TD%RS)Ie;_%eg0 zN`y++{hsy!AyxPqRNaYnAiQY{rs3ppx@GH8T6_1t9Z=nxR(elJ|1xm9C(|l7{pZu4 
z<wlM~w3?g~%!q=uF!>jWmhXJ9*n4nh+HMb7k(SEb_Tvzd_2bR$JLgXCL?74#*FX>n zMn)ahk3&nIZ&a$hTo|xts;p-ELshF~-=V3R^7Xoi3(J_tQfaw7FE`<uN+Zmj37`2+ z(+2eBpGX$qTaC)>t88y&)5VQ-2}1W`%oE5R>15y#dU2!3_V^+QibI4EgEb<gzlLwp zQLO+b6N7)_Zuf#$48jTK5ea`Rdh{N&pTbQDG3&F~))Fv@L+xMYRHhC&n>EUluL4<Q zH<nKJMXLV31nd|g@|7vXzuE()UU(H13p9#?-cZ2Z%6NA^%=H!UTxoLq&8{qjVz0S# z3|+8@3=IC7*PbX^gh_;$fsK#j`K{9b>M$S*#JM%0Mx6UrZASsbPIwh2UThVnPIMK< zL9_t-+fc>#(Wpig(-_s~@4#YGN3$RR{w!13T+oqV525A9LYLXrvZwNtWX7lsp(Pc; z3B}T6h@k?<bUM|GkSl)S+rW3YQ%_uJvfRE>@;Vtb8m&Ky5d0B%10UJX8U>On<JA<6 zbwX)Iu50hh{RxZgY9A;vRsEcWwXfjilJJUuwn4RpCijGB<<tn{++_gGyUsTXz!b$$ zP|L|T_(UfWgp}%GiA_=_%+Ye+&rrtM3o;O-fag$<fcKjmK3d$!?5kP$gmH<-0fs`y z!SRtLb{n?<ipbjc2l3G&rj&%q9>?#5WYx-b=s2AONXp&G{A}}3N1>^>(OMT+<(G%Q zS)D&9e|A$e7yF1Ecz(j)@4)G@p)!2Z%}ip3h{3ou93lt=bl{cE^?V=5o*{6)9?U;H zGQQp3m@H;p^2er0;2&UmdU341dTPt8Iq@f-nt~(-2!m%2(xlZCGY6~BtMyZry%1et zIgBeR6wS>uLKt_Q81USCBh$PWmQ$ajaug-H*H)<_bS5Ghh0z4P2TtZz{+?(24FnZP zP}T)LUj!kRiI6j+mRm$z^jf7%^(pEX9~HC?AzzRkD4Oii?9dqy&*^3$g?0&M(6$hH z$hT!lB2<?fcNDiauKgQ0t$du7UqhF9DVbR21bYkf15dbFL(`^9;l#a?BPyY~CWS77 zX)n+idw@&GgM;H)XbyLW!4Xj%e8}9o3!2B4o9BD*&B?$VV2tycrsgzFE)`kd`9i*V zVDLNn-jf*?ntl&*2@*u<SzUjW2F^g2oo;Fx{+`Y}T&u_*b9J(1iINU2^1}E0&iTMe z7yZ-yz)i`#{kUFByw#xR+^4tuN=Xv0J-coJ*jMga&~N5&peM7@wSP@0_iQY+vbj2M zx5;(ioa~_xsN|YgFNati3_w;^Ffvnx#mQmRF@Dpdyyq<>@PjTad>*9Sz9rne#=_y$ zy2{{uE@IA-Xo=v0ZXonFifS1FY_&a=Upns$kh5E;Y#Lw9s*?GskK*iblx|M6B|`w5 zhO5YzxP}4<*|d11K0&Q#{-;@N-5h$gqbjwk0?)$OXU6g<VQb}LOQ2@L@kzkg>^e>; zBDI(Ccz0%`MBM?y8*|daRH8M=`5hA)|9!T}Ygd&wEqLZp)#h4TUjGwn!YN<R*Ege% z)hn_$-Ceozi8os#2R_BxV8nEMNxDlE4q)>BJMJ*Yk*@$c!L&BfZn@%4#`<;j3d<$c z;EFmLy$K>CwF2{VF=^_>BaR@p5;yW8Kfe(=szJGo$^_sN`ZF1=f%}uVfpN71a`@Ok zY_{Kwkl%0>w|o?#LhfCk6-dTzIvYXTd^k_wRR?a2S+pIuKWw<z4_h>8aIy=|hP9Qv zKZjgDr`zpE!B%IF+>57+Vsg(mtaF-7toKN9j4#llFI0m*OhIsc>nCvaVpyr4IfGXd zRGKNbkFAwKuu8C;o?paD@FSdNT(<pg7GiZh<(xNf^myzp(Jq-Oe-5E{Uv`lrWuFyO z8%uz{S=%zKPDvH;eRqJ<)+L52N^^C6h24w4(NLz6Hh&f29^;-SG0>R|$Xx&)Er~yd zjIw6J|A0EnsVC6uKAi{C1&_QPC!>GCfPExg$EfYs(9&@j3pE>(HM-<6f3owGLvpM~ zY%Hw|52x)u4*hpCR&5OH2(>nf+2U8Mj4U`a8odQz_=%^Noi<-NXj|X`o3@yI=VfP) zN3#`8jr#NXJt=Ip_JG?~-pb5+T>>?E4g-B?T>2VE?w>D?`nC10b~igy8$k2Sv3w_% z=`>eFRMb;Pr?|Uz;E;hv;Rfv6^)MU;Mr!72+gSD01~#P@4ZXA`Ji1xkEu-~=uDMFn z)8%~RYqq*Raqayj+4E5=dP>C{)<J6(;AF^%k0cbc5b{O0OCSSgV2r^js<Fxm@|pax z%V~M(9PX&~{>O?e!8bKaO@33+E|X`pePxC7^6+7i=;F~kg&XbGT$;O(gDo*(#BXJ^ z2Gq<0NBc-UB9;`LkwcrjkW&3ptz_g$Y{wBbt|(0HEqc%WG8YP`p0#^VDH~ec7W)!V z^hWAhF|?NaIi<Q!xxb7yn$j<&r|Da+yReDUKi+Ceu~daGTpbT~q)u1r=nai5ziL0^ zWVq<WN8be8X7JPJaTNQ}fuQS3EPSRe^vvz{v7yA;`&CP4(LSGZwjgP2R11-kQzw@S zck$IJoOy-sHNzYRN?X@?nA7I~G}!m?U5?HcaIdSi+u<^+;FBgvj|}#c)0ydeyv{Q8 zX!MiI_e#y5i4!j(x<xrZbrPLx58Paqph64l?^eZxUVrxXChTx(1qo$;tOxsz-Lyj5 zL0``7;poK?h}LA4OXHInz{mgS)iZ+{V!B97^;7(@i}L1X1eCE==hF$M_zZU5q8prk z-tqnxbjrf9L4Q-wQYz*g3_69~o=KMH`9UnncaeQ)9|pn-Wm7djIkPuKSGQ<Cm$#n< zOT4r!QfK3Ds`Z%?ut>n@8xn4WbajB&$QF3xu!cm1;(|cpD8y1lRp~mCjYA`whEcVw z-J1$GZ@t&b)J>06u-$&r1Mid82h4PQ5880>ZLdBtJ`51Cm3_rcf5oW-V;`fh57^UH z0_tA*hUdrox#)%fBm0r&J<{+GfZ;Ajk4k}>Fcf};m_bwOX13t4?-WL4zH`;rNW+dM znYYci!8rwDp?P!Nq9(ZnCD8jyZ>mY<(n`kD%9=U2xN>7~71A5gT+vh_H8}w)oMzpJ zuFNfi`r0gIWu_eK+bB?|9Aq^FbEP3_eOHK(9VY@%JysGkMO5JMggE3O(93A#Q%S@2 zTW*!Yy%xhe0a`?KPKsc#SwF*F9Qdfe9d)#CIhokJQc>3DzLGW4l4ifi@-@(Vm;)qr zIe5DS(lCVv>VC^mVj|~wg}34yM-*z0p-v@Dp{cbEFX}tomyV^CQ=UUk05vJvU*O^Z zhoN0sniMG0=eb2BeZ0T54eM?vVis7q8^y=A4CjL3es9+u4%UGzcva87N+lLW11}U| z*qIG{vEuVk@9$`{QeB&K+NcHOQ4hJNhKfK%klkZAAz@umB~ThX*tMG%yCy6p8uW?( zN#?MoMLG;f5IB>Z6c?9LU|<kuRBOmj2iTI^uMnY_`TfP2PfAp{OoU3xRDf?_M8za~ zHih9D)zgKHeN32K%1i>`*A<*(>43RV3gTfYOrb-8h>4IeSDszD)u_{_sC)oG-@{Y& 
z`;L3q+uEXKKa34)MiT8|XlJ9qAv0)!N!3*|;YHY*tYS5j-fI8!@d`vszjzu`B{|hq zN!rqp2Jds*FETR2v67^_$BYb9HqV8c7byW!N*A5o-W+zP`&ZbOM7@WU_eba+VY`E7 zlN{GN*HYKO7qkbshFcis>XsmZSTYT!(%qOd$2DHln#rJVLq2p#FqBo;lAUOcU%;(* z4(809wME|&%pvw3rhyY4*X%paLN#`JsW>34i#hfgF6hx|Hmy@=^P8xQ%<nj^TN}tt zrmJjjrduS#EsE7!DHESFS}oO&0(xXRq9UgHYueZ)T9ksS`%W5>$qBmY@U1ehV|YK* z+?4XzegG*dzoK_F38Lt0T!<5$HxJpyy8z9R3GGTg<&=HlA~}xisaihP<(8^O+Fu*p zz$JAA@W;Zs%lpDy#lGL4&14w|k<Q#9lgLYYDR&{?cpk2{(e8X=rPMJGp;cB{q2AD5 za;=zB6U!radwIk;WyKux!;iDtX@eSTqXFi~Dok@A0{;yf^-F5lY9N6vf*@}yA(0qk zS^rPU7T}%ix-bYc_~A0w@QM{oCgQs2$7VVMQmlSdtiFC!+`uu-oz0l!Gn}tN^*z>c z?0$>*r$tV+Yy==1k>0P+&Dm1uu2dhIoz(LX&lQ<3{F8h3ms%5aY1x?-l~-%o<@DHY z4{ac!RvC*9mUE*uCyH@OvpWg$ez^xZ8$ut*Z&g(#2lS4+lfNSCIB$CO)Hjqi1vyPB zY|6LzB#=#-P?1jr9Q*xa_MK!T`tD2)b17DTK*k$L0L7Ci-i3)OAnmXtwxRPb>h0CW zPydPoG-hHnW<?^=ilrygdo{8O>#k`u7(Rk9Pk%kbkzMf$OwsPJ?;D|I#QW7Exu+08 znabf}E_om474<%Oy}eo)AjS3!ljj0EJZGNA>iat8EcR^;PnFp$RsB1Nxx^X9<#=ao zd&3(s0I~0U@iK#Zia|=9wRUjjBsZ-zhP*>*QuIlxE+v|>pF*E9BdK0!yUu2MJJCnF zH@ok}QLAU+(nhKwcIg3di2E<M@OHt^Y`>xH%y$nC?p{nh*xG2?4lrRitgklSjhpHg z6<iB+3#zo%1(lya1!?FHfAX&sU(}w~a?xCRXqA4vcfNJaEStR5?$zE6fT>5i49)Y~ z`llNo^K^8iFNdS(qBvp~@h>zS&*n7GMK!CpsjU@a$x4+LIyeNe36v`)m%M<0_l9Rf z0c_)a?LNl|9JJ}y#JBp)Gm$jVvp*2~C~CxtUq&b4G<@I1k6rZakC;05Yn<;F2T%r5 z#tIEI+g5=-VfNP;i}y~7&99?plF^c1m!MxZ2|Z|yz7x#l6Qp|~#-zhb7E75(1m^{j zuC^;He*O$Td|{F`#Am8V_I>t1H|0rJKVYEVLD!W6QMBDuvQ;;0F{}0iQ7?RYsSXu_ ze66X-&aVEj9gp5{;#&_2nVN)6L_xR@yA7L#<hYAqx;UJEsXTO!+ZW82!k*n*VYLe& zw-mo!t-Txp$utvl_uSl9iS_L|(4wSbbGc2HBOt#a#V6i%ToDU3<h<lr=?4*H2p<IP zBi4`1U@AX~X3n5<*U`NwWDs$Kgx|E(d!$>TWMb=O8fThg8XzKQ#LaM7-E?T)4q6b| z5$2eoC^>Z)7%`*x>BjCdHOQ}qxd1P;v)n~lyv#KHt!1*rJj94mvylDV!5TPi$=Q+A zDMYe@EVzg)adPi=_d`0>(}dt7A2z&;l^)|@D;ATxI%yWcw)v@2!sW`XZV=3fgqfIm zLS%11U(FC_L!-UXv}vT?jrC{Ruc;7=^&dl?=&~|Q72#<tuC`M4<R9T+4#ufb5gr9Y zsNDSM#BGFcBJheLyD{Pzbhth!4D1<$9F`3Qx)^Xuv3rEj)I-+^E}wB5tuJlFtwyaV ztsxB?LzkIuZ=&_Q07<CI@Nu5DbeyYwy}Bn{#53AIlULhg0jZ`rCN)hAk3yH&jmFkx z7x$9W=q*3<pJ@C#**|qnJC7iG&E6V5YG@N;8=XwZESWOm@-v&-!rVC*k0I0gAvol- z_LEYDfc}<(P6Gd{PEcC@o~{?CB-P|+ielA5Yr$WB{z&L^_Q54JRJ1^6*m9ugKah4f z6r~iH^GisMesTs`hG`SX=)Xe2DS&uTPMI2PyO^Z@<O-{A7cfz{CnrW6szfBYLE0b4 zC3&vs=^URSzP&lDv)swfaCr=A1CfJJsfGoJ&D^N%t55j_LTe}Q5#N6y<B$_=GZjm4 z6s-lTcIz&S%IUcY3nhchDIuF{I)ObHcewT5V!%Pl<#rWb^K}1O*;$!XnSnLoder_- z%nc{3vMtg(r*|RM`#f}#qXZzllB+RARlKLQo+?F5tV8l`lA?%zu;<mZ9)<}I5tJGL zCl0?1xw}eHJm6@tUA7g?vj0`SOe^$m2aX5byLBDp36$_#M&uQ13OAhPmR?M3TVPd% z_dy?bH+)$KS~hHU^g0xIl?Cj0X3$=I(t*|9!HyGDE09*y)-Iw3KevJ1q8M+xwXbu$ zvw8}d(TK&c*fD2{gxT!O-2z2%f*D1n+Jf>!w&F#-Z`;&6J28uub^A`r!a$FGl5cBr zm`8L@;Apooil5590|PCsy8CN14wkm*&?zYb89gxrRcXuladUq^5;II{<94&a<c9}1 zj}@@_M}yU5A%N$II0yvG*KqJWT_{f>S5U}4R9AxCZevu4oh(pMZTM+=B+)IhBw!_g z4~d8l2rdM}F7qNkvZAj^lMpXQ>OnYZ0z5C_q~706pbDUWBB6pxfI>Rz{Y3WO1~VYj z$DhS#z_rJ*#}ihT3wTR8v={G5Iu|_T*H$JQXoeQ2mVIBz`eDSw`pq^}vLBB4CSGq@ z%aqqxU@;sAcV8bvhc4pdCIQ+n6cqeE)$ff6S0mQ?RHQLSWRVT*C4koAx8>Rb-nM$I z$GlMEYA(VSMYjM#C59Xq?;CeGo=zovS}~xSSXJHcO}^iU5op0zKhORs>r;4)7Qg{2 zT9<3R2}M6p-hn+hj1LCUrlsk~mKK<YLi}MO4{j=Kjm|4#ybXY)061q8W?sN8JRb({ zOlFAca@ZB{&&|v2!sWEeul^s<JYR5AF=*_OmDAgnl{Gq4i?~Hivw~+)HNnkN=B10w zOY+Fd@bsuF@JbY{l(NPuuLG}&uuq{~J2Na$`s<gSR`BnkT?^mJ-f}b)&w8aE-79Du zQI@1Maft`fx~acZ3*3gP=z0fHZhKrIUHLf+w&L7}EOx(!btrpLns+>fzM?sL-_fZ6 zt!0DUnrl>zc(965McMGWH(*V%-%7rfdB`L>4ZZ9s?IK*GJ^8GXj|j3up@M+bvZ#Ke zT%r^da}jIIZ_h8zuM>c1P&d!6UXDl@Pxz%A2ViB;F?WUh+-tE-yOl&j05PQd`NR!e z2~;U)0&F6fH`y~WbNS7KoAT!DTBc><BDZAC=t0gv0FP9d0iN{vlD`?x8<7AN3=tj! 
zY_I`Pm>*E8G!WSt5(X%;hfk?dZLPM`qLH)_(~{rTH;^pajKFg@MXxPU8n1QtuHMPu z^ZD~ykf&3@X*!bR<pel8LaVf_Pk;BGe3;%Y-)QmfQ~h0X(4+mzGYwaUsoF=hr;nt^ zSFuC<O=77i@GUfFa1b{?`C881J1ksw-DF3)N{cp>mf6~J?10FU`ny!;iKQw{41dlM z5g-kyxy4_I>}g=y6Qp_F(i<3uZ#r){AKa#z`v{#4c|Q6WSlgT(?USCCZ-C*gVe#pC zyy-s*vPs#!bKMr}dtbPPU@xNUZPqvHV?%rsdDj~rZ|c6TGe4Onn3fK>LMGbeSxxo? zsV_3EzTh!ovBmAKsVz`NnBH6zvl=pS2iFPrQ*ot7><x(cpSYM`b?dRLmDPh^NUmRT zc06eEqusJp3_z4G_Odr@E|M<`x-`5W(?zFNn|iJ|TOq&?X<DxAwjr(Ts#%*;|I7h5 zkzWy7|5+xQY87G6iCsL_Vg3dfIJDzlHSBio&+0463cLgq&r+2s-%$Q@_(N<|Q5Is^ zl$w-FxrrN1<-$tsF2vKv7t)V)yYv#g9<q}vbaLEPgl<2e$9j|yvy20U_p}#<d;{ud zN2;A4%*Os(K~1_c49tCodi>c(Pl}`D$OU<1p8BSRkWwAO8=<G`p3u9y-Lk;brrf#^ z9Orly<kfm$uX$I%o>_!4a#SKzp%YbvbWUOr;V=afcljA$ShVelk?o1q?z}0hf{4tw z5Gk}Q0#dLLB^z?8zU)QB&>YebJe?pRB!(GZtsgQNp)_b)P=Mb2+sdSr>8#Olf^5jV zM?O5eC&}54ITD!&aC`_Ny$ay@NC^0PbYfExqd~g{34*)VDJ1}Yt8$pxYNMW_oenqG z{rc}8ph8|zUEePx_b1lFos^Vf<H0t^UKgakNeP=g9p?^uVh<+g)6=e)8X^D-)+dga zg>bM}vC+jC{RB}=6jU6A94%+_*+1&p&cWZse=n+F>-O0dzk<}9@DZ2`x{nJ678?`_ z0zP>HnDV360Tb#0-4lxlSdHA8({}1D-GS+iZQu<LR>HSpyy}&~xaeFF3kij$P|@Ai zAN6XGNNb=sd<6vYhkrrFEGnk0p01AqkF5%+r3?bfkEY!hHk-#8FJUBtHVI(-d?Wlz z<TVdlX8$(Z%>iwX9D(o^U(^87pKC3WH&YN%HnQ}Eo4#~e)Q7NdB_xV#zE8}-u-M`) zCT2T7d)s>IBSKN>=EKJkalK#RL5FwW^Eqntqhn$_cz3fE#<6Vg!v!81iCPd;sD7=M z0e89<2bQI3$G6>6L<ELfxiOCW1X+j+l#(js6ch;vdQ|PjDrMn%-z7T-psKzx=eSc< z&qij2Lr>GDsG{gu+oH$k#gl1ae#DN__>tL{@v;l~vdQ$~gL&-XW?z+S;mxh$<w?Cx zu$&czRI`7sy!F`Y8$v0>L4O8FaYgpMU##yOLR-W$xr9P9UQ!j;Q_+`QlS%qg{DjHg zAS7>Qs8kyBZ&vV~<~3KIEXQV|%ANu=Pb4nQl|$ZhlJa})l%0pU&L-?nwazBJi^vgx zyaI5+N^bK?(LP9><nQRE#>AO{A=>N1F63hq9r~87a5{6t3Z+#nFtM4uh6Twja4SkN z@yr#wMas&3*rkfsqv44iA1Jli_Obs)LGni^_HP9VEiEGp^FI|Ntp7gD{NE`^=$UY+ z`E;y>4NQ!T?f(!m{ErkQwDc@ElytPTI4pEDIIN!%2#wDG@K1$_q>hCF4z;MDqR0=W zKlLM$I<~e}PL#Y>=KA1&7m|F&kpFKXiG+@`l8L^(@u$1b*mUK8jE0ep@lUUxq2&gj zst4BJ(dhrG9bx!KeEO$$gzi%V@?RBZ`cLh~XZg=I;0&MIi{I^k)&GU<zxBh&N(cVC z&u9H-VPO8lj_LE$`ggvVSpFQ(-`jutVEjkFKYV@eGcz;%+y4Kj&%eg~S%Wju(Ejr} ze((P-f7$<K$NZ`O`K#@(c6wUYKl?G$|Ka2Beg3Iq_!JcVJBiYNqht7w)8bzR4WBpq z_x%04pyBsz{?CF2S~?o=|3c7!^LMck<KG7UPeB9IpYH!DXkcOeoCyD|prOsp#Z94a z>0>nQE~Dk*B_XwikN_8KN!ZIwSXkLWMkEU7g{%g(8*j4c6pw5WFUtp_RM-pjM@T-X zPkDKcPl%WWP-uOzg{QGXBk^a*pu{)jjfXSri-R^|dZY2tCc6xmeV2?Uu8A~tKR90W z*&x)o&p^P@(bnTiL7p)+P@S2^falI!t-G?o>jO|)IpXm&2KS5TN;SA`U>t(;Wc!QP z;LQGlINZE9tZW}wVkuPmcdJY+DxsnW=fhlUI{8B*8>`Mc#YV|Yjy8`Br$o%*HYuLW z6j!6L?>Da>?3ZGGWScI=eYdnXh9MJ08T>`p(2dZm%!bc?xHDMJ-zvO;<CyIF-|k*l zx^)`Y0T)^f$1`YR-51C3XNkYwz9Ape<m_bQ^Ck&>?0{Gj-Huk#5NX0H#Hi-Z9YG1| z^A18nA(xJwTl04LuVPhBV1pL;Ps^YRNF(-Tv1LRr5t6ePnptXkH28Gq=*myG)s_iO zXoB5(UN)7#dbS6$dqb|;Anu??hmQU5@LT5cv5(H!V_#|C9mnzIXgk9eCtY=mBf@(i z<0GETHHKJ@bgqRMrey%P;I-N=J|w{kPMHEa<y*p`r!f5iFsas9euj|}r0Ml?G9^-r zh56-uVA5XSf1Z(Lwf;*5f;3iWLM><Dv<$6kKtwM5p#zku#mbr^sA^yi`xI6!O8bWz z1|EpA-O7kXVpLE7Po`Mbp8ASc{TmIXhBPZWWS!Wg8Ox};d1@F&5n|CwNQS`5ROOV` z6<=eMg{>Oj3cH1;-;x6k3_9=t-fGy#;Hgn!orT3qvjnhYuQV`+1A}T{M&SEHkIlE< zLU^g3J7-KyDfnI7=~1e^Q(O2)SRn;#`CK$(AnQ#MHvjivkIoHP5IybV<QWSfiw~G$ z8BbI~%pt(pF9WtL^PNc%I#^D9ZCBc@pxnU=I}gIZO|i34P5M|*F=)ME`up@3)epZw zn&wuCG$}n-9LvtcS6uXkH0_yF!jXFpC3Bo66>jkjv#4R9cVyKggu14J8lrNY*||i4 zcMu*iYd}7}dU|(sEr1F!^or7naH<I1$HeYFj3OFx-XPw9?5oui3pJjt8gNm#QAEZo z4R=_PTfFG4JZJ;)Bl;zi3i(cQ8bgLT5*-J4e9c`18SqG|lRISiDaYF0i8UM24npNs z-bs}f2+~apx)DLVP5OxTN`_HMTI(jHjTaPx8l3JtNJEz(P8`0=VWPh(4m=Zy)^!@V zLt*FL)UU3`IzOtqL{?9&Iwonv+*9ql)`z9Pu`k+~v5Ip}RS~T=RBqUBbI=I!s_Oye z8TAqH(E^JA3`*}eN5gLAGy49F%&2X{CY>30&bMO?gcZuvX>LBCH%S&%apep&$%4?g ze4NxLj&uY*eTn|_OP&M0s-Geq{?vUvOfu&oKvbM-cBNo#ETxd9CI09*UlF;m%rQ@} zvmgg@xZez)Ntf5g!Cj+<WZ;@%U6O>LuVQY3d`_nw1s~0^EZQ+W+aw~|TCX7Q+?>iS 
z8v`&||EfG~GwBxvcgwayQX17JO6!w!utvLWsdMqXb8-9iU?^`o`Gh$_X*6(#w+cK& z<1YMwS)Qk<5NV4~b`2?v{BR_>0cD);k4N6DY1$^dNk1VFveah=aZLr*=5NjHqGM@s zbN=WNfU+|YsnQp;p+xMP{1-*$w*F*fscG5Z`Qczn5oj9>&cYG~{_Ln_{%l3)ssIy% z$8`SF!B|saFyWqggx>uT1^Kl2W+_RubrSjvpRkxEeHaCXB@}hX)y7j2g^Kp#>R`W@ zu;i)|n5xa_<JmpqpNxi#>UY@4UY0}C%PB;ywdtUuJF_8(O$fa4wWhbTJh5c*SShWh z$Hv-J=&E*;@zx3f{qnO+*<Fkv>0pJn(Q|u6dNwAof}RW~^<^uEO$>+1X>1=)Tk>x$ z#IwWhg<OGlAKB@Uowp*{?L0<u+G*EOu&`?4zrgy8=Nfezp}6W$16Y9$kJk8k43n%8 zP|n#4ifxoZ&|=ADd&4tSZKU_?r)a~dD*a!CD(UP42Rh#Zq_yW7KnrMQpBL23`fC?1 zb$S|tkdOGChQVu{;=UvHPY$s1#vZ*g$=7QIV#G5mbTS%1<u?sY$1QkTOH`3cLm*L- z{1W>>&O=wX`?AAWN7-m$0#R~_B|z4@N)MjWA}y0D`*k8Pw2VqR8qQDFF_8!Run{Ic zzO$sWq<u#T=oyg6m!?{04-yJNac=($NZ{O;%t_44UjFiyUo6@6_3XE?d@nlj9*T3k zFmfxdNhP;mFbTE_Dm0#ZXz+c_0%jQ)6&q9Gk?Lq5aN3E17Z}pLf=kQ&TWr}${cmze zBOFt1y!KZiNp=f=q5-?}pwyKr=2!4`6(E!<HYv$qifJ@x-_C^ay9z+avVDoD-|X)a zmD3|RRCLCI1oAFZ9iFkonZ|^l^8tT%g+^H)-NMVNm)^3z`hX7<dq;qF`iNvvf4;$> z0!R@f4Rn3ouxUJ?#5lj(uw${I53~X>MF0;R`AB40ec|4+?(^*38qPeh#?ESrv28s5 zi0GMXux`BYoY?YUwrk{;UXj|m^!gx#hIy4@;W*^pzC7ce*y`fBIg9Jzd%s}exFv$d zKYlO=xxfK&0{{jqGQu0YV*Mhnj2rj^eqf&l(x%uM7X-hsNEHs_!3&?>ZgQ^1d#L6| zz{M~ydW=Dll1fcj!D_`*e-<sc342Sy-g9Q-4)?vK4u~eZpLC07-?+~~<5xS+)J4J| zpSANKwg9vJXXo@O{b@Q&=FsS=Aq2q#K(9Q9YrIkiP%=fcc!sk1oOyb-&}FYM<hmt` znj#C!ScRGv3FxjpCRP_1I#ojI++U(xf(Nj#gNLx<f|w&XnkW0|l95AOumq)ZdNs<k zl7VwM34ofj%|~EINqCemwJSj(;^#on%VZ`KExrn0`Fco&mJCq!Rg$;J!r%qP1Jx7* z!iijmu1`k=0%k^jVczD=9(rD@Sd#d1YeQGiAEfdLJng#)itq}H;Qor~w!U^}JP!F7 zUcI~FcGq5WBU!tN{}3a2`)XPd72wK+DPn6Q!O4Y7Y+RD)G&j<AWs`Dj6W$83y2yA< zLKnauNkJ1!7Aj*<zjQ`5N85^Y*$QMOaR^@&jj3&|YViTS&OSSZw@aBn!&)_WS6q7c zHvc0Yfvlas?dR8O{>^aUmNAUs{wZMwS#1(BhFBS?19_++HD*tXOy_C|4W*x9?9?4^ zj|11(dlT>Pd9Qc*eGgnsW{+nq_XiFvWsP%4Jdv1(w7K1Wf(&@SkPdY~4#(mM{N#CJ zMD10}KEUrq!RC3gwr5fgX;MSL!vl={h8}cUVAP<#O~`Ymt_8|gKh+B_JQhtW?a4G+ zkAbiR`FeW7ue7&#cITA-+J&hsOXQX_Uc~S4{XQj-$EO1J9sDToDo6hdouB2jGXdM( z4iO6-2wDX+g_t8pB7F49oTjq8@+2iZ6w<C5WouR8bnbxuv;P#-0fUpF0jTNLw$(n9 zn%tHz`;}21_Yx=@qOdSYU>6lMt4ak>A~txts)Uf^&?Vt88t7Vz@BC)^BfART_eiph zH5jz~$RE*uXht;fAiY~xw=X-saV*0L`otzp8ll(a*>4Bq8m)_1&0qFXN)D|IyQ#F< z*Fnc|aj_73aJq$2<s)tOS=K?|&K3Gezqx`hY`I}{Us%TcVwPdde@-@j3A(dRjl$yC zQvrLig?@l52l_cKhhr?!Hwr0Ky{xpCe|+g-R)chBdInT`n~`}Ku(}Ki-GWy_gHnBj zK^w&-2iYP(^1s-7%izd?bkWl6HZwCbGcz+Yl$n{CnW=5J*={p4Gc&ds+Uzzn+v}da zGjC@1-FP?l_eNAjR%N6<rBWfOq{{OhEDy?6wqKa+D5F_P6k##6CK*?K8F;ynN_h3_ zF%W@ZpmPy*I*C*k)U0kK`K(5bE4I(FM!%1LM;{1`-Rx3zVLSAd<J~^+b<mf*nA$BI zfZpibQ1m2yK0Gt&dbtW*-0mG*Ue8vvs`4}Vzg*967kjusLwMxqz-S|K6YipC<<8bt z<^e+CaFfbaT@*{EDP#oqt3Tk2OY<n`$2r1cO7ypJwVZ>V?NZy^M&citwyAsr8wOrl zOO&wJ5;0Xr9{}Ob3!RV8=uc@o2cnaR5}YMm7~B|M`i@^L;i*>qG!`naJfm%vL5%{7 z47eHnPA-b<0+>G$>bhCcNx=)``;g^{T0W|CO*3}4ZP%AFu}WM;3SJY9k&di537N#C zR36X~?rPOP9I;juO_VAIbXcz)j5<(&#=KM07qG6SWAP_cr5;xr#e)d!z%Xab3dw`- zFdqc0etX^^*Q0iQ7A`tYU2KI!_8AxV^&<M&S6_>(*`qUhPA{Hds5pb=1q1d+v&H70 z51DwjP7?peHyN06a#+eqJbu^s{_na81Q;z?2L&on{svREl~C7?TF$%9^-pj}8wQgp zxEbhlw<6U)1!3ULDFICAn11<7hwU))T=|3=rz?n6jLkcKP+h87e8Eq9fsh%LV>RO) zku(W3L6EX-nQ!y6R(Wsvol2U@=?hC(Le%uTa3x|MgM)|@5qg1mISB$`x&R5-22CKh z1HnU>_CRx_mj22VM(lCaIKFSZ1Fp1Gqb-4F2R|`KV8Hpaa@|pII+WPgC~?3UWzkXl zn<sh}2VZctZs%#%<p2#7Mn4?Z&*V@tX-GEu27OmvuD2`N?=tRTo^!V)>fX;ty3A}d zA5B1qXP8M9-<}#@JP%-+r+8mq=Ey{DyFNf;dy)?p+gIWC>B85c$>^{%Bvp{47K8kH z%yvBkuhLOOA&QlVofkJaV#EzI&~5y|?PsGHr07}?5ld?3sc_%W)oWx_`-Q4w$e@>Z z;;Rj5EA_l;pT*JGAz&Q3kzivIJ1H@Cwkb4{M>x73@w>&<$&bcy@s|cG+x%u0Zk(yQ zsO?|DY{W^R^Nb@WtrnmKsOIjN5I7W=8#cva)djJoY8IxLGN#D0`3;$2Ms2>IfeUJC z_y2qkmH%Q5mm6$^c49QFQKR%&6dehxsM9mA?R6CTOj;hK`)v%`*O(T_s4@dh7WKc* zMZn1f&QEnwqL>6xz<>{+Bj9XBF2Vsn_An0)lLD5XI4;d_rY`L#y2}~%8Ap^&v#^v> 
ze)!f+35MEUtd`m-t;XwzB(bhC&du@^8c8z`qZ8P?2dwOR$vTe{pECg^_Vo8%O%4Mv zOHgP7owPG?>A^RrJ*ocAFLb8xkj=i6VXfvsucC7oqGP%k&Cvj+Kb4O3A`{opsfuLM zeHOKXB@K+`As`$WbU=EM$&kK7!&q*SDN-kG4<2f=0NHvIz-Rs~`aY*1(b|0=wWxJC zwhN2|FK`#!wJvGi<B-E(CcX$hOG>G{VcIoL5EE{*nDMA}P|TkYBN_*3;8F<Qh|^y3 z^-3bm*FNZNLkUVaEVw9N`ML=!;9FhSw>%=3^`uzHpE-pWnQJj@HwIfN0&TWP_*XlH z(e8_v{trqQ83bt?tF9q$sXp%)l`G%Bf3aEvZ646-Jy9c{PbW3_?U$2>bTrDR0DrM9 z8~`468s+|?ymIA5HPzh1!j{y9(MS?WP^02{C0~_k1hlp5W@3(-WrVt@MVLITdRGyP zIu<H>U|*~35U40VDgGct@bAh2b^JWIC4=O)T1w5{Lsk81tek@>eicX4gW*$+*5MNG zi7ARWTl`0;tr(6s+ijX}>c4K95qg_##jH#uJdax<%R8<dmcIE`dGhAuWftce<>08_ zv+?fOe4B#bCeTaoV6+c)_B8Z#5qpT*kdwDfO{WNRCCw_KXT<Js0Oz09pB0md0xn?S zDWHj)8AXcTRP-BC&RVFV{v;1}AJ}S2Yh(Z!$Hg!)@dhEsDQq&;GLlR#iw2C52Bmuw z8NZW;Rjoc?&YKC6d$eMjfo#a7T@-V5TjJa2sDP!{;7ods_pVwzAs_PN{fQcdnZPFS zEA<-~L}R$ndfB->Z5gjI+3-7fyX>isfUTbDk?3}k{}|{8mo+*W?VCu8O=slMYWG<s ziLddtIJ`B{IouhxHSoI|tOsyuT?bvS$4v<MT>oZ>J5<Jd1|jdsfTQ-4)bAfM{U!?m zGEfY{`E}W(brl?jJff2YCQhG+PvJyN1J-OTn+v)d;(Ds>iym{uP<e1BeTm8LL#2Kf zM^VQW>_GwHjn<_O6#PyS+!a>rgMDRADJnzWhpOENFKlG4Yi&BUtew%X*Rc=qepJ@F z3hY?~y+Rj49vv7O+_BOPrZ6@GZ%EuEJx|kCr9&nF-5|T-B`=T$&j<m7uNj8Lsg+b- z0dqx<g9!#7v0@u3B~3f3%`^nE;TN*$Yl1?XK-4E=!qq*OPH)s64>7-DNm41?#MkA6 zrgVdE|AUz>%Z<i<SLTqX?*`-nnjGbMxdfn&CF6=}Iz#xN^6uXp%qf@iq-hKagg#WY zH4E1Q7Z9^}R1;*a#r^?I#w<i0lELP0q4y$T)YZ!GdA8azx@Q8WhnVgt@Gwm0)D*<e z0b-y@DK!dB{**6z{q6kQ-dFlfo~71+NjPZVrX}%Rw0Eyx_7M2uV!7F+zDkKxt_pKU zVY*n<)|_U}!z|iYeBnWGDE4lwkknBLs;ESs#(f-Ix>?7tw@7_`W=7LwW<e3gV(kDh zO|7avKk!%%v<$;uzhp%iAKYY>=+_O7k~gAf%g)Quyp0hE+N7gS`9qL3g$;ZH&I%9R zbLHoTeQq;{(&M_3TV5wAr@_UoN1M|N1^vCrAD20=lD6`!4Gwn~Y>z!)%1j04#2nEb zF`a}|kjOvn>`1nHnfx?E@%ty*QqAnrM`>@dW7WGmzR$U|l=}=GIWxBvEhbpIfUCC2 zmk8AIak?6Hh%EB^U~-LmjiX~Kk9P3ftPC7R(4WPX+LrScHDTV=j+JtJ!*AV1Ob%xP zV2$OEiFl!y6hzN}ego6$Xe)sbNTp#1HjIxr-?^bu-xB>Tuf&fj1D+ZTi3wZBzcJ;( z?D4WAanthobeR0={UqQ1>*BkBg$RRz--((o+8+G`Mts#OQ4d{in(4Dfhjni2&D}L! 
z&j7>karcj+<GpA?iakO3#?e4>vbTv?99dEY)z>?)=d*%zYqz#WK=#5(JED!|>hE_& zd_`bMYS4JYA51o}S0EI%AS8%^dt|}i3Y&zAnajSi-U0Jet+c~(S2N;44fR6I6M=|F z3mX2}&T!E2?NR>zQRqK9lS4M(Fg<<Grn7H4_U>Idht(7hThT9`<!g&#{;^Z!b8)(N zBV-D@(@<B%2*Ea=Clu+@H&Smk3|XBfeJ5q5B*Kav^h8ypdN9vqgvr&^j$X-+s=LiZ zC{|jd&s=P#7D-82pM8T6dk%GJ?p$4C%v_Uej;_xm^|o<s*5(smxsDKjafP?a=a4T< z1_k+3cu5{y0Ip+kJekZD1|MGGWB-ss1^3Aw0ruRyTF*Z^OXe_33BOT*D~F$C@d1ei zF9#rg5Ww$AAt2Qiur1l@NvmF_i85Dhsy58%r1i5FsM-ex--)u->u{=H<~yTDTK!v4 zKllPMp98<1NvS&aZN`Zh%7H-F(|w&~L(bHF_HejC_T7ZY_Y&C*4u7vpjf_W1I2}ON zdmse-0r}+45;DO=WQ<HNDQp}&L*ov>kXd}wgxL^~2aYW~T2i>~6{g$959YJ!e-ZxC ztA6cfD%&HssJ`=IXj2ze2cWnGKAc<`&|I{ea?l;<j2AH5((tcL)QntGPcjX`6J|$7 z{%*8pRHmsT7M2V_95P}})}P*cu5K+0%RsFJ|L4PA%7RRw@Gu!$Hc7h&$_oBR;=(+m zN+cIh==UrQ8R?xF$4KRD?YUAs$lRLG*=NYcO!Ur@3`5}(#~C5v^n;DwGjJ)lxv~S@ zKUwQ&OH(<a=F3yekZPF4b1PM~Kv~{~wM&?55nUdTe)t`64NjYuo?wsI?r+*5T~EgL zX1c$4oNI<w5$d4@&fVTE#i|2mbi;@hkgX&&qa2v>$ZSTGbVIGbLB5f@D7p<j=EXrG zc`6x^LfsNiLN|8x*U`i24K$OIqB)hbtp~xe*vDe^zko%{NnZi-$jE3b6(=yVi$U{+ zm~FteaR#=3^&~LFe-f4X$GSikNYwK9olSLnoZE>#VX?NJ^SXndRpKi)K~_n-XiaAe znO~jmS3VUFKB8A`-p6!w<lP{~OW2N^>heoeubtfcKB0Wg!P7)rND9|w&78xZ7%i-e z;XOmnH`&QIS=PL@pd~@zA#=xPuD|HiF~$;rGF2>%kp+hkr7IsoHCdq>`E|kjl^^~w zd*gB6x)s0UllXIw;rsr~XJ{4o#Z7$8#O0_2eRKYdz~H;X@axLep0meCOM0K_*UXFa zikmOzb;tX&zLqteg=~Rglv$Kzw^?9hRr8`IxemIfrqCT090b(q-6Fmu@ozb7b<?{2 zdio#w;AVYRuUNq;MZl9Oe9S;PJ4J?O4ZBA-hFl_&S|Y(awC5D33h(dz=1yB!#g6*V z?@oOBT{aZ%Zwn0!O=Qb;5*@E!nNassiy>P03MP}$d~#(XEmhLXemzer-$FcMacs}1 zoZ|n+vd=coKH}QtJw=Wd3;)rYEpOx6rbmy_FCUY|kCKu_UovrCi_DJ)D4dD<R`L^# zwpDJkNw$&aP?cX(VDF@j+MM=dK}x*qM#qwIsQRh%Jg%rq6l@H|a^J0usDZWt_C_Vc z^!%eppdV)u$yf7RiRJvQqTtcQ020155mkD4U-<aIPX7Usi}8y}0ynLwE9WB9YuV2* zjFtWZi^Q&45p-C}QD5ed==`2cvP-Tujl__jBfr9+(I*BjV{HlicQ4ijX+OyQ;Rs`D zZy!z3{QQtKO29xwO{NCSF*;(VhY%`rW2AzK@s14p-oL+zL*KFo<IkwvaYp6Hri0lU z=uR!|V%tG;Y%#~9*p4`8oQc|<Nu`7b1&&%Q&@SM@7~ee8;nSept<u^Irr<virH<1* zEt2j!MhDg|5i~Qky-?X{=*EN;B40fVJG@s3o?D&1I?Ot>?Y5}QOjumHY@F_7N&P09 z?g$*Yp@c6KLKT3JG;0C*80|y)$uy{1<>%)XA6b72yV_*^vc52~#g>JF*+lhPUIbw4 ze@X&(I>8!daXw!l<JTfyca|Xgem0IUWyk9)(HT!>LEmw@KH|JNrq-K0XM41IDG2YZ zs6k30K6F2)NUB`=^_x;`SRDQMu(+Z)5<Z(x*2*>6)q5UOYMGCYPtHg)-Z+Ddf?{D| zv%hweMNKcV2qpH!@<VgET3ljfO9OU9*Yl&cjsg0J)A_a;yrh{1Ew348XUYT&9uv*I z5p+<=aZ>(pc}kgfrdP9hE?yLwD6x`}z)dWjo)X2>l!k&Hw6<#bY(~_sf&_&Jk288o zw7irXC47k!XIvPUId=>+mR-SYe2E2<JNrdGrbM(QB|riinWmEdI8A=@HKhgNBX_fS zGIXh5nTx)Eg~A2rac?pa@)g13V)9f(-FTr%U~g;WuFc)RW{tPtE_=I%pkCYWh2dtq z*#<Bw=;Jt-*A$quet2*c+R%;~!+R6<7H*H@wfVWf;(ZIE=R7Y>k=_1s@kiogxcp=d zA57<ge6&2EolH0Vktnd1*!5+Wtv`h80s9iQY(T{Lj@8YzY2dPuxTMvpc0aJ<q1fgv z)@DEM^7A@3%g$dK;i1vD=*(6t5iXTX*o}T14F?k{M#4RKe0aqMT}^Y(X{UC`(%lB8 zI``QbMnnrQHdgBl&x6c#BO?L@($QDz+<UCU*?+-3qf=YItC8Vk3a>}M$M#RNZ()w9 zx`6=6r*YLAUfM4sJ{1sSI1<uXK1R51nmT>e-=6(GOT70^oiPXH4*tr0stuq+PL@Ag zb-87xzwXHztmI<7k{GqRl4y<7plFs965%Gj01aLot9_l6XG*7HN(rJ(DveSD;QDm+ z$R}78F^&Mud?n37r3%f-QmBDzZ1_Nuax*X`33mZbKP_RCl?r4fJzG_hPT9T1YG8Jr zz?mJ5w3JrDrd(xyv+;pO6^o$n2$ObtIh)XkKefopM9aYK<&WhSH+L}8FjNbqku9@L zjI<!i6UDoH5`~I*OtXv}zek7RV7!M<3WNZ%gY0p`E`Ye1q{FFG)Bhll$mT)LIS5NZ z*a^)O<v3A>iV+JE1_-z|06}J`aT<s{va1FJB&|e2`b0n)o{D*1b9v@=K4xuvliZbe zV~`6Vm)Acn^SHq>WT?m^4I1*HO{@zVO2foqX9^Jla8t{Z764uFa2^Mh8UZ~LvoR(D zbQq;eJ%l;=W5fdTkI@7-@NB>w=^O)q&k~J+QcA86&vJvM(M*k{q};F|V<wB>xg2i3 zjUbDMB-Zzqt4OD<u@!-WU~!o10#ml^RqQ{rptS9c<?Qd@yN;1Fr)N|@fyCap1Ob6S zl@k62@Q(I+fvHe?C=b*E2M#NI;CFKLEa)1XN94h(zENL+S^$x)MBp&!0`X^SJr3xc z=56D8a&Oh#t+OBr0s&(oj9@EzANd6AU=LFIey6j!#96!(Uq%(fv`j${@F)f96`6j% zZB??f5%Fnp-M&<;-I#4wNa1+DD@9GsS3nuU(yKE8g1v}5)W9h&EyfN(o-0yfy5J?! 
zJ7BZu{Fy<@qwiIel5%JoA_wv1F`Q%VC;_hlVS9WDE#=UW>jeM9oL@O<jI@i@F|WtB z>8K#!+qkk?c*Jlm&d4iq4dD`9)}6w;1O=H|O3gOsGB<8FWv51b_@KH^q^@Zq2kL@6 z9Qs1&^t2&l0;3tknI0A;iP+ZpmD-JbbT+ot>UCJS-AKaA5Kq0?pt9v`>TdAag^N}B z=o>yTCovs%z5{V=*k076CWrSdUlUGH@@;Ya6kga5Z*4XN29HKo%Jbci`?u>bu7}!T z^T}i6vWeQVD8C+u_iGJ~YA<im@7+8(wO$_Czmi#?wrSiPO=5A`35{29bjm9@$T+;_ zG0n=t7!b(HOS<cg-eJsqBG;zFnrBuR-I1{p5vF8KE)m_l_^%i(3$`RUL(cT&8KwDa zv~44!ssm`&6Jqy9kMAJUAmf)^M^}EzWWy7_Vadc$U2w^=>(^hWePfqO&;s`c`=Qn? zIc{8H1EQ5*$%tjSh{S0K_tPDMq7RYdjKLRhw@T36(Yh1YnS^Jui=x2$(mN-)Bz0kO z!vGpST6LqQWk(GMr`vWsZrvfk<72m13Oeqkxb`4%_@(Cdjm0^a-9K~Lm5bBc#fMx_ zV704bj0t^g2dl#96zDr|#wj456pfraNG)(H8Cees73o`zwyQ)k1=W#}_M>NBfJ>pP ztdmA6V!<IUOOHXR<{Sh|O^;bCjwS4ZVonW?pG}v&GB;w5>Lk>mCMKbD)Jll*p@s)@ zFzFVg_W^9%r*}R5^wZO*%zmzQ?zy}i*?LF%vGrIRoXL;*cvXF}-t1rRbn(2v`6+YO zzX^a69J0m8P*2xjyD4ak>?GDpwQIGb=vbc$6@!`chLgPW0&fuQbYuH&JIy{AOa9#e z&y~yf<DhT#fCGVm{HetG)X75?xql@;tj?2AL>^fvNhfCKVKr1K!|tp_vrn&HCB7yA z9tO>Y<-oti?X}>X2fcD7Rs-&I&3N0nOFz?5^5go)%oU#ozsGG$P0uZk^5WETu803w zo1ZS+^vkt1FRqQG>$=!4aQ4CAuh|dQ$hRC(3T<vLSn?8gSwrP4Csk=7<N%~S`V9l$ zbgtlS!|7H5Q~_nZD}T&(qaI}CT&5hR#6)5Y*=74?&jlF|p<3`Re4(4QMF61CyyWQH zF%VAk$pHQ|l_gjqefgs}mj)}JP6T&$;_+)Ap6fZjn)Kba0=1GR_{ZU*>p0YXEjNeF zOCBPvGwyH3ksCh;99f8lJ|ja<InJqUd?DcdSi@WmQw(BB+a<ADbqpu){HS1AmjGQ$ zxP<&^K3p*mW@Z-r!G=GxmE6?As8ir7Jn5<d$LNydaAPA>vyfq3&%%!RV-D{)7uT`z zTXRcJf~|b+$8YOlIp7R~*A!<OBQNJVt<eKS%~osKA{Xb68@DB=&=u0Tqxfe`y;c*( zK@$g~S^Nli+MxncLnXX<E^ZfkCK8}18A0E_cf$VCk+#v^BIN?y&sxShc1SUvxccGE z>9ug;O`rBjMXz&dYB)&)7N-JgzTen|McPRORtVk)wakwS;DSGc*MxYn;Vhivmv~w1 z7~b~XpSIoDkwAF{?(i7sJNxoBxK7cVcAXG?XVwr@f1`vkEhLEy3{C4cGEj@vtkBM1 zJ>NRm#M?~tT&#i!F_DQrm6x)tEvog|I8Bk&m&&CxF?Yr%Ur)<MJJQu-YcFj!C;j7i z=+Y%6RNb6L$$$UTK9=KGihdd0>0+ll)Q~QJ6BD0JM2Jt~x2QHkmU362qGiiQfW0=D z#HM2{3bqPjKF+UvW4ioIl>~d)(8Yr1^E`bmy7`mI0?Cr405XW=(83!Sx0I=r?Ttr` zO(fgKkpX)OV%gutVhj_@wO7>D9=|ba?xAXQJZ>K7Z@8K}%}XC|PCf0}bT@8{TpMl9 z^>X3y-Wbk?w6o~!1stz(rfP7v1?-^4ln*_j7*p3!aj?^lL9|KMz&kKa3lF*aag|uD zBUhsm<|bg&V~b*C0$QsaO+Y6%gPukD>JD0lUrv5dFBYU2mJ^N}<(&!4hccO*rQImS z<daQBzWqk<WurYUWpIC<9GEhE6Tm{I_3VDg*hjqa5mA~tgc)m~5&$|L_<4TWibl@c zZ;Bpqh-GRIwa-q^h>CaKCNx%NvRWozr?S<ae;j(jK*t3_z`nNze$iUrvhJpY9}}J0 z9b4+%KjP)e?UO;6$e}_YPBx@P8zG<ke7I$Q;YT<M4v`0W6YXvE$3U~xPQ0MCwN1;< z&d<)dNF({zskzSfb3P6Z_85M*I%~<RW<YVkbHL>`jpK24<aHC6J4Rh{fYbW(?+sEr zegOr|X*|;{6MAOkk?S4mQq1}gUMT3k1Z0$6%{a(pmQ64ga%fx_{UGW6<22wW6IceQ z&cTjGYT}{9wX!*U*f_o@+fwKHaiD$W8X%wddVN$Xz>D~XAX(9kby~q1w_W1z5FuKW zR6a6Bv4+4{)co*TLs+(pfu2w0oCpLTDuRtx1l*FYrx;!mt~3wzu}3PWCggg2X!|{= zguDm$LP=a8iiz_!F$cR&W(bs~`Zlh6Z53(uXGWb+VhhUxRYY*SnwDs{Y&`<BLc{Kg zZ1*yXGPeBM{hP3?FV*Ppyyc6fpx40;KEN>T&`!p1>re2#;A+kQpsav<Iv&`llbB)c z+6Ob>Zj;tEuO`s9pcXLp3bpH7o(H=!R-N^qt6PB&f~D)GFXSiu>Bs6k6^@GE@9K}p zeK;xiRvH6Ua=CY-X0lovGNx8tyuIS}Pkb-1oA2Y;#)zFv*LK)=k`xk*7j9n!ZrM}4 zlIEpq9pwdi3X$6+o>+JRo-hPWtXKCS=5vVA13;AU*YHjI$h!1Jkrlf5S?3Wvi3{h? 
zkyiTcV-#VIO<Rfze+o41v87o<O<ggouadezA7r~(TPKk0mxm%fdUk2uS+=0=;;~=O zu8vA`;^=T{no=<)Gn0I9B0@9hZC@kcK*SdaA!k24i2ajW`>s;X#oWHP4%DvyBF;e* zFl0FGuD589?p!9a=p=7aap0~ZEq8BIQ>s`vu<N51v~vA)O>cpzZ$N6&0<S1e6s<K3 zM=l9XGMN)aWEUh~14?J&<`1%8W0P<_DGqwzD&RS+4MOgHqhfq`p05Vg_jMcyJ7O^0 zc6~O;9bc|HnepNKb-BX$XY3U6QQwGu+#lQ`GsR#yW4RAlQjzx;;r3V<;Fvol><@~b zTQtu(DLDkMINoIww3q(Ted&S==^uxmG6D6$*Q{MEDIX1#I$DM?c$eQiZ0rWxh;6{y z0ESs`%x8;Yh{i|jb(+xDB_+a+<I2D?-|W6CB+m(+BL>(TM|9S)Y#c2arvgR~&fY^K zI|w*w3Cgy4{VkT~51%i));Vu_>#l!bz;E~AYA$f7lly}k``8Wefp>S0DXib8SAyfQ z7ObNrQliYEGpW&5pumZiFqg9`B}U~dA?umRUXz13uLjWwy`gWD`bE{9xl(&Z(dU)% z87>leWorVt_}AQrT4oJcJK(5Sf~#?9w}vv-FqwRw?UR>d9M}>thOd6qUL5V54H^%X z%WXg`sHDmLlEd<$<-?_eeSlK`sUdbLO@N=B&QHZ`z+%8-u#8x)P=l~6;r8U_%ESUR z&`Qg`9;2(?vbk4lrd4n3b&tw~Ob?svkg0COG8M<ctKZP;)xcq1?hKYXEeMu^GU@n? z$1btB#e$Ahz@$+<O~7_1I%i|Pyb+wef+Q;m&69$em$F8eKW$F!CCR5^rK}7X0OpJq ztpsS0mfk`pQWJwQEr9#z(1S|rqb#=3#Sy|EF9cDh7cWF9QALmdQbQV62zU1qo?~(d zW3eID3&T)NC|Bw1EtL5#J(S8>Y)V&{esAxEm4Tn(FzMD=zZK)py%Okn>b#Fgd~#KB ztPY5I@v}Kfo7&87Bp;~oznuMG0pSmVj6de~dD=`p320wne&KID`ED&Pdy%NYMma|@ zVEJ(9Qs#40=gz+-?gyk*-{LExy78?RUc_L|Ooqax75q^;L`x?!`?qV-oERuedOS(R zbh-H(ETDQlOCDoqTudOE##y<yekq}|h!!7Jj=Hbg?UP4YF#n)>!240WvQ0a8Xq%_q zV-Q8bfn@*oG%0n+jE&q{bvHP9^u!hVmxax1&@=4aDm-eOw2OURRuf{XRwL(>yi82) zgbz?O-L4DrZWA(O^8^tm?hd?}jAQKDLBFH)SqENN%fif`!Gx}H=@>k>$Fx$D(g=kU z>p-TMXgxiG4^y#LPXs8}AB`6`wY2T2S+rPdws+M?3MiXR&NFwTt$StpG-KiP<+)aj zodykD-)a?!J8*M{!Q_OK$4tzSd$RQU&P6*SG(*d!$kyX^hO76dYt6ap?9V<hH%~xS zDg+-sM2=b-r)(f7$a|Z2!s8hyT~PW$f}Q$DK*s|p9C_g&okD(Nfz65_pO~Ce)EGt0 zrs4L#dYb>~N}CZ-a1+N%%`<+5rUvR{gl%V4l+0#+MaangmfRW%&_w09=6y__2A(`S znvL~KO+QrdIs_9Te*+c;b}zF_(}E)0Rj5TU&q=%FBbos?SKAwr8}LBl_TL%wx1B4_ z?+6+>qIF1sFT}ro^Ie+AP}@~L;n+vL4ZA9S3cD-T=2J?imQN@y#R+*f3T8FvQq}yK zcHC7Il@WdDIC+&$2VpgwwyXTW^oBXHjaz+?>QJ?aU%g4pd`1;nDqn`QRp4kvHNz9! 
zl<ohCUS3s*JMYko69@@!$!cZSMuhTV7q!Ws#JE6N(%XRBRmbA@Dk&_n*J1?+tMg2) z-k%yKR)UKg>dxv`fof4%-w-C;rPW{tyyl$j!*TEvd*!+Ba_0STGycZ`N4%cTW%`?r zp(P8;&Wlp<pEa)Hk^AMq{Bx%#3CjLeSc9et<-jk#eEa>Xt`lkIQ~5(ajp6sRO<X7L z%u&*E62V^6jW218GUqn+quLvH6s})#t9`0#sBuYfzDMhds%<%w9P%NvbLdi<oCQhC zSo>V_TsO>lJyyArzjf-n9F0z2S6|}627Dvv<-Xf$+a2an?V9ZljV7B_m&T$}9>94a zk9ss^^Uq3+=Ol#3-g^o}Km=Upeyb_5vK5R<%F(3btUpe^HAXEzpk6z1iha|hg@-ey z%!xBCSQga!L@I=(7)PYxIF5Mw2#XkTsFb>f2m}3oP^?!g3@Rs*m$%u+dheIy#`k2v z=me7kpR3cm&Px1hmedW(je$WL1^ewTj5hx<Vy>(?WFV!0s3ZE*;Uw=6N6BX*3%Sy9 z_n9b%j|&6miC-aVNL{<HWBBK`?!DmE=1$Q~Zu%k&$JRvQC+WgPEe-s%|3O1I%uN+Y zdX4|>W@H-{|K~;D>$cbFWE0G|&jQop0OqlU6>=Mxk8JQ%#Snh?VA(Db#d9eF`Urgq zMfGlM=hlHc(z0vc$^AR-^%zc(yCQrDWZ&fgm3GG_W>Bv;FjZ*!%i-ZZhv0cTB(4$# zqCjt%0vCbcA8Y4J^ObWjcgHkZy@ih+>~Fqb&}~nvguw$HbJX~KS;2rc&-9L58uZ;c zQ-~_w;{pr8_q}hjalEJ6v3GvvG9x7%?hv*VWIIip2sW$1zXZzzPWc?VQKnzl*O3$C zn{u6rT!U5NEeVT08et!O`?^CQ9NdWriehIkzezF(dkVaQlZFU8Y^(dl&o`$s2nAdI zaN-Zn%;&|vfD!hLzoLX5aD}*A{YC}Z&4H>6dZz?^WF)zycp2!7ASrW&mAOVQ>`juQ z(a(}L>m_;j0f9LoZBfX&_PZTv+{z;^GKWZu$kPJE&n0+ICM(u__vi}hjSDblQy(d1 zz6J0-6ROVCnb5dmd6pAyua%)i=ZMZNgDuZ52S;XzO`$LEDDU~TXpOS=+8FMNa6*3q z_u>>hJZt#0#krMYt8FOHGQ)1q;%|m!l41E=(X{5(pJx8Uyr%Bb32}tnoZv|G$aR@= z8DN^`RcJ|={YZ>dv;;K57!pFWi$6$R<-#U4MTTH&F^dOE9k!XO#rJzF4C-EGIQHbJ zhbv@_JP%6XbpXFB)dRhzGqO0G|5za{JyC4+x{<~$VYt1EGQjRlUkcZhs$-{(65#-} z1X_`ZT6n0GhgyVB2}CVVdQsvzHM+Yr_eKXDbdJ!a<Hj3D1OVn<&@t<v8f-Q9&4mM7 z%HK2}{C>(tsEZIyaD1|J3fQ?7ln>q#hZI7tFFx-%L0#Aj8hKr74eo@}Z=aBcTz}{` z5#~{*RO=HDy=0rH?ENwbXYDQKlVk*Aq!z_;j-c)~553GT`HKCG7CZj!Nep!uCg2z5 zmJg+0jXx{vxi||Eo3K#|?%pP(ujj|I0|u%YlMh9Z4=w-Mr?Xw!r;{B_ALKhlAsua& z-+6FF$d-TqYs{y{mej{G;kg{!d1TA%<=Mm5slkzT-w%YB<$1$ip-+^u1zZN7`&xf5 zpJ0EV>Un-c5tvo)Sp$%Bce2Hg{@uKuUhlD76k8-FU2{*!Cl)9FBZWum7Vx|UhyLS7 zkLBkMk>nX2taJDi%&p*-JlM_y!`9qSHOhVmg!@<wdy75<-14}CFrBL)QGlzBNGCUT z9Ku!jo^OE?<+iK3Ch-Q{2KhczhOjKqh@b-u`Wz{?Xm%+QnNkr>8Ux<h!BPD0u2PIz z(@Ua64B(nB*wizoPKB}!cTp2DV&b{^`3FyyR4o~pt#z9`GD_UTROIcTM?7h@bD1YC zZKG5O(0ne-&s02GWW+M^Hw&2fv^8umF0TwPY?&9;H;vU}otw$em%ZMtVz~G4CD!xn zU+f2S!arJ?8<=kWUlwNqZG_uEj1br+v$&O>PeK$Az$6QQ;9I7jSy<uXe8WxWZg_Vm zA}$<>`yo4HOwKw5D_3ffiCz8;*CPFiWeUa8S)_CqC{xo@Vk$5)5(*6x2N`a>MjRJg zhri<@&AjT&=hxXy?nz~q8~YD_Yn#m(*5$dwx;rh`f4q!Qj8o0$9M$Eq)cySVB+c`j zq(22!Y&2E2iAbz6L5!_vw6EY_1je0bKOuQ2V)FiXMBZPP=pRHL3kwtb|044KYLEU_ z{~w)B92{R@+`kif9RKcgV)-jtu>qJnL;pXWPE1^1PAAqc$q*AOD?8yA62<YKOrE@| ztf-1QjijlKo2iSXG2p+qI{gdAWBTg~`d>623)6qncz;nU<^O=I{^_&&FO$=k`R1<~ z>94uyABLp=`u_Lxzf4a5Rrl9$^tbMB`Cqo9|1dfI(|+{t<=;4e+x|25KP*oFwvPYS z<n(WC{%+yl<^RV0ADNuK+Ws{;{axlCwx_T1pMCge`LF(b#r#Kq|38|X{>J$`CV%_* z75o1lkAL>}tNdr*|GB>ZK?(goX!Eb}{BPR+|1rMy%fF4ye~ic9ee(DHFH-Ugk^SEt z`2Pn|^8bCX{wE>{&CK-=kJ`UEpZ*?u|2-o4^<4IU?0oun{oH>ck}O}F^pEiWLL{00 zR{b9l$yhHKAJnC#r#5#pH?s$>*GW5efO~@{5D_V&exM|TBry@l?+n^{6Hr<xX!76r zV6ddrU?9S%ih`_V_j)OEikb!Jjr{A>&AWw72KjzZPc|y01y5@AU$8k2{)fQ2fxFYi z?c|1c&STaX5HJv5BM|6KQYssq39(irNsrjmebV}}hrM^B4I9uZWG}t`FU5{}z*{x} zsKfU<{{lC$h59-?3_hT>??6f4e}!77eViv2ssJ;0dVHpH<IY~1KN$Bwzn-|#ELZC6 zBP!SWQ#@!?mKrFtyw7ed*M5>cj8s)>E%*B5xdNL|KYH6&Xkk^iK2`|;4Ad{|OAWfh z&P>407>4}R2Fe|R?@G#Y0R+n!<~x|cq4=T1aBs?<5VV+>&nE<8h5_L4*Pe)iV06b< z7oYp)KzmEgP^7d|hR;&6r-HUPrRN&vxCuqS<Q??Fb7OEXz%hiGA4ywRWV6DbFuoa3 zyV`iZ$(ZVwog6%Ew`ueWf5M#V*A9LLiN4>AyheOR6Ep;AC2A5}zKsz)L&pv8T>-HZ zJZabvK3_SnfVN%H%7%tIP@ILS9|y_bAXN%>d4<!4vt1GLiYX_2gho88zIkH8$wg2R z);#u$FoK)?elqqrLV4=tLyY#1V%tLT|ArsKG1x*ft#a*71b*08<$%!@f$Ku_y&M2( z<c^Gzj2l0wAjobrnCR=k`=QBOezOT7v%~2#K+2RcsY7|L+zRz)hrQ?!Soai*6M|vn zGjQI0XR89hks;3YV)cg8sS%=@9`fY;+iYVDi4&S)%xqpNoSE;Mt<_oVMI95K#1Lx( zG`^o8B6&l~Es<kPVxM;^ugAok@Ms=DIw{1z0a3AIS7gs{7Cu4*vgrUl-DiFUUPf4x 
z+_DmTjkC+L8|_Fjt`aT`zr}%GRu$^D0*^ovuF$I`EQviv1~UeI19L<B{B?5<xzK-R z_`sZ7FW)TiAh&^c8PkLFiRqeCps#Z+-OmRrKD{rD&+mwf+83(`-;fV&`juAH59Y#- zZM*=>b2%u*CNO(8P}dieTPaFUa&;zAVcZqBM&AQTKYZbe;t66`ln~uWq8&zxDkFYb zxISt_K64bkK6N9yb43C^vCyMSSUyo9p@FwaQ6O8=pu|7_b4pT{U1gNk@S{(A1?x=c zLFA))C2PgFu4S*P)jM<m-9ZF6M&pqO=di+CKZv(%fUhQ?snp!I7Oj~mehq+e@oe6@ zoIBur#j9V_$8#mIe`SzQ(3TdutjG*egS>I-!W=wFcI(j7Ld?LC{Jt-hvTk_;d82WI zr!Sx=EcT~3xHR1BgpoTVZPtl9n?5hv0#r}#vcUT!xG_4X1mmP7Ceo4Fa?(NE%x<tE ztS!}Y=oC%e==@BXeR69+d<B7PC#r2#H-KkSZ`Oj%D^`m|nnj{P=C)}^qUyMR^K*r1 zQy4xue24hvrRIv-7wg^`Q@If+t)I_A>0K7DlGJ;2ZN-gzdhkTaBYaGxBhLnHpV52F zd^D6x7J9(>b70W{!-Dz9=%Jn<;Vb{Mx}aYcDF^T<3N%Fw115)?>c=ROXWM{tm?;7t zGbarT$!6~1trx`4;?`unY0`fXm}JRwdCQh!fy)lGB2ch2*WbfB6DJizrtTE7GYjwD z**F!QHw7tohw|qM_C!00(57CEWwDsPzx$^<fv9vxNs-zUp&r8Crag5fx<|clQED+( zK(9Zdzk+T_=C0*bH?PVz8TZD`gu8plWVsG)eS`9pA^zF2-1I#p>e_RY`yBN%rIy_s z)5Y3#Wf4H5{Kx*7{3lA}THa5xy$&TJxYmkMR)0sSqDg~F8=`)blttQD)yZ;dDz{gk zz$^d~vuUKiBiLIqNwEzYcyZ~3D60x_XM2o$n=ri1qZ<OxYBvdW->zb_Uzxw<RDbz5 z6%@r^jOG${cNh|Oy8hD{EVR-&={jggC8AJYX-MD~cIh@v>4t`YP1z<=%9~H;Hf6md zo!c}{np;{tN%R`Zt8?$EObG)Jrm)ocJI2F19=@MlQJYLwD^vA`Bb(FPDPyUO$|B-E ze5WMI*Xcx>6IeA^_LKP*n!14>xGLA3vu}BG{iKzyzH6JSbMPn@wecY+=P2C*w`R_P zN0r=VDv&dV^@}b8CF_Dc69U_7;6C<hiL2>n0S(>8J(@a-sl*4fYGQ<YB!;{4LHult zqcjpe<hjf+5%vcN7*{*)GltdoVVa;FX40OPcO5~otH<~A5fU~~l1$hW9mUM3c3m~D zte*Ke6LX0$cs170+@OM?Y&q4}Lg%nee5ywd$t_8)dRXf6tK!}#U6Zz%5=l>Q)v%$J z$i~TX>ASAll}%75J2ygMk@3kf8kPsoaoLH~jEa`<x6<jE3P3C$v5EdWixj-I)aKJS z$G0CR4o3J>C4=izPFUw?Y8h<j>zf(*4|u?+a#Wc{-ri-~I&1h=HIc(Q=&xN#JzX80 zon85)E}=yhZ~JcUqFYF%B%_OxenB`$rzzCKa-fmut}KC{Z=PLGo%48s5va-pTKpph z{MJILEk%5+gqMMdj*&U1uI6#bU2p?Wg+Ek`IPo{?<0xo><B+rvix9T^1V*R)DIT+t zJfF>SxdzN1;7EQL&m{EK#dAFYi#O;=4?ZEHtIW1ACy;cUD9<Vv$7O}U2zCUp?&M46 z$LMveX7Gu$x0W+$a(49~M!j5R6yBaA@ZC@IjPn-{_81;IP%t^U^fPl=cJ5f1ah7RW z*EZiQB*9fF9=7}DzJ#3kvJ_nwKmJBZF_*PWn%ixKZLoktNuEZ8;EApxjiQSt4DBl& z=`VH+xlSJ<#__jG+_XxieI`l9HH&;%C{=UXkH<AmV}7^Hq_Xe!vKVXU@N+#*hHTly z-fFi!m}h_Cw<LAPKaO1*zKGt8Wg6*Q5!v>UKu<O`@GlXcnjt~DZ}q2BC0HFsd12-X zK}rbYg~jz$6YrS;o{QEl|6Q;@hF=g$NtHwfO&UL16j*x+uu(2PC37vpQe{T*N$RFy z)q&0{tu3wXQ4sc$nmyvcffDc{#`>Xf`nXS!+5Ry!iP(`}yZWkoQX?|1@1&Hq;K*Rt zsv(a~%52~q4G0G4%-iKSf2$^yANaE|?5C0vVj5h%+8@{vuh=zf6`v$m4_f>a+gomg zdqeLc;s;b4&IX;`Fy7BK3=6GsH7u}T3^guYQ6bcaujhgHxhy(K(5iu$RqRR?P0#@1 zHJ4GRq|G&GA>_&0S7th6iln(hRQmM>@}##!4^CzL6F{-SN6hMmZ~4K6;yK@Ik&1$Z z!Itx%oRewuTs`@f3-?du1&7P->|c)pPalbRbx+~8(GrB&L)SAI(nZeSe5gz4n`(Yq zpN&xJNYk7skISNTCUt9}HYDM_ODP%gTG`4UN($V(m+JPinq%FKxU{<@Xe<28k>bn6 zRxp(-WBIZ-l^5TOzLeduoWxLcac37ROt{^V<B@bJvfoi(Q=e2|mpr2FvqQWp3bq{y z{uT^^0dIRB%-R{Wa4(+ghqlBThK<&NMqyV$ArC@;35M)AL{(Lz4~7E0#26Sf$1Xw$ z9)VE?xcye+6FY25GMwc`Z*q+p=k~Li7nB(=02G7~h$g0fG*0gF4H&>)uC|*Ti#pGb zS5=tJ=+K<Yd)mIf_f%yuxz0WH2srMDIJL02Jz(hQz^~!w>tI{`xZFDPJ=q_Lr{iOs zY`zk_uQB#OFU!C))3ZqKDqVo^5JhjQ6j#vd>O&+0um1tCg&p{#6~=J~mEuHO0^Su^ z-wVTljKAkVN5x$L874j^#bs~@M70vi<bjKcKy86U4s%}%6+A7^30nY_(r0y+)!X_3 zJ6vrj%3si+iK^>Trzi~9p=m+INI{!Vi1E5&xff>M!q!ua9lFYqx&Pz#+>hn!LCMcg z<SEU^+uX}YTe78i76}FgZ{7!%@%gUzmVzzHZt_9Cz~`}!dOaxdb>;NBU&`DB_hI)i z=oM_z<l}aQV_}&#tXfSFBo(&J>zigWxh6(qm#Adi!|W4w!3V6nTa*-7U1le&+kYI@ zc}}2?sE)8G_OcFzP&yQT;dsTCf55iO`%mYCuvPb~1L|e|2tZC3n27l&(35t|C@UIL zS)hcOJ5M1e9f`_0aVK#pxm=YpjA?$8T4S7RU11C;r-+II`ZG$H<S)lo$=?x=l`Jry zq!>empv69=*KP0qp3-EwzC7>Av|XgL%;#}9DFZ1$OgA5ATf_T&)o`GKdq)s%)WQ%m z^!cQ8mMu`~r5N#+S@V)i%Sq;)#v+%_jAx=*Q#q4P9D(oaEFQKb=2s$_NNgWdpKa~t zm}6EUB<o0cGBU3%w-8n&Ds=)ASKlY_u!r%g3K>4#DNu@ypj@*yJ-X-;tU_{ybtoIF z0vo)Xgiao8Lj{oJD5ApMnKdF+z*Iihe2T?d=5Zgg$g{Cqn46@nqL=xBMAi`Rkiost ze}+FB1x5#ptRFcusVpl!Kp7U2#f;1gH#t7HL&!d&X2+cDwjM5<zQAn^Jtn~DzYN*J 
zkH-JfQe&{?*7^AaDns33tB1QbG|j|*nCjLv(hW}+dH)G`d{#4|UKPzDpD^fr102yv z@F3v7gnFsVN_3R(=L~Li3I6(%7mutyaOzqrIf5A`&tjqdO!?a&fcYIwy$rs@p?&c? zCls{AzLv!68Mfh@anjhzNz6mr%44&y?0W^=nLzI5wWYt?nPG~Lm_B!DcXme1jZVOg z42XdCs;TFPl@P)O?ks{+`wwR)+HNzr?@KmJuvTo5`zc>}FBpc!)Ol@@hw`h!<tSv$ zWn)HEZ3~JmNwTVX6Ot#rC$$bi2AE6EVUxG47$!QX7lU+9jB2klgH^rDeb82sQyhRP zL#g+ulXzBmS!J24PACoi6w`|wrx{66%+ui&q!AWSH}e=-CK*{LVH4~7695DKBv1W= zGe*A)E*KMZCE)R&63U0-=+`6gg7xq7K<)0z<@3}NetW#cwCyrMrCDHd8g=O}E;mV; z?Hn#Y@?V*$86K>C`u)lHw99O>nyyr{rlD4kZ)NDTIeMSye`LPs^FGaedk<IGuJ@LF z7|X8FYrlKAKrxw)N{f}TzO-w56ItKW`?|RM+Yk<GF&awrq%Ne*vmJY1lvqe$wJQr< z9nZ$;=9saB<3kjMP<8dPcEAQ1ao3OW+e-GQ$1Gzh?P6YN$bN&P>^lI4!A{MrcCXJY zlBFNWOsrOb0X-GnW*SXGCfA4R@DmZoWQFJTSGL7Jc)NZZ4Xb7$R{B9!E<R{xrA9(r z;4l?YX^a-9Wk*<k9fSW96Tz%g6+kbA`#`M#%CnG@{)^T9sZXmz;HgFeTffV6sV1{o z%<?(zN5$iXZoLOh28k~g`JEzn*wFsuk>~~r56-K^*<)P}C<F5zzTbz5#s~v^S<R#& z9<>`?jvVd=DGror1`woh#JM3A4%y|zaYQ<zQtR;$XNnxFiH#GoDxrCN@~co`sRd${ z)E^k~AWX8!A;IPD>ST3?uFto_)j{nkptHx;w8*mrf2ulsz4E%Cs$Xh2JS*~gCD(ip zDB1D@Mo#v;Q#o_lOjfd$;n%g3tRflLjTem{C<7GYaEhSuoMA>@BrIwwl{9nFx2|e0 zw}-762cA5x*GXK-@MaLVTJ20OxCYWCH>sUOl@%=2g)OJQu(-r)*jDbx!ib%lJiq=k zzhrjLR7h9IxPP3reM8^0JT$&Qi8VuTR&0=1&c9^kHCb%+_#xjmWUJddvjiR^%*rp9 zrWL$p&5DVwWo4{YzHo}8g|E}8G{Q`tqnzBAw`>h=#dz(djwr${rBk)}^L!Eims3Zh zC=Qv7N%Ek2L&u8z1LMTG5mZIYb4)SVOc}<^_3bj|HaG_5a$uI~3VoT4#?wVVp+=;0 z_IO$NQJYh{j0YN;5osCj1uee6CeM;Md(Le|3Qus~u=qFjYGJL!A|~VOmB$~Ex#>S< zk{3a|*{}Ut1s;y8SB!Np5ez;7zkm;K1Y}mX-xxFbUV}N1mKzIr2_!tZ9jpn)o`>4a z>0HwD7Bb|DtqP~jSpSI5Svr%0pSk^*I=+UW%-?ZHmlYQp1f9BTWs1r)RInUhP_@8j zrNc&N)sL-&vecAI$x1!naV5oc1jhWChX&gCT3xOy01@|R?z?xY#r@exhVXah%6VJS z_Av*{oCsiuHV7tRCx{=vk(Jq#gkaI_w%U-um{M<TNTYg4pc=@8RQD3g{rB*s;z%oU zvE&;fqo2gd`|S;Angmwc&Q?cCp6qdNUt8LPmI9+FGq!7spQ_VV_;fGNP;hcRI83h+ z+M4RbnRiFCSKqBO-k7Y5m+gh6?V|jrqc&a>M8d{m6J$gSEq<9W6PaQ4Y_x7P@w#fo z6$e4J9v)FL-1)fBviy>0z)bpm+hseu3JbA$%YBo6fd#r*MN;89Xg+Gl=^kg7U9GoV z=B@3nTQYYn?Mm<mGd%9tW$mnJLY|AuHV6uo=1)wLsl+BuC9E!ax)?!SiJan}LiBYo zDCbg2E~4#T+fnouDV=(Y<VN=B-96Vz#!*ks98p;*(i1`TWYSS#Nlq$V#93>~?8lU1 zQr2&(5lIX6Zi>k+0k~;sbxL@!BVY4>f0EX`V=P4olo4X8^4YmPa5X85U}P!Cf(P3U z|1Q51-w&K0Ix9gcWGTm|9gzCvP>wGWEQu_TRr<jsm6%M-G!))1hj4tvS8sAO0A8WM zyP$OaT+`gCH*>LkxwX;kyjWVur($p2`p(fYJxHGzUsT+@x_(NrB#X=8v^pT;#ou)h zOE;`{E|=dMfkYxv6wlLaxmSiJntNqjb<MEiv*=CJIEIBxgGDw0Q*3M|&+IuV=0+|e zm@Vd}Vi_zh8e2jXV~b;2QMzD#Pb5W3C|#h@8IT`XFu{Jp!@G*M*;Se{j#NWk?{!DY zg3i8>Zh2VA2G%7Br-@kx0Skf-IueatTX4RXq7u}el7`)xnX#$o%<uVf>6?vm2`A8y zd)l?}E_C)rLK?cr#KQ2{xr)Log$w1yC}W87Ja)NnGb`hzosV$Ba~FPZey5Amkh%?t zU^L$!;X|b|{2BM&ARfG>BT{$`UXOu-%7CaW$Ax3YpsiGi&iRATp&(!tHb1?XAmLtH z3kIYHNi)pKTMCDM?E&Nn4SvEXr8A7~r8NfoG1KNQtd#{fwH5k;bi`p~>@?4rip$5z zzrH~M>KaAOfVh#N*Q19~3)?V#_XJfU{Gqy)tKg|soM4OseyIcLW@P0`q=;}cJ4s#| zKNe1`=V30}I`hIX{H;Ot-h6M*fEi{rG6BL9a}#dW-goUz^_QaxYc$Cpf&KK(L=#G$ z96jqz8&ef<Mg-8RA~ni(U}niU+Mt}$vjvGUxEs0po_>Q%fir>MM{j-<#yvB}6HREV z(<_?mV$MI4A72^=sIju`yOw#J6mtw_S|PFgFi}<C^eDuCv_>Mb_If*@vm*W^l)+y2 z@*YW8gN9?uZz#&#Qw=~mhC)N2ye~xdB}XGiE3NA(7t>G)G&HEaHX>M4CC;g9-4Gv3 zp=yJ@m8@S+x5<&HPbCy}^@yFtoK&=d9B7L)IZG#IrasjU<BcdXV$I#3F=6dYET8+v zE&Q_5SpBR+&~=hSH#0+~WvsD8@4R*7$yI;8gJ?Y-NP1dXz0*h(eK={8dYQw&S9cEM zuCAH95MF~zKBIH`Hq^J~`)JVW^Eg<yKNGA{M07x6*Y5ey+)*=55^+;&RP-3YYo_ui zd7cUC|6=T%qa=yeY+tr*blJ9TyQ<5!ZJS-T-DTUhZFkwOSAFl?nfoy7&03lJM0^n$ z8OlF0&hPB~tJr$NCj^slrB~FdJ(?8xPx->FUCv$;ttO>L)}&3qgHfY54@NkHq&83S z!@GUMD9S0<!N{WYsbs3!%62M!lT&C06Iotu+L6DKsn}6ilRJSq^<7V%>m-p4U?i2k z_MQriL|gK)`b38u1J9n45+N*4E8X4X<}lw|{@baX#Mi}MwpMl+4RMa8*oNEsH`?zr z$27es@B@kJW^ih-Yn00&@xB?I&Yj!7sY?D+JA$>WLk?!-h2SW>K`<%|B^7!hC@~;p z{<<=mkvyCra>4#5o>s3*tu(k4aTIZk64UF!RX_!n%AcM)`3G7zel*a<1u^@g@4xvl 
zsU$q9-s`QXGhV;ms;X903#AfqDY>!YiNax1d8dC~a8e-07ds^2_dVth2K_xy<HY6V zpa6#je(TQ}7D&$yHlpc)?8M}t-xr&hfCs4@#n4Q})lAdqrP$pSrF)O4_#uJRmPvf^ z!*T+tc`3Lz9gC8VZ|F$hXgrw}XT;&E_3*spoZ!>GVTpqVz{LI2f@m=*J%|^wVskU| za<by`)i^zF$`nY{G^A-c{<J8(V}(K-Udg)l;|u@k%l2Kva3LIVLHsmfih{W%Of^l^ zzr1vKPK<dCJo1HzLUcCjnT)xpV|p+L6=4*jJQyTV#HDocb6q|itSDMH$86Ws9HMg+ zVDNblovxX9cFTh)hN2=cc`@B}MHQ|&ub>m9Gsax%19UEK>U-%-#Swnt<y94vWr5x3 zGUg~Oi>Yb~&ylGbcqPEw2Qc_9V>a_Tx`Qg9OedVeBeP|=q7KTKOERm9<~T%WV-##d zs$!^kgTT`su<%0!&HE6TMmhk731(i+6_0@tFT`OCA&<r;+B0a5RK?QRo`ESm=fK2j zMEzQ}$?ci8YcZYas1DgPywY-byx`TP@mjXcbl}B%WFA?<|3m#c8eZlj`E1KkQC;@% zs-|nO9RRZ=J)7`go=fmbdI7IS@}WIPnqSK>Z_u7&uzZ#w)-SKj7Q!0Mxu#lhI4>PA z-ID2ahuQ<3JYM*Mq!-&!**{cZ>>p@Pk^BP>Y;gQ@bj@HLX!u};W45L`8tVmpNV*cd z_~gc;D={{n55ddxfyXNxJX%7$AS3$&ooloHz-zFcHC>bRtm!7(xCV^<2pF_VvS_-) zwgxh+0z?PQCi~ND14c=7wuR?1CCuXWNV6>|76UWzeUn#L=!5+nbgDECN0sf=#vHMp z9aR$#afqMBwGj%H>`K@1Z58=GFqhXxBypl8PkX>LUbm2g89bLDvs{WLki}$M6TBQx zMIRh@bse7`(Rc-BQTzjDVLw4K8y3$m=p0@Lb*y<V&99(yr9NDR<A&g68%NTI9gbHP zV3Pj|jO_{-77#D5ZtKWu2y-x%q-Px%c3&PH2wCJgcXbJ~F%aUlF_*md7#dLWH-T|3 z1(_Z8RV0a;CGjE_Q@?0W<Gcyuka9gkL+q4zO$*yo)`7!zfIh@0heU_i$-KG=N{J2* zLYN3wWTysrdEN{|wjp2|ueAbmXpI7f$&~uF*)Or;VG$)d@Z!Tu?|7jV&hOzJ2CpB6 zgJ(@y2f#SqgU*r03k)B0QopXkxr!JrUJjDzB+P~C$u9-3G)Jzkix-c?Ph(6pkWsU3 zm<G~fvPl!^2KQm=s+5BWUOa^*^}#tIcn!8w^dZ>>FoV}TfpH%u(mM7nP$ARy$^r~4 zD`BQ;;=^2z%%VM;F%t_v>jk_VpAoHKS=1h+<Nb_js;;Da6Dc*tFq}vsHIdpg*oM%r z%`wMPbSTEli)2j|UsZ~qMmqo&K0feby5*4mEM!-_mjvc2q${+iunrKJG<?l0^{Y#~ zjxN4N_R50Xk?F8=;n*aySdK#i<Jb<&=6Mqs@5Pb)N&9_Eb5*t>Sj!FQkbCnuVBwq} zKriC>kjCp;ChJPW(k)>~u*jZ;EF7<dHFJ(6Fi8iPaS6j3#d`>$0~s?l&ViAbA<?0+ zp%2bg@MXNT9~8Qm#(_-U<@OZUVqd^qN_$gFB%acIVE@6ohs?ZB!8oKm(ZaIN`xIaX z#}Y9%&aZ&EybnbWQr?926pm|xmvd!Ixhnapjh!;BQRqX~0c7U$CiII9I?Wr*igRl8 zYf1hFnR)LlbS2paT!z;aw5Rj=m1W}?gyRs##``iMi{$4v(ji(yK_~6s(1*f#guo;} zhb(-~C3GsqSj?s5ztE`^6Ge{6K5aX2R@S}lO0m;+T;4}n;N@6itG3Gf3Sc_NQ%oBU zOuYR9lll<6oQn!x-fM`E!@37Ayp{XFR*h^8=7MB}>2PjL_AD}Q-Y*DV);%y(cDOwp zfyx+?QHc%#hA<?Q?7zqorM1yEOclS~!4unU!-BIC24iFWVKGvr`4CG0>(6p@UI%fW z2<M_cfN}f-9ipN{=kS>?Rtk;xoC4!m2Mil|YR^SBMLIwk04GFVI)U-N5tw+{)uTU5 znsk<nC~kB71BTNA;>D3U+X1o?EK*FTtGw61nV_`41ZJ?E3Jky6!7DQkTW)|sTSSMP zn)ltvhGh959?u8P!*I&T?Kv8+`#8_WYXuUWbSCH^<Y>+G63j`)OSn1V--t3Y{b|1_ zp7`QN1Oh=HGK6m*w;i0F761;di(fG@I1#V(_OVk_u%A^d-I<VrJ7`*pfpgE~G(T1t joDuq+eo@~~f~sivja*cE!j&7yCVpE>Ow7QcgQETqI^po5 literal 0 HcmV?d00001 diff --git a/doc/html/taskplugins.shtml b/doc/html/taskplugins.shtml index 4599bb8be..bcc3d0f1e 100644 --- a/doc/html/taskplugins.shtml +++ b/doc/html/taskplugins.shtml @@ -84,6 +84,28 @@ ID of the node on which the resources are being acquired On failure, the plugin should return SLURM_ERROR and set the errno to an appropriate value to indicate the reason for failure.</p> +<p class="commandline">int task_slurmd_suspend_job (uint32_t job_id);</p> +<p style="margin-left:.2in"><b>Description</b>: Temporarily release resources +previously reserved for a job. +Executed by the <b>slurmd</b> daemon as user root.</p> +<p style="margin-left:.2in"><b>Arguments</b>: +<span class="commandline">job_id</span> (input) +ID of the job which is being suspended.</p> +<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. +On failure, the plugin should return SLURM_ERROR and set the errno to an +appropriate value to indicate the reason for failure.</p> + +<p class="commandline">int task_slurmd_resume_job (uint32_t job_id);</p> +<p style="margin-left:.2in"><b>Description</b>: Reclaim resources which +were previously released using the task_slurmd_suspend_job function. 
+Executed by the <b>slurmd</b> daemon as user root.</p> +<p style="margin-left:.2in"><b>Arguments</b>: +<span class="commandline">job_id</span> (input) +ID of the job which is being resumed.</p> +<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. +On failure, the plugin should return SLURM_ERROR and set the errno to an +appropriate value to indicate the reason for failure.</p> + <p class="commandline">int task_slurmd_release_resources (uint32_t job_id);</p> <p style="margin-left:.2in"><b>Description</b>: Release resources previously reserved for a job. Executed by the <b>slurmd</b> daemon as user root.</p> @@ -142,6 +164,6 @@ appropriate value to indicate the reason for failure.</p> Future releases of SLURM may revise this API.</p> <p class="footer"><a href="#top">top</a></p> -<p style="text-align:center;">Last modified 27 March 2007</p> +<p style="text-align:center;">Last modified 28 May 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/html/team.shtml b/doc/html/team.shtml index 74035f1e8..2e5ffd547 100644 --- a/doc/html/team.shtml +++ b/doc/html/team.shtml @@ -2,7 +2,7 @@ <h1>SLURM Team</h1> <p>SLURM development has been a joint effort of -<a href="http://www.llnl.gov/">Lawrence Livermore National Laboratory</a> (LLNL), +<a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory</a> (LLNL), <a href="http://www.hp.com/">HP</a>, <a href="http://www.bull.com/">Bull</a>, <a href="http://www.lnxi.com/">Linux NetworX</a>. @@ -12,9 +12,7 @@ <li>Morris Jette (LLNL, Project leader)</li> <li>Danny Auble (LLNL)</li> <li>Susanne Balle (HP)</li> -<li>Mark Grondona (LLNL)</li> -<li>Chris Morrone (LLNL)</li> -<li>Daniel Palermo (HP)</li> +<li>Chris Holmes (HP)</li> </ul> <p> SLURM contributers include: </p> @@ -31,8 +29,9 @@ <li>Chris Dunlap (LLNL)</li> <li>Joey Ekstrom (LLNL/Bringham Young University)</li> <li>Jim Garlick (LLNL)</li> +<li>Mark Grondona (LLNL)</li> <li>Takao Hatazaki (HP, Japan)</li> -<li>Chris Holmes (HP)</li> +<li>Matthieu Hautreux (CEA, France)</li> <li>Nathan Huff (North Dakota State University)</li> <li>David Jackson (Cluster Resources)</li> <li>Greg Johnson (LANL)</li> @@ -42,10 +41,12 @@ <li>Bernard Li (Genome Sciences Centre, Canada)</li> <li>Steven McDougall (SiCortex)</li> <li>Donna Mecozzi (LLNL)</li> +<li>Chris Morrone (LLNL)</li> <li>Pere Munt (Barcelona Supercomputer Center, Spain)<li> <li>Bryan O'Sullivan (Pathscale)</li> <li>Gennaro Oliva (Institute of High Performance Computing and Networking, Italy)</li> +<li>Daniel Palermo (HP)</li> <li>Dan Phung (LLNL/Columbia University)</li> <li>Ashley Pittman (Quadrics)</li> <li>Andy Riebs (HP)</li> @@ -59,6 +60,6 @@ Networking, Italy)</li> <li>Anne-Marie Wunderlin (Bull)</li> </ul> -<p style="text-align:center;">Last modified 14 August 2007</p> +<p style="text-align:center;">Last modified 22 May 2008</p> <!--#include virtual="footer.txt"--> diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am index 5a7def9fa..e8d06ea88 100644 --- a/doc/man/Makefile.am +++ b/doc/man/Makefile.am @@ -3,6 +3,7 @@ EXTRA_DIST = man1 man3 man5 man8 man1_MANS = \ man1/sacct.1 \ + man1/sacctmgr.1 \ man1/salloc.1 \ man1/sattach.1 \ man1/sbatch.1 \ @@ -10,11 +11,12 @@ man1_MANS = \ man1/scancel.1 \ man1/scontrol.1 \ man1/sinfo.1 \ - man1/slaunch.1 \ man1/slurm.1 \ man1/smap.1 \ man1/squeue.1 \ + man1/sreport.1 \ man1/srun.1 \ + man1/sstat.1 \ man1/strigger.1 \ man1/sview.1 @@ -92,12 +94,14 @@ man3_MANS = man3/slurm_hostlist_create.3 \ man5_MANS = man5/bluegene.conf.5 \ man5/slurm.conf.5 \ + 
man5/slurmdbd.conf.5 \ man5/wiki.conf.5 man8_MANS = man8/slurmctld.8 \ man8/slurmd.8 \ - man8/spank.8 \ - man8/slurmstepd.8 + man8/slurmdbd.8 \ + man8/slurmstepd.8 \ + man8/spank.8 dist-hook: -rm -rf `find $(distdir) -name CVS` diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in index abc5e9977..109e32167 100644 --- a/doc/man/Makefile.in +++ b/doc/man/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -95,6 +97,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -108,10 +111,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -131,7 +137,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -142,6 +151,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -157,6 +168,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -172,6 +184,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -231,6 +244,7 @@ top_srcdir = @top_srcdir@ EXTRA_DIST = man1 man3 man5 man8 man1_MANS = \ man1/sacct.1 \ + man1/sacctmgr.1 \ man1/salloc.1 \ man1/sattach.1 \ man1/sbatch.1 \ @@ -238,11 +252,12 @@ man1_MANS = \ man1/scancel.1 \ man1/scontrol.1 \ man1/sinfo.1 \ - man1/slaunch.1 \ man1/slurm.1 \ man1/smap.1 \ man1/squeue.1 \ + man1/sreport.1 \ man1/srun.1 \ + man1/sstat.1 \ man1/strigger.1 \ man1/sview.1 @@ -320,12 +335,14 @@ man3_MANS = man3/slurm_hostlist_create.3 \ man5_MANS = man5/bluegene.conf.5 \ man5/slurm.conf.5 \ + man5/slurmdbd.conf.5 \ man5/wiki.conf.5 man8_MANS = man8/slurmctld.8 \ man8/slurmd.8 
 \
-	man8/spank.8 \
-	man8/slurmstepd.8
+	man8/slurmdbd.8 \
+	man8/slurmstepd.8 \
+	man8/spank.8
 
 all: all-am
 
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
new file mode 100644
index 000000000..74b2b16b5
--- /dev/null
+++ b/doc/man/man1/sacctmgr.1
@@ -0,0 +1,363 @@
+.TH SACCTMGR "1" "May 2008" "sacctmgr 1.3" "Slurm components"
+
+.SH "NAME"
+sacctmgr \- Used to view and modify Slurm account information.
+
+.SH "SYNOPSIS"
+\fBsacctmgr\fR [\fIOPTIONS\fR...] [\fICOMMAND\fR...]
+
+.SH "DESCRIPTION"
+\fBsacctmgr\fR is used to view or modify Slurm account information.
+The account information is maintained within a database with the interface
+being provided by \fBslurmdbd\fR (Slurm Database daemon).
+This database can serve as a central storehouse of user and
+computer information for multiple computers at a single site.
+Slurm account information is recorded based upon four parameters
+that form what is referred to as an \fIassociation\fR.
+These parameters are \fIuser\fR, \fIcluster\fR, \fIpartition\fR, and
+\fIaccount\fR. \fIuser\fR is the login name.
+\fIcluster\fR is the name of a Slurm managed cluster as specified by
+the \fIClusterName\fR parameter in the \fIslurm.conf\fR configuration file.
+\fIpartition\fR is the name of a Slurm partition on that cluster.
+\fIaccount\fR is the bank account for a job.
+The intended mode of operation is to initiate the \fBsacctmgr\fR command,
+add, delete, modify, and/or list \fIassociation\fR records, then
+commit the changes and exit.
+
+.SH "OPTIONS"
+.TP
+\fB\-a\fR, \fB\-\-all\fR
+Display information about all entities including hidden or deleted ones.
+This is equivalent to the \fBall\fR command.
+
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+Print a help message describing the usage of \fBsacctmgr\fR.
+This is equivalent to the \fBhelp\fR command.
+
+.TP
+\fB\-\-hide\fR
+Do not display information about hidden or deleted entities.
+This is equivalent to the \fBhide\fR command.
+
+.TP
+\fB\-o\fR, \fB\-\-oneliner\fR
+Print information one line per record.
+This is equivalent to the \fBoneliner\fR command.
+
+.TP
+\fB\-q\fR, \fB\-\-quiet\fR
+Print no warning or informational messages, only error messages.
+This is equivalent to the \fBquiet\fR command.
+
+.TP
+\fB\-s\fR, \fB\-\-associations\fR
+Show associations for the entities displayed.
+This is equivalent to the \fBassociations\fR command.
+
+.TP
+\fB\-v\fR, \fB\-\-verbose\fR
+Print detailed event logging.
+This is equivalent to the \fBverbose\fR command.
+
+.TP
+\fB\-V\fR, \fB\-\-version\fR
+Print version information and exit.
+This is equivalent to the \fBversion\fR command.
+
+.TP
+\fBCOMMANDS\fR
+
+.TP
+\fBall\fR
+Display information about all entities including hidden or deleted ones.
+
+.TP
+\fBadd\fR <\fIENTITY\fR> <\fISPECS\fR>
+Add an entity.
+Identical to the \fBcreate\fR command.
+
+.TP
+\fBassociations\fR
+Show associations for the entities displayed.
+
+.TP
+\fBcreate\fR <\fIENTITY\fR> <\fISPECS\fR>
+Add an entity.
+Identical to the \fBadd\fR command.
+
+.TP
+\fBdelete\fR \fIENTITY\fR with \fISPECS\fR
+Delete the specified entities.
+
+.TP
+\fBexit\fP
+Terminate the execution of sacctmgr.
+Identical to the \fBquit\fR command.
+
+.TP
+\fBhelp\fP
+Display a description of sacctmgr options and commands.
+
+.TP
+\fBhide\fP
+Do not display information about hidden or deleted entities.
+
+.TP
+\fBlist\fR <\fIENTITY\fR> [with <\fISPECS\fR>]
+Display information about the specified entities.
+By default, all entities are displayed.
+Identical to the \fBshow\fR command.
+
+.TP
+\fBmodify\fR <\fIENTITY\fR> \fBwith\fR <\fISPECS\fR> \fBset\fR <\fISPECS\fR>
+Modify the specified entities.
+
+.TP
+\fBoneliner\fP
+Print information one line per record.
+
+.TP
+\fBquiet\fP
+Print no warning or informational messages, only fatal error messages.
+
+.TP
+\fBquit\fP
+Terminate the execution of sacctmgr.
+Identical to the \fBexit\fR command.
+
+.TP
+\fBshow\fR <\fIENTITY\fR> [with <\fISPECS\fR>]
+Display information about the specified entities.
+By default, all entities are displayed.
+Identical to the \fBlist\fR command.
+
+.TP
+\fBverbose\fP
+Print detailed event logging.
+This includes time\-stamps on data structures, record counts, etc.
+This is an independent command with no options meant for use in interactive mode.
+
+.TP
+\fBversion\fP
+Display the version number of sacctmgr being executed.
+
+.TP
+\fB!!\fP
+Repeat the last command executed.
+
+.TP
+\fBENTITIES\fR
+
+.TP
+\fIaccount\fP
+A bank account, typically specified at job submit time using the
+\fI\-\-account=\fR option.
+These may be arranged in a hierarchical fashion, for example
+accounts \fIchemistry\fR and \fIphysics\fR may be children of
+the account \fIscience\fR.
+The hierarchy may have an arbitrary depth.
+
+.TP
+\fIassociation\fP
+The entity used to group information consisting of four parameters:
+\fIaccount\fR, \fIcluster\fR, \fIpartition\fR, and \fIuser\fR.
+
+.TP
+\fIcluster\fP
+The \fIClusterName\fR parameter in the \fIslurm.conf\fR configuration
+file, used to differentiate accounts on different machines.
+
+.TP
+\fIuser\fR
+The login name.
+
+.TP
+\fBSPECIFICATIONS FOR ACCOUNTS\fR
+.TP
+\fICluster\fP=<cluster>
+Specific cluster to add the account to. Default is all clusters in the system.
+\fIDescription\fP=<description>
+An arbitrary string describing an account.
+.TP
+\fIFairshare\fP=<fairshare>
+Number used in conjunction with other accounts to determine job priority.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxCPUSecs\fP=<max cpu seconds>
+Maximum number of cpu seconds each job is able to use in this account.
+This is overridden if set directly on a user.
+Default is the cluster's limit.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxJobs\fP=<max jobs>
+Maximum number of jobs each user is allowed to run at one time in this account.
+This is overridden if set directly on a user.
+Default is the cluster's limit.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxNodes\fP=<max nodes>
+Maximum number of nodes each job is able to use in this account.
+This is overridden if set directly on a user.
+Default is the cluster's limit.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxWall\fP=<max wall>
+Maximum wall clock time each job is able to use in this account.
+This is overridden if set directly on a user.
+Default is the cluster's limit.
+<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or
+<days>\-<hr>:<min>:<sec> or <days>\-<hr>.
+The value is recorded in minutes with rounding as needed.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIName\fP=<name>
+The name of a bank account.
+.TP
+\fIOrganization\fP=<org>
+Organization to which the account belongs.
+.TP
+\fIParent\fP=<parent>
+Parent account of this account. Default is no parent, a top level account.
+.TP
+\fIQosLevel\fP=<qos>
+Quality of Service at which jobs are to run for this account. Valid values are
+currently Normal, Standby, Expedite, and Exempt.
+This is overridden if set directly on a user.
+
+.TP
+\fBSPECIFICATIONS FOR CLUSTERS\fR
+.TP
+\fIFairshare\fP=<fairshare>
+Number used in conjunction with other accounts to determine job priority.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIName\fP=<name>
+The name of a cluster.
+This should be equal to the \fIClusterName\fR parameter in the \fIslurm.conf\fR
+configuration file for some Slurm-managed cluster.
+.TP
+\fIMaxCPUSecs\fP=<max cpu seconds>
+Maximum number of cpu seconds each job is able to use on this cluster.
+This is overridden if set directly on an account or user.
+Default is no limit.
+To clear a previously set value use the modify command with a new value of \-1.
+\fIMaxJobs\fP=<max jobs>
+Maximum number of jobs each user is allowed to run at one time on this cluster.
+This is overridden if set directly on an account or user.
+Default is no limit.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxNodes\fP=<max nodes>
+Maximum number of nodes each job is able to use on this cluster.
+This is overridden if set directly on an account or user.
+Default is no limit.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxWall\fP=<max wall>
+Maximum wall clock time each job is able to use on this cluster.
+This is overridden if set directly on an account or user.
+Default is no limit.
+<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or
+<days>\-<hr>:<min>:<sec> or <days>\-<hr>.
+The value is recorded in minutes with rounding as needed.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIQosLevel\fP=<qos>
+Quality of Service at which jobs are to run on this cluster. Valid values are
+currently Normal, Standby, Expedite, and Exempt.
+This is overridden if set directly on an account or user.
+
+.TP
+\fBSPECIFICATIONS FOR USERS\fR
+.TP
+\fIAccount\fP=<account>
+Account name to add this user to.
+.TP
+\fIAdminLevel\fP=<level>
+Admin level of user. Valid levels are None, Operator, and Admin.
+.TP
+\fICluster\fP=<cluster>
+Specific cluster to add the user to the account on. Default is all clusters in the system.
+.TP
+\fIDefaultAccount\fP=<account>
+Identify the default bank account name to be used for a job if none is
+specified at submission time.
+.TP
+\fIFairshare\fP=<fairshare>
+Number used in conjunction with other users in the same account to
+determine job priority.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIName\fP=<name>
+Name of user.
+.TP
+\fIQosLevel\fP=<qos>
+The Quality of Service at which jobs are to run for this user using the
+account specified. Valid values are currently Normal, Standby, Expedite, and Exempt.
+.TP
+\fIMaxCPUSecs\fP=<max cpu seconds>
+Maximum number of cpu seconds this user can use in each job using the
+account specified.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxJobs\fP=<max jobs>
+Maximum number of jobs this user can run at a given time using the
+account specified.
+This is overridden if set directly on a user.
+Default is the account's limit.
+To clear a previously set value use the modify command with a new value of \-1.
+.TP
+\fIMaxNodes\fP=<max nodes>
+Maximum number of nodes this user can allocate in each job using the
+account specified.
+Default is the account's limit.
+.TP
+\fIMaxWall\fP=<max wall>
+Maximum wall clock time this user can use in each job using the
+account specified.
+Default is the account's limit.
+<max wall> format is <min> or <min>:<sec> or <hr>:<min>:<sec> or
+<days>\-<hr>:<min>:<sec> or <days>\-<hr>.
+The value is recorded in minutes with rounding as needed.
+To clear a previously set value use the modify command with a new value of \-1.
+
+.SH "EXAMPLES"
+.eo
+.br
+> sacctmgr create cluster=tux
+.br
+> sacctmgr create account name=science fairshare=50
+.br
+> sacctmgr create account name=chemistry parent=science fairshare=30
+.br
+> sacctmgr create account name=physics parent=science fairshare=20
+.br
+> sacctmgr create user name=adam cluster=tux bank=physics fairshare=10
+.br
+> sacctmgr modify user with name=adam cluster=tux bank=physics \
+.br
+ set maxjobs=2 maxtime=30:00
+.ec
+
+.SH "COPYING"
+Copyright (C) 2008 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+LLNL\-CODE\-402394.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+details.
+
+.SH "SEE ALSO"
+\fBslurm.conf\fR(5)
+\fBslurmdbd\fR(8)
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 15597c132..07b2eabce 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -1,4 +1,4 @@
-.TH "salloc" "1" "SLURM 1.2" "February 2008" "SLURM Commands"
+.TH "salloc" "1" "SLURM 1.3" "May 2008" "SLURM Commands"
 .SH "NAME"
 .LP
 salloc \- Obtain a SLURM job allocation (a set of nodes), execute a command, and then release the allocation when the command is finished.
@@ -15,6 +15,42 @@ The command may be any program the user wishes. Some typical commands are xterm
 .SH "OPTIONS"
 .LP
+.TP
+\fB\-\-acctg\-freq\fR=\fIseconds\fR
+Define the job accounting sampling interval.
+This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
+configuration file, \fIslurm.conf\fR.
+A value of zero disables the periodic job sampling and provides accounting
+information only on job termination (reducing SLURM interference with the job).
+
+.TP
+\fB\-B\fR \fB\-\-extra\-node\-info\fR=\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]
+Request a specific allocation of resources with details as to the
+number and type of computational resources within a cluster:
+number of sockets (or physical processors) per node,
+cores per socket, and threads per core.
+The total amount of resources being requested is the product of all of
+the terms.
+As with \-\-nodes, each value can be a single number or a range (e.g. min\-max).
+An asterisk (*) can be used as a placeholder indicating that all available
+resources of that type are to be utilized.
+As with nodes, the individual levels can also be specified in separate
+options if desired:
+.nf
+ \fB\-\-sockets\-per\-node\fR=\fIsockets\fR
+ \fB\-\-cores\-per\-socket\fR=\fIcores\fR
+ \fB\-\-threads\-per\-core\fR=\fIthreads\fR
+.fi
+When the task/affinity plugin is enabled,
+specifying an allocation in this manner also instructs SLURM to use
+a CPU affinity mask to guarantee the request is filled as specified.
+NOTE: Support for these options is configuration dependent.
+The task/affinity plugin must be configured.
+In addition either select/linear or select/cons_res plugin must be
+configured.
+If select/cons_res is configured, it must have a parameter of CR_Core,
+CR_Core_Memory, CR_Socket, or CR_Socket_Memory.
+
 .TP
 \fB\-\-begin\fR[=]<\fItime\fR>
 Submit the batch script to the SLURM controller immediately, like normal, but
@@ -58,6 +94,12 @@ nodes, then use the OR operator and enclose the options within square brackets.
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might be used to
 specify that all nodes must be allocated on a single rack of the
 cluster, but any of those four racks can be used.
+A request can also specify the number of nodes needed with some feature
+by appending an asterisk and count after the feature name.
+For example "\fBsalloc \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR
+indicates that the job requires 16 nodes and that at least four of those
+nodes must have the feature "graphics."
+Constraints with node counts may only be combined with AND operators.
 If no nodes have the requested features, then the job will be rejected
 by the slurm job manager.
 
@@ -88,13 +130,6 @@ of 4 nodes, one for each of the 4 tasks.
 \fB\-D\fR, \fB\-\-chdir\fR=\fIpath\fR
 change directory to \fIpath\fR before beginning execution.
 
-.TP
-\fB\-d\fR, \fB\-\-dependency\fR[=]<\fIjobid\fR>
-Defer the start of this job until the specified \fIjobid\fR has completed.
-Many jobs can share the same dependency and these jobs may even belong to
-different users. The value may be changed after job submission using the
-scontrol command.
-
 .TP
 \fB\-\-exclusive\fR
 The job allocation cannot share nodes with other running jobs. This is
@@ -115,7 +150,7 @@ will be sorted my SLURM.
 This option will load login environment variables for the user specified
 in the \fB\-\-uid\fR option.
 The environment variables are retrieved by running something of this sort
-"su - <username> -c /usr/bin/env" and parsing the output.
+"su \- <username> \-c /usr/bin/env" and parsing the output.
 Be aware that any environment variables already set in salloc's environment
 will take precedence over any environment variables in the user's login
 environment.
@@ -142,6 +177,26 @@ may be the group name or the numerical group ID.
 \fB\-h\fR, \fB\-\-help\fR
 Display help information and exit.
 
+.TP
+\fB\-\-hint\fR=\fItype\fR
+Bind tasks according to application hints
+.RS
+.TP
+.B compute_bound
+Select settings for compute bound applications:
+use all cores in each physical CPU
+.TP
+.B memory_bound
+Select settings for memory bound applications:
+use only one core in each physical CPU
+.TP
+.B [no]multithread
+[don't] use extra threads with in-core multi-threading
+which can benefit communication intensive applications
+.TP
+.B help
+show this help message
+.RE
+
 .TP
 \fB\-I\fR,\fB\-\-immediate\fR
 Grab the requested resources immediately, or abort if the resources are not
@@ -182,6 +237,60 @@ new job steps on the remaining nodes in their allocation.
 By default SLURM terminates the entire job allocation if any node fails in its
 range of allocated nodes.
 
+.TP
+\fB\-L\fR, \fB\-\-licenses\fR=
+Specification of licenses (or other resources available on all
+nodes of the cluster) which must be allocated to this job.
+License names can be followed by an asterisk and count
+(the default count is one).
+Multiple license names should be comma separated (e.g.
+"\-\-licenses=foo*4,bar").
+
+.TP
+\fB\-m\fR, \fB\-\-distribution\fR=
+(\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR)
+Specify an alternate distribution method for remote processes.
+.RS
+.TP
+.B block
+The block method of distribution will allocate processes in\-order to
+the cpus on a node. If the number of processes exceeds the number of
+cpus on all of the nodes in the allocation then all nodes will be
+utilized. For example, consider an allocation of three nodes each with
+two cpus. A four\-process block distribution request will distribute
+those processes to the nodes with processes one and two on the first
+node, process three on the second node, and process four on the third node.
+Block distribution is the default behavior if the number of tasks
+exceeds the number of nodes requested.
+.TP
+.B cyclic
+The cyclic method distributes processes in a round\-robin fashion across
+the allocated nodes. That is, process one will be allocated to the first
+node, process two to the second, and so on. This is the default behavior
+if the number of tasks is no larger than the number of nodes requested.
+.TP
+.B plane
+The tasks are distributed in blocks of a specified size.
+The options include a number representing the size of the task block.
+This is followed by an optional specification of the task distribution
+scheme within a block of tasks and between the blocks of tasks.
+For more details (including examples and diagrams), please see
+.ad l
+.nh
+https://computing.llnl.gov/linux/slurm/mc_support.html and
+https://computing.llnl.gov/linux/slurm/dist_plane.html.
+.hy
+.ad
+.TP
+.B arbitrary
+The arbitrary method of distribution will allocate processes in\-order as
+listed in the file designated by the environment variable SLURM_HOSTFILE. If
+this variable is listed it will override any other method specified.
+If not set the method will default to block. The hostfile must
+contain at a minimum the number of hosts requested. If requesting tasks
+(\-n), your tasks will be laid out on the nodes in the order of the file.
+.RE
+
 .TP
 \fB\-\-mail\-type\fR=\fItype\fR
 Notify user by email when certain event types occur.
@@ -196,7 +305,13 @@ The default value is the username of the submitting user.
 
 .TP
 \fB\-\-mem\fR[=]<\fIMB\fR>
-Specify a minimum amount of real memory.
+Specify the real memory required per node in MegaBytes.
+If a value is specified, that quantity of memory will be
+reserved for this job.
+If no value is specified and real memory is exhausted on
+any allocated node then the job is subject to cancellation.
+Also see \fB\-\-task\-mem\fR.
+
 
 .TP
 \fB\-\-mincores\fR[=]<\fIn\fR>
@@ -236,13 +351,14 @@ If a job node limit exceeds the number of nodes configured in the
 partition, the job will be rejected.
 
 .TP
-\fB\-n\fR, \fB\-\-tasks\fR[=]<\fInumber\fR>
-salloc does not launch tasks, it requests an allocation of resources and submits
-a batch script. However this \-\-tasks option advizes the SLURM controller
-that job steps run within this allocation will launch a maximum of \fInumber\fR
-tasks.
This option, possibly with collaboration with the \-\-cpus\-per\-task
-option, will directly impact the number of processors granted to the job
-allocation.
+\fB\-n\fR, \fB\-\-ntasks\fR[=]<\fInumber\fR>
+salloc does not launch tasks, it requests an allocation of resources and
+executes a command. This option advises the SLURM controller that job
+steps run within this allocation will launch a maximum of \fInumber\fR
+tasks and sufficient resources are allocated to accomplish this.
+The default is one task per socket or core (depending upon the value
+of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
+that the \fB\-\-cpus\-per\-task\fR option will change this default.
 
 .TP
 \fB\-\-nice\fR[=]<\fIadjustment\fR>
@@ -254,6 +370,40 @@ a negative adjustment.
 NOTE: This option is presently ignored if \fISchedulerType=sched/wiki\fR or
 \fISchedulerType=sched/wiki2\fR.
 
+.TP
+\fB\-\-ntasks\-per\-core\fR=\fIntasks\fR
+Request that no more than \fIntasks\fR be invoked on each core.
+Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
+instead of the node level. Masks will automatically be generated
+to bind the tasks to specific cores unless \fB\-\-cpu_bind=none\fR
+is specified.
+NOTE: This option is not supported unless \fISelectType=CR_Core\fR
+or \fISelectType=CR_Core_Memory\fR is configured.
+
+.TP
+\fB\-\-ntasks\-per\-socket\fR=\fIntasks\fR
+Request that no more than \fIntasks\fR be invoked on each socket.
+Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
+instead of the node level. Masks will automatically be generated
+to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
+is specified.
+NOTE: This option is not supported unless \fISelectType=CR_Socket\fR
+or \fISelectType=CR_Socket_Memory\fR is configured.
+
+.TP
+\fB\-\-ntasks\-per\-node\fR=\fIntasks\fR
+Request that no more than \fIntasks\fR be invoked on each node.
+This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
+but does not require knowledge of the actual number of cpus on
+each node. In some cases, it is more convenient to be able to
+request that no more than a specific number of ntasks be invoked
+on each node. Examples of this include submitting
+a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
+assigned to each node while allowing the OpenMP portion to utilize
+all of the parallelism present in the node, or submitting a single
+setup/cleanup/monitoring job to each node of a pre\-existing
+allocation as one step in a larger job script.
+
 .TP
 \fB\-\-no\-bell\fR
 Silence salloc's use of the terminal bell. Also see the option \fB\-\-bell\fR.
@@ -276,6 +426,34 @@ Request a specific partition for the resource allocation. If not specified, the
 default behaviour is to allow the slurm controller to select the default
 partition as designated by the system administrator.
 
+.TP
+\fB\-P\fR, \fB\-\-dependency\fR[=]<\fIdependency_list\fR>
+Defer the start of this job until the specified dependencies have been
+satisfied.
+<\fIdependency_list\fR> is of the form
+<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
+Many jobs can share the same dependency and these jobs may even belong to
+different users. The value may be changed after job submission using the
+scontrol command.
+.PD
+.RS
+.TP
+\fBafter:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have begun
+execution.
+.TP
+\fBafterany:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated.
+.TP
+\fBafternotok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated
+in some failed state (non-zero exit code, node failure, timed out, etc).
+.TP
+\fBafterok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have successfully
+executed (ran to completion with a zero exit code).
+.RE
+
 .TP
 \fB\-q\fR, \fB\-\-quiet\fR
 Suppress informational messages from salloc. Errors will still be displayed.
@@ -301,6 +479,13 @@ Acceptable time formats include "minutes", "minutes:seconds",
 "hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and
 "days\-hours:minutes:seconds".
 
+.TP
+\fB\-\-task\-mem\fR[=]<\fIMB\fR>
+Minimum memory available per task in MegaBytes.
+Default value is \fBDefMemPerTask\fR and the maximum value is
+\fBMaxMemPerTask\fR, both of which can be seen using the
+\fBscontrol show config\fR command.
+
 .TP
 \fB\-\-tmp\fR[=]<\fIMB\fR>
 Specify a minimum amount of temporary disk space.
@@ -374,7 +559,7 @@ SLURM will normally allocate a TORUS if possible for a given geometry.
 Specify the geometry requirements for the job. The three numbers
 represent the required geometry giving dimensions in the X, Y and Z
 directions. For example "\-\-geometry=2x3x4", specifies a block
-of nodes having 2 x 3 x 4 = 24 nodes (actually base partions on
+of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on
 Blue Gene).
 
 .TP
@@ -397,6 +582,9 @@ variables settings.
 \fBSALLOC_ACCOUNT\fR
 Same as \fB\-\-account\fR.
 .TP
+\fBSALLOC_ACCTG_FREQ\fR
+Same as \fB\-\-acctg\-freq\fR.
+.TP
 \fBSALLOC_BELL\fR
 Same as \fB\-\-bell\fR.
 .TP
@@ -481,7 +669,8 @@ The block name on Blue Gene systems only.
 
 .LP
 While salloc is waiting for a PENDING job allocation, most signals will cause
 salloc to revoke the allocation request and exit.
-However, if the allocation has been granted and salloc has already started the command speficied in its command line parameters salloc will ignore most signals. salloc will not exit or release the allocation until the command exits. The noteable exception is SIGHUP; a HUP signal will cause salloc to release the allocation and exit without waiting for the command to finish.
+However, if the allocation has been granted and salloc has already started the command
+specified in its command line parameters, salloc will ignore most signals. salloc will not exit or release the allocation until the command exits. The notable exception is SIGHUP; a HUP signal will cause salloc to release the allocation and exit without waiting for the command to finish.
 
 .SH "EXAMPLES"
 .LP
@@ -502,7 +691,7 @@ salloc \-N5 srun \-n10 myprogram
 
 .SH "COPYING"
 Copyright (C) 2006\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-UCRL\-CODE\-226842.
+LLNL\-CODE\-402394.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sattach.1 b/doc/man/man1/sattach.1
index 3c8464da7..a13f1da7d 100644
--- a/doc/man/man1/sattach.1
+++ b/doc/man/man1/sattach.1
@@ -69,7 +69,7 @@ sattach \-\-output\-filter 5 65386.15
 
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-UCRL\-CODE\-226842.
+LLNL\-CODE\-402394.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
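To make the salloc additions above concrete, the two command lines below are a minimal sketch of how the new options combine; the script names are hypothetical placeholders, the license name "foo" is borrowed from the \-\-licenses example text, and job 65539 is the job ID used in the scontrol examples later in this patch:

    salloc --ntasks=8 --ntasks-per-node=2 --distribution=cyclic ./interactive_run.sh
    salloc --nodes=4 --licenses=foo*2 --dependency=afterok:65539 ./post_process.sh

The first request spreads at most two of its eight tasks onto each node and lays them out round-robin; the second is held until job 65539 completes with a zero exit code and also reserves two "foo" licenses.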
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 9971fdfc1..18e38369b 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -1,4 +1,4 @@
-.TH "sbatch" "1" "SLURM 1.2" "August 2007" "SLURM Commands"
+.TH "sbatch" "1" "SLURM 1.3" "May 2008" "SLURM Commands"
 .SH "NAME"
 .LP
 sbatch \- Submit a batch script to SLURM.
@@ -10,7 +10,7 @@ sbatch [\fIoptions\fP] \fIscript\fP [\fIargs\fP...]
 sbatch submits a batch script to SLURM. The batch script may be given to sbatch
 through a file name on the command line, or if no file name is specified,
 sbatch will read in a script from standard input. The batch script may contain
-options preceeded with "#SBATCH" before any executable commands in the script.
+options preceded with "#SBATCH" before any executable commands in the script.
 
 sbatch exits immediately after the script is successfully transferred to the
 SLURM controller and assigned a SLURM job ID. The batch script is not
@@ -23,6 +23,42 @@ allocated nodes.
 
 .SH "OPTIONS"
 .LP
+.TP
+\fB\-\-acctg\-freq\fR=\fIseconds\fR
+Define the job accounting sampling interval.
+This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
+configuration file, \fIslurm.conf\fR.
+A value of zero disables the periodic job sampling and provides accounting
+information only on job termination (reducing SLURM interference with the job).
+
+.TP
+\fB\-B\fR \fB\-\-extra\-node\-info\fR=\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]
+Request a specific allocation of resources with details as to the
+number and type of computational resources within a cluster:
+number of sockets (or physical processors) per node,
+cores per socket, and threads per core.
+The total amount of resources being requested is the product of all of
+the terms.
+As with \-\-nodes, each value can be a single number or a range (e.g. min\-max).
+An asterisk (*) can be used as a placeholder indicating that all available
+resources of that type are to be utilized.
+As with nodes, the individual levels can also be specified in separate
+options if desired:
+.nf
+ \fB\-\-sockets\-per\-node\fR=\fIsockets\fR
+ \fB\-\-cores\-per\-socket\fR=\fIcores\fR
+ \fB\-\-threads\-per\-core\fR=\fIthreads\fR
+.fi
+When the task/affinity plugin is enabled,
+specifying an allocation in this manner also instructs SLURM to use
+a CPU affinity mask to guarantee the request is filled as specified.
+NOTE: Support for these options is configuration dependent.
+The task/affinity plugin must be configured.
+In addition either select/linear or select/cons_res plugin must be
+configured.
+If select/cons_res is configured, it must have a parameter of CR_Core,
+CR_Core_Memory, CR_Socket, or CR_Socket_Memory.
+
 .TP
 \fB\-\-begin\fR[=]<\fItime\fR>
 Submit the batch script to the SLURM controller immediately, like normal, but
@@ -59,6 +95,12 @@ nodes, then use the OR operator and enclose the options within square brackets.
 For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might be used to
 specify that all nodes must be allocated on a single rack of the
 cluster, but any of those four racks can be used.
+A request can also specify the number of nodes needed with some feature
+by appending an asterisk and count after the feature name.
+For example "\fBsbatch \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR
+indicates that the job requires 16 nodes and that at least four of those
+nodes must have the feature "graphics."
+Constraints with node counts may only be combined with AND operators.
If no nodes have the requested features, then the job will be rejected by the slurm job manager. @@ -90,13 +132,6 @@ Demand a contiguous range of nodes. The default is "yes". Specify Set the working directory of the batch script to \fIdirectory\fR before it it executed. -.TP -\fB\-d\fR, \fB\-\-dependency\fR[=]<\fIjobid\fR> -Defer the start of this job until the specified \fIjobid\fR has completed. -Many jobs can share the same dependency and these jobs may even belong to -different users. The value may be changed after job submission using the -scontrol command. - .TP \fB\-e\fR, \fB\-\-error\fR[=]<\fIfilename pattern\fR> Instruct SLURM to connect the batch script's standard error directly to the @@ -150,6 +185,26 @@ may be the group name or the numerical group ID. \fB\-h\fR, \fB\-\-help\fR Display help information and exit. +.TP +\fB\-\-hint\fR=\fItype\fR +Bind tasks according to application hints +.RS +.TP +.B compute_bound +Select settings for compute bound applications: +use all cores in each physical CPU +.TP +.B memory_bound +Select settings for memory bound applications: +use only one core in each physical CPU +.TP +.B [no]multithread +[don't] use extra threads with in-core multi-threading +which can benefit communication intensive applications +.B help +show this help message +.RE + .TP \fB\-I\fR,\fB\-\-immediate\fR The batch script will only be submitted to the controller if the resources @@ -206,6 +261,60 @@ new job steps on the remaining nodes in their allocation. By default SLURM terminates the entire job allocation if any node fails in its range of allocated nodes. +.TP +\fB\-L\fR, \fB\-\-licenses\fR= +Specification of licenses (or other resources available on all +nodes of the cluster) which must be allocated to this job. +License names can be followed by an asterisk and count +(the default count is one). +Multiple license names should be comma separated (e.g. +"\-\-licenses=foo*4,bar"). + +.TP +\fB\-m\fR, \fB\-\-distribution\fR= +(\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR) +Specify an alternate distribution method for remote processes. +.RS +.TP +.B block +The block method of distribution will allocate processes in\-order to +the cpus on a node. If the number of processes exceeds the number of +cpus on all of the nodes in the allocation then all nodes will be +utilized. For example, consider an allocation of three nodes each with +two cpus. A four\-process block distribution request will distribute +those processes to the nodes with processes one and two on the first +node, process three on the second node, and process four on the third node. +Block distribution is the default behavior if the number of tasks +exceeds the number of nodes requested. +.TP +.B cyclic +The cyclic method distributes processes in a round\-robin fashion across +the allocated nodes. That is, process one will be allocated to the first +node, process two to the second, and so on. This is the default behavior +if the number of tasks is no larger than the number of nodes requested. +.TP +.B plane +The tasks are distributed in blocks of a specified size. +The options include a number representing the size of the task block. +This is followed by an optional specification of the task distribution +scheme within a block of tasks and between the blocks of tasks. +For more details (including examples and diagrams), please see +.ad l +.nh +https://computing.llnl.gov/linux/slurm/mc_support.html and +https://computing.llnl.gov/linux/slurm/dist_plane.html. 
+.hy
+.ad
+.TP
+.B arbitrary
+The arbitrary method of distribution will allocate processes in\-order as
+listed in the file designated by the environment variable SLURM_HOSTFILE. If
+this variable is listed it will override any other method specified.
+If not set the method will default to block. The hostfile must
+contain at a minimum the number of hosts requested. If requesting tasks
+(\-n), your tasks will be laid out on the nodes in the order of the file.
+.RE
+
 .TP
 \fB\-\-mail\-type\fR=\fItype\fR
 Notify user by email when certain event types occur.
@@ -220,7 +329,12 @@ The default value is the username of the submitting user.
 
 .TP
 \fB\-\-mem\fR[=]<\fIMB\fR>
-Specify a minimum amount of real memory.
+Specify the real memory required per node in MegaBytes.
+If a value is specified, that quantity of memory will be
+reserved for this job.
+If no value is specified and real memory is exhausted on
+any allocated node then the job is subject to cancellation.
+Also see \fB\-\-task\-mem\fR.
 
 .TP
 \fB\-\-mincores\fR[=]<\fIn\fR>
@@ -260,13 +374,14 @@ If a job node limit exceeds the number of nodes configured in the
 partition, the job will be rejected.
 
 .TP
-\fB\-n\fR, \fB\-\-tasks\fR[=]<\fInumber\fR>
-sbatch does not launch tasks, it requests an allocation of resources and submits
-a batch script. However this \-\-tasks option advizes the SLURM controller
-that job steps run within this allocation will launch a maximum of \fInumber\fR
-tasks. This option, possibly with collaboration with the \-\-cpus\-per\-task
-option, will directly impact the number of processors granted to the job
-allocation.
+\fB\-n\fR, \fB\-\-ntasks\fR[=]<\fInumber\fR>
+sbatch does not launch tasks, it requests an allocation of resources and
+submits a batch script. This option advises the SLURM controller that job
+steps run within this allocation will launch a maximum of \fInumber\fR
+tasks and sufficient resources are allocated to accomplish this.
+The default is one task per socket or core (depending upon the value
+of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
+that the \fB\-\-cpus\-per\-task\fR option will change this default.
 
 .TP
 \fB\-\-nice\fR[=]<\fIadjustment\fR>
@@ -280,15 +395,47 @@ ignored if \fISchedulerType=sched/wiki\fR or
 
 .TP
 \fB\-\-no\-requeue\fR
-Specifies that the batch job should not be requeued.
+Specifies that the batch job should not be requeued after node failure.
 Setting this option will prevent system administrators from being able
 to restart the job (for example, after a scheduled downtime).
 When a job is requeued, the batch script is initiated from its beginning.
-
-.TP
-\fB\-\-ntasks\-per\-node\fR[=]<\fIn\fR>
-Specify the number of tasks to be launched per node.
-Equivalent to \fB\-\-tasks\-per\-node\fR.
+Also see the \fB\-\-requeue\fR option.
+The \fIJobRequeue\fR configuration parameter controls the default
+behavior on the cluster.
+
+.TP
+\fB\-\-ntasks\-per\-core\fR=\fIntasks\fR
+Request that no more than \fIntasks\fR be invoked on each core.
+Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
+instead of the node level. Masks will automatically be generated
+to bind the tasks to specific cores unless \fB\-\-cpu_bind=none\fR
+is specified.
+NOTE: This option is not supported unless \fISelectType=CR_Core\fR
+or \fISelectType=CR_Core_Memory\fR is configured.
+
+.TP
+\fB\-\-ntasks\-per\-socket\fR=\fIntasks\fR
+Request that no more than \fIntasks\fR be invoked on each socket.
+Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
+instead of the node level. Masks will automatically be generated
+to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
+is specified.
+NOTE: This option is not supported unless \fISelectType=CR_Socket\fR
+or \fISelectType=CR_Socket_Memory\fR is configured.
+
+.TP
+\fB\-\-ntasks\-per\-node\fR=\fIntasks\fR
+Request that no more than \fIntasks\fR be invoked on each node.
+This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
+but does not require knowledge of the actual number of cpus on
+each node. In some cases, it is more convenient to be able to
+request that no more than a specific number of ntasks be invoked
+on each node. Examples of this include submitting
+a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
+assigned to each node while allowing the OpenMP portion to utilize
+all of the parallelism present in the node, or submitting a single
+setup/cleanup/monitoring job to each node of a pre\-existing
+allocation as one step in a larger job script.
 
 .TP
 \fB\-O\fR, \fB\-\-overcommit\fR
@@ -303,6 +450,12 @@ Instruct SLURM to connect the batch script's standard output directly to the
 file name specified in the "\fIfilename pattern\fR".
 See the \fB\-\-input\fR option for filename specification options.
 
+.TP
+\fB\-\-open\-mode\fR=append|truncate
+Open the output and error files using append or truncate mode as specified.
+The default value is specified by the system configuration parameter
+\fIJobFileAppend\fR.
+
 .TP
 \fB\-p\fR, \fB\-\-partition\fR[=]<\fIpartition name\fR>
 Request a specific partition for the resource allocation. If not specified, the
@@ -353,10 +506,46 @@ The maximum resident set size
 The maximum stack size
 .RE
 
+.TP
+\fB\-P\fR, \fB\-\-dependency\fR[=]<\fIdependency_list\fR>
+Defer the start of this job until the specified dependencies have been
+satisfied.
+<\fIdependency_list\fR> is of the form
+<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
+Many jobs can share the same dependency and these jobs may even belong to
+different users. The value may be changed after job submission using the
+scontrol command.
+.PD
+.RS
+.TP
+\fBafter:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have begun
+execution.
+.TP
+\fBafterany:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated.
+.TP
+\fBafternotok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated
+in some failed state (non-zero exit code, node failure, timed out, etc).
+.TP
+\fBafterok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have successfully
+executed (ran to completion with a zero exit code).
+.RE
+
 .TP
 \fB\-q\fR, \fB\-\-quiet\fR
 Suppress informational messages from sbatch. Errors will still be displayed.
 
+.TP
+\fB\-\-requeue\fR
+Specifies that the batch job should be requeued after node failure.
+When a job is requeued, the batch script is initiated from its beginning.
+Also see the \fB\-\-no\-requeue\fR option.
+The \fIJobRequeue\fR configuration parameter controls the default
+behavior on the cluster.
+
 .TP
 \fB\-s\fR, \fB\-\-share\fR
 The job allocation can share nodes with other running jobs. (The default
@@ -378,6 +567,13 @@ Acceptable time formats include "minutes", "minutes:seconds",
 "hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and
 "days\-hours:minutes:seconds".
+.TP
+\fB\-\-task\-mem\fR[=]<\fIMB\fR>
+Minimum memory available per task in MegaBytes.
+Default value is \fBDefMemPerTask\fR and the maximum value is
+\fBMaxMemPerTask\fR, both of which can be seen using the
+\fBscontrol show config\fR command.
+
 .TP
 \fB\-\-tasks\-per\-node\fR[=]<\fIn\fR>
 Specify the number of tasks to be launched per node.
@@ -497,6 +693,9 @@ environment variables.
 \fBSBATCH_ACCOUNT\fR
 Same as \fB\-\-account\fR.
 .TP
+\fBSALLOC_ACCTG_FREQ\fR
+Same as \fB\-\-acctg\-freq\fR.
+.TP
 \fBSBATCH_CONN_TYPE\fR
 Same as \fB\-\-conn\-type\fR.
 .TP
@@ -524,6 +723,9 @@ Same as \fB\-\-no\-requeue\fR.
 \fBSBATCH_NO_ROTATE\fR
 Same as \fB\-R\fR or \fB\-\-no\-rotate\fR.
 .TP
+\fBSLURM_OPEN_MODE\fR
+Same as \fB\-\-open\-mode\fR.
+.TP
 \fBSLURM_OVERCOMMIT\fR
 Same as \fB\-O, \-\-overcommit\fR
 .TP
@@ -571,6 +773,18 @@ Do not allocate a block on Blue Gene systems only.
 \fBMPIRUN_NOFREE\fR
 Do not free a block on Blue Gene systems only.
 .TP
+\fBSLURM_NTASKS_PER_CORE\fR
+Number of tasks requested per core.
+Only set if the \fB\-\-ntasks\-per\-core\fR option is specified.
+.TP
+\fBSLURM_NTASKS_PER_NODE\fR
+Number of tasks requested per node.
+Only set if the \fB\-\-ntasks\-per\-node\fR option is specified.
+.TP
+\fBSLURM_NTASKS_PER_SOCKET\fR
+Number of tasks requested per socket.
+Only set if the \fB\-\-ntasks\-per\-socket\fR option is specified.
+.TP
 \fBMPIRUN_PARTITION\fR
 The block name on Blue Gene systems only.
 
@@ -633,7 +847,7 @@ host4
 
 .SH "COPYING"
 Copyright (C) 2006\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-UCRL\-CODE\-226842.
+LLNL\-CODE\-402394.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index 5984ab394..7f76a4ca3 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -100,7 +100,7 @@ srun: jobid 12345 submitted
 
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-UCRL\-CODE\-226842.
+LLNL\-CODE\-402394.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/scancel.1 b/doc/man/man1/scancel.1
index 28ba0d031..773fcecc0 100644
--- a/doc/man/man1/scancel.1
+++ b/doc/man/man1/scancel.1
@@ -1,8 +1,11 @@
 .TH SCANCEL "1" "August 2007" "scancel 1.2" "Slurm components"
+
 .SH "NAME"
 scancel \- Used to signal jobs or job steps that are under the control of Slurm.
+
 .SH "SYNOPSIS"
 \fBscancel\fR [\fIOPTIONS\fR...] [\fIjob_id\fR[.\fIstep_id\fR]] [\fIjob_id\fR[.\fIstep_id\fR]...]
+
 .SH "DESCRIPTION"
 \fBscancel\fR is used to signal or cancel jobs or job steps. An arbitrary number
 of jobs or job steps may be signaled using job specification filters or a
@@ -46,7 +49,8 @@ This option is incompatible with the \fB\-\-verbose\fR option.
 
 .TP
 \fB\-s\fR, \fB\-\-signal\fR=\fIsignal_name\fR
-The name or number of the signal to be send. Default value is "KILL".
+The name or number of the signal to be sent.
+If no signal is specified, the specified job or step will be terminated.
 
 .TP
 \fB\-t\fR, \fB\-\-state\fR=\fIjob_state_name\fR
@@ -81,10 +85,7 @@ The Slurm job ID to be signaled.
 \fIstep_id\fP
 The step ID of the job step to be signaled.
 If not specified, the operation is performed at the level of a job.
-A "KILL" (the default) signal applied to a job cancels the job and
-all of its steps.
-For other signals, when \fB\-\-batch\fR is used, the batch shell -processes will be signalled. +When \fB\-\-batch\fR is used, the batch shell processes will be signalled. Otherwise the processes associated with all job steps, but not the batch script itself, will be signalled. @@ -153,7 +154,7 @@ scancel \-\-state=PENDING \-\-user=bob \-\-partition=debug .SH "COPYING" Copyright (C) 2002-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1 index 9c6c4a7de..810dba3c9 100644 --- a/doc/man/man1/scontrol.1 +++ b/doc/man/man1/scontrol.1 @@ -1,10 +1,11 @@ -.TH SCONTROL "1" "June 2007" "scontrol 1.2" "Slurm components" +.TH SCONTROL "1" "December 2007" "scontrol 1.3" "Slurm components" .SH "NAME" scontrol \- Used view and modify Slurm configuration and state. .SH "SYNOPSIS" \fBscontrol\fR [\fIOPTIONS\fR...] [\fICOMMAND\fR...] + .SH "DESCRIPTION" \fBscontrol\fR is used to view or modify Slurm configuration including: job, job step, node, partition, and overall system configuration. Most of the @@ -52,7 +53,7 @@ Print version information and exit. .TP \fBall\fP -Show all partitiion, their jobs and jobs steps. This causes information to be +Show all partitions, their jobs and jobs steps. This causes information to be displayed about partitions that are configured as hidden and partitions that are unavailable to user's group. @@ -106,29 +107,33 @@ Display a description of scontrol options and commands. .TP \fBhide\fP -Do not display partitiion, job or jobs step information for partitions that are +Do not display partition, job or jobs step information for partitions that are configured as hidden or partitions that are unavailable to the user's group. This is the default behavior. +.TP +\fBnotify\fP \fIjob_id\fP \fImessage\fP +Send a message to standard output of the srun command associated with the +specified \fIjob_id\fP. + .TP \fBoneliner\fP Print information one line per record. -This is an independent command with no options meant for use in interactive mode. .TP -\fBpidinfo\fP \fIPROC_ID\fP +\fBpidinfo\fP \fIproc_id\fP Print the Slurm job id and scheduled termination time corresponding to the -supplied process id, \fIPROC_ID\fP, on the current node. This will work only +supplied process id, \fIproc_id\fP, on the current node. This will work only with processes on node on which scontrol is run, and only for those processes spawned by SLURM and their descendants. .TP -\fBlistpids\fP [JOBID[.STEPID]] [NodeName] +\fBlistpids\fP [\fIjob_id\fP[.\fIstep_id\fP]] [\fINodeName\fP] Print a listing of the process IDs in a job step (if JOBID.STEPID is provided), -or all of the job steps in a job (if JOBID is provided), or all of the job -steps in all of the jobs on the local node (if JOBID is not provided or JOBID -is "*"). This will work only with processes on the node on which -scontrol is run, and only for those processes spawned by SLURM and +or all of the job steps in a job (if \fIjob_id\fP is provided), or all of the job +steps in all of the jobs on the local node (if \fIjob_id\fP is not provided +or \fIjob_id\fP is "*"). This will work only with processes on the node on +which scontrol is run, and only for those processes spawned by SLURM and their descendants. 
Note that some SLURM configurations (\fIProctrackType\fP value of \fIpgid\fP or \fIaix\fP) are unable to identify all processes associated with a job or job step. @@ -145,12 +150,10 @@ they are responding. .TP \fBquiet\fP Print no warning or informational messages, only fatal error messages. -This is an independent command with no options meant for use in interactive mode. .TP \fBquit\fP Terminate the execution of scontrol. -This is an independent command with no options meant for use in interactive mode. .TP \fBreconfigure\fP @@ -176,6 +179,18 @@ Resume a previously suspended job. \fBrequeue\fP \fIjob_id\fP Requeue a running or pending SLURM batch job. +.TP +\fBsetdebug\fP \fILEVEL\fP +Change the debug level of the slurmctld daemon. +\fILEVEL\fP may be an integer value between zero and nine (using the +same values as \fISlurmctldDebug\fP in the \fIslurm.conf\fP file) or +the name of the most detailed message type to be printed: +"quiet", "fatal", "error", "info", "verbose", "debug", "debug2", "debug3", +"debug4", or "debug5". +This value is temporary and will be overwritten whenever the slurmctld +daemon reads the slurm.conf configuration file (e.g. when the daemon +is restarted or "scontrol reconfigure" is executed). + .TP \fBshow\fP \fIENTITY\fP \fIID\fP Display the state of the specified entity with the specified identification. @@ -235,7 +250,6 @@ the Slurm configuration file and executing the \fIreconfigure\fP command \fBverbose\fP Print detailed event logging. This includes time\-stamps on data structures, record counts, etc. -This is an independent command with no options meant for use in interactive mode. .TP \fBversion\fP @@ -356,9 +370,17 @@ can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR and you can tell SLURM to run the job today with the keyword \fItoday\fR and to run the job tomorrow with the keyword \fItomorrow\fR. + .TP -\fITimeLimit\fP=<minutes> -Set the job's time limit to the specified value. +\fITimeLimit\fP=<time> +The job's time limit. +Output format is [days\-]hours:minutes:seconds or "UNLIMITED". +Input format (for \fBupdate\fR command) set is minutes, minutes:seconds, +hours:minutes:seconds, days\-hours, days\-hours:minutes or +days\-hours:minutes:seconds. +Time resolution is one minute and second values are rounded up to +the next minute. + .TP \fIConnection\fP=<type> Reset the node connection type. @@ -391,18 +413,21 @@ or reconfiguration. Update slurm.conf with any changes meant to be persistent. .TP \fIReason\fP=<reason> -Identify the reason the node is in a "DOWN" or "DRAINED" or "DRAINING" state. +Identify the reason the node is in a "DOWN" or "DRAINED", "DRAINING", +"FAILING" or "FAIL" state. Use quotes to enclose a reason having more than one word. .TP \fIState\fP=<state> Identify the state to be assigned to the node. Possible values are "NoResp", -"DRAIN" "RESUME", "DOWN", "IDLE", "ALLOC", and "ALLOCATED". +"ALLOC", "ALLOCATED", "DOWN", "DRAIN", "FAIL", "FAILING", "IDLE" or "RESUME". "RESUME is not an actual node state, but will return a DRAINED, DRAINING, or DOWN node to service, either IDLE or ALLOCATED state as appropriate. Setting a node "DOWN" will cause all running and suspended jobs on that node to be terminated. If you want to remove a node from service, you typically want to set it's state to "DRAIN". +"FAILING" is similar to "DRAIN" except that some applications will +seek to relinquish those nodes before the job completes. The "NoResp" state will only set the "NoResp" flag for a node without changing its underlying state. 
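The scontrol changes documented above (setdebug, the new node states, and the clock-style time limits) can be exercised interactively; this is a minimal sketch in which the node name lx0031 comes from the partition example in this man page's EXAMPLES section, job 65539 is the job ID used in those same examples, and the reason string is invented for illustration:

    scontrol setdebug debug2
    scontrol update NodeName=lx0031 State=FAIL Reason="ECC errors expected"
    scontrol update JobId=65539 TimeLimit=1-12:00:00

The setdebug value lasts only until slurmctld rereads slurm.conf, the FAIL state marks a node that is expected to fail soon (a multi-word reason must be quoted), and the TimeLimit value uses the days-hours:minutes:seconds input format described above.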
@@ -426,15 +451,23 @@ Possible values are"YES" and "NO". .TP \fIMaxNodes\fP=<count> Set the maximum number of nodes which will be allocated to any single job -in the partition. Specify a number, "INFINITE" or "UNLIMITED". +in the partition. Specify a number, "INFINITE" or "UNLIMITED". (On a +Bluegene type system this represents a c-node count.) + .TP -\fIMaxTime\fP=<minutes> -Set the maximum run time for a jobs in minutes. -Specify a number, "INFINITE" or "UNLIMITED". +\fIMaxTime\fP=<time> +The maximum run time for jobs. +Output format is [days\-]hours:minutes:seconds or "UNLIMITED". +Input format (for \fBupdate\fR command) is minutes, minutes:seconds, +hours:minutes:seconds, days\-hours, days\-hours:minutes or +days\-hours:minutes:seconds. +Time resolution is one minute and second values are rounded up to +the next minute. + .TP \fIMinNodes\fP=<count> Set the minimum number of nodes which will be allocated to any single job -in the partition. +in the partition. (On a Bluegene type system this represents a c-node count.) .TP \fINodes\fP=<name> Identify the node(s) to be associated with this partition. Multiple node names @@ -501,17 +534,17 @@ scontrol: show part class .br PartitionName=class TotalNodes=10 TotalCPUs=20 RootOnly=NO .br - Default=NO Shared=NO State=UP MaxTime=30 Hidden=NO + Default=NO Shared=NO State=UP MaxTime=0:30:00 Hidden=NO .br MinNodes=1 MaxNodes=2 AllowGroups=students .br Nodes=lx[0031-0040] NodeIndices=31,40,-1 .br -scontrol: update PartitionName=class MaxTime=99 MaxNodes=4 +scontrol: update PartitionName=class MaxTime=60:00 MaxNodes=4 .br scontrol: show job 65539 .br -JobId=65539 UserId=1500 JobState=PENDING TimeLimit=100 +JobId=65539 UserId=1500 JobState=PENDING TimeLimit=0:20:00 .br Priority=100 Partition=batch Name=job01 NodeList=(null) .br @@ -523,7 +556,7 @@ JobId=65539 UserId=1500 JobState=PENDING TimeLimit=100 .br Features=(null) JobScript=/bin/hostname .br -scontrol: update JobId=65539 TimeLimit=200 Priority=500 +scontrol: update JobId=65539 TimeLimit=30:00 Priority=500 .br scontrol: show hosts tux[1-3] .br @@ -539,7 +572,7 @@ scontrol: quit .SH "COPYING" Copyright (C) 2002\-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man1/sinfo.1 b/doc/man/man1/sinfo.1 index ae8e1c2ee..146b4dec2 100644 --- a/doc/man/man1/sinfo.1 +++ b/doc/man/man1/sinfo.1 @@ -1,4 +1,4 @@ -.TH SINFO "1" "July 2007" "sinfo 1.2" "Slurm components" +.TH SINFO "1" "July 2007" "sinfo 1.3" "Slurm components" .SH "NAME" sinfo \- view information about SLURM nodes and partitions. @@ -166,7 +166,8 @@ Partition name Only user root may initiate jobs, "yes" or "no" .TP \fB%R\fR -The reason a node is unavailable (down, drained, or draining states) +The reason a node is unavailable (down, drained, draining, +fail or failing states) .TP \fB%s\fR Maximum job size in nodes @@ -209,15 +210,17 @@ If set only report state information for responding nodes. .TP \fB\-R\fR, \fB\-\-list\-reasons\fR -List reasons nodes are down or drained. When nodes are in -these states SLURM supports optional inclusion of a "reason" -string by an administrator. This option will display the first -35 characters of the reason field and list of nodes with that -reason for all nodes that are, by default, down, drained, or -draining. 
This option may be used with other node filtering -options (e.g. \fB\-r\fR, \fB\-d\fR, \fB\-t\fR, \fB\-n\fR), -however, combinations of these options that result in a list of -nodes that are not down or drained will not produce any output. +List reasons nodes are in the down, drained, fail or failing state. +When nodes are in these states SLURM supports optional inclusion +of a "reason" string by an administrator. +This option will display the first 35 characters of the reason +field and list of nodes with that reason for all nodes that are, +by default, down, drained, draining or failing. +This option may be used with other node filtering options +(e.g. \fB\-r\fR, \fB\-d\fR, \fB\-t\fR, \fB\-n\fR), +however, combinations of these options that result in a +list of nodes that are not down or drained or failing will +not produce any output. When used with \fB\-l\fR the output additionally includes the current node state. @@ -248,11 +251,12 @@ default sort value is "N" (increasing node name). List nodes only having the given state(s). Multiple states may be comma separated and the comparison is case insensitive. Possible values include (case insensitive): ALLOC, ALLOCATED, -COMP, COMPLETING, DOWN, DRAIN, DRAINED, DRNG, DRAINING, IDLE, -UNK, and UNKNOWN. By default nodes in the specified state are -reported whether they are responding or not. The \fB\-\-dead\fR -and \fB\-\-responding\fR options may be used to filtering nodes by -the responding flag. +COMP, COMPLETING, DOWN, DRAIN, DRAINED, DRNG, DRAINING, FAIL, +FAILING, IDLE, UNK, and UNKNOWN. +By default nodes in the specified state are reported whether +they are responding or not. +The \fB\-\-dead\fR and \fB\-\-responding\fR options may be +used to filtering nodes by the responding flag. .TP \fB\-\-usage\fR @@ -339,11 +343,13 @@ with shared/cons_res managing individual processors). per job's resource allocation. .TP \fBSTATE\fR -State of the nodes. Possible states include: down, unknown, -idle, allocated, drained, draining, completing and their -abbreviated forms: down, unk, idle, alloc, drain, drng, and -comp respectively. Note that the suffix "*" identifies nodes -that are presently not responding. +State of the nodes. +Possible states include: allocated, completing, down, +drained, draining, fail, failing, idle, and unknown plus +their abbreviated forms: alloc, comp, donw, drain, drng, +fail, failg, idle, and unk respectively. +Note that the suffix "*" identifies nodes that are presently +not responding. .TP \fBTMP_DISK\fR Size of temporary disk space in megabytes on these nodes. @@ -355,7 +361,8 @@ If the node state code is followed by "*", this indicates the node is presently not responding and will not be allocated any new work. If the node remains non\-responsive, it will be placed in the \fBDOWN\fR state (except in the case of -\fBDRAINED\fR, \fBDRAINING\fR, or \fBCOMPLETING\fR nodes). +\fBCOMPLETING\fR, \fBDRAINED\fR, \fBDRAINING\fR, +\fBFAIL\fR, \fBFAILING\fR nodes). If the node state code is followed by "~", this indicates the node is presently in a power saving mode (typically running at reduced frequency). @@ -398,6 +405,18 @@ this state per system administrator request. See the \fBupdate node\fR command in the \fBscontrol\fR(1) man page or the \fBslurm.conf\fR(5) man page for more information. .TP +\fBFAIL\fR +The node is expected to fail soon and is unavailable for +use per system administrator request. 
+See the \fBupdate node\fR command in the \fBscontrol\fR(1) +man page or the \fBslurm.conf\fR(5) man page for more information. +.TP +\fBFAILING\fR +The node is currently executing a job, but is expected to fail +soon and is unavailable for use per system administrator request. +See the \fBupdate node\fR command in the \fBscontrol\fR(1) +man page or the \fBslurm.conf\fR(5) man page for more information. +.TP \fBIDLE\fR The node is not allocated to any jobs and is available for use. .TP @@ -495,7 +514,7 @@ Not Responding dev8 .SH "COPYING" Copyright (C) 2002\-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man1/slaunch.1 b/doc/man/man1/slaunch.1 deleted file mode 100644 index d227eec4a..000000000 --- a/doc/man/man1/slaunch.1 +++ /dev/null @@ -1,734 +0,0 @@ -.\" $Id: slaunch.1 13559 2008-03-11 22:41:30Z jette $ -.TH "slaunch" "1" "SLURM 1.2" "October 2006" "SLURM Commands" -.SH "NAME" -.LP -slaunch \- Launch a parallel application under a SLURM job allocation. -.SH "SYNOPSIS" -.LP -slaunch [\fIoptions\fP] <\fIcommand\fP> [\fIcommand args\fR] -.SH "DESCRIPTION" -.LP -\fBNOTE: Support of slaunch is expected to be discontinued in the near future. -Use of slaunch is not recommended.\fR -.br - -.br -slaunch launches a parallel application (a \fBjob step\fR in SLURM parlance) -on the nodes, or subset of nodes, in a \fBjob allocation\fR. A valid job -allocation is a prerequisite of running slaunch. The ID of the job allocation -may be passed to slaunch through either the \fB\-\-jobid\fR command line -parameter or the \fBSLAUNCH_JOBID\fR environment variable. The \fBsalloc\fR -and \fBsbatch\fR commands may be used to request a job allocation, and each -of those commands automatically set the \fBSLURM_JOB_ID\fR environment variable, -which is also understood by slaunch. Users should not set SLURM_JOB_ID on their -own; use SLAUNCH_JOBID instead. - -.SH "OPTIONS" -.LP - -.TP -\fB\-C\fR, \fB\-\-overcommit\fR -Permit the allocation of more tasks to a node than there are available processors. -Normally SLURM will only allow up to N tasks on a node with N processors, but -this option will allow more than N tasks to be assigned to a node. - -.TP -\fB\-c\fR, \fB\-\-cpus\-per\-task\fR[=]<\fIncpus\fR> -Specify that each task requires \fIncpus\fR number of CPUs. Useful for applications in which each task will launch multiple threads and can therefore benefit from there being free processors on the node. - -.TP -\fB\-\-comm\-hostname\fR[=]<\fIhostname|address\fR> -Specify the hostname or address to be used for PMI communications only -(MPCIH2 communication bootstrapping mechanism). -Defaults to short hostname of the node on which slaunch is running. - -.TP -\fB\-\-core\fR[=]<\fItype\fR> -Adjust corefile format for parallel job. If possible, slaunch will set -up the environment for the job such that a corefile format other than -full core dumps is enabled. If run with type = "list", slaunch will -print a list of supported corefile format types to stdout and exit. - -.TP -\fB\-\-cpu_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR -Bind tasks to CPUs. Used only when the task/affinity or task/numa -plugin is enabled. 
-NOTE: To have SLURM always report on the selected CPU binding for all -commands executed in a shell, you can enable verbose mode by setting -the SLURM_CPU_BIND environment variable value to "verbose". -Supported options include: -.PD 1 -.RS -.TP -.B q[uiet], -quietly bind before task runs (default) -.TP -.B v[erbose], -verbosely report binding before task runs -.TP -.B no[ne] -don't bind tasks to CPUs (default) -.TP -.B rank -bind by task rank -.TP -.B map_cpu:<list> -bind by mapping CPU IDs to tasks as specified -where <list> is <cpuid1>,<cpuid2>,...<cpuidN>. -CPU IDs are interpreted as decimal values unless they are preceded -with '0x' in which case they interpreted as hexadecimal values. -.TP -.B mask_cpu:<list> -bind by setting CPU masks on tasks as specified -where <list> is <mask1>,<mask2>,...<maskN>. -CPU masks are \fBalways\fR interpreted as hexadecimal values but can be -preceded with an optional '0x'. -.RE - -.TP -\fB\-D\fR, \fB\-\-workdir\fR[=]<\fIdirectory\fR> -Set the working directory of the tasks to \fIdirectory\fR before execution. -The default task working directory is slaunch's working directory. - -.TP -\fB\-d\fR, \fB\-\-slurmd\-debug\fR[=]<\fIlevel\fR> -Specify a debug level for slurmd(8). \fIlevel\fR may be an integer value -between 0 [quiet, only errors are displayed] and 4 [verbose operation]. -The slurmd debug information is copied onto the stderr of -the job. By default only errors are displayed. - -.TP -\fB\-E\fR, \fB\-\-task\-error\fR[=]<\fIfilename pattern\fR> -Instruct SLURM to connect each task's standard error directly to -the file name specified in the "\fIfilename pattern\fR". -See the \fB\-\-task\-input\fR option for filename specification options. - -.TP -\fB\-e\fR, \fB\-\-slaunch\-error\fR[=]<\fIfilename pattern\fR> -Instruct SLURM to connect slaunch's standard error directly to the -file name specified in the "\fIfilename pattern\fR". -See the \fB\-\-slaunch\-input\fR option for filename specification options. - -.TP -\fB\-\-epilog\fR[=]<\fIexecutable\fR> -\fBslaunch\fR will run \fIexecutable\fR just after the job step completes. -The command line arguments for \fIexecutable\fR will be the command -and arguments of the job step. If \fIexecutable\fR is "none", then -no epilog will be run. This parameter overrides the SrunEpilog -parameter in slurm.conf. - -.TP -\fB\-F\fR, \fB\-\-task\-layout\-file\fR[=]<\fIfilename\fR> -Request a specific task layout. This option is much like the -\-\-task\-layout\-byname option, except that instead of a nodelist you -supply the name of a file. The file contains a nodelist that may span -multiple lines of the file. - -NOTE: This option implicitly sets the task distribution method to "arbitrary". -Some network switch layers do not permit arbitrary task layout. - -.TP -\fB\-\-gid\fR[=]<\fIgroup\fR> -If \fBslaunch\fR is run as root, and the \fB\-\-gid\fR option is used, -submit the job with \fIgroup\fR's group access permissions. \fIgroup\fR -may be the group name or the numerical group ID. - -.TP -\fB\-h\fR, \fB\-\-help\fR -Display help information and exit. - -.TP -\fB\-I\fR, \fB\-\-task\-input\fR[=]<\fIfilename pattern\fR> -Instruct SLURM to connect each task's standard input directly to -the file name specified in the "\fIfilename pattern\fR". - -By default, the standard IO streams of all tasks are received and transmitted -over the network to commands like slaunch and sattach. 
These options disable -the networked standard IO streams and instead connect the standard IO streams -of the tasks directly to files on the local node of each task (although the file -may, of course, be located on a networked filesystem). - -Whether or not the tasks share a file depends on whether or not the file lives -on a local filesystem or a shared network filesytem, and on whether or not -the filename pattern expands to the same file name for each task. - -The filename pattern may -contain one or more replacement symbols, which are a percent sign "%" followed -by a letter (e.g. %t). - -Supported replacement symbols are: -.PD 0 -.RS 10 -.TP -\fB%J\fR -Job allocation number and job step number in the form "jobid.stepid". For instance, "128.0". -.PD 0 -.TP -\fB%j\fR -Job allocation number. -.PD 0 -.TP -\fB%s\fR -Job step number. -.PD 0 -.TP -\fB%N\fR -Node name. (Will result in a separate file per node.) -.PD 0 -.TP -\fB%n\fR -Relative node index number within the job step. All nodes used by the job step will be number sequentially starting at zero. (Will result in a separate file per node.) -.PD 0 -.TP -\fB%t\fR -Task rank number. (Will result in a separate file per task.) -.RE - -.TP -\fB\-i\fR, \fB\-\-slaunch\-input\fR[=]<\fIfilename pattern\fR> -.PD -Change slaunch's standard input -to be a file of name "filename pattern". These options are similar to using -shell IO redirection capabilities, but with the additional ability to replace -certain symbols in the filename with useful SLURM information. Symbols are -listed below. - -By default, slaunch broadcasts its standard input over the network to the -standard input of all tasks. Likewise, standard output and standard error -from all tasks are collected over the network by slaunch and printed on -its standard output or standard error, respectively. If you want to see -traffic from fewer tasks, see the \-\-slaunch\-[input|output|error]\-filter -options. - -Supported replacement symbols are: -.PD 0 -.RS 10 -.TP -\fB%J\fR -Job allocation number and job step number in the form "jobid.stepid". For instance, "128.0". -.PD 0 -.TP -\fB%j\fR -Job allocation number. -.PD 0 -.TP -\fB%s\fR -Job step number. -.RE - -.TP -\fB\-J\fR, \fB\-\-name\fR[=]<\fIname\fR> -Set the name of the job step. By default, the job step's name will be the -name of the executable which slaunch is launching. - -.TP -\fB\-\-jobid\fR=<\fIJOBID\fP> -The job allocation under which the parallel application should be launched. If slaunch is running under salloc or a batch script, slaunch can automatically determint the jobid from the SLURM_JOB_ID environment variable. Otherwise, you will need to tell slaunch which job allocation to use. - -.TP -\fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR -Terminate the job step if any task exits with a non\-zero exit code. By default -slaunch will not terminate a job step because of a task with a non\-zero exit -code. - -.TP -\fB\-L\fR, \fB\-\-nodelist\-byid\fR[=]<\fInode index list\fR> -Request a specific set of nodes in a job alloction on which to run the tasks of the job step. The list may be specified as a comma\-separated list relative node indices in the job allocation (e.g., "0,2\-5,\-2,8"). Duplicate indices are permitted, but are ignored. The order of the node indices in the list is not important; the node indices will be sorted my SLURM. - -.TP -\fB\-l\fR, \fB\-\-label\fR -Prepend each line of task standard output or standard error with the task -number of its origin. 
- -.TP -\fB\-m\fR, \fB\-\-distribution\fR= -(\fIblock\fR|\fIcyclic\fR|\fIhostfile\fR|\fIplane=<options>\fR) -Specify an alternate distribution method for remote processes. -.PD 1 -.RS -.TP -.B block -The block method of distribution will allocate processes in\-order to -the cpus on a node. If the number of processes exceeds the number of -cpus on all of the nodes in the allocation then all nodes will be -utilized. For example, consider an allocation of three nodes each with -two cpus. A four\-process block distribution request will distribute -those processes to the nodes with processes one and two on the first -node, process three on the second node, and process four on the third node. -Block distribution is the default behavior if the number of tasks -exceeds the number of nodes requested. -.TP -.B cyclic -The cyclic method distributes processes in a round\-robin fashion across -the allocated nodes. That is, process one will be allocated to the first -node, process two to the second, and so on. This is the default behavior -if the number of tasks is no larger than the number of nodes requested. -.TP -.B plane -The tasks are distributed in blocks of a specified size. -The options include a number representing the size of the task block. -This is followed by an optional specification of the task distribution -scheme within a block of tasks and between the blocks of tasks. -For more details (including examples and diagrams), please see -.br -https://computing.llnl.gov/linux/slurm/mc_support.html -.br -and -.br -https://computing.llnl.gov/linux/slurm/dist_plane.html. -.TP -.B hostfile -The hostfile method of distribution will allocate processes in\-order as -listed in file designated by the environment variable SLURM_HOSTFILE. If -this variable is listed it will over ride any other method specified. -.RE - -.TP -\fB\-\-mem_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR -Bind tasks to memory. -Used only when task/affinity plugin is enabled and -the NUMA memory functions are available -\fBNote that the resolution of CPU and memory binding -may differ on some architectures.\fR For example, CPU binding may be performed -at the level of the cores within a processor while memory binding will -be performed at the level of nodes, where the definition of "nodes" -may differ from system to system. \fBThe use of any type other than -"none" or "local" is not recommended.\fR -If you want greater control, try running a simple test code with the -options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine -the specific configuration. -Note: To have SLURM always report on the selected memory binding for all -commands executed in a shell, you can enable verbose mode by setting the -SLURM_MEM_BIND environment variable value to "verbose". -Supported options include: -.PD 1 -.RS -.TP -.B q[uiet], -quietly bind before task runs (default) -.TP -.B v[erbose], -verbosely report binding before task runs -.TP -.B no[ne] -don't bind tasks to memory (default) -.TP -.B rank -bind by task rank (not recommended) -.TP -.B local -Use memory local to the processor in use -.TP -.B map_mem:<list> -bind by mapping a node's memory to tasks as specified -where <list> is <cpuid1>,<cpuid2>,...<cpuidN>. -CPU IDs are interpreted as decimal values unless they are preceded -with '0x' in which case they interpreted as hexadecimal values -(not recommended) -.TP -.B mask_mem:<list> -bind by setting memory masks on tasks as specified -where <list> is <mask1>,<mask2>,...<maskN>. 
-memory masks are \fBalways\fR interpreted as hexadecimal values but can be -preceded with an optional '0x' (not recommended) -.RE - -.TP -\fB\-\-mpi\fR[=]<\fImpi_type\fR> -Identify the type of MPI to be used. If run with mpi_type = "list", -slaunch will print a list of supported MPI types to stdout and exit. - -.TP -\fB\-\-multi\-prog\fR -This option allows one to launch tasks with different executables within -the same job step. When this option is present, slaunch no long accepts -the name of an executable "command" on the command line, instead it accepts -the name of a file. This file specifies which executables and command line -parameters should be used by each task in the job step. See the section -\fBMULTIPLE PROGRAMS FILE\fR below for an explanation of the multiple program -file syntax. - -.TP -\fB\-N\fR, \fB\-\-nodes\fR[=]<\fInumber\fR> -Specify the number of nodes to be used by this job step. By default, -slaunch will use all of the nodes in the specified job allocation. - -.TP -\fB\-n\fR, \fB\-\-tasks\fR[=]<\fInumber\fR> -Specify the number of processes to launch. The default is one process per node. - -.TP -\fB\-\-network\fR[=]<\fIoptions\fR> -(NOTE: this option is currently only of use on AIX systems.) -Specify the communication protocol to be used. -The interpretation of \fItype\fR is system dependent. -For AIX systems with an IBM Federation switch, the following -comma\-separated and case insensitive options are recongnized: -\fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, -\fBBULK_XFER\fR and adapter names. For more information, on -IBM systems see \fIpoe\fR documenation on the environment variables -\fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR. - -.TP -\fB\-O\fR, \fB\-\-task\-output\fR[=]<\fIfilename pattern\fR> -Instruct SLURM to connect each task's standard output directly to -the file name specified in the "\fIfilename pattern\fR". -See the \fB\-\-task\-input\fR option for filename specification options. - -.TP -\fB\-o\fR, \fB\-\-slaunch\-output\fR[=]<\fIfilename pattern\fR> -Instruct SLURM to connect slaunch's standard output directly to the -file name specified in the "\fIfilename pattern\fR". -See the \fB\-\-slaunch\-input\fR option for filename specification options. - -.TP -\fB\-\-propagate\fR[=\fIrlimits\fR] -Allows users to specify which of the modifiable (soft) resource limits -to propagate to the compute nodes and apply to their jobs. If -\fIrlimits\fR is not specified, then all resource limits will be -propagated. -The following rlimit names are supported by Slurm (although some -options may not be supported on some systems): -.RS -.TP 10 -\fBAS\fR -The maximum address space for a processes -.TP -\fBCORE\fR -The maximum size of core file -.TP -\fBCPU\fR -The maximum amount of CPU time -.TP -\fBDATA\fR -The maximum size of a process's data segment -.TP -\fBFSIZE\fR -The maximum size of files created -.TP -\fBMEMLOCK\fR -The maximum size that may be locked into memory -.TP -\fBNOFILE\fR -The maximum number of open files -.TP -\fBNPROC\fR -The maximum number of processes available -.TP -\fBRSS\fR -The maximum resident set size -.TP -\fBSTACK\fR -The maximum stack size -.RE - -.TP -\fB\-\-prolog\fR[=]<\fIexecutable\fR> -\fBslaunch\fR will run \fIexecutable\fR just before launching the job step. -The command line arguments for \fIexecutable\fR will be the command -and arguments of the job step. If \fIexecutable\fR is "none", then -no prolog will be run. This parameter overrides the SrunProlog -parameter in slurm.conf. 
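.PP
A brief, hypothetical sketch of the \fB\-\-prolog\fR and \fB\-\-epilog\fR hooks
described above (the script paths and program name are placeholders, not part
of any SLURM release):
.nf

  slaunch \-n 16 \-\-prolog=/home/alice/bin/step_begin \-\-epilog=/home/alice/bin/step_end myprog arg1

.fi
Per the descriptions above, both scripts would be invoked with the job step's
command line ("myprog arg1" here) as their arguments, just before and just
after the step respectively.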
- -.TP -\fB\-q\fR, \fB\-\-quiet\fR -Suppress informational messages from slaunch. Errors will still be displayed. - -.TP -\fB\-r\fR, \fB\-\-relative\fR[=]<\fInumber\fR> -Specify the first node in the allocation on which this job step will be launched. Counting starts at zero, thus the first node in the job allocation is node 0. The option to \-\-relative may also be a negative number. \-1 is the last node in the allocation, \-2 is the next to last node, etc. By default, the controller will select the starting node (assuming that there are no other nodelist or task layout options that specify specific nodes). - -.TP -\fB\-\-slaunch\-input\-filter\fR[=]<\fItask number\fR> -.PD 0 -.TP -\fB\-\-slaunch\-output\-filter\fR[=]<\fItask number\fR> -.PD 0 -.TP -\fB\-\-slaunch\-error\-filter\fR[=]<\fItask number\fR> -.PD -Only transmit standard input to a single task, or print the standard output -or standard error from a single task. These options perform the filtering -locally in slaunch. All tasks are still capable of sending or receiving -standard IO over the network, so the "sattach" command can still access the -standard IO streams of the other tasks. (NOTE: for \-output and \-error, -the streams from all tasks WILL be transmitted to slaunch, but it will only -print the streams for the selected task. If your tasks print a great deal of -data to standard output or error, this can be performance limiting.) - -.TP -\fB\-T\fR, \fB\-\-task\-layout\-byid\fR[=]<\fInode index list\fR> -Request a specific task layout using node indices within the job allocation. The node index list can contain duplicate indices, and the indices may appear in any order. The order of indices in the nodelist IS significant. Each node index in the list represents one task, with the Nth node index in the list designating on which node the Nth task should be launched. - -For example, given an allocation of nodes "linux[0\-15]" and a node index list "4,\-1,1\-3" task 0 will run on "linux4", task 1 will run on "linux15", task 2 on "linux1", task 3 on "linux2", and task 4 on "linux3". - -NOTE: This option implicitly sets the task distribution method to "arbitrary". Some network switch layers do not permit arbitrary task layout. - -.TP -\fB\-\-task\-epilog\fR[=]<\fIexecutable\fR> -The \fBslurmd\fR daemon will run \fIexecutable\fR just after each task -terminates. This will be before after any TaskEpilog parameter -in slurm.conf is executed. This is meant to be a very short\-lived -program. If it fails to terminate within a few seconds, it will -be killed along with any descendant processes. - -.TP -\fB\-\-task\-prolog\fR[=]<\fIexecutable\fR> -The \fBslurmd\fR daemon will run \fIexecutable\fR just before launching -each task. This will be executed after any TaskProlog parameter -in slurm.conf is executed. -Besides the normal environment variables, this has SLURM_TASK_PID -available to identify the process ID of the task being started. -Standard output from this program of the form -"export NAME=value" will be used to set environment variables -for the task being spawned. - -.TP -\fB\-u\fR, \fB\-\-unbuffered\fR -Do not line buffer standard output or standard error from remote tasks. -This option cannot be used with \-\-label. - -.TP -\fB\-\-uid\fR[=]<\fIuser\fR> -Attempt to submit and/or run a job as \fIuser\fR instead of the -invoking user id. The invoking user's credentials will be used -to check access permissions for the target partition. 
User root -may use this option to run jobs as a normal user in a RootOnly -partition for example. If run as root, \fBslaunch\fR will drop -its permissions to the uid specified after node allocation is -successful. \fIuser\fR may be the user name or numerical user ID. - -.TP -\fB\-\-usage\fR -Display brief usage message and exit. - -.TP -\fB\-V\fR, \fB\-\-version\fR -Display version information and exit. - -.TP -\fB\-v\fR, \fB\-\-verbose\fR -Increase the verbosity of slaunch's informational messages. Multiple \-v's -will further increase slaunch's verbosity. - -.TP -\fB\-W\fR, \fB\-\-wait\fR[=]<\fIseconds\fR> -slaunch will wait the specified number of seconds after the first tasks exits -before killing all tasks in the job step. If the value is 0, slaunch will -wait indefinitely for all tasks to exit. The default value is give by the -WaitTime parameter in the slurm configuration file (see \fBslurm.conf(5)\fR). - -The \-\-wait option can be used to insure that a job step terminates in a timely -fashion in the event that one or more tasks terminate prematurely. - -.TP -\fB\-w\fR, \fB\-\-nodelist\-byname\fR[=]<\fInode name list\fR> -Request a specific list of node names. The list may be specified as a comma\-separated list of node names, or a range of node names (e.g. mynode[1\-5,7,...]). Duplicate node names are not permitted in the list. -The order of the node names in the list is not important; the node names -will be sorted my SLURM. - -.TP -\fB\-Y\fR, \fB\-\-task\-layout\-byname\fR[=]<\fInode name list\fR> -Request a specific task layout. The nodelist can contain duplicate node -names, and node names may appear in any order. The order of node names in -the nodelist IS significant. Each node name in the nodes list represents -one task, with the Nth node name in the nodelist designating on which node -the Nth task should be launched. For example, a nodelist of mynode[4,3,1\-2,4] -means that tasks 0 and 4 will run on mynode4, task 1 will run on mynode3, -task 2 will run on mynode1, and task 3 will run on mynode2. - -NOTE: This option implicitly sets the task distribution method to "arbitrary". -Some network switch layers do not permit arbitrary task layout. - -.SH "INPUT ENVIRONMENT VARIABLES" -.PP -Some slaunch options may be set via environment variables. -These environment variables, along with their corresponding options, -are listed below. -Note: Command line options will always override environment variables settings. -.TP 25 -\fBSLAUNCH_COMM_HOSTNAME\fR -Same as \fB\-\-comm\-hostname\fR. -.TP -\fBSLAUNCH_CORE_FORMAT\fR -Same as \fB\-\-core\fR. -.TP -\fBSLAUNCH_CPU_BIND\fR -Same as \fB\-\-cpu_bind\fR. -.TP -\fBSLAUNCH_DEBUG\fR -Same as \fB\-v\fR or \fB\-\-verbose\fR. -.TP -\fBSLAUNCH_DISTRIBUTION\fR -Same as \fB\-m\fR or \fB\-\-distribution\fR. -.TP -\fBSLAUNCH_EPILOG\fR -Same as \fB\-\-epilog\fR=\fIexecutable\fR. -.TP -\fBSLAUNCH_JOBID\fR -Same as \fB\-\-jobid\fR. -.TP -\fBSLAUNCH_KILL_BAD_EXIT\fR -Same as \fB\-K\fR or \fB\-\-kill\-on\-bad\-exit\fR. -.TP -\fBSLAUNCH_LABELIO\fR -Same as \fB\-l\fR or \fB\-\-label\fR. -.TP -\fBSLAUNCH_MEM_BIND\fR -Same as \fB\-\-mem_bind\fR. -.TP -\fBSLAUNCH_MPI_TYPE\fR -Same as \fB\-\-mpi\fR. -.TP -\fBSLAUNCH_OVERCOMMIT\fR -Same as \fB\-C\fR or \fB\-\-overcomit\fR. -.TP -\fBSLAUNCH_PROLOG\fR -Same as \fB\-\-prolog\fR=\fIexecutable\fR. -.TP -\fBSLAUNCH_TASK_EPILOG\fR -Same as \fB\-\-task\-epilog\fR=\fIexecutable\fR. -.TP -\fBSLAUNCH_TASK_PROLOG\fR -Same as \fB\-\-task\-prolog\fR=\fIexecutable\fR. 
-.TP -\fBSLAUNCH_WAIT\fR -Same as \fB\-W\fR or \fB\-\-wait\fR. -.TP -\fBSLURMD_DEBUG\fR -Same as \fB\-d\fR or \fB\-\-slurmd\-debug\fR - -.SH "OUTPUT ENVIRONMENT VARIABLES" -.PP -slaunch will set the following environment variables which will -appear in the environments of all tasks in the job step. Since slaunch -sets these variables itself, they will also be available to \-\-prolog -and \-\-epilog scripts. (Notice that the "backwards compatibility" environment -variables clobber some of the variables that were set by salloc or sbatch -at job allocation time. The newer SLURM_JOB_* and SLURM_STEP_* names do not -conflict, so any task in any job step can easily determine the parameters -of the job allocation.) -.TP -\fBSLURM_STEP_ID\fR (and \fBSLURM_STEPID\fR for backwards compatibility) -The ID of the job step within the job allocation. -.TP -\fBSLURM_STEP_NODELIST\fR -The list of nodes in the job step. -.TP -\fBSLURM_STEP_NUM_NODES\fR (and \fBSLURM_NNODES\fR for backwards compatibility) -The number of nodes used by the job step. -.TP -\fBSLURM_STEP_NUM_TASKS\fR (and \fBSLURM_NPROCS\fR for backwards compatibility) -The number of tasks in the job step. -.TP -\fBSLURM_STEP_TASKS_PER_NODE\fR (and \fBSLURM_TASKS_PER_NODE\fR for backwards compatibility) -The number of tasks on each node in the job step. -.TP -\fBSLURM_STEP_LAUNCHER_HOSTNAME\fR (and \fBSLURM_SRUN_COMM_HOST\fR for backwards compatibility) -.TP -\fBSLURM_STEP_LAUNCHER_PORT\fR (and \fBSLURM_SRUN_COMM_PORT\fR for backwards compatibility) - -.PP -Additionally, SLURM daemons will ensure that the the following variables are -set in the environments of all tasks in the job step. Many of the following -variables will have different values in each task's environment. (These -variables are not available to the slaunch \-\-prolog and \-\-epilog scripts.) - -.TP -\fBSLURM_NODEID\fR -Node ID relative to other nodes in the job step. Counting begins at zero. -.TP -\fBSLURM_PROCID\fR -Task ID relative to the other tasks in the job step. Counting begins at zero. -.TP -\fBSLURM_LOCALID\fR -Task ID relative to the other tasks on the same node which belong to the -same job step. Counting begins at zero. -.TP -\fBSLURMD_NODENAME\fR -The SLURM NodeName for the node on which the task is running. Depending -on how your system administrator has configured SLURM, the NodeName for a -node may not be the same as the node's hostname. When you use commands -such as \fBsinfo\fR and \fBsqueue\fR, or look at environment variables such -as SLURM_JOB_NODELIST and SLURM_STEP_NODELIST, you are seeing SLURM NodeNames. - -.SH "MULTIPLE PROGRAMS FILE" -Comments in the configuration file must have a "#" in collumn one. -The configuration file contains the following fields separated by white -space: -.TP -Task rank -One or more task ranks to use this configuration. -Multiple values may be comma separated. -Ranges may be indicated with two numbers separated with a '\-'. -To indicate all tasks, specify a rank of '*' (in which case you probably -should not be using this option). -.TP -Executable -The name of the program to execute. -May be fully qualified pathname if desired. -.TP -Arguments -Program arguments. -The expression "%t" will be replaced with the task's number. -The expression "%o" will be replaced with the task's offset within -this range (e.g. a configured task rank value of "1\-5" would -have offset values of "0\-4"). -Single quotes may be used to avoid having the enclosed values interpretted. -This field is optional. 
-.PP -For example: -.nf -################################################################### -# srun multiple program configuration file -# -# srun \-n8 \-l \-\-multi\-prog silly.conf -################################################################### -4\-6 hostname -1,7 echo task:%t -0,2\-3 echo offset:%o - -$ srun \-n8 \-l \-\-multi\-prog silly.conf -0: offset:0 -1: task:1 -2: offset:1 -3: offset:2 -4: linux15.llnl.gov -5: linux16.llnl.gov -6: linux17.llnl.gov -7: task:7 - -.fi - -.SH "EXAMPLES" -.LP -To launch a job step (parallel program) in an existing job allocation: -.IP -slaunch \-\-jobid 66777 \-N2 \-n8 myprogram - -.LP -To grab an allocation of nodes and launch a parallel application on one command line (See the \fBsalloc\fR man page for more examples): -.IP -salloc \-N5 slaunch \-n10 myprogram - -.SH "COPYING" -Copyright (C) 2006\-2007 The Regents of the University of California. -Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. -.LP -This file is part of SLURM, a resource management program. -For details, see <https://computing.llnl.gov/linux/slurm/>. -.LP -SLURM is free software; you can redistribute it and/or modify it under -the terms of the GNU General Public License as published by the Free -Software Foundation; either version 2 of the License, or (at your option) -any later version. -.LP -SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -details. - -.SH "SEE ALSO" -.LP -sinfo(1), sattach(1), salloc(1), sbatch(1), squeue(1), scancel(1), scontrol(1), slurm.conf(5), sched_setaffinity(2), numa(3) diff --git a/doc/man/man1/slurm.1 b/doc/man/man1/slurm.1 index 096f029a7..3e4626bf6 100644 --- a/doc/man/man1/slurm.1 +++ b/doc/man/man1/slurm.1 @@ -37,7 +37,7 @@ Extensive documenation is also available on the internet at .SH "COPYING" Copyright (C) 2005\-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. @@ -55,7 +55,7 @@ details. .SH "SEE ALSO" \fBsacct\fR(1), \fBsalloc\fR(1), \fBsattach\fR(1), \fBsbatch\fR(1), \fBsbcast\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1), \fBsinfo\fR(1), -\fBslaunch\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1), \fBsrun\fR(1), +\fBsmap\fR(1), \fBsqueue\fR(1), \fBsrun\fR(1), \fBsview\fR(1), \fBbluegene.conf\fR(5), \fBslurm.conf\fR(5), \fBwiki.conf\fR(5), \fBslurmctld\fR(8), \fBslurmd\fR(8), \fBslurmstepd\fR(8), \fBspank\fR(8) diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1 index a077063ea..3319ce082 100644 --- a/doc/man/man1/smap.1 +++ b/doc/man/man1/smap.1 @@ -1,4 +1,4 @@ -.TH SMAP "1" "March 2006" "smap 1.1" "Slurm components" +.TH SMAP "1" "May 2007" "smap 1.3" "Slurm components" .SH "NAME" smap \- graphically view information about SLURM jobs, partitions, and set @@ -26,8 +26,8 @@ views and displaying a corresponding node chart. While in any display a user can switch by typing a different view letter. This is true in all modes except for 'configure mode' user can type 'quit' to exit just configure mode. Typing 'exit' will end the configuration mode and exit smap. -Note that unallocated nodes are indicated by a '.' and DOWN or DRAINED -nodes by a '#'. +Note that unallocated nodes are indicated by a '.' 
and nodes in the +DOWN, DRAINED or FAIL state by a '#'. .RS .TP 15 .I "j" @@ -126,11 +126,13 @@ F (failed), TO (timeout), and NF (node failure). See \fBJOB STATE CODES\fR section below for more information. .TP \fBSTATE\fR -State of the nodes. Possible states include: down, unknown, -idle, allocated, drained, draining, completing and their -abbreviated forms: down, unk, idle, alloc, drain, drng, and -comp respectively. Note that the suffix "*" identifies nodes -that are presently not responding. +State of the nodes. +Possible states include: allocated, completing, down, +drained, draining, fail, failing, idle, and unknown plus +their abbreviated forms: alloc, comp, donw, drain, drng, +fail, failg, idle, and unk respectively. +Note that the suffix "*" identifies nodes that are presently +not responding. See \fBNODE STATE CODES\fR section below for more information. .TP \fBTIMELIMIT\fR @@ -326,7 +328,9 @@ If the node state code is followed by "*", this indicates the node is presently not responding and will not be allocated any new work. If the node remains non\-responsive, it will be placed in the \fBDOWN\fR state (except in the case of -\fBDRAINED\fR, \fBDRAINING\fR, or \fBCOMPLETING\fR nodes). +\fBCOMPLETING\fR, \fBDRAINED\fR, \fBDRAINING\fR, +\fBFAIL\fR, \fBFAILING\fR nodes). + If the node state code is followed by "~", this indicates the node is presently in a power saving mode (typically running at reduced frequency). @@ -369,6 +373,18 @@ this state per system administrator request. See the \fBupdate node\fR command in the \fBscontrol\fR(1) man page or the \fBslurm.conf\fR(5) man page for more information. .TP +\fBFAIL\fR +The node is expected to fail soon and is unavailable for +use per system administrator request. +See the \fBupdate node\fR command in the \fBscontrol\fR(1) +man page or the \fBslurm.conf\fR(5) man page for more information. +.TP +\fBFAILING\fR +The node is currently executing a job, but is expected to fail +soon and is unavailable for use per system administrator request. +See the \fBupdate node\fR command in the \fBscontrol\fR(1) +man page or the \fBslurm.conf\fR(5) man page for more information. +.TP \fBIDLE\fR The node is not allocated to any jobs and is available for use. .TP @@ -421,7 +437,7 @@ The location of the SLURM configuration file. .SH "COPYING" Copyright (C) 2004\-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1 index 99a5ff821..b522252ab 100644 --- a/doc/man/man1/squeue.1 +++ b/doc/man/man1/squeue.1 @@ -1,4 +1,4 @@ -.TH SQUEUE "1" "December 2006" "squeue 1.2" "Slurm components" +.TH SQUEUE "1" "May 2008" "squeue 1.3" "Slurm components" .SH "NAME" squeue \- view information about jobs located in the SLURM scheduling queue. @@ -189,7 +189,8 @@ Minimum number of nodes requested by the job. Are contiguous nodes requested by the job .TP \fB%p\fR -Priority of the job (converted to a floating point number between 0.0 and 1.0 +Priority of the job (converted to a floating point number between 0.0 and 1.0). +Also see \fB%Q\fR. .TP \fB%P\fR Partition of the job or job step @@ -197,6 +198,10 @@ Partition of the job or job step \fB%q\fR Comment associated with the job .TP +\fB%Q\fR +Priority of the job (generally a very large unsigned integer). +Also see \fB%p\fR. 
+.TP
\fB%r\fR
The reason a job is in its current state. See the \fBJOB REASON
CODES\fR section below for more information.
@@ -327,6 +332,7 @@ The partition required by this job is in a DOWN state.
\fBPartitionNodeLimit\fR
The number of nodes required by this job is outside of its partition's
current limits.
+Can also indicate that required nodes are DOWN or DRAINED.
.TP
\fBPartitionTimeLimit\fR
The job's time limit exceeds its partition's current time limit.
@@ -478,7 +484,7 @@ Print information only about job step 65552.1:
.SH "COPYING"
Copyright (C) 2002\-2007 The Regents of the University of California.
Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-UCRL\-CODE\-226842.
+LLNL\-CODE\-402394.
.LP
This file is part of SLURM, a resource management program.
For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
new file mode 100644
index 000000000..e67311c9f
--- /dev/null
+++ b/doc/man/man1/sreport.1
@@ -0,0 +1,111 @@
+.TH SREPORT "1" "May 2008" "sreport 1.3" "Slurm components"
+
+.SH "NAME"
+sreport \- Used to generate reports from the slurm accounting data.
+
+.SH "SYNOPSIS"
+\fBsreport\fR [\fIOPTIONS\fR...] [\fICOMMAND\fR...]
+
+.SH "DESCRIPTION"
+\fBsreport\fR is used to generate certain reports. Right now
+sreport is a template that does not really do anything. In the future
+it will provide a view into accounting data gathered from slurm via
+the account information maintained within a database with the interface
+being provided by \fBslurmdbd\fR (Slurm Database daemon).
+
+.SH "OPTIONS"
+
+.TP
+\fB\-h\fR, \fB\-\-help\fR
+Print a help message describing the usage of \fBsreport\fR.
+This is equivalent to the \fBhelp\fR command.
+
+.TP
+\fB\-o\fR, \fB\-\-oneliner\fR
+Print information one line per record.
+This is equivalent to the \fBoneliner\fR command.
+
+.TP
+\fB\-q\fR, \fB\-\-quiet\fR
+Print no warning or informational messages, only error messages.
+This is equivalent to the \fBquiet\fR command.
+
+.TP
+\fB\-v\fR, \fB\-\-verbose\fR
+Print detailed event logging.
+This is equivalent to the \fBverbose\fR command.
+
+.TP
+\fB\-V\fR, \fB\-\-version\fR
+Print version information and exit.
+This is equivalent to the \fBversion\fR command.
+
+.TP
+\fBCOMMANDS\fR
+
+.TP
+\fBexit\fP
+Terminate the execution of sreport.
+Identical to the \fBquit\fR command.
+
+.TP
+\fBhelp\fP
+Display a description of sreport options and commands.
+
+.TP
+\fBhide\fP
+Do not display information about hidden or deleted entities.
+
+.TP
+\fBoneliner\fP
+Print information one line per record.
+
+.TP
+\fBquiet\fP
+Print no warning or informational messages, only fatal error messages.
+
+.TP
+\fBquit\fP
+Terminate the execution of sreport.
+Identical to the \fBexit\fR command.
+
+.TP
+\fBverbose\fP
+Print detailed event logging.
+This includes time\-stamps on data structures, record counts, etc.
+This is an independent command with no options meant for use in interactive mode.
+
+.TP
+\fBversion\fP
+Display the version number of sreport being executed.
+
+.TP
+\fB!!\fP
+Repeat the last command executed.
+
+.SH "EXAMPLES"
+.eo
+.br
+.ec
+
+.SH "COPYING"
+Copyright (C) 2008 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+LLNL\-CODE\-402394.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP +SLURM is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2 of the License, or (at your option) +any later version. +.LP +SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +details. + +.SH "SEE ALSO" +\fBslurm.conf\fR(5) +\fBslurmdbd\fR(8) diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1 index 2f2249ac7..afe14ad59 100644 --- a/doc/man/man1/srun.1 +++ b/doc/man/man1/srun.1 @@ -1,27 +1,28 @@ -.\" $Id: srun.1 13766 2008-04-02 17:19:34Z jette $ +.\" $Id: srun.1 14123 2008-05-23 20:22:46Z jette $ .\" -.TH SRUN "1" "July 2007" "srun 1.2" "slurm components" +.TH SRUN "1" "May 2008" "srun 1.3" "slurm components" .SH "NAME" srun \- run parallel jobs .SH SYNOPSIS \fBsrun\fR [\fIOPTIONS\fR...] \fIexecutable \fR[\fIargs\fR...] -.br -\fBsrun\fR \-\-batch [\fIOPTIONS\fR...] job_script \fR[\fIargs\fR...] -.br -\fBsrun\fR \-\-allocate [\fIOPTIONS\fR...] [job_script \fR[\fIargs\fR...]] -.br -.B srun -\-\-attach=jobid .SH DESCRIPTION -Allocate resources and optionally initiate parallel jobs on -clusters managed by SLURM. +Run a parallel job on cluster managed by SLURM. If necessary, srun will +first create a resource allocation in which to run the parallel job. .SH "OPTIONS" .LP +.TP +\fB\-\-acctg\-freq\fR=\fIseconds\fR +Define the job accounting sampling interval. +This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's +configuration file, \fIslurm.conf\fR. +A value of zero disables real the periodic job sampling and provides accounting +information only on job termination (reducing SLURM interference with the job). + .TP \fB\-B\fR \fB\-\-extra\-node\-info\fR=\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]] Request a specific allocation of resources with details as to the @@ -50,46 +51,6 @@ configured. If select/cons_res is configured, it must have a parameter of CR_Core, CR_Core_Memory, CR_Socket, or CR_Socket_Memory. -.TP -\fB\-b\fR, \fB\-\-batch\fR -\fBNOTE: This functionality has been moved to a new command, sbatch -This option will be removed from srun at a later date.\fR -.br -Submit in "batch mode." \fBsrun\fR will make a copy of the \fIexecutable\fR -file (a script) and submit the request for execution when resouces are -available. \fBsrun\fR will terminate after the request has been submitted. -The \fIexecutable\fR file will run on the first node allocated to the -job and must contain \fBsrun\fR commands to initiate parallel tasks. -stdin will be redirected from /dev/null, stdout and stderr will be -redirected to a file (default is \fIjobname\fR.out or \fIjobid\fR.out in -current working directory, see \fB\-o\fR for other IO options). -Note that if the slurm daemons are cold\-started, jobid values will be -reused. Plan accordingly to avoid over\-writing output and error files. -\fIexecutable\fR must be specified using either a fully qualified -pathname or its pathname will be relative to the current working directory. -The search path will not be used to locate the file. \fIexecutable\fR -will be interpreted by the users default shell unless the file begins -with "#!" followed by the fully qualified pathname of a valid shell. -Note that batch jobs will be re\-queued if a node fails while it is being -initiated. 
- -Srun commandline options can also be inserted into the script by prefacing -the option with #SLURM. Multiple options can be on one line or multiple lines. -For example: - -.nf - #SLURM \-N 2 \-n 2 - #SLURM \-\-mpi=lam -.fi - -This is run the script on 2 nodes, with 2 procs with mpi type lam. -All commandline options are able to be set inside the script with the -exception of the mode (which has already been set since to run a batch -script you are in batch mode). -.br -Options on the command line take precedence over options in the batch -script, which in turn take precedence over exiting environmement variables. - .TP \fB\-\-begin\fR=\fItime\fR Defer initiation of this job until the specified time. @@ -117,6 +78,19 @@ For example: \-\-begin=02/22/08-12:34:67 .fi +.TP +\fB\-\-checkpoint\fR=\fItime\fR +Specifies the interval between creating checkpoints of the job step. +By default, the job step will no checkpoints created. +Acceptable time formats include "minutes", "minutes:seconds", +"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and +"days\-hours:minutes:seconds". + +.TP +\fB\-\-checkpoint\-path\fR=\fIdirectory\fR +Specifies the directory into which the job step's checkpoint should +be written (used by the checkpoint/xlch plugin only). + .TP \fB\-C\fR, \fB\-\-constraint\fR[=]<\fIlist\fR> Specify a list of constraints. @@ -131,6 +105,12 @@ nodes, then use the OR operator and enclose the options within square brackets. For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might be used to specify that all nodes must be allocated on a single rack of the cluster, but any of those four racks can be used. +A request can also specify the number of nodes needed with some feature +by appending an asterisk and count after the feature name. +For example "\fBsrun \-\-nodes=16 \-\-constraint=graphics*4 ..."\fR +indicates that the job requires 16 nodes at that at least four of those +nodes must have the feature "graphics." +Constraints with node counts may only be combined with AND operators. If no nodes have the requested features, then the job will be rejected by the slurm job manager. @@ -276,33 +256,19 @@ parameter in slurm.conf. .TP \fB\-\-exclusive\fR -Dedicate whole nodes to the job rather than individual processors -even if consumable resources are enabled +When used to initiate a job step within an existing resource allocation, +proceed only when processors can be dedicated to the job step without +sharing with other job steps. This can be used to initiate many +job steps simultaneously within an existing job allocation and have +SLURM perform resource management for the job. +In this mode, use with the \fB\-\-ntasks\fR option and NOT the +\fB\-\-nodes\fR, \fB\-\-relative\fR, \fB\-\-relative\fR=\fIarbitrary\fR +options (which provide user control over task layout). +See \fBEXAMPLE\fR below. +When used to initiate a job, dedicate whole nodes to the job rather +than individual processors even if consumable resources are enabled (e.g. \fBSelectType=select/cons_res\fR). -.TP -\fB\-\-get\-user\-env\fR[=\fItimeout\fR][\fImode\fR] -For a batch script submission, this option will tell srun to retrieve the -login environment variables for the user specified in the \fB\-\-uid\fR option. -The environment variables are retrieved by running something of this sort -"su \- <username> \-c /usr/bin/env" and parsing the output. 
-Be aware that any environment variables already set in srun's environment -will take precedence over any environment variables in the user's -login environment. -The optional \fItimeout\fR value is in seconds. Default value is 8 seconds. -The optional \fImode\fR value control the "su" options. -With a \fImode\fR value of "S", "su" is executed without the "\-" option. -With a \fImode\fR value of "L", "su" is executed with the "\-" option, -replicating the login environment. -If \fImode\fR not specified, the mode established at SLURM build time -is used. -Example of use include "\-\-get\-user\-env", "\-\-get\-user\-env=10" -"\-\-get\-user\-env=10L", and "\-\-get\-user\-env=S". -NOTE: This option only works if the caller has an -effective uid of "root", and only takes effect in batch mode -(\fB\-b\fR/\fB\-\-batch\fR). -This option was originally created for use by Moab. - .TP \fB\-\-gid\fR=\fIgroup\fR If \fBsrun\fR is run as root, and the \fB\-\-gid\fR option is used, @@ -380,9 +346,7 @@ not for the submission of individual job steps. The job will assume all responsibilities for fault\-tolerance. The active job step (MPI job) will almost certainly suffer a fatal error, but subsequent job steps may be run if this option is specified. The -default action is to terminate job upon node failure. Note that -\fB\-\-batch\fR jobs will be re\-queued if a node failure occurs in the -process of initiating it. +default action is to terminate job upon node failure. .TP \fB\-l\fR, \fB\-\-label\fR @@ -393,11 +357,16 @@ The \fB\-\-label\fR option will prepend lines of output with the remote task id. .TP -\fB-u\fR, \fB\-\-unbuffered\fR -do not line buffer stdout from remote tasks. This option cannot be used -with \fI\-\-label\fR. +\fB\-L\fR, \fB\-\-licenses\fR= +Specification of licenses (or other resources available on all +nodes of the cluster) which must be allocated to this job. +License names can be followed by an asterisk and count +(the default count is one). +Multiple license names should be comma separated (e.g. +"\-\-licenses=foo*4,bar"). + .TP -\fB\-m\fR, \fB\-\-distribution\fR= +\fB\-m\fR, \fB\-\-relative\fR (\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR) Specify an alternate distribution method for remote processes. .RS @@ -454,8 +423,13 @@ User to receive email notification of state changes as defined by The default value is the submitting user. .TP -\fB\-\-mem\fR=\fIMB\fR -Specify a minimum amount of real memory. +\fB\-\-mem\fR[=]<\fIMB\fR> +Specify the real memory required per node in MegaBytes. +If a value is specified, that quantity of memory will be +reserved for this job. +If no value is specified and real memory is exhausted on +any allocated node then the job is subject to cancellation. +Also see \fB\-\-task\-mem\fR. .TP \fB\-\-mem_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR @@ -550,8 +524,9 @@ Specify a minimum number of threads per core. .TP \fB\-\-msg\-timeout\fR=\fIseconds\fR -Modify the job launch message timeout. -Changes to this are typically not recommended. +Modify the job launch message timeout. +The default value is \fBMessageTimeout\fR in the SLURM configuration file slurm.conf. +Changes to this are typically not recommended, but could be useful to diagnose problems. .TP \fB\-\-mpi\fR=\fImpi_type\fR @@ -560,7 +535,7 @@ procedures. .RS .TP .B list -Lists avaliable mpi types to choose from. +Lists available mpi types to choose from. 
.TP .B lam Initiates one 'lamd' process per node and establishes necessary @@ -612,16 +587,18 @@ Note that the environment variable \fBSLURM_NNODES\fR will be set to the count of nodes actually allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section for more information. If \fB\-N\fR is not specified, the default -behaviour is to allocate enough nodes to satisfy the requirements of +behavior is to allocate enough nodes to satisfy the requirements of the \fB\-n\fR and \fB\-c\fR options. -The job will be allocated as many nodes as possible within the range specified +The job will be allocated as many nodes as possible within the range specified and without delaying the initiation of the job. .TP \fB\-n\fR, \fB\-\-ntasks\fR=\fIntasks\fR -Specify the number of processes to run. Request that \fBsrun\fR -allocate \fIntasks\fR processes. The default is one process per -node, but note that the \fB\-c\fR parameter will change this default. +Specify the number of tasks to run. Request that \fBsrun\fR +allocate resources for \fIntasks\fR tasks. +The default is one task per socket or core (depending upon the value +of the \fISelectTypeParameters\fR parameter in slurm.conf), but note +that the \fB\-\-cpus\-per\-task\fR option will change this default. .TP \fB\-\-network\fR=\fItype\fR @@ -631,10 +608,10 @@ Since POE is used to launch tasks, this option is not normally used or is specified using the \fBSLURM_NETWORK\fR environment variable. The interpretation of \fItype\fR is system dependent. For systems with an IBM Federation switch, the following -comma\-separated and case insensitive types are recongnized: +comma\-separated and case insensitive types are recognized: \fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, \fBBULK_XFER\fR and adapter names (e.g. \fBSNI0\fR and \fBSNI1\fR). -For more information, on IBM systems see \fIpoe\fR documenation on +For more information, on IBM systems see \fIpoe\fR documentation on the environment variables \fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR. Note that only four jobs steps may be active at once on a node with the \fBBULK_XFER\fR option due to limitations in the Federation switch driver. @@ -649,14 +626,6 @@ a negative adjustment. NOTE: This option is presently ignored if \fISchedulerType=sched/wiki\fR or \fISchedulerType=sched/wiki2\fR. -.TP -\fB\-\-no\-requeue\fR -Specifies that the batch job is not requeue. -Setting this option will prevent system administrators from being able -to restart the job (for example, after a scheduled downtime). -When a job is requeued, the batch script is initiated from its beginning. -This option is only applicable to batch job submission (see \fB\-\-batch\fR). - .TP \fB\-\-ntasks\-per\-core\fR=\fIntasks\fR Request that no more than \fIntasks\fR be invoked on each core. @@ -680,7 +649,7 @@ or \fISelectType=CR_Socket_Memory\fR is configured. .TP \fB\-\-ntasks\-per\-node\fR=\fIntasks\fR Request that no more than \fIntasks\fR be invoked on each node. -This is similiar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR +This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR but does not require knowledge of the actual number of cpus on each node. In some cases, it is more convenient to be able to request that no more than a specific number of ntasks be invoked @@ -715,12 +684,38 @@ If \fB\-\-error\fR is not also specified on the command line, both stdout and stderr will directed to the file specified by \fB\-\-output\fR. 
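.PP
A minimal sketch of the node and task count options described above (the
counts are arbitrary and hostname serves only as a trivially parallel
program): request four nodes, two tasks per node, and label each output
line with the rank that produced it:
.nf

  srun \-N4 \-\-ntasks\-per\-node=2 \-l hostname

.fi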
.TP -\fB\-P\fR, \fB\-\-dependency\fR=\fIjobid\fR -Defer initiation of this job until the specified jobid -has completed execution. Many jobs can share the same -dependency and these jobs may belong to different users. -The value may be changed after job submission using the -\fBscontrol\fR command. +\fB\-\-open\-mode\fR=append|truncate +Open the output and error files using append or truncate mode as specified. +The default value is specified by the system configuration parameter +\fIJobFileAppend\fR. + +.TP +\fB\-P\fR, \fB\-\-dependency\fR[=]<\fIdependency_list\fR> +Defer the start of this job until the specified dependencies have been +satisfied completed. +<\fIdependency_list\fR> is of the form +<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>. +Many jobs can share the same dependency and these jobs may even belong to +different users. The value may be changed after job submission using the +scontrol command. +.PD +.RS +.TP +\fBafter:job_id[:jobid...]\fR +This job can begin execution after the specified jobs have begun +execution. +.TP +\fBafterany:job_id[:jobid...]\fR +This job can begin execution after the specified jobs have terminated. +.TP +\fBafternotok:job_id[:jobid...]\fR +This job can begin execution after the specified jobs have terminated +in some failed state (non-zero exit code, node failure, timed out, etc). +.TP +\fBafterok:job_id[:jobid...]\fR +This job can begin execution after the specified jobs have successfully +executed (ran to completion with non-zero exit code). +.RE .TP \fB\-p\fR, \fB\-\-partition\fR=\fIpartition\fR @@ -780,6 +775,14 @@ The maximum resident set size The maximum stack size .RE +.TP +\fB\-\-pty\fR +Execute task zero in pseudo terminal. +Implicitly sets \fB\-\-unbuffered\fR. +Implicitly sets \fB\-\-error\fR and \fB\-\-output\fR to /dev/null +for all tasks except task zero. +Not currently supported on AIX platforms. + .TP \fB\-Q\fR, \fB\-\-quiet\fR Quiet operation. Suppress informational messages. Errors will still @@ -840,6 +843,13 @@ in slurm.conf is executed. This is meant to be a very short\-lived program. If it fails to terminate within a few seconds, it will be killed along with any descendant processes. +.TP +\fB\-\-task\-mem\fR[=]<\fIMB\fR> +Mimimum memory available per task in MegaBytes. +Default value is \fBDefMemPerTask\fR and the maximum value is +\fBMaxMemPerTask\fR, both of which can be seen using the +\fBscontrol show config\fR command. + .TP \fB\-\-task\-prolog\fR=\fIexecutable\fR The \fBslurmd\fR daemon will run \fIexecutable\fR just before launching @@ -923,42 +933,6 @@ Request that a specific list of hosts not be included in the resources allocated to this job. The host list will be assumed to be a filename if it contains a "/"character. -.PP -Allocate options. \fBNOTE: This functionality has been moved to a new command, -salloc. This option will be removed from srun at a later date.\fR - -.TP -\fB\-A\fR, \fB\-\-allocate\fR -allocate resources and spawn a shell. When \fB\-\-allocate\fR is specified to -\fBsrun\fR, no remote tasks are started. Instead a subshell is started that -has access to the allocated resources. Multiple jobs can then be run on the -same cpus from within this subshell. See \fBAllocate Mode\fR below. - -.TP -\fB\-\-no\-shell\fR -immediately exit after allocating resources instead of spawning a -shell when used with the \fB\-A\fR, \fB\-\-allocate\fR option. - -.PP -Attach to running job. \fBNOTE: This functionality has been moved to a new -command, sattach. 
This option will be removed from srun at a later date.\fR - -.TP -\fB\-a\fR, \fB\-\-attach\fR=\fIid\fR -This option will attach \fBsrun\fR -to a running job with job id = \fIid\fR. Provided that the calling user -has access to that running job, stdout and stderr will be redirected to the -current session (assuming that the tasks' stdout and stderr are not connected -directly to files). stdin is not connected to the remote tasks, and signals -are not forwarded unless the \fB\-\-join\fR parameter is also specified. - -.TP -\fB\-j\fR, \fB\-\-join\fR -Used in conjunction with \fB\-\-attach\fR to specify that stdin should -also be connected to the remote tasks (assuming that the remote tasks' -stdin are not directly connected to files), and signals sent to \fBsrun\fR -will be forwarded to the remote tasks. - .PP The following options support Blue Gene systems, but may be applicable to other systems as well. @@ -1010,9 +984,6 @@ Default from \fIblugene.conf\fR if not set. Force the allocated nodes to reboot before starting the job. .PP -Unless the \fB\-a\fR (\fB\-\-attach\fR) or \fB\-A\fR (\fB\-\-allocate\fR) -options are specified (see \fBAllocate mode\fR and \fBAttaching to jobs\fR -below), .B srun will submit the job request to the slurm job controller, then initiate all processes on the remote nodes. If the request cannot be met immediately, @@ -1090,11 +1061,7 @@ This file will be written on the node executing the task. \fBsrun\fR will redirect stdout and/or stderr to the named file from all tasks. stdin will be redirected from the named file and broadcast to all -tasks in the job. -If the job is submitted in batch mode using the \fB\-b\fR or -\fB\-\-batch\fR option, \fIfilename\fR refers to a path on the first -node allocated to the job. -Otherwise \fIfilename\fR refers to a path on the host +tasks in the job. \fIfilename\fR refers to a path on the host that runs \fBsrun\fR. Depending on the cluster's file system layout, this may result in the output appearing in different places depending on whether the job is run in batch mode. @@ -1149,41 +1116,7 @@ job128\-00.out, job128\-01.out, ... .PP .RS -10 .PP -.B "Allocate Mode" -.PP -When the allocate option is specified (\fB\-A\fR, \fB\-\-allocate\fR) -\fBsrun\fR will not initiate any remote processes after acquiring -resources. Instead, \fBsrun\fR will spawn a subshell which has access -to the acquired resources. Subsequent instances of \fBsrun\fR from within -this subshell will then run on these resources. -.PP -If the name of a script is specified on the -commandline with \fB\-\-allocate\fR, the spawned shell will run the -specified script. Resources allocated in this way will only be freed -when the subshell terminates. -.PP -.B "Attaching to a running job" -.PP -Use of the \fB\-a\fR \fIjobid\fR (or \fB\-\-attach\fR) option allows -\fBsrun\fR to reattach to a running job, receiving stdout and stderr -from the job and forwarding signals to the job, just as if the current -session of \fBsrun\fR had started the job. (stdin, however, cannot -be forwarded to the job). -.PP -There are two ways to reattach to a running job. The default method -is to attach to the current job read\-only. In this case, -stdout and stderr are duplicated to the attaching \fBsrun\fR, but -signals are not forwarded to the remote processes (A single -Ctrl\-C will detach this read\-only \fBsrun\fR from the job). 
If -the \fB\-j\fR (\fB\-\-join\fR) option is is also specified, -\fBsrun\fR "joins" the running job, and is able to forward signals, -connects stdin, and acts for the most part much like the \fBsrun\fR -process that initiated the job. -.PP -Node and CPU selection options do not make sense when specifying -\fB\-\-attach\fR, and it is an error to use \fB\-n\fR, \fB\-c\fR, -or \fB\-N\fR in attach mode. -.PP + .SH "ENVIRONMENT VARIABLES" .PP Some srun options may be set via environment variables. @@ -1198,7 +1131,7 @@ sends messages to application programs (via the PMI library) and those applications may be called upon to forward that data to up to this number of additional tasks. Higher values offload work from the srun command to the applications and -likely increase the vulernability to failures. +likely increase the vulnerability to failures. The default value is 32. .TP \fBPMI_FANOUT_OFF_HOST\fR @@ -1211,8 +1144,10 @@ per host and one task on that host forwards the data to other tasks on that host up to \fBPMI_FANOUT\fR. If \fBPMI_FANOUT_OFF_HOST\fR is defined, the user task may be required to forward the data to tasks on other hosts. -Setting \fBPMI_FANOUT_OFF_HOST\fR may increase performance -and vulernability to failures. +Setting \fBPMI_FANOUT_OFF_HOST\fR may increase performance. +Since more work is performed by the PMI library loaded by +the user application, failures also can be more common and +more difficult to diagnose. .TP \fBPMI_TIME\fR This is used exclusively with PMI (MPICH2 and MVAPICH2) and @@ -1220,7 +1155,7 @@ controls how much the communications from the tasks to the srun are spread out in time in order to avoid overwhelming the srun command with work. The default value is 500 (microseconds) per task. On relatively slow processors or systems with very -large processsor counts (and large PMI data sets), higher values +large processor counts (and large PMI data sets), higher values may be required. .TP \fBSLURM_CONF\fR @@ -1229,6 +1164,9 @@ The location of the SLURM configuration file. \fBSLURM_ACCOUNT\fR Same as \fB\-U, \-\-account\fR=\fIaccount\fR .TP +\fBSLURM_CHECKPOINT\fR +Same as \fB\-\-checkpoint\fR=\fItime\fR +.TP \fBSLURM_CPU_BIND\fR Same as \fB\-\-cpu_bind\fR=\fItype\fR .TP @@ -1292,15 +1230,15 @@ Same as \fB\-\-ntasks\-per\-node\fRa \fBSLURN_NTASKS_PER_SOCKET\fR Same as \fB\-\-ntasks\-per\-socket\fRa .TP -\fBSLURM_NO_REQUEUE\fR -Same as \fB\-\-no\-requeue\fR -.TP \fBSLURM_NO_ROTATE\fR Same as \fB\-\-no\-rotate\fR .TP \fBSLURM_NPROCS\fR Same as \fB\-n, \-\-ntasks\fR=\fIn\fR .TP +\fBSLURM_OPEN_MODE\fR +Same as \fB\-\-open\-mode\fR +.TP \fBSLURM_OVERCOMMIT\fR Same as \fB\-O, \-\-overcommit\fR .TP @@ -1328,6 +1266,9 @@ Same as \fB\-o, \-\-output\fR=\fImode\fR \fBSLURM_TASK_EPILOG\fR Same as \fB\-\-task\-epilog\fR=\fIexecutable\fR .TP +\fBSLURM_TASK_MEM\fR +Same as \fB\-\-task\-mem\fR +.TP \fBSLURM_TASK_PROLOG\fR Same as \fB\-\-task\-prolog\fR=\fIexecutable\fR .TP @@ -1379,7 +1320,7 @@ Zero origin and comma separated. Job id of the executing job .TP \fBSLURM_LAUNCH_NODE_IPADDR\fR -IP adddress of the node from which the task launch was +IP address of the node from which the task launch was initiated (where the srun command ran from) .TP \fBSLURM_LOCALID\fR @@ -1410,7 +1351,7 @@ Total number of processes in the current job .TP \fBSLURM_PRIO_PROCESS\fR The scheduling priority (nice value) at the time of job submission. -This value is propaged to the spawned processes. +This value is propagated to the spawned processes. 
.TP \fBSLURM_PROCID\fR The MPI rank (or relative process ID) of the current process @@ -1435,7 +1376,7 @@ The umask (user file\-create mask) at the time of job submission. This value is propagated to the spawned processes. .TP \fBMPIRUN_NOALLOCATE\fR -Do not allcate a block on Blue Gene systems only. +Do not allocate a block on Blue Gene systems only. .TP \fBMPIRUN_NOFREE\fR Do not free a block on Blue Gene systems only. @@ -1509,7 +1450,7 @@ The expression "%t" will be replaced with the task's number. The expression "%o" will be replaced with the task's offset within this range (e.g. a configured task rank value of "1\-5" would have offset values of "0\-4"). -Single quotes may be used to avoid having the enclosed values interpretted. +Single quotes may be used to avoid having the enclosed values interpreted. This field is optional. .PP For example: @@ -1555,26 +1496,6 @@ the request. The output of each task will be proceeded with its task number. 6: dev3 7: dev3 -.fi -.PP -This example demonstrates how one might submit a script for later -execution (batch mode). The script will be initiated when resources -are available and no higher priority job is pending for the same -partition. The script will execute on 4 nodes with one task per node -implicit. Note that the script executes on one node. For the script -to utilize all allocated nodes, it must execute the \fBsrun\fR command -or an MPI program. - -.nf - -> cat test.sh -#!/bin/sh -date -srun \-l hostname - -> srun \-N4 \-b test.sh -srun: jobid 42 submitted - .fi .PP The output of test.sh would be found in the default output file @@ -1593,7 +1514,7 @@ echo $SLURM_NODELIST srun \-lN2 \-r2 hostname srun \-lN2 hostname -> srun \-A \-N4 test.sh +> salloc \-N4 test.sh dev[7\-10] 0: dev9 1: dev10 @@ -1616,7 +1537,7 @@ squeue squeue \-s wait -> srun \-A \-N4 test.sh +> salloc \-N4 test.sh JOBID PARTITION NAME USER ST TIME NODES NODELIST 65641 batch test.sh grondo R 0:01 4 dev[7\-10] @@ -1647,7 +1568,7 @@ mpirun \-np $SLURM_NPROCS \-machinefile $MACHINEFILE mpi\-app rm $MACHINEFILE -> srun \-AN2 \-n4 test.sh +> salloc \-N2 \-n4 test.sh .fi .PP @@ -1684,11 +1605,27 @@ dedicated to the job. > srun \-N2 \-B 4\-4:2\-2 a.out .fi +.PP +This example shows a script in which Slurm is used to provide resource +management for a job by executing the various job steps as processors +become available for their dedicated use. + +.nf + +> cat my.script +#!/bin/bash +srun \-\-exclusive \-n4 prog1 & +srun \-\-exclusive \-n3 prog2 & +srun \-\-exclusive \-n1 prog3 & +srun \-\-exclusive \-n1 prog4 & +wait +.fi + .SH "COPYING" Copyright (C) 2006\-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1 new file mode 100644 index 000000000..d8ca661fc --- /dev/null +++ b/doc/man/man1/sstat.1 @@ -0,0 +1,262 @@ +.TH SSTAT "1" "May 2008" "sacctmgr 1.3" "Slurm components" + +.SH "NAME" +sstat \- Used to query running job and see various usage information +of job/step running. + +.SH "SYNOPSIS" +.BR "sstat " +\fBsstat\fR [\fIOPTIONS\fR...] + +.SH "DESCRIPTION" +.PP +Status information for jobs invoked with SLURM. +.PP +The +.BR "sstat " +command displays job accounting data stored in the job accounting log +file in a variety of forms for your analysis. 
+The +.BR "sstat " +command displays information on jobs, job steps, status, and exitcodes by +default. +You can tailor the output with the use of the +\f3\-\-fields=\fP +option to specify the fields to be shown. +.PP +For the root user, the +.BR "sstat " +command displays job status data for any job running on the system. +.PP +For the non\-root user, the +.BR "sstat " +command limits the display of job status data to jobs that were +launched with their own user identifier (UID) by default. + +.SS "Options" +.TP "10" + +.TP "3" +\(bu +\f3jobid\fP +.IP +and +\f3\-\-fields=\fP +options. +.IP + +.TP +\f3\-F \fP\f2field_list\fP \f3,\fP \f3\-\-fields\fP\f3=\fP\f2field_list\fP +Displays the job status data specified by the +\f2field_list\fP +operand, which is a comma\-separated list of fields. +Space characters are not allowed in the +\f2field_list\fP\c +\&. +.IP +See the +\f3\-\-help\-fields\fP +option for a list of the available fields. +See the section titled "Job Status Fields" for a description of +each field. +.IP +The job accounting data is displayed in the order specified by the +\f2field_list\fP +operand. +Thus, the following two commands display the same data but in different order: +.RS +.PP +.nf +.ft 3 +# sstat \-\-fields=jobid,state +Jobid State +\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- +3 COMPLETED +3.0 COMPLETED + +.ft 1 +.fi +.RE +.RS +.PP +.nf +.ft 3 +# sacct \-\-fields=status,jobid +State Jobid +\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- +COMPLETED 3 +COMPLETED 3.0 + +.ft 1 +.fi +.RE +.IP +The default value for the +\f2field_list\fP +operand is +\f3"jobid,vsize,rss,pages,cputime,ntasks,state"\fP\c +\&. +.IP +This option has no effect when the +\f3\-\-dump\fP +option is also specified. + + +.TP +\f3\-h \fP\f3,\fP \f3\-\-help\fP +Displays a general help message. +.TP +\f3\-\-help\-fields\fP +Displays a list of fields that can be specified with the +\f3\-\-fields\fP +option. +.RS +.PP +.nf +.ft 3 +Fields available: +cputime jobid ntasks pages +rss state vsize + +.ft 1 +.fi +.RE +.IP +The section titled "Job Accounting Fields" describes these fields. + +.TP +\f3\-j \fP\f2job(.step)\fP \f3,\fP \f3\-\-jobs\fP\f3=\fP\f2job(.step)\fP +Displays information about the specified job(.step) or list of job(.step)s. +.IP +The +\f2job(.step)\fP +parameter is a comma\-separated list of jobs. +Space characters are not permitted in this list. +.IP +The default is to display information on all jobs. + +.TP +\f3\-\-noheader\fP +Prevents the display of the heading over the output. +The default action is to display a header. +.IP +This option has no effect when used with the +\f3\-\-dump\fP +option. + +\f3\-S \fP\f3,\fP \f3\-\-stat\fP +.IP +Queries the status of a job as the job is running displaying +the following data: +.RS +.TP "3" +\(bu +\f3jobid\fP +.TP "3" +\(bu +\f3vsize\fP +.TP "3" +\(bu +\f3rss\fP +.TP "3" +\(bu +\f3pages\fP +.TP "3" +\(bu +\f3cputime\fP +.TP "3" +\(bu +\f3ntasks\fP +.TP "3" +\(bu +\f3status\fP +.RE +.IP +You must also include the \-\-jobs=job(.step) option if no (.step) is +given you will recieve the job.0 step. + +.TP +\f3\-\-usage\fP +Displays a help message. + +.TP +\f3\-v \fP\f3,\fP \f3\-\-verbose\fP +Reports the state of certain variables during processing. +This option is primarily used for debugging. + +.SS "Job Status Fields" +The following describes each job accounting field: +.RS +.TP +\f3cputime\fP +Minimum CPU time of any process followed by its task id along with +the average of all processes running in the step. 
+ +.TP +\f3jobid\fP +The number of the job or job step. +It is in the form: +\f2job.jobstep\fP\c +\&. + +.TP +\f3ntasks\fP +Total number of tasks in job. + +.TP +\f3pages\fP +Maximum page faults of any process followed by its task id along with +the average of all processes running in the step. + +.TP +\f3rss\fP +Maximum resident set size of any process followed by its task id along with +the average of all processes running in the step. + +.TP +\f3state\fP +Displays the job state. +.IP +Output can be +\f3RUNNING\fP\c +\&, +\f3SUSPENDED\fP\c +\&, +\f3COMPLETED\fP\c +\&, +\f3CANCELLED\fP\c +\&, +\f3FAILED\fP\c +\&, +\f3TIMEOUT\fP\c +\&, or +\f3NODE_FAIL\fP\c +\&. + +.TP +\f3vsize\fP +Maximum Virtual Memory size of any process followed by its task id along with +the average of all processes running in the step. + +.SH "EXAMPLES" + +.SH "COPYING" +Copyright (C) 2008 Lawrence Livermore National Security. +Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +LLNL\-CODE\-402394. +.LP +This file is part of SLURM, a resource management program. +For details, see <https://computing.llnl.gov/linux/slurm/>. +.LP +SLURM is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2 of the License, or (at your option) +any later version. +.LP +SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +details. + +.SH "SEE ALSO" +\fBsacct\fR(1) diff --git a/doc/man/man1/strigger.1 b/doc/man/man1/strigger.1 index 5d06e3a6c..f7f298ad8 100644 --- a/doc/man/man1/strigger.1 +++ b/doc/man/man1/strigger.1 @@ -1,4 +1,4 @@ -.TH SCONTROL "1" "April 2007" "strigger 1.2" "Slurm components" +.TH STRIGGER "1" "May 2008" "strigger 1.3" "Slurm components" .SH "NAME" strigger \- Used set, get or clear Slurm trigger information. @@ -18,8 +18,22 @@ These events can cause actions such as the execution of an arbitrary script. Typical uses include notifying system administrators of node failures and gracefully terminating a job when it's time limit is approaching. - -\fBNOTE:\fR This command can only set triggers if run by the +A hostlist expression for the nodelist or job ID is passed as an argument +to the program. + +Trigger events are not processed instantly, but a check is performed for +trigger events on a periodic basis (currently every 15 seconds). +Any trigger events which occur within that interval will be compared +against the trigger programs set at the end of the time interval. +The trigger program will be executed once for any event occuring in +that interval. +The record of those events (e.g. nodes which went DOWN in the previous +15 seconds) will then be cleared. +The trigger program must set a new trigger before the end of the next +interval to insure that no trigger events are missed. +If desired, multiple trigger programs can be set for the same event. + +\fBIMPORTANT NOTE:\fR This command can only set triggers if run by the user \fISlurmUser\fR unless \fISlurmUser\fR is configured as user root. This is required for the \fIslurmctld\fR daemon to set the appropriate user and group IDs for the executed program. @@ -45,6 +59,15 @@ be cleared. \fB\-d\fR, \fB\-\-down\fR Trigger an event if the specified node goes into a DOWN state. 
+ +.TP +\fB\-D\fR, \fB\-\-drained\fR +Trigger an event if the specified node goes into a DRAINED state. + +.TP +\fB\-F\fR, \fB\-\-fail\fR +Trigger an event if the specified node goes into a FAILING state. + .TP \fB\-f\fR, \fB\-\-fini\fR Trigger an event when the specified job completes execution. @@ -81,7 +104,8 @@ By default, all nodes associated with the job (if \fB\-\-jobid\fR is specified) or on the system are considered for event triggers. \fBNOTE:\fR The \fB\-\-node\fR option can not be used in conjunction with the \fB\-\-jobid\fR option. When the \fB\-\-jobid\fR option is -used in conjunction with the \fB\-\-up\fR or \fB\-\-down\fR option, +used in conjunction with the \fB\-\-up\fR, \fB\-\-down\fR or +\fB\-\-drained\fR option, all nodes allocated to that job will considered the nodes used as a trigger event. @@ -159,7 +183,7 @@ Resource ID: job ID or host names or "*" for any host \fBTYPE\fP Trigger type: \fItime\fR or \fIfini\fR (for jobs only), \fIdown\fR or \fIup\fR (for jobs or nodes), or -\fIidle\fR or \fIreconfig\fR (for nodes only) +\fIdrained\fR, \fIidle\fR or \fIreconfig\fR (for nodes only) .TP \fBOFFSET\fP @@ -176,7 +200,9 @@ Pathname of the program to execute when the event occurs .SH "EXAMPLES" Execute the program "/usr/sbin/slurm_admin_notify" whenever -any node in the cluster goes down. +any node in the cluster goes down. The subject line will include +the node names which have entered the down state (passed as an +argument to the script by SLURM). .nf > cat /usr/sbin/slurm_admin_notify @@ -185,7 +211,7 @@ any node in the cluster goes down. strigger \-\-set \-\-node \-\-down \\ \-\-program=/usr/sbin/slurm_admin_notify # Notify administrator using by e\-mail - /bin/mail slurm_admin@site.com \-s NodeDown + /bin/mail slurm_admin@site.com \-s NodesDown:$* > strigger \-\-set \-\-node \-\-down \\ \-\-program=/usr/sbin/slurm_admin_notify @@ -246,7 +272,7 @@ Execute /home/joe/job_fini upon completion of job 1237. .SH "COPYING" Copyright (C) 2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man1/sview.1 b/doc/man/man1/sview.1 index ecbe0fc52..a6ca8cf1f 100644 --- a/doc/man/man1/sview.1 +++ b/doc/man/man1/sview.1 @@ -49,7 +49,7 @@ the sview command. .SH "COPYING" Copyright (C) 2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3 index 1c143f701..e74f053d2 100644 --- a/doc/man/man3/slurm_allocate_resources.3 +++ b/doc/man/man3/slurm_allocate_resources.3 @@ -258,7 +258,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. 
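The strigger.1 text added above notes that a trigger fires only once per registration and that the handler must set a new trigger before the next 15-second check so no events are missed. As a hedged sketch only (the script path and mail address below are placeholders, not part of this patch), a self-rearming handler for the newly added --fail option could follow the same pattern as the --down example in the man page:

> cat /usr/local/sbin/notify_failing_nodes
#!/bin/bash
# Node names in the FAILING state are passed as arguments by SLURM.
/bin/mail -s "NodesFailing:$*" admin@site.com < /dev/null
# Re-arm the trigger so later FAILING events are also caught.
strigger --set --node --fail --program=/usr/local/sbin/notify_failing_nodes

> strigger --set --node --fail --program=/usr/local/sbin/notify_failing_nodes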
diff --git a/doc/man/man3/slurm_checkpoint_error.3 b/doc/man/man3/slurm_checkpoint_error.3 index fe98352bb..3a5fcf213 100644 --- a/doc/man/man3/slurm_checkpoint_error.3 +++ b/doc/man/man3/slurm_checkpoint_error.3 @@ -219,7 +219,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2004 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_complete_job.3 b/doc/man/man3/slurm_complete_job.3 index 41e045769..5c3a1221b 100644 --- a/doc/man/man3/slurm_complete_job.3 +++ b/doc/man/man3/slurm_complete_job.3 @@ -75,7 +75,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2002 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_free_ctl_conf.3 b/doc/man/man3/slurm_free_ctl_conf.3 index 0c32f5b05..04faaed7a 100644 --- a/doc/man/man3/slurm_free_ctl_conf.3 +++ b/doc/man/man3/slurm_free_ctl_conf.3 @@ -148,7 +148,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2002\-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3 index c4e4cb644..4471ae0cb 100644 --- a/doc/man/man3/slurm_free_job_info_msg.3 +++ b/doc/man/man3/slurm_free_job_info_msg.3 @@ -332,7 +332,7 @@ expressions into a collection of individual node names. .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_free_job_step_info_response_msg.3 b/doc/man/man3/slurm_free_job_step_info_response_msg.3 index c692a0153..cc76f32df 100644 --- a/doc/man/man3/slurm_free_job_step_info_response_msg.3 +++ b/doc/man/man3/slurm_free_job_step_info_response_msg.3 @@ -204,7 +204,7 @@ expressions into a collection of individual node names. .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_free_node_info.3 b/doc/man/man3/slurm_free_node_info.3 index f94fe37f9..3ce4424e4 100644 --- a/doc/man/man3/slurm_free_node_info.3 +++ b/doc/man/man3/slurm_free_node_info.3 @@ -241,7 +241,7 @@ data, these index values will be invalid. .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. 
For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_free_partition_info.3 b/doc/man/man3/slurm_free_partition_info.3 index dee723fe2..37191e03b 100644 --- a/doc/man/man3/slurm_free_partition_info.3 +++ b/doc/man/man3/slurm_free_partition_info.3 @@ -189,7 +189,7 @@ expressions into a collection of individual node names. .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_get_errno.3 b/doc/man/man3/slurm_get_errno.3 index 84b8c008b..d80bf817c 100644 --- a/doc/man/man3/slurm_get_errno.3 +++ b/doc/man/man3/slurm_get_errno.3 @@ -74,7 +74,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2002 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_hostlist_create.3 b/doc/man/man3/slurm_hostlist_create.3 index 1f9cea53d..d08f47feb 100644 --- a/doc/man/man3/slurm_hostlist_create.3 +++ b/doc/man/man3/slurm_hostlist_create.3 @@ -108,7 +108,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_job_step_create.3 b/doc/man/man3/slurm_job_step_create.3 index 22fec53ab..67d848ba0 100644 --- a/doc/man/man3/slurm_job_step_create.3 +++ b/doc/man/man3/slurm_job_step_create.3 @@ -75,7 +75,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2002-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3 index 3b2e44702..0f6685bcf 100644 --- a/doc/man/man3/slurm_kill_job.3 +++ b/doc/man/man3/slurm_kill_job.3 @@ -76,7 +76,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2002 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_reconfigure.3 b/doc/man/man3/slurm_reconfigure.3 index 769bddfd1..ee6b45e26 100644 --- a/doc/man/man3/slurm_reconfigure.3 +++ b/doc/man/man3/slurm_reconfigure.3 @@ -1,4 +1,4 @@ -.TH "Slurm API" "3" "October 2005" "Morris Jette" "Slurm administrative calls" +.TH "Slurm API" "3" "May 2007" "Morris Jette" "Slurm administrative calls" .SH "NAME" slurm_delete_partition, slurm_init_part_desc_msg, slurm_reconfigure, slurm_shutdown, slurm_update_job, @@ -92,8 +92,8 @@ prior to setting values of the parameters to be changed. Note: values to zero. 
This function may only be successfully executed by user root. Note the job priority of zero represents a job that will not be scheduled. Slurm uses the priority one to represent jobs that can not be scheduled until -additional nodes are returned to service (i.e. not DOWN or DRAINED). This -permits lower priority jobs to utilize those resources which are available. +additional nodes are returned to service (i.e. not DOWN, DRAINED, or FAILED). +This permits lower priority jobs to utilize those resources which are available. .LP \fBslurm_update_node\fR Request that the state of one or more nodes be updated. Note that the state of a node (e.g. DRAINING, IDLE, etc.) may be changed, but @@ -101,7 +101,7 @@ its hardware configuration may not be changed by this function. If the hardware configuration of a node changes, update the Slurm configuration file and execute the \fBslurm_reconfigure\fR function. This function may only be successfully executed by user root. If used by some autonomous program, the state value -most likely to be used is \fBNODE_STATE_DRAIN\fR. +most likely to be used is \fBNODE_STATE_DRAIN\fR or \fBNODE_STATE_FAILING\fR. The node state flag \fBNODE_STATE_NO_RESPOND\fR may be specified without changing the underlying node state. Note that the node's \fBNODE_STATE_NO_RESPOND\fR flag will be cleared as soon as the slurmd @@ -242,9 +242,9 @@ which must be linked to your process for use (e.g. "cc \-lslurm myprog.c"). .SH "COPYING" -Copyright (C) 2002 The Regents of the University of California. +Copyright (C) 2002\-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_resume.3 b/doc/man/man3/slurm_resume.3 index 9dc6729ee..c31c9741b 100644 --- a/doc/man/man3/slurm_resume.3 +++ b/doc/man/man3/slurm_resume.3 @@ -75,7 +75,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2005\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_step_ctx_create.3 b/doc/man/man3/slurm_step_ctx_create.3 index ac62538fa..76f6cff97 100644 --- a/doc/man/man3/slurm_step_ctx_create.3 +++ b/doc/man/man3/slurm_step_ctx_create.3 @@ -204,7 +204,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2004-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man3/slurm_step_launch.3 b/doc/man/man3/slurm_step_launch.3 index 70f27685d..cda4e827e 100644 --- a/doc/man/man3/slurm_step_launch.3 +++ b/doc/man/man3/slurm_step_launch.3 @@ -183,7 +183,7 @@ which must be linked to your process for use .SH "COPYING" Copyright (C) 2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. 
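The slurm_update_node() notes above (allowing FAILING in addition to DRAIN as a target state) can also be exercised from the command line with scontrol rather than the C API; a brief, hypothetical illustration (the node name and reason string are examples only):

> scontrol update NodeName=tux012 State=DRAIN Reason="bad DIMM"
> scontrol update NodeName=tux012 State=RESUME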
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5 index a059f07b5..29bcb9520 100644 --- a/doc/man/man5/bluegene.conf.5 +++ b/doc/man/man5/bluegene.conf.5 @@ -1,4 +1,4 @@ -.TH "bluegene.conf" "5" "November 2006" "bluegene.conf 1.2" "Slurm configuration file" +.TH "bluegene.conf" "5" "April 2008" "bluegene.conf 1.2" "Slurm configuration file" .SH "NAME" bluegene.conf \- Slurm configuration file for BlueGene systems .SH "DESCRIPTION" @@ -16,51 +16,6 @@ of the command "scontrol reconfigure" unless otherwise noted. .LP The overall configuration parameters available include: -.TP -\fBBasePartitionNodeCount\fR -The number of c\-nodes per base partition. -There is no default value and this must be specified. - -.TP -\fBBridgeAPILogFile\fR -Fully qualified pathname of a into which the Bridge API logs are -to be written. -There is no default value. - -.TP -\fBBridgeAPIVerbose\fR -Specify how verbose the Bridge API logs should be. -The default value is 0. -.RS -.TP -\fB0\fR: Log only error and warning messages -.TP -\fB1\fR: Log level 0 and information messasges -.TP -\fB2\fR: Log level 1 and basic debug messages -.TP -\fB3\fR: Log level 2 and more debug message -.TP -\fB4\fR: Log all messages -.RE - -.TP -\fBBlrtsImage\fR -BlrtsImage used for creation of all bgblocks. -There is no default value and this must be specified. -.TP -\fBLinuxImage\fR -LinuxImage used for creation of all bgblocks. -There is no default value and this must be specified. -.TP -\fBMloaderImage\fR -MloaderImage used for creation of all bgblocks. -There is no default value and this must be specified. -.TP -\fBRamDiskImage\fR -RamDiskImage used for creation of all bgblocks. -There is no default value and this must be specified. - .TP \fBAltBlrtsImage\fR Alternative BlrtsImage. This is an optional field only used for @@ -93,6 +48,40 @@ the user groups allowed to use this image (i.e. Groups=da,jette) if Groups= is not stated then this image will be able to be used by all groups. You can but as many alternative images as you want in the conf file. +.TP +\fBBasePartitionNodeCount\fR +The number of c\-nodes per base partition. +There is no default value and this must be specified. (For bgl systems this +is usually 512) + +.TP +\fBBlrtsImage\fR +BlrtsImage used for creation of all bgblocks. +There is no default value and this must be specified. + +.TP +\fBBridgeAPILogFile\fR +Fully qualified pathname of a into which the Bridge API logs are +to be written. +There is no default value. + +.TP +\fBBridgeAPIVerbose\fR +Specify how verbose the Bridge API logs should be. +The default value is 0. +.RS +.TP +\fB0\fR: Log only error and warning messages +.TP +\fB1\fR: Log level 0 and information messasges +.TP +\fB2\fR: Log level 1 and basic debug messages +.TP +\fB3\fR: Log level 2 and more debug message +.TP +\fB4\fR: Log all messages +.RE + .TP \fBLayoutMode\fR Describes how SLURM should create bgblocks. @@ -112,15 +101,34 @@ and starvation of larger jobs. \fBUse this mode with caution.\fR .RE +.TP +\fBLinuxImage\fR +LinuxImage used for creation of all bgblocks. +There is no default value and this must be specified. + +.TP +\fBMloaderImage\fR +MloaderImage used for creation of all bgblocks. +There is no default value and this must be specified. + .TP \fBNodeCardNodeCount\fR Number of c\-nodes per node card. -There is no default value and this must be specified. +There is no default value and this must be specified. 
(For bgl systems this +is usually 32) .TP \fBNumPsets\fR The Numpsets used for creation of all bgblocks equals this value multiplied by the number of base partitions in the bgblock. +There is no default value and this must be specified. The typical settings +for bgl systems goes as follows... For IO rich systems 64 is the value that +should be used to create small blocks. For systems that are not IO rich, or +you do not wish to create small blocks, 8 is usually the number to use. + +.TP +\fBRamDiskImage\fR +RamDiskImage used for creation of all bgblocks. There is no default value and this must be specified. .LP @@ -188,7 +196,7 @@ BasePartitionNodeCnt=512 .br NodeCardNodeCnt=32 .br -NumPsets=8 +NumPsets=64 # An I/O rich environment .br LayoutMode=STATIC .br @@ -217,7 +225,7 @@ BPs=[333x333] Type=SMALL NodeCards=4 Quarters=3 # 1/16 * 4 + 1/4 * 3 .SH "COPYING" Copyright (C) 2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5 index a49f3ada3..fb26318c4 100644 --- a/doc/man/man5/slurm.conf.5 +++ b/doc/man/man5/slurm.conf.5 @@ -1,11 +1,12 @@ -.TH "slurm.conf" "5" "July 2007" "slurm.conf 1.2" "Slurm configuration file" +.TH "slurm.conf" "5" "May 2008" "slurm.conf 1.3" "Slurm configuration file" .SH "NAME" slurm.conf \- Slurm configuration file .SH "DESCRIPTION" \fB/etc/slurm.conf\fP is an ASCII file which describes general SLURM configuration information, the nodes to be managed, information about how those nodes are grouped into partitions, and various scheduling -parameters associated with those partitions. +parameters associated with those partitions. This file should be +consistent across all nodes in the cluster. .LP The file location can be modified at system build time using the DEFAULT_SLURM_CONF parameter. In addition, you can use the @@ -14,6 +15,12 @@ location of this file. The SLURM daemons also allow you to override both the built\-in and environment\-provided location using the "\-f" option on the command line. .LP +Note the while SLURM daemons create log files and other files as needed, +it treats the lack of parent directories as a fatal error. +This prevents the daemons from running if critical file systems are +not mounted and will minimize the risk of cold\-starting (starting +without preserving jobs). +.LP The contents of the file are case insensitive except for the names of nodes and partitions. Any text following a "#" in the configuration file is treated as a comment through the end of that line. @@ -27,6 +34,81 @@ and then a file name, that file will be included inline with the current configuration file. .LP The overall configuration parameters available include: + +.TP +\fBAccountingStorageEnforce\fR +If set to a non-zero value and the user, partition, account association is not +defined for a job in the accounting database then prevent the job from being +executed. +The default value is zero. + +.TP +\fBAccountingStorageHost\fR +Define the name of the host where the database is running we are going +to store the accounting data. +Only used for database type storage plugins, ignored otherwise. +Also see \fBDefaultStorageHost\fR. + +.TP +\fBAccountingStorageLoc\fR +Specifies the location of the file or database where accounting +records are written. 
+Also see \fBDefaultStorageLoc\fR. + +.TP +\fBAccountingStoragePass\fR +Define the password used to gain access to the database to store the +accounting data. +Only used for database type storage plugins, ignored otherwise. +In the case of Slurm DBD (Data Base Daemon) with Munge authentication this can be +configured to use a Munge daemon specifically configured to provide authentication +between clusters while the default Munge daemon provides authentication within a cluster. +In that case, \fBAccountingStoragePass\fR should specify the named port to be used +for communications with the alternate Munge daemon (e.g. +"/var/run/munge/global.socket.2"). The default value is NULL. +Also see \fBDefaultStoragePass\fR. + +.TP +\fBAccountingStoragePort\fR +Define the port the database server is listening on where we are going +to store the accounting data. +Only used for database type storage plugins, ignored otherwise. +Also see \fBDefaultStoragePort\fR. + +.TP +\fBAccountingStorageType\fR +Define the accounting storage mechanism type. +Acceptable values at present include +"accounting_storage/filetxt", "accounting_storage/gold", +"accounting_storage/mysql", "accounting_storage/none", +"accounting_storage/pgsql", and "accounting_storage/slurmdbd". +The value "accounting_storage/filetxt" indicates that accounting records +will be written to a the file specified by the +\fBAccountingStorageLoc\fR parameter. +The value "accounting_storage/gold" indicates that account records +will be written to Gold +(http://www.clusterresources.com/pages/products/gold-allocation-manager.php), +which maintains its own database. +The value "accounting_storage/mysql" indicates that accounting records +should be written to a MySQL database specified by the +\fBAccountingStorageLoc\fR parameter. +The default value is "accounting_storage/none", which means that +account records are not maintained. +The value "accounting_storage/pgsql" indicates that accounting records +should be written to a PostgreSQL database specified by the +\fBAccountingStorageLoc\fR parameter. +The value "accounting_storage/slurmdbd" indicates that accounting records +will be written to SlurmDDB, which manages an underlying MySQL or +PostgreSQL database. See "man slurmdbd" for more information. +Also see \fBDefaultStorageType\fR. + +.TP +\fBAccountingStorageUser\fR +Define the name of the user we are going to connect to the database +with to store the accounting data. +Only used for database type storage plugins, ignored otherwise. +Also see \fBDefaultStorageUser\fR. + .TP \fBAuthType\fR Define the authentication method for communications between SLURM @@ -38,13 +120,15 @@ communication messages is not verified. This may be fine for testing purposes, but \fBdo not use "auth/none" if you desire any security\fR. "auth/authd" indicates that Brett Chun's authd is to be used (see -"http://www.theether.org/authd/" for more information). -"auth/munge" indicates that Chris Dunlap's munge is to be used +"http://www.theether.org/authd/" for more information, Note that +authd is no longer actively supported). +"auth/munge" indicates that LLNL's MUNGE is to be used (this is the best supported authentication mechanism for SLURM, -see "https://computing.llnl.gov/linux/munge/" for more information). +see "http://home.gna.org/munge/" for more information). All SLURM daemons and commands must be terminated prior to changing the value of \fBAuthType\fR and later restarted (SLURM jobs can be preserved). 
+
.TP
\fBBackupAddr\fR
Name that \fBBackupController\fR should be referred to in
@@ -76,15 +160,21 @@ The default value is 0 to disable caching group data.
.TP
\fBCheckpointType\fR
Define the system\-initiated checkpoint method to be used for user jobs.
-The slurmctld daemon must be restarted for a change in CheckpointType
+The slurmctld daemon must be restarted for a change in \fBCheckpointType\fR
to take effect.
Acceptable values at present include
"checkpoint/aix" (only on AIX systems),
-"checkpoint/ompi" (requires OpenMPI version 1.3 or higher), and
+"checkpoint/ompi" (requires OpenMPI version 1.3 or higher),
+"checkpoint/xlch" (for XLCH, requires that SlurmUser be root), and
"checkpoint/none".
-(only on AIX systems).
The default value is "checkpoint/none".

+.TP
+\fBClusterName\fR
+The name by which this SLURM managed cluster is known for accounting
+purposes. This is needed to distinguish between accounting data from
+multiple clusters being recorded in a single database.
+
.TP
\fBControlAddr\fR
Name that \fBControlMachine\fR should be referred to in
@@ -97,23 +187,108 @@ By default the \fBControlAddr\fR will be identical in value to

.TP
\fBControlMachine\fR
-The name of the machine where SLURM control functions are executed.
-This should be a node name without the full domain name (e.g. "lx0001").
+The name of the machine where SLURM control functions are executed
+as returned by the \fIgethostname()\fR function cut at the first dot,
+or the \fIhostname \-s\fR command (e.g. use "tux001" rather than "tux001.my.com").
This value must be specified.
See the \fBRELOCATING CONTROLLERS\fR section if you change this.

+.TP
+\fBCryptoType\fR
+Define the cryptographic signature tool to be used in the creation of
+job step credentials.
+The slurmctld daemon must be restarted for a change in \fBCryptoType\fR
+to take effect.
+Acceptable values at present include "crypto/munge" and "crypto/openssl".
+OpenSSL offers the best performance and is available with an
+Apache style open source license.
+Munge is a little slower, but is available under the GNU General Public
+License (GPL).
+The default value is "crypto/openssl".

.TP
-\fBDisableRootJobs\fR
-If set to 1 then user root will be prevented from running any jobs
-The default value is 0, meaning user root will be able to execute jobs.
+\fBDefMemPerTask\fR
+Default real memory size available per task in MegaBytes.
+Used to avoid over\-subscribing memory and causing paging.
+Also see \fBMaxMemPerTask\fR.
+The default value is 0 (unlimited).

.TP
+\fBDefaultStorageHost\fR
+Define the name of the host where the database is running and used
+to store the accounting and job completion data.
+Only used for database type storage plugins, ignored otherwise.
+Also see \fBAccountingStorageHost\fR and \fBJobCompHost\fR.
+
+.TP
+\fBDefaultStorageLoc\fR
+Specifies the location of the file or database where accounting
+and job completion records are written.
+Also see \fBAccountingStorageLoc\fR and \fBJobCompLoc\fR.
+
+.TP
+\fBDefaultStoragePass\fR
+Define the password used to gain access to the database to store the
+accounting and job completion data.
+Only used for database type storage plugins, ignored otherwise.
+Also see \fBAccountingStoragePass\fR and \fBJobCompPass\fR.
+
+.TP
+\fBDefaultStoragePort\fR
+Define the port on which the database server used to store the
+accounting and job completion data is listening.
+Only used for database type storage plugins, ignored otherwise.
+Also see \fBAccountingStoragePort\fR and \fBJobCompPort\fR.
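As a rough illustration of how the accounting parameters described above fit together, a slurm.conf fragment using the slurmdbd storage plugin might read as follows (the cluster and host names are placeholders, not part of this patch):

ClusterName=tux
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageHost=dbserver

# or, for simple flat-file accounting without a database:
AccountingStorageType=accounting_storage/filetxt
AccountingStorageLoc=/var/log/slurm/accounting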
+
+.TP
+\fBDefaultStorageType\fR
+.ad l
+Define the accounting and job completion storage mechanism type.
+Acceptable values at present include
+"filetxt", "gold", "mysql", "none", "pgsql", and "slurmdbd".
+The value "filetxt" indicates that records will be written to a text file.
+The value "gold" indicates that records will be written to Gold
+(http://www.clusterresources.com/pages/products/gold-allocation-manager.php),
+which maintains its own database.
+The value "mysql" indicates that accounting records will be written to
+a MySQL database.
+The default value is "none", which means that records are not maintained.
+The value "pgsql" indicates that records will be written to a PostgreSQL
+database.
+The value "slurmdbd" indicates that records will be written to SlurmDBD,
+which maintains its own database. See "man slurmdbd" for more information.
+Also see \fBAccountingStorageType\fR and \fBJobCompType\fR.
+.ad
+
+.TP
+\fBDefaultStorageUser\fR
+Define the name of the user we are going to connect to the database
+with to store the accounting and job completion data.
+Only used for database type storage plugins, ignored otherwise.
+Also see \fBAccountingStorageUser\fR and \fBJobCompUser\fR.
+
+.TP
+\fBDisableRootJobs\fR
+If set to "YES" then user root will be prevented from running any jobs.
+The default value is "NO", meaning user root will be able to execute jobs.
+\fBDisableRootJobs\fR may also be set by partition.
+
\fBEpilog\fR
Fully qualified pathname of a script to execute as user root on every
node when a user's job completes (e.g. "/usr/local/slurm/epilog"). This may
be used to purge files, disable user login, etc. By default there is no
epilog.

+.TP
+\fBEpilogMsgTime\fR
+The number of microseconds that the slurmctld daemon requires to process
+an epilog completion message from the slurmd daemons. This parameter can
+be used to prevent a burst of epilog completion messages from being sent
+at the same time, which should help prevent lost messages and improve
+throughput for large jobs.
+The default value is 2000 microseconds.
+For a 1000 node job, this spreads the epilog completion messages out over
+two seconds.
+
.TP
\fBFastSchedule\fR
Controls how a nodes configuration specifications in slurm.conf are used.
@@ -131,7 +306,7 @@ Consider which value you want to be used for scheduling purposes.
\fB1\fR (default)
Consider the configuration of each node to be that specified in the
configuration file and any node with less
-than the configured resouces will be set DOWN.
+than the configured resources will be set DOWN.
.TP
\fB0\fR
Base scheduling decisions upon the actual configuration of
@@ -160,10 +335,22 @@ load it from a cache file.
Applies when the srun or sbatch \fI--get-user-env\fR option is used.
Default value is 2 seconds.
.TP
-\fBHeartbeatInterval\fR
-Defunct paramter.
-Interval of heartbeat for slurmd daemon is half of \fBSlurmdTimeout\fR.
-Interval of heartbeat for slurmctld daemon is half of \fBSlurmctldTimeout\fR.
+\fBHealthCheckInterval\fR
+The interval in seconds between executions of \fBHealthCheckProgram\fR.
+The default value is zero, which disables execution.
+
+.TP
+\fBHealthCheckProgram\fR
+Fully qualified pathname of a script to execute as user root periodically
+on all compute nodes that are not in the DOWN state. This may be used to
+verify the node is fully operational and DRAIN it otherwise.
+The interval is controlled using the \fBHealthCheckInterval\fR parameter.
+Note that the \fBHealthCheckProgram\fR will be executed at the same time
+on all nodes to minimize its impact upon parallel programs.
+This program will be killed if it does not terminate normally within
+60 seconds.
+By default, no program will be executed.
+
.TP
\fBInactiveLimit\fR
The interval, in seconds, a job or job step is permitted to be inactive
@@ -182,49 +369,93 @@ This limit is ignored for jobs running in partitions with the
responsible for the job).
The default value is unlimited (zero).
May not exceed 65533.
+
.TP
-\fBJobAcctType\fR
+\fBJobAcctGatherType\fR
Define the job accounting mechanism type.
-Acceptable values at present include "jobacct/aix" (for AIX operating
-system), "jobacct/linux" (for Linux operating system) and "jobacct/none"
+Acceptable values at present include "jobacct_gather/aix" (for AIX operating
+system), "jobacct_gather/linux" (for Linux operating system) and "jobacct_gather/none"
(no accounting data collected).
-The default value is "jobacct/none".
-In order to use the \fBsacct\fR tool, "jobacct/aix" or "jobacct/linux"
+The default value is "jobacct_gather/none".
+In order to use the \fBsacct\fR tool, "jobacct_gather/aix" or "jobacct_gather/linux"
must be configured.
+
.TP
-\fBJobAcctLogFile\fR
-Define the location where job accounting logs are to be written.
-For jobacct/none this parameter is ignored.
-For jobacct/linux this is the fully\-qualified file name for the data file.
+\fBJobAcctGatherFrequency\fR
+Define the job accounting sampling interval.
+For jobacct_gather/none this parameter is ignored.
+For jobacct_gather/aix and jobacct_gather/linux the parameter is a number of
+seconds between sampling job state.
+The default value is 30 seconds.
+A value of zero disables the periodic job sampling and provides accounting
+information only on job termination (reducing SLURM interference with the job).
+
.TP
-\fBJobAcctFrequency\fR
-Define the polling frequencys to pass to the job accounting plugin.
-For jobacct/none this parameter is ignored.
-For jobacct/linux the parameter is a number is seconds between polls.
+\fBJobCompHost\fR
+Define the name of the host where the database is running and used
+to store the job completion data.
+Only used for database type storage plugins, ignored otherwise.
+Also see \fBDefaultStorageHost\fR.
+
.TP
\fBJobCompLoc\fR
The interpretation of this value depends upon the logging mechanism
-specified by the \fBJobCompType\fR parameter.
+specified by the \fBJobCompType\fR parameter: either a filename or a
+database name.
+Also see \fBDefaultStorageLoc\fR.
+
+.TP
+\fBJobCompPass\fR
+Define the password used to gain access to the database to store the job completion data.
+Only used for database type storage plugins, ignored otherwise.
+Also see \fBDefaultStoragePass\fR.
+
+.TP
+\fBJobCompPort\fR
+Define the port on which the database server used to store the
+job completion data is listening.
+Only used for database type storage plugins, ignored otherwise.
+Also see \fBDefaultStoragePort\fR.
+
.TP
\fBJobCompType\fR
Define the job completion logging mechanism type.
Acceptable values at present include "jobcomp/none", "jobcomp/filetxt",
-and "jobcomp/script".
+"jobcomp/mysql", "jobcomp/pgsql", "jobcomp/script" and "jobcomp/slurmdbd".
The default value is "jobcomp/none", which means that upon job completion
the record of the job is purged from the system.
The value "jobcomp/filetxt" indicates that a record of the job should be
written to a text file specified by the \fBJobCompLoc\fR parameter.
+The value "jobcomp/mysql" indicates that a record of the job should be +written to a mysql database specified by the \fBJobCompLoc\fR parameter. +The value "jobcomp/pgsql" indicates that a record of the job should be +written to a postgresql database specified by the \fBJobCompLoc\fR parameter. The value "jobcomp/script" indicates that a script specified by the \fBJobCompLoc\fR parameter is to be executed with environment variables indicating the job information. +The value "jobcomp/slurmdbd" indicates that job completion records +will be written to SlurmDbd, which maintains its own database. See +"man slurmdbd" for more information. +Also see \fBDefaultStorageType\fR. + +.TP +\fBJobCompUser\fR +Define the name of the user we are going to connect to the database +with to store the job completion data. +Only used for database type storage plugins, ignored otherwise. +Also see \fBDefaultStorageUser\fR. + .TP \fBJobCredentialPrivateKey\fR Fully qualified pathname of a file containing a private key used for authentication by Slurm daemons. +This parameter is ignored if \fBCryptType=munge\fR. + .TP \fBJobCredentialPublicCertificate\fR Fully qualified pathname of a file containing a public key used for authentication by Slurm daemons. +This parameter is ignored if \fBCryptType=munge\fR. .TP \fBJobFileAppend\fR @@ -233,22 +464,40 @@ exist when the job is started. If \fBJobFileAppend\fR is set to a value of 1, then append to the existing file. By default, any existing file is truncated. -NOTE: This variable does not appear in the output of the command -"scontrol show config" in versions of SLURM less than version 1.3. .TP -\fBKillTree\fR -This option is mapped to "ProctrackType=proctrack/linuxproc". -It will be removed from a future release. +\fBJobRequeue\fR +This option controls what to do by default after a node failure. +If \fBJobRequeue\fR is set to a value of 1, then any job running +on the failed node will be requeued for execution on different nodes. +If \fBJobRequeue\fR is set to a value of 0, then any job running +on the failed node will be terminated. +Use the \fBsbatch\fR \fI\-\-no\-requeue\fR or \fI\-\-requeue\fR +option to change the default behavior for individual jobs. +The default value is 1. + .TP \fBKillWait\fR The interval, in seconds, given to a job's processes between the SIGTERM and SIGKILL signals upon reaching its time limit. If the job fails to terminate gracefully -in the interval specified, it will be forcably terminated. +in the interval specified, it will be forcibly terminated. The default value is 30 seconds. May not exceed 65533. +.TP +\fBLicenses\fR +Specification of licenses (or other resources available on all +nodes of the cluster) which can be allocated to jobs. +License names can optionally be followed by an asterisk +and count with a default count of one. +Multiple license names should be comma separated (e.g. +"Licenses=foo*4,bar"). +Note that SLURM prevents jobs from being scheduled if their +required license specification is not available. +SLURM does not prevent jobs from using licenses that are +not explicitly listed in the job submission specification. + .TP \fBMailProg\fR Fully qualified pathname to the program used to send email per user request. @@ -260,11 +509,18 @@ The maximum number of jobs SLURM can have in its active database at one time. Set the values of \fBMaxJobCount\fR and \fBMinJobAge\fR to insure the slurmctld daemon does not exhaust its memory or other resources. 
Once this limit is reached, requests to submit additional -jobs will fail. The default value is 2000 jobs. This value may not +jobs will fail. The default value is 5000 jobs. This value may not be reset via "scontrol reconfig". It only takes effect upon restart of the slurmctld daemon. May not exceed 65533. +.TP +\fBMaxMemPerTask\fR +Maximum real memory size available per task in MegaBytes. +Used to avoid over\-subscribing memory and causing paging. +Also see \fBDefMemPerTask\fR. +The default value is 0 (unlimited). + .TP \fBMessageTimeout\fR Time permitted for a round\-trip communication to complete @@ -297,6 +553,7 @@ Identifies the places in which to look for SLURM plugins. This is a colon\-separated list of directories, like the PATH environment variable. The default value is "/usr/local/lib/slurm". + .TP \fBPlugStackConfig\fR Location of the config file for SLURM stackable plugins that use @@ -306,6 +563,14 @@ be called before and/or after execution of each task spawned as part of a user's job step. Default location is "plugstack.conf" in the same directory as the system slurm.conf. For more information on SPANK plugins, see the \fBspank\fR(8) manual. + +.TP +\fBPrivateData\fR +If non-zero then users are unable to view jobs or job steps belonging +to other users (except for SlurmUser or root, who can view all jobs). +The default value is "0", permitting any user to view any jobs or +job steps. + .TP \fBProctrackType\fR Identifies the plugin to be used for process tracking. @@ -323,7 +588,7 @@ NOTE: "proctrack/linuxproc" is not compatible with "switch/elan." Acceptable values at present include: .RS .TP -\fBproctrack/aix\fR which uses an AIX kernel extenstion and is +\fBproctrack/aix\fR which uses an AIX kernel extension and is the default for AIX systems .TP \fBproctrack/linuxproc\fR which uses linux process tree using @@ -427,6 +692,37 @@ appearing in this list. The user can override this by specifying which resource limits to propagate with the srun commands "\-\-propagate" option. See \fBPropagateResourceLimits\fR above for a list of valid limit names. +.TP +\fBResumeProgram\fR +SLURM supports a mechanism to reduce power consumption on nodes that +remain idle for an extended period of time. +This is typically accomplished by reducing voltage and frequency. +\fBResumeProgram\fR is the program that will be executed when a node +in power save mode is assigned work to perform. +The program executes as \fBSlurmUser\fR. +The argument to the program will be the names of nodes to +be removed from power savings mode (using SLURM's hostlist +expression format). +By default no program is run. +Related configuration options include \fBResumeRate\fR, \fBSuspendRate\fR, +\fBSuspendTime\fR, \fBSuspendProgram\fR, \fBSuspendExcNodes\fR, and +\fBSuspendExcParts\fR. +More information is available at the SLURM web site +(https://computing.llnl.gov/linux/slurm/power_save.html). + +.TP +\fBResumeRate\fR +The rate at which nodes in power save mode are returned to normal +operation by \fBResumeProgram\fR. +The value is number of nodes per minute and it can be used to prevent +power surges if a large number of nodes in power save mode are +assigned work at the same time (e.g. a large job starts). +A value of zero results in no limits being imposed. +The default value is 60 nodes per minute. +Related configuration options include \fBResumeProgram\fR, \fBSuspendRate\fR, +\fBSuspendTime\fR, \fBSuspendProgram\fR, \fBSuspendExcNodes\fR, and +\fBSuspendExcParts\fR. 
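To show how the power-save options referenced above relate to one another, a minimal slurm.conf sketch follows (the program paths, node names and the idle time are placeholders; the Suspend* options themselves are described further below):

SuspendTime=1800
SuspendProgram=/usr/local/sbin/node_suspend
SuspendRate=60
ResumeProgram=/usr/local/sbin/node_resume
ResumeRate=60
SuspendExcNodes=tux[000-003]
SuspendExcParts=debug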
+ .TP \fBReturnToService\fR If set to 1, then a non\-responding (DOWN) node will become available @@ -437,18 +733,13 @@ failure, etc.), its state will not automatically be changed. The default value is 0, which means that a node will remain in the DOWN state until a system administrator explicitly changes its state (even if the slurmd daemon registers and resumes communications). -.TP -\fBSchedulerRootFilter\fR -If set to '1' then scheduler will filter and avoid \fBRootOnly\fR -partitions (let root user or process schedule these partitions). -Otherwise scheduler will treat \fBRootOnly\fR partitions as any -other standard partition. -Currently only supported by sched/backfill schedululer plugin. + .TP \fBSchedulerPort\fR The port number on which slurmctld should listen for connection requests. This value is only used by the Maui Scheduler (see \fBSchedulerType\fR). The default value is 7321. + .TP \fBSchedulerRootFilter\fR Identifies whether or not \fBRootOnly\fR partitions should be filtered from @@ -457,12 +748,19 @@ are treated like any other partition. If set to 1, then \fBRootOnly\fR partitions are exempt from any external scheduling activities. The default value is 1. Currently only used by the built\-in backfill scheduling module "sched/backfill" (see \fBSchedulerType\fR). + +.TP +\fBSchedulerTimeSlice\fR +Number of seconds in each time slice when \fBSchedulerType=sched/gang\fR. +The default value is 30. + .TP \fBSchedulerType\fR Identifies the type of scheduler to be used. Acceptable values include "sched/builtin" for the built\-in FIFO scheduler, "sched/backfill" for a backfill scheduling module to augment the default FIFO scheduling, +"sched/gang" for gang scheduler (time\-slicing of parallel jobs), "sched/hold" to hold all newly arriving jobs if a file "/etc/slurm.hold" exists otherwise use the built\-in FIFO scheduler, and "sched/wiki" for the Wiki interface to the Maui Scheduler. @@ -481,6 +779,7 @@ should have their priority change from zero to some large number. The \fBscontrol\fR command can be used to change job priorities. The \fBslurmctld\fR daemon must be restarted for a change in scheduler type to become effective. + .TP \fBSelectType\fR Identifies the type of resource selection algorithm to be used. @@ -489,7 +788,7 @@ Acceptable values include .TP \fBselect/linear\fR for allocation of entire nodes assuming a -one\-dimentional array of nodes in which sequentially ordered +one\-dimensional array of nodes in which sequentially ordered nodes are preferable. This is the default value for non\-BlueGene systems. .TP @@ -497,23 +796,29 @@ This is the default value for non\-BlueGene systems. The resources within a node are individually allocated as consumable resources. Note that whole nodes can be allocated to jobs for selected -partitions by using the \fIShared=EXCLUSIVE\fR option. +partitions by using the \fIShared=Exclusive\fR option. See the partition \fBShared\fR parameter for more information. .TP \fBselect/bluegene\fR -for a three\-dimentional BlueGene system. +for a three\-dimensional BlueGene system. The default value is "select/bluegene" for BlueGene systems. .RE .TP \fBSelectTypeParameters\fR -This only apply for \fISelectType=select/cons_res\fR. +The permitted values of \fBSelectTypeParameters\fR depend upon the +configured value of \fBSelectType\fR. +\fBSelectType=select/bluegene\fR supports no \fBSelectTypeParameters\fR. 
+The only supported option for \fBSelectType=select/linear\fR is +\fBCR_Memory\fR, which treats memory as a consumable resource and +prevents memory over subscription with job preemption or gang scheduling. +The following values are supported for \fBSelectType=select/cons_res\fR: .RS .TP \fBCR_CPU\fR CPUs are consumable resources. There is no notion of sockets, cores or threads. -On a multi\-core system, each core will be consided a CPU. +On a multi\-core system, each core will be considered a CPU. On a multi\-core and hyperthreaded system, each thread will be considered a CPU. On single\-core systems, each CPUs will be considered a CPU. @@ -535,7 +840,7 @@ Memory and CPUs are consumable resources. .TP \fBCR_Memory\fR Memory is a consumable resource. -NOTE: This implies \fIShared=Yes\fR for all partitions. +NOTE: This implies \fIShared=YES\fR or \fIShared=FORCE\fR for all partitions. .RE .TP @@ -543,22 +848,26 @@ NOTE: This implies \fIShared=Yes\fR for all partitions. The name of the user that the \fBslurmctld\fR daemon executes as. For security purposes, a user other than "root" is recommended. The default value is "root". + .TP \fBSlurmctldDebug\fR The level of detail to provide \fBslurmctld\fR daemon's logs. Values from 0 to 7 are legal, with `0' being "quiet" operation and `7' being insanely verbose. The default value is 3. + .TP \fBSlurmctldLogFile\fR Fully qualified pathname of a file into which the \fBslurmctld\fR daemon's logs are written. The default value is none (performs logging via syslog). + .TP \fBSlurmctldPidFile\fR Fully qualified pathname of a file into which the \fBslurmctld\fR daemon may write its process id. This may be used for automated signal processing. The default value is "/var/run/slurmctld.pid". + .TP \fBSlurmctldPort\fR The port number that the SLURM controller, \fBslurmctld\fR, listens @@ -567,18 +876,21 @@ build time. If none is explicitly specified, it will be set to 6817. NOTE: Either \fBslurmctld\fR and \fBslurmd\fR daemons must not execute on the same nodes or the values of \fBSlurmctldPort\fR and \fBSlurmdPort\fR must be different. + .TP \fBSlurmctldTimeout\fR The interval, in seconds, that the backup controller waits for the primary controller to respond before assuming control. The default value is 120 seconds. May not exceed 65533. + .TP \fBSlurmdDebug\fR The level of detail to provide \fBslurmd\fR daemon's logs. Values from 0 to 7 are legal, with `0' being "quiet" operation and `7' being insanely verbose. The default value is 3. + .TP \fBSlurmdLogFile\fR Fully qualified pathname of a file into which the \fBslurmd\fR daemon's @@ -586,11 +898,13 @@ logs are written. The default value is none (performs logging via syslog). Any "%h" within the name is replaced with the hostname on which the \fBslurmd\fR is running. + .TP \fBSlurmdPidFile\fR Fully qualified pathname of a file into which the \fBslurmd\fR daemon may write its process id. This may be used for automated signal processing. The default value is "/var/run/slurmd.pid". + .TP \fBSlurmdPort\fR The port number that the SLURM compute node daemon, \fBslurmd\fR, listens @@ -599,6 +913,7 @@ build time. If none is explicitly specified, its value will be 6818. NOTE: Either slurmctld and slurmd daemons must not execute on the same nodes or the values of \fBSlurmctldPort\fR and \fBSlurmdPort\fR must be different. 
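As a tentative example of the scheduling and selection options described above, gang scheduling with consumable CPU resources might be configured as follows (values are illustrative only):

SchedulerType=sched/gang
SchedulerTimeSlice=30
SelectType=select/cons_res
SelectTypeParameters=CR_CPU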
+ .TP \fBSlurmdSpoolDir\fR Fully qualified pathname of a directory into which the \fBslurmd\fR @@ -611,6 +926,7 @@ shared memory lockfile, and \fBshould not be changed\fR unless the system is being cleanly restarted. If the location of \fBSlurmdSpoolDir\fR is changed and \fBslurmd\fR is restarted, the new daemon will attach to a different shared memory region and lose track of any running jobs. + .TP \fBSlurmdTimeout\fR The interval, in seconds, that the SLURM controller waits for \fBslurmd\fR @@ -622,6 +938,21 @@ a DOWN state indicating a non\-responsive \fBslurmd\fR, and some other tool will take responsibility for monitoring the state of each compute node and its \fBslurmd\fR daemon. The value may not exceed 65533. + +.TP +\fBSrunEpilog\fR +Fully qualified pathname of an executable to be run by srun following the +completion of a job step. The command line arguments for the executable will +be the command and arguments of the job step. This configuration parameter +may be overridden by srun's \fB\-\-epilog\fR parameter. + +.TP +\fBSrunProlog\fR +Fully qualified pathname of an executable to be run by srun prior to the +launch of a job step. The command line arguments for the executable will +be the command and arguments of the job step. This configuration parameter +may be overridden by srun's \fB\-\-prolog\fR parameter. + .TP \fBStateSaveLocation\fR Fully qualified pathname of a directory into which the SLURM controller, @@ -630,21 +961,66 @@ SLURM state will saved here to recover from system failures. \fBSlurmUser\fR must be able to create files in this directory. If you have a \fBBackupController\fR configured, this location should be readable and writable by both systems. +Since all running and pending job information is stored here, the use of +a reliable file system (e.g. RAID) is recommended. The default value is "/tmp". If any slurm daemons terminate abnormally, their core files will also be written into this directory. + .TP -\fBSrunEpilog\fR -Fully qualified pathname of an executable to be run by srun following the -completion of a job step. The command line arguments for the executable will -be the command and arguments of the job step. This configuration parameter -may be overridden by srun's \fB\-\-epilog\fR parameter. +\fBSuspendExcNodes\fR +Specifies the nodes which are to not be placed in power save mode, even +if the node remains idle for an extended period of time. +Use SLURM's hostlist expression to identify nodes. +By default no nodes are excluded. +Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, +\fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR and +\fBSuspendExcParts\fR. + .TP -\fBSrunProlog\fR -Fully qualified pathname of an executable to be run by srun prior to the -launch of a job step. The command line arguments for the executable will -be the command and arguments of the job step. This configuration parameter -may be overridden by srun's \fB\-\-prolog\fR parameter. +\fBSuspendExcParts\fR +Specifies the partitions whose nodes are to not be placed in power save +mode, even if the node remains idle for an extended period of time. +Multiple partitions can be identified and separated by commas. +By default no nodes are excluded. +Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, +\fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR and +\fBSuspendExcNodes\fR. 
+ +.TP +\fBSuspendProgram\fR +\fBSuspendProgram\fR is the program that will be executed when a node +remains idle for an extended period of time. +This program is expected to place the node into some power save mode. +The program executes as \fBSlurmUser\fR. +The argument to the program will be the names of nodes to +be placed into power savings mode (using SLURM's hostlist +expression format). +By default no program is run. +Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, +\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendExcNodes\fR, and +\fBSuspendExcParts\fR. + +.TP +\fBSuspendRate\fR +The rate at which nodes are place into power save mode by \fBSuspendProgram\fR. +The value is number of nodes per minute and it can be used to prevent +a large drop in power power consumption (e.g. after a large job completes). +A value of zero results in no limits being imposed. +The default value is 60 nodes per minute. +Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, +\fBSuspendProgram\fR, \fBSuspendTime\fR, \fBSuspendExcNodes\fR, and +\fBSuspendExcParts\fR. + +.TP +\fBSuspendTime\fR +Nodes which remain idle for this number of seconds will be placed into +power save mode by \fBSuspendProgram\fR, +A value of \-1 disables power save mode and is the default. +Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, +\fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendExcNodes\fR, and +\fBSuspendExcParts\fR. + .TP \fBSwitchType\fR Identifies the type of switch or interconnect used for application @@ -658,11 +1034,13 @@ All SLURM daemons, commands and running jobs must be restarted for a change in \fBSwitchType\fR to take effect. If running jobs exist at the time \fBslurmctld\fR is restarted with a new value of \fBSwitchType\fR, records of all jobs in any state may be lost. + .TP \fBTaskEpilog\fR Fully qualified pathname of a program to be execute as the slurm job's owner after termination of each task. See \fBTaskPlugin\fR for execution order details. + .TP \fBTaskPlugin\fR Identifies the type of task launch plugin, typically used to provide @@ -725,6 +1103,7 @@ Standard output from this program of the form "export NAME=value" will be used to set environment variables for the task being spawned. See \fBTaskPlugin\fR for execution order details. + .TP \fBTmpFS\fR Fully qualified pathname of the file system available to user jobs for @@ -740,7 +1119,7 @@ The default value is 50, meaning each slurmd daemon can communicate with up to 50 other slurmd daemons and over 2500 nodes can be contacted with two message hops. The default value will work well for most clusters. -Optimaly system performance can typically be achieved if \fBTreeWidth\fR +Optimal system performance can typically be achieved if \fBTreeWidth\fR is set to the square root of the number of nodes in the cluster for systems having no more than 2500 nodes or the cube root for larger systems. @@ -752,16 +1131,13 @@ of time specified by the UnkillableStepTimeout variable, the program specified by the UnkillableStepProgram string will be executed. This program can be used to take special actions to clean up the unkillable processes. The program will be run as the same user as the slurmd (usually -"root"). NOTE: This variable does not appear in the output of the command -"scontrol show config" in versions of SLURM less than version 1.3. +"root"). 
.TP \fBUnkillableStepTimeout\fR The length of time, in seconds, that SLURM will wait before deciding that -processes in a job step are unkillable (after they have been signalled with -SIGKILL). The default timeout value is 60 seconds. NOTE: This variable does -not appear in the output of the command "scontrol show config" in versions -of SLURM less than version 1.3. +processes in a job step are unkillable (after they have been signaled with +SIGKILL). The default timeout value is 60 seconds. .TP \fBUsePAM\fR @@ -789,6 +1165,7 @@ session required pam_unix.so .br For sites configuring PAM with a general configuration file, the appropriate lines (see above), where \fBslurm\fR is the service\-name, should be added. + .TP \fBWaitTime\fR Specifies how many seconds the srun command should by default wait after @@ -796,9 +1173,12 @@ the first task terminates before terminating all remaining tasks. The "\-\-wait" option on the srun command line overrides this value. If set to 0, this feature is disabled. May not exceed 65533. + .LP The configuration of nodes (or machines) to be managed by Slurm is also specified in \fB/etc/slurm.conf\fR. +Changes in node configuration (e.g. adding nodes, changing their +processor count, etc.) require restarting the slurmctld daemon. Only the NodeName must be supplied in the configuration file. All other node configuration information is optional. It is advisable to establish baseline node configurations, @@ -836,11 +1216,11 @@ node specifications should be placed in this file in consecutive order. No single node name may be listed more than once in the configuration file. Use "DownNodes=" to record the state of nodes which are temporarily -in a DOWN or DRAIN state without altering permanent configuration -information. +in a DOWN, DRAIN or FAILING state without altering permanent +configuration information. A job step's tasks are allocated to nodes in the order the nodes appear in the configuration file. There is presently no capability within -SLURM to arbitarily order a job step's tasks. +SLURM to arbitrarily order a job step's tasks. .LP Multiple node names may be comma separated (e.g. "alpha,beta,gamma") and/or a simple node range expression may optionally be used to @@ -861,12 +1241,13 @@ See BlueGene documentation for more details. Presently the numeric range must be the last characters in the node name (e.g. "unit[0\-31]rack1" is invalid). The node configuration specifies the following information: + .TP \fBNodeName\fR Name that SLURM uses to refer to a node (or base partition for BlueGene systems). Typically this would be the string that "/bin/hostname \-s" -returns, however it may be an arbitary string if +returns, however it may be an arbitrary string if \fBNodeHostname\fR is specified. If the \fBNodeName\fR is "DEFAULT", the values specified with that record will apply to subsequent node specifications @@ -877,6 +1258,7 @@ nodes will be considered consecutive in the order defined. For example, if the configuration for "NodeName=charlie" immediately follows the configuration for "NodeName=baker" they will be considered adjacent in the computer. + .TP \fBNodeHostname\fR The string that "/bin/hostname \-s" returns. @@ -886,6 +1268,7 @@ If an expression is used, the number of nodes identified by be identical to the number of nodes identified by \fBNodeName\fR. By default, the \fBNodeHostname\fR will be identical in value to \fBNodeName\fR.
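As a hedged sketch of the NodeName/NodeHostname mapping (all host names and counts below are hypothetical), a cluster whose real hostnames are "n0" through "n15" could be presented to SLURM as "tux0" through "tux15" with:

NodeName=DEFAULT Procs=2 State=UNKNOWN
NodeName=tux[0-15] NodeHostname=n[0-15]

The DEFAULT record supplies values inherited by the subsequent node records, as described above.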
+ .TP \fBNodeAddr\fR Name that a node should be referred to in establishing @@ -898,6 +1281,16 @@ they must exactly match the entries in the \fBNodeName\fR \fBNodeAddr\fR may also contain IP addresses. By default, the \fBNodeAddr\fR will be identical in value to \fBNodeName\fR. + +.TP +\fBCoresPerSocket\fR +Number of cores in a single physical processor socket (e.g. "2"). +The CoresPerSocket value describes physical cores, not the +logical number of processors per socket. +\fBNOTE\fR: If you have multi\-core processors, you will likely +need to specify this parameter in order to optimize scheduling. +The default value is 1. + .TP \fBFeature\fR A comma delimited list of arbitrary strings indicative of some @@ -907,16 +1300,25 @@ either has a feature or it does not. If desired a feature may contain a numeric component indicating, for example, processor speed. By default a node has no features. -.TP -\fBRealMemory\fR -Size of real memory on the node in MegaBytes (e.g. "2048"). -The default value is 1. + .TP \fBProcs\fR Number of logical processors on the node (e.g. "2"). If Procs is omitted, it will be inferred from \fBSockets\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR. The default value is 1. + +.TP +\fBRealMemory\fR +Size of real memory on the node in MegaBytes (e.g. "2048"). +The default value is 1. + +.TP +\fBReason\fR +Identifies the reason for a node being in state "DOWN", "DRAINED" +"DRAINING", "FAIL" or "FAILING". +Use quotes to enclose a reason having more than one word. + .TP \fBSockets\fR Number of physical processor sockets/chips on the node (e.g. "2"). @@ -925,33 +1327,30 @@ If Sockets is omitted, it will be inferred from \fBNOTE\fR: If you have multi\-core processors, you will likely need to specify these parameters. The default value is 1. -.TP -\fBCoresPerSocket\fR -Number of cores in a single physical processor socket (e.g. "2"). -The CoresPerSocket value describes physical cores, not the -logical number of processors per socket. -\fBNOTE\fR: If you have multi\-core processors, you will likely -need to specify this parameter. -The default value is 1. -.TP -\fBThreadsPerCore\fR -Number of logical threads in a single physical core (e.g. "2"). -The default value is 1. -.TP -\fBReason\fR -Identifies the reason for a node being in state "DOWN" or "DRAIN". -Use quotes to enclose a reason having more than one word. + .TP \fBState\fR State of the node with respect to the initiation of user jobs. -Acceptable values are "DOWN", "DRAIN" and "UNKNOWN". +Acceptable values are "DOWN", "DRAIN", "FAIL", "FAILING" and "UNKNOWN". "DOWN" indicates the node failed and is unavailable to be allocated work. "DRAIN" indicates the node is unavailable to be allocated work. +"FAIL" indicates the node is expected to fail soon, has +no jobs allocated to it, and will not be allocated +to any new jobs. +"FAILING" indicates the node is expected to fail soon, has +one or more jobs allocated to it, but will not be allocated +to any new jobs. "UNKNOWN" indicates the node's state is undefined (BUSY or IDLE), but will be established when the \fBslurmd\fR daemon on that node registers. The default value is "UNKNOWN". -Also see the \fBDownNodes\fR paramter below. +Also see the \fBDownNodes\fR parameter below. + +.TP +\fBThreadsPerCore\fR +Number of logical threads in a single physical core (e.g. "2"). +The default value is 1. + .TP \fBTmpDisk\fR Total size of temporary disk storage in \fBTmpFS\fR in MegaBytes @@ -965,6 +1364,7 @@ most of this space. 
The Prolog and/or Epilog programs (specified in the configuration file) might be used to ensure the file system is kept clean. The default value is 0. + .TP \fBWeight\fR The priority of the node for scheduling purposes. @@ -981,21 +1381,32 @@ disk space, higher processor speed, etc. Weight is an integer value with a default value of 1. .LP The "DownNodes=" configuration permits you to mark certain nodes as in a -DOWN or DRAIN state without altering the permanent configuration -information listed under a "NodeName=" specification. +DOWN, DRAIN, FAIL, or FAILING state without altering the permanent +configuration information listed under a "NodeName=" specification. + .TP \fBDownNodes\fR Any node name, or list of node names, from the "NodeName=" specifications. + .TP \fBReason\fR -Identifies the reason for a node being in state "DOWN" or "DRAIN". -Use quotes to enclose a reason having more than one word. +Identifies the reason for a node being in state "DOWN", "DRAIN", +"FAIL" or "FAILING". +Use quotes to enclose a reason having more than one word. + .TP \fBState\fR State of the node with respect to the initiation of user jobs. -Acceptable values are "DOWN", "DRAIN" and "UNKNOWN". +Acceptable values are "BUSY", "DOWN", "DRAIN", "FAIL", +"FAILING", "IDLE", and "UNKNOWN". "DOWN" indicates the node failed and is unavailable to be allocated work. "DRAIN" indicates the node is unavailable to be allocated work. +"FAIL" indicates the node is expected to fail soon, has +no jobs allocated to it, and will not be allocated +to any new jobs. +"FAILING" indicates the node is expected to fail soon, has +one or more jobs allocated to it, but will not be allocated +to any new jobs. "UNKNOWN" indicates the node's state is undefined (BUSY or IDLE), but will be established when the \fBslurmd\fR daemon on that node registers. @@ -1020,6 +1431,7 @@ describing the configuration of partitions. Each line of partition configuration information should represent a different partition. The partition configuration file contains the following information: + .TP \fBAllowGroups\fR Comma separated list of group IDs which may execute jobs in the partition. @@ -1029,14 +1441,24 @@ Jobs executed as user root can use any partition without regard to the value of AllowGroups. If user root attempts to execute a job as another user (e.g. using srun's \-\-uid option), this other user must be in one of the groups -identified by AllowGroups for the job to succesfully execute. +identified by AllowGroups for the job to successfully execute. The default value is "ALL". + .TP \fBDefault\fR If this keyword is set, jobs submitted without a partition specification will utilize this partition. Possible values are "YES" and "NO". The default value is "NO". + +.TP +\fBDisableRootJobs\fR +If set to "YES" then user root will be prevented from running any jobs +on this partition. +The default value will be the value of \fBDisableRootJobs\fR set +outside of a partition specification (which is "NO", allowing user +root to execute jobs). + .TP \fBHidden\fR Specifies if the partition and its jobs are to be hidden by default. @@ -1044,33 +1466,31 @@ Hidden partitions will by default not be reported by the SLURM APIs or commands. Possible values are "YES" and "NO". The default value is "NO". -.TP -\fBRootOnly\fR -Specifies if only user ID zero (i.e. user \fIroot\fR) may allocate resources -in this partition. User root may allocate resources for any other user, -but the request must be initiated by user root.
-This option can be useful for a partition to be managed by some -external entity (e.g. a higher\-level job manager) and prevents -users from directly using those resources. -Possible values are "YES" and "NO". -The default value is "NO". + .TP \fBMaxNodes\fR Maximum count of nodes (or base partitions for BlueGene systems) which may be allocated to any single job. The default value is "UNLIMITED", which is represented internally as \-1. This limit does not apply to jobs executed by SlurmUser or user root. + .TP \fBMaxTime\fR -Maximum wall\-time limit for any job in minutes. The default -value is "UNLIMITED", which is represented internally as \-1. +Maximum run time limit for jobs. +Format is minutes, minutes:seconds, hours:minutes:seconds, +days\-hours, days\-hours:minutes, days\-hours:minutes:seconds or +"UNLIMITED". +Time resolution is one minute and second values are rounded up to +the next minute. This limit does not apply to jobs executed by SlurmUser or user root. + .TP \fBMinNodes\fR Minimum count of nodes (or base partitions for BlueGene systems) which may be allocated to any single job. The default value is 1. This limit does not apply to jobs executed by SlurmUser or user root. + .TP \fBNodes\fR Comma separated list of nodes (or base partitions for BlueGene systems) @@ -1079,6 +1499,7 @@ Node names may be specified using the node range expression syntax described above. A blank list of nodes (i.e. "Nodes= ") can be used if one wants a partition to exist, but have no resources (possibly on a temporary basis). + .TP \fBPartitionName\fR Name by which the partition may be referenced (e.g. "Interactive"). @@ -1087,24 +1508,76 @@ If the \fBPartitionName\fR is "DEFAULT", the values specified with that record will apply to subsequent partition specifications unless explicitly set to other values in that partition record or replaced with a different set of default values. + +.TP +\fBPriority\fR +Jobs submitted to a higher priority partition will be dispatched +before pending jobs in lower priority partitions and if possible +they will preempt running jobs from lower priority partitions. +Note that a partition's priority takes precedence over a job's +priority. +The value may not exceed 65533. + + +.TP +\fBRootOnly\fR +Specifies if only user ID zero (i.e. user \fIroot\fR) may allocate resources +in this partition. User root may allocate resources for any other user, +but the request must be initiated by user root. +This option can be useful for a partition to be managed by some +external entity (e.g. a higher\-level job manager) and prevents +users from directly using those resources. +Possible values are "YES" and "NO". +The default value is "NO". + .TP \fBShared\fR -Ability of the partition to execute more than one job at a -time on each node. Shared nodes will offer unpredictable performance -for application programs, but can provide higher system utilization -and responsiveness than otherwise possible. -Possible values are "EXCLUSIVE", "FORCE", "YES", and "NO". -"EXCLUSIVE" allocates entire nodes to jobs even with -select/cons_res configured. +Controls the ability of the partition to execute more than one job at a +time on each resource (node, socket or core depending upon the value +of \fBSelectTypeParameters\fR). +If resources are to be shared, avoiding memory over\-subscription +is very important. +\fBSelectTypeParameters\fR should be configured to treat +memory as a consumable resource and the \fB\-\-mem\fR option +should be used for job allocations. 
+For more information see the following web page: +\fIhttps://computing.llnl.gov/linux/slurm/cons_res_share.html\fR. +Possible values for \fBShared\fR are "EXCLUSIVE", "FORCE", "YES", and "NO". +.RS +.TP 12 +\fBEXCLUSIVE\fR +Allocates entire nodes to jobs even with select/cons_res configured. This can be used to allocate whole nodes in some partitions -and individual processors in other partitions. -"FORCE" makes all nodes in the partition available for sharing -without user means of disabling it. -"YES" makes nodes in the partition available for sharing if and -only if the individual jobs permit sharing (see the srun -"\-\-share" option). -"NO" makes nodes unavailable for sharing under all circumstances. -The default value is "NO". +and individual processors in other partitions. +.TP +\fBFORCE\fR +Makes all resources in the partition available for sharing +without any means for users to disable it. +May be followed with a colon and maximum number of jobs in +running or suspended state. +For example "Shared=FORCE:4" enables each node, socket or +core to execute up to four jobs at once. +Recommended only for BlueGene systems configured with +small blocks or for systems running +with gang scheduling (\fBSchedulerType=sched/gang\fR). +.TP +\fBYES\fR +Makes nodes in the partition available for sharing, but provides +the user with a means of getting dedicated resources. +If \fBSelectType=select/cons_res\fR, then resources will be +over\-subscribed unless explicitly disabled in the job submit +request using the "\-\-exclusive" option. +With \fBSelectType=select/bluegene\fR or \fBSelectType=select/linear\fR, +resources will only be over\-subscribed when explicitly requested +by the user using the "\-\-share" option on job submission. +May be followed with a colon and maximum number of jobs in +running or suspended state. +For example "Shared=YES:4" enables each node, socket or +core to execute up to four jobs at once. +Recommended only for systems running with gang scheduling +(\fBSchedulerType=sched/gang\fR). +.RE + .TP \fBState\fR State of partition or availability for use. Possible values @@ -1158,7 +1631,7 @@ BackupAddr=edev1 .br # .br -AuthType=auth/authd +AuthType=auth/munge .br Epilog=/usr/local/slurm/epilog .br @@ -1168,13 +1641,11 @@ FastSchedule=1 .br FirstJobId=65536 .br -HeartbeatInterval=60 -.br InactiveLimit=120 .br JobCompType=jobcomp/filetxt .br -JobCompLoc=/var/log/slurm.job.log +JobCompLoc=/var/log/slurm/jobcomp .br KillWait=30 .br @@ -1186,13 +1657,11 @@ PluginDir=/usr/local/lib:/usr/local/slurm/lib .br ReturnToService=0 .br -SchedulerType=sched/wiki -.br -SchedulerPort=7004 +SchedulerType=sched/backfill .br -SlurmctldLogFile=/var/log/slurmctld.log +SlurmctldLogFile=/var/log/slurm/slurmctld.log .br -SlurmdLogFile=/var/log/slurmd.log +SlurmdLogFile=/var/log/slurm/slurmd.log .br SlurmctldPort=7002 .br @@ -1212,12 +1681,6 @@ JobCredentialPrivateKey=/usr/local/slurm/private.key .br JobCredentialPublicCertificate=/usr/local/slurm/public.cert .br -JobAcctType=jobacct/linux -.br -JobAcctLogFile=/var/log/slurm_accounting.log -.br -JobAcctParameters="Frequency=30,MaxSendRetries=5" -.br # .br # Node Configurations @@ -1250,8 +1713,9 @@ PartitionName=long Nodes=dev[9\-17] MaxTime=120 AllowGroups=admin .SH "COPYING" Copyright (C) 2002\-2007 The Regents of the University of California. +Copyright (C) 2008 Lawrence Livermore National Security. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394.
.LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. @@ -1269,8 +1733,8 @@ details. /etc/slurm.conf .SH "SEE ALSO" .LP -\fBbluegene.conf\fR(5), -\fBgetrlimit\fR(2), -\fBgethostbyname\fR(3), \fBgroup\fR(5), \fBhostname\fR(1), -\fBscontrol\fR(1), \fBslurmctld\fR(8), \fBslurmd\fR(8), \fBspank(8)\fR, +\fBbluegene.conf\fR(5), \fBgethostbyname\fR(3), +\fBgetrlimit\fR(2), \fBgroup\fR(5), \fBhostname\fR(1), +\fBscontrol\fR(1), \fBslurmctld\fR(8), \fBslurmd\fR(8), +\fBslurmdbd\fR(8), \fBslurmdbd.conf\fR(5), \fBspank(8)\fR, \fBsyslog\fR(2), \fBwiki.conf\fR(5) diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5 new file mode 100644 index 000000000..0b55090de --- /dev/null +++ b/doc/man/man5/slurmdbd.conf.5 @@ -0,0 +1,216 @@ +.TH "slurmdbd.conf" "5" "February 2008" "slurmdbd.conf 1.3" "Slurm configuration file" +.SH "NAME" +slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file + +.SH "DESCRIPTION" +\fB/etc/slurmdb.conf\fP is an ASCII file which describes Slurm Database +Daemon (SlurmDBD) configuration information. +.LP +The file location can be modified at system build time using the +DEFAULT_SLURM_CONF parameter. +The contents of the file are case insensitive except for the names of nodes +and files. Any text following a "#" in the configuration file is treated +as a comment through the end of that line. +The size of each line in the file is limited to 1024 characters. +Changes to the configuration file take effect upon restart of +SlurmDbd or daemon receipt of the SIGHUP signal unless otherwise noted. +.LP +This file should be only on the computer where SlurmDBD executes and +should only be readable by the user which executes SlurmDBD (e.g. "slurm"). +This file should be protected from unauthorized access since it +contains a database password. +The overall configuration parameters available include: + +.TP +\fBAuthInfo\fR +Additional information to be used for authentication of communications +with the Slurm control daemon (slurmctld) on each cluster. +The interpretation of this option is specific to the configured \fBAuthType\fR. +In the case of \fIauth/munge\fR, this can be configured to use a Munge daemon +specifically configured to provide authentication between clusters while the +default Munge daemon provides authentication within a cluster. +In that case, this will specify the pathname of the socket to use. +The default value is NULL, which results in the default authentication +mechanism being used. + +.TP +\fBAuthType\fR +Define the authentication method for communications between SLURM +components. +Acceptable values at present include "auth/none", "auth/authd", +and "auth/munge". +The default value is "auth/none", which means the UID included in +communication messages is not verified. +This may be fine for testing purposes, but +\fBdo not use "auth/none" if you desire any security\fR. +"auth/authd" indicates that Brett Chun's authd is to be used (see +"http://www.theether.org/authd/" for more information). +"auth/munge" indicates that LLNL's Munge system is to be used +(this is the best supported authentication mechanism for SLURM, +see "http://home.gna.org/munge/" for more information). +SlurmDbd must be terminated prior to changing the value of \fBAuthType\fR +and later restarted. + +.TP +\fBDbdAddr\fR +Name that \fBDbdHost\fR should be referred to in +establishing a communications path to the Slurm Database Daemon. 
+This name will be used as an argument to the gethostbyname() +function for identification. For example, "elx0000" might be used +to designate the ethernet address for node "lx0000". +By default the \fBDbdAddr\fR will be identical in value to +\fBDbdHost\fR. +This value must be equal to the \fBSlurmDbdAddr\fR parameter in +the slurm.conf file. + +.TP +\fBDbdHost\fR +The name of the machine where the Slurm Database Daemon is executed. +This should be a node name without the full domain name (e.g. "lx0001"). +This value must be specified. + +.TP +\fBDbdPort\fR +The port number that the Slurm Database Daemon (slurmdbd) listens +to for work. The default value is SLURMDBD_PORT as established at system +build time. If none is explicitly specified, it will be set to 6819. +This value must be equal to the \fBSlurmDbdPort\fR parameter in the +slurm.conf file. + +.TP +\fBDebugLevel\fR +The level of detail to provide in the Slurm Database Daemon's logs. +Values from 0 to 7 are legal, with `0' being "quiet" operation and +`7' being insanely verbose. +The default value is 3. + +.TP +\fBLogFile\fR +Fully qualified pathname of a file into which the Slurm Database Daemon's +logs are written. +The default value is none (performs logging via syslog). + +.TP +\fBMessageTimeout\fR +Time permitted for a round\-trip communication to complete +in seconds. Default value is 10 seconds. + +.TP +\fBPidFile\fR +Fully qualified pathname of a file into which the Slurm Database Daemon +may write its process ID. This may be used for automated signal processing. +The default value is "/var/run/slurmdbd.pid". + +.TP +\fBPluginDir\fR +Identifies the places in which to look for SLURM plugins. +This is a colon\-separated list of directories, like the PATH +environment variable. +The default value is "/usr/local/lib/slurm". + +.TP +\fBSlurmUser\fR +The name of the user that the \fBslurmctld\fR daemon executes as. +This user must exist on the machine executing the Slurm Database Daemon +and have the same user ID as the hosts on which \fBslurmctld\fR execute. +For security purposes, a user other than "root" is recommended. +The default value is "root". + +.TP +\fBStorageHost\fR +Define the name of the host where the database used to store the +accounting data is running. +Ideally this should be the host on which slurmdbd executes. + +.TP +\fBStorageLoc\fR +Specify the name of the database as the location where accounting +records are written. + +.TP +\fBStoragePass\fR +Define the password used to gain access to the database to store +the job accounting data. + +.TP +\fBStoragePort\fR +The port number that the Slurm Database Daemon (slurmdbd) uses to +communicate with the database. + +.TP +\fBStorageType\fR +Define the accounting storage mechanism type. +Acceptable values at present include +"accounting_storage/gold", "accounting_storage/mysql", and +"accounting_storage/pgsql". +The value "accounting_storage/gold" indicates that account records +will be written to Gold +(http://www.clusterresources.com/pages/products/gold-allocation-manager.php), +which maintains its own database. +The value "accounting_storage/mysql" indicates that accounting records +should be written to a MySQL database specified by the +\fBStorageLoc\fR parameter. +The value "accounting_storage/pgsql" indicates that accounting records +should be written to a PostgreSQL database specified by the +\fBStorageLoc\fR parameter. +This value must be specified.
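As a hedged example of the storage options described above (host, database name, user and password are placeholder values), a MySQL backed SlurmDBD might be configured with:

StorageType=accounting_storage/mysql
StorageHost=localhost
StoragePort=3306
StorageLoc=slurm_acct_db
StorageUser=slurm
StoragePass=change_me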
+ +.TP +\fBStorageUser\fR +Define the name of the user we are going to connect to the database +with to store the job accounting data. + +.SH "EXAMPLE" +.LP +# +.br +# Sample /etc/slurmdbd.conf +.br +# +.br +AuthInfo=/var/run/munge/munge.socket.2 +.br +AuthType=auth/munge +.br +DbdHost=db_host +.br +DebugLevel=4 +.br +LogFile=/var/log/slurmdbd.log +.br +PidFile=/var/tmp/jette/slurmdbd.pid +.br +SlurmUser=slurm_mgr +.br +StoragePass=shazaam +.br +StorageType=accounting_storage/mysql +.br +StorageUser=database_mgr + +.SH "COPYING" +Copyright (C) 2008 Lawrence Livermore National Security. +Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +LLNL\-CODE\-402394. +.LP +This file is part of SLURM, a resource management program. +For details, see <https://computing.llnl.gov/linux/slurm/>. +.LP +SLURM is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2 of the License, or (at your option) +any later version. +.LP +SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +details. + +.SH "FILES" +/etc/slurmdbd.conf + +.SH "SEE ALSO" +.LP +\fBslurm.conf\fR(5), +\fBslurmctld\fR(8), \fBslurmdbd\fR(8) +\fBsyslog\fR(2) diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5 index c84957fdf..5edce905f 100644 --- a/doc/man/man5/wiki.conf.5 +++ b/doc/man/man5/wiki.conf.5 @@ -1,4 +1,4 @@ -.TH "wiki.conf" "5" "August 2007" "wiki.conf 1.2" "Slurm configuration file" +.TH "wiki.conf" "5" "December 2007" "wiki.conf 1.2" "Slurm configuration file" .SH "NAME" wiki.conf \- Slurm configuration file for wiki and wiki2 scheduler plugins .SH "DESCRIPTION" @@ -91,8 +91,11 @@ No data compression. Each host name is listed individually. .TP \fB1\fR SLURM hostlist expressions are exchanged with task counts -(e.g. "tux[0\-16]*2"). -This is currently experimental. +(e.g. "tux[0\-16]*2") in job state information and job +initiation requests. +.TP +\fB2\fR +SLURM hostlist expressions are used to report node state information. .RE .TP @@ -167,7 +170,7 @@ JobAggregationTime=15 .SH "COPYING" Copyright (C) 2006-2007 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man8/slurmctld.8 b/doc/man/man8/slurmctld.8 index 0e37e751f..67f64b5b3 100644 --- a/doc/man/man8/slurmctld.8 +++ b/doc/man/man8/slurmctld.8 @@ -57,7 +57,7 @@ configuration file, \fBslurm.conf\fR. .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man8/slurmd.8 b/doc/man/man8/slurmd.8 index bca7fdc87..8cb552111 100644 --- a/doc/man/man8/slurmd.8 +++ b/doc/man/man8/slurmd.8 @@ -60,7 +60,7 @@ configuration file, \fBslurm.conf\fR. .SH "COPYING" Copyright (C) 2002\-2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. 
.LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. diff --git a/doc/man/man8/slurmdbd.8 b/doc/man/man8/slurmdbd.8 new file mode 100644 index 000000000..0b3b656c6 --- /dev/null +++ b/doc/man/man8/slurmdbd.8 @@ -0,0 +1,55 @@ +.TH slurmdbd "8" "February 2008" "slurmdbd 1.3" "Slurm components" +.SH "NAME" +slurmdbd \- Slurm Database Daemon. + +.SH "SYNOPSIS" +\fBslurmdbd\fR [\fIOPTIONS\fR...] + +.SH "DESCRIPTION" +\fBslurmdbd\fR provides a secure enterprise\-wide interface to a database +for Slurm. This is particularly useful for archiving accounting records. +.TP +OPTIONS +.TP +\fB\-D\fR +Debug mode. Execute \fBslurmdbd\fR in the foreground with logging to stdout. +.TP +\fB\-h\fR +Help; print a brief summary of command options. +.TP +\fB\-v\fR +Verbose operation. Multiple \fB\-v\fR's increase verbosity. +.TP +\fB\-V\fR +Print version information and exit. + +.SH "NOTES" +It may be useful to experiment with different \fBslurmctld\fR specific +configuration parameters using a distinct configuration file +(e.g. timeouts). However, this special configuration file will not be +used by the \fBslurmd\fR daemon or the Slurm programs, unless you +specifically tell each of them to use it. If you desire changing +communication ports, the location of the temporary file system, or +other parameters used by other Slurm components, change the common +configuration file, \fBslurm.conf\fR. + +.SH "COPYING" +Copyright (C) 2008 Lawrence Livermore National Security. +Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +LLNL\-CODE\-402394. +.LP +This file is part of SLURM, a resource management program. +For details, see <https://computing.llnl.gov/linux/slurm/>. +.LP +SLURM is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 2 of the License, or (at your option) +any later version. +.LP +SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +details. + +.SH "SEE ALSO" +\fBslurm.conf\fR(5), \fBslurmdbd.conf\fR(5), \fBslurmctld\fR(8) diff --git a/doc/man/man8/slurmstepd.8 b/doc/man/man8/slurmstepd.8 index fe27e4a23..24748ec2b 100644 --- a/doc/man/man8/slurmstepd.8 +++ b/doc/man/man8/slurmstepd.8 @@ -13,7 +13,7 @@ for the job step along with its accounting and signal processing. .SH "COPYING" Copyright (C) 2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. 
diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8 index 7f0465788..7813a6d64 100644 --- a/doc/man/man8/spank.8 +++ b/doc/man/man8/spank.8 @@ -1,6 +1,7 @@ .TH "SPANK" "8" "May 2006" "SPANK" "SLURM plug\-in architecture for Node and job (K)control" .SH "NAME" \fBSPANK\fR \- SLURM Plug\-in Architecture for Node and job (K)control + .SH "DESCRIPTION" This manual briefly describes the capabilities of the SLURM Plug\-in architecture for Node and job Kontrol (\fBSPANK\fR) as well as the \fBSPANK\fR @@ -16,10 +17,12 @@ the \fBSPANK\fR infrastructure provides administrators and other developers a low cost, low effort ability to dynamically modify the runtime behavior of SLURM job launch. .LP + .SH "SPANK PLUGINS" \fBSPANK\fR plugins are loaded in two separate contexts during a -\fBSLURM\fR job. In "local" context, the plugin is loaded by \fBsrun\fR -or other \fBSLURM\fR user interface. In local context, options provided by +\fBSLURM\fR job. In "local" context, the plugin is loaded by \fBsrun\fR, +\fBsbatch\fR or other \fBSLURM\fR user interface. +In local context, options provided by plugins are read by \fBSPANK\fR, and these options are presented to the user. In "remote" context, the plugin is loaded on a compute node of the job, in other words, the plugin is loaded by \fBslurmd\fR. In local context, only @@ -36,7 +39,12 @@ just after job step is initialized. For local context, this is before user options are processed. .TP \fBslurm_spank_local_user_init\fR -Called in local (srun) context only after all options have been processed. +Called in local (\fBsrun\fR or \fBsbatch\fR) context only after all +options have been processed. +This is called after the job ID and step IDs are available. +This happens in \fBsrun\fR after the allocation is made, but before +tasks are launched. +This happens in \fBsbatch\fR after the job is submitted. .TP \fBslurm_spank_user_init\fR Called after privileges are temporarily dropped. (remote context only) @@ -56,7 +64,7 @@ Called for each task as its exit status is collected by SLURM. .TP \fBslurm_spank_exit\fR Called once just before \fBslurmstepd\fR exits in remote context. -In local context, called before \fBsrun\fR exits. +In local context, called before \fBsrun\fR or \fBsbatch\fR exits. .LP All of these functions have the same prototype, for example: .nf @@ -104,6 +112,8 @@ User id for running job. (uid_t *) is third arg of \fBspank_get_item\fR .TP \fBS_JOB_STEPID\fR Job step id for running job. (uint32_t *) is third arg of \fBspank_get_item\fR +For batch jobs (initiated by \fBsbatch\fR), the step id will be +\fBSLURM_BATCH_SCRIPT\fR as defined in the \fBslurm.h\fR file. .TP \fBS_TASK_EXIT_STATUS\fR Exit status for exited task. Only valid from \fBslurm_spank_task_exit\fR. @@ -140,11 +150,13 @@ and \fBunsetenv\fR(3) may be used in local context. .LP See \fBspank.h\fR for more information, and \fBEXAMPLES\fR below for an example for \fBspank_getenv\fR usage. + .SH "SPANK OPTIONS" .LP SPANK plugins also have an interface through which they may define and implement extra job options. These options are made available to -the user through SLURM commands such as \fBsrun\fR(1), and if the +the user through SLURM commands such as \fBsrun\fR(1) or +\fBsbatch\fR(1), and if the option is specified, its value is forwarded and registered with the plugin on the remote side. In this way, \fBSPANK\fR plugins may dynamically provide new options and functionality to SLURM. @@ -198,8 +210,8 @@ registered with SLURM. 
\fBspank_opt_cb_f\fR is typedef'd in .fi Where \fIval\fR is the value of the \fIval\fR field in the \fBspank_option\fR struct, \fIoptarg\fR is the supplied argument if applicable, and \fIremote\fR -is 0 if the function is being called from the "local" host (e.g. srun) or -1 from the "remote" host (slurmd). +is 0 if the function is being called from the "local" host +(e.g. \fBsrun\fR or \fBsbatch\fR) or 1 from the "remote" host (\fBslurmd\fR). .LP The last element of the array must filled with zeros. A \fBSPANK_OPTIONS_TABLE_END\fR macro is defined in \fB<slurm/spank.h>\fR @@ -260,6 +272,7 @@ The \fBSPANK\fR config file is re\-read on each job launch, so editing the config file will not affect running jobs. However care should be taken so that a partially edited config file is not read by a launching job. + .SH "EXAMPLES" .LP Simple \fBSPANK\fR config file: @@ -276,8 +289,8 @@ required /usr/lib/slurm/test.so .fi .LP The following is a simple \fBSPANK\fR plugin to modify the nice value -of job tasks. This plugin adds a \-\-renice=[prio] option to srun which -users can use to set the priority of all remote tasks. Priority may +of job tasks. This plugin adds a \-\-renice=[prio] option to \fBsrun\fR +which users can use to set the priority of all remote tasks. Priority may also be specified via a SLURM_RENICE environment variable. A minimum priority may be established via a "min_prio" parameter in \fBplugstack.conf\fR (See above for example). @@ -321,7 +334,7 @@ static int _str2prio (const char *str, int *p2int); */ struct spank_option spank_options[] = { - { "renice", "[prio]", "Re\-nice job tasks to priority [prio].", 1, 0, + { "renice", "[prio]", "Re\-nice job tasks to priority [prio].", 2, 0, (spank_opt_cb_f) _renice_opt_process }, SPANK_OPTIONS_TABLE_END @@ -430,7 +443,7 @@ static int _renice_opt_process (int val, const char *optarg, int remote) .SH "COPYING" Copyright (C) 2006 The Regents of the University of California. Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -UCRL\-CODE\-226842. +LLNL\-CODE\-402394. .LP This file is part of SLURM, a resource management program. For details, see <https://computing.llnl.gov/linux/slurm/>. @@ -452,4 +465,4 @@ details. \fB/usr/include/slurm/spank.h\fR \- SPANK header file. 
.SH "SEE ALSO" .LP -\fBslurm.conf\fR(5) +\fBsbatch\fR(1), \fBsrun\fR(1), \fBslurm.conf\fR(5) diff --git a/etc/bluegene.conf.example b/etc/bluegene.conf.example index a1dfa9fa1..ef9fb3859 100644 --- a/etc/bluegene.conf.example +++ b/etc/bluegene.conf.example @@ -67,7 +67,8 @@ AltRamDiskImage=* Groups=da,adamb LayoutMode=STATIC BasePartitionNodeCnt=512 NodeCardNodeCnt=32 -Numpsets=8 +#Numpsets=8 #used for IO poor systems (Can't create 32 cnode blocks) +Numpsets=64 #used for IO rich systems BridgeAPILogFile=/var/log/slurm/bridgeapi.log BridgeAPIVerbose=0 diff --git a/etc/init.d.slurm b/etc/init.d.slurm index 89aeb6ac8..72bd3ec50 100644 --- a/etc/init.d.slurm +++ b/etc/init.d.slurm @@ -70,9 +70,11 @@ fi export LD_LIBRARY_PATH="$LIBDIR:$LD_LIBRARY_PATH" start() { - echo -n "starting $1: " + prog=$1 + shift + echo -n "starting $prog: " unset HOME MAIL USER USERNAME - $STARTPROC $SBINDIR/$1 $2 + $STARTPROC $SBINDIR/$prog $* rc_status -v echo touch /var/lock/subsys/slurm diff --git a/etc/init.d.slurmdbd b/etc/init.d.slurmdbd new file mode 100755 index 000000000..55473e438 --- /dev/null +++ b/etc/init.d.slurmdbd @@ -0,0 +1,146 @@ +#!/bin/bash +# +# chkconfig: 345 90 10 +# description: SLURMDBD is a database server interface for \ +# SLURM (Simple Linux Utility for Resource Management). +# +# processname: /usr/sbin/slurmdbd +# pidfile: /var/run/slurmdbd.pid +# +# config: /etc/sysconfig/slurm +# +### BEGIN INIT INFO +# Provides: slurmbd +# Required-Start: $local_fs $syslog $network $named munge +# Required-Stop: $local_fs $syslog $network $named munge +# Default-Start: 3 5 +# Default-Stop: 0 1 2 6 +# Short-Description: SLURM database daemon +# Description: Start slurm to provide database server for SLURM +### END INIT INFO + +CONFDIR=/etc/slurm +SBINDIR=/usr/sbin + +# Source function library. +if [ -f /etc/rc.status ]; then + . /etc/rc.status + SUSE=1 + STARTPROC=startproc + + rc_reset +else + [ -f /etc/rc.d/init.d/functions ] || exit 0 + . /etc/rc.d/init.d/functions + SUSE=0 + STARTPROC=daemon + + function rc_status() { + RETVAL=$? + } + function rc_exit () { + exit $RETVAL + } + RETVAL=0 +fi + +# Source slurm specific configuration +if [ -f /etc/sysconfig/slurm ] ; then + . /etc/sysconfig/slurm +else + SLURMDBD_OPTIONS="" +fi + +[ -f $CONFDIR/slurmdbd.conf ] || exit 1 + +start() { + echo -n "starting slurmdbd: " + unset HOME MAIL USER USERNAME + $STARTPROC $SBINDIR/slurmdbd $SLURMDBD_OPTIONS + rc_status -v + echo + touch /var/lock/subsys/slurmdbd +} + +stop() { + echo -n "stopping slurmdbd: " + killproc slurmdbd -TERM + rc_status -v + echo + rm -f /var/lock/subsys/slurmdbd +} + +slurmstatus() { + local base=${1##*/} + local pid + local rpid + local pidfile + + pidfile=`grep -i PidFile $CONFDIR/slurmdbd.conf | grep -v '^ *#'` + if [ $? = 0 ]; then + pidfile=${pidfile##*=} + pidfile=${pidfile%#*} + else + pidfile=/var/run/slurmdbd.pid + fi + + pid=`pidof -o $$ -o $$PPID -o %PPID -x slurmdbd` + + if [ -f $pidfile ]; then + read rpid < $pidfile + if [ "$rpid" != "" -a "$pid" != "" ]; then + for i in $pid ; do + if [ "$i" = "$rpid" ]; then + echo $"slurmdbd (pid $pid) is running..." + return 0 + fi + done + elif [ "$rpid" != "" -a "$pid" = "" ]; then + echo $"slurmdbd is stopped" + return 1 + fi + + fi + + echo $"slurmdbd is stopped" + + return 3 +} + +# +# The pathname substitution in daemon command assumes prefix and +# exec_prefix are same. This is the default, unless the user requests +# otherwise. +# +# Any node can be a slurm controller and/or server. 
+# +case "$1" in + start) + start slurmdbd + ;; + stop) + stop slurmdbd + ;; + status) + slurmstatus slurmdbd + ;; + restart) + stop slurmdbd + start slurmdbd + ;; + condrestart) + if [ -f /var/lock/subsys/slurm ]; then + stop slurmdbd + start slurmdbd + fi + ;; + reconfig) + killproc slurmdbd -HUP + ;; + *) + echo "Usage: $0 {start|stop|status|restart|condrestart|reconfig}" + exit 1 + ;; +esac + +rc_exit diff --git a/etc/slurm.conf.example b/etc/slurm.conf.example index 9d48acbc5..000432c86 100644 --- a/etc/slurm.conf.example +++ b/etc/slurm.conf.example @@ -69,8 +69,13 @@ SlurmdDebug=3 JobCompType=jobcomp/none #JobCompLoc= JobAcctType=jobacct/none -#JobAcctLogfile= +#JobAcctLoc= #JobAcctFrequency= +DatabaseType=database/flatfile +#DatabaseHost=localhost +#DatabasePort=1234 +#DatabaseUser=mysql +#DatabasePass=mysql # # COMPUTE NODES NodeName=linux[1-32] Procs=1 State=UNKNOWN diff --git a/slurm.spec b/slurm.spec index bc9eaf08e..9448a9663 100644 --- a/slurm.spec +++ b/slurm.spec @@ -1,4 +1,4 @@ -# $Id: slurm.spec 13299 2008-02-19 19:46:58Z da $ +# $Id: slurm.spec 14109 2008-05-22 16:26:23Z jette $ # # Note that this package is not relocatable @@ -8,10 +8,11 @@ # --with aix %_with_aix 1 build aix-federation RPM # --with authd %_with_authd 1 build auth-authd RPM # --with auth_none %_with_auth_none 1 build auth-none RPM -# --with elan %_with_elan 1 build switch_elan RPM -# --without munge %_without_munge 1 don't build auth-munge RPM # --with bluegene %_with_bluegene 1 build bluegene RPM # --with debug %_with_debug 1 enable extra debugging within SLURM +# --with elan %_with_elan 1 build switch-elan RPM +# --without munge %_without_munge 1 don't build auth-munge RPM +# --without openssl %_without_openssl 1 don't require openssl RPM to be installed # --without pam %_without_pam 1 don't require pam-devel RPM to be installed # --without readline %_without_readline 1 don't require readline-devel RPM to be installed # --with sgijob %_with_sgijob 1 build proctrack-sgi-job RPM @@ -35,9 +36,12 @@ %slurm_without_opt auth_none %slurm_without_opt debug -# Build with munge by default on all platforms (disable with --without munge) +# Build with munge by default on all platforms (disable using --without munge) %slurm_with_opt munge +# Build with OpenSSL by default on all platforms (disable using --without openssl) +%slurm_with_opt openssl + # Use readline by default on all systems %slurm_with_opt readline @@ -60,29 +64,24 @@ %endif Name: slurm -Version: 1.2.27 +Version: 1.3.3 Release: 1 Summary: Simple Linux Utility for Resource Management License: GPL Group: System Environment/Base -Source: slurm-1.2.27.tar.bz2 +Source: slurm-1.3.3.tar.bz2 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release} URL: https://computing.llnl.gov/linux/slurm/ -BuildRequires: openssl-devel >= 0.9.6 openssl >= 0.9.6 -%description -SLURM is an open source, fault-tolerant, and highly -scalable cluster management and job scheduling system for Linux clusters -containing up to thousands of nodes. Components include machine status, -partition management, job management, and scheduling modules. 
+Requires: slurm-plugins %ifnos aix BuildRequires: ncurses-devel %endif %ifos linux -BuildRequires: python +BuildRequires: python %endif %if %{slurm_with pam} BuildRequires: pam-devel @@ -90,6 +89,15 @@ BuildRequires: pam-devel %if %{slurm_with readline} BuildRequires: readline-devel %endif +%if %{slurm_with openssl} +BuildRequires: openssl-devel >= 0.9.6 openssl >= 0.9.6 +%endif + +%description +SLURM is an open source, fault-tolerant, and highly +scalable cluster management and job scheduling system for Linux clusters +containing up to 65,536 nodes. Components include machine status, +partition management, job management, scheduling and accounting modules. # Allow override of sysconfdir via _slurm_sysconfdir. # Note 'global' instead of 'define' needed here to work around apparent @@ -117,6 +125,7 @@ BuildRequires: readline-devel %define _perlarch %(perl -e 'use Config; $T=$Config{installsitearch}; $P=$Config{installprefix}; $P1="$P/local"; $T =~ s/$P1//; $T =~ s/$P//; print $T;') %define _perldir %{_prefix}%{_perlarch} +%define _php_extdir %(php-config --extension-dir 2>/dev/null || echo %{_libdir}/php5) %package perlapi Summary: Perl API to SLURM. @@ -153,12 +162,12 @@ SLURM authentication module for Brent Chun's authd %endif %if %{slurm_with munge} -%package auth-munge -Summary: SLURM auth implementation using Chris Dunlap's Munge +%package munge +Summary: SLURM authentication and crypto implementation using Munge Group: System Environment/Base Requires: slurm munge BuildRequires: munge-devel munge-libs -%description auth-munge +%description munge SLURM authentication module for Chris Dunlap's Munge %endif @@ -181,6 +190,19 @@ BuildRequires: qsnetlibs SLURM switch plugin for Quadrics Elan3 or Elan4. %endif +%package slurmdbd +Summary: SLURM database daemon +Group: System Environment/Base +Requires: slurm-plugins +%description slurmdbd +SLURM database daemon + +%package plugins +Summary: SLURM plugins (loadable shared objects) +Group: System Environment/Base +%description plugins +SLURM plugins (loadable shared objects) + %package torque Summary: Torque/PBS wrappers for transitition from Torque/PBS to SLURM. Group: Development/System @@ -212,7 +234,7 @@ SLURM process tracking plugin for SGI job containers. 
############################################################################# %prep -%setup -n slurm-1.2.27 +%setup -n slurm-1.3.3 %build %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \ @@ -235,11 +257,11 @@ DESTDIR="$RPM_BUILD_ROOT" make install-contrib %ifos aix5.3 mv ${RPM_BUILD_ROOT}%{_bindir}/srun ${RPM_BUILD_ROOT}%{_sbindir} -mv ${RPM_BUILD_ROOT}%{_bindir}/slaunch ${RPM_BUILD_ROOT}%{_sbindir} %endif if [ -d /etc/init.d ]; then - install -D -m755 etc/init.d.slurm $RPM_BUILD_ROOT/etc/init.d/slurm + install -D -m755 etc/init.d.slurm $RPM_BUILD_ROOT/etc/init.d/slurm + install -D -m755 etc/init.d.slurmdbd $RPM_BUILD_ROOT/etc/init.d/slurmdbd fi install -D -m644 etc/slurm.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.conf.example install -D -m755 etc/slurm.epilog.clean ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.epilog.clean @@ -251,18 +273,15 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/*.{a,la} LIST=./slurm.files touch $LIST if [ -d /etc/init.d ]; then - echo "/etc/init.d/slurm" >> $LIST + echo "/etc/init.d/slurm" >> $LIST fi -test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so && - echo %{_libdir}/slurm/task_affinity.so >> $LIST -# Build file lists for optional plugin packages -for plugin in auth_munge auth_authd; do - LIST=./${plugin}.files - touch $LIST - test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/${plugin}.so && - echo %{_libdir}/slurm/${plugin}.so > $LIST -done +LIST=./munge.files +touch $LIST +test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/auth_munge.so && + echo %{_libdir}/slurm/auth_munge.so >> $LIST +test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_munge.so && + echo %{_libdir}/slurm/crypto_munge.so >> $LIST LIST=./switch_elan.files touch $LIST @@ -290,11 +309,40 @@ test -f $RPM_BUILD_ROOT/%{_perldir}/Slurm.pm && echo "%{_perldir}/Slurm.pm" >> $LIST test -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/Slurm.so && echo "%{_perldir}/auto/Slurm/Slurm.so" >> $LIST +test -f $RPM_BUILD_ROOT/%{_mandir}/man3/Slurm.3 && +echo "%{_mandir}/man3/Slurm.3" >> $LIST test -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/Slurm.bs && echo "%{_perldir}/auto/Slurm/Slurm.bs" >> $LIST test -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurm/autosplit.ix && echo "%{_perldir}/auto/Slurm/autosplit.ix" >> $LIST +LIST=./slurmdbd.files +touch $LIST +if [ -d /etc/init.d ]; then + echo "/etc/init.d/slurmdbd" >> $LIST +fi + +LIST=./plugins.files +test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so && + echo %{_libdir}/slurm/task_affinity.so >> $LIST +test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so && + echo %{_libdir}/slurm/crypto_openssl.so >> $LIST +test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/accounting_storage_gold.so + echo %{_libdir}/slurm/accounting_storage_gold.so >> $LIST + +# Build file lists for optional plugin packages +for plugin in auth_authd; do + LIST=./${plugin}.files + touch $LIST + test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/${plugin}.so && + echo %{_libdir}/slurm/${plugin}.so > $LIST +done + + + + + + LIST=./torque.files touch $LIST echo "%{_bindir}/pbsnodes" >> $LIST @@ -342,7 +390,6 @@ rm -rf $RPM_BUILD_ROOT %doc RELEASE_NOTES %doc DISCLAIMER %doc COPYING -%doc etc/slurm.conf.example %doc doc/html %{_bindir}/s* %{_sbindir}/slurmctld @@ -350,43 +397,17 @@ rm -rf $RPM_BUILD_ROOT %{_sbindir}/slurmstepd %ifos aix5.3 %{_sbindir}/srun -%{_sbindir}/slaunch %endif %{_libdir}/*.so* %{_libdir}/slurm/src/* %{_mandir}/man1/* %{_mandir}/man5/slurm.* %{_mandir}/man5/wiki.* -%{_mandir}/man8/* +%{_mandir}/man8/slurmctld.* +%{_mandir}/man8/slurmd.* +%{_mandir}/man8/slurmstepd* +%{_mandir}/man8/spank* 
%dir %{_sysconfdir} -%dir %{_libdir}/slurm -%{_libdir}/slurm/checkpoint_none.so -%{_libdir}/slurm/checkpoint_ompi.so -%{_libdir}/slurm/jobacct_gold.so -%{_libdir}/slurm/jobacct_linux.so -%{_libdir}/slurm/jobacct_none.so -%{_libdir}/slurm/jobcomp_none.so -%{_libdir}/slurm/jobcomp_filetxt.so -%{_libdir}/slurm/jobcomp_script.so -%{_libdir}/slurm/proctrack_pgid.so -%{_libdir}/slurm/proctrack_linuxproc.so -%{_libdir}/slurm/sched_backfill.so -%{_libdir}/slurm/sched_builtin.so -%{_libdir}/slurm/sched_hold.so -%{_libdir}/slurm/sched_gang.so -%{_libdir}/slurm/sched_wiki.so -%{_libdir}/slurm/sched_wiki2.so -%{_libdir}/slurm/select_cons_res.so -%{_libdir}/slurm/select_linear.so -%{_libdir}/slurm/switch_none.so -%{_libdir}/slurm/mpi_none.so -%{_libdir}/slurm/mpi_mpich1_p4.so -%{_libdir}/slurm/mpi_mpich1_shmem.so -%{_libdir}/slurm/mpi_mpichgm.so -%{_libdir}/slurm/mpi_mpichmx.so -%{_libdir}/slurm/mpi_mvapich.so -%{_libdir}/slurm/mpi_lam.so -%{_libdir}/slurm/task_none.so %dir %{_libdir}/slurm/src %config %{_sysconfdir}/slurm.conf.example %config %{_sysconfdir}/slurm.epilog.clean @@ -411,7 +432,7 @@ rm -rf $RPM_BUILD_ROOT ############################################################################# %if %{slurm_with munge} -%files -f auth_munge.files auth-munge +%files -f munge.files munge %defattr(-,root,root) %endif ############################################################################# @@ -438,6 +459,55 @@ rm -rf $RPM_BUILD_ROOT %endif ############################################################################# +%files -f slurmdbd.files slurmdbd +%defattr(-,root,root) +%{_sbindir}/slurmdbd +%{_mandir}/man5/slurmdbd.* +%{_mandir}/man8/slurmdbd.* +############################################################################# + +%files -f plugins.files plugins +%defattr(-,root,root) +%dir %{_libdir}/slurm +%{_libdir}/slurm/accounting_storage_filetxt.so +%{_libdir}/slurm/accounting_storage_mysql.so +%{_libdir}/slurm/accounting_storage_none.so +%{_libdir}/slurm/accounting_storage_pgsql.so +%{_libdir}/slurm/accounting_storage_slurmdbd.so +%{_libdir}/slurm/checkpoint_none.so +%{_libdir}/slurm/checkpoint_ompi.so +%{_libdir}/slurm/checkpoint_xlch.so +%{_libdir}/slurm/jobacct_gather_aix.so +%{_libdir}/slurm/jobacct_gather_linux.so +%{_libdir}/slurm/jobacct_gather_none.so +%{_libdir}/slurm/jobcomp_none.so +%{_libdir}/slurm/jobcomp_filetxt.so +%{_libdir}/slurm/jobcomp_mysql.so +%{_libdir}/slurm/jobcomp_pgsql.so +%{_libdir}/slurm/jobcomp_script.so +%{_libdir}/slurm/jobcomp_slurmdbd.so +%{_libdir}/slurm/proctrack_pgid.so +%{_libdir}/slurm/proctrack_linuxproc.so +%{_libdir}/slurm/sched_backfill.so +%{_libdir}/slurm/sched_builtin.so +%{_libdir}/slurm/sched_hold.so +%{_libdir}/slurm/sched_gang.so +%{_libdir}/slurm/sched_wiki.so +%{_libdir}/slurm/sched_wiki2.so +%{_libdir}/slurm/select_cons_res.so +%{_libdir}/slurm/select_linear.so +%{_libdir}/slurm/switch_none.so +%{_libdir}/slurm/mpi_lam.so +%{_libdir}/slurm/mpi_mpich1_p4.so +%{_libdir}/slurm/mpi_mpich1_shmem.so +%{_libdir}/slurm/mpi_mpichgm.so +%{_libdir}/slurm/mpi_mpichmx.so +%{_libdir}/slurm/mpi_mvapich.so +%{_libdir}/slurm/mpi_none.so +%{_libdir}/slurm/mpi_openmpi.so +%{_libdir}/slurm/task_none.so +############################################################################# + %files -f torque.files torque %defattr(-,root,root) ############################################################################# @@ -460,6 +530,11 @@ rm -rf $RPM_BUILD_ROOT # /etc/init.d/slurm stop # fi #fi +#if [ -x /etc/init.d/slurmdbd ]; then +# if /etc/init.d/slurmdbd status | grep 
-q running; then +# /etc/init.d/slurmdbd stop +# fi +#fi %post if [ -x /sbin/ldconfig ]; then @@ -484,6 +559,12 @@ if [ "$1" = 0 ]; then /etc/init.d/slurm stop fi fi + if [ -x /etc/init.d/slurmdbd ]; then + [ -x /sbin/chkconfig ] && /sbin/chkconfig --del slurmdbd + if /etc/init.d/slurmdbd status | grep -q running; then + /etc/init.d/slurmdbd stop + fi + fi fi %postun diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in index 53a6d9079..04fc802e2 100644 --- a/slurm/slurm.h.in +++ b/slurm/slurm.h.in @@ -2,10 +2,11 @@ * slurm.h - Definitions for all of the SLURM RPCs ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, * Joey Ekstrom <ekstrom1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -83,9 +84,11 @@ BEGIN_C_DECLS #if HAVE_INTTYPES_H # include <inttypes.h> /* for uint16_t, uint32_t definitions */ #endif +#include <stdbool.h> #include <stdio.h> /* for FILE definitions */ +#include <sys/types.h> /* for uid_t definition */ #include <time.h> /* for time_t definitions */ -#include <stdbool.h> +#include <unistd.h> #ifdef CRAPPY_COMPILER /* @@ -142,6 +145,14 @@ BEGIN_C_DECLS typedef struct jobacctinfo *jobacctinfo_t; /* opaque data type */ #endif +/* Define allocation_msg_thread_t below to avoid including extraneous + slurm headers */ +#ifndef __allocation_msg_thread_t_defined +# define __allocation_msg_thread_t_defined + typedef struct allocation_msg_thread *allocation_msg_thread_t; +#endif + + /*****************************************************************************\ * DEFINITIONS FOR VERSION MANAGEMENT \*****************************************************************************/ @@ -209,6 +220,8 @@ enum job_state_reason { WAIT_PART_STATE, /* requested partition is down */ WAIT_HELD, /* job is held, priority==0 */ WAIT_TIME, /* job waiting for specific begin time */ + WAIT_LICENSES, /* job is waiting for licenses */ + WAIT_ASSOC_LIMIT, /* user/bank job limit reached */ WAIT_TBD1, WAIT_TBD2, FAIL_DOWN_PARTITION, /* partition for job is DOWN */ @@ -218,7 +231,8 @@ enum job_state_reason { FAIL_LAUNCH, /* unable to launch job */ FAIL_EXIT_CODE, /* exit code was non-zero */ FAIL_TIMEOUT, /* reached end of time limit */ - FAIL_INACTIVE_LIMIT /* reached slurm InactiveLimit */ + FAIL_INACTIVE_LIMIT, /* reached slurm InactiveLimit */ + FAIL_BANK_ACCOUNT /* invalid bank account */ }; enum job_acct_types { @@ -253,6 +267,7 @@ enum select_data_type { SELECT_DATA_ROTATE, /* data-> uint16_t rotate */ SELECT_DATA_CONN_TYPE, /* data-> uint16_t connection_type */ SELECT_DATA_BLOCK_ID, /* data-> char *bg_block_id */ + SELECT_DATA_NODES, /* data-> char *nodes */ SELECT_DATA_IONODES, /* data-> char *ionodes */ SELECT_DATA_NODE_CNT, /* data-> uint32_t node_cnt */ SELECT_DATA_ALTERED, /* data-> uint16_t altered */ @@ -261,7 +276,7 @@ enum select_data_type { SELECT_DATA_LINUX_IMAGE,/* data-> char *linuximage */ SELECT_DATA_MLOADER_IMAGE,/* data-> char *mloaderimage */ SELECT_DATA_RAMDISK_IMAGE,/* data-> char *ramdiskimage */ - SELECT_DATA_REBOOT /* data-> uint16_t reboot */ + SELECT_DATA_REBOOT, /* data-> uint16_t reboot */ }; enum select_print_mode { @@ -269,6 +284,7 @@ enum select_print_mode { 
SELECT_PRINT_DATA, /* Print just the data */ SELECT_PRINT_MIXED, /* Print "field=value" */ SELECT_PRINT_BG_ID, /* Print just the BG_ID */ + SELECT_PRINT_NODES, /* Print the nodelist */ SELECT_PRINT_CONNECTION,/* Print just the CONNECTION type */ SELECT_PRINT_ROTATE, /* Print just the ROTATE */ SELECT_PRINT_GEOMETRY, /* Print just the GEO */ @@ -278,21 +294,24 @@ enum select_print_mode { SELECT_PRINT_LINUX_IMAGE,/* Print just the LINUX IMAGE */ SELECT_PRINT_MLOADER_IMAGE,/* Print just the MLOADER IMAGE */ SELECT_PRINT_RAMDISK_IMAGE,/* Print just the RAMDISK IMAGE */ - SELECT_PRINT_REBOOT /* Print just the REBOOT */ + SELECT_PRINT_REBOOT, /* Print just the REBOOT */ }; enum select_node_cnt { SELECT_GET_NODE_SCALING, /* Give scaling factor for node count */ SELECT_APPLY_NODE_MIN_OFFSET, /* Apply min offset to variable */ SELECT_APPLY_NODE_MAX_OFFSET, /* Apply max offset to variable */ - SELECT_SET_NODE_CNT /* Set altered node cnt */ + SELECT_SET_NODE_CNT, /* Set altered node cnt */ + SELECT_SET_BP_CNT /* Given a node cnt return the + * base partition count */ }; /* jobacct data types */ enum jobacct_data_type { JOBACCT_DATA_TOTAL, /* data-> jobacctinfo_t */ JOBACCT_DATA_PIPE, /* data-> file descriptor */ - JOBACCT_DATA_RUSAGE, /* data-> struct rusage */ + JOBACCT_DATA_RUSAGE, /* data-> rusage set user_cpu_sec, + * user_cpu_usec, sys_cpu_sec, sys_cpu_usec */ JOBACCT_DATA_MAX_VSIZE, /* data-> uint32_t vsize */ JOBACCT_DATA_MAX_VSIZE_ID, /* data-> jobacct_id_t vsize */ JOBACCT_DATA_TOT_VSIZE, /* data-> uint32_t vsize */ @@ -328,6 +347,10 @@ typedef enum task_dist_states { SLURM_DIST_UNKNOWN /* unknown dist */ } task_dist_states_t; +/* Open stdout/err file mode, 0 for system default (JobFileAppend) */ +#define OPEN_MODE_APPEND 1 +#define OPEN_MODE_TRUNCATE 2 + typedef enum cpu_bind_type { /* cpu binding type from --cpu_bind=... */ /* the following auto-binding flags are mutually exclusive */ CPU_BIND_TO_THREADS= 0x01, /* =threads */ @@ -369,14 +392,16 @@ enum node_states { }; #define NODE_STATE_BASE 0x00ff #define NODE_STATE_FLAGS 0xff00 -#define NODE_RESUME 0x0100 /* Restore a DRAINED, DRAINING, or - * DOWN node to service (e.g. IDLE or - * ALLOCATED). Used in +#define NODE_RESUME 0x0100 /* Restore a DRAINED, DRAINING, DOWN + * or FAILING node to service (e.g. + * IDLE or ALLOCATED). 
Used in * slurm_update_node() request */ -#define NODE_STATE_DRAIN 0x0200 /* node not be be allocated work */ +#define NODE_STATE_DRAIN 0x0200 /* node do not new allocated work */ #define NODE_STATE_COMPLETING 0x0400 /* node is completing allocated job */ #define NODE_STATE_NO_RESPOND 0x0800 /* node is not responding */ #define NODE_STATE_POWER_SAVE 0x1000 /* node in power save mode */ +#define NODE_STATE_FAIL 0x2000 /* node is failing, do not allocate + * new work */ /* used to define the size of the credential.signature size * used to define the key size of the io_stream_header_t @@ -423,53 +448,88 @@ typedef enum select_type_plugin_info { #define TASK_PARAM_CPUSETS 0x0001 #define TASK_PARAM_SCHED 0x0002 -enum part_shared { - SHARED_NO = 0, /* Nodes never shared in partition */ - SHARED_YES, /* Nodes possible to share in partition */ - SHARED_FORCE, /* Nodes always shares in partition */ - SHARED_EXCLUSIVE /* Nodes never shared even with cons_res */ -}; +#define SHARED_FORCE 0x8000 /*****************************************************************************\ * PROTOCOL DATA STRUCTURE DEFINITIONS \*****************************************************************************/ typedef struct job_descriptor { /* For submit, allocate, and update requests */ + char *account; /* charge to specified account */ + uint16_t acctg_freq; /* accounting polling interval (seconds) */ + char *alloc_node; /* node making resource allocation request + * NOTE: Normally set by slurm_submit* or + * slurm_allocate* function */ + uint16_t alloc_resp_port; /* port to send allocation confirmation to */ + uint32_t alloc_sid; /* local sid making resource allocation request + * NOTE: Normally set by slurm_submit* or + * slurm_allocate* function */ + uint32_t argc; /* number of arguments to the script */ + char **argv; /* arguments to the script */ + time_t begin_time; /* delay initiation until this time */ + char *comment; /* arbitrary comment (used by Moab scheduler) */ uint16_t contiguous; /* 1 if job requires contiguous nodes, * 0 otherwise,default=0 */ - uint16_t kill_on_node_fail; /* 1 if node failure to kill job, - * 0 otherwise,default=1 */ + char *dependency; /* syncrhonize job execution with other jobs */ char **environment; /* environment variables to set for job, * name=value pairs, one per line */ - uint16_t env_size; /* element count in environment */ + uint32_t env_size; /* element count in environment */ + char *err; /* pathname of stderr */ + char *exc_nodes; /* comma separated list of nodes excluded + * from job's allocation, default NONE */ char *features; /* comma separated list of required features, * default NONE */ + uint32_t group_id; /* group to assume, if run as root. 
*/ uint16_t immediate; /* 1 if allocate to run or fail immediately, * 0 if to be queued awaiting resources */ + char *in; /* pathname of stdin */ uint32_t job_id; /* job ID, default set by SLURM */ + uint16_t kill_on_node_fail; /* 1 if node failure to kill job, + * 0 otherwise,default=1 */ + char *licenses; /* licenses required by the job */ + uint16_t mail_type; /* see MAIL_JOB_ definitions above */ + char *mail_user; /* user to receive notification */ char *name; /* name of the job, default "" */ - /* job constraints: */ - uint16_t job_min_procs; /* minimum processors per node, default=0 */ - uint16_t job_min_sockets; /* minimum sockets per node, default=0 */ - uint16_t job_min_cores; /* minimum cores per processor, default=0 */ - uint16_t job_min_threads; /* minimum threads per core, default=0 */ - uint32_t job_min_memory; /* minimum real memory per node, default=0 */ - uint32_t job_max_memory; /* maximum real memory per node, default=0 */ - uint32_t job_min_tmp_disk; /* minimum tmp disk per node, default=0 */ + char *network; /* network use spec */ + uint16_t nice; /* requested priority change, + * NICE_OFFSET == no change */ + uint32_t num_tasks; /* number of tasks to be started, for batch only */ + uint8_t open_mode; /* out/err open mode truncate or append, + * see OPEN_MODE_* */ + uint16_t other_port; /* port to send various notification msg to */ + char *out; /* pathname of stdout */ + uint8_t overcommit; /* over subscribe resources, for batch only */ char *partition; /* name of requested partition, * default in SLURM config */ + uint16_t plane_size; /* plane size when task_dist = + SLURM_DIST_PLANE */ uint32_t priority; /* relative priority of the job, * explicitly set only for user root, * 0 == held (don't initiate) */ + char *resp_host; /* NOTE: Set by slurmctld */ char *req_nodes; /* comma separated list of required nodes * default NONE */ - char *exc_nodes; /* comma separated list of nodes excluded - * from job's allocation, default NONE */ + uint16_t requeue; /* enable or disable job requeue option */ + + char *script; /* the actual job script, default NONE */ uint16_t shared; /* 1 if job can share nodes with other jobs, * 0 if job needs exclusive access to the node, - * or NO_VAL to accept the system default. */ + * or NO_VAL to accept the system default. + * SHARED_FORCE to eliminate user control. 
*/ + uint16_t task_dist; /* see enum task_dist_state */ uint32_t time_limit; /* maximum run time in minutes, default is * partition limit */ + uint32_t user_id; /* set only if different from current UID, + * can only be explicitly set by user root */ + char *work_dir; /* pathname of working directory */ + + /* job constraints: */ + uint16_t job_min_procs; /* minimum processors per node, default=0 */ + uint16_t job_min_sockets; /* minimum sockets per node, default=0 */ + uint16_t job_min_cores; /* minimum cores per processor, default=0 */ + uint16_t job_min_threads; /* minimum threads per core, default=0 */ + uint32_t job_min_memory; /* minimum real memory per node, default=0 */ + uint32_t job_min_tmp_disk; /* minimum tmp disk per node, default=0 */ uint32_t num_procs; /* total count of processors required, * default=0 */ uint32_t min_nodes; /* minimum number of nodes required by job, @@ -492,49 +552,7 @@ typedef struct job_descriptor { /* For submit, allocate, and update requests */ uint16_t ntasks_per_node;/* number of tasks to invoke on each node */ uint16_t ntasks_per_socket;/* number of tasks to invoke on each socket */ uint16_t ntasks_per_core;/* number of tasks to invoke on each core */ - char *script; /* the actual job script, default NONE */ - char **argv; /* arguments to the script */ - uint16_t argc; /* number of arguments to the script */ - char *err; /* pathname of stderr */ - char *in; /* pathname of stdin */ - char *out; /* pathname of stdout */ - uint32_t user_id; /* set only if different from current UID, - * can only be explicitly set by user root */ - uint32_t group_id; /* group to assume, if run as root. */ - char *work_dir; /* pathname of working directory */ - char *alloc_node; /* node making resource allocation request - * NOTE: Normally set by slurm_submit* or - * slurm_allocate* function */ - uint32_t alloc_sid; /* local sid making resource allocation request - * NOTE: Normally set by slurm_submit* or - * slurm_allocate* function */ - /* If the requested allocation is not immediately available, - * The controller sends the RESPONSE_RESOURCE_ALLOCATION message to - * the address designated by the alloc_hostname and alloc_port. - * All other messages (SRUN_PING, SRUN_TIMEOUT, etc.) are sent to - * the address designated by other_hostname/other_port. - */ - char *alloc_resp_hostname; - uint16_t alloc_resp_port; - char *other_hostname; - uint16_t other_port; - - uint32_t dependency; /* defer until specified job completes */ - uint16_t overcommit; /* over subscribe resources, for batch only */ - uint32_t num_tasks; /* number of tasks to be started, for batch only */ - uint16_t nice; /* requested priority change, - * NICE_OFFSET == no change */ - char *account; /* charge to specified account */ - char *network; /* network use spec */ - char *comment; /* arbitrary comment (used by Moab scheduler) */ - uint16_t task_dist; /* see enum task_dist_state */ - uint16_t plane_size; /* plane size when task_dist = - SLURM_DIST_PLANE */ - time_t begin_time; /* delay initiation until this time */ - uint16_t mail_type; /* see MAIL_JOB_ definitions above */ - char *mail_user; /* user to receive notification */ - uint16_t no_requeue; /* disable job requeue option */ /* * The following parameters are only meaningful on a Blue Gene * system at present. Some will be of value on other system. 
Don't remove these @@ -560,27 +578,62 @@ typedef struct job_descriptor { /* For submit, allocate, and update requests */ } job_desc_msg_t; typedef struct job_info { - uint32_t job_id; /* job ID */ - char *name; /* name of the job */ - uint16_t batch_flag; /* 1 if batch: queued job with script */ - uint32_t alloc_sid; /* local sid making resource alloc */ + char *account; /* charge to specified account */ char *alloc_node; /* local node making resource alloc */ - uint32_t user_id; /* user the job runs as */ + uint32_t alloc_sid; /* local sid making resource alloc */ + uint16_t batch_flag; /* 1 if batch: queued job with script */ + char *command; /* command to be executed */ + char *comment; /* arbitrary comment (used by Moab scheduler) */ + uint16_t contiguous; /* 1 if job requires contiguous nodes */ + uint16_t cpus_per_task; /* number of processors required for each task */ + char *dependency; /* syncrhonize job execution with other jobs */ + time_t end_time; /* time of termination, actual or expected */ + char *exc_nodes; /* comma separated list of excluded nodes */ + int *exc_node_inx; /* excluded list index pairs into node_table: + * start_range_1, end_range_1, + * start_range_2, .., -1 */ + uint32_t exit_code; /* exit code for job (status from wait call) */ + char *features; /* comma separated list of required features */ uint32_t group_id; /* group job sumitted as */ + uint32_t job_id; /* job ID */ + uint16_t job_min_cores; /* minimum cores per processor, default=0 */ + uint32_t job_min_memory; /* minimum real memory per node, default=0 */ + uint16_t job_min_procs; /* minimum processors per node, default=0 */ + uint16_t job_min_sockets; /* minimum sockets per node, default=0 */ + uint16_t job_min_threads; /* minimum threads per core, default=0 */ + uint32_t job_min_tmp_disk; /* minimum tmp disk per node, default=0 */ uint16_t job_state; /* state of the job, see enum job_states */ - uint32_t time_limit; /* maximum run time in minutes or INFINITE */ - time_t submit_time; /* time of job submission */ - time_t start_time; /* time execution begins, actual or expected */ - time_t end_time; /* time of termination, actual or expected */ - time_t suspend_time; /* time job last suspended or resumed */ - time_t pre_sus_time; /* time job ran prior to last suspend */ - uint32_t priority; /* relative priority of the job, - * 0=held, 1=required nodes DOWN/DRAINED */ + char *licenses; /* licenses required by the job */ + char *name; /* name of the job */ + char *network; /* network specification */ char *nodes; /* list of nodes allocated to job */ int *node_inx; /* list index pairs into node_table for *nodes: * start_range_1, end_range_1, * start_range_2, .., -1 */ + uint16_t ntasks_per_core;/* number of tasks to invoke on each core */ + uint16_t ntasks_per_node;/* number of tasks to invoke on each node */ + uint16_t ntasks_per_socket;/* number of tasks to invoke on each socket */ char *partition; /* name of assigned partition */ + time_t pre_sus_time; /* time job ran prior to last suspend */ + uint32_t priority; /* relative priority of the job, + * 0=held, 1=required nodes DOWN/DRAINED */ + char *req_nodes; /* comma separated list of required nodes */ + int *req_node_inx; /* required list index pairs into node_table: + * start_range_1, end_range_1, + * start_range_2, .., -1 */ + uint16_t requeue; /* enable or disable job requeue option */ + select_jobinfo_t select_jobinfo; /* opaque data type, + * process using select_g_get_jobinfo() */ + uint16_t shared; /* 1 if job can share nodes with other jobs 
*/ + time_t start_time; /* time execution begins, actual or expected */ + uint16_t state_reason; /* reason job still pending or failed, see + * slurm.h:enum job_state_reason */ + time_t submit_time; /* time of job submission */ + time_t suspend_time; /* time job last suspended or resumed */ + uint32_t time_limit; /* maximum run time in minutes or INFINITE */ + uint32_t user_id; /* user the job runs as */ + char *work_dir; /* pathname of working directory */ + uint16_t num_cpu_groups;/* elements in below cpu arrays */ uint32_t *cpus_per_node;/* cpus per node */ uint32_t *cpu_count_reps;/* how many nodes have same cpu count */ @@ -593,38 +646,6 @@ typedef struct job_info { uint16_t max_cores; /* maximum number of cores per cpu */ uint16_t min_threads; /* minimum number of threads per core */ uint16_t max_threads; /* maximum number of threads per core */ - uint16_t shared; /* 1 if job can share nodes with other jobs */ - uint16_t contiguous; /* 1 if job requires contiguous nodes */ - uint16_t cpus_per_task; /* number of processors required for each task */ - uint16_t ntasks_per_node;/* number of tasks to invoke on each node */ - uint16_t ntasks_per_socket;/* number of tasks to invoke on each socket */ - uint16_t ntasks_per_core;/* number of tasks to invoke on each core */ - /* job constraints: */ - uint16_t job_min_procs; /* minimum processors per node, default=0 */ - uint16_t job_min_sockets; /* minimum sockets per node, default=0 */ - uint16_t job_min_cores; /* minimum cores per processor, default=0 */ - uint16_t job_min_threads; /* minimum threads per core, default=0 */ - uint32_t job_min_memory; /* minimum real memory per node, default=0 */ - uint32_t job_max_memory; /* maximum real memory per node, default=0 */ - uint32_t job_min_tmp_disk; /* minimum tmp disk per node, default=0 */ - char *req_nodes; /* comma separated list of required nodes */ - int *req_node_inx; /* required list index pairs into node_table: - * start_range_1, end_range_1, - * start_range_2, .., -1 */ - char *exc_nodes; /* comma separated list of excluded nodes */ - int *exc_node_inx; /* excluded list index pairs into node_table: - * start_range_1, end_range_1, - * start_range_2, .., -1 */ - char *features; /* comma separated list of required features */ - uint32_t dependency; /* defer until specified job completes */ - uint32_t exit_code; /* exit code for job (status from wait call) */ - char *account; /* charge to specified account */ - uint16_t state_reason; /* reason job still pending or failed, see - * slurm.h:enum job_state_reason */ - char *network; /* network specification */ - char *comment; /* arbitrary comment (used by Moab scheduler) */ - select_jobinfo_t select_jobinfo; /* opaque data type, - * process using select_g_get_jobinfo() */ } job_info_t; typedef struct job_info_msg { @@ -634,21 +655,22 @@ typedef struct job_info_msg { } job_info_msg_t; typedef struct slurm_step_layout { - uint16_t node_cnt; /* node count */ + uint32_t node_cnt; /* node count */ uint32_t task_cnt; /* total number of tasks in the step */ + char *node_list; /* list of nodes in step */ /* Array of length "node_cnt". Each element of the array - is the number of tasks assigned to the corresponding node */ + * is the number of tasks assigned to the corresponding node */ uint16_t *tasks; /* Array (of length "node_cnt") of task ID arrays. The length - of each subarray is designated by the corresponding value in - the tasks array. */ + * of each subarray is designated by the corresponding value in + * the tasks array. 
*/ uint32_t **tids; /* host id => task id mapping */ uint16_t task_dist; /* see enum task_dist_state */ uint16_t plane_size; /* plane size when task_dist = - SLURM_DIST_PLANE */ + * SLURM_DIST_PLANE */ } slurm_step_layout_t; typedef struct slurm_step_io_fds { @@ -678,6 +700,33 @@ typedef struct task_ext_msg { uint32_t return_code; } task_exit_msg_t; +typedef struct srun_ping_msg { + uint32_t job_id; /* slurm job_id */ + uint32_t step_id; /* step_id or NO_VAL */ +} srun_ping_msg_t; + +typedef struct srun_job_complete_msg { + uint32_t job_id; /* slurm job_id */ + uint32_t step_id; /* step_id or NO_VAL */ +} srun_job_complete_msg_t; + +typedef struct srun_timeout_msg { + uint32_t job_id; /* slurm job_id */ + uint32_t step_id; /* step_id or NO_VAL */ + time_t timeout; /* when job scheduled to be killed */ +} srun_timeout_msg_t; + +typedef struct srun_user_msg { + uint32_t job_id; /* slurm job_id */ + char *msg; /* message to user's srun */ +} srun_user_msg_t; + +typedef struct srun_node_fail_msg { + uint32_t job_id; /* slurm job_id */ + uint32_t step_id; /* step_id or NO_VAL */ + char *nodelist; /* name of failed node(s) */ +} srun_node_fail_msg_t; + typedef struct { uint32_t job_id; /* job ID */ uid_t uid; @@ -692,19 +741,29 @@ typedef struct { SLURM_DIST_PLANE */ char *node_list; /* list of required nodes */ char *network; /* network use spec */ + uint16_t immediate; /* 1 if allocate to run or fail immediately, + * 0 if to be queued awaiting resources */ + uint16_t exclusive; /* 1 if CPUs not shared with other steps */ bool overcommit; /* "true" to allow the allocation of more tasks to a node than available processors, "false" to accept at most one task per processor. "false" by default. */ + uint16_t ckpt_interval; /* checkpoint interval in minutes */ + char *ckpt_path; /* path to store checkpoint image files */ + uint16_t verbose_level; /* for extra logging decisions in step + launch api */ + + uint16_t mem_per_task; /* memory required per task (MB), 0=no limit */ } slurm_step_ctx_params_t; typedef struct { - uint16_t argc; + uint32_t argc; char **argv; - uint16_t envc; + uint32_t envc; char **env; char *cwd; bool user_managed_io; + uint32_t msg_timeout; /* timeout set for sending message */ /* START - only used if user_managed_io is false */ bool buffered_stdio; @@ -726,6 +785,9 @@ typedef struct { uint16_t mem_bind_type; /* use mem_bind_type_t */ char *mem_bind; + uint16_t max_sockets; + uint16_t max_cores; + uint16_t max_threads; uint16_t cpus_per_task; uint16_t ntasks_per_node; uint16_t ntasks_per_socket; @@ -734,6 +796,10 @@ typedef struct { uint16_t plane_size; char *mpi_plugin_name; + uint8_t open_mode; + uint16_t acctg_freq; + bool pty; + char *ckpt_path; } slurm_step_launch_params_t; typedef struct { @@ -741,6 +807,13 @@ typedef struct { void (*task_finish)(task_exit_msg_t *); } slurm_step_launch_callbacks_t; +typedef struct { + void (*ping)(srun_ping_msg_t *); + void (*job_complete)(srun_job_complete_msg_t *); + void (*timeout)(srun_timeout_msg_t *); + void (*user_msg)(srun_user_msg_t *); + void (*node_fail)(srun_node_fail_msg_t *); +} slurm_allocation_callbacks_t; typedef struct { uint32_t job_id; /* job ID */ @@ -756,6 +829,8 @@ typedef struct { int *node_inx; /* list index pairs into node_table for *nodes: * start_range_1, end_range_1, * start_range_2, .., -1 */ + uint16_t ckpt_interval; /* checkpoint interval in minutes */ + char *ckpt_path; /* path to store checkpoint image files */ } job_step_info_t; typedef struct job_step_info_response_msg { @@ -776,7 +851,9 @@ typedef 
struct node_info { uint32_t real_memory; /* configured MB of real memory on the node */ uint32_t tmp_disk; /* configured MB of total disk in TMP_FS */ uint32_t weight; /* arbitrary priority of node for scheduling */ + char *arch; /* computer architecture */ char *features; /* arbitrary list of features for node */ + char *os; /* operating system currently running */ char *reason; /* reason for node being DOWN or DRAINING */ } node_info_t; @@ -791,24 +868,26 @@ typedef struct job_alloc_info_msg { } job_alloc_info_msg_t; typedef struct partition_info { - char *name; /* name of the partition */ - uint32_t max_time; /* minutes or INFINITE */ - uint32_t max_nodes; /* per job or INFINITE */ - uint32_t min_nodes; /* per job */ - uint32_t total_nodes; /* total number of nodes in the partition */ - uint32_t total_cpus; /* total number of cpus in the partition */ - uint16_t node_scaling; /* select plugin node scaling factor */ + char *allow_groups; /* comma delimited list of groups, + * null indicates all */ uint16_t default_part; /* 1 if this is default partition */ + uint16_t disable_root_jobs; /* 1 if user root jobs disabled */ uint16_t hidden; /* 1 if partition is hidden by default */ - uint16_t root_only; /* 1 if allocate must come for user root */ - uint16_t shared; /* See part_shared above */ - uint16_t state_up; /* 1 if state is up, 0 if down */ - char *nodes; /* list names of nodes in partition */ + uint32_t max_nodes; /* per job or INFINITE */ + uint16_t max_share; /* number of jobs to gang schedule */ + uint32_t max_time; /* minutes or INFINITE */ + uint32_t min_nodes; /* per job */ + char *name; /* name of the partition */ int *node_inx; /* list index pairs into node_table: * start_range_1, end_range_1, * start_range_2, .., -1 */ - char *allow_groups; /* comma delimited list of groups, - * null indicates all */ + uint16_t node_scaling; /* select plugin node scaling factor */ + char *nodes; /* list names of nodes in partition */ + uint16_t priority; /* scheduling priority for jobs */ + uint16_t root_only; /* 1 if allocate must come for user root */ + uint16_t state_up; /* 1 if state is up, 0 if down */ + uint32_t total_cpus; /* total number of cpus in the partition */ + uint32_t total_nodes; /* total number of nodes in the partition */ } partition_info_t; typedef struct delete_partition_msg { @@ -848,46 +927,81 @@ typedef struct partition_info_msg { typedef struct slurm_ctl_conf { time_t last_update; /* last update time of the build parameters */ + uint16_t accounting_storage_enforce; /* job requires valid association: + * user/account/partition/cluster */ + char *accounting_storage_host; /* accounting storage host */ + char *accounting_storage_loc; /* accounting storage (db table) + * location */ + char *accounting_storage_pass; /* accounting storage + password */ + uint32_t accounting_storage_port;/* node accountinging storage port */ + char *accounting_storage_type; /* accounting storage type */ + char *accounting_storage_user; /* accounting storage user */ char *authtype; /* authentication type */ char *backup_addr; /* comm path of slurmctld secondary server */ char *backup_controller;/* name of slurmctld secondary server */ + time_t boot_time; /* time slurmctld last booted */ uint16_t cache_groups; /* cache /etc/groups to avoid initgroups(2) */ char *checkpoint_type; /* checkpoint plugin type */ + char *cluster_name; /* general name of the entire cluster */ char *control_addr; /* comm path of slurmctld primary server */ char *control_machine; /* name of slurmctld primary server */ + 
char *crypto_type; /* cryptographic signature plugin */ + uint32_t def_mem_per_task; /* default MB memory per spawned task */ uint16_t disable_root_jobs; /* if set then user root can't run jobs */ char *epilog; /* pathname of job epilog */ + uint32_t epilog_msg_time; /* usecs for slurmctld to process an + * epilog complete message */ + uint16_t fast_schedule; /* 1 to *not* check configurations by node + * (only check configuration file, faster) */ uint32_t first_job_id; /* first slurm generated job_id to assign */ uint32_t next_job_id; /* next slurm generated job_id to assign */ - uint16_t fast_schedule; /* 1 to *not* check configurations by node - * (only check configuration file, faster) */ + uint16_t get_env_timeout; /* timeout for srun --get-user-env option */ + uint16_t health_check_interval; /* secs between health checks */ + char * health_check_program; /* pathname of health check program */ uint16_t inactive_limit;/* seconds of inactivity before a * inactive resource allocation is released */ - char *job_acct_logfile; /* job accounting log location */ - uint16_t job_acct_freq; /* poll frequency for job accounting plugins */ - char *job_acct_type; /* job accounting type */ - char *job_comp_type; /* job completion logger type */ + char *job_acct_gather_type; /* job accounting gather type */ + uint16_t job_acct_gather_freq; /* poll frequency for job accounting + * gather plugins */ + char *job_comp_host; /* job completion logging host */ char *job_comp_loc; /* job completion logging location */ + char *job_comp_pass; /* job completion storage password */ + uint32_t job_comp_port; /* job completion storage port */ + char *job_comp_type; /* job completion storage type */ + char *job_comp_user; /* job completion storage user */ + char *job_credential_private_key; /* path to private key */ + char *job_credential_public_certificate;/* path to public certificate*/ uint16_t job_file_append; /* if set, append to stdout/err file */ - uint16_t get_env_timeout; /* secs allowed for srun --get-user-env */ + uint16_t job_requeue; /* If set, jobs get requeued on node failre */ uint16_t kill_wait; /* seconds between SIGXCPU to SIGKILL * on job termination */ + char *licenses; /* licenses available on this cluster */ char *mail_prog; /* pathname of mail program */ uint16_t max_job_cnt; /* maximum number of active jobs */ + uint32_t max_mem_per_task; /* maximum MB memory per spawned task */ uint16_t min_job_age; /* COMPLETED jobs over this age (secs) * purged from in memory records */ char *mpi_default; /* Default version of MPI in use */ uint16_t msg_timeout; /* message timeout */ + char *node_prefix; /* prefix of nodes in partition, only set in + bluegene clusters NULL otherwise */ char *plugindir; /* pathname to plugins */ char *plugstack; /* pathname to plugin stack config file */ + uint16_t private_data; /* block viewing of other user jobs */ char *proctrack_type; /* process tracking plugin type */ char *prolog; /* pathname of job prolog */ uint16_t propagate_prio_process; /* 1 if process priority should * be propagated */ char *propagate_rlimits;/* Propagate (all/specific) resource limits */ char *propagate_rlimits_except;/* Propagate all rlimits except these */ + uint16_t resume_rate; /* nodes to make full power, per minute */ + char *resume_program; /* program to make nodes full power */ uint16_t ret2service; /* 1 return DOWN node to service at * registration */ + char *sched_params; /* SchedulerParameters OR + * contents of scheduler plugin config file */ + uint16_t sched_time_slice; /* gang 
scheduler slice time, secs */ char *schedtype; /* type of scheduler to use */ uint16_t schedport; /* port for scheduler connection */ uint16_t schedrootfltr; /* 1 if rootOnly partitions should be @@ -905,34 +1019,35 @@ typedef struct slurm_ctl_conf { * on non-responding primarly controller */ uint16_t slurmd_debug; /* slurmd logging level */ char *slurmd_logfile; /* where slurmd error log gets written */ + char *slurmd_pidfile; /* where to put slurmd pidfile */ uint32_t slurmd_port; /* default communications port to slurmd */ char *slurmd_spooldir; /* where slurmd put temporary state info */ - char *slurmd_pidfile; /* where to put slurmd pidfile */ uint16_t slurmd_timeout;/* how long slurmctld waits for slurmd before * considering node DOWN */ char *slurm_conf; /* pathname of slurm config file */ + char *srun_epilog; /* srun epilog program */ + char *srun_prolog; /* srun prolog program */ char *state_save_location;/* pathname of slurmctld state save * directory */ + char *suspend_exc_nodes;/* nodes to not make power saving */ + char *suspend_exc_parts;/* partitions to not make power saving */ + char *suspend_program; /* program to make nodes power saving */ + uint16_t suspend_rate; /* nodes to make power saving, per minute */ + uint16_t suspend_time; /* node idle for this long before power save mode */ char *switch_type; /* switch or interconnect type */ char *task_epilog; /* pathname of task launch epilog */ char *task_plugin; /* task launch plugin */ uint16_t task_plugin_param; /* see TASK_PARAM_* */ char *task_prolog; /* pathname of task launch prolog */ char *tmp_fs; /* pathname of temporary file system */ - uint16_t wait_time; /* default job --wait time */ - char *job_credential_private_key; /* path to private key */ - char *job_credential_public_certificate;/* path to public certificate*/ - char *srun_prolog; /* srun prolog program */ - char *srun_epilog; /* srun epilog program */ - char *node_prefix; /* prefix of nodes in partition only set in - bluegene clusters NULL otherwise */ uint16_t tree_width; /* number of threads per node to span */ - uint16_t use_pam; /* enable/disable PAM support */ char *unkillable_program; /* program run by the slurmstepd when * processes in a job step are unkillable */ uint16_t unkillable_timeout; /* time in seconds, after processes in a * job step have been signalled, before * they are considered "unkillable". 
*/ + uint16_t use_pam; /* enable/disable PAM support */ + uint16_t wait_time; /* default job --wait time */ } slurm_ctl_conf_t; typedef struct slurmd_status_msg { @@ -969,20 +1084,23 @@ typedef struct partition_info update_part_msg_t; /* Opaque data type for slurm_step_ctx_* functions */ typedef struct slurm_step_ctx_struct *slurm_step_ctx; +typedef struct slurm_step_ctx_struct slurm_step_ctx_t; #define TRIGGER_RES_TYPE_JOB 1 #define TRIGGER_RES_TYPE_NODE 2 -#define TRIGGER_TYPE_UP 0x01 -#define TRIGGER_TYPE_DOWN 0x02 -#define TRIGGER_TYPE_TIME 0x04 -#define TRIGGER_TYPE_FINI 0x08 -#define TRIGGER_TYPE_RECONFIG 0x10 -#define TRIGGER_TYPE_BLOCK_ERR 0x20 -#define TRIGGER_TYPE_IDLE 0x40 +#define TRIGGER_TYPE_UP 0x0001 +#define TRIGGER_TYPE_DOWN 0x0002 +#define TRIGGER_TYPE_FAIL 0x0004 +#define TRIGGER_TYPE_TIME 0x0008 +#define TRIGGER_TYPE_FINI 0x0010 +#define TRIGGER_TYPE_RECONFIG 0x0020 +#define TRIGGER_TYPE_BLOCK_ERR 0x0040 +#define TRIGGER_TYPE_IDLE 0x0080 +#define TRIGGER_TYPE_DRAINED 0x0100 typedef struct trigger_info { uint32_t trig_id; /* trigger ID */ - uint8_t res_type; /* TRIGGER_RES_TYPE_* */ + uint16_t res_type; /* TRIGGER_RES_TYPE_* */ char * res_id; /* resource ID */ uint16_t trig_type; /* TRIGGER_TYPE_* */ uint16_t offset; /* seconds from trigger, 0x8000 origin */ @@ -1100,6 +1218,26 @@ extern int slurm_allocation_lookup_lite PARAMS(( */ extern char *slurm_read_hostfile PARAMS((char *filename, int n)); +/* + * slurm_allocation_msg_thr_create - startup a message handler talking + * with the controller dealing with messages from the controller during an + * allocation. + * IN port - port we are listening for messages on from the controller + * IN callbacks - callbacks for different types of messages + * RET allocation_msg_thread_t * or NULL on failure + */ +extern allocation_msg_thread_t *slurm_allocation_msg_thr_create PARAMS( + (uint16_t *port, const slurm_allocation_callbacks_t *callbacks)); + +/* + * slurm_allocation_msg_thr_destroy - shutdown the message handler talking + * with the controller dealing with messages from the controller during an + * allocation. + * IN msg_thr - allocation_msg_thread_t pointer allocated with + * slurm_allocation_msg_thr_create + */ +extern void slurm_allocation_msg_thr_destroy PARAMS( + (allocation_msg_thread_t * msg_thr)); /* * slurm_submit_batch_job - issue RPC to submit a job for later execution * NOTE: free the response using slurm_free_submit_response_response_msg @@ -1229,15 +1367,27 @@ extern void slurm_step_ctx_params_t_init PARAMS((slurm_step_ctx_params_t *ptr)); * RET the step context or NULL on failure with slurm errno set * NOTE: Free allocated memory using slurm_step_ctx_destroy. */ -extern slurm_step_ctx slurm_step_ctx_create PARAMS(( +extern slurm_step_ctx_t *slurm_step_ctx_create PARAMS(( const slurm_step_ctx_params_t *step_params)); +/* + * slurm_step_ctx_create_no_alloc - Create a job step and its context without + * getting an allocation. + * IN step_params - job step parameters + * IN step_id - since we are faking it give me the id to use + * RET the step context or NULL on failure with slurm errno set + * NOTE: Free allocated memory using slurm_step_ctx_destroy. + */ +extern slurm_step_ctx_t * +slurm_step_ctx_create_no_alloc PARAMS(( + const slurm_step_ctx_params_t *step_params, uint32_t step_id)); + /* * slurm_step_ctx_get - get parameters from a job step context. 
* IN ctx - job step context generated by slurm_step_ctx_create * RET SLURM_SUCCESS or SLURM_ERROR (with slurm_errno set) */ -extern int slurm_step_ctx_get PARAMS((slurm_step_ctx ctx, +extern int slurm_step_ctx_get PARAMS((slurm_step_ctx_t *ctx, int ctx_key, ...)); /* @@ -1262,14 +1412,14 @@ extern int slurm_jobinfo_ctx_get PARAMS((switch_jobinfo_t jobinfo, * IN ctx - job step context generated by slurm_step_ctx_create * RET SLURM_SUCCESS or SLURM_ERROR (with slurm_errno set) */ -extern int slurm_step_ctx_daemon_per_node_hack PARAMS((slurm_step_ctx ctx)); +extern int slurm_step_ctx_daemon_per_node_hack PARAMS((slurm_step_ctx_t *ctx)); /* * slurm_step_ctx_destroy - free allocated memory for a job step context. * IN ctx - job step context generated by slurm_step_ctx_create * RET SLURM_SUCCESS or SLURM_ERROR (with slurm_errno set) */ -extern int slurm_step_ctx_destroy PARAMS((slurm_step_ctx ctx)); +extern int slurm_step_ctx_destroy PARAMS((slurm_step_ctx_t *ctx)); /* * slurm_step_launch_params_t_init - initialize a user-allocated @@ -1284,28 +1434,37 @@ extern void slurm_step_launch_params_t_init /* * slurm_step_launch - launch a parallel job step * IN ctx - job step context generated by slurm_step_ctx_create + * IN launcher_host - address used for PMI communications + * IN callbacks - Identify functions to be called when various events occur * RET SLURM_SUCCESS or SLURM_ERROR (with errno set) */ -extern int slurm_step_launch PARAMS((slurm_step_ctx ctx, +extern int slurm_step_launch PARAMS((slurm_step_ctx_t *ctx, + char *launcher_host, const slurm_step_launch_params_t *params, const slurm_step_launch_callbacks_t *callbacks)); /* * Block until all tasks have started. */ -extern int slurm_step_launch_wait_start PARAMS((slurm_step_ctx ctx)); +extern int slurm_step_launch_wait_start PARAMS((slurm_step_ctx_t *ctx)); /* * Block until all tasks have finished (or failed to start altogether). */ -extern void slurm_step_launch_wait_finish PARAMS((slurm_step_ctx ctx)); +extern void slurm_step_launch_wait_finish PARAMS((slurm_step_ctx_t *ctx)); /* * Abort an in-progress launch, or terminate the fully launched job step. * * Can be called from a signal handler. 
*/ -void slurm_step_launch_abort PARAMS((slurm_step_ctx ctx)); +extern void slurm_step_launch_abort PARAMS((slurm_step_ctx_t *ctx)); + +/* + * Forward a signal to all those nodes with running tasks + */ +extern void slurm_step_launch_fwd_signal PARAMS((slurm_step_ctx_t *ctx, + int signo)); /*****************************************************************************\ * SLURM CONTROL CONFIGURATION READ/PRINT/UPDATE FUNCTIONS @@ -1355,7 +1514,7 @@ extern void slurm_print_ctl_conf PARAMS(( * RET 0 or -1 on error * NOTE: free the response using slurm_free_slurmd_status() */ -extern int slurm_load_slurmd_status(slurmd_status_t **slurmd_status_ptr); +extern int slurm_load_slurmd_status PARAMS((slurmd_status_t **slurmd_status_ptr)); /* * slurm_free_slurmd_status - free slurmd state information @@ -1471,6 +1630,15 @@ extern int slurm_update_job PARAMS(( job_desc_msg_t * job_msg )) ; extern int slurm_get_select_jobinfo PARAMS((select_jobinfo_t jobinfo, enum select_data_type data_type, void *data)); +/* + * slurm_notify_job - send message to the job's stdout, + * usable only by user root + * IN job_id - slurm job_id or 0 for all jobs + * IN message - arbitrary message + * RET 0 or -1 on error + */ +extern int slurm_notify_job PARAMS(( uint32_t job_id, char *message )); + /*****************************************************************************\ * SLURM JOB STEP CONFIGURATION READ/PRINT/UPDATE FUNCTIONS \*****************************************************************************/ @@ -1720,6 +1888,14 @@ extern int slurm_reconfigure PARAMS(( void )); */ extern int slurm_shutdown PARAMS(( uint16_t core )); +/* + * slurm_set_debug_level - issue RPC to set slurm controller debug level + * IN debug_level - requested debug level + * RET 0 on success, otherwise return -1 and set errno to indicate the error + */ +extern int slurm_set_debug_level PARAMS((uint32_t debug_level)); + + /*****************************************************************************\ * SLURM JOB SUSPEND FUNCTIONS \*****************************************************************************/ @@ -1824,6 +2000,21 @@ extern int slurm_checkpoint_complete PARAMS(( uint32_t job_id, uint32_t step_id, time_t begin_time, uint32_t error_code, char *error_msg )); +/* + * slurm_checkpoint_task_complete - note the completion of a task's checkpoint + * operation. + * IN job_id - job on which to perform operation + * IN step_id - job step on which to perform operation + * IN task_id - task which completed the operation + * IN begin_time - time at which checkpoint began + * IN error_code - error code, highest value for all complete calls is preserved + * IN error_msg - error message, preserved for highest error_code + * RET 0 or a slurm error code + */ +extern int slurm_checkpoint_task_complete (uint32_t job_id, uint32_t step_id, + uint32_t task_id, time_t begin_time, + uint32_t error_code, char *error_msg); + /* * slurm_checkpoint_error - gather error information for the last checkpoint * operation for some job step @@ -1841,6 +2032,15 @@ extern int slurm_checkpoint_error PARAMS(( uint32_t job_id, uint32_t step_id, uint32_t *error_code, char **error_msg )); +/* + * slurm_get_checkpoint_file_path - return the checkpoint file + * path of this process, creating the directory if needed. 
+ * IN len: length of the file path buffer + * OUT buf: buffer to store the checkpoint file path + * RET: 0 on success, -1 on failure with errno set + */ +extern int slurm_get_checkpoint_file_path(size_t len, char *buf); + /*****************************************************************************\ * SLURM HOSTLIST FUNCTIONS \*****************************************************************************/ diff --git a/slurm/slurm_errno.h b/slurm/slurm_errno.h index 6143acfde..3e9b7e9b7 100644 --- a/slurm/slurm_errno.h +++ b/slurm/slurm_errno.h @@ -5,7 +5,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, * Jim Garlick <garlick@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -150,6 +150,13 @@ enum { ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED, ESLURM_TASKDIST_REQUIRES_OVERCOMMIT, ESLURM_JOB_HELD, + ESLURM_INVALID_CRYPTO_TYPE_CHANGE, + ESLURM_INVALID_BANK_ACCOUNT, + ESLURM_INVALID_TASK_MEMORY, + ESLURM_INVALID_ACCOUNT, + ESLURM_INVALID_LICENSES, + ESLURM_NEED_RESTART, + ESLURM_ACCOUNTING_POLICY, /* switch specific error codes, specific values defined in plugin module */ ESLURM_SWITCH_MIN = 3000, diff --git a/slurm/spank.h b/slurm/spank.h index 20bc4090a..27f2a7f9c 100644 --- a/slurm/spank.h +++ b/slurm/spank.h @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/Makefile.am b/src/Makefile.am index 1289d0193..a852e7d97 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -1,5 +1,6 @@ -SUBDIRS = common api slurmctld slurmd plugins srun sacct sbcast \ - scontrol scancel squeue sinfo smap sview slaunch salloc \ - sbatch sattach strigger +SUBDIRS = common api database \ + slurmctld slurmd slurmdbd plugins srun sbcast \ + scontrol scancel squeue sinfo smap sview salloc \ + sbatch sattach strigger sacct sacctmgr sreport sstat diff --git a/src/Makefile.in b/src/Makefile.in index 80490da89..a7366a69a 100644 --- a/src/Makefile.in +++ b/src/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
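Two of the controller RPC wrappers added above, slurm_notify_job() and slurm_set_debug_level(), are simple enough to exercise directly. What follows is only an illustrative sketch and is not part of the patch: it assumes an installed 1.3.3 tree providing <slurm/slurm.h> and <slurm/slurm_errno.h>, that both calls are made with sufficient (root) privileges, and that errors can be reported through the pre-existing slurm_get_errno()/slurm_strerror() pair; the message text and the numeric debug level are arbitrary placeholders.

#include <stdio.h>
#include <stdlib.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

int main(int argc, char **argv)
{
        /* job_id 0 means "all jobs" per the slurm_notify_job() comment */
        uint32_t job_id = (argc > 1) ? (uint32_t) atoi(argv[1]) : 0;

        if (slurm_notify_job(job_id, "scheduled maintenance in 10 minutes") < 0)
                fprintf(stderr, "slurm_notify_job: %s\n",
                        slurm_strerror(slurm_get_errno()));

        /* raise slurmctld logging verbosity; the value 5 is illustrative */
        if (slurm_set_debug_level(5) < 0)
                fprintf(stderr, "slurm_set_debug_level: %s\n",
                        slurm_strerror(slurm_get_errno()));

        return 0;
}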
@@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -99,6 +101,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -112,10 +115,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -135,7 +141,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -146,6 +155,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -161,6 +172,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -176,6 +188,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -232,9 +245,10 @@ target_os = @target_os@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -SUBDIRS = common api slurmctld slurmd plugins srun sacct sbcast \ - scontrol scancel squeue sinfo smap sview slaunch salloc \ - sbatch sattach strigger +SUBDIRS = common api database \ + slurmctld slurmd slurmdbd plugins srun sbcast \ + scontrol scancel squeue sinfo smap sview salloc \ + sbatch sattach strigger sacct sacctmgr sreport sstat all: all-recursive @@ -350,8 +364,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +390,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +401,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) 
tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/api/Makefile.am b/src/api/Makefile.am index 6023cef72..b5634c6e7 100644 --- a/src/api/Makefile.am +++ b/src/api/Makefile.am @@ -59,6 +59,7 @@ noinst_LTLIBRARIES = libslurmhelper.la slurmapi_src = \ allocate.c \ + allocate_msg.c \ cancel.c \ checkpoint.c \ complete.c \ @@ -104,6 +105,11 @@ libslurm_la_LDFLAGS = \ -version-info $(current):$(rev):$(age) \ $(OTHER_FLAGS) +# +# The libpmi_la_LIBADD specification below causes libpmi.la to relink +# when running "make install", but removing it prevents essential slurm +# symbols from being available in programs linking with libpmi +# libpmi_la_SOURCES = pmi.c libpmi_la_LIBADD = $(top_builddir)/src/api/libslurm.la libpmi_la_LDFLAGS = $(LIB_LDFLAGS) \ diff --git a/src/api/Makefile.in b/src/api/Makefile.in index 7b0a3474e..107f973d1 100644 --- a/src/api/Makefile.in +++ b/src/api/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -89,15 +91,15 @@ libslurm_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ am__DEPENDENCIES_1 = $(common_dir)/libcommon.la \ $(common_dir)/libspank.la $(common_dir)/libeio.la libslurmhelper_la_DEPENDENCIES = $(am__DEPENDENCIES_1) -am__objects_1 = allocate.lo cancel.lo checkpoint.lo complete.lo \ - config_info.lo init_msg.lo job_info.lo job_step_info.lo \ - node_info.lo node_select_info.lo partition_info.lo signal.lo \ - slurm_pmi.lo step_ctx.lo step_io.lo step_launch.lo \ - pmi_server.lo submit.lo suspend.lo triggers.lo reconfigure.lo \ - update_config.lo +am__objects_1 = allocate.lo allocate_msg.lo cancel.lo checkpoint.lo \ + complete.lo config_info.lo init_msg.lo job_info.lo \ + job_step_info.lo node_info.lo node_select_info.lo \ + partition_info.lo signal.lo slurm_pmi.lo step_ctx.lo \ + step_io.lo step_launch.lo pmi_server.lo submit.lo suspend.lo \ + triggers.lo reconfigure.lo update_config.lo am_libslurmhelper_la_OBJECTS = $(am__objects_1) libslurmhelper_la_OBJECTS = $(am_libslurmhelper_la_OBJECTS) -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -139,6 +141,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -152,10 +155,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -175,7 +181,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -186,6 +195,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -201,6 +212,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -216,6 +228,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -328,6 +341,7 @@ BUILT_SOURCES = $(VERSION_SCRIPT) $(PMI_VERSION_SCRIPT) libslurm.la noinst_LTLIBRARIES = libslurmhelper.la slurmapi_src = \ allocate.c \ + allocate_msg.c \ cancel.c \ checkpoint.c \ complete.c \ @@ -371,6 +385,12 @@ libslurm_la_LDFLAGS = \ -version-info $(current):$(rev):$(age) \ $(OTHER_FLAGS) + +# +# The libpmi_la_LIBADD specification below causes libpmi.la to relink +# when running "make install", but removing it prevents essential slurm +# symbols from being available in programs linking with libpmi +# libpmi_la_SOURCES = pmi.c libpmi_la_LIBADD = $(top_builddir)/src/api/libslurm.la libpmi_la_LDFLAGS = $(LIB_LDFLAGS) \ @@ -422,8 +442,8 @@ install-libLTLIBRARIES: $(lib_LTLIBRARIES) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(libdir)/$$f"; \ else :; fi; \ done @@ -431,8 +451,8 @@ uninstall-libLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$p"; \ + echo " 
$(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$p"; \ done clean-libLTLIBRARIES: @@ -466,6 +486,7 @@ distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/allocate.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/allocate_msg.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cancel.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/complete.Plo@am__quote@ @@ -521,8 +542,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -534,8 +555,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -545,13 +566,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/api/allocate.c b/src/api/allocate.c index 438035bf7..192c15226 100644 --- a/src/api/allocate.c +++ b/src/api/allocate.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * allocate.c - allocate nodes for a job or step with supplied contraints - * $Id: allocate.c 11342 2007-04-10 22:54:27Z da $ + * $Id: allocate.c 13722 2008-03-27 16:39:22Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -16,7 +16,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -63,7 +63,9 @@ extern pid_t getsid(pid_t pid); /* missing from <unistd.h> */ #include "src/common/xstring.h" #include "src/common/forward.h" #include "src/common/fd.h" +#include "src/common/parse_time.h" #include "src/common/slurm_auth.h" +#include "src/common/slurm_protocol_defs.h" #define BUFFER_SIZE 1024 #define MAX_ALLOC_WAIT 60 /* seconds */ @@ -212,7 +214,6 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req, xfree(req); return NULL; } - req->alloc_resp_hostname = listen->hostname; req->alloc_resp_port = listen->port; } @@ -279,7 +280,6 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req, return resp; } - /* * slurm_job_will_run - determine if a job would execute immediately if * submitted now @@ -288,19 +288,40 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req, */ int slurm_job_will_run (job_desc_msg_t *req) { - slurm_msg_t req_msg; - int rc; + slurm_msg_t req_msg, resp_msg; + will_run_response_msg_t *will_run_resp; + char buf[64]; /* req.immediate = true; implicit */ + if ((req->alloc_node == NULL) + && (gethostname_short(buf, sizeof(buf)) == 0)) + req->alloc_node = buf; slurm_msg_t_init(&req_msg); req_msg.msg_type = REQUEST_JOB_WILL_RUN; req_msg.data = req; - if (slurm_send_recv_controller_rc_msg(&req_msg, &rc) < 0) + if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0) return SLURM_SOCKET_ERROR; - if (rc) - slurm_seterrno_ret(rc); + switch (resp_msg.msg_type) { + case RESPONSE_SLURM_RC: + if (_handle_rc_msg(&resp_msg) < 0) + return SLURM_PROTOCOL_ERROR; + break; + case RESPONSE_JOB_WILL_RUN: + will_run_resp = (will_run_response_msg_t *) resp_msg.data; + slurm_make_time_str(&will_run_resp->start_time, + buf, sizeof(buf)); + info("Job %u to start at %s using %u processors on %s", + will_run_resp->job_id, buf, + will_run_resp->proc_cnt, + will_run_resp->node_list); + slurm_free_will_run_response_msg(will_run_resp); + break; + default: + slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR); + break; + } return SLURM_PROTOCOL_SUCCESS; } @@ -316,8 +337,7 @@ int slurm_job_step_create (job_step_create_request_msg_t *req, job_step_create_response_msg_t **resp) { - slurm_msg_t req_msg; - slurm_msg_t resp_msg; + slurm_msg_t req_msg, resp_msg; slurm_msg_t_init(&req_msg); slurm_msg_t_init(&resp_msg); @@ -556,12 +576,9 @@ static listen_t *_create_allocation_response_socket(char *interface_hostname) listen_t *listen = NULL; listen = xmalloc(sizeof(listen_t)); - if (listen == NULL) - return NULL; /* port "0" lets the operating system pick any port */ - slurm_set_addr(&listen->address, 0, interface_hostname); - if ((listen->fd = slurm_init_msg_engine(&listen->address)) < 0) { + if ((listen->fd = slurm_init_msg_engine_port(0)) < 0) { error("slurm_init_msg_engine_port error %m"); return NULL; } @@ -596,7 +613,7 @@ static void _destroy_allocation_response_socket(listen_t *listen) static int _handle_msg(slurm_msg_t *msg, resource_allocation_response_msg_t **resp) { - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); uid_t uid = getuid(); uid_t slurm_uid = (uid_t) slurm_get_slurm_user_id(); int rc = 0; diff --git a/src/salloc/msg.c b/src/api/allocate_msg.c similarity index 60% rename from src/salloc/msg.c rename to src/api/allocate_msg.c index 12a6834e6..9ae77a6e8 100644 --- a/src/salloc/msg.c +++ b/src/api/allocate_msg.c @@ -1,10 +1,12 @@ 
/*****************************************************************************\ - * src/salloc/msg.c - Message handler for salloc + * allocate_msg.c - Message handler for communication with + * the slurmctld during an allocation. + * $Id: allocate_msg.c 11641 2007-06-05 23:03:51Z jette $ ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. + * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * Written by Morris Jette <jette1@llnl.gov>. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -13,7 +15,18 @@ * the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. - * + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more @@ -21,7 +34,7 @@ * * You should have received a copy of the GNU General Public License along * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
\*****************************************************************************/ #if HAVE_CONFIG_H @@ -49,17 +62,15 @@ #include "src/common/eio.h" #include "src/common/xsignal.h" -#include "src/salloc/salloc.h" -#include "src/salloc/opt.h" -#include "src/salloc/msg.h" - -struct salloc_msg_thread { +struct allocation_msg_thread { + slurm_allocation_callbacks_t callback; eio_handle_t *handle; pthread_t id; }; static uid_t slurm_uid; -static void _handle_msg(slurm_msg_t *msg); +static void _handle_msg(struct allocation_msg_thread *msg_thr, + slurm_msg_t *msg); static bool _message_socket_readable(eio_obj_t *obj); static int _message_socket_accept(eio_obj_t *obj, List objs); static pthread_mutex_t msg_thr_start_lock = PTHREAD_MUTEX_INITIALIZER; @@ -85,15 +96,31 @@ static void *_msg_thr_internal(void *arg) return NULL; } -extern salloc_msg_thread_t *msg_thr_create(uint16_t *port) +extern allocation_msg_thread_t *slurm_allocation_msg_thr_create( + uint16_t *port, + const slurm_allocation_callbacks_t *callbacks) { + pthread_attr_t attr; int sock = -1; eio_obj_t *obj; - salloc_msg_thread_t *msg_thr = NULL; + struct allocation_msg_thread *msg_thr = NULL; + + debug("Entering slurm_allocation_msg_thr_create()"); - debug("Entering _msg_thr_create()"); slurm_uid = (uid_t) slurm_get_slurm_user_id(); - msg_thr = (salloc_msg_thread_t *)xmalloc(sizeof(salloc_msg_thread_t)); + msg_thr = (struct allocation_msg_thread *)xmalloc( + sizeof(struct allocation_msg_thread)); + + /* Initialize the callback pointers */ + if (callbacks != NULL) { + /* copy the user specified callback pointers */ + memcpy(&(msg_thr->callback), callbacks, + sizeof(slurm_allocation_callbacks_t)); + } else { + /* set all callbacks to NULL */ + memset(&(msg_thr->callback), 0, + sizeof(slurm_allocation_callbacks_t)); + } if (net_stream_listen(&sock, (short *)port) < 0) { error("unable to intialize step launch listening socket: %m"); @@ -102,31 +129,38 @@ extern salloc_msg_thread_t *msg_thr_create(uint16_t *port) } debug("port from net_stream_listen is %hu", *port); - obj = eio_obj_create(sock, &message_socket_ops, NULL); + obj = eio_obj_create(sock, &message_socket_ops, (void *)msg_thr); msg_thr->handle = eio_handle_create(); eio_new_initial_obj(msg_thr->handle, obj); pthread_mutex_lock(&msg_thr_start_lock); - if (pthread_create(&msg_thr->id, NULL, + slurm_attr_init(&attr); + if (pthread_create(&msg_thr->id, &attr, _msg_thr_internal, (void *)msg_thr->handle) != 0) { error("pthread_create of message thread: %m"); + slurm_attr_destroy(&attr); eio_handle_destroy(msg_thr->handle); xfree(msg_thr); return NULL; } + slurm_attr_destroy(&attr); /* Wait until the message thread has blocked signals before continuing. 
*/ pthread_cond_wait(&msg_thr_start_cond, &msg_thr_start_lock); pthread_mutex_unlock(&msg_thr_start_lock); - return msg_thr; + return (allocation_msg_thread_t *)msg_thr; } -extern void msg_thr_destroy(salloc_msg_thread_t *msg_thr) +extern void slurm_allocation_msg_thr_destroy( + allocation_msg_thread_t *arg) { + struct allocation_msg_thread *msg_thr = + (struct allocation_msg_thread *)arg; if (msg_thr == NULL) return; + debug2("slurm_allocation_msg_thr_destroy: clearing up message thread"); eio_signal_shutdown(msg_thr->handle); pthread_join(msg_thr->id, NULL); eio_handle_destroy(msg_thr->handle); @@ -154,6 +188,9 @@ static bool _message_socket_readable(eio_obj_t *obj) static int _message_socket_accept(eio_obj_t *obj, List objs) { + struct allocation_msg_thread *msg_thr = + (struct allocation_msg_thread *)obj->arg; + int fd; unsigned char *uc; short port; @@ -161,12 +198,13 @@ static int _message_socket_accept(eio_obj_t *obj, List objs) slurm_msg_t *msg = NULL; int len = sizeof(addr); - debug3("Called _msg_socket_accept"); + debug2("Called _msg_socket_accept"); while ((fd = accept(obj->fd, (struct sockaddr *)&addr, (socklen_t *)&len)) < 0) { if (errno == EINTR) continue; + if (errno == EAGAIN || errno == ECONNABORTED || errno == EWOULDBLOCK) { @@ -184,7 +222,7 @@ static int _message_socket_accept(eio_obj_t *obj, List objs) in /etc/hosts. */ uc = (unsigned char *)&((struct sockaddr_in *)&addr)->sin_addr.s_addr; port = ((struct sockaddr_in *)&addr)->sin_port; - debug2("got message connection from %u.%u.%u.%u:%hu", + debug2("allocation got message connection from %u.%u.%u.%u:%hu", uc[0], uc[1], uc[2], uc[3], ntohs(port)); fflush(stdout); @@ -202,7 +240,7 @@ again: goto cleanup; } - _handle_msg(msg); /* handle_msg frees msg->data */ + _handle_msg(msg_thr, msg); /* handle_msg frees msg->data */ cleanup: if ((msg->conn_fd >= 0) && slurm_close_accepted_conn(msg->conn_fd) < 0) error ("close(%d): %m", msg->conn_fd); @@ -212,11 +250,13 @@ cleanup: } -static void _handle_node_fail(slurm_msg_t *msg) +static void _handle_node_fail(struct allocation_msg_thread *msg_thr, + slurm_msg_t *msg) { srun_node_fail_msg_t *nf = (srun_node_fail_msg_t *)msg->data; - error("Node failure on %s", nf->nodelist); + if (msg_thr->callback.node_fail != NULL) + (msg_thr->callback.node_fail)(nf); slurm_free_srun_node_fail_msg(msg->data); } @@ -226,66 +266,59 @@ static void _handle_node_fail(slurm_msg_t *msg) * Job will be killed shortly after timeout. * This RPC can arrive multiple times with the same or updated timeouts. 
*/ -static void _handle_timeout(slurm_msg_t *msg) +static void _handle_timeout(struct allocation_msg_thread *msg_thr, + slurm_msg_t *msg) { - static time_t last_timeout = 0; srun_timeout_msg_t *to = (srun_timeout_msg_t *)msg->data; debug3("received timeout message"); - if (to->timeout != last_timeout) { - last_timeout = to->timeout; - info("Job allocation time limit to be reached at %s", - ctime(&to->timeout)); - } + + if (msg_thr->callback.timeout != NULL) + (msg_thr->callback.timeout)(to); slurm_free_srun_timeout_msg(msg->data); } -static void _handle_user_msg(slurm_msg_t *msg) +static void _handle_user_msg(struct allocation_msg_thread *msg_thr, + slurm_msg_t *msg) { - srun_user_msg_t *um; - - um = msg->data; - info("%s", um->msg); + srun_user_msg_t *um = (srun_user_msg_t *)msg->data; + debug3("received user message"); + + if (msg_thr->callback.user_msg != NULL) + (msg_thr->callback.user_msg)(um); + slurm_free_srun_user_msg(msg->data); } -static void _handle_job_complete(slurm_msg_t *msg) +static void _handle_ping(struct allocation_msg_thread *msg_thr, + slurm_msg_t *msg) +{ + srun_ping_msg_t *ping = (srun_ping_msg_t *)msg->data; + debug3("received ping message"); + slurm_send_rc_msg(msg, SLURM_SUCCESS); + + if (msg_thr->callback.ping != NULL) + (msg_thr->callback.ping)(ping); + + slurm_free_srun_ping_msg(msg->data); +} +static void _handle_job_complete(struct allocation_msg_thread *msg_thr, + slurm_msg_t *msg) { srun_job_complete_msg_t *comp = (srun_job_complete_msg_t *)msg->data; debug3("job complete message received"); - if (comp->step_id == NO_VAL) { - pthread_mutex_lock(&allocation_state_lock); - if (allocation_state != REVOKED) { - /* If the allocation_state is already REVOKED, then - * no need to print this message. We probably - * relinquished the allocation ourself. 
- */ - info("Job allocation %u has been revoked.", - comp->job_id); - } - if (allocation_state == GRANTED - && command_pid > -1 - && opt.kill_command_signal_set) { - verbose("Sending signal %d to command \"%s\", pid %d", - opt.kill_command_signal, - command_argv[0], command_pid); - kill(command_pid, opt.kill_command_signal); - } - allocation_state = REVOKED; - pthread_mutex_unlock(&allocation_state_lock); - } else { - verbose("Job step %u.%u is finished.", - comp->job_id, comp->step_id); - } + if (msg_thr->callback.job_complete != NULL) + (msg_thr->callback.job_complete)(comp); + slurm_free_srun_job_complete_msg(msg->data); } static void -_handle_msg(slurm_msg_t *msg) +_handle_msg(struct allocation_msg_thread *msg_thr, slurm_msg_t *msg) { - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); uid_t uid = getuid(); if ((req_uid != slurm_uid) && (req_uid != 0) && (req_uid != uid)) { @@ -296,21 +329,19 @@ _handle_msg(slurm_msg_t *msg) switch (msg->msg_type) { case SRUN_PING: - debug("received ping message"); - slurm_send_rc_msg(msg, SLURM_SUCCESS); - slurm_free_srun_ping_msg(msg->data); + _handle_ping(msg_thr, msg); break; case SRUN_JOB_COMPLETE: - _handle_job_complete(msg); + _handle_job_complete(msg_thr, msg); break; case SRUN_TIMEOUT: - _handle_timeout(msg); + _handle_timeout(msg_thr, msg); break; case SRUN_USER_MSG: - _handle_user_msg(msg); + _handle_user_msg(msg_thr, msg); break; case SRUN_NODE_FAIL: - _handle_node_fail(msg); + _handle_node_fail(msg_thr, msg); break; default: error("received spurious message type: %d\n", diff --git a/src/api/cancel.c b/src/api/cancel.c index 231554747..678605d23 100644 --- a/src/api/cancel.c +++ b/src/api/cancel.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * cancel.c - cancel a slurm job or job step - * $Id: cancel.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: cancel.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/checkpoint.c b/src/api/checkpoint.c index fd5659ae2..be3c46acb 100644 --- a/src/api/checkpoint.c +++ b/src/api/checkpoint.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * checkpoint.c - Process checkpoint related functions. - * $Id: checkpoint.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: checkpoint.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -41,11 +41,21 @@ #endif #include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <stdlib.h> #include <slurm/slurm.h> #include "src/common/checkpoint.h" #include "src/common/slurm_protocol_api.h" +#ifdef HAVE_AIX + char *__progname = "PROGRAM"; +#else + extern char * __progname; +#endif + static int _handle_rc_msg(slurm_msg_t *msg); static int _checkpoint_op (uint16_t op, uint16_t data, uint32_t job_id, uint32_t step_id); @@ -294,3 +304,114 @@ _handle_rc_msg(slurm_msg_t *msg) slurm_seterrno(rc); return rc; } + +/* + * slurm_checkpoint_task_complete - note the completion of a task's checkpoint + * operation. + * IN job_id - job on which to perform operation + * IN step_id - job step on which to perform operation + * IN task_id - task which completed the operation + * IN begin_time - time at which checkpoint began + * IN error_code - error code, highest value for all complete calls is preserved + * IN error_msg - error message, preserved for highest error_code + * RET 0 or a slurm error code + */ +extern int slurm_checkpoint_task_complete (uint32_t job_id, uint32_t step_id, + uint32_t task_id, time_t begin_time, uint32_t error_code, char *error_msg) +{ + int rc; + slurm_msg_t msg; + checkpoint_task_comp_msg_t req; + + slurm_msg_t_init(&msg); + req.job_id = job_id; + req.step_id = step_id; + req.task_id = task_id; + req.begin_time = begin_time; + req.error_code = error_code; + req.error_msg = error_msg; + msg.msg_type = REQUEST_CHECKPOINT_TASK_COMP; + msg.data = &req; + + if (slurm_send_recv_controller_rc_msg(&msg, &rc) < 0) + return SLURM_ERROR; + if (rc) + slurm_seterrno_ret(rc); + return SLURM_SUCCESS; +} + +/* + * slurm_get_checkpoint_file_path - return the checkpoint file + * path of this process, creating the directory if needed. + * IN len: length of the file path buffer + * OUT buf: buffer to store the checkpoint file path + * RET: 0 on success, -1 on failure with errno set + */ +extern int +slurm_get_checkpoint_file_path(size_t len, char *buf) +{ + char *ckpt_path, *job_id, *step_id, *proc_id; + struct stat mystat; + int idx; + + len --; /* for a terminating 0 */ + + ckpt_path = getenv("SLURM_CHECKPOINT_PATH"); + if (ckpt_path == NULL) { /* this should not happen since the program may chdir */ + ckpt_path = getcwd(buf, len); + if (ckpt_path == NULL) /* ERANGE: len is too short */ + return -1; + } else { + if (snprintf(buf, len, "%s", ckpt_path) >= len) { /* glibc >= 2.1 */ + errno = ERANGE; + return -1; + } + ckpt_path = buf; + } + idx = strlen(ckpt_path) - 1; + while (idx > 0 && ckpt_path[idx] == '/') + ckpt_path[idx --] = 0; + + if (stat(ckpt_path, &mystat) < 0) + return -1; + if (! S_ISDIR(mystat.st_mode)) { + errno = ENOTDIR; + return -1; + } + + job_id = getenv("SLURM_JOBID"); + step_id = getenv("SLURM_STEPID"); + proc_id = getenv("SLURM_PROCID"); + if (job_id == NULL || step_id == NULL || proc_id == NULL) { + errno = ENODATA; + return -1; + } + idx = strlen(buf); + if (snprintf(buf + idx, len - idx, "/%s.%s", job_id, step_id) >= len - idx) { + errno = ERANGE; + return -1; + } + + if (stat(buf, &mystat) < 0) { + if (errno == ENOENT) { /* dir does not exists */ + if (mkdir(buf, 0750) < 0 && errno != EEXIST) + return -1; + if (stat(buf, &mystat) < 0) + return -1; + } + else + return -1; + } + if (! 
S_ISDIR(mystat.st_mode)) { + errno = ENOTDIR; + return -1; + } + + idx = strlen(buf); + if (snprintf(buf + idx, len - idx, "/%s.%s.ckpt", __progname, proc_id) >= len - idx) { + errno = ERANGE; + return -1; + } + + return 0; +} diff --git a/src/api/complete.c b/src/api/complete.c index 2ea808ccb..15ce8c44a 100644 --- a/src/api/complete.c +++ b/src/api/complete.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * complete.c - note the completion a slurm job or job step - * $Id: complete.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: complete.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/config_info.c b/src/api/config_info.c index 3147cdee7..e649accd8 100644 --- a/src/api/config_info.c +++ b/src/api/config_info.c @@ -1,11 +1,11 @@ /****************************************************************************\ * config_info.c - get/print the system configuration information of slurm - * $Id: config_info.c 11798 2007-07-07 00:02:31Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
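For the two checkpoint helpers added to checkpoint.c above, here is a short illustrative sketch (not part of the patch) of how a task-side caller might use them; the helper name checkpoint_here is hypothetical, and SLURM_JOBID, SLURM_STEPID and SLURM_PROCID must be set in the environment, as the path-building code above requires.

#include <stdio.h>
#include <time.h>
#include <inttypes.h>
#include <slurm/slurm.h>

static int checkpoint_here(uint32_t job_id, uint32_t step_id, uint32_t task_id)
{
    char path[1024];
    time_t begin = time(NULL);

    /* Builds <dir>/<jobid>.<stepid>/<progname>.<procid>.ckpt,
     * creating the per-step directory if needed. */
    if (slurm_get_checkpoint_file_path(sizeof(path), path) < 0) {
        perror("slurm_get_checkpoint_file_path");
        return -1;
    }

    /* ... application-specific: write this task's state to "path" ... */

    /* Report completion; the highest error_code over all tasks is kept. */
    return slurm_checkpoint_task_complete(job_id, step_id, task_id,
                                          begin, 0, "");
}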
@@ -117,144 +117,224 @@ void slurm_print_ctl_conf ( FILE* out, slurm_make_time_str ((time_t *)&slurm_ctl_conf_ptr->last_update, time_str, sizeof(time_str)); fprintf(out, "Configuration data as of %s\n", time_str); - fprintf(out, "AuthType = %s\n", + fprintf(out, "AccountingStorageEnforce = %u\n", + slurm_ctl_conf_ptr->accounting_storage_enforce); + fprintf(out, "AccountingStorageHost = %s\n", + slurm_ctl_conf_ptr->accounting_storage_host); + fprintf(out, "AccountingStorageLoc = %s\n", + slurm_ctl_conf_ptr->accounting_storage_loc); + fprintf(out, "AccountingStoragePass = %s\n", + slurm_ctl_conf_ptr->accounting_storage_pass); + fprintf(out, "AccountingStoragePort = %u\n", + slurm_ctl_conf_ptr->accounting_storage_port); + fprintf(out, "AccountingStorageType = %s\n", + slurm_ctl_conf_ptr->accounting_storage_type); + fprintf(out, "AccountingStorageUser = %s\n", + slurm_ctl_conf_ptr->accounting_storage_user); + fprintf(out, "AuthType = %s\n", slurm_ctl_conf_ptr->authtype); - fprintf(out, "BackupAddr = %s\n", + fprintf(out, "BackupAddr = %s\n", slurm_ctl_conf_ptr->backup_addr); - fprintf(out, "BackupController = %s\n", + fprintf(out, "BackupController = %s\n", slurm_ctl_conf_ptr->backup_controller); - fprintf(out, "CacheGroups = %u\n", + slurm_make_time_str ((time_t *)&slurm_ctl_conf_ptr->boot_time, + time_str, sizeof(time_str)); + fprintf(out, "BOOT_TIME = %s\n", + time_str); + fprintf(out, "CacheGroups = %u\n", slurm_ctl_conf_ptr->cache_groups); - fprintf(out, "CheckpointType = %s\n", + fprintf(out, "CheckpointType = %s\n", slurm_ctl_conf_ptr->checkpoint_type); - fprintf(out, "ControlAddr = %s\n", + fprintf(out, "ClusterName = %s\n", + slurm_ctl_conf_ptr->cluster_name); + fprintf(out, "ControlAddr = %s\n", slurm_ctl_conf_ptr->control_addr); - fprintf(out, "ControlMachine = %s\n", + fprintf(out, "ControlMachine = %s\n", slurm_ctl_conf_ptr->control_machine); - fprintf(out, "Epilog = %s\n", + fprintf(out, "CryptoType = %s\n", + slurm_ctl_conf_ptr->crypto_type); + if (slurm_ctl_conf_ptr->def_mem_per_task) { + fprintf(out, "DefMemPerTask = %u\n", + slurm_ctl_conf_ptr->def_mem_per_task); + } else + fprintf(out, "DefMemPerTask = UNLIMITED\n"); + if (slurm_ctl_conf_ptr->disable_root_jobs) + fprintf(out, "DisableRootJobs = YES\n"); + else + fprintf(out, "DisableRootJobs = NO\n"); + fprintf(out, "Epilog = %s\n", slurm_ctl_conf_ptr->epilog); - fprintf(out, "FastSchedule = %u\n", + fprintf(out, "EpilogMsgTime = %u\n", + slurm_ctl_conf_ptr->epilog_msg_time); + fprintf(out, "FastSchedule = %u\n", slurm_ctl_conf_ptr->fast_schedule); - fprintf(out, "FirstJobId = %u\n", + fprintf(out, "FirstJobId = %u\n", slurm_ctl_conf_ptr->first_job_id); + fprintf(out, "GetEnvTimeout = %u\n", + slurm_ctl_conf_ptr->get_env_timeout); + fprintf(out, "HealthCheckInterval = %u\n", + slurm_ctl_conf_ptr->health_check_interval); + fprintf(out, "HealthCheckProgram = %s\n", + slurm_ctl_conf_ptr->health_check_program); #ifdef HAVE_XCPU - fprintf(out, "HAVE_XCPU = %d\n", HAVE_XCPU); + fprintf(out, "HAVE_XCPU = %d\n", HAVE_XCPU); #endif - fprintf(out, "InactiveLimit = %u\n", + fprintf(out, "InactiveLimit = %u\n", slurm_ctl_conf_ptr->inactive_limit); - fprintf(out, "JobAcctLogFile = %s\n", - slurm_ctl_conf_ptr->job_acct_logfile); - fprintf(out, "JobAcctFrequency = %u\n", - slurm_ctl_conf_ptr->job_acct_freq); - fprintf(out, "JobAcctType = %s\n", - slurm_ctl_conf_ptr->job_acct_type); - fprintf(out, "JobCompLoc = %s\n", - slurm_ctl_conf_ptr->job_comp_loc); - fprintf(out, "JobCompType = %s\n", + fprintf(out, "JobAcctGatherFrequency = %u\n", + 
slurm_ctl_conf_ptr->job_acct_gather_freq); + fprintf(out, "JobAcctGatherType = %s\n", + slurm_ctl_conf_ptr->job_acct_gather_type); + fprintf(out, "JobCompHost = %s\n", + slurm_ctl_conf_ptr->job_comp_host); + fprintf(out, "JobCompLoc = %s\n", + slurm_ctl_conf_ptr->job_comp_loc); + fprintf(out, "JobCompPass = %s\n", + slurm_ctl_conf_ptr->job_comp_pass); + fprintf(out, "JobCompPort = %u\n", + slurm_ctl_conf_ptr->job_comp_port); + fprintf(out, "JobCompType = %s\n", slurm_ctl_conf_ptr->job_comp_type); + fprintf(out, "JobCompUser = %s\n", + slurm_ctl_conf_ptr->job_comp_user); fprintf(out, "JobCredentialPrivateKey = %s\n", slurm_ctl_conf_ptr->job_credential_private_key); fprintf(out, "JobCredentialPublicCertificate = %s\n", slurm_ctl_conf_ptr->job_credential_public_certificate); - fprintf(out, "KillWait = %u\n", + fprintf(out, "JobFileAppend = %u\n", + slurm_ctl_conf_ptr->job_file_append); + fprintf(out, "JobRequeue = %u\n", + slurm_ctl_conf_ptr->job_requeue); + fprintf(out, "KillWait = %u\n", slurm_ctl_conf_ptr->kill_wait); - fprintf(out, "MailProg = %s\n", + fprintf(out, "Licenses = %s\n", + slurm_ctl_conf_ptr->licenses); + fprintf(out, "MailProg = %s\n", slurm_ctl_conf_ptr->mail_prog); - fprintf(out, "MaxJobCount = %u\n", + fprintf(out, "MaxJobCount = %u\n", slurm_ctl_conf_ptr->max_job_cnt); - fprintf(out, "MessageTimeout = %u\n", + if (slurm_ctl_conf_ptr->max_mem_per_task) { + fprintf(out, "MaxMemPerTask = %u\n", + slurm_ctl_conf_ptr->max_mem_per_task); + } else + fprintf(out, "MaxMemPerTask = UNLIMITED\n"); + fprintf(out, "MessageTimeout = %u\n", slurm_ctl_conf_ptr->msg_timeout); - fprintf(out, "MinJobAge = %u\n", + fprintf(out, "MinJobAge = %u\n", slurm_ctl_conf_ptr->min_job_age); - fprintf(out, "MpiDefault = %s\n", + fprintf(out, "MpiDefault = %s\n", slurm_ctl_conf_ptr->mpi_default); #ifdef MULTIPLE_SLURMD - fprintf(out, "MULTIPLE_SLURMD = %d\n", MULTIPLE_SLURMD); + fprintf(out, "MULTIPLE_SLURMD = %d\n", MULTIPLE_SLURMD); #endif - fprintf(out, "NEXT_JOB_ID = %u\n", + fprintf(out, "NEXT_JOB_ID = %u\n", slurm_ctl_conf_ptr->next_job_id); - fprintf(out, "PluginDir = %s\n", + fprintf(out, "PluginDir = %s\n", slurm_ctl_conf_ptr->plugindir); - fprintf(out, "PlugStackConfig = %s\n", + fprintf(out, "PlugStackConfig = %s\n", slurm_ctl_conf_ptr->plugstack); - fprintf(out, "ProctrackType = %s\n", + fprintf(out, "PrivateData = %u\n", + slurm_ctl_conf_ptr->private_data); + fprintf(out, "ProctrackType = %s\n", slurm_ctl_conf_ptr->proctrack_type); - fprintf(out, "Prolog = %s\n", + fprintf(out, "Prolog = %s\n", slurm_ctl_conf_ptr->prolog); - fprintf(out, "PropagatePrioProcess = %u\n", + fprintf(out, "PropagatePrioProcess = %u\n", slurm_ctl_conf_ptr->propagate_prio_process); fprintf(out, "PropagateResourceLimits = %s\n", slurm_ctl_conf_ptr->propagate_rlimits); fprintf(out, "PropagateResourceLimitsExcept = %s\n", slurm_ctl_conf_ptr->propagate_rlimits_except); - fprintf(out, "ReturnToService = %u\n", + fprintf(out, "ResumeProgram = %s\n", + slurm_ctl_conf_ptr->resume_program); + fprintf(out, "ResumeRate = %u\n", + slurm_ctl_conf_ptr->resume_rate); + fprintf(out, "ReturnToService = %u\n", slurm_ctl_conf_ptr->ret2service); - fprintf(out, "SchedulerPort = %u\n", + fprintf(out, "SchedulerParameters = %s\n", + slurm_ctl_conf_ptr->sched_params); + fprintf(out, "SchedulerPort = %u\n", slurm_ctl_conf_ptr->schedport); - fprintf(out, "SchedulerRootFilter = %u\n", + fprintf(out, "SchedulerRootFilter = %u\n", slurm_ctl_conf_ptr->schedrootfltr); - fprintf(out, "SchedulerType = %s\n", + fprintf(out, "SchedulerTimeSlice = 
%u\n", + slurm_ctl_conf_ptr->sched_time_slice); + fprintf(out, "SchedulerType = %s\n", slurm_ctl_conf_ptr->schedtype); - fprintf(out, "SelectType = %s\n", + fprintf(out, "SelectType = %s\n", slurm_ctl_conf_ptr->select_type); if (slurm_ctl_conf_ptr->select_type_param) { - fprintf(out, "SelectTypeParameters = %s\n", + fprintf(out, "SelectTypeParameters = %s\n", _select_info(slurm_ctl_conf_ptr-> select_type_param)); } - fprintf(out, "SlurmUser = %s(%u)\n", + fprintf(out, "SlurmUser = %s(%u)\n", slurm_ctl_conf_ptr->slurm_user_name, slurm_ctl_conf_ptr->slurm_user_id); - fprintf(out, "SlurmctldDebug = %u\n", + fprintf(out, "SlurmctldDebug = %u\n", slurm_ctl_conf_ptr->slurmctld_debug); - fprintf(out, "SlurmctldLogFile = %s\n", + fprintf(out, "SlurmctldLogFile = %s\n", slurm_ctl_conf_ptr->slurmctld_logfile); - fprintf(out, "SlurmctldPidFile = %s\n", + fprintf(out, "SlurmctldPidFile = %s\n", slurm_ctl_conf_ptr->slurmctld_pidfile); - fprintf(out, "SlurmctldPort = %u\n", + fprintf(out, "SlurmctldPort = %u\n", slurm_ctl_conf_ptr->slurmctld_port); - fprintf(out, "SlurmctldTimeout = %u\n", + fprintf(out, "SlurmctldTimeout = %u\n", slurm_ctl_conf_ptr->slurmctld_timeout); - fprintf(out, "SlurmdDebug = %u\n", + fprintf(out, "SlurmdDebug = %u\n", slurm_ctl_conf_ptr->slurmd_debug); - fprintf(out, "SlurmdLogFile = %s\n", + fprintf(out, "SlurmdLogFile = %s\n", slurm_ctl_conf_ptr->slurmd_logfile); - fprintf(out, "SlurmdPidFile = %s\n", + fprintf(out, "SlurmdPidFile = %s\n", slurm_ctl_conf_ptr->slurmd_pidfile); #ifndef MULTIPLE_SLURMD - fprintf(out, "SlurmdPort = %u\n", + fprintf(out, "SlurmdPort = %u\n", slurm_ctl_conf_ptr->slurmd_port); #endif - fprintf(out, "SlurmdSpoolDir = %s\n", + fprintf(out, "SlurmdSpoolDir = %s\n", slurm_ctl_conf_ptr->slurmd_spooldir); - fprintf(out, "SlurmdTimeout = %u\n", + fprintf(out, "SlurmdTimeout = %u\n", slurm_ctl_conf_ptr->slurmd_timeout); - fprintf(out, "SLURM_CONFIG_FILE = %s\n", + fprintf(out, "SLURM_CONFIG_FILE = %s\n", slurm_ctl_conf_ptr->slurm_conf); - fprintf(out, "SLURM_VERSION = %s\n", SLURM_VERSION); - fprintf(out, "SrunProlog = %s\n", - slurm_ctl_conf_ptr->srun_prolog); - fprintf(out, "SrunEpilog = %s\n", + fprintf(out, "SLURM_VERSION = %s\n", SLURM_VERSION); + fprintf(out, "SrunEpilog = %s\n", slurm_ctl_conf_ptr->srun_epilog); - fprintf(out, "StateSaveLocation = %s\n", + fprintf(out, "SrunProlog = %s\n", + slurm_ctl_conf_ptr->srun_prolog); + fprintf(out, "StateSaveLocation = %s\n", slurm_ctl_conf_ptr->state_save_location); - fprintf(out, "SwitchType = %s\n", + fprintf(out, "SuspendExcNodes = %s\n", + slurm_ctl_conf_ptr->suspend_exc_nodes); + fprintf(out, "SuspendExcParts = %s\n", + slurm_ctl_conf_ptr->suspend_exc_parts); + fprintf(out, "SuspendProgram = %s\n", + slurm_ctl_conf_ptr->suspend_program); + fprintf(out, "SuspendRate = %u\n", + slurm_ctl_conf_ptr->suspend_rate); + fprintf(out, "SuspendTime = %d\n", + ((int)slurm_ctl_conf_ptr->suspend_time - 1)); + fprintf(out, "SwitchType = %s\n", slurm_ctl_conf_ptr->switch_type); - fprintf(out, "TaskEpilog = %s\n", + fprintf(out, "TaskEpilog = %s\n", slurm_ctl_conf_ptr->task_epilog); - fprintf(out, "TaskPlugin = %s\n", + fprintf(out, "TaskPlugin = %s\n", slurm_ctl_conf_ptr->task_plugin); - fprintf(out, "TaskPluginParam = %s\n", + fprintf(out, "TaskPluginParam = %s\n", _task_plugin_param(slurm_ctl_conf_ptr->task_plugin_param)); - fprintf(out, "TaskProlog = %s\n", + fprintf(out, "TaskProlog = %s\n", slurm_ctl_conf_ptr->task_prolog); - fprintf(out, "TmpFS = %s\n", + fprintf(out, "TmpFS = %s\n", slurm_ctl_conf_ptr->tmp_fs); - 
fprintf(out, "TreeWidth = %u\n", + fprintf(out, "TreeWidth = %u\n", slurm_ctl_conf_ptr->tree_width); - fprintf(out, "UsePam = %u\n", + fprintf(out, "UsePam = %u\n", slurm_ctl_conf_ptr->use_pam); - fprintf(out, "WaitTime = %u\n", + fprintf(out, "UnkillableStepProgram = %s\n", + slurm_ctl_conf_ptr->unkillable_program); + fprintf(out, "UnkillableStepTimeout = %u\n", + slurm_ctl_conf_ptr->unkillable_timeout); + fprintf(out, "WaitTime = %u\n", slurm_ctl_conf_ptr->wait_time); } diff --git a/src/api/init_msg.c b/src/api/init_msg.c index 528eb3850..ec1e1f797 100644 --- a/src/api/init_msg.c +++ b/src/api/init_msg.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * init_msg.c - initialize RPC messages contents - * $Id: init_msg.c 12457 2007-10-05 23:15:28Z jette $ + * $Id: init_msg.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -56,6 +56,7 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg) { job_desc_msg->account = NULL; + job_desc_msg->acctg_freq = (uint16_t) NO_VAL; job_desc_msg->alloc_node = NULL; job_desc_msg->alloc_sid = NO_VAL; job_desc_msg->comment = NULL; @@ -64,7 +65,7 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg) job_desc_msg->ntasks_per_node = (uint16_t) NO_VAL; job_desc_msg->ntasks_per_socket = (uint16_t) NO_VAL; job_desc_msg->ntasks_per_core = (uint16_t) NO_VAL; - job_desc_msg->dependency = NO_VAL; + job_desc_msg->dependency = NULL; job_desc_msg->environment = ((char **) NULL); job_desc_msg->env_size = 0; job_desc_msg->features = NULL; @@ -74,10 +75,10 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg) job_desc_msg->job_min_procs = (uint16_t) NO_VAL; job_desc_msg->job_min_sockets = (uint16_t) NO_VAL; job_desc_msg->job_min_threads = (uint16_t) NO_VAL; - job_desc_msg->job_max_memory = NO_VAL; job_desc_msg->job_min_memory = NO_VAL; job_desc_msg->job_min_tmp_disk= NO_VAL; job_desc_msg->kill_on_node_fail = (uint16_t) NO_VAL; + job_desc_msg->licenses = NULL; job_desc_msg->name = NULL; job_desc_msg->network = NULL; job_desc_msg->nice = NICE_OFFSET; @@ -85,7 +86,8 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg) job_desc_msg->ntasks_per_node = (uint16_t) NO_VAL; job_desc_msg->ntasks_per_socket = (uint16_t) NO_VAL; job_desc_msg->num_tasks = NO_VAL; - job_desc_msg->overcommit = (uint16_t) NO_VAL; + job_desc_msg->open_mode = 0; /* system default */ + job_desc_msg->overcommit = (uint8_t) NO_VAL; job_desc_msg->partition = NULL; job_desc_msg->plane_size = (uint16_t) NO_VAL; job_desc_msg->priority = NO_VAL; @@ -112,14 +114,12 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg) job_desc_msg->user_id = NO_VAL; job_desc_msg->group_id = NO_VAL; job_desc_msg->work_dir = NULL; - job_desc_msg->alloc_resp_hostname = NULL; - job_desc_msg->alloc_resp_port = 0; - job_desc_msg->other_hostname = NULL; + job_desc_msg->alloc_resp_port = 0; job_desc_msg->other_port = 0; job_desc_msg->mail_type = 0; job_desc_msg->mail_user = NULL; job_desc_msg->begin_time = 0; - job_desc_msg->no_requeue = (uint16_t) NO_VAL; + job_desc_msg->requeue = (uint16_t) NO_VAL; #if SYSTEM_DIMENSIONS { int i; @@ -153,7 
+153,8 @@ void slurm_init_part_desc_msg (update_part_msg_t * update_part_msg) update_part_msg->hidden = (uint16_t) NO_VAL; update_part_msg->default_part = (uint16_t) NO_VAL; update_part_msg->root_only = (uint16_t) NO_VAL; - update_part_msg->shared = (uint16_t) NO_VAL; + update_part_msg->max_share = (uint16_t) NO_VAL; + update_part_msg->priority = (uint16_t) NO_VAL; update_part_msg->state_up = (uint16_t) NO_VAL; } diff --git a/src/api/job_info.c b/src/api/job_info.c index a71cdde15..26bf30965 100644 --- a/src/api/job_info.c +++ b/src/api/job_info.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * job_info.c - get/print the job state information of slurm - * $Id: job_info.c 13465 2008-03-04 16:51:08Z jette $ + * $Id: job_info.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -189,8 +189,11 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) sprintf(tmp_line, "UNLIMITED "); else if (job_ptr->time_limit == NO_VAL) sprintf(tmp_line, "Partition_Limit "); - else - sprintf(tmp_line, "%u ", job_ptr->time_limit); + else { + secs2time_str(job_ptr->time_limit * 60, tmp1, + sizeof(tmp1)); + sprintf(tmp_line, "%s ", tmp1); + } xstrcat(out, tmp_line); if (WIFSIGNALED(job_ptr->exit_code)) term_sig = WTERMSIG(job_ptr->exit_code); @@ -339,10 +342,11 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) /****** Line 8 ******/ snprintf(tmp_line, sizeof(tmp_line), - "Shared=%s Contiguous=%d CPUs/task=%u", + "Shared=%s Contiguous=%d CPUs/task=%u Licenses=%s", (job_ptr->shared == 0 ? "0" : job_ptr->shared == 1 ? 
"1" : "OK"), - job_ptr->contiguous, job_ptr->cpus_per_task); + job_ptr->contiguous, job_ptr->cpus_per_task, + job_ptr->licenses); xstrcat(out, tmp_line); if (one_liner) @@ -377,8 +381,17 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) /****** Line 11 ******/ snprintf(tmp_line, sizeof(tmp_line), - "Dependency=%u Account=%s Reason=%s Network=%s", - job_ptr->dependency, job_ptr->account, + "Dependency=%s Account=%s Requeue=%u", + job_ptr->dependency, job_ptr->account, job_ptr->requeue); + xstrcat(out, tmp_line); + if (one_liner) + xstrcat(out, " "); + else + xstrcat(out, "\n "); + + /****** Line 12 ******/ + snprintf(tmp_line, sizeof(tmp_line), + "Reason=%s Network=%s", job_reason_string(job_ptr->state_reason), job_ptr->network); xstrcat(out, tmp_line); @@ -387,7 +400,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) else xstrcat(out, "\n "); - /****** Line 12 ******/ + /****** Line 13 ******/ snprintf(tmp_line, sizeof(tmp_line), "Req%s=%s Req%sIndices=", nodelist, job_ptr->req_nodes, nodelist); xstrcat(out, tmp_line); @@ -404,7 +417,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) else xstrcat(out, "\n "); - /****** Line 13 ******/ + /****** Line 14 ******/ snprintf(tmp_line, sizeof(tmp_line), "Exc%s=%s Exc%sIndices=", nodelist, job_ptr->exc_nodes, nodelist); xstrcat(out, tmp_line); @@ -421,7 +434,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) else xstrcat(out, "\n "); - /****** Line 14 ******/ + /****** Line 15 ******/ slurm_make_time_str((time_t *)&job_ptr->submit_time, time_str, sizeof(time_str)); snprintf(tmp_line, sizeof(tmp_line), "SubmitTime=%s ", @@ -437,7 +450,24 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) time_str, (long int)job_ptr->pre_sus_time); xstrcat(out, tmp_line); - /****** Line 15 (optional) ******/ + /****** Lines 16, 17 (optional, batch only) ******/ + if (job_ptr->batch_flag) { + if (one_liner) + xstrcat(out, " "); + else + xstrcat(out, "\n "); + sprintf(tmp_line, "Command=%s", job_ptr->command); + xstrcat(out, tmp_line); + + if (one_liner) + xstrcat(out, " "); + else + xstrcat(out, "\n "); + sprintf(tmp_line, "WorkDir=%s", job_ptr->work_dir); + xstrcat(out, tmp_line); + } + + /****** Line 18 (optional) ******/ if (job_ptr->comment) { if (one_liner) xstrcat(out, " "); @@ -448,7 +478,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) xstrcat(out, tmp_line); } - /****** Line 16 (optional) ******/ + /****** Line 19 (optional) ******/ select_g_sprint_jobinfo(job_ptr->select_jobinfo, select_buf, sizeof(select_buf), SELECT_PRINT_MIXED); @@ -459,7 +489,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) xstrcat(out, "\n "); xstrcat(out, select_buf); } - /****** Line 17 (optional) ******/ + /****** Line 20 (optional) ******/ select_g_sprint_jobinfo(job_ptr->select_jobinfo, select_buf, sizeof(select_buf), SELECT_PRINT_BLRTS_IMAGE); @@ -472,7 +502,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) "BlrtsImage=%s", select_buf); xstrcat(out, tmp_line); } - /****** Line 18 (optional) ******/ + /****** Line 21 (optional) ******/ select_g_sprint_jobinfo(job_ptr->select_jobinfo, select_buf, sizeof(select_buf), SELECT_PRINT_LINUX_IMAGE); @@ -485,7 +515,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) "LinuxImage=%s", select_buf); xstrcat(out, tmp_line); } - /****** Line 19 (optional) ******/ + /****** Line 22 (optional) ******/ select_g_sprint_jobinfo(job_ptr->select_jobinfo, select_buf, sizeof(select_buf), 
SELECT_PRINT_MLOADER_IMAGE); @@ -498,7 +528,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner ) "MloaderImage=%s", select_buf); xstrcat(out, tmp_line); } - /****** Line 20 (optional) ******/ + /****** Line 23 (optional) ******/ select_g_sprint_jobinfo(job_ptr->select_jobinfo, select_buf, sizeof(select_buf), SELECT_PRINT_RAMDISK_IMAGE); diff --git a/src/api/job_info.h b/src/api/job_info.h index 0c4c78855..b9fe249ba 100644 --- a/src/api/job_info.h +++ b/src/api/job_info.h @@ -1,12 +1,12 @@ /*****************************************************************************\ * job_info.h - get/print the job state information of slurm * - * $Id: job_info.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: job_info.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/job_step_info.c b/src/api/job_step_info.c index 08cdee087..f90d56f92 100644 --- a/src/api/job_step_info.c +++ b/src/api/job_step_info.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * job_step_info.c - get/print the job step state information of slurm - * $Id: job_step_info.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: job_step_info.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, * Joey Ekstrom <ekstrom1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -128,9 +128,20 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr, /****** Line 2 ******/ snprintf(tmp_line, sizeof(tmp_line), - "Partition=%s Nodes=%s Name=%s Network=%s\n\n", + "Partition=%s Nodes=%s Name=%s Network=%s Checkpoint=%u", job_step_ptr->partition, job_step_ptr->nodes, - job_step_ptr->name, job_step_ptr->network); + job_step_ptr->name, job_step_ptr->network, + job_step_ptr->ckpt_interval); + xstrcat(out, tmp_line); + if (one_liner) + xstrcat(out, " "); + else + xstrcat(out, "\n "); + + /****** Line 3 ******/ + snprintf(tmp_line, sizeof(tmp_line), + "CheckpointPath=%s\n\n", + job_step_ptr->ckpt_path); xstrcat(out, tmp_line); return out; diff --git a/src/api/node_info.c b/src/api/node_info.c index b22adfa2e..b26c8c365 100644 --- a/src/api/node_info.c +++ b/src/api/node_info.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * node_info.c - get/print the node state information of slurm - * $Id: node_info.c 12825 2007-12-14 21:23:57Z jette $ + * $Id: node_info.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -147,6 +147,18 @@ slurm_sprint_node_table (node_info_t * node_ptr, int one_liner ) node_ptr->weight, node_ptr->features, node_ptr->reason); xstrcat(out, tmp_line); + + /****** Line 3 (optional) ******/ + if (node_ptr->arch || node_ptr->os) { + if (one_liner) + xstrcat(out, " "); + else + xstrcat(out, "\n "); + snprintf(tmp_line, sizeof(tmp_line), + "Arch=%s OS=%s", + node_ptr->arch, node_ptr->os); + xstrcat(out, tmp_line); + } xstrcat(out, "\n"); return out; diff --git a/src/api/node_select_info.c b/src/api/node_select_info.c index cefbe01d0..2e5f2f982 100644 --- a/src/api/node_select_info.c +++ b/src/api/node_select_info.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * node_select_info.c - get the node select plugin state information of slurm * - * $Id: node_select_info.c 12825 2007-12-14 21:23:57Z jette $ + * $Id: node_select_info.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/node_select_info.h b/src/api/node_select_info.h index 6c4c03f49..be41041e5 100644 --- a/src/api/node_select_info.h +++ b/src/api/node_select_info.h @@ -5,12 +5,12 @@ * NOTE: This software specifically supports only BlueGene/L for now. It * will be made more general in the future * - * $Id: node_select_info.h 10744 2007-01-11 20:09:18Z da $ + * $Id: node_select_info.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/partition_info.c b/src/api/partition_info.c index 5729074ce..416d43440 100644 --- a/src/api/partition_info.c +++ b/src/api/partition_info.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * partition_info.c - get/print the partition state information of slurm - * $Id: partition_info.c 12627 2007-11-06 19:48:55Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -108,6 +108,7 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr, char tmp1[16], tmp2[16]; char tmp_line[MAXHOSTRANGELEN]; char *out = NULL; + uint16_t force, val; /****** Line 1 ******/ #ifdef HAVE_BG @@ -139,14 +140,20 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr, else sprintf(tmp_line, "Default=NO "); xstrcat(out, tmp_line); - if (part_ptr->shared == SHARED_NO) - sprintf(tmp_line, "Shared=NO "); - else if (part_ptr->shared == SHARED_YES) - sprintf(tmp_line, "Shared=YES "); - else if (part_ptr->shared == SHARED_EXCLUSIVE) - sprintf(tmp_line, "Shared=EXCLUSIVE "); - else - sprintf(tmp_line, "Shared=FORCE "); + force = part_ptr->max_share & SHARED_FORCE; + val = part_ptr->max_share & (~SHARED_FORCE); + if (val == 0) + xstrcat(out, "Shared=EXCLUSIVE "); + else if (force) { + sprintf(tmp_line, "Shared=FORCE:%u ", val); + xstrcat(out, tmp_line); + } else if (val == 1) + xstrcat(out, "Shared=NO "); + else { + sprintf(tmp_line, "Shared=YES:%u ", val); + xstrcat(out, tmp_line); + } + sprintf(tmp_line, "Priority=%u ", part_ptr->priority); xstrcat(out, tmp_line); if (part_ptr->state_up) sprintf(tmp_line, "State=UP "); @@ -155,8 +162,12 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr, xstrcat(out, tmp_line); if (part_ptr->max_time == INFINITE) sprintf(tmp_line, "MaxTime=UNLIMITED "); - else - sprintf(tmp_line, "MaxTime=%u ", part_ptr->max_time); + else { + char time_line[32]; + secs2time_str(part_ptr->max_time * 60, time_line, + sizeof(time_line)); + sprintf(tmp_line, "MaxTime=%s ", time_line); + } xstrcat(out, tmp_line); if (part_ptr->hidden) sprintf(tmp_line, "Hidden=YES"); @@ -169,7 +180,6 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr, xstrcat(out, "\n "); /****** Line 3 ******/ - #ifdef HAVE_BG convert_num_unit((float)part_ptr->min_nodes, tmp1, sizeof(tmp1), UNIT_NONE); @@ -191,6 +201,11 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr, sprintf(tmp_line, "MaxNodes=%s ", tmp1); } xstrcat(out, tmp_line); + if (part_ptr->disable_root_jobs) + sprintf(tmp_line, "DisableRootJobs=YES "); + else + sprintf(tmp_line, "DisableRootJobs=NO "); + xstrcat(out, tmp_line); if ((part_ptr->allow_groups == NULL) || (part_ptr->allow_groups[0] == '\0')) sprintf(tmp_line, "AllowGroups=ALL"); diff --git a/src/api/pmi.c b/src/api/pmi.c index 677426127..d0a7754be 100644 --- a/src/api/pmi.c +++ b/src/api/pmi.c @@ -49,7 +49,7 @@ * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -60,7 +60,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -79,7 +79,9 @@ * with SLURM; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
\*****************************************************************************/ -#define _GNU_SOURCE +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif #include <pthread.h> #include <signal.h> diff --git a/src/api/pmi_server.c b/src/api/pmi_server.c index 406da910d..ee0122497 100644 --- a/src/api/pmi_server.c +++ b/src/api/pmi_server.c @@ -1,11 +1,11 @@ /*****************************************************************************\ - * pmi.c - Global PMI data as maintained within srun - * $Id: pmi_server.c 12620 2007-11-05 19:00:45Z jette $ + * pmi_server.c - Global PMI data as maintained within srun + * $Id: pmi_server.c 14078 2008-05-19 23:56:20Z jette $ ***************************************************************************** * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/pmi_server.h b/src/api/pmi_server.h index d8d24ad69..03e42c52e 100644 --- a/src/api/pmi_server.h +++ b/src/api/pmi_server.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * pmi.h - Global PMI data as maintained within srun - * $Id: pmi_server.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: pmi_server.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/reconfigure.c b/src/api/reconfigure.c index b281ce7f6..9ac7fc5db 100644 --- a/src/api/reconfigure.c +++ b/src/api/reconfigure.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * reconfigure.c - request that slurmctld shutdown or re-read the * configuration files - * $Id: reconfigure.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: reconfigure.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -138,7 +138,7 @@ _send_message_controller (enum controller_id dest, slurm_msg_t *req) slurm_fd fd = -1; slurm_msg_t *resp_msg = NULL; - /*always only going to 1 node */ + /* always going to one node (primary or backup per value of "dest") */ if ((fd = slurm_open_controller_conn_spec(dest)) < 0) slurm_seterrno_ret(SLURMCTLD_COMMUNICATIONS_CONNECTION_ERROR); @@ -165,3 +165,39 @@ _send_message_controller (enum controller_id dest, slurm_msg_t *req) return rc; } +/* + * slurm_set_debug_level - issue RPC to set slurm controller debug level + * IN debug_level - requested debug level + * RET 0 on success, otherwise return -1 and set errno to indicate the error + */ +int +slurm_set_debug_level (uint32_t debug_level) +{ + int rc; + slurm_msg_t req_msg; + slurm_msg_t resp_msg; + set_debug_level_msg_t req; + + slurm_msg_t_init(&req_msg); + slurm_msg_t_init(&resp_msg); + + req.debug_level = debug_level; + req_msg.msg_type = REQUEST_SET_DEBUG_LEVEL; + req_msg.data = &req; + + if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0) + return SLURM_ERROR; + + switch (resp_msg.msg_type) { + case RESPONSE_SLURM_RC: + rc = ((return_code_msg_t *) resp_msg.data)->return_code; + slurm_free_return_code_msg(resp_msg.data); + if (rc) + slurm_seterrno_ret(rc); + break; + default: + slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR); + break; + } + return SLURM_PROTOCOL_SUCCESS; +} diff --git a/src/api/signal.c b/src/api/signal.c index 3091fa6de..76916aa66 100644 --- a/src/api/signal.c +++ b/src/api/signal.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * signal.c - Send a signal to a slurm job or job step - * $Id: signal.c 12647 2007-11-12 17:09:47Z da $ + * $Id: signal.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -404,3 +404,37 @@ static int _terminate_batch_script_step( return rc; } +/* + * slurm_notify_job - send message to the job's stdout, + * usable only by user root + * IN job_id - slurm job_id or 0 for all jobs + * IN message - arbitrary message + * RET 0 or -1 on error + */ +extern int slurm_notify_job (uint32_t job_id, char *message) +{ + int rc; + slurm_msg_t msg; + job_notify_msg_t req; + + slurm_msg_t_init(&msg); + /* + * Request message: + */ + req.job_id = job_id; + req.job_step_id = NO_VAL; /* currently not used */ + req.message = message; + msg.msg_type = REQUEST_JOB_NOTIFY; + msg.data = &req; + + if (slurm_send_recv_controller_rc_msg(&msg, &rc) < 0) + return SLURM_FAILURE; + + if (rc) { + slurm_seterrno_ret(rc); + return SLURM_FAILURE; + } + + return SLURM_SUCCESS; +} + diff --git a/src/api/slurm_pmi.c b/src/api/slurm_pmi.c index f1c2454e4..54165bfd1 100644 --- a/src/api/slurm_pmi.c +++ b/src/api/slurm_pmi.c @@ -4,7 +4,7 @@ * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
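The reconfigure.c and signal.c hunks above add two small controller RPCs, slurm_set_debug_level() and slurm_notify_job(). A hedged sketch of their use (not part of the patch); the wrapper name, the chosen level, and the message text are hypothetical, and both calls are expected to succeed only for privileged users.

#include <inttypes.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

static int warn_and_quiet(uint32_t job_id)
{
    /* Lower slurmctld logging; the numeric argument follows the
     * SlurmctldDebug convention (hypothetical value 3 here). */
    if (slurm_set_debug_level(3) != SLURM_SUCCESS)
        slurm_perror("slurm_set_debug_level");

    /* Push an arbitrary line to the job's stdout (job_id 0 = all jobs). */
    if (slurm_notify_job(job_id, "maintenance window in 10 minutes")
        != SLURM_SUCCESS)
        slurm_perror("slurm_notify_job");

    return 0;
}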
diff --git a/src/api/slurm_pmi.h b/src/api/slurm_pmi.h index 8a46e05b8..8b0a61ba2 100644 --- a/src/api/slurm_pmi.h +++ b/src/api/slurm_pmi.h @@ -4,7 +4,7 @@ * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c index f43a12aa3..3b3bd1d1c 100644 --- a/src/api/step_ctx.c +++ b/src/api/step_ctx.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * step_ctx.c - step_ctx task functions for use by AIX/POE * - * $Id: step_ctx.c 12171 2007-08-29 17:22:48Z jette $ + * $Id: step_ctx.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -50,16 +50,56 @@ #include "src/common/slurm_protocol_defs.h" #include "src/common/xmalloc.h" #include "src/common/xstring.h" +#include "src/common/slurm_cred.h" #include "src/api/step_ctx.h" +static void +_job_fake_cred(struct slurm_step_ctx_struct *ctx) +{ + slurm_cred_arg_t arg; + arg.jobid = ctx->job_id; + arg.stepid = ctx->step_resp->job_step_id; + arg.uid = ctx->user_id; + arg.hostlist = ctx->step_req->node_list; + arg.alloc_lps_cnt = 0; + arg.alloc_lps = NULL; + ctx->step_resp->cred = slurm_cred_faker(&arg); +} + +static job_step_create_request_msg_t *_create_step_request( + const slurm_step_ctx_params_t *step_params) +{ + job_step_create_request_msg_t *step_req = + xmalloc(sizeof(job_step_create_request_msg_t)); + step_req->job_id = step_params->job_id; + step_req->user_id = (uint32_t)step_params->uid; + step_req->node_count = step_params->node_count; + step_req->cpu_count = step_params->cpu_count; + step_req->num_tasks = step_params->task_count; + step_req->relative = step_params->relative; + step_req->exclusive = step_params->exclusive; + step_req->immediate = step_params->immediate; + step_req->ckpt_interval = step_params->ckpt_interval; + step_req->ckpt_path = xstrdup(step_params->ckpt_path); + step_req->task_dist = step_params->task_dist; + step_req->plane_size = step_params->plane_size; + step_req->node_list = xstrdup(step_params->node_list); + step_req->network = xstrdup(step_params->network); + step_req->name = xstrdup(step_params->name); + step_req->overcommit = step_params->overcommit ? 1 : 0; + step_req->mem_per_task = step_params->mem_per_task; + + return step_req; +} + /* * slurm_step_ctx_create - Create a job step and its context. * IN step_params - job step parameters * RET the step context or NULL on failure with slurm errno set * NOTE: Free allocated memory using slurm_step_ctx_destroy. 
*/ -extern slurm_step_ctx +extern slurm_step_ctx_t * slurm_step_ctx_create (const slurm_step_ctx_params_t *step_params) { struct slurm_step_ctx_struct *ctx = NULL; @@ -69,21 +109,9 @@ slurm_step_ctx_create (const slurm_step_ctx_params_t *step_params) short port = 0; int errnum = 0; - /* First copy the user's step_params into a step request struct */ - step_req = (job_step_create_request_msg_t *) - xmalloc(sizeof(job_step_create_request_msg_t)); - step_req->job_id = step_params->job_id; - step_req->user_id = (uint32_t)step_params->uid; - step_req->node_count = step_params->node_count; - step_req->cpu_count = step_params->cpu_count; - step_req->num_tasks = step_params->task_count; - step_req->relative = step_params->relative; - step_req->task_dist = step_params->task_dist; - step_req->plane_size = step_params->plane_size; - step_req->node_list = xstrdup(step_params->node_list); - step_req->network = xstrdup(step_params->network); - step_req->name = xstrdup(step_params->name); - step_req->overcommit = step_params->overcommit ? 1 : 0; + /* First copy the user's step_params into a step request + * struct */ + step_req = _create_step_request(step_params); /* We will handle the messages in the step_launch.c mesage handler, * but we need to open the socket right now so we can tell the @@ -112,13 +140,90 @@ slurm_step_ctx_create (const slurm_step_ctx_params_t *step_params) ctx->user_id = step_req->user_id; ctx->step_req = step_req; ctx->step_resp = step_resp; + ctx->verbose_level = step_params->verbose_level; ctx->launch_state = step_launch_state_create(ctx); ctx->launch_state->slurmctld_socket_fd = sock; +fail: + errno = errnum; + return (slurm_step_ctx_t *)ctx; +} + +/* + * slurm_step_ctx_create_no_alloc - Create a job step and its context without + * getting an allocation. + * IN step_params - job step parameters + * IN step_id - since we are faking it give me the id to use + * RET the step context or NULL on failure with slurm errno set + * NOTE: Free allocated memory using slurm_step_ctx_destroy. + */ +extern slurm_step_ctx_t * +slurm_step_ctx_create_no_alloc (const slurm_step_ctx_params_t *step_params, + uint32_t step_id) +{ + struct slurm_step_ctx_struct *ctx = NULL; + job_step_create_request_msg_t *step_req = NULL; + job_step_create_response_msg_t *step_resp = NULL; + int sock = -1; + short port = 0; + int errnum = 0; + int cyclic = (step_params->task_dist == SLURM_DIST_CYCLIC); + + /* First copy the user's step_params into a step request struct */ + step_req = _create_step_request(step_params); + + /* We will handle the messages in the step_launch.c mesage handler, + * but we need to open the socket right now so we can tell the + * controller which port to use. 
+ */ + if (net_stream_listen(&sock, &port) < 0) { + errnum = errno; + error("unable to intialize step context socket: %m"); + slurm_free_job_step_create_request_msg(step_req); + goto fail; + } + step_req->port = port; + step_req->host = xshort_hostname(); + + /* Then make up a reponse with only certain things filled in */ + step_resp = (job_step_create_response_msg_t *) + xmalloc(sizeof(job_step_create_response_msg_t)); + + step_resp->step_layout = fake_slurm_step_layout_create( + step_req->node_list, + NULL, NULL, + step_req->node_count, + step_req->num_tasks); + + if (switch_alloc_jobinfo(&step_resp->switch_job) < 0) + fatal("switch_alloc_jobinfo: %m"); + if (switch_build_jobinfo(step_resp->switch_job, + step_resp->step_layout->node_list, + step_resp->step_layout->tasks, + cyclic, step_req->network) < 0) + fatal("switch_build_jobinfo: %m"); + + + step_resp->job_step_id = step_id; + + ctx = xmalloc(sizeof(struct slurm_step_ctx_struct)); + ctx->launch_state = NULL; + ctx->magic = STEP_CTX_MAGIC; + ctx->job_id = step_req->job_id; + ctx->user_id = step_req->user_id; + ctx->step_req = step_req; + ctx->step_resp = step_resp; + ctx->verbose_level = step_params->verbose_level; + + ctx->launch_state = step_launch_state_create(ctx); + ctx->launch_state->slurmctld_socket_fd = sock; + + _job_fake_cred(ctx); + fail: errno = errnum; - return (slurm_step_ctx)ctx; + return (slurm_step_ctx_t *)ctx; } /* @@ -127,7 +232,7 @@ fail: * RET SLURM_SUCCESS or SLURM_ERROR (with slurm_errno set) */ extern int -slurm_step_ctx_get (slurm_step_ctx ctx, int ctx_key, ...) +slurm_step_ctx_get (slurm_step_ctx_t *ctx, int ctx_key, ...) { va_list ap; int rc = SLURM_SUCCESS; @@ -252,7 +357,7 @@ slurm_jobinfo_ctx_get(switch_jobinfo_t jobinfo, int data_type, void *data) * RET SLURM_SUCCESS or SLURM_ERROR (with slurm_errno set) */ extern int -slurm_step_ctx_destroy (slurm_step_ctx ctx) +slurm_step_ctx_destroy (slurm_step_ctx_t *ctx) { if ((ctx == NULL) || (ctx->magic != STEP_CTX_MAGIC)) { slurm_seterrno(EINVAL); @@ -278,7 +383,7 @@ slurm_step_ctx_destroy (slurm_step_ctx ctx) * RET SLURM_SUCCESS or SLURM_ERROR (with slurm_errno set) */ extern int -slurm_step_ctx_daemon_per_node_hack(slurm_step_ctx ctx) +slurm_step_ctx_daemon_per_node_hack(slurm_step_ctx_t *ctx) { slurm_step_layout_t *new_layout, *old_layout; int i; @@ -347,3 +452,4 @@ extern void slurm_step_ctx_params_t_init (slurm_step_ctx_params_t *ptr) ptr->job_id = (uint32_t)NO_VAL; } } + diff --git a/src/api/step_ctx.h b/src/api/step_ctx.h index 023e5113a..7b1ae2a5c 100644 --- a/src/api/step_ctx.h +++ b/src/api/step_ctx.h @@ -7,7 +7,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, * Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
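A minimal sketch (not from the patch) of the opaque-handle usage the step_ctx.c hunks above establish: slurm_step_ctx_create() now returns a slurm_step_ctx_t pointer built from slurm_step_ctx_params_t, whose job_id and task_count fields are the same ones _create_step_request() copies. The job id and task count below are placeholders; real callers take them from an existing allocation.

#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

static slurm_step_ctx_t *make_step_ctx(uint32_t job_id, uint32_t ntasks)
{
	slurm_step_ctx_params_t params;
	slurm_step_ctx_t *ctx;

	slurm_step_ctx_params_t_init(&params);  /* fill in library defaults */
	params.job_id     = job_id;
	params.task_count = ntasks;

	/* Issues the step-create RPC assembled by _create_step_request() */
	ctx = slurm_step_ctx_create(&params);
	if (ctx == NULL)
		slurm_perror("slurm_step_ctx_create");

	/* Caller releases the context with slurm_step_ctx_destroy(ctx) */
	return ctx;
}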
@@ -47,12 +47,14 @@ struct slurm_step_ctx_struct { uint32_t job_id; /* assigned job id */ uint32_t user_id; /* user the job runs as */ - + job_step_create_request_msg_t *step_req; job_step_create_response_msg_t *step_resp; /* Used by slurm_step_launch() */ struct step_launch_state *launch_state; + uint16_t verbose_level; /* for extra logging decisions in step + launch api */ }; #endif /* _STEP_CTX_H */ diff --git a/src/api/step_io.c b/src/api/step_io.c index 2b2bca71d..37e673bd9 100644 --- a/src/api/step_io.c +++ b/src/api/step_io.c @@ -1,11 +1,11 @@ /****************************************************************************\ * step_io.c - process stdin, stdout, and stderr for parallel jobs. - * $Id: step_io.c 10799 2007-01-18 19:20:00Z morrone $ + * $Id: step_io.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/step_io.h b/src/api/step_io.h index 74b0a4014..477eb9a64 100644 --- a/src/api/step_io.h +++ b/src/api/step_io.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/api/step_io.h - job-step client-side I/O routines - * $Id: step_io.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: step_io.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/step_launch.c b/src/api/step_launch.c index c018fabe6..2656d598f 100644 --- a/src/api/step_launch.c +++ b/src/api/step_launch.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * step_launch.c - launch a parallel job step * - * $Id: step_launch.c 13373 2008-02-27 16:47:13Z jette $ + * $Id: step_launch.c 14142 2008-05-28 20:07:50Z jette $ ***************************************************************************** * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
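A sketch of the calling sequence implied by the step_launch.c changes that follow, where slurm_step_launch() now takes the step context, the caller's hostname (forwarded for PMI communications), launch parameters, and callbacks. Assumptions: the argc/argv fields of slurm_step_launch_params_t come from this release's public slurm.h, and a zeroed callbacks struct simply disables event callbacks.

#include <string.h>
#include <slurm/slurm.h>
#include <slurm/slurm_errno.h>

static int launch_and_wait(slurm_step_ctx_t *ctx, char *launcher_host,
			   int argc, char **argv)
{
	slurm_step_launch_params_t params;
	slurm_step_launch_callbacks_t callbacks;

	slurm_step_launch_params_t_init(&params);
	params.argc = argc;       /* program and arguments to run per task */
	params.argv = argv;
	memset(&callbacks, 0, sizeof(callbacks));  /* no event callbacks */

	if (slurm_step_launch(ctx, launcher_host, &params, &callbacks)
	    != SLURM_SUCCESS) {
		slurm_perror("slurm_step_launch");
		return SLURM_ERROR;
	}
	slurm_step_launch_wait_start(ctx);   /* block until all tasks start */
	slurm_step_launch_wait_finish(ctx);  /* block until all tasks exit  */
	return SLURM_SUCCESS;
}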
@@ -31,6 +31,7 @@ #endif #include <errno.h> +#include <fcntl.h> #include <pthread.h> #include <stdarg.h> #include <stdlib.h> @@ -40,6 +41,7 @@ #include <netinet/in.h> #include <sys/param.h> #include <sys/socket.h> +#include <sys/stat.h> #include <sys/types.h> #include <sys/un.h> #include <netdb.h> /* for gethostbyname */ @@ -69,18 +71,25 @@ extern char **environ; /********************************************************************** * General declarations for step launch code **********************************************************************/ -static int _launch_tasks(slurm_step_ctx ctx, - launch_tasks_request_msg_t *launch_msg); +static int _launch_tasks(slurm_step_ctx_t *ctx, + launch_tasks_request_msg_t *launch_msg, + uint32_t timeout); static char *_lookup_cwd(void); +static void _print_launch_msg(launch_tasks_request_msg_t *msg, + char *hostname, int nodeid); /********************************************************************** * Message handler declarations **********************************************************************/ +static pid_t srun_ppid = (pid_t) 0; static uid_t slurm_uid; -static int _msg_thr_create(struct step_launch_state *sls, int num_nodes); +static bool force_terminated_job = false; +static int task_exit_signal = 0; +static void _exec_prog(slurm_msg_t *msg); +static int _msg_thr_create(struct step_launch_state *sls, int num_nodes); static void _handle_msg(struct step_launch_state *sls, slurm_msg_t *msg); static bool _message_socket_readable(eio_obj_t *obj); -static int _message_socket_accept(eio_obj_t *obj, List objs); +static int _message_socket_accept(eio_obj_t *obj, List objs); static struct io_operations message_socket_ops = { readable: &_message_socket_readable, @@ -109,15 +118,17 @@ void slurm_step_launch_params_t_init (slurm_step_launch_params_t *ptr) ptr->buffered_stdio = true; memcpy(&ptr->local_fds, &fds, sizeof(fds)); ptr->gid = getgid(); + ptr->acctg_freq = (uint16_t) NO_VAL; } /* * slurm_step_launch - launch a parallel job step * IN ctx - job step context generated by slurm_step_ctx_create + * IN launcher_host - address used for PMI communications * IN callbacks - Identify functions to be called when various events occur * RET SLURM_SUCCESS or SLURM_ERROR (with errno set) */ -int slurm_step_launch (slurm_step_ctx ctx, +int slurm_step_launch (slurm_step_ctx_t *ctx, char *launcher_host, const slurm_step_launch_params_t *params, const slurm_step_launch_callbacks_t *callbacks) { @@ -131,8 +142,7 @@ int slurm_step_launch (slurm_step_ctx ctx, memset(&launch, 0, sizeof(launch)); if (ctx == NULL || ctx->magic != STEP_CTX_MAGIC) { - error("Not a valid slurm_step_ctx!"); - + error("Not a valid slurm_step_ctx_t!"); slurm_seterrno(EINVAL); return SLURM_ERROR; } @@ -166,7 +176,8 @@ int slurm_step_launch (slurm_step_ctx ctx, } /* Create message receiving sockets and handler thread */ - _msg_thr_create(ctx->launch_state, ctx->step_req->node_count); + _msg_thr_create(ctx->launch_state, + ctx->step_resp->step_layout->node_cnt); /* Start tasks on compute nodes */ launch.job_id = ctx->step_req->job_id; @@ -183,18 +194,8 @@ int slurm_step_launch (slurm_step_ctx ctx, } else { env_array_merge(&env, (const char **)params->env); } - { - /* FIXME - hostname and IP need to be user settable */ - char *launcher_hostname = xshort_hostname(); - struct hostent *ent = gethostbyname(launcher_hostname); - - env_array_for_step(&env, - ctx->step_resp, - launcher_hostname, - ctx->launch_state->resp_port[0], - ent->h_addr_list[0]); - xfree(launcher_hostname); - } + 
env_array_for_step(&env, ctx->step_resp, launcher_host, + ctx->launch_state->resp_port[0]); env_array_merge(&env, (const char **)mpi_env); env_array_free(mpi_env); @@ -205,24 +206,31 @@ int slurm_step_launch (slurm_step_ctx ctx, } else { launch.cwd = _lookup_cwd(); } - launch.nnodes = ctx->step_req->node_count; - launch.nprocs = ctx->step_req->num_tasks; - launch.slurmd_debug = params->slurmd_debug; - launch.switch_job = ctx->step_resp->switch_job; - launch.task_prolog = params->task_prolog; - launch.task_epilog = params->task_epilog; - launch.cpu_bind_type = params->cpu_bind_type; - launch.cpu_bind = params->cpu_bind; - launch.mem_bind_type = params->mem_bind_type; - launch.mem_bind = params->mem_bind; - launch.multi_prog = params->multi_prog ? 1 : 0; + launch.nnodes = ctx->step_resp->step_layout->node_cnt; + launch.nprocs = ctx->step_resp->step_layout->task_cnt; + launch.slurmd_debug = params->slurmd_debug; + launch.switch_job = ctx->step_resp->switch_job; + launch.task_prolog = params->task_prolog; + launch.task_epilog = params->task_epilog; + launch.cpu_bind_type = params->cpu_bind_type; + launch.cpu_bind = params->cpu_bind; + launch.mem_bind_type = params->mem_bind_type; + launch.mem_bind = params->mem_bind; + launch.multi_prog = params->multi_prog ? 1 : 0; + launch.max_sockets = params->max_sockets; + launch.max_cores = params->max_cores; + launch.max_threads = params->max_threads; launch.cpus_per_task = params->cpus_per_task; launch.ntasks_per_node = params->ntasks_per_node; launch.ntasks_per_socket= params->ntasks_per_socket; launch.ntasks_per_core = params->ntasks_per_core; launch.task_dist = params->task_dist; launch.plane_size = params->plane_size; - launch.options = job_options_create(); + launch.pty = params->pty; + launch.ckpt_path = params->ckpt_path; + launch.acctg_freq = params->acctg_freq; + launch.open_mode = params->open_mode; + launch.options = job_options_create(); launch.complete_nodelist = xstrdup(ctx->step_resp->step_layout->node_list); spank_set_remote_options (launch.options); @@ -236,6 +244,7 @@ int slurm_step_launch (slurm_step_ctx ctx, launch.user_managed_io = params->user_managed_io ? 1 : 0; ctx->launch_state->user_managed_io = params->user_managed_io; + if (!ctx->launch_state->user_managed_io) { launch.ofname = params->remote_output_filename; launch.efname = params->remote_error_filename; @@ -277,7 +286,7 @@ int slurm_step_launch (slurm_step_ctx ctx, launch.resp_port[i] = ctx->launch_state->resp_port[i]; } - _launch_tasks(ctx, &launch); + rc = _launch_tasks(ctx, &launch, params->msg_timeout); /* clean up */ xfree(launch.resp_port); @@ -298,7 +307,7 @@ done: /* * Block until all tasks have started. */ -int slurm_step_launch_wait_start(slurm_step_ctx ctx) +int slurm_step_launch_wait_start(slurm_step_ctx_t *ctx) { struct step_launch_state *sls = ctx->launch_state; /* Wait for all tasks to start */ @@ -341,7 +350,7 @@ int slurm_step_launch_wait_start(slurm_step_ctx ctx) /* * Block until all tasks have finished (or failed to start altogether). 
*/ -void slurm_step_launch_wait_finish(slurm_step_ctx ctx) +void slurm_step_launch_wait_finish(slurm_step_ctx_t *ctx) { struct step_launch_state *sls = ctx->launch_state; struct timespec ts = {0, 0}; @@ -400,10 +409,15 @@ void slurm_step_launch_wait_finish(slurm_step_ctx ctx) } } } - + + if (!force_terminated_job && task_exit_signal) + info("Force Terminated job step"); + /* Then shutdown the message handler thread */ eio_signal_shutdown(sls->msg_handle); + pthread_mutex_unlock(&sls->lock); pthread_join(sls->msg_thread, NULL); + pthread_mutex_lock(&sls->lock); eio_handle_destroy(sls->msg_handle); /* Then wait for the IO thread to finish */ @@ -413,7 +427,6 @@ void slurm_step_launch_wait_finish(slurm_step_ctx ctx) } mpi_hook_client_fini(sls->mpi_state); - pthread_mutex_unlock(&sls->lock); } @@ -422,7 +435,7 @@ void slurm_step_launch_wait_finish(slurm_step_ctx ctx) * * Can be called from a signal handler. */ -void slurm_step_launch_abort(slurm_step_ctx ctx) +void slurm_step_launch_abort(slurm_step_ctx_t *ctx) { struct step_launch_state *sls = ctx->launch_state; @@ -430,6 +443,100 @@ void slurm_step_launch_abort(slurm_step_ctx ctx) pthread_cond_signal(&sls->cond); } +/* + * Forward a signal to all those nodes with running tasks + */ +void slurm_step_launch_fwd_signal(slurm_step_ctx_t *ctx, int signo) +{ + int node_id, j, active, num_tasks; + slurm_msg_t req; + kill_tasks_msg_t msg; + hostlist_t hl; + char *name = NULL; + char buf[8192]; + List ret_list = NULL; + ListIterator itr; + ret_data_info_t *ret_data_info = NULL; + int rc = SLURM_SUCCESS; + struct step_launch_state *sls = ctx->launch_state; + + debug2("forward signal %d to job", signo); + + /* common to all tasks */ + msg.job_id = ctx->job_id; + msg.job_step_id = ctx->step_resp->job_step_id; + msg.signal = (uint32_t) signo; + + pthread_mutex_lock(&sls->lock); + + hl = hostlist_create(""); + for (node_id = 0; + node_id < ctx->step_resp->step_layout->node_cnt; + node_id++) { + active = 0; + num_tasks = sls->layout->tasks[node_id]; + for (j = 0; j < num_tasks; j++) { + if(bit_test(sls->tasks_started, + sls->layout->tids[node_id][j]) && + !bit_test(sls->tasks_exited, + sls->layout->tids[node_id][j])) { + /* this one has active tasks */ + active = 1; + break; + } + } + + if (!active) + continue; + + name = nodelist_nth_host(sls->layout->node_list, node_id); + hostlist_push(hl, name); + free(name); + } + + pthread_mutex_unlock(&sls->lock); + + if(!hostlist_count(hl)) { + hostlist_destroy(hl); + goto nothing_left; + } + hostlist_ranged_string(hl, sizeof(buf), buf); + hostlist_destroy(hl); + name = xstrdup(buf); + + slurm_msg_t_init(&req); + req.msg_type = REQUEST_SIGNAL_TASKS; + req.data = &msg; + + debug3("sending signal to host %s", name); + + if (!(ret_list = slurm_send_recv_msgs(name, &req, 0))) { + error("fwd_signal: slurm_send_recv_msgs really failed bad"); + xfree(name); + return; + } + xfree(name); + itr = list_iterator_create(ret_list); + while((ret_data_info = list_next(itr))) { + rc = slurm_get_return_code(ret_data_info->type, + ret_data_info->data); + /* + * Report error unless it is "Invalid job id" which + * probably just means the tasks exited in the meanwhile. 
+ */ + if ((rc != 0) && (rc != ESLURM_INVALID_JOB_ID) + && (rc != ESLURMD_JOB_NOTRUNNING) && (rc != ESRCH)) { + error("%s: signal: %s", + ret_data_info->node_name, + slurm_strerror(rc)); + } + } + list_iterator_destroy(itr); + list_destroy(ret_list); +nothing_left: + debug2("All tasks have been signalled"); + +} /********************************************************************** * Functions used by step_ctx code, but not exported throught the API @@ -437,27 +544,30 @@ void slurm_step_launch_abort(slurm_step_ctx ctx) /* * Create a launch state structure for a specified step context, "ctx". */ -struct step_launch_state *step_launch_state_create(slurm_step_ctx ctx) +struct step_launch_state *step_launch_state_create(slurm_step_ctx_t *ctx) { struct step_launch_state *sls; + slurm_step_layout_t *layout = ctx->step_resp->step_layout; sls = xmalloc(sizeof(struct step_launch_state)); - if (sls != NULL) { - sls->slurmctld_socket_fd = -1; - sls->tasks_requested = ctx->step_req->num_tasks; - sls->tasks_started = bit_alloc(ctx->step_req->num_tasks); - sls->tasks_exited = bit_alloc(ctx->step_req->num_tasks); - sls->layout = ctx->step_resp->step_layout; - sls->resp_port = NULL; - sls->abort = false; - sls->abort_action_taken = false; - sls->mpi_info->jobid = ctx->step_req->job_id; - sls->mpi_info->stepid = ctx->step_resp->job_step_id; - sls->mpi_info->step_layout = ctx->step_resp->step_layout; - sls->mpi_state = NULL; - pthread_mutex_init(&sls->lock, NULL); - pthread_cond_init(&sls->cond, NULL); - } + sls->slurmctld_socket_fd = -1; + /* Hack for LAM-MPI's lamboot, launch one task per node */ + if (mpi_hook_client_single_task_per_node()) + sls->tasks_requested = layout->node_cnt; + else + sls->tasks_requested = layout->task_cnt; + sls->tasks_started = bit_alloc(layout->task_cnt); + sls->tasks_exited = bit_alloc(layout->task_cnt); + sls->layout = layout; + sls->resp_port = NULL; + sls->abort = false; + sls->abort_action_taken = false; + sls->mpi_info->jobid = ctx->step_req->job_id; + sls->mpi_info->stepid = ctx->step_resp->job_step_id; + sls->mpi_info->step_layout = layout; + sls->mpi_state = NULL; + pthread_mutex_init(&sls->lock, NULL); + pthread_cond_init(&sls->cond, NULL); return sls; } @@ -478,7 +588,6 @@ void step_launch_state_destroy(struct step_launch_state *sls) } } - /********************************************************************** * Message handler functions **********************************************************************/ @@ -504,7 +613,8 @@ static int _msg_thr_create(struct step_launch_state *sls, int num_nodes) int sock = -1; short port = -1; eio_obj_t *obj; - int i; + int i, rc = SLURM_SUCCESS; + pthread_attr_t attr; debug("Entering _msg_thr_create()"); slurm_uid = (uid_t) slurm_get_slurm_user_id(); @@ -529,12 +639,15 @@ static int _msg_thr_create(struct step_launch_state *sls, int num_nodes) eio_new_initial_obj(sls->msg_handle, obj); } - if (pthread_create(&sls->msg_thread, NULL, + slurm_attr_init(&attr); + if (pthread_create(&sls->msg_thread, &attr, _msg_thr_internal, (void *)sls) != 0) { error("pthread_create of message thread: %m"); - return SLURM_ERROR; + + rc = SLURM_ERROR; } - return SLURM_SUCCESS; + slurm_attr_destroy(&attr); + return rc; } static bool _message_socket_readable(eio_obj_t *obj) @@ -590,7 +703,7 @@ static int _message_socket_accept(eio_obj_t *obj, List objs) in /etc/hosts. 
*/ uc = (unsigned char *)&((struct sockaddr_in *)&addr)->sin_addr.s_addr; port = ((struct sockaddr_in *)&addr)->sin_port; - debug2("got message connection from %u.%u.%u.%u:%hu", + debug2("step got message connection from %u.%u.%u.%u:%hu", uc[0], uc[1], uc[2], uc[3], ntohs(port)); fflush(stdout); @@ -646,6 +759,14 @@ _exit_handler(struct step_launch_state *sls, slurm_msg_t *exit_msg) task_exit_msg_t *msg = (task_exit_msg_t *) exit_msg->data; int i; + /* Record SIGTERM and SIGKILL termination codes to + * recognize abnormal termination */ + if (WIFSIGNALED(msg->return_code)) { + i = WTERMSIG(msg->return_code); + if ((i == SIGKILL) || (i == SIGTERM)) + task_exit_signal = i; + } + pthread_mutex_lock(&sls->lock); for (i = 0; i < msg->num_tasks; i++) { @@ -660,6 +781,36 @@ _exit_handler(struct step_launch_state *sls, slurm_msg_t *exit_msg) pthread_mutex_unlock(&sls->lock); } +static void +_job_complete_handler(struct step_launch_state *sls, slurm_msg_t *complete_msg) +{ + srun_job_complete_msg_t *step_msg = + (srun_job_complete_msg_t *) complete_msg->data; + + if (step_msg->step_id == NO_VAL) { + verbose("Complete job %u received", + step_msg->job_id); + } else { + verbose("Complete job step %u.%u received", + step_msg->job_id, step_msg->step_id); + } + + /* FIXME: does nothing yet */ + + pthread_mutex_lock(&sls->lock); + pthread_cond_signal(&sls->cond); + pthread_mutex_unlock(&sls->lock); +} + +static void +_timeout_handler(struct step_launch_state *sls, slurm_msg_t *timeout_msg) +{ + /* FIXME: does nothing yet */ + pthread_mutex_lock(&sls->lock); + pthread_cond_signal(&sls->cond); + pthread_mutex_unlock(&sls->lock); +} + /* * Take the list of node names of down nodes and convert into an * array of nodeids for the step. The nodeid array is passed to @@ -766,8 +917,9 @@ _task_user_managed_io_handler(struct step_launch_state *sls, static void _handle_msg(struct step_launch_state *sls, slurm_msg_t *msg) { - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); uid_t uid = getuid(); + srun_user_msg_t *um; int rc; if ((req_uid != slurm_uid) && (req_uid != 0) && (req_uid != uid)) { @@ -787,21 +939,36 @@ _handle_msg(struct step_launch_state *sls, slurm_msg_t *msg) _exit_handler(sls, msg); slurm_free_task_exit_msg(msg->data); break; - case SRUN_NODE_FAIL: - debug2("received srun node fail"); - _node_fail_handler(sls, msg); - slurm_free_srun_node_fail_msg(msg->data); + case SRUN_PING: + debug3("slurmctld ping received"); + slurm_send_rc_msg(msg, SLURM_SUCCESS); + slurm_free_srun_ping_msg(msg->data); break; - case SRUN_TIMEOUT: - debug2("received job step timeout message"); - /* FIXME - does nothing yet */ - slurm_free_srun_timeout_msg(msg->data); + case SRUN_EXEC: + _exec_prog(msg); + slurm_free_srun_exec_msg(msg->data); break; case SRUN_JOB_COMPLETE: debug2("received job step complete message"); - /* FIXME - does nothing yet */ + force_terminated_job = true; + _job_complete_handler(sls, msg); slurm_free_srun_job_complete_msg(msg->data); break; + case SRUN_TIMEOUT: + debug2("received job step timeout message"); + _timeout_handler(sls, msg); + slurm_free_srun_timeout_msg(msg->data); + break; + case SRUN_USER_MSG: + um = msg->data; + info("%s", um->msg); + slurm_free_srun_user_msg(msg->data); + break; + case SRUN_NODE_FAIL: + debug2("received srun node fail"); + _node_fail_handler(sls, msg); + slurm_free_srun_node_fail_msg(msg->data); + break; case PMI_KVS_PUT_REQ: debug2("PMI_KVS_PUT_REQ received"); rc = pmi_kvs_put((struct kvs_comm_set *) 
msg->data); @@ -828,8 +995,9 @@ _handle_msg(struct step_launch_state *sls, slurm_msg_t *msg) /********************************************************************** * Task launch functions **********************************************************************/ -static int _launch_tasks(slurm_step_ctx ctx, - launch_tasks_request_msg_t *launch_msg) +static int _launch_tasks(slurm_step_ctx_t *ctx, + launch_tasks_request_msg_t *launch_msg, + uint32_t timeout) { slurm_msg_t msg; List ret_list = NULL; @@ -838,13 +1006,24 @@ static int _launch_tasks(slurm_step_ctx ctx, int rc = SLURM_SUCCESS; debug("Entering _launch_tasks"); + if (ctx->verbose_level) { + char *name = NULL; + hostlist_t hl = hostlist_create(launch_msg->complete_nodelist); + int i = 0; + while((name = hostlist_shift(hl))) { + _print_launch_msg(launch_msg, name, i++); + free(name); + } + hostlist_destroy(hl); + } + slurm_msg_t_init(&msg); msg.msg_type = REQUEST_LAUNCH_TASKS; msg.data = launch_msg; if(!(ret_list = slurm_send_recv_msgs( ctx->step_resp->step_layout->node_list, - &msg, 0))) { + &msg, timeout))) { error("slurm_send_recv_msgs failed miserably: %m"); return SLURM_ERROR; } @@ -855,9 +1034,13 @@ static int _launch_tasks(slurm_step_ctx ctx, debug("launch returned msg_rc=%d err=%d type=%d", rc, ret_data->err, ret_data->type); if (rc != SLURM_SUCCESS) { - errno = ret_data->err; + if (ret_data->err) + errno = ret_data->err; + else + errno = rc; error("Task launch failed on node %s: %m", ret_data->node_name); + rc = SLURM_ERROR; } else { #if 0 /* only for debugging, might want to make this a callback */ errno = ret_data->err; @@ -868,7 +1051,7 @@ static int _launch_tasks(slurm_step_ctx ctx, } list_iterator_destroy(ret_itr); list_destroy(ret_list); - return SLURM_SUCCESS; + return rc; } /* returns an xmalloc cwd string, or NULL if lookup failed. 
*/ @@ -882,3 +1065,123 @@ static char *_lookup_cwd(void) return NULL; } } + +static void _print_launch_msg(launch_tasks_request_msg_t *msg, + char *hostname, int nodeid) +{ + int i; + char tmp_str[10], task_list[4096]; + hostlist_t hl = hostlist_create(""); + + for (i=0; i<msg->tasks_to_launch[nodeid]; i++) { + sprintf(tmp_str, "%u", msg->global_task_ids[nodeid][i]); + hostlist_push(hl, tmp_str); + } + hostlist_ranged_string(hl, 4096, task_list); + hostlist_destroy(hl); + + info("launching %u.%u on host %s, %u tasks: %s", + msg->job_id, msg->job_step_id, hostname, + msg->tasks_to_launch[nodeid], task_list); + + debug3("uid:%ld gid:%ld cwd:%s %d", (long) msg->uid, + (long) msg->gid, msg->cwd, nodeid); +} + +void record_ppid(void) +{ + srun_ppid = getppid(); +} + +/* This is used to initiate an OpenMPI checkpoint program, + * but is written to be general purpose */ +static void +_exec_prog(slurm_msg_t *msg) +{ + pid_t child; + int pfd[2], status, exit_code = 0, i; + ssize_t len; + char *argv[4], buf[256] = ""; + time_t now = time(NULL); + bool checkpoint = false; + srun_exec_msg_t *exec_msg = msg->data; + + if (exec_msg->argc > 2) { + verbose("Exec '%s %s' for %u.%u", + exec_msg->argv[0], exec_msg->argv[1], + exec_msg->job_id, exec_msg->step_id); + } else { + verbose("Exec '%s' for %u.%u", + exec_msg->argv[0], + exec_msg->job_id, exec_msg->step_id); + } + + if (strcmp(exec_msg->argv[0], "ompi-checkpoint") == 0) { + if (srun_ppid) + checkpoint = true; + else { + error("Can not create checkpoint, no srun_ppid set"); + exit_code = EINVAL; + goto fini; + } + } + if (checkpoint) { + /* OpenMPI specific checkpoint support */ + info("Checkpoint started at %s", ctime(&now)); + for (i=0; (exec_msg->argv[i] && (i<2)); i++) { + argv[i] = exec_msg->argv[i]; + } + snprintf(buf, sizeof(buf), "%ld", (long) srun_ppid); + argv[i] = buf; + argv[i+1] = NULL; + } + + if (pipe(pfd) == -1) { + snprintf(buf, sizeof(buf), "pipe: %s", strerror(errno)); + error("%s", buf); + exit_code = errno; + goto fini; + } + + child = fork(); + if (child == 0) { + int fd = open("/dev/null", O_RDONLY); + dup2(fd, 0); /* stdin from /dev/null */ + dup2(pfd[1], 1); /* stdout to pipe */ + dup2(pfd[1], 2); /* stderr to pipe */ + close(pfd[0]); + close(pfd[1]); + if (checkpoint) + execvp(exec_msg->argv[0], argv); + else + execvp(exec_msg->argv[0], exec_msg->argv); + error("execvp(%s): %m", exec_msg->argv[0]); + } else if (child < 0) { + snprintf(buf, sizeof(buf), "fork: %s", strerror(errno)); + error("%s", buf); + exit_code = errno; + goto fini; + } else { + close(pfd[1]); + len = read(pfd[0], buf, sizeof(buf)); + close(pfd[0]); + waitpid(child, &status, 0); + exit_code = WEXITSTATUS(status); + } + +fini: if (checkpoint) { + now = time(NULL); + if (exit_code) { + info("Checkpoint completion code %d at %s", + exit_code, ctime(&now)); + } else { + info("Checkpoint completed successfully at %s", + ctime(&now)); + } + if (buf[0]) + info("Checkpoint location: %s", buf); + slurm_checkpoint_complete(exec_msg->job_id, exec_msg->step_id, + time(NULL), (uint32_t) exit_code, buf); + } +} + diff --git a/src/api/step_launch.h b/src/api/step_launch.h index eed284cd2..529193eb6 100644 --- a/src/api/step_launch.h +++ b/src/api/step_launch.h @@ -1,12 +1,12 @@ /*****************************************************************************\ * step_launch.h - launch a parallel job step * - * $Id: step_launch.h 10920 2007-02-02 03:01:14Z morrone $ + * $Id: step_launch.h 13672 2008-03-19 23:10:58Z jette $ 
***************************************************************************** * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -87,11 +87,16 @@ struct step_launch_state { /* * Create a launch state structure for a specified step context, "ctx". */ -struct step_launch_state * step_launch_state_create(slurm_step_ctx ctx); +struct step_launch_state * step_launch_state_create(slurm_step_ctx_t *ctx); /* * Free the memory associated with the a launch state structure. */ void step_launch_state_destroy(struct step_launch_state *sls); +/* + * Record the parent process ID of the program which spawned this. + * Needed to locate the mpirun program for OpenMPI checkpoint + */ +void record_ppid(void); #endif /* _STEP_LAUNCH_H */ diff --git a/src/api/submit.c b/src/api/submit.c index 0a90e26cb..c66b34961 100644 --- a/src/api/submit.c +++ b/src/api/submit.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * submit.c - submit a job with supplied contraints - * $Id: submit.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: submit.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/suspend.c b/src/api/suspend.c index 7be3d5423..d447a8d97 100644 --- a/src/api/suspend.c +++ b/src/api/suspend.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * suspend.c - job step suspend and resume functions. - * $Id: suspend.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: suspend.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/api/triggers.c b/src/api/triggers.c index 1dc88e03c..67981abd3 100644 --- a/src/api/triggers.c +++ b/src/api/triggers.c @@ -4,7 +4,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
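A sketch only of how a launcher is expected to use the record_ppid() helper declared in the step_launch.h hunk above: the parent PID is recorded early so that a later SRUN_EXEC "ompi-checkpoint" request, handled by _exec_prog() in step_launch.c, can append that PID as the program's final argument. The extern declaration mirrors src/api/step_launch.h from this patch; the surrounding program is hypothetical.

extern void record_ppid(void);

int main(void)
{
	record_ppid();   /* remember the parent (e.g. mpirun) PID early */

	/* ... create the step context and launch tasks as sketched above ... */

	return 0;
}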
diff --git a/src/api/update_config.c b/src/api/update_config.c index 6ad715fd4..b317331d6 100644 --- a/src/api/update_config.c +++ b/src/api/update_config.c @@ -1,11 +1,11 @@ /****************************************************************************\ * update_config.c - request that slurmctld update its configuration - * $Id: update_config.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: update_config.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/Makefile.am b/src/common/Makefile.am index 0dfe31e6b..8d61a722e 100644 --- a/src/common/Makefile.am +++ b/src/common/Makefile.am @@ -1,4 +1,12 @@ # Makefile for common library +# If you are linking to this lib any know you are not linking to +# anything else that will need variables here link to +# $(top_builddir)/src/common/libcommon.la +# if you are linking to a stand alone program that needs all symbols link to +# $(top_builddir)/src/common/libcommon.o with -ldl added +# This avoids having multiple symbols running around when there should +# only be 1 address per symbol. If you link to the libcommon.la in +# a plugin you will get 2 addresses for one symbol which could lead to problems. AUTOMAKE_OPTIONS = foreign @@ -10,7 +18,9 @@ build_unsetenv_src = unsetenv.c unsetenv.h extra_unsetenv_src = endif -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) + +noinst_PROGRAMS = libcommon.o noinst_LTLIBRARIES = \ libcommon.la \ @@ -19,6 +29,7 @@ noinst_LTLIBRARIES = \ libspank.la libcommon_la_SOURCES = \ + assoc_mgr.c assoc_mgr.h \ xmalloc.c xmalloc.h \ xassert.c xassert.h \ xstring.c xstring.h \ @@ -58,10 +69,13 @@ libcommon_la_SOURCES = \ slurm_protocol_defs.h \ slurm_rlimits_info.h \ slurm_rlimits_info.c \ + slurmdbd_defs.c slurmdbd_defs.h \ uid.c uid.h \ util-net.c util-net.h \ slurm_auth.c slurm_auth.h \ - slurm_jobacct.c slurm_jobacct.h \ + jobacct_common.c jobacct_common.h \ + slurm_accounting_storage.c slurm_accounting_storage.h \ + slurm_jobacct_gather.c slurm_jobacct_gather.h \ slurm_jobcomp.c slurm_jobcomp.h \ switch.c switch.h \ arg_desc.c arg_desc.h \ @@ -80,7 +94,8 @@ libcommon_la_SOURCES = \ global_defaults.c \ timers.c timers.h \ slurm_xlator.h \ - stepd_api.c stepd_api.h + stepd_api.c stepd_api.h \ + proc_args.c proc_args.h EXTRA_libcommon_la_SOURCES = \ $(extra_unsetenv_src) @@ -98,9 +113,18 @@ libspank_la_SOURCES = \ plugstack.c plugstack.h \ optz.c optz.h -libcommon_la_LIBADD = $(SSL_LIBS) -ldl +libcommon_la_LIBADD = -ldl + +libcommon_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic + +libcommon_o_SOURCES = + + +# This was made so we chould export all symbols from libcommon +# on multiple platforms +libcommon.o : $(libcommon_la_OBJECTS) $(libcommon_la_DEPENDENCIES) + $(libcommon_la_LINK) $(libcommon_la_OBJECTS) -libcommon_la_LDFLAGS = $(LIB_LDFLAGS) $(SSL_LDFLAGS) global_defaults.c : $(top_builddir)/config.h Makefile @( echo "/* This file autogenerated by src/common/Makefile */"; \ diff --git a/src/common/Makefile.in b/src/common/Makefile.in index 39c4e23ad..ae339dd57 100644 --- a/src/common/Makefile.in +++ b/src/common/Makefile.in @@ -1,8 +1,8 @@ -# 
Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -15,6 +15,15 @@ @SET_MAKE@ # Makefile for common library +# If you are linking to this lib any know you are not linking to +# anything else that will need variables here link to +# $(top_builddir)/src/common/libcommon.la +# if you are linking to a stand alone program that needs all symbols link to +# $(top_builddir)/src/common/libcommon.o with -ldl added +# This avoids having multiple symbols running around when there should +# only be 1 address per symbol. If you link to the libcommon.la in +# a plugin you will get 2 addresses for one symbol which could lead to problems. + VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ @@ -35,6 +44,7 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ target_triplet = @target@ +noinst_PROGRAMS = libcommon.o$(EXEEXT) subdir = src/common DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 @@ -44,6 +54,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -65,48 +77,52 @@ mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h CONFIG_CLEAN_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) -am__DEPENDENCIES_1 = -libcommon_la_DEPENDENCIES = $(am__DEPENDENCIES_1) -am__libcommon_la_SOURCES_DIST = xmalloc.c xmalloc.h xassert.c \ - xassert.h xstring.c xstring.h xsignal.c xsignal.h forward.c \ - forward.h strlcpy.c strlcpy.h list.c list.h net.c net.h fd.c \ - fd.h log.c log.h cbuf.c cbuf.h safeopen.c safeopen.h \ - bitstring.c bitstring.h mpi.c mpi.h pack.c pack.h \ - parse_config.c parse_config.h parse_spec.c parse_spec.h \ - plugin.c plugin.h plugrack.c plugrack.h read_config.c \ - read_config.h node_select.c node_select.h env.c env.h \ - slurm_cred.h slurm_cred.c slurm_errno.c slurm_protocol_api.c \ - slurm_protocol_api.h slurm_protocol_pack.c \ - slurm_protocol_pack.h slurm_protocol_util.c \ - slurm_protocol_util.h slurm_protocol_socket_implementation.c \ +libcommon_la_DEPENDENCIES = +am__libcommon_la_SOURCES_DIST = assoc_mgr.c assoc_mgr.h xmalloc.c \ + xmalloc.h xassert.c xassert.h xstring.c xstring.h xsignal.c \ + xsignal.h forward.c forward.h strlcpy.c strlcpy.h list.c \ + list.h net.c net.h fd.c fd.h log.c log.h cbuf.c cbuf.h \ + safeopen.c safeopen.h bitstring.c bitstring.h mpi.c mpi.h \ + pack.c pack.h parse_config.c parse_config.h parse_spec.c \ + parse_spec.h plugin.c plugin.h plugrack.c plugrack.h \ + read_config.c read_config.h node_select.c node_select.h env.c \ + env.h slurm_cred.h slurm_cred.c slurm_errno.c \ + slurm_protocol_api.c slurm_protocol_api.h \ + slurm_protocol_pack.c slurm_protocol_pack.h \ + slurm_protocol_util.c slurm_protocol_util.h \ + slurm_protocol_socket_implementation.c \ 
slurm_protocol_socket_common.h slurm_protocol_common.h \ slurm_protocol_interface.h slurm_protocol_defs.c \ slurm_protocol_defs.h slurm_rlimits_info.h \ - slurm_rlimits_info.c uid.c uid.h util-net.c util-net.h \ - slurm_auth.c slurm_auth.h slurm_jobacct.c slurm_jobacct.h \ - slurm_jobcomp.c slurm_jobcomp.h switch.c switch.h arg_desc.c \ - arg_desc.h macros.h malloc.c malloc.h getopt.h getopt.c \ - getopt1.c unsetenv.c unsetenv.h slurm_selecttype_info.c \ - slurm_resource_info.c slurm_resource_info.h hostlist.c \ - hostlist.h slurm_step_layout.c slurm_step_layout.h \ - checkpoint.c checkpoint.h parse_time.c parse_time.h \ - job_options.c job_options.h global_defaults.c timers.c \ - timers.h slurm_xlator.h stepd_api.c stepd_api.h + slurm_rlimits_info.c slurmdbd_defs.c slurmdbd_defs.h uid.c \ + uid.h util-net.c util-net.h slurm_auth.c slurm_auth.h \ + jobacct_common.c jobacct_common.h slurm_accounting_storage.c \ + slurm_accounting_storage.h slurm_jobacct_gather.c \ + slurm_jobacct_gather.h slurm_jobcomp.c slurm_jobcomp.h \ + switch.c switch.h arg_desc.c arg_desc.h macros.h malloc.c \ + malloc.h getopt.h getopt.c getopt1.c unsetenv.c unsetenv.h \ + slurm_selecttype_info.c slurm_resource_info.c \ + slurm_resource_info.h hostlist.c hostlist.h \ + slurm_step_layout.c slurm_step_layout.h checkpoint.c \ + checkpoint.h parse_time.c parse_time.h job_options.c \ + job_options.h global_defaults.c timers.c timers.h \ + slurm_xlator.h stepd_api.c stepd_api.h proc_args.c proc_args.h @HAVE_UNSETENV_FALSE@am__objects_1 = unsetenv.lo -am_libcommon_la_OBJECTS = xmalloc.lo xassert.lo xstring.lo xsignal.lo \ - forward.lo strlcpy.lo list.lo net.lo fd.lo log.lo cbuf.lo \ - safeopen.lo bitstring.lo mpi.lo pack.lo parse_config.lo \ - parse_spec.lo plugin.lo plugrack.lo read_config.lo \ - node_select.lo env.lo slurm_cred.lo slurm_errno.lo \ - slurm_protocol_api.lo slurm_protocol_pack.lo \ +am_libcommon_la_OBJECTS = assoc_mgr.lo xmalloc.lo xassert.lo \ + xstring.lo xsignal.lo forward.lo strlcpy.lo list.lo net.lo \ + fd.lo log.lo cbuf.lo safeopen.lo bitstring.lo mpi.lo pack.lo \ + parse_config.lo parse_spec.lo plugin.lo plugrack.lo \ + read_config.lo node_select.lo env.lo slurm_cred.lo \ + slurm_errno.lo slurm_protocol_api.lo slurm_protocol_pack.lo \ slurm_protocol_util.lo slurm_protocol_socket_implementation.lo \ - slurm_protocol_defs.lo slurm_rlimits_info.lo uid.lo \ - util-net.lo slurm_auth.lo slurm_jobacct.lo slurm_jobcomp.lo \ - switch.lo arg_desc.lo malloc.lo getopt.lo getopt1.lo \ - $(am__objects_1) slurm_selecttype_info.lo \ + slurm_protocol_defs.lo slurm_rlimits_info.lo slurmdbd_defs.lo \ + uid.lo util-net.lo slurm_auth.lo jobacct_common.lo \ + slurm_accounting_storage.lo slurm_jobacct_gather.lo \ + slurm_jobcomp.lo switch.lo arg_desc.lo malloc.lo getopt.lo \ + getopt1.lo $(am__objects_1) slurm_selecttype_info.lo \ slurm_resource_info.lo hostlist.lo slurm_step_layout.lo \ checkpoint.lo parse_time.lo job_options.lo global_defaults.lo \ - timers.lo stepd_api.lo + timers.lo stepd_api.lo proc_args.lo am__EXTRA_libcommon_la_SOURCES_DIST = unsetenv.c unsetenv.h libcommon_la_OBJECTS = $(am_libcommon_la_OBJECTS) libcommon_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ @@ -121,7 +137,11 @@ libeio_la_OBJECTS = $(am_libeio_la_OBJECTS) libspank_la_LIBADD = am_libspank_la_OBJECTS = plugstack.lo optz.lo libspank_la_OBJECTS = $(am_libspank_la_OBJECTS) -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +PROGRAMS = $(noinst_PROGRAMS) +am_libcommon_o_OBJECTS = +libcommon_o_OBJECTS = $(am_libcommon_o_OBJECTS) +libcommon_o_LDADD = $(LDADD) +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -135,11 +155,11 @@ LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ $(LDFLAGS) -o $@ SOURCES = $(libcommon_la_SOURCES) $(EXTRA_libcommon_la_SOURCES) \ $(libdaemonize_la_SOURCES) $(libeio_la_SOURCES) \ - $(libspank_la_SOURCES) + $(libspank_la_SOURCES) $(libcommon_o_SOURCES) DIST_SOURCES = $(am__libcommon_la_SOURCES_DIST) \ $(am__EXTRA_libcommon_la_SOURCES_DIST) \ $(libdaemonize_la_SOURCES) $(libeio_la_SOURCES) \ - $(libspank_la_SOURCES) + $(libspank_la_SOURCES) $(libcommon_o_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) @@ -166,6 +186,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -179,10 +200,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -202,7 +226,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -213,6 +240,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -228,6 +257,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -243,6 +273,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -304,7 +335,7 @@ AUTOMAKE_OPTIONS = foreign @HAVE_UNSETENV_TRUE@build_unsetenv_src = @HAVE_UNSETENV_FALSE@extra_unsetenv_src = @HAVE_UNSETENV_TRUE@extra_unsetenv_src = unsetenv.c unsetenv.h -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) noinst_LTLIBRARIES = \ libcommon.la \ libdaemonize.la \ @@ -312,6 +343,7 @@ noinst_LTLIBRARIES = \ libspank.la libcommon_la_SOURCES = \ + assoc_mgr.c assoc_mgr.h \ xmalloc.c xmalloc.h \ xassert.c xassert.h \ xstring.c xstring.h \ @@ -351,10 +383,13 @@ libcommon_la_SOURCES = \ slurm_protocol_defs.h \ slurm_rlimits_info.h \ slurm_rlimits_info.c \ + slurmdbd_defs.c slurmdbd_defs.h \ uid.c uid.h \ util-net.c util-net.h \ slurm_auth.c slurm_auth.h \ - slurm_jobacct.c slurm_jobacct.h \ + jobacct_common.c jobacct_common.h \ + slurm_accounting_storage.c slurm_accounting_storage.h \ + slurm_jobacct_gather.c slurm_jobacct_gather.h \ 
slurm_jobcomp.c slurm_jobcomp.h \ switch.c switch.h \ arg_desc.c arg_desc.h \ @@ -373,7 +408,8 @@ libcommon_la_SOURCES = \ global_defaults.c \ timers.c timers.h \ slurm_xlator.h \ - stepd_api.c stepd_api.h + stepd_api.c stepd_api.h \ + proc_args.c proc_args.h EXTRA_libcommon_la_SOURCES = \ $(extra_unsetenv_src) @@ -391,8 +427,9 @@ libspank_la_SOURCES = \ plugstack.c plugstack.h \ optz.c optz.h -libcommon_la_LIBADD = $(SSL_LIBS) -ldl -libcommon_la_LDFLAGS = $(LIB_LDFLAGS) $(SSL_LDFLAGS) +libcommon_la_LIBADD = -ldl +libcommon_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic +libcommon_o_SOURCES = all: all-am .SUFFIXES: @@ -444,6 +481,13 @@ libeio.la: $(libeio_la_OBJECTS) $(libeio_la_DEPENDENCIES) libspank.la: $(libspank_la_OBJECTS) $(libspank_la_DEPENDENCIES) $(LINK) $(libspank_la_OBJECTS) $(libspank_la_LIBADD) $(LIBS) +clean-noinstPROGRAMS: + @list='$(noinst_PROGRAMS)'; for p in $$list; do \ + f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f $$p $$f"; \ + rm -f $$p $$f ; \ + done + mostlyclean-compile: -rm -f *.$(OBJEXT) @@ -451,6 +495,7 @@ distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arg_desc.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/assoc_mgr.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bitstring.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cbuf.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint.Plo@am__quote@ @@ -465,6 +510,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hostlist.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/io_hdr.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_options.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_common.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/list.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/log.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/malloc.Plo@am__quote@ @@ -479,12 +525,14 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugrack.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugstack.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_args.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/safeopen.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_accounting_storage.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_auth.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_cred.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_errno.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_jobacct.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_jobacct_gather.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_jobcomp.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_protocol_api.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_protocol_defs.Plo@am__quote@ @@ -495,6 +543,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_rlimits_info.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_selecttype_info.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_step_layout.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/slurmdbd_defs.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stepd_api.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/strlcpy.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/switch.Plo@am__quote@ @@ -539,8 +588,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -552,8 +601,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -563,13 +612,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique @@ -610,7 +658,7 @@ distdir: $(DISTFILES) done check-am: all-am check: check-am -all-am: Makefile $(LTLIBRARIES) +all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) installdirs: install: install-am install-exec: install-exec-am @@ -639,7 +687,7 @@ maintainer-clean-generic: clean: clean-am clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ - mostlyclean-am + clean-noinstPROGRAMS mostlyclean-am distclean: distclean-am -rm -rf ./$(DEPDIR) @@ -698,18 +746,24 @@ uninstall-am: .MAKE: install-am install-strip .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ - clean-libtool clean-noinstLTLIBRARIES ctags distclean \ - distclean-compile distclean-generic distclean-libtool \ - distclean-tags distdir dvi dvi-am html html-am info info-am \ - install install-am install-data install-data-am install-dvi \ - install-dvi-am install-exec install-exec-am install-html \ - install-html-am install-info install-info-am install-man \ - install-pdf install-pdf-am install-ps install-ps-am \ - install-strip installcheck installcheck-am installdirs \ - maintainer-clean maintainer-clean-generic mostlyclean \ - mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ - pdf pdf-am ps ps-am tags uninstall uninstall-am - + clean-libtool clean-noinstLTLIBRARIES clean-noinstPROGRAMS \ + ctags distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean 
mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am + + +# This was made so we chould export all symbols from libcommon +# on multiple platforms +libcommon.o : $(libcommon_la_OBJECTS) $(libcommon_la_DEPENDENCIES) + $(libcommon_la_LINK) $(libcommon_la_OBJECTS) global_defaults.c : $(top_builddir)/config.h Makefile @( echo "/* This file autogenerated by src/common/Makefile */"; \ diff --git a/src/common/arg_desc.c b/src/common/arg_desc.c index 20bbb9f5a..72b82dfeb 100644 --- a/src/common/arg_desc.c +++ b/src/common/arg_desc.c @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/arg_desc.h b/src/common/arg_desc.h index 9a0ce1857..f365582ef 100644 --- a/src/common/arg_desc.h +++ b/src/common/arg_desc.h @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c new file mode 100644 index 000000000..1a35ca432 --- /dev/null +++ b/src/common/assoc_mgr.c @@ -0,0 +1,675 @@ +/*****************************************************************************\ + * accounting_storage_slurmdbd.c - accounting interface to slurmdbd. + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "assoc_mgr.h" + +#include <sys/types.h> +#include <pwd.h> + +#include "src/common/xstring.h" +#include "src/slurmdbd/read_config.h" + +static List local_association_list = NULL; +static List local_user_list = NULL; +static char *local_cluster_name = NULL; + +static pthread_mutex_t local_association_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_mutex_t local_user_lock = PTHREAD_MUTEX_INITIALIZER; + +static int _get_local_association_list(void *db_conn, int enforce) +{ + acct_association_cond_t assoc_q; + char *cluster_name = NULL; +// DEF_TIMERS; + slurm_mutex_lock(&local_association_lock); + if(local_association_list) + list_destroy(local_association_list); + + memset(&assoc_q, 0, sizeof(acct_association_cond_t)); + if(local_cluster_name) { + assoc_q.cluster_list = list_create(slurm_destroy_char); + cluster_name = xstrdup(local_cluster_name); + if(!cluster_name) { + if(enforce && !slurmdbd_conf) { + error("_get_local_association_list: " + "no cluster name here going to get " + "all associations."); + } + } else + list_append(assoc_q.cluster_list, cluster_name); + } + +// START_TIMER; + local_association_list = + acct_storage_g_get_associations(db_conn, &assoc_q); +// END_TIMER2("get_associations"); + + if(assoc_q.cluster_list) + list_destroy(assoc_q.cluster_list); + + if(!local_association_list) { + /* create list so we don't keep calling this if there + isn't anything there */ + local_association_list = list_create(NULL); + slurm_mutex_unlock(&local_association_lock); + if(enforce) { + error("_get_local_association_list: " + "no list was made."); + return SLURM_ERROR; + } else { + debug3("not enforcing associations and no " + "list was given so we are giving a blank list"); + return SLURM_SUCCESS; + } + } else { + acct_association_rec_t *assoc = NULL; + acct_association_rec_t *assoc2 = NULL; + struct passwd *passwd_ptr = NULL; + ListIterator itr = list_iterator_create(local_association_list); + ListIterator itr2 = + list_iterator_create(local_association_list); + //START_TIMER; + while((assoc = list_next(itr))) { + if(assoc->parent_id) { + while((assoc2 = list_next(itr2))) { + if(assoc2->id == assoc->parent_id) { + assoc->parent_acct_ptr = assoc2; + break; + } + } + list_iterator_reset(itr2); + } + if(!assoc->user) { + continue; + } + passwd_ptr = getpwnam(assoc->user); + if(passwd_ptr) + assoc->uid = passwd_ptr->pw_uid; + //log_assoc_rec(assoc); + } + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + //END_TIMER2("load_associations"); + } + slurm_mutex_unlock(&local_association_lock); + + return SLURM_SUCCESS; +} + +static int _get_local_user_list(void *db_conn, int enforce) +{ + acct_user_cond_t user_q; + + memset(&user_q, 0, sizeof(acct_user_cond_t)); + + slurm_mutex_lock(&local_user_lock); + if(local_user_list) + list_destroy(local_user_list); + local_user_list = acct_storage_g_get_users(db_conn, &user_q); + + if(!local_user_list) { + slurm_mutex_unlock(&local_user_lock); + if(enforce) { + error("_get_local_user_list: " + "no list was made."); + return SLURM_ERROR; + } else { + return SLURM_SUCCESS; + } + } + + slurm_mutex_unlock(&local_user_lock); + return SLURM_SUCCESS; +} + +extern int assoc_mgr_init(void *db_conn, int enforce) +{ + if(!local_cluster_name && 
!slurmdbd_conf) + local_cluster_name = slurm_get_cluster_name(); + + if(!local_association_list) + if(_get_local_association_list(db_conn, enforce) == SLURM_ERROR) + return SLURM_ERROR; + + if(!local_user_list) + if(_get_local_user_list(db_conn, enforce) == SLURM_ERROR) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +extern int assoc_mgr_fini() +{ + if(local_association_list) + list_destroy(local_association_list); + if(local_user_list) + list_destroy(local_user_list); + xfree(local_cluster_name); + local_association_list = NULL; + local_user_list = NULL; + + return SLURM_SUCCESS; +} + +extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc, + int enforce, + acct_association_rec_t **assoc_pptr) +{ + ListIterator itr = NULL; + acct_association_rec_t * found_assoc = NULL; + acct_association_rec_t * ret_assoc = NULL; + + if (assoc_pptr) + *assoc_pptr = NULL; + if(!local_association_list) { + if(_get_local_association_list(db_conn, enforce) == SLURM_ERROR) + return SLURM_ERROR; + } + if((!local_association_list || !list_count(local_association_list)) + && !enforce) + return SLURM_SUCCESS; + + if(!assoc->id) { + if(!assoc->acct) { + acct_user_rec_t user; + + if(!assoc->uid) { + if(enforce) { + error("get_assoc_id: " + "Not enough info to " + "get an association"); + return SLURM_ERROR; + } else { + return SLURM_SUCCESS; + } + } + memset(&user, 0, sizeof(acct_user_rec_t)); + user.uid = assoc->uid; + if(assoc_mgr_fill_in_user(db_conn, &user, enforce) + == SLURM_ERROR) { + if(enforce) + return SLURM_ERROR; + else { + return SLURM_SUCCESS; + } + } + assoc->user = user.name; + assoc->acct = user.default_acct; + } + + if(!assoc->cluster) + assoc->cluster = local_cluster_name; + } +/* info("looking for assoc of user=%s(%u), acct=%s, " */ +/* "cluster=%s, partition=%s", */ +/* assoc->user, assoc->uid, assoc->acct, */ +/* assoc->cluster, assoc->partition); */ + slurm_mutex_lock(&local_association_lock); + itr = list_iterator_create(local_association_list); + while((found_assoc = list_next(itr))) { + if(assoc->id) { + if(assoc->id == found_assoc->id) { + ret_assoc = found_assoc; + break; + } + continue; + } else { + if(!assoc->uid && found_assoc->uid) { + debug3("we are looking for a " + "nonuser association"); + continue; + } else if(assoc->uid != found_assoc->uid) { + debug3("not the right user %u != %u", + assoc->uid, found_assoc->uid); + continue; + } + + if(found_assoc->acct + && strcasecmp(assoc->acct, found_assoc->acct)) { + debug3("not the right account"); + continue; + } + + /* only check for on the slurmdbd */ + if(!local_cluster_name && found_assoc->cluster + && strcasecmp(assoc->cluster, + found_assoc->cluster)) { + debug3("not the right cluster"); + continue; + } + + if(assoc->partition + && (!found_assoc->partition + || strcasecmp(assoc->partition, + found_assoc->partition))) { + ret_assoc = found_assoc; + debug3("found association for no partition"); + continue; + } + } + ret_assoc = found_assoc; + break; + } + list_iterator_destroy(itr); + + if(!ret_assoc) { + slurm_mutex_unlock(&local_association_lock); + if(enforce) + return SLURM_ERROR; + else + return SLURM_SUCCESS; + } + debug3("found correct association"); + if (assoc_pptr) + *assoc_pptr = ret_assoc; + assoc->id = ret_assoc->id; + if(!assoc->user) + assoc->user = ret_assoc->user; + if(!assoc->acct) + assoc->acct = ret_assoc->acct; + if(!assoc->cluster) + assoc->cluster = ret_assoc->cluster; + if(!assoc->partition) + assoc->partition = ret_assoc->partition; + assoc->fairshare = ret_assoc->fairshare; + 
assoc->max_cpu_secs_per_job = ret_assoc->max_cpu_secs_per_job; + assoc->max_jobs = ret_assoc->max_jobs; + assoc->max_nodes_per_job = ret_assoc->max_nodes_per_job; + assoc->max_wall_duration_per_job = ret_assoc->max_wall_duration_per_job; + assoc->parent_acct_ptr = ret_assoc->parent_acct_ptr; + if(assoc->parent_acct) { + xfree(assoc->parent_acct); + assoc->parent_acct = xstrdup(ret_assoc->parent_acct); + } else + assoc->parent_acct = ret_assoc->parent_acct; + slurm_mutex_unlock(&local_association_lock); + + return SLURM_SUCCESS; +} + +extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user, + int enforce) +{ + ListIterator itr = NULL; + acct_user_rec_t * found_user = NULL; + + if(!local_user_list) + if(_get_local_user_list(db_conn, enforce) == SLURM_ERROR) + return SLURM_ERROR; + + if((!local_user_list || !list_count(local_user_list)) && !enforce) + return SLURM_SUCCESS; + + slurm_mutex_lock(&local_user_lock); + itr = list_iterator_create(local_user_list); + while((found_user = list_next(itr))) { + if(user->uid == found_user->uid) + break; + } + list_iterator_destroy(itr); + + if(found_user) { + memcpy(user, found_user, sizeof(acct_user_rec_t)); + slurm_mutex_unlock(&local_user_lock); + return SLURM_SUCCESS; + } + slurm_mutex_unlock(&local_user_lock); + return SLURM_ERROR; +} + +extern acct_admin_level_t assoc_mgr_get_admin_level(void *db_conn, + uint32_t uid) +{ + ListIterator itr = NULL; + acct_user_rec_t * found_user = NULL; + + if(!local_user_list) + if(_get_local_user_list(db_conn, 0) == SLURM_ERROR) + return ACCT_ADMIN_NOTSET; + + if(!local_user_list) + return ACCT_ADMIN_NOTSET; + + slurm_mutex_lock(&local_user_lock); + itr = list_iterator_create(local_user_list); + while((found_user = list_next(itr))) { + if(uid == found_user->uid) + break; + } + list_iterator_destroy(itr); + slurm_mutex_unlock(&local_user_lock); + + if(found_user) + return found_user->admin_level; + + return ACCT_ADMIN_NOTSET; +} + +extern int assoc_mgr_is_user_acct_coord(void *db_conn, + uint32_t uid, + char *acct_name) +{ + ListIterator itr = NULL; + acct_coord_rec_t *acct = NULL; + acct_user_rec_t * found_user = NULL; + + if(!local_user_list) + if(_get_local_user_list(db_conn, 0) == SLURM_ERROR) + return ACCT_ADMIN_NOTSET; + + if(!local_user_list) + return ACCT_ADMIN_NOTSET; + + slurm_mutex_lock(&local_user_lock); + itr = list_iterator_create(local_user_list); + while((found_user = list_next(itr))) { + if(uid == found_user->uid) + break; + } + list_iterator_destroy(itr); + + if(!found_user) { + slurm_mutex_unlock(&local_user_lock); + return 0; + } + itr = list_iterator_create(found_user->coord_accts); + while((acct = list_next(itr))) { + if(!strcmp(acct_name, acct->acct_name)) + break; + } + list_iterator_destroy(itr); + + if(acct) { + slurm_mutex_unlock(&local_user_lock); + return 1; + } + slurm_mutex_unlock(&local_user_lock); + + return 0; +} + +extern int assoc_mgr_update_local_assocs(acct_update_object_t *update) +{ + acct_association_rec_t * rec = NULL; + acct_association_rec_t * object = NULL; + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + int parents_changed = 0; + + if(!local_association_list) + return SLURM_SUCCESS; + + slurm_mutex_lock(&local_association_lock); + itr = list_iterator_create(local_association_list); + while((object = list_pop(update->objects))) { + if(object->cluster && local_cluster_name) { + /* only update the local clusters assocs */ + if(strcasecmp(object->cluster, local_cluster_name)) + continue; + } + list_iterator_reset(itr); + while((rec = list_next(itr))) { 
+ if(object->id) { + if(object->id == rec->id) { + break; + } + continue; + } else { + if(!object->user && rec->user) { + debug3("we are looking for a " + "nonuser association"); + continue; + } else if(object->uid != rec->uid) { + debug3("not the right user"); + continue; + } + + if(object->acct + && (!rec->acct + || strcasecmp(object->acct, + rec->acct))) { + debug3("not the right account"); + continue; + } + + /* only check for on the slurmdbd */ + if(!local_cluster_name && object->acct + && (!rec->cluster + || strcasecmp(object->cluster, + rec->cluster))) { + debug3("not the right cluster"); + continue; + } + + if(object->partition + && (!rec->partition + || strcasecmp(object->partition, + rec->partition))) { + debug3("not the right partition"); + continue; + } + break; + } + } + //info("%d assoc %u", update->type, object->id); + switch(update->type) { + case ACCT_MODIFY_ASSOC: + if(!rec) { + rc = SLURM_ERROR; + break; + } + debug("updating assoc %u", rec->id); + if(object->fairshare != (uint32_t)NO_VAL) { + rec->fairshare = object->fairshare; + } + + if(object->max_jobs != (uint32_t)NO_VAL) { + rec->max_jobs = object->max_jobs; + } + + if(object->max_nodes_per_job != (uint32_t)NO_VAL) { + rec->max_nodes_per_job = + object->max_nodes_per_job; + } + + if(object->max_wall_duration_per_job != + (uint32_t)NO_VAL) { + rec->max_wall_duration_per_job = + object->max_wall_duration_per_job; + } + + if(object->max_cpu_secs_per_job != (uint32_t)NO_VAL) { + rec->max_cpu_secs_per_job = + object->max_cpu_secs_per_job; + } + + if(object->parent_acct) { + xfree(rec->parent_acct); + rec->parent_acct = xstrdup(object->parent_acct); + } + if(object->parent_id) { + rec->parent_id = object->parent_id; + // after all new parents have been set we will + // reset the parent pointers below + parents_changed = 1; + + } + log_assoc_rec(rec); + /* FIX ME: do more updates here */ + break; + case ACCT_ADD_ASSOC: + if(rec) { + //rc = SLURM_ERROR; + break; + } + list_append(local_association_list, object); + case ACCT_REMOVE_ASSOC: + if(!rec) { + //rc = SLURM_ERROR; + break; + } + list_delete_item(itr); + break; + default: + break; + } + if(update->type != ACCT_ADD_ASSOC) { + destroy_acct_association_rec(object); + } + } + + if(parents_changed) { + ListIterator itr2 = + list_iterator_create(local_association_list); + list_iterator_reset(itr); + + while((object = list_next(itr))) { + if(object->parent_id) { + while((rec = list_next(itr2))) { + if(rec->id == object->parent_id) { + object->parent_acct_ptr = rec; + break; + } + } + list_iterator_reset(itr2); + } + } + list_iterator_destroy(itr2); + } + + list_iterator_destroy(itr); + slurm_mutex_unlock(&local_association_lock); + + return rc; +} + +extern int assoc_mgr_update_local_users(acct_update_object_t *update) +{ + acct_user_rec_t * rec = NULL; + acct_user_rec_t * object = NULL; + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + + if(!local_user_list) + return SLURM_SUCCESS; + + slurm_mutex_lock(&local_user_lock); + itr = list_iterator_create(local_user_list); + while((object = list_pop(update->objects))) { + list_iterator_reset(itr); + while((rec = list_next(itr))) { + if(!strcasecmp(object->name, rec->name)) { + break; + } + } + //info("%d user %s", update->type, object->name); + switch(update->type) { + case ACCT_MODIFY_USER: + if(!rec) { + rc = SLURM_ERROR; + break; + } + + if(object->default_acct) { + xfree(rec->default_acct); + rec->default_acct = object->default_acct; + object->default_acct = NULL; + } + + if(object->qos != ACCT_QOS_NOTSET) + rec->qos = 
object->qos; + + if(object->admin_level != ACCT_ADMIN_NOTSET) + rec->admin_level = rec->admin_level; + + break; + case ACCT_ADD_USER: + if(rec) { + //rc = SLURM_ERROR; + break; + } + list_append(local_user_list, object); + case ACCT_REMOVE_USER: + if(!rec) { + //rc = SLURM_ERROR; + break; + } + list_delete_item(itr); + break; + default: + break; + } + if(update->type != ACCT_ADD_USER) { + destroy_acct_user_rec(object); + } + } + list_iterator_destroy(itr); + slurm_mutex_unlock(&local_user_lock); + + return rc; +} + +extern int assoc_mgr_validate_assoc_id(void *db_conn, + uint32_t assoc_id, + int enforce) +{ + ListIterator itr = NULL; + acct_association_rec_t * found_assoc = NULL; + + if(!local_association_list) + if(_get_local_association_list(db_conn, enforce) == SLURM_ERROR) + return SLURM_ERROR; + + if((!local_association_list || !list_count(local_association_list)) + && !enforce) + return SLURM_SUCCESS; + + slurm_mutex_lock(&local_association_lock); + itr = list_iterator_create(local_association_list); + while((found_assoc = list_next(itr))) { + if(assoc_id == found_assoc->id) + break; + } + list_iterator_destroy(itr); + slurm_mutex_unlock(&local_association_lock); + + if(found_assoc || !enforce) + return SLURM_SUCCESS; + + return SLURM_ERROR; +} + diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h new file mode 100644 index 000000000..d0d1826c5 --- /dev/null +++ b/src/common/assoc_mgr.h @@ -0,0 +1,123 @@ +/*****************************************************************************\ + * assoc_mgr.h - keep track of local cache of accounting data. + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#ifndef _SLURM_ASSOC_MGR_H +#define _SLURM_ASSOC_MGR_H + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include "src/common/list.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/slurmctld/slurmctld.h" +#include <slurm/slurm.h> +#include <slurm/slurm_errno.h> + +/* + * get info from the storage + * IN/OUT: user - acct_user_rec_t with the name set of the user. + * "default_account" will be filled in on + * successful return DO NOT FREE. + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user, + int enforce); + +/* + * get info from the storage + * IN/OUT: assoc - acct_association_rec_t with at least cluster and + * account set for account association. To get user + * association set user, and optional partition. + * Sets "id" field with the association ID. + * IN: enforce - return an error if no such association exists + * IN/OUT: assoc_pptr - if non-NULL then return a pointer to the + * acct_association record in cache on success + * RET: SLURM_SUCCESS on success, else SLURM_ERROR + */ +extern int assoc_mgr_fill_in_assoc(void *db_conn, + acct_association_rec_t *assoc, + int enforce, + acct_association_rec_t **assoc_pptr); + +/* + * get admin_level of uid + * IN: uid - uid of user to check admin_level of. + * RET: admin level ACCT_ADMIN_NOTSET on error + */ +extern acct_admin_level_t assoc_mgr_get_admin_level(void *db_conn, + uint32_t uid); + +/* + * see if user is coordinator of given acct + * IN: uid - uid of user to check. + * IN: acct - name of account + * RET: 0 for no, 1 for yes + */ +extern int assoc_mgr_is_user_acct_coord(void *db_conn, uint32_t uid, + char *acct); + +extern int assoc_mgr_init(void *db_conn, int enforce); +extern int assoc_mgr_fini(); + +/* + * update associations in local cache + * IN: acct_update_object_t *object + * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else + */ +extern int assoc_mgr_update_local_assocs(acct_update_object_t *update); + +/* + * update users in local cache + * IN: acct_update_object_t *object + * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else + */ +extern int assoc_mgr_update_local_users(acct_update_object_t *update); + +/* + * validate that an association ID is still valid + * IN: assoc_id - association ID previously returned by + * get_assoc_id(void *db_conn, + ) + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int assoc_mgr_validate_assoc_id(void *db_conn, + uint32_t assoc_id, + int enforce); + +#endif /* _SLURM_ASSOC_MGR_H */ diff --git a/src/common/bitstring.c b/src/common/bitstring.c index 69d7029c7..a1ecb9473 100644 --- a/src/common/bitstring.c +++ b/src/common/bitstring.c @@ -7,7 +7,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick@llnl.gov>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/bitstring.h b/src/common/bitstring.h index 01ca9b4c0..e937b5725 100644 --- a/src/common/bitstring.h +++ b/src/common/bitstring.h @@ -8,7 +8,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
* Written by Jim Garlick <garlick@llnl.gov>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/checkpoint.c b/src/common/checkpoint.c index 83a8fec6c..54e7874b0 100644 --- a/src/common/checkpoint.c +++ b/src/common/checkpoint.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * checkpoint.c - implementation-independent checkpoint functions - * $Id: checkpoint.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: checkpoint.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -62,6 +62,8 @@ typedef struct slurm_checkpoint_ops { uint32_t *error_code, char **error_msg); int (*ckpt_comp) (struct step_record * step_ptr, time_t event_time, uint32_t error_code, char *error_msg); + int (*ckpt_task_comp) (struct step_record * step_ptr, uint32_t task_id, + time_t event_time, uint32_t error_code, char *error_msg); int (*ckpt_alloc_jobinfo) (check_jobinfo_t *jobinfo); int (*ckpt_free_jobinfo) (check_jobinfo_t jobinfo); @@ -146,6 +148,7 @@ _slurm_checkpoint_get_ops( slurm_checkpoint_context_t c ) static const char *syms[] = { "slurm_ckpt_op", "slurm_ckpt_comp", + "slurm_ckpt_task_comp", "slurm_ckpt_alloc_job", "slurm_ckpt_free_job", "slurm_ckpt_pack_job", @@ -277,6 +280,25 @@ checkpoint_comp(void * step_ptr, time_t event_time, uint32_t error_code, return retval; } +extern int +checkpoint_task_comp(void * step_ptr, uint32_t task_id, time_t event_time, + uint32_t error_code, char *error_msg) +{ + int retval = SLURM_SUCCESS; + + slurm_mutex_lock( &context_lock ); + if ( g_context ) + retval = (*(g_context->ops.ckpt_task_comp))( + (struct step_record *) step_ptr, task_id, + event_time, error_code, error_msg); + else { + error ("slurm_checkpoint plugin context not initialized"); + retval = ENOENT; + } + slurm_mutex_unlock( &context_lock ); + return retval; +} + /* allocate and initialize a job step's checkpoint context */ extern int checkpoint_alloc_jobinfo(check_jobinfo_t *jobinfo) { diff --git a/src/common/checkpoint.h b/src/common/checkpoint.h index e4ce11356..0a5c8b92c 100644 --- a/src/common/checkpoint.h +++ b/src/common/checkpoint.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * checkpoint.h - implementation-independent checkpoint API definitions. - * $Id: checkpoint.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: checkpoint.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
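Note on the checkpoint.c hunk above: wiring in the new callback takes two parallel edits, a ckpt_task_comp pointer added to the slurm_checkpoint_ops structure and a "slurm_ckpt_task_comp" string added to syms[] at the matching position, since the plugin loader resolves these names positionally into the ops fields. A generic sketch of that order-matching convention follows; the loader code here is hypothetical and is not the SLURM plugin loader itself.

/* Illustrative only: an ops table resolved by symbol name, in the same
 * field order as the name list.  Relies on the struct holding nothing
 * but function pointers, as the real ops structures do. */
#include <dlfcn.h>
#include <stdio.h>

struct ckpt_ops {
	int (*ckpt_op)(void);
	int (*ckpt_comp)(void);
	int (*ckpt_task_comp)(void);	/* new field: index must match syms[] */
};

static const char *syms[] = {
	"slurm_ckpt_op",
	"slurm_ckpt_comp",
	"slurm_ckpt_task_comp",		/* new name, inserted at the same index */
};

static int resolve_ops(void *handle, struct ckpt_ops *ops)
{
	void **slot = (void **) ops;	/* positional fill, mirroring the convention */
	size_t i;

	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++) {
		slot[i] = dlsym(handle, syms[i]);
		if (!slot[i]) {
			fprintf(stderr, "missing symbol %s\n", syms[i]);
			return -1;
		}
	}
	return 0;
}

Appending the new name anywhere else in syms[] would silently bind the wrong plugin function to ckpt_task_comp, which is why both lists are edited at the same position.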
@@ -78,6 +78,9 @@ extern int checkpoint_op(uint16_t op, uint16_t data, void * step_ptr, extern int checkpoint_comp(void * step_ptr, time_t event_time, uint32_t error_code, char *error_msg); +extern int checkpoint_task_comp(void * step_ptr, uint32_t task_id, + time_t event_time, uint32_t error_code, char *error_msg); + /* gather checkpoint error info */ extern int checkpoint_error(void * step_ptr, uint16_t *ckpt_errno, char **ckpt_strerror); diff --git a/src/common/daemonize.c b/src/common/daemonize.c index 528aaaa14..c396d8431 100644 --- a/src/common/daemonize.c +++ b/src/common/daemonize.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * daemonize.c - daemonization routine - * $Id: daemonize.c 12723 2007-11-29 18:55:48Z jette $ + * $Id: daemonize.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark A. Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/daemonize.h b/src/common/daemonize.h index e976d42e3..93c57a1c0 100644 --- a/src/common/daemonize.h +++ b/src/common/daemonize.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/daemonize.h - function definition for making a daemon - * $Id: daemonize.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: daemonize.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/eio.c b/src/common/eio.c index 2053fae9e..3f7ce2ca8 100644 --- a/src/common/eio.c +++ b/src/common/eio.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/eio.h b/src/common/eio.h index eabd120fa..1351ad560 100644 --- a/src/common/eio.h +++ b/src/common/eio.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
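The new checkpoint_task_comp() entry point lets a single task report checkpoint completion, where the existing checkpoint_comp() covers a whole step; both return ENOENT when no checkpoint plugin context has been initialized. A hypothetical call site is sketched below; the prototypes are those declared in src/common/checkpoint.h by this patch, while the helper and its arguments are placeholders for illustration only.

#include <time.h>
#include <stdint.h>

/* Prototypes as declared in src/common/checkpoint.h by this patch. */
extern int checkpoint_comp(void *step_ptr, time_t event_time,
			   uint32_t error_code, char *error_msg);
extern int checkpoint_task_comp(void *step_ptr, uint32_t task_id,
				time_t event_time, uint32_t error_code,
				char *error_msg);

/* Hypothetical helper: report that one task of a step has finished its
 * checkpoint; error_code/error_msg come from the checkpoint operation. */
static int report_task_ckpt_done(void *step_ptr, uint32_t task_id,
				 uint32_t error_code, char *error_msg)
{
	/* ENOENT is returned if no checkpoint plugin context is loaded. */
	return checkpoint_task_comp(step_ptr, task_id, time(NULL),
				    error_code, error_msg);
}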
diff --git a/src/common/env.c b/src/common/env.c index e28cc028a..f7e5b5e51 100644 --- a/src/common/env.c +++ b/src/common/env.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/env.c - add an environment variable to environment vector - * $Id: env.c 13678 2008-03-20 21:02:07Z jette $ + * $Id: env.c 14025 2008-05-09 16:37:03Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>, Danny Auble <da@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -48,8 +48,9 @@ #include <strings.h> #include <unistd.h> #include <sys/poll.h> +#include <sys/stat.h> #include <sys/types.h> - +#include <sys/param.h> /* MAXPATHLEN */ #include "src/common/macros.h" #include "slurm/slurm.h" #include "src/common/log.h" @@ -77,7 +78,7 @@ strong_alias(env_array_append_fmt, slurm_env_array_append_fmt); strong_alias(env_array_overwrite, slurm_env_array_overwrite); strong_alias(env_array_overwrite_fmt, slurm_env_array_overwrite_fmt); -#define ENV_BUFSIZE (64 * 1024) +#define ENV_BUFSIZE (256 * 1024) /* * Return pointer to `name' entry in environment if found, or @@ -172,7 +173,7 @@ setenvfs(const char *fmt, ...) int rc; va_start(ap, fmt); - vsnprintf(buf, ENV_BUFSIZE, fmt, ap); + vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); bufcpy = xstrdup(buf); @@ -191,7 +192,7 @@ setenvf(char ***envp, const char *name, const char *fmt, ...) char *bufcpy; va_start(ap, fmt); - vsnprintf (buf, ENV_BUFSIZE, fmt, ap); + vsnprintf (buf, sizeof(buf), fmt, ap); va_end(ap); bufcpy = xstrdup(buf); @@ -649,8 +650,28 @@ int setup_env(env_t *env) setenvf(&env->env, "LOADL_ACTIVE", "3.2.0"); } #endif - - return SLURM_SUCCESS; + + if (env->pty_port + && setenvf(&env->env, "SLURM_PTY_PORT", "%hu", env->pty_port)) { + error("Can't set SLURM_PTY_PORT env variable"); + rc = SLURM_FAILURE; + } + if (env->ws_col + && setenvf(&env->env, "SLURM_PTY_WIN_COL", "%hu", env->ws_col)) { + error("Can't set SLURM_PTY_WIN_COL env variable"); + rc = SLURM_FAILURE; + } + if (env->ws_row + && setenvf(&env->env, "SLURM_PTY_WIN_ROW", "%hu", env->ws_row)) { + error("Can't set SLURM_PTY_WIN_ROW env variable"); + rc = SLURM_FAILURE; + } + if (env->ckpt_path + && setenvf(&env->env, "SLURM_CHECKPOINT_PATH", "%s", env->ckpt_path)) { + error("Can't set SLURM_CHECKPOINT_PATH env variable"); + rc = SLURM_FAILURE; + } + return rc; } /********************************************************************** @@ -878,8 +899,7 @@ void env_array_for_step(char ***dest, const job_step_create_response_msg_t *step, const char *launcher_hostname, - uint16_t launcher_port, - const char *ip_addr_str) + uint16_t launcher_port) { char *tmp; @@ -897,8 +917,6 @@ env_array_for_step(char ***dest, "%s", launcher_hostname); env_array_overwrite_fmt(dest, "SLURM_STEP_LAUNCHER_PORT", "%hu", launcher_port); -/* env_array_overwrite_fmt(dest, "SLURM_STEP_LAUNCHER_IPADDR", */ -/* "%s", ip_addr_str); */ /* OBSOLETE */ env_array_overwrite_fmt(dest, "SLURM_STEPID", "%u", step->job_step_id); @@ -911,8 +929,6 @@ env_array_for_step(char ***dest, "%s", launcher_hostname); env_array_overwrite_fmt(dest, "SLURM_SRUN_COMM_PORT", "%hu", launcher_port); -/* env_array_overwrite_fmt(dest, "SLURM_LAUNCH_NODE_IPADDR", */ 
-/* "%s", ip_addr_str); */ xfree(tmp); } @@ -977,7 +993,7 @@ int env_array_append_fmt(char ***array_ptr, const char *name, } va_start(ap, value_fmt); - vsnprintf (buf, ENV_BUFSIZE, value_fmt, ap); + vsnprintf (buf, sizeof(buf), value_fmt, ap); va_end(ap); ep = _find_name_in_env(*array_ptr, name); @@ -1053,7 +1069,7 @@ int env_array_overwrite_fmt(char ***array_ptr, const char *name, } va_start(ap, value_fmt); - vsnprintf (buf, ENV_BUFSIZE, value_fmt, ap); + vsnprintf (buf, sizeof(buf), value_fmt, ap); va_end(ap); xstrfmtcat (str, "%s=%s", name, buf); @@ -1172,11 +1188,11 @@ static int _env_array_entry_splitter(const char *entry, */ static int _env_array_putenv(const char *string) { - char name[ENV_BUFSIZE]; + char name[256]; char value[ENV_BUFSIZE]; - if (!_env_array_entry_splitter(string, name, ENV_BUFSIZE, value, - ENV_BUFSIZE)) + if (!_env_array_entry_splitter(string, name, sizeof(name), + value, sizeof(value))) return 0; if (setenv(name, value, 1) == -1) return 0; @@ -1208,16 +1224,16 @@ void env_array_set_environment(char **env_array) void env_array_merge(char ***dest_array, const char **src_array) { char **ptr; - char name[ENV_BUFSIZE]; + char name[256]; char value[ENV_BUFSIZE]; if (src_array == NULL) return; for (ptr = (char **)src_array; *ptr != NULL; ptr++) { - _env_array_entry_splitter(*ptr, name, ENV_BUFSIZE, value, - ENV_BUFSIZE); - env_array_overwrite(dest_array, name, value); + if (_env_array_entry_splitter(*ptr, name, sizeof(name), + value, sizeof(value))) + env_array_overwrite(dest_array, name, value); } } @@ -1244,8 +1260,8 @@ static void _strip_cr_nl(char *line) */ char **_load_env_cache(const char *username) { - char *state_save_loc, fname[ENV_BUFSIZE]; - char line[ENV_BUFSIZE], name[ENV_BUFSIZE], value[ENV_BUFSIZE]; + char *state_save_loc, fname[MAXPATHLEN]; + char line[ENV_BUFSIZE], name[256], value[ENV_BUFSIZE]; char **env = NULL; FILE *fp; int i; @@ -1267,11 +1283,11 @@ char **_load_env_cache(const char *username) info("Getting cached environment variables at %s", fname); env = env_array_create(); while (1) { - if (!fgets(line, ENV_BUFSIZE, fp)) + if (!fgets(line, sizeof(line), fp)) break; _strip_cr_nl(line); - if (_env_array_entry_splitter(line, name, ENV_BUFSIZE, value, - ENV_BUFSIZE) && + if (_env_array_entry_splitter(line, name, sizeof(name), + value, sizeof(value)) && (!_discard_env(name, value))) env_array_overwrite(&env, name, value); } @@ -1297,17 +1313,29 @@ char **_load_env_cache(const char *username) */ char **env_array_user_default(const char *username, int timeout, int mode) { - char *line, *last, name[128], value[ENV_BUFSIZE]; + char *line = NULL, *last = NULL, name[128], value[ENV_BUFSIZE]; char buffer[ENV_BUFSIZE]; char **env = NULL; char *starttoken = "XXXXSLURMSTARTPARSINGHEREXXXX"; char *stoptoken = "XXXXSLURMSTOPPARSINGHEREXXXXX"; - char cmdstr[256]; + char cmdstr[256], *env_loc = NULL; int fildes[2], found, fval, len, rc, timeleft; int buf_read, buf_rem; pid_t child; struct timeval begin, now; struct pollfd ufds; + struct stat buf; + + if (stat("/bin/su", &buf)) + fatal("Could not locate command: /bin/su"); + if (stat("/bin/echo", &buf)) + fatal("Could not locate command: /bin/echo"); + if (stat("/bin/env", &buf) == 0) + env_loc = "/bin/env"; + else if (stat("/usr/bin/env", &buf) == 0) + env_loc = "/usr/bin/env"; + else + fatal("Could not location command: env"); if (geteuid() != (uid_t)0) { fatal("WARNING: you must be root to use --get-user-env"); @@ -1332,8 +1360,8 @@ char **env_array_user_default(const char *username, int timeout, int mode) 
open("/dev/null", O_WRONLY); snprintf(cmdstr, sizeof(cmdstr), "/bin/echo; /bin/echo; /bin/echo; " - "/bin/echo %s; /bin/env; /bin/echo %s", - starttoken, stoptoken); + "/bin/echo %s; %s; /bin/echo %s", + starttoken, env_loc, stoptoken); if (mode == 1) execl("/bin/su", "su", username, "-c", cmdstr, NULL); else if (mode == 2) diff --git a/src/common/env.h b/src/common/env.h index 3f8901d66..f59adaf17 100644 --- a/src/common/env.h +++ b/src/common/env.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -70,6 +70,10 @@ typedef struct env_options { int cpus_on_node; pid_t task_pid; char *sgtids; /* global ranks array of integers */ + uint16_t pty_port; /* used to communicate window size changes */ + uint8_t ws_col; /* window size, columns */ + uint8_t ws_row; /* window size, row count */ + char *ckpt_path; /* --ckpt-path= */ } env_t; @@ -161,8 +165,7 @@ void env_array_for_step(char ***dest, const job_step_create_response_msg_t *step, const char *launcher_hostname, - uint16_t launcher_port, - const char *ip_addr_str); + uint16_t launcher_port); /* * Return an empty environment variable array (contains a single diff --git a/src/common/forward.c b/src/common/forward.c index f1c8ec408..05b62d661 100644 --- a/src/common/forward.c +++ b/src/common/forward.c @@ -5,7 +5,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <auble1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/forward.h b/src/common/forward.h index 086d4cdbc..c7e92eee6 100644 --- a/src/common/forward.h +++ b/src/common/forward.h @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <auble1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/hostlist.c b/src/common/hostlist.c index 19f340942..2b9cf4a1b 100644 --- a/src/common/hostlist.c +++ b/src/common/hostlist.c @@ -1,12 +1,12 @@ /*****************************************************************************\ - * $Id: hostlist.c 13270 2008-02-14 19:40:44Z da $ + * $Id: hostlist.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * $LSDId: hostlist.c,v 1.14 2003/10/14 20:11:54 grondo Exp $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -168,7 +168,7 @@ strong_alias(hostset_nth, slurm_hostset_nth); /* ----[ Internal Data Structures ]---- */ -char *alpha_num = "0123456789ABCDEFGHIJKLMNOPQRSTUZWXYZ"; +char *alpha_num = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; #ifdef HAVE_BG /* logic for block node description */ diff --git a/src/common/hostlist.h b/src/common/hostlist.h index b60e86f59..bc4061cc8 100644 --- a/src/common/hostlist.h +++ b/src/common/hostlist.h @@ -1,12 +1,12 @@ /*****************************************************************************\ - * $Id: hostlist.h 11402 2007-04-25 17:39:08Z da $ + * $Id: hostlist.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * $LSDId: hostlist.h,v 1.4 2003/09/19 21:37:34 grondo Exp $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/io_hdr.c b/src/common/io_hdr.c index cce4ed808..3a782025f 100644 --- a/src/common/io_hdr.c +++ b/src/common/io_hdr.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/io_hdr.c - IO connection header functions - * $Id: io_hdr.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: io_hdr.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark A. Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -179,7 +179,7 @@ io_init_msg_packed_size(void) len = sizeof(uint16_t) /* version */ + sizeof(uint32_t) /* nodeid */ - + (SLURM_IO_KEY_SIZE + sizeof(uint16_t)) /* signature */ + + (SLURM_IO_KEY_SIZE + sizeof(uint32_t)) /* signature */ + sizeof(uint32_t) /* stdout_objs */ + sizeof(uint32_t); /* stderr_objs */ return len; @@ -193,15 +193,14 @@ io_init_msg_pack(struct slurm_io_init_msg *hdr, Buf buffer) pack32(hdr->stdout_objs, buffer); pack32(hdr->stderr_objs, buffer); packmem((char *) hdr->cred_signature, - (uint16_t) SLURM_IO_KEY_SIZE, buffer); + (uint32_t) SLURM_IO_KEY_SIZE, buffer); } static int io_init_msg_unpack(struct slurm_io_init_msg *hdr, Buf buffer) { - uint16_t val; - + uint32_t val; safe_unpack16(&hdr->version, buffer); safe_unpack32(&hdr->nodeid, buffer); safe_unpack32(&hdr->stdout_objs, buffer); diff --git a/src/common/io_hdr.h b/src/common/io_hdr.h index e5f60f49c..ec6f3d149 100644 --- a/src/common/io_hdr.h +++ b/src/common/io_hdr.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/io_hdr.h - IO connection header functions - * $Id: io_hdr.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: io_hdr.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark A. Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. 
+ * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/job_options.c b/src/common/job_options.c index f4ba8a9cb..95ae5be10 100644 --- a/src/common/job_options.c +++ b/src/common/job_options.c @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -94,19 +94,16 @@ static struct job_option_info * job_option_info_unpack (Buf buf) { struct job_option_info *ji = xmalloc (sizeof (*ji)); uint32_t type; - uint16_t len; + uint32_t len; - if (unpack32 (&type, buf) != SLURM_SUCCESS) - goto error; - if (unpackstr_xmalloc (&ji->option, &len, buf) != SLURM_SUCCESS) - goto error; - if (unpackstr_xmalloc (&ji->optarg, &len, buf) != SLURM_SUCCESS) - goto error; + safe_unpack32 (&type, buf); + safe_unpackstr_xmalloc (&ji->option, &len, buf); + safe_unpackstr_xmalloc (&ji->optarg, &len, buf); ji->type = (int) type; return (ji); - error: + unpack_error: job_option_info_destroy (ji); return (NULL); } @@ -196,19 +193,18 @@ int job_options_pack (job_options_t opts, Buf buf) int job_options_unpack (job_options_t opts, Buf buf) { uint32_t count; - uint16_t len; - char * tag; + uint32_t len; + char * tag = NULL; int i; - if (unpackstr_xmalloc (&tag, &len, buf) != SLURM_SUCCESS) - return (SLURM_ERROR); + safe_unpackstr_xmalloc (&tag, &len, buf); if (strncmp (tag, JOB_OPTIONS_PACK_TAG, len) != 0) { xfree(tag); return (-1); } xfree(tag); - unpack32 (&count, buf); + safe_unpack32 (&count, buf); for (i = 0; i < count; i++) { struct job_option_info *ji; @@ -218,6 +214,10 @@ int job_options_unpack (job_options_t opts, Buf buf) } return (0); + + unpack_error: + xfree(tag); + return SLURM_ERROR; } /* diff --git a/src/common/job_options.h b/src/common/job_options.h index 7c910d006..18a50a28c 100644 --- a/src/common/job_options.h +++ b/src/common/job_options.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c new file mode 100644 index 000000000..316123bfa --- /dev/null +++ b/src/common/jobacct_common.c @@ -0,0 +1,886 @@ +/*****************************************************************************\ + * jobacct_common.c - common functions for almost all jobacct plugins. + ***************************************************************************** + * + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. + * Written by Danny Auble, <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ + +#include "jobacct_common.h" + +bool jobacct_shutdown = false; +bool jobacct_suspended = false; +List task_list = NULL; +pthread_mutex_t jobacct_lock = PTHREAD_MUTEX_INITIALIZER; +uint32_t cont_id = (uint32_t)NO_VAL; +uint32_t acct_job_id = 0; +uint32_t job_mem_limit = 0; +bool pgid_plugin = false; + +static void _pack_jobacct_id(jobacct_id_t *jobacct_id, Buf buffer) +{ + pack32((uint32_t)jobacct_id->nodeid, buffer); + pack16((uint16_t)jobacct_id->taskid, buffer); +} + +static int _unpack_jobacct_id(jobacct_id_t *jobacct_id, Buf buffer) +{ + safe_unpack32(&jobacct_id->nodeid, buffer); + safe_unpack16(&jobacct_id->taskid, buffer); + return SLURM_SUCCESS; +unpack_error: + return SLURM_ERROR; +} + +static void _pack_sacct(sacct_t *sacct, Buf buffer) +{ + int i=0; + int mult = 1000000; + + if(!sacct) { + for(i=0; i<8; i++) + pack32((uint32_t) 0, buffer); + + for(i=0; i<4; i++) { /* _pack_jobacct_id() */ + pack32((uint32_t) 0, buffer); + pack16((uint16_t) 0, buffer); + } + return; + } + pack32((uint32_t)sacct->max_vsize, buffer); + pack32((uint32_t)(sacct->ave_vsize*mult), buffer); + pack32((uint32_t)sacct->max_rss, buffer); + pack32((uint32_t)(sacct->ave_rss*mult), buffer); + pack32((uint32_t)sacct->max_pages, buffer); + pack32((uint32_t)(sacct->ave_pages*mult), buffer); + pack32((uint32_t)(sacct->min_cpu*mult), buffer); + pack32((uint32_t)(sacct->ave_cpu*mult), buffer); + + _pack_jobacct_id(&sacct->max_vsize_id, buffer); + _pack_jobacct_id(&sacct->max_rss_id, buffer); + _pack_jobacct_id(&sacct->max_pages_id, buffer); + _pack_jobacct_id(&sacct->min_cpu_id, buffer); +} + +/* you need to xfree this */ +static int _unpack_sacct(sacct_t *sacct, Buf buffer) +{ + int mult = 1000000; + + safe_unpack32(&sacct->max_vsize, buffer); + safe_unpack32((uint32_t *)&sacct->ave_vsize, buffer); + sacct->ave_vsize /= mult; + safe_unpack32(&sacct->max_rss, buffer); + safe_unpack32((uint32_t *)&sacct->ave_rss, buffer); + sacct->ave_rss /= mult; + safe_unpack32(&sacct->max_pages, buffer); + safe_unpack32((uint32_t *)&sacct->ave_pages, buffer); + sacct->ave_pages /= mult; + safe_unpack32((uint32_t 
*)&sacct->min_cpu, buffer); + sacct->min_cpu /= mult; + safe_unpack32((uint32_t *)&sacct->ave_cpu, buffer); + sacct->ave_cpu /= mult; + if(_unpack_jobacct_id(&sacct->max_vsize_id, buffer) != SLURM_SUCCESS) + goto unpack_error; + if(_unpack_jobacct_id(&sacct->max_rss_id, buffer) != SLURM_SUCCESS) + goto unpack_error; + if(_unpack_jobacct_id(&sacct->max_pages_id, buffer) != SLURM_SUCCESS) + goto unpack_error; + if(_unpack_jobacct_id(&sacct->min_cpu_id, buffer) != SLURM_SUCCESS) + goto unpack_error; + + return SLURM_SUCCESS; + +unpack_error: + sacct = NULL; + return SLURM_ERROR; +} +extern jobacct_job_rec_t *create_jobacct_job_rec() +{ + jobacct_job_rec_t *job = xmalloc(sizeof(jobacct_job_rec_t)); + memset(&job->sacct, 0, sizeof(sacct_t)); + job->sacct.min_cpu = (float)NO_VAL; + job->state = JOB_PENDING; + job->steps = list_create(destroy_jobacct_step_rec); + job->requid = -1; + + return job; +} + +extern jobacct_step_rec_t *create_jobacct_step_rec() +{ + jobacct_step_rec_t *step = xmalloc(sizeof(jobacct_job_rec_t)); + memset(&step->sacct, 0, sizeof(sacct_t)); + step->stepid = (uint32_t)NO_VAL; + step->state = NO_VAL; + step->exitcode = NO_VAL; + step->ncpus = (uint32_t)NO_VAL; + step->elapsed = (uint32_t)NO_VAL; + step->tot_cpu_sec = (uint32_t)NO_VAL; + step->tot_cpu_usec = (uint32_t)NO_VAL; + step->requid = -1; + + return step; +} + +extern void destroy_jobacct_job_rec(void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + if (job) { + xfree(job->account); + xfree(job->blockid); + xfree(job->cluster); + xfree(job->jobname); + xfree(job->partition); + xfree(job->nodes); + xfree(job->user); + if(job->steps) + list_destroy(job->steps); + xfree(job); + } +} + +extern void destroy_jobacct_step_rec(void *object) +{ + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + if (step) { + xfree(step->nodes); + xfree(step->stepname); + xfree(step); + } +} + +extern void destroy_jobacct_selected_step(void *object) +{ + jobacct_selected_step_t *step = (jobacct_selected_step_t *)object; + if (step) { + xfree(step->job); + xfree(step->step); + xfree(step); + } +} + + +extern void pack_jobacct_job_rec(void *object, Buf buffer) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + ListIterator itr = NULL; + jobacct_step_rec_t *step = NULL; + uint32_t count = 0; + + pack32(job->alloc_cpus, buffer); + pack32(job->associd, buffer); + packstr(job->account, buffer); + packstr(job->blockid, buffer); + packstr(job->cluster, buffer); + pack32(job->elapsed, buffer); + pack_time(job->eligible, buffer); + pack_time(job->end, buffer); + pack32(job->exitcode, buffer); + pack32(job->gid, buffer); + pack32(job->jobid, buffer); + packstr(job->jobname, buffer); + packstr(job->partition, buffer); + packstr(job->nodes, buffer); + pack32(job->priority, buffer); + pack16(job->qos, buffer); + pack32(job->req_cpus, buffer); + pack32(job->requid, buffer); + _pack_sacct(&job->sacct, buffer); + pack32(job->show_full, buffer); + pack_time(job->start, buffer); + pack16(job->state, buffer); + if(job->steps) + count = list_count(job->steps); + pack32(count, buffer); + if(count) { + itr = list_iterator_create(job->steps); + while((step = list_next(itr))) { + pack_jobacct_step_rec(step, buffer); + } + list_iterator_destroy(itr); + } + pack_time(job->submit, buffer); + pack32(job->suspended, buffer); + pack32(job->sys_cpu_sec, buffer); + pack32(job->sys_cpu_usec, buffer); + pack32(job->tot_cpu_sec, buffer); + pack32(job->tot_cpu_usec, buffer); + pack16(job->track_steps, buffer); + pack32(job->uid, buffer); + 
//packstr(job->user, buffer); + pack32(job->user_cpu_sec, buffer); + pack32(job->user_cpu_usec, buffer); +} + +extern int unpack_jobacct_job_rec(void **job, Buf buffer) +{ + jobacct_job_rec_t *job_ptr = xmalloc(sizeof(jobacct_job_rec_t)); + int i = 0; + jobacct_step_rec_t *step = NULL; + uint32_t count = 0; + uint32_t uint32_tmp; + + *job = job_ptr; + + safe_unpack32(&job_ptr->alloc_cpus, buffer); + safe_unpack32(&job_ptr->associd, buffer); + safe_unpackstr_xmalloc(&job_ptr->account, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_ptr->blockid, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_ptr->cluster, &uint32_tmp, buffer); + safe_unpack32(&job_ptr->elapsed, buffer); + safe_unpack_time(&job_ptr->eligible, buffer); + safe_unpack_time(&job_ptr->end, buffer); + safe_unpack32((uint32_t *)&job_ptr->exitcode, buffer); + safe_unpack32(&job_ptr->gid, buffer); + safe_unpack32(&job_ptr->jobid, buffer); + safe_unpackstr_xmalloc(&job_ptr->jobname, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_ptr->partition, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_ptr->nodes, &uint32_tmp, buffer); + safe_unpack32((uint32_t *)&job_ptr->priority, buffer); + safe_unpack16(&job_ptr->qos, buffer); + safe_unpack32(&job_ptr->req_cpus, buffer); + safe_unpack32(&job_ptr->requid, buffer); + _pack_sacct(&job_ptr->sacct, buffer); + safe_unpack32(&job_ptr->show_full, buffer); + safe_unpack_time(&job_ptr->start, buffer); + safe_unpack16((uint16_t *)&job_ptr->state, buffer); + safe_unpack32(&count, buffer); + + job_ptr->steps = list_create(destroy_jobacct_step_rec); + for(i=0; i<count; i++) { + unpack_jobacct_step_rec(&step, buffer); + if(step) + list_append(job_ptr->steps, step); + } + + safe_unpack_time(&job_ptr->submit, buffer); + safe_unpack32(&job_ptr->suspended, buffer); + safe_unpack32(&job_ptr->sys_cpu_sec, buffer); + safe_unpack32(&job_ptr->sys_cpu_usec, buffer); + safe_unpack32(&job_ptr->tot_cpu_sec, buffer); + safe_unpack32(&job_ptr->tot_cpu_usec, buffer); + safe_unpack16(&job_ptr->track_steps, buffer); + safe_unpack32(&job_ptr->uid, buffer); + //safe_unpackstr_xmalloc(&job_ptr->user, &uint32_tmp, buffer); + safe_unpack32(&job_ptr->user_cpu_sec, buffer); + safe_unpack32(&job_ptr->user_cpu_usec, buffer); + + return SLURM_SUCCESS; + +unpack_error: + xfree(job_ptr->account); + xfree(job_ptr->blockid); + xfree(job_ptr->cluster); + xfree(job_ptr->jobname); + xfree(job_ptr->partition); + xfree(job_ptr->nodes); + if(job_ptr->steps) + list_destroy(job_ptr->steps); + xfree(job_ptr->user); + xfree(job_ptr); + *job = NULL; + return SLURM_ERROR; +} + +extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, Buf buffer) +{ + pack32(step->elapsed, buffer); + pack_time(step->end, buffer); + pack32((uint32_t)step->exitcode, buffer); + pack32(step->jobid, buffer); + pack32(step->ncpus, buffer); + packstr(step->nodes, buffer); + pack32(step->requid, buffer); + _pack_sacct(&step->sacct, buffer); + pack_time(step->start, buffer); + pack16(step->state, buffer); + pack32(step->stepid, buffer); /* job's step number */ + packstr(step->stepname, buffer); + pack32(step->suspended, buffer); + pack32(step->sys_cpu_sec, buffer); + pack32(step->sys_cpu_usec, buffer); + pack32(step->tot_cpu_sec, buffer); + pack32(step->tot_cpu_usec, buffer); + pack32(step->user_cpu_sec, buffer); + pack32(step->user_cpu_usec, buffer); +} + +extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, Buf buffer) +{ + uint32_t uint32_tmp; + jobacct_step_rec_t *step_ptr = xmalloc(sizeof(jobacct_step_rec_t)); + + *step = step_ptr; + + 
safe_unpack32(&step_ptr->elapsed, buffer); + safe_unpack_time(&step_ptr->end, buffer); + safe_unpack32((uint32_t *)&step_ptr->exitcode, buffer); + safe_unpack32(&step_ptr->jobid, buffer); + safe_unpack32(&step_ptr->ncpus, buffer); + safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer); + safe_unpack32(&step_ptr->requid, buffer); + _unpack_sacct(&step_ptr->sacct, buffer); + safe_unpack_time(&step_ptr->start, buffer); + safe_unpack16((uint16_t *)&step_ptr->state, buffer); + safe_unpack32(&step_ptr->stepid, buffer); /* job's step number */ + safe_unpackstr_xmalloc(&step_ptr->stepname, &uint32_tmp, buffer); + safe_unpack32(&step_ptr->suspended, buffer); + safe_unpack32(&step_ptr->sys_cpu_sec, buffer); + safe_unpack32(&step_ptr->sys_cpu_usec, buffer); + safe_unpack32(&step_ptr->tot_cpu_sec, buffer); + safe_unpack32(&step_ptr->tot_cpu_usec, buffer); + safe_unpack32(&step_ptr->user_cpu_sec, buffer); + safe_unpack32(&step_ptr->user_cpu_usec, buffer); + + return SLURM_SUCCESS; + +unpack_error: + xfree(step_ptr->nodes); + xfree(step_ptr->stepname); + xfree(step_ptr); + *step = NULL; + return SLURM_ERROR; +} + +extern void pack_jobacct_selected_step(jobacct_selected_step_t *step, + Buf buffer) +{ + packstr(step->job, buffer); + packstr(step->step, buffer); + pack32(step->jobid, buffer); + pack32(step->stepid, buffer); +} + +extern int unpack_jobacct_selected_step(jobacct_selected_step_t **step, + Buf buffer) +{ + uint32_t uint32_tmp; + jobacct_selected_step_t *step_ptr = + xmalloc(sizeof(jobacct_selected_step_t)); + + *step = step_ptr; + + safe_unpackstr_xmalloc(&step_ptr->job, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&step_ptr->step, &uint32_tmp, buffer); + safe_unpack32(&step_ptr->jobid, buffer); + safe_unpack32(&step_ptr->stepid, buffer); + + return SLURM_SUCCESS; + +unpack_error: + xfree(step_ptr->job); + xfree(step_ptr->step); + xfree(step_ptr); + *step = NULL; + return SLURM_ERROR; +} + +extern int jobacct_common_init_struct(struct jobacctinfo *jobacct, + jobacct_id_t *jobacct_id) +{ + if(!jobacct_id) { + jobacct_id_t temp_id; + temp_id.taskid = (uint16_t)NO_VAL; + temp_id.nodeid = (uint32_t)NO_VAL; + jobacct_id = &temp_id; + } + memset(jobacct, 0, sizeof(struct jobacctinfo)); + jobacct->sys_cpu_sec = 0; + jobacct->sys_cpu_usec = 0; + jobacct->user_cpu_sec = 0; + jobacct->user_cpu_usec = 0; + + jobacct->max_vsize = 0; + memcpy(&jobacct->max_vsize_id, jobacct_id, sizeof(jobacct_id_t)); + jobacct->tot_vsize = 0; + jobacct->max_rss = 0; + memcpy(&jobacct->max_rss_id, jobacct_id, sizeof(jobacct_id_t)); + jobacct->tot_rss = 0; + jobacct->max_pages = 0; + memcpy(&jobacct->max_pages_id, jobacct_id, sizeof(jobacct_id_t)); + jobacct->tot_pages = 0; + jobacct->min_cpu = (uint32_t)NO_VAL; + memcpy(&jobacct->min_cpu_id, jobacct_id, sizeof(jobacct_id_t)); + jobacct->tot_cpu = 0; + + return SLURM_SUCCESS; +} + +extern struct jobacctinfo *jobacct_common_alloc_jobacct( + jobacct_id_t *jobacct_id) +{ + struct jobacctinfo *jobacct = xmalloc(sizeof(struct jobacctinfo)); + jobacct_common_init_struct(jobacct, jobacct_id); + return jobacct; +} + +extern void jobacct_common_free_jobacct(void *object) +{ + struct jobacctinfo *jobacct = (struct jobacctinfo *)object; + xfree(jobacct); + jobacct = NULL; +} + +extern int jobacct_common_setinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) +{ + int rc = SLURM_SUCCESS; + int *fd = (int *)data; + struct rusage *rusage = (struct rusage *)data; + uint32_t *uint32 = (uint32_t *) data; + jobacct_id_t *jobacct_id = (jobacct_id_t *) data; + 
struct jobacctinfo *send = (struct jobacctinfo *) data; + + slurm_mutex_lock(&jobacct_lock); + switch (type) { + case JOBACCT_DATA_TOTAL: + memcpy(jobacct, send, sizeof(struct jobacctinfo)); + break; + case JOBACCT_DATA_PIPE: + safe_write(*fd, jobacct, sizeof(struct jobacctinfo)); + break; + case JOBACCT_DATA_RUSAGE: + jobacct->user_cpu_sec = rusage->ru_utime.tv_sec; + jobacct->user_cpu_usec = rusage->ru_utime.tv_usec; + jobacct->sys_cpu_sec = rusage->ru_stime.tv_sec; + jobacct->sys_cpu_usec = rusage->ru_stime.tv_usec; + break; + case JOBACCT_DATA_MAX_RSS: + jobacct->max_rss = *uint32; + break; + case JOBACCT_DATA_MAX_RSS_ID: + jobacct->max_rss_id = *jobacct_id; + break; + case JOBACCT_DATA_TOT_RSS: + jobacct->tot_rss = *uint32; + break; + case JOBACCT_DATA_MAX_VSIZE: + jobacct->max_vsize = *uint32; + break; + case JOBACCT_DATA_MAX_VSIZE_ID: + jobacct->max_vsize_id = *jobacct_id; + break; + case JOBACCT_DATA_TOT_VSIZE: + jobacct->tot_vsize = *uint32; + break; + case JOBACCT_DATA_MAX_PAGES: + jobacct->max_pages = *uint32; + break; + case JOBACCT_DATA_MAX_PAGES_ID: + jobacct->max_pages_id = *jobacct_id; + break; + case JOBACCT_DATA_TOT_PAGES: + jobacct->tot_pages = *uint32; + break; + case JOBACCT_DATA_MIN_CPU: + jobacct->min_cpu = *uint32; + break; + case JOBACCT_DATA_MIN_CPU_ID: + jobacct->min_cpu_id = *jobacct_id; + break; + case JOBACCT_DATA_TOT_CPU: + jobacct->tot_cpu = *uint32; + break; + default: + debug("jobacct_g_set_setinfo data_type %d invalid", + type); + } + slurm_mutex_unlock(&jobacct_lock); + return rc; +rwfail: + slurm_mutex_unlock(&jobacct_lock); + return SLURM_ERROR; + +} + +extern int jobacct_common_getinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) +{ + int rc = SLURM_SUCCESS; + int *fd = (int *)data; + uint32_t *uint32 = (uint32_t *) data; + jobacct_id_t *jobacct_id = (jobacct_id_t *) data; + struct rusage *rusage = (struct rusage *)data; + struct jobacctinfo *send = (struct jobacctinfo *) data; + + slurm_mutex_lock(&jobacct_lock); + switch (type) { + case JOBACCT_DATA_TOTAL: + memcpy(send, jobacct, sizeof(struct jobacctinfo)); + break; + case JOBACCT_DATA_PIPE: + safe_read(*fd, jobacct, sizeof(struct jobacctinfo)); + break; + case JOBACCT_DATA_RUSAGE: + memset(rusage, 0, sizeof(struct rusage)); + rusage->ru_utime.tv_sec = jobacct->user_cpu_sec; + rusage->ru_utime.tv_usec = jobacct->user_cpu_usec; + rusage->ru_stime.tv_sec = jobacct->sys_cpu_sec; + rusage->ru_stime.tv_usec = jobacct->sys_cpu_usec; + break; + case JOBACCT_DATA_MAX_RSS: + *uint32 = jobacct->max_rss; + break; + case JOBACCT_DATA_MAX_RSS_ID: + *jobacct_id = jobacct->max_rss_id; + break; + case JOBACCT_DATA_TOT_RSS: + *uint32 = jobacct->tot_rss; + break; + case JOBACCT_DATA_MAX_VSIZE: + *uint32 = jobacct->max_vsize; + break; + case JOBACCT_DATA_MAX_VSIZE_ID: + *jobacct_id = jobacct->max_vsize_id; + break; + case JOBACCT_DATA_TOT_VSIZE: + *uint32 = jobacct->tot_vsize; + break; + case JOBACCT_DATA_MAX_PAGES: + *uint32 = jobacct->max_pages; + break; + case JOBACCT_DATA_MAX_PAGES_ID: + *jobacct_id = jobacct->max_pages_id; + break; + case JOBACCT_DATA_TOT_PAGES: + *uint32 = jobacct->tot_pages; + break; + case JOBACCT_DATA_MIN_CPU: + *uint32 = jobacct->min_cpu; + break; + case JOBACCT_DATA_MIN_CPU_ID: + *jobacct_id = jobacct->min_cpu_id; + break; + case JOBACCT_DATA_TOT_CPU: + *uint32 = jobacct->tot_cpu; + break; + default: + debug("jobacct_g_set_setinfo data_type %d invalid", + type); + } + slurm_mutex_unlock(&jobacct_lock); + return rc; +rwfail: + slurm_mutex_unlock(&jobacct_lock); + 
return SLURM_ERROR; + +} + +extern void jobacct_common_aggregate(struct jobacctinfo *dest, + struct jobacctinfo *from) +{ + xassert(dest); + xassert(from); + + slurm_mutex_lock(&jobacct_lock); + if(dest->max_vsize < from->max_vsize) { + dest->max_vsize = from->max_vsize; + dest->max_vsize_id = from->max_vsize_id; + } + dest->tot_vsize += from->tot_vsize; + + if(dest->max_rss < from->max_rss) { + dest->max_rss = from->max_rss; + dest->max_rss_id = from->max_rss_id; + } + dest->tot_rss += from->tot_rss; + + if(dest->max_pages < from->max_pages) { + dest->max_pages = from->max_pages; + dest->max_pages_id = from->max_pages_id; + } + dest->tot_pages += from->tot_pages; + if((dest->min_cpu > from->min_cpu) + || (dest->min_cpu == (uint32_t)NO_VAL)) { + if(from->min_cpu == (uint32_t)NO_VAL) + from->min_cpu = 0; + dest->min_cpu = from->min_cpu; + dest->min_cpu_id = from->min_cpu_id; + } + dest->tot_cpu += from->tot_cpu; + + if(dest->max_vsize_id.taskid == (uint16_t)NO_VAL) + dest->max_vsize_id = from->max_vsize_id; + + if(dest->max_rss_id.taskid == (uint16_t)NO_VAL) + dest->max_rss_id = from->max_rss_id; + + if(dest->max_pages_id.taskid == (uint16_t)NO_VAL) + dest->max_pages_id = from->max_pages_id; + + if(dest->min_cpu_id.taskid == (uint16_t)NO_VAL) + dest->min_cpu_id = from->min_cpu_id; + + dest->user_cpu_sec += from->user_cpu_sec; + dest->user_cpu_usec += from->user_cpu_usec; + while (dest->user_cpu_usec >= 1E6) { + dest->user_cpu_sec++; + dest->user_cpu_usec -= 1E6; + } + dest->sys_cpu_sec += from->sys_cpu_sec; + dest->sys_cpu_usec += from->sys_cpu_usec; + while (dest->sys_cpu_usec >= 1E6) { + dest->sys_cpu_sec++; + dest->sys_cpu_usec -= 1E6; + } + + slurm_mutex_unlock(&jobacct_lock); +} + +extern void jobacct_common_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct) +{ + xassert(jobacct); + xassert(sacct); + slurm_mutex_lock(&jobacct_lock); + sacct->max_vsize = jobacct->max_vsize; + sacct->max_vsize_id = jobacct->max_vsize_id; + sacct->ave_vsize = jobacct->tot_vsize; + sacct->max_rss = jobacct->max_rss; + sacct->max_rss_id = jobacct->max_rss_id; + sacct->ave_rss = jobacct->tot_rss; + sacct->max_pages = jobacct->max_pages; + sacct->max_pages_id = jobacct->max_pages_id; + sacct->ave_pages = jobacct->tot_pages; + sacct->min_cpu = jobacct->min_cpu; + sacct->min_cpu_id = jobacct->min_cpu_id; + sacct->ave_cpu = jobacct->tot_cpu; + slurm_mutex_unlock(&jobacct_lock); +} + +extern void jobacct_common_pack(struct jobacctinfo *jobacct, Buf buffer) +{ + int i=0; + + if(!jobacct) { + for(i=0; i<16; i++) + pack32((uint32_t) 0, buffer); + for(i=0; i<4; i++) + pack16((uint16_t) 0, buffer); + return; + } + slurm_mutex_lock(&jobacct_lock); + pack32((uint32_t)jobacct->user_cpu_sec, buffer); + pack32((uint32_t)jobacct->user_cpu_usec, buffer); + pack32((uint32_t)jobacct->sys_cpu_sec, buffer); + pack32((uint32_t)jobacct->sys_cpu_usec, buffer); + pack32((uint32_t)jobacct->max_vsize, buffer); + pack32((uint32_t)jobacct->tot_vsize, buffer); + pack32((uint32_t)jobacct->max_rss, buffer); + pack32((uint32_t)jobacct->tot_rss, buffer); + pack32((uint32_t)jobacct->max_pages, buffer); + pack32((uint32_t)jobacct->tot_pages, buffer); + pack32((uint32_t)jobacct->min_cpu, buffer); + pack32((uint32_t)jobacct->tot_cpu, buffer); + _pack_jobacct_id(&jobacct->max_vsize_id, buffer); + _pack_jobacct_id(&jobacct->max_rss_id, buffer); + _pack_jobacct_id(&jobacct->max_pages_id, buffer); + _pack_jobacct_id(&jobacct->min_cpu_id, buffer); + slurm_mutex_unlock(&jobacct_lock); +} + +/* you need to xfree this */ +extern int 
jobacct_common_unpack(struct jobacctinfo **jobacct, Buf buffer) +{ + uint32_t uint32_tmp; + *jobacct = xmalloc(sizeof(struct jobacctinfo)); + safe_unpack32(&uint32_tmp, buffer); + (*jobacct)->user_cpu_sec = uint32_tmp; + safe_unpack32(&uint32_tmp, buffer); + (*jobacct)->user_cpu_usec = uint32_tmp; + safe_unpack32(&uint32_tmp, buffer); + (*jobacct)->sys_cpu_sec = uint32_tmp; + safe_unpack32(&uint32_tmp, buffer); + (*jobacct)->sys_cpu_usec = uint32_tmp; + safe_unpack32(&(*jobacct)->max_vsize, buffer); + safe_unpack32(&(*jobacct)->tot_vsize, buffer); + safe_unpack32(&(*jobacct)->max_rss, buffer); + safe_unpack32(&(*jobacct)->tot_rss, buffer); + safe_unpack32(&(*jobacct)->max_pages, buffer); + safe_unpack32(&(*jobacct)->tot_pages, buffer); + safe_unpack32(&(*jobacct)->min_cpu, buffer); + safe_unpack32(&(*jobacct)->tot_cpu, buffer); + if(_unpack_jobacct_id(&(*jobacct)->max_vsize_id, buffer) + != SLURM_SUCCESS) + goto unpack_error; + if(_unpack_jobacct_id(&(*jobacct)->max_rss_id, buffer) + != SLURM_SUCCESS) + goto unpack_error; + if(_unpack_jobacct_id(&(*jobacct)->max_pages_id, buffer) + != SLURM_SUCCESS) + goto unpack_error; + if(_unpack_jobacct_id(&(*jobacct)->min_cpu_id, buffer) + != SLURM_SUCCESS) + goto unpack_error; + return SLURM_SUCCESS; + +unpack_error: + xfree(*jobacct); + return SLURM_ERROR; +} + +extern int jobacct_common_set_proctrack_container_id(uint32_t id) +{ + if(pgid_plugin) + return SLURM_SUCCESS; + + if(cont_id != (uint32_t)NO_VAL) + info("Warning: jobacct: set_proctrack_container_id: " + "cont_id is already set to %d you are setting it to %d", + cont_id, id); + if(id <= 0) { + error("jobacct: set_proctrack_container_id: " + "I was given most likely an unset cont_id %d", + id); + return SLURM_ERROR; + } + cont_id = id; + + return SLURM_SUCCESS; +} + +extern int jobacct_common_set_mem_limit(uint32_t job_id, uint32_t mem_limit) +{ + if ((job_id == 0) || (mem_limit == 0)) { + error("jobacct_common_set_mem_limit: jobid:%u mem_limit:%u", + job_id, mem_limit); + return SLURM_ERROR; + } + + acct_job_id = job_id; + job_mem_limit = mem_limit * 1024; /* MB to KB */ + return SLURM_SUCCESS; +} + +extern int jobacct_common_add_task(pid_t pid, jobacct_id_t *jobacct_id) +{ + struct jobacctinfo *jobacct = jobacct_common_alloc_jobacct(jobacct_id); + + slurm_mutex_lock(&jobacct_lock); + if(pid <= 0) { + error("invalid pid given (%d) for task acct", pid); + goto error; + } else if (!task_list) { + error("no task list created!"); + goto error; + } + + jobacct->pid = pid; + jobacct->min_cpu = 0; + debug2("adding task %u pid %d on node %u to jobacct", + jobacct_id->taskid, pid, jobacct_id->nodeid); + list_push(task_list, jobacct); + slurm_mutex_unlock(&jobacct_lock); + + return SLURM_SUCCESS; +error: + slurm_mutex_unlock(&jobacct_lock); + jobacct_common_free_jobacct(jobacct); + return SLURM_ERROR; +} + +extern struct jobacctinfo *jobacct_common_stat_task(pid_t pid) +{ + struct jobacctinfo *jobacct = NULL; + struct jobacctinfo *ret_jobacct = NULL; + ListIterator itr = NULL; + + slurm_mutex_lock(&jobacct_lock); + if (!task_list) { + error("no task list created!"); + goto error; + } + + itr = list_iterator_create(task_list); + while((jobacct = list_next(itr))) { + if(jobacct->pid == pid) + break; + } + list_iterator_destroy(itr); + ret_jobacct = xmalloc(sizeof(struct jobacctinfo)); + memcpy(ret_jobacct, jobacct, sizeof(struct jobacctinfo)); +error: + slurm_mutex_unlock(&jobacct_lock); + return ret_jobacct; +} + +extern struct jobacctinfo *jobacct_common_remove_task(pid_t pid) +{ + struct jobacctinfo 
*jobacct = NULL; + + ListIterator itr = NULL; + + slurm_mutex_lock(&jobacct_lock); + if (!task_list) { + error("no task list created!"); + goto error; + } + + itr = list_iterator_create(task_list); + while((jobacct = list_next(itr))) { + if(jobacct->pid == pid) { + list_remove(itr); + break; + } + } + list_iterator_destroy(itr); + if(jobacct) { + debug2("removing task %u pid %d from jobacct", + jobacct->max_vsize_id.taskid, jobacct->pid); + } else { + error("pid(%d) not being watched in jobacct!", pid); + } +error: + slurm_mutex_unlock(&jobacct_lock); + return jobacct; +} + +extern int jobacct_common_endpoll() +{ + jobacct_shutdown = true; + + return SLURM_SUCCESS; +} + +extern void jobacct_common_suspend_poll() +{ + jobacct_suspended = true; +} + +extern void jobacct_common_resume_poll() +{ + jobacct_suspended = false; +} + diff --git a/src/common/jobacct_common.h b/src/common/jobacct_common.h new file mode 100644 index 000000000..e9e7089d5 --- /dev/null +++ b/src/common/jobacct_common.h @@ -0,0 +1,267 @@ +/*****************************************************************************\ + * jobacct_common.h - common functions for almost all jobacct plugins. + ***************************************************************************** + * + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. + * Written by Danny Auble, <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#ifndef _HAVE_JOBACCT_COMMON_H +#define _HAVE_JOBACCT_COMMON_H + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_STDINT_H +# include <stdint.h> +#endif +#if HAVE_INTTYPES_H +# include <inttypes.h> +#endif + +#include <dirent.h> +#include <sys/stat.h> + +#include "src/common/xmalloc.h" +#include "src/common/list.h" +#include "src/common/xstring.h" +#include "src/common/node_select.h" + + +#include <ctype.h> + +#define BUFFER_SIZE 4096 + +typedef struct { + uint16_t taskid; /* contains which task number it was on */ + uint32_t nodeid; /* contains which node number it was on */ +} jobacct_id_t; + +typedef struct { + uint32_t max_vsize; + jobacct_id_t max_vsize_id; + float ave_vsize; + uint32_t max_rss; + jobacct_id_t max_rss_id; + float ave_rss; + uint32_t max_pages; + jobacct_id_t max_pages_id; + float ave_pages; + float min_cpu; + jobacct_id_t min_cpu_id; + float ave_cpu; +} sacct_t; + +typedef struct { + char *opt_cluster; /* --cluster */ + int opt_completion; /* --completion */ + int opt_dump; /* --dump */ + int opt_dup; /* --duplicates; +1 = explicitly set */ + int opt_fdump; /* --formattted_dump */ + int opt_stat; /* --stat */ + int opt_gid; /* --gid (-1=wildcard, 0=root) */ + int opt_header; /* can only be cleared */ + int opt_help; /* --help */ + int opt_long; /* --long */ + int opt_lowmem; /* --low_memory */ + int opt_raw; /* --raw */ + int opt_purge; /* --purge */ + int opt_total; /* --total */ + int opt_uid; /* --uid (-1=wildcard, 0=root) */ + int opt_uid_set; + int opt_verbose; /* --verbose */ + long opt_expire; /* --expire= */ + char *opt_expire_timespec; /* --expire= */ + char *opt_field_list; /* --fields= */ + char *opt_filein; /* --file */ + char *opt_job_list; /* --jobs */ + char *opt_partition_list;/* --partitions */ + char *opt_state_list; /* --states */ +} sacct_parameters_t; + +typedef struct { + uint32_t alloc_cpus; + uint32_t associd; + char *account; + char *blockid; + char *cluster; + uint32_t elapsed; + time_t eligible; + time_t end; + int32_t exitcode; + uint32_t gid; + uint32_t jobid; + char *jobname; + char *partition; + char *nodes; + int32_t priority; + uint16_t qos; + uint32_t req_cpus; + uint32_t requid; + sacct_t sacct; + uint32_t show_full; + time_t start; + enum job_states state; + List steps; /* list of jobacct_step_rec_t *'s */ + time_t submit; + uint32_t suspended; + uint32_t sys_cpu_sec; + uint32_t sys_cpu_usec; + uint32_t tot_cpu_sec; + uint32_t tot_cpu_usec; + uint16_t track_steps; + uint32_t uid; + char *user; + uint32_t user_cpu_sec; + uint32_t user_cpu_usec; +} jobacct_job_rec_t; + +typedef struct { + uint32_t elapsed; + time_t end; + int32_t exitcode; + uint32_t jobid; + uint32_t ncpus; + char *nodes; + uint32_t requid; + sacct_t sacct; + time_t start; + enum job_states state; + uint32_t stepid; /* job's step number */ + char *stepname; + uint32_t suspended; + uint32_t sys_cpu_sec; + uint32_t sys_cpu_usec; + uint32_t tot_cpu_sec; + uint32_t tot_cpu_usec; + uint32_t user_cpu_sec; + uint32_t user_cpu_usec; +} jobacct_step_rec_t; + +typedef struct selected_step_t { + char *job; + char *step; + uint32_t jobid; + uint32_t stepid; +} jobacct_selected_step_t; + +struct jobacctinfo { + pid_t pid; + uint32_t sys_cpu_sec; + uint32_t sys_cpu_usec; + uint32_t user_cpu_sec; + uint32_t user_cpu_usec; + uint32_t max_vsize; /* max size of virtual memory */ + jobacct_id_t max_vsize_id; /* contains which task number it was on */ + uint32_t tot_vsize; /* total virtual 
memory + (used to figure out ave later) */ + uint32_t max_rss; /* max Resident Set Size */ + jobacct_id_t max_rss_id; /* contains which task it was on */ + uint32_t tot_rss; /* total rss + (used to figure out ave later) */ + uint32_t max_pages; /* max pages */ + jobacct_id_t max_pages_id; /* contains which task it was on */ + uint32_t tot_pages; /* total pages + (used to figure out ave later) */ + uint32_t min_cpu; /* min cpu time */ + jobacct_id_t min_cpu_id; /* contains which task it was on */ + uint32_t tot_cpu; /* total cpu time + (used to figure out ave later) */ +}; + +/* Define jobacctinfo_t below to avoid including extraneous slurm headers */ +#ifndef __jobacctinfo_t_defined +# define __jobacctinfo_t_defined + typedef struct jobacctinfo *jobacctinfo_t; /* opaque data type */ +#endif + +extern jobacct_step_rec_t *create_jobacct_step_rec(); +extern jobacct_job_rec_t *create_jobacct_job_rec(); +extern void free_jobacct_header(void *object); +extern void destroy_jobacct_job_rec(void *object); +extern void destroy_jobacct_step_rec(void *object); +extern void destroy_jobacct_selected_step(void *object); + +extern void pack_jobacct_job_rec(void *object, Buf buffer); +extern int unpack_jobacct_job_rec(void **object, Buf buffer); + +extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, Buf buffer); +extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, Buf buffer); + +extern void pack_jobacct_selected_step(jobacct_selected_step_t *step, + Buf buffer); +extern int unpack_jobacct_selected_step(jobacct_selected_step_t **step, + Buf buffer); + +/* These should only be called from the jobacct-gather plugin */ +extern int jobacct_common_init_struct(struct jobacctinfo *jobacct, + jobacct_id_t *jobacct_id); +extern struct jobacctinfo *jobacct_common_alloc_jobacct( + jobacct_id_t *jobacct_id); +extern void jobacct_common_free_jobacct(void *object); +extern int jobacct_common_setinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data); +extern int jobacct_common_getinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data); +extern void jobacct_common_aggregate(struct jobacctinfo *dest, + struct jobacctinfo *from); +extern void jobacct_common_2_sacct(sacct_t *sacct, + struct jobacctinfo *jobacct); +extern void jobacct_common_pack(struct jobacctinfo *jobacct, Buf buffer); +extern int jobacct_common_unpack(struct jobacctinfo **jobacct, Buf buffer); + +extern int jobacct_common_endpoll(); +extern int jobacct_common_set_proctrack_container_id(uint32_t id); +extern int jobacct_common_set_mem_limit(uint32_t job_id, uint32_t mem_limit); +extern int jobacct_common_add_task(pid_t pid, jobacct_id_t *jobacct_id); +extern struct jobacctinfo *jobacct_common_stat_task(pid_t pid); +extern struct jobacctinfo *jobacct_common_remove_task(pid_t pid); +extern void jobacct_common_suspend_poll(); +extern void jobacct_common_resume_poll(); +/***************************************************************/ + + +/* defined in common_jobacct.c */ +extern bool jobacct_shutdown; +extern bool jobacct_suspended; +extern List task_list; +extern pthread_mutex_t jobacct_lock; +extern uint32_t cont_id; +extern uint32_t acct_job_id; +extern uint32_t job_mem_limit; /* job's memory limit in KB */ +extern bool pgid_plugin; + +#endif diff --git a/src/common/list.c b/src/common/list.c index c55c69ce6..3162fe012 100644 --- a/src/common/list.c +++ b/src/common/list.c @@ -63,6 +63,8 @@ strong_alias(list_destroy, slurm_list_destroy); strong_alias(list_is_empty, slurm_list_is_empty); 
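/*
 * A minimal usage sketch (illustrative only, not part of the imported
 * source) of the task-accounting helpers declared in jobacct_common.h
 * above: register a task, read back its counters, and merge them into a
 * per-step total.  It assumes the gather plugin has already created
 * task_list, that SLURM_SUCCESS and pid_t come from the usual slurm
 * headers, and the name example_track_task is made up for illustration.
 */
#include "src/common/jobacct_common.h"

static void example_track_task(pid_t pid, uint16_t taskid, uint32_t nodeid,
                               struct jobacctinfo *step_total)
{
        jobacct_id_t id;
        struct jobacctinfo *snap;

        id.taskid = taskid;
        id.nodeid = nodeid;

        /* start watching this pid */
        if (jobacct_common_add_task(pid, &id) != SLURM_SUCCESS)
                return;

        /* ... the plugin's polling loop updates the entry here ... */

        /* take a private copy of the current counters for this pid */
        snap = jobacct_common_stat_task(pid);
        if (snap) {
                /* fold the task's maxima and totals into the step total */
                jobacct_common_aggregate(step_total, snap);
                xfree(snap);
        }

        /* stop watching the pid and free its tracked entry */
        snap = jobacct_common_remove_task(pid);
        if (snap)
                jobacct_common_free_jobacct(snap);
}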
strong_alias(list_count, slurm_list_count); strong_alias(list_append, slurm_list_append); +strong_alias(list_append_list, slurm_list_append_list); +strong_alias(list_transfer, slurm_list_transfer); strong_alias(list_prepend, slurm_list_prepend); strong_alias(list_find_first, slurm_list_find_first); strong_alias(list_delete_all, slurm_list_delete_all); @@ -80,7 +82,7 @@ strong_alias(list_next, slurm_list_next); strong_alias(list_insert, slurm_list_insert); strong_alias(list_find, slurm_list_find); strong_alias(list_remove, slurm_list_remove); -strong_alias(list_delete, slurm_list_delete); +strong_alias(list_delete_item, slurm_list_delete_item); strong_alias(list_install_fork_handlers, slurm_list_install_fork_handlers); /********************* * lsd_fatal_error * @@ -369,6 +371,50 @@ list_append (List l, void *x) } +int +list_append_list (List l, List sub) +{ + ListIterator itr; + void *v; + int n = 0; + + assert(l != NULL); + assert(l->fDel == NULL); + assert(sub != NULL); + itr = list_iterator_create(sub); + while((v = list_next(itr))) { + if(list_append(l, v)) + n++; + else + break; + } + list_iterator_destroy(itr); + + return n; +} + +int +list_transfer (List l, List sub) +{ + void *v; + int n = 0; + + assert(l != NULL); + assert(sub != NULL); + assert(l->fDel == sub->fDel); + while((v = list_pop(sub))) { + if(list_append(l, v)) + n++; + else { + if(l->fDel) + l->fDel(v); + break; + } + } + + return n; +} + void * list_prepend (List l, void *x) { @@ -457,6 +503,29 @@ list_for_each (List l, ListForF f, void *arg) } +int +list_flush (List l) +{ + ListNode *pp; + void *v; + int n = 0; + + assert(l != NULL); + list_mutex_lock(&l->mutex); + assert(l->magic == LIST_MAGIC); + pp = &l->head; + while (*pp) { + if ((v = list_node_destroy(l, pp))) { + if (l->fDel) + l->fDel(v); + n++; + } + } + list_mutex_unlock(&l->mutex); + return(n); +} + + void list_sort (List l, ListCmpF f) { @@ -501,7 +570,6 @@ list_sort (List l, ListCmpF f) return; } - void * list_push (List l, void *x) { @@ -697,7 +765,7 @@ list_remove (ListIterator i) int -list_delete (ListIterator i) +list_delete_item (ListIterator i) { void *v; diff --git a/src/common/list.h b/src/common/list.h index d9efbd693..9039c8fd6 100644 --- a/src/common/list.h +++ b/src/common/list.h @@ -145,6 +145,21 @@ void * list_append (List l, void *x); * Returns the data's ptr, or lsd_nomem_error() if insertion failed. */ +int list_append_list (List l, List sub); +/* + * Inserts list [sub] at the end of list [l]. + * Note: list [l] must have a destroy function of NULL. + * Returns a count of the number of items added to list [l]. + */ + +int list_transfer (List l, List sub); +/* + * Pops off list [sub] and appends data at the end of list [l]. + * Note: list [l] must have the same destroy function as list [sub]. + * Note: list [sub] will be returned empty, but not destroyed. + * Returns a count of the number of items added to list [l]. + */ + void * list_prepend (List l, void *x); /* * Inserts data [x] at the beginning of list [l]. @@ -178,6 +193,14 @@ int list_for_each (List l, ListForF f, void *arg); * function returns the negative of that item's position in the list. */ +int list_flush (List l); +/* + * Traverses list [l] and removes all items in list + * If a deletion function was specified when the list was + * created, it will be called to deallocate each item being removed. + * Returns a count of the number of items removed from the list. + */ + void list_sort (List l, ListCmpF f); /* * Sorts list [l] into ascending order according to the function [f]. 
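/*
 * A short sketch of the list calls added above: list_transfer() moves
 * items between lists that share a destructor, and list_flush() empties
 * a list in place; list_append_list() is analogous but requires the
 * destination list to have a NULL destructor.  list_create(),
 * list_append() and list_destroy() are the existing list.h API; the
 * strings and the example_merge_lists name are only illustrative.
 */
#include <stdlib.h>
#include <string.h>
#include "src/common/list.h"

static void example_merge_lists(void)
{
        List all  = list_create(free);  /* both lists use the same destructor */
        List part = list_create(free);

        list_append(part, strdup("job_a"));
        list_append(part, strdup("job_b"));

        /* pop every item off "part" and append it to "all";
         * "part" is left empty but not destroyed */
        list_transfer(all, part);

        /* delete (and free) every item in "all" without destroying it */
        list_flush(all);

        list_destroy(all);
        list_destroy(part);
}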
@@ -185,7 +208,6 @@ void list_sort (List l, ListCmpF f); * Note: The sort algorithm is stable. */ - /**************************** * Stack Access Functions * ****************************/ @@ -281,7 +303,7 @@ void * list_remove (ListIterator i); * Note: The client is responsible for freeing the returned data. */ -int list_delete (ListIterator i); +int list_delete_item (ListIterator i); /* * Removes from the list the last item returned via list iterator [i]; * if a deletion function was specified when the list was created, diff --git a/src/common/log.c b/src/common/log.c index 9493af549..7a6acc7e0 100644 --- a/src/common/log.c +++ b/src/common/log.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * log.c - slurm logging facilities - * $Id: log.c 12825 2007-12-14 21:23:57Z jette $ + * $Id: log.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * Much of this code was derived or adapted from the log.c component of * openssh which contains the following notices: diff --git a/src/common/log.h b/src/common/log.h index b257b138c..e159a9b76 100644 --- a/src/common/log.h +++ b/src/common/log.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * Much of this code was derived or adapted from the log.c component of * openssh which contains the following notices: diff --git a/src/common/macros.h b/src/common/macros.h index ac4b53eaa..a2492f767 100644 --- a/src/common/macros.h +++ b/src/common/macros.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/macros.h - some standard macros for slurm - * $Id: macros.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: macros.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/mpi.c b/src/common/mpi.c index 4d54fbf5e..ace6fab97 100644 --- a/src/common/mpi.c +++ b/src/common/mpi.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondo1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/mpi.h b/src/common/mpi.h index 8a6dc7af6..75255181a 100644 --- a/src/common/mpi.h +++ b/src/common/mpi.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondo1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/net.c b/src/common/net.c index 59a6329dc..5fb33c058 100644 --- a/src/common/net.c +++ b/src/common/net.c @@ -5,7 +5,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, Kevin Tew <tew1@llnl.gov>, * et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/net.h b/src/common/net.h index 94bdf6d8a..7b5e8548b 100644 --- a/src/common/net.h +++ b/src/common/net.h @@ -5,7 +5,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, Kevin Tew <tew1@llnl.gov>, * et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/node_select.c b/src/common/node_select.c index ede1c38fc..d247fbd4d 100644 --- a/src/common/node_select.c +++ b/src/common/node_select.c @@ -9,12 +9,12 @@ * the plugin. This is because functions required by the plugin can not be * resolved on the front-end nodes, so we can't load the plugins there. * - * $Id: node_select.c 13270 2008-02-14 19:40:44Z da $ + * $Id: node_select.c 13697 2008-03-21 21:56:40Z da $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -81,12 +81,15 @@ typedef struct slurm_select_ops { uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes, - bool test_only); + int mode); + int (*job_list_test) (List req_list); int (*job_begin) (struct job_record *job_ptr); int (*job_ready) (struct job_record *job_ptr); int (*job_fini) (struct job_record *job_ptr); int (*job_suspend) (struct job_record *job_ptr); int (*job_resume) (struct job_record *job_ptr); + int (*get_job_cores) (uint32_t job_id, + int alloc_index, int s); int (*pack_node_info) (time_t last_query_time, Buf *buffer_ptr); int (*get_extra_jobinfo) (struct node_record *node_ptr, @@ -106,6 +109,9 @@ typedef struct slurm_select_ops { int (*update_node_state) (int index, uint16_t state); int (*alter_node_cnt) (enum select_node_cnt type, void *data); + int (*reconfigure) (void); + int (*step_begin) (struct step_record *step_ptr); + int (*step_fini) (struct step_record *step_ptr); } slurm_select_ops_t; typedef struct slurm_select_context { @@ -132,6 +138,7 @@ struct select_jobinfo { uint16_t rotate; /* permit geometry rotation if set */ char *bg_block_id; /* Blue Gene block ID */ uint16_t magic; /* magic number */ + char *nodes; /* node list given for estimated start */ char *ionodes; /* for bg to tell which ionodes of a small * block the job is running */ uint32_t node_cnt; /* how many cnodes in block */ @@ -167,11 +174,13 @@ static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c) "select_p_node_init", "select_p_block_init", "select_p_job_test", + "select_p_job_list_test", "select_p_job_begin", "select_p_job_ready", "select_p_job_fini", "select_p_job_suspend", "select_p_job_resume", + "select_p_get_job_cores", "select_p_pack_node_info", "select_p_get_extra_jobinfo", "select_p_get_select_nodeinfo", @@ -180,7 +189,10 @@ static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c) "select_p_update_sub_node", "select_p_get_info_from_plugin", "select_p_update_node_state", - "select_p_alter_node_cnt" + "select_p_alter_node_cnt", + "select_p_reconfigure", + "select_p_step_begin", + "select_p_step_fini", }; int n_syms = sizeof( syms ) / sizeof( char * ); @@ -499,26 +511,61 @@ extern int select_g_alter_node_cnt (enum select_node_cnt type, void *data) return (*(g_select_context->ops.alter_node_cnt))(type, data); } +/* + * Note reconfiguration or change in partition configuration + */ +extern int select_g_reconfigure (void) +{ + if (slurm_select_init() < 0) + return SLURM_ERROR; + + return (*(g_select_context->ops.reconfigure))(); +} + /* * Select the "best" nodes for given job from those available - * IN job_ptr - pointer to job being considered for initiation + * IN/OUT job_ptr - pointer to job being considered for initiation, + * set's start_time when job expected to start * IN/OUT bitmap - map of nodes being considered for allocation on input, * map of nodes actually to be assigned on output * IN min_nodes - minimum number of nodes to allocate to job * IN max_nodes - maximum number of nodes to allocate to job * IN req_nodes - requested (or desired) count of nodes - * IN test_only - if true, only test if ever could run, not necessarily now + * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now + * SELECT_MODE_TEST_ONLY: test if job can ever run + * SELECT_MODE_WILL_RUN: determine when and where job can run + * RET zero on success, EINVAL otherwise */ extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap, - uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes, - bool test_only) + uint32_t min_nodes, uint32_t max_nodes, + 
uint32_t req_nodes, int mode) { if (slurm_select_init() < 0) return SLURM_ERROR; return (*(g_select_context->ops.job_test))(job_ptr, bitmap, min_nodes, max_nodes, - req_nodes, test_only); + req_nodes, mode); +} + +/* + * Given a list of select_will_run_t's in + * accending priority order we will see if we can start and + * finish all the jobs without increasing the start times of the + * jobs specified and fill in the est_start of requests with no + * est_start. If you are looking to see if one job will ever run + * then use select_p_job_test instead. + * IN/OUT req_list - list of select_will_run_t's in asscending + * priority order on success of placement fill in + * est_start of request with time. + * RET zero on success, EINVAL otherwise + */ +extern int select_g_job_list_test(List req_list) +{ + if (slurm_select_init() < 0) + return SLURM_ERROR; + + return (*(g_select_context->ops.job_list_test))(req_list); } /* @@ -559,6 +606,7 @@ extern int select_g_job_fini(struct job_record *job_ptr) return (*(g_select_context->ops.job_fini))(job_ptr); } + /* * Suspend a job. Executed from slurmctld. * IN job_ptr - pointer to job being suspended @@ -585,6 +633,21 @@ extern int select_g_job_resume(struct job_record *job_ptr) return (*(g_select_context->ops.job_resume))(job_ptr); } +/* + * Get job core info. Executed from sched/gang. + * IN job_id - id of job from which to obtain data + * IN alloc_index - allocated node index + * IN s - socket index + * RET number of allocated cores on the given socket from the given node + */ +extern int select_g_get_job_cores(uint32_t job_id, int alloc_index, int s) +{ + if (slurm_select_init() < 0) + return 0; + + return (*(g_select_context->ops.get_job_cores))(job_id, alloc_index, s); +} + extern int select_g_pack_node_info(time_t last_query_time, Buf *buffer) { if (slurm_select_init() < 0) @@ -594,6 +657,28 @@ extern int select_g_pack_node_info(time_t last_query_time, Buf *buffer) (last_query_time, buffer); } +/* Prepare to start a job step, allocate memory as needed + * RET - slurm error code + */ +extern int select_g_step_begin(struct step_record *step_ptr) +{ + if (slurm_select_init() < 0) + return SLURM_ERROR; + + return (*(g_select_context->ops.step_begin))(step_ptr); +} + +/* Prepare to terminate a job step, release memory as needed + * RET - slurm error code + */ +extern int select_g_step_fini(struct step_record *step_ptr) +{ + if (slurm_select_init() < 0) + return SLURM_ERROR; + + return (*(g_select_context->ops.step_fini))(step_ptr); +} + #ifdef HAVE_BG /* node selection specific logic */ static void _free_node_info(bg_info_record_t *bg_info_record) { @@ -619,14 +704,12 @@ static int _unpack_node_info(bg_info_record_t *bg_info_record, Buf buffer) uint32_t uint32_tmp; char *bp_inx_str; - safe_unpackstr_xmalloc(&(bg_info_record->nodes), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(bg_info_record->ionodes), &uint16_tmp, - buffer); - safe_unpackstr_xmalloc(&bg_info_record->owner_name, &uint16_tmp, - buffer); - safe_unpackstr_xmalloc(&bg_info_record->bg_block_id, &uint16_tmp, - buffer); - + safe_unpackstr_xmalloc(&(bg_info_record->nodes), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(bg_info_record->ionodes), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&bg_info_record->owner_name, + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&bg_info_record->bg_block_id, + &uint32_tmp, buffer); safe_unpack16(&uint16_tmp, buffer); bg_info_record->state = (int) uint16_tmp; safe_unpack16(&uint16_tmp, buffer); @@ -639,32 +722,33 @@ static int 
_unpack_node_info(bg_info_record_t *bg_info_record, Buf buffer) bg_info_record->nodecard = (int) uint16_tmp; safe_unpack32(&uint32_tmp, buffer); bg_info_record->node_cnt = (int) uint32_tmp; - safe_unpackstr_xmalloc(&bp_inx_str, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&bp_inx_str, &uint32_tmp, buffer); if (bp_inx_str == NULL) { bg_info_record->bp_inx = bitfmt2int(""); } else { bg_info_record->bp_inx = bitfmt2int(bp_inx_str); xfree(bp_inx_str); } - safe_unpackstr_xmalloc(&bp_inx_str, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&bp_inx_str, &uint32_tmp, buffer); if (bp_inx_str == NULL) { bg_info_record->ionode_inx = bitfmt2int(""); } else { bg_info_record->ionode_inx = bitfmt2int(bp_inx_str); xfree(bp_inx_str); } - safe_unpackstr_xmalloc(&bg_info_record->blrtsimage, &uint16_tmp, + safe_unpackstr_xmalloc(&bg_info_record->blrtsimage, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&bg_info_record->linuximage, &uint16_tmp, + safe_unpackstr_xmalloc(&bg_info_record->linuximage, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&bg_info_record->mloaderimage, &uint16_tmp, + safe_unpackstr_xmalloc(&bg_info_record->mloaderimage, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&bg_info_record->ramdiskimage, &uint16_tmp, + safe_unpackstr_xmalloc(&bg_info_record->ramdiskimage, &uint32_tmp, buffer); - + return SLURM_SUCCESS; unpack_error: + error("_unpack_node_info: error unpacking here"); _free_node_info(bg_info_record); return SLURM_ERROR; } @@ -711,6 +795,7 @@ extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo) (*jobinfo)->rotate = (uint16_t) NO_VAL; (*jobinfo)->bg_block_id = NULL; (*jobinfo)->magic = JOBINFO_MAGIC; + (*jobinfo)->nodes = NULL; (*jobinfo)->ionodes = NULL; (*jobinfo)->node_cnt = NO_VAL; (*jobinfo)->max_procs = NO_VAL; @@ -767,6 +852,10 @@ extern int select_g_set_jobinfo (select_jobinfo_t jobinfo, xfree(jobinfo->bg_block_id); jobinfo->bg_block_id = xstrdup(tmp_char); break; + case SELECT_DATA_NODES: + xfree(jobinfo->nodes); + jobinfo->nodes = xstrdup(tmp_char); + break; case SELECT_DATA_IONODES: xfree(jobinfo->ionodes); jobinfo->ionodes = xstrdup(tmp_char); @@ -858,6 +947,13 @@ extern int select_g_get_jobinfo (select_jobinfo_t jobinfo, else *tmp_char = xstrdup(jobinfo->bg_block_id); break; + case SELECT_DATA_NODES: + if ((jobinfo->nodes == NULL) + || (jobinfo->nodes[0] == '\0')) + *tmp_char = NULL; + else + *tmp_char = xstrdup(jobinfo->nodes); + break; case SELECT_DATA_IONODES: if ((jobinfo->ionodes == NULL) || (jobinfo->ionodes[0] == '\0')) @@ -937,6 +1033,7 @@ extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo) rc->rotate = jobinfo->rotate; rc->bg_block_id = xstrdup(jobinfo->bg_block_id); rc->magic = JOBINFO_MAGIC; + rc->nodes = xstrdup(jobinfo->nodes); rc->ionodes = xstrdup(jobinfo->ionodes); rc->node_cnt = jobinfo->node_cnt; rc->altered = jobinfo->altered; @@ -945,7 +1042,6 @@ extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo) rc->linuximage = xstrdup(jobinfo->linuximage); rc->mloaderimage = xstrdup(jobinfo->mloaderimage); rc->ramdiskimage = xstrdup(jobinfo->ramdiskimage); - } return rc; @@ -967,6 +1063,7 @@ extern int select_g_free_jobinfo (select_jobinfo_t *jobinfo) } else { (*jobinfo)->magic = 0; xfree((*jobinfo)->bg_block_id); + xfree((*jobinfo)->nodes); xfree((*jobinfo)->ionodes); xfree((*jobinfo)->blrtsimage); xfree((*jobinfo)->linuximage); @@ -1001,24 +1098,29 @@ extern int select_g_pack_jobinfo (select_jobinfo_t jobinfo, Buf buffer) pack32(jobinfo->max_procs, buffer); packstr(jobinfo->bg_block_id, buffer); + 
packstr(jobinfo->nodes, buffer); packstr(jobinfo->ionodes, buffer); packstr(jobinfo->blrtsimage, buffer); packstr(jobinfo->linuximage, buffer); packstr(jobinfo->mloaderimage, buffer); packstr(jobinfo->ramdiskimage, buffer); } else { + /* pack space for 3 positions for start and for geo + * then 1 for conn_type, reboot, and rotate + */ for (i=0; i<((SYSTEM_DIMENSIONS*2)+3); i++) pack16((uint16_t) 0, buffer); - pack32((uint32_t) 0, buffer); - pack32((uint32_t) 0, buffer); + pack32((uint32_t) 0, buffer); //node_cnt + pack32((uint32_t) 0, buffer); //max_procs - packstr("", buffer); - packstr("", buffer); - packstr("", buffer); - packstr("", buffer); - packstr("", buffer); - packstr("", buffer); + packnull(buffer); //bg_block_id + packnull(buffer); //nodes + packnull(buffer); //ionodes + packnull(buffer); //blrts + packnull(buffer); //linux + packnull(buffer); //mloader + packnull(buffer); //ramdisk } return SLURM_SUCCESS; @@ -1033,7 +1135,7 @@ extern int select_g_pack_jobinfo (select_jobinfo_t jobinfo, Buf buffer) extern int select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer) { int i; - uint16_t uint16_tmp; + uint32_t uint32_tmp; for (i=0; i<SYSTEM_DIMENSIONS; i++) { safe_unpack16(&(jobinfo->start[i]), buffer); @@ -1046,13 +1148,14 @@ extern int select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer) safe_unpack32(&(jobinfo->node_cnt), buffer); safe_unpack32(&(jobinfo->max_procs), buffer); - safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(jobinfo->ionodes), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(jobinfo->blrtsimage), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(jobinfo->linuximage), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(jobinfo->mloaderimage), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(jobinfo->ramdiskimage), &uint16_tmp, buffer); - + safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(jobinfo->nodes), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(jobinfo->ionodes), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(jobinfo->blrtsimage), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(jobinfo->linuximage), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(jobinfo->mloaderimage), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(jobinfo->ramdiskimage), &uint32_tmp, buffer); + return SLURM_SUCCESS; unpack_error: @@ -1162,6 +1265,13 @@ extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo, case SELECT_PRINT_BG_ID: snprintf(buf, size, "%s", jobinfo->bg_block_id); break; + case SELECT_PRINT_NODES: + if(jobinfo->ionodes && jobinfo->ionodes[0]) + snprintf(buf, size, "%s[%s]", + jobinfo->nodes, jobinfo->ionodes); + else + snprintf(buf, size, "%s", jobinfo->nodes); + break; case SELECT_PRINT_CONNECTION: snprintf(buf, size, "%s", _job_conn_type_string(jobinfo->conn_type)); diff --git a/src/common/node_select.h b/src/common/node_select.h index 281fca313..46e1defa2 100644 --- a/src/common/node_select.h +++ b/src/common/node_select.h @@ -1,12 +1,12 @@ /*****************************************************************************\ * node_select.h - Define node selection plugin functions. * - * $Id: node_select.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: node_select.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
* Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -46,6 +46,21 @@ #include <slurm/slurm.h> #include <slurm/slurm_errno.h> +typedef struct { + bitstr_t *avail_nodes; /* usable nodes are set on input, nodes + * not required to satisfy the request + * are cleared, other left set */ + struct job_record *job_ptr; /* pointer to job being scheduled + * start_time is set when we can + * possibly start job. Or must not + * increase for success of running + * other jobs. + */ + uint32_t max_nodes; /* maximum count of nodes (0==don't care) */ + uint32_t min_nodes; /* minimum count of nodes */ + uint32_t req_nodes; /* requested (or desired) count of nodes */ +} select_will_run_t; + /*****************************************\ * GLOBAL SELECT STATE MANGEMENT FUNCIONS * \*****************************************/ @@ -152,19 +167,42 @@ extern int select_g_block_init(List part_list); * JOB-SPECIFIC SELECT CREDENTIAL MANAGEMENT FUNCIONS * \******************************************************/ +#define SELECT_MODE_RUN_NOW 0 +#define SELECT_MODE_TEST_ONLY 1 +#define SELECT_MODE_WILL_RUN 2 + /* * Select the "best" nodes for given job from those available - * IN job_ptr - pointer to job being considered for initiation + * IN/OUT job_ptr - pointer to job being considered for initiation, + * set's start_time when job expected to start * IN/OUT bitmap - map of nodes being considered for allocation on input, * map of nodes actually to be assigned on output * IN min_nodes - minimum number of nodes to allocate to job * IN max_nodes - maximum number of nodes to allocate to job * IN req_nodes - requested (or desired) count of nodes - * IN test_only - if true, only test if ever could run, not necessarily now + * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now + * SELECT_MODE_TEST_ONLY: test if job can ever run + * SELECT_MODE_WILL_RUN: determine when and where job can run + * RET zero on success, EINVAL otherwise */ extern int select_g_job_test(struct job_record *job_ptr, bitstr_t *bitmap, uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes, bool test_only); + uint32_t req_nodes, int mode); + + +/* + * Given a list of select_will_run_t's in + * accending priority order we will see if we can start and + * finish all the jobs without increasing the start times of the + * jobs specified and fill in the est_start of requests with no + * est_start. If you are looking to see if one job will ever run + * then use select_p_job_test instead. + * IN/OUT req_list - list of select_will_run_t's in asscending + * priority order on success of placement fill in + * est_start of request with time. + * RET zero on success, EINVAL otherwise + */ +extern int select_g_job_list_test(List req_list); /* * Note initiation of job is about to begin. 
Called immediately @@ -200,6 +238,14 @@ extern int select_g_job_suspend(struct job_record *job_ptr); */ extern int select_g_job_resume(struct job_record *job_ptr); +/* + * Get number of allocated cores per socket from a job + * IN job_id - identifies the job + * IN alloc_index - allocated node index + * IN s - socket index + */ +extern int select_g_get_job_cores(uint32_t job_id, int alloc_index, int s); + /* allocate storage for a select job credential * OUT jobinfo - storage for a select job credential * RET - slurm error code @@ -274,6 +320,16 @@ extern int select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer); extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo, char *buf, size_t size, int mode); +/* Prepare to start a job step, allocate memory as needed + * RET - slurm error code + */ +extern int select_g_step_begin(struct step_record *step_ptr); + +/* Prepare to terminate a job step, release memory as needed + * RET - slurm error code + */ +extern int select_g_step_fini(struct step_record *step_ptr); + /******************************************************\ * NODE-SELECT PLUGIN SPECIFIC INFORMATION FUNCTIONS * \******************************************************/ @@ -294,4 +350,7 @@ extern int select_g_unpack_node_info(node_select_info_msg_t ** extern int select_g_free_node_info(node_select_info_msg_t ** node_select_info_msg_pptr); +/* Note reconfiguration or change in partition configuration */ +extern int select_g_reconfigure(void); + #endif /*__SELECT_PLUGIN_API_H__*/ diff --git a/src/common/optz.c b/src/common/optz.c index f8afe2238..ec9d67d84 100644 --- a/src/common/optz.c +++ b/src/common/optz.c @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/optz.h b/src/common/optz.h index 4898b4d6e..07cf1b257 100644 --- a/src/common/optz.h +++ b/src/common/optz.h @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/pack.c b/src/common/pack.c index 50bbe43d9..53902756f 100644 --- a/src/common/pack.c +++ b/src/common/pack.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick@llnl.gov>, * Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -333,9 +333,9 @@ int unpack8(uint8_t * valp, Buf buffer) * size_val to network byte order and store at buffer followed by * the data at valp. Adjust buffer counters. 
*/ -void packmem(char *valp, uint16_t size_val, Buf buffer) +void packmem(char *valp, uint32_t size_val, Buf buffer) { - uint16_t ns = htons(size_val); + uint32_t ns = htonl(size_val); if (remaining_buf(buffer) < (sizeof(ns) + size_val)) { buffer->size += (size_val + BUF_SIZE); @@ -360,15 +360,15 @@ void packmem(char *valp, uint16_t size_val, Buf buffer) * NOTE: valp is set to point into the buffer bufp, a copy of * the data is not made */ -int unpackmem_ptr(char **valp, uint16_t * size_valp, Buf buffer) +int unpackmem_ptr(char **valp, uint32_t * size_valp, Buf buffer) { - uint16_t ns; + uint32_t ns; if (remaining_buf(buffer) < sizeof(ns)) return SLURM_ERROR; memcpy(&ns, &buffer->head[buffer->processed], sizeof(ns)); - *size_valp = ntohs(ns); + *size_valp = ntohl(ns); buffer->processed += sizeof(ns); if (*size_valp > 0) { @@ -390,15 +390,15 @@ int unpackmem_ptr(char **valp, uint16_t * size_valp, Buf buffer) * NOTE: The caller is responsible for the management of valp and * insuring it has sufficient size */ -int unpackmem(char *valp, uint16_t * size_valp, Buf buffer) +int unpackmem(char *valp, uint32_t * size_valp, Buf buffer) { - uint16_t ns; + uint32_t ns; if (remaining_buf(buffer) < sizeof(ns)) return SLURM_ERROR; memcpy(&ns, &buffer->head[buffer->processed], sizeof(ns)); - *size_valp = ntohs(ns); + *size_valp = ntohl(ns); buffer->processed += sizeof(ns); if (*size_valp > 0) { @@ -420,15 +420,15 @@ int unpackmem(char *valp, uint16_t * size_valp, Buf buffer) * the caller is responsible for calling xfree() on *valp * if non-NULL (set to NULL on zero size buffer value) */ -int unpackmem_xmalloc(char **valp, uint16_t * size_valp, Buf buffer) +int unpackmem_xmalloc(char **valp, uint32_t * size_valp, Buf buffer) { - uint16_t ns; + uint32_t ns; if (remaining_buf(buffer) < sizeof(ns)) return SLURM_ERROR; memcpy(&ns, &buffer->head[buffer->processed], sizeof(ns)); - *size_valp = ntohs(ns); + *size_valp = ntohl(ns); buffer->processed += sizeof(ns); if (*size_valp > 0) { @@ -452,15 +452,15 @@ int unpackmem_xmalloc(char **valp, uint16_t * size_valp, Buf buffer) * the caller is responsible for calling free() on *valp * if non-NULL (set to NULL on zero size buffer value) */ -int unpackmem_malloc(char **valp, uint16_t * size_valp, Buf buffer) +int unpackmem_malloc(char **valp, uint32_t * size_valp, Buf buffer) { - uint16_t ns; + uint32_t ns; if (remaining_buf(buffer) < sizeof(ns)) return SLURM_ERROR; memcpy(&ns, &buffer->head[buffer->processed], sizeof(ns)); - *size_valp = ntohs(ns); + *size_valp = ntohl(ns); buffer->processed += sizeof(ns); if (*size_valp > 0) { @@ -480,10 +480,10 @@ int unpackmem_malloc(char **valp, uint16_t * size_valp, Buf buffer) * (size_val), convert size_val to network byte order and store in the * buffer followed by the data at valp. Adjust buffer counters. 
*/ -void packstr_array(char **valp, uint16_t size_val, Buf buffer) +void packstr_array(char **valp, uint32_t size_val, Buf buffer) { int i; - uint16_t ns = htons(size_val); + uint32_t ns = htonl(size_val); if (remaining_buf(buffer) < sizeof(ns)) { buffer->size += BUF_SIZE; @@ -507,23 +507,23 @@ void packstr_array(char **valp, uint16_t size_val, Buf buffer) * the caller is responsible for calling xfree on *valp * if non-NULL (set to NULL on zero size buffer value) */ -int unpackstr_array(char ***valp, uint16_t * size_valp, Buf buffer) +int unpackstr_array(char ***valp, uint32_t * size_valp, Buf buffer) { int i; - uint16_t ns; - uint16_t uint16_tmp; + uint32_t ns; + uint32_t uint32_tmp; if (remaining_buf(buffer) < sizeof(ns)) return SLURM_ERROR; memcpy(&ns, &buffer->head[buffer->processed], sizeof(ns)); - *size_valp = ntohs(ns); + *size_valp = ntohl(ns); buffer->processed += sizeof(ns); if (*size_valp > 0) { *valp = xmalloc(sizeof(char *) * (*size_valp + 1)); for (i = 0; i < *size_valp; i++) { - if (unpackmem_xmalloc(&(*valp)[i], &uint16_tmp, buffer)) + if (unpackmem_xmalloc(&(*valp)[i], &uint32_tmp, buffer)) return SLURM_ERROR; } (*valp)[i] = NULL; /* NULL terminated array so that execle */ diff --git a/src/common/pack.h b/src/common/pack.h index fd5b8e6bb..4b18355e9 100644 --- a/src/common/pack.h +++ b/src/common/pack.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -98,14 +98,14 @@ int unpack16_array(uint16_t **valp, uint32_t* size_val, Buf buffer); void pack32_array(uint32_t *valp, uint32_t size_val, Buf buffer); int unpack32_array(uint32_t **valp, uint32_t* size_val, Buf buffer); -void packmem(char *valp, uint16_t size_val, Buf buffer); -int unpackmem(char *valp, uint16_t *size_valp, Buf buffer); -int unpackmem_ptr(char **valp, uint16_t *size_valp, Buf buffer); -int unpackmem_xmalloc(char **valp, uint16_t *size_valp, Buf buffer); -int unpackmem_malloc(char **valp, uint16_t *size_valp, Buf buffer); +void packmem(char *valp, uint32_t size_val, Buf buffer); +int unpackmem(char *valp, uint32_t *size_valp, Buf buffer); +int unpackmem_ptr(char **valp, uint32_t *size_valp, Buf buffer); +int unpackmem_xmalloc(char **valp, uint32_t *size_valp, Buf buffer); +int unpackmem_malloc(char **valp, uint32_t *size_valp, Buf buffer); -void packstr_array(char **valp, uint16_t size_val, Buf buffer); -int unpackstr_array(char ***valp, uint16_t* size_val, Buf buffer); +void packstr_array(char **valp, uint32_t size_val, Buf buffer); +int unpackstr_array(char ***valp, uint32_t* size_val, Buf buffer); void packmem_array(char *valp, uint32_t size_val, Buf buffer); int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); @@ -189,7 +189,7 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); } while (0) #define safe_packmem(valp,size_val,buf) do { \ - assert(sizeof(size_val) == sizeof(uint16_t)); \ + assert(sizeof(size_val) == sizeof(uint32_t)); \ assert(size_val == 0 || valp != NULL); \ assert(buf->magic == BUF_MAGIC); \ packmem(valp,size_val,buf); \ @@ -197,7 +197,7 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); #define safe_unpackmem(valp,size_valp,buf) do { \ assert(valp != NULL); \ - assert(sizeof(*size_valp) == 
sizeof(uint16_t)); \ + assert(sizeof(*size_valp) == sizeof(uint32_t)); \ assert(buf->magic == BUF_MAGIC); \ if (unpackmem(valp,size_valp,buf)) \ goto unpack_error; \ @@ -205,7 +205,7 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); #define safe_unpackmem_ptr(valp,size_valp,buf) do { \ assert(valp != NULL); \ - assert(sizeof(*size_valp) == sizeof(uint16_t)); \ + assert(sizeof(*size_valp) == sizeof(uint32_t)); \ assert(buf->magic == BUF_MAGIC); \ if (unpackmem_ptr(valp,size_valp,buf)) \ goto unpack_error; \ @@ -213,7 +213,7 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); #define safe_unpackmem_xmalloc(valp,size_valp,buf) do { \ assert(valp != NULL); \ - assert(sizeof(*size_valp) == sizeof(uint16_t)); \ + assert(sizeof(*size_valp) == sizeof(uint32_t)); \ assert(buf->magic == BUF_MAGIC); \ if (unpackmem_xmalloc(valp,size_valp,buf)) \ goto unpack_error; \ @@ -221,7 +221,7 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); #define safe_unpackmem_malloc(valp,size_valp,buf) do { \ assert(valp != NULL); \ - assert(sizeof(*size_valp) == sizeof(uint16_t)); \ + assert(sizeof(*size_valp) == sizeof(uint32_t)); \ assert(buf->magic == BUF_MAGIC); \ if (unpackmem_malloc(valp,size_valp,buf)) \ goto unpack_error; \ @@ -235,31 +235,32 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); uint32_t _size; \ bit_fmt(_tmp_str,max_len,bitmap); \ _size = strlen(_tmp_str)+1; \ - packmem(_tmp_str,(uint16_t)_size,buf); \ + packmem(_tmp_str,_size,buf); \ } else \ - packmem(NULL,(uint16_t)0,buf); \ + packmem(NULL,(uint32_t)0,buf); \ } while (0) #define safe_packstr(str,max_len,buf) do { \ uint32_t _size; \ assert(buf->magic == BUF_MAGIC); \ - assert(max_len <= 0xffff); \ + assert(sizeof(*max_len) === sizeof(uint32_t)); \ _size = (str ? strlen(str)+1 : 0); \ assert(_size == 0 || str != NULL); \ if (_size <= max_len) \ - packmem(str,(uint16_t)_size,buf); \ + packmem(str,_size,buf); \ else { \ char tmp_str[max_len]; \ strncpy(tmp_str, str, max_len-1); \ tmp_str[max_len - 1] = (char) NULL; \ - packmem(tmp_str,(uint16_t)max_len,buf); \ + packmem(tmp_str,max_len,buf); \ } \ } while (0) #define packstr(str,buf) do { \ - uint32_t _size; \ - _size = (uint32_t)(str ? 
strlen(str)+1 : 0); \ - assert(_size == 0 || str != NULL); \ + uint32_t _size = 0; \ + if((char *)str != NULL) \ + _size = (uint32_t)strlen(str)+1; \ + assert(_size == 0 || str != NULL); \ assert(_size <= 0xffff); \ assert(buf->magic == BUF_MAGIC); \ packmem(str,(uint16_t)_size,buf); \ @@ -278,9 +279,9 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); uint32_t _size; \ bit_fmt(_tmp_str,0xfffe,bitmap); \ _size = strlen(_tmp_str)+1; \ - packmem(_tmp_str,(uint16_t)_size,buf); \ + packmem(_tmp_str,_size,buf); \ } else \ - packmem(NULL,(uint16_t)0,buf); \ + packmem(NULL,(uint32_t)0,buf); \ } while (0) #define unpackstr_ptr \ @@ -300,6 +301,7 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); #define safe_packstr_array(array,size_val,buf) do { \ assert(size_val == 0 || array != NULL); \ + assert(sizeof(*size_valp) == sizeof(uint32_t)); \ assert(buf->magic == BUF_MAGIC); \ packstr_array(array,size_val,buf); \ } while (0) @@ -307,7 +309,7 @@ int unpackmem_array(char *valp, uint32_t size_valp, Buf buffer); #define safe_unpackstr_array(valp,size_valp,buf) do { \ assert(valp != NULL); \ assert(size_valp != NULL); \ - assert(sizeof(*size_valp) == sizeof(uint16_t)); \ + assert(sizeof(*size_valp) == sizeof(uint32_t)); \ assert(buf->magic == BUF_MAGIC); \ if (unpackstr_array(valp,size_valp,buf)) \ goto unpack_error; \ diff --git a/src/common/parse_config.c b/src/common/parse_config.c index bcca5b03d..cf156a0fc 100644 --- a/src/common/parse_config.c +++ b/src/common/parse_config.c @@ -3,12 +3,12 @@ * * NOTE: when you see the prefix "s_p_", think "slurm parser". * - * $Id: parse_config.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: parse_config.c 14064 2008-05-15 23:53:06Z jette $ ***************************************************************************** * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -511,10 +511,14 @@ static int _handle_uint32(s_p_values_t *v, errno = 0; num = strtoul(value, &endptr, 0); + if ((endptr[0] == 'k') || (endptr[0] == 'K')) { + num *= 1024; + endptr++; + } if ((num == 0 && errno == EINVAL) || (*endptr != '\0')) { - if (strcasecmp(value, "UNLIMITED") == 0 - || strcasecmp(value, "INFINITE") == 0) { + if ((strcasecmp(value, "UNLIMITED") == 0) || + (strcasecmp(value, "INFINITE") == 0)) { num = (uint32_t)-1; } else { error("%s value (%s) is not a valid number", diff --git a/src/common/parse_config.h b/src/common/parse_config.h index 3b7ff37ff..acbbdf02b 100644 --- a/src/common/parse_config.h +++ b/src/common/parse_config.h @@ -3,12 +3,12 @@ * * NOTE: when you see the prefix "s_p_", think "slurm parser". * - * $Id: parse_config.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: parse_config.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
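The pack.c/pack.h hunks above widen every packed string and memory-block length prefix from 16 to 32 bits (htons/ntohs become htonl/ntohl). A minimal self-contained sketch of that length-prefixed wire format, assuming nothing from SLURM's Buf API (the demo_* names are hypothetical):

#include <arpa/inet.h>   /* htonl, ntohl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical illustration of the 1.3-style wire format used by
 * packstr()/packmem(): a 32-bit network-order length followed by the
 * bytes (length includes the trailing NUL, 0 means "no string"). */
static size_t demo_packstr(char *dst, const char *str)
{
    uint32_t len = str ? (uint32_t) strlen(str) + 1 : 0;
    uint32_t ns = htonl(len);

    memcpy(dst, &ns, sizeof(ns));
    if (len)
        memcpy(dst + sizeof(ns), str, len);
    return sizeof(ns) + len;
}

static size_t demo_unpackstr(const char *src, const char **str)
{
    uint32_t ns, len;

    memcpy(&ns, src, sizeof(ns));
    len = ntohl(ns);
    *str = len ? src + sizeof(ns) : NULL;
    return sizeof(ns) + len;
}

int main(void)
{
    char buf[64];
    const char *out;
    size_t n = demo_packstr(buf, "slurmctld");

    demo_unpackstr(buf, &out);
    printf("packed %zu bytes, unpacked \"%s\"\n", n, out);
    return 0;
}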
diff --git a/src/common/parse_spec.c b/src/common/parse_spec.c index 0bfbc78f0..e1ff00c3f 100644 --- a/src/common/parse_spec.c +++ b/src/common/parse_spec.c @@ -1,11 +1,11 @@ -/* $Id: parse_spec.c 12452 2007-10-05 19:07:07Z da $ */ +/* $Id: parse_spec.c 13672 2008-03-19 23:10:58Z jette $ */ /*****************************************************************************\ * parse_spec.c - configuration file parser ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/parse_spec.h b/src/common/parse_spec.h index 678eb287b..6b4359c3c 100644 --- a/src/common/parse_spec.h +++ b/src/common/parse_spec.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/parse_time.c b/src/common/parse_time.c index b7253ac91..e72f2c93e 100644 --- a/src/common/parse_time.c +++ b/src/common/parse_time.c @@ -1,11 +1,10 @@ /*****************************************************************************\ * src/common/parse_time.c - time parsing utility functions - * $Id$ ***************************************************************************** * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
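The _handle_uint32() hunk in parse_config.c above teaches the parser to accept a trailing 'k'/'K' as a 1024 multiplier and to map UNLIMITED/INFINITE to (uint32_t)-1. A standalone sketch of that parsing pattern, assuming only libc (parse_uint32_suffix is a hypothetical name, not a SLURM symbol):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>   /* strcasecmp */

/* Returns 0 on success and stores the parsed value in *out. */
static int parse_uint32_suffix(const char *value, uint32_t *out)
{
    char *endptr;
    unsigned long num;

    errno = 0;
    num = strtoul(value, &endptr, 0);
    if ((endptr[0] == 'k') || (endptr[0] == 'K')) {
        num *= 1024;          /* "64k" -> 65536, as in the hunk */
        endptr++;
    }
    if ((num == 0 && errno == EINVAL) || (*endptr != '\0')) {
        if ((strcasecmp(value, "UNLIMITED") == 0) ||
            (strcasecmp(value, "INFINITE") == 0)) {
            *out = (uint32_t) -1;
            return 0;
        }
        return -1;            /* not a valid number */
    }
    *out = (uint32_t) num;
    return 0;
}

int main(void)
{
    uint32_t v;

    if (parse_uint32_suffix("64k", &v) == 0)
        printf("64k -> %u\n", v);   /* prints 65536 */
    return 0;
}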
@@ -462,7 +461,7 @@ slurm_make_time_str (time_t *time, char *string, int size) * days-hr:min:sec * days-hr * output: - * minutes (or -1 on error) (or INFINITE value defined in slurm.h + * minutes (or -2 on error, INFINITE is -1 as defined in slurm.h) * if unlimited is the value of string) */ extern int time_str2mins(char *string) @@ -472,8 +471,9 @@ extern int time_str2mins(char *string) if ((string == NULL) || (string[0] == '\0')) return -1; /* invalid input */ - - if (!strcasecmp(string, "UNLIMITED")) { + if ((!strcasecmp(string, "-1")) || + (!strcasecmp(string, "INFINITE")) || + (!strcasecmp(string, "UNLIMITED"))) { return INFINITE; } @@ -482,7 +482,7 @@ extern int time_str2mins(char *string) tmp = (tmp * 10) + (string[i] - '0'); } else if (string[i] == '-') { if (days != -1) - return -1; /* invalid input */ + return -2; /* invalid input */ days = tmp; tmp = 0; } else if ((string[i] == ':') || (string[i] == '\0')) { @@ -495,10 +495,10 @@ extern int time_str2mins(char *string) min = sec; sec = tmp; } else - return -1; /* invalid input */ + return -2; /* invalid input */ tmp = 0; } else - return -1; /* invalid input */ + return -2; /* invalid input */ if (string[i] == '\0') break; @@ -546,3 +546,25 @@ extern void secs2time_str(time_t time, char *string, int size) hours, minutes, seconds); } } + +extern void mins2time_str(uint32_t time, char *string, int size) +{ + if (time == INFINITE) { + snprintf(string, size, "UNLIMITED"); + } else { + long days, hours, minutes, seconds; + seconds = 0; + minutes = time % 60; + hours = time / 60 % 24; + days = time / 1440; + + if (days) + snprintf(string, size, + "%ld-%2.2ld:%2.2ld:%2.2ld", + days, hours, minutes, seconds); + else + snprintf(string, size, + "%2.2ld:%2.2ld:%2.2ld", + hours, minutes, seconds); + } +} diff --git a/src/common/parse_time.h b/src/common/parse_time.h index 5b4a7b547..0189f81aa 100644 --- a/src/common/parse_time.h +++ b/src/common/parse_time.h @@ -5,7 +5,7 @@ * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -16,7 +16,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -39,6 +39,19 @@ #ifndef _PARSE_TIME_H_ #define _PARSE_TIME_H_ +#if HAVE_CONFIG_H +# include "config.h" +# if HAVE_INTTYPES_H +# include <inttypes.h> +# else +# if HAVE_STDINT_H +# include <stdint.h> +# endif +# endif /* HAVE_INTTYPES_H */ +#else /* !HAVE_CONFIG_H */ +# include <inttypes.h> +#endif + #include <time.h> /* Convert string to equivalent time value @@ -62,8 +75,7 @@ extern time_t parse_time(char *time_str); * IN size - length of string buffer, we recommend a size of 32 bytes to * easily support different site-specific formats */ -extern void -slurm_make_time_str (time_t *time, char *string, int size); +extern void slurm_make_time_str (time_t *time, char *string, int size); /* Convert a string to an equivalent time value * input formats: @@ -82,5 +94,6 @@ extern int time_str2mins(char *string); * fill in string with HH:MM:SS or D-HH:MM:SS */ extern void secs2time_str(time_t time, char *string, int size); +extern void mins2time_str(uint32_t time, char *string, int size); #endif diff --git a/src/common/plugin.c b/src/common/plugin.c index b31db7459..5ca7f372c 100644 --- a/src/common/plugin.c +++ b/src/common/plugin.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windley <jwindley@lnxi.com>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -220,8 +220,12 @@ plugin_get_syms( plugin_handle_t plug, count = 0; for ( i = 0; i < n_syms; ++i ) { ptrs[ i ] = dlsym( plug, names[ i ] ); - if ( ptrs[ i ] ) ++count; - } + if ( ptrs[ i ] ) + ++count; + else + error("Couldn't find sym '%s' in the plugin", + names[ i ]); + } return count; } diff --git a/src/common/plugin.h b/src/common/plugin.h index c57d2be1f..9db07b24d 100644 --- a/src/common/plugin.h +++ b/src/common/plugin.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windlay <jwindley@lnxi.com>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/plugrack.c b/src/common/plugrack.c index cb40dfebd..cc4a948b3 100644 --- a/src/common/plugrack.c +++ b/src/common/plugrack.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windley <jwindley@lnxi.com>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/plugrack.h b/src/common/plugrack.h index 2c8161d90..3937f0bd1 100644 --- a/src/common/plugrack.h +++ b/src/common/plugrack.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windley <jwindley@lnxi.com>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
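The new mins2time_str() added to parse_time.c above renders a minute count as [days-]HH:MM:SS with a zero seconds field, or as UNLIMITED for INFINITE. A self-contained sketch exercising the same arithmetic, assuming INFINITE is 0xffffffff as defined in slurm.h:

#include <stdint.h>
#include <stdio.h>

#define INFINITE ((uint32_t) 0xffffffff)   /* assumed, mirrors slurm.h */

/* Standalone copy of the formatting logic added as mins2time_str(). */
static void demo_mins2time_str(uint32_t time, char *string, int size)
{
    if (time == INFINITE) {
        snprintf(string, size, "UNLIMITED");
    } else {
        long days, hours, minutes, seconds;
        seconds = 0;
        minutes = time % 60;
        hours   = time / 60 % 24;
        days    = time / 1440;
        if (days)
            snprintf(string, size, "%ld-%2.2ld:%2.2ld:%2.2ld",
                     days, hours, minutes, seconds);
        else
            snprintf(string, size, "%2.2ld:%2.2ld:%2.2ld",
                     hours, minutes, seconds);
    }
}

int main(void)
{
    char buf[32];

    demo_mins2time_str(1500, buf, sizeof(buf));  /* 1500 min -> 1-01:00:00 */
    printf("%s\n", buf);
    return 0;
}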
diff --git a/src/common/plugstack.c b/src/common/plugstack.c index 2c9f30e6a..16b3a137d 100644 --- a/src/common/plugstack.c +++ b/src/common/plugstack.c @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -1017,7 +1017,7 @@ static struct spank_plugin_opt *_find_remote_option_by_name(const char } if (!(name = strchr(buf, ':'))) { - error("Malformed plugin option \"%s\" recieved. Ignoring", + error("Malformed plugin option \"%s\" received. Ignoring", str); return (NULL); } diff --git a/src/common/plugstack.h b/src/common/plugstack.h index 7a7f57a80..288663c76 100644 --- a/src/common/plugstack.h +++ b/src/common/plugstack.h @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/proc_args.c b/src/common/proc_args.c new file mode 100644 index 000000000..e46fbf4b8 --- /dev/null +++ b/src/common/proc_args.c @@ -0,0 +1,653 @@ +/*****************************************************************************\ + * proc_args.c - helper functions for command argument processing + * $Id: opt.h 11996 2007-08-10 20:36:26Z jette $ + ***************************************************************************** + * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. + * Written by Christopher Holmes <cholmes@hp.com>, who borrowed heavily + * from existing SLURM source code, particularly src/srun/opt.c + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+\*****************************************************************************/ + +#if HAVE_CONFIG_H +# include "config.h" +#endif + +#include <string.h> /* strcpy, strncasecmp */ + +#ifdef HAVE_STRINGS_H +# include <strings.h> +#endif + +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif + +#include <fcntl.h> +#include <stdarg.h> /* va_start */ +#include <stdio.h> +#include <stdlib.h> /* getenv */ +#include <pwd.h> /* getpwuid */ +#include <ctype.h> /* isdigit */ +#include <sys/param.h> /* MAXPATHLEN */ +#include <sys/stat.h> +#include <unistd.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/utsname.h> + +#include "src/common/list.h" +#include "src/common/xmalloc.h" +#include "src/common/xstring.h" + +#include "src/common/proc_args.h" + + + + +/* print this version of SLURM */ +void print_slurm_version(void) +{ + printf("%s %s\n", PACKAGE, SLURM_VERSION); +} + +/* + * verify that a distribution type in arg is of a known form + * returns the task_dist_states, or -1 if state is unknown + */ +task_dist_states_t verify_dist_type(const char *arg, uint32_t *plane_size) +{ + int len = strlen(arg); + char *dist_str = NULL; + task_dist_states_t result = SLURM_DIST_UNKNOWN; + bool lllp_dist = false, plane_dist = false; + + dist_str = strchr(arg,':'); + if (dist_str != NULL) { + /* -m cyclic|block:cyclic|block */ + lllp_dist = true; + } else { + /* -m plane=<plane_size> */ + dist_str = strchr(arg,'='); + if(dist_str != NULL) { + *plane_size=atoi(dist_str+1); + len = dist_str-arg; + plane_dist = true; + } + } + + if (lllp_dist) { + if (strcasecmp(arg, "cyclic:cyclic") == 0) { + result = SLURM_DIST_CYCLIC_CYCLIC; + } else if (strcasecmp(arg, "cyclic:block") == 0) { + result = SLURM_DIST_CYCLIC_BLOCK; + } else if (strcasecmp(arg, "block:block") == 0) { + result = SLURM_DIST_BLOCK_BLOCK; + } else if (strcasecmp(arg, "block:cyclic") == 0) { + result = SLURM_DIST_BLOCK_CYCLIC; + } + } else if (plane_dist) { + if (strncasecmp(arg, "plane", len) == 0) { + result = SLURM_DIST_PLANE; + } + } else { + if (strncasecmp(arg, "cyclic", len) == 0) { + result = SLURM_DIST_CYCLIC; + } else if (strncasecmp(arg, "block", len) == 0) { + result = SLURM_DIST_BLOCK; + } else if ((strncasecmp(arg, "arbitrary", len) == 0) || + (strncasecmp(arg, "hostfile", len) == 0)) { + result = SLURM_DIST_ARBITRARY; + } + } + + return result; +} + +/* + * verify that a connection type in arg is of known form + * returns the connection_type or -1 if not recognized + */ +int verify_conn_type(const char *arg) +{ + int len = strlen(arg); + + if (!strncasecmp(arg, "MESH", len)) + return SELECT_MESH; + else if (!strncasecmp(arg, "TORUS", len)) + return SELECT_TORUS; + else if (!strncasecmp(arg, "NAV", len)) + return SELECT_NAV; + + error("invalid --conn-type argument %s ignored.", arg); + return -1; +} + +/* + * verify geometry arguments, must have proper count + * returns -1 on error, 0 otherwise + */ +int verify_geometry(const char *arg, uint16_t *geometry) +{ + char* token, *delimiter = ",x", *next_ptr; + int i, rc = 0; + char* geometry_tmp = xstrdup(arg); + char* original_ptr = geometry_tmp; + + token = strtok_r(geometry_tmp, delimiter, &next_ptr); + for (i=0; i<SYSTEM_DIMENSIONS; i++) { + if (token == NULL) { + error("insufficient dimensions in --geometry"); + rc = -1; + break; + } + geometry[i] = (uint16_t)atoi(token); + if (geometry[i] == 0 || geometry[i] == (uint16_t)NO_VAL) { + error("invalid --geometry argument"); + rc = -1; + break; + } + geometry_tmp = next_ptr; + token = strtok_r(geometry_tmp, delimiter, &next_ptr); 
+ } + if (token != NULL) { + error("too many dimensions in --geometry"); + rc = -1; + } + + if (original_ptr) + xfree(original_ptr); + + return rc; +} + +/* return command name from its full path name */ +char * base_name(char* command) +{ + char *char_ptr, *name; + int i; + + if (command == NULL) + return NULL; + + char_ptr = strrchr(command, (int)'/'); + if (char_ptr == NULL) + char_ptr = command; + else + char_ptr++; + + i = strlen(char_ptr); + name = xmalloc(i+1); + strcpy(name, char_ptr); + return name; +} + +/* + * str_to_bytes(): verify that arg is numeric with optional "G" or "M" at end + * if "G" or "M" is there, multiply by proper power of 2 and return + * number in bytes + */ +long str_to_bytes(const char *arg) +{ + char *buf; + char *endptr; + int end; + int multiplier = 1; + long result; + + buf = xstrdup(arg); + + end = strlen(buf) - 1; + + if (isdigit(buf[end])) { + result = strtol(buf, &endptr, 10); + + if (*endptr != '\0') + result = -result; + + } else { + + switch (toupper(buf[end])) { + + case 'G': + multiplier = 1024; + break; + + case 'M': + /* do nothing */ + break; + + default: + multiplier = -1; + } + + buf[end] = '\0'; + + result = multiplier * strtol(buf, &endptr, 10); + + if (*endptr != '\0') + result = -result; + } + + return result; +} + +/* Convert a string into a node count */ +static int +_str_to_nodes(const char *num_str, char **leftover) +{ + long int num; + char *endptr; + + num = strtol(num_str, &endptr, 10); + if (endptr == num_str) { /* no valid digits */ + *leftover = (char *)num_str; + return 0; + } + if (*endptr != '\0' && (*endptr == 'k' || *endptr == 'K')) { + num *= 1024; + endptr++; + } + *leftover = endptr; + + return (int)num; +} + +/* + * verify that a node count in arg is of a known form (count or min-max) + * OUT min, max specified minimum and maximum node counts + * RET true if valid + */ +bool verify_node_count(const char *arg, int *min_nodes, int *max_nodes) +{ + char *ptr, *min_str, *max_str; + char *leftover; + + /* Does the string contain a "-" character? If so, treat as a range. + * otherwise treat as an absolute node count. */ + if ((ptr = index(arg, '-')) != NULL) { + min_str = xstrndup(arg, ptr-arg); + *min_nodes = _str_to_nodes(min_str, &leftover); + if (!xstring_is_whitespace(leftover)) { + error("\"%s\" is not a valid node count", min_str); + xfree(min_str); + return false; + } + xfree(min_str); + if (*min_nodes == 0) + *min_nodes = 1; + + max_str = xstrndup(ptr+1, strlen(arg)-((ptr+1)-arg)); + *max_nodes = _str_to_nodes(max_str, &leftover); + if (!xstring_is_whitespace(leftover)) { + error("\"%s\" is not a valid node count", max_str); + xfree(max_str); + return false; + } + xfree(max_str); + } else { + *min_nodes = *max_nodes = _str_to_nodes(arg, &leftover); + if (!xstring_is_whitespace(leftover)) { + error("\"%s\" is not a valid node count", arg); + return false; + } + if (*min_nodes == 0) { + /* whitespace does not a valid node count make */ + error("\"%s\" is not a valid node count", arg); + return false; + } + } + + if ((*max_nodes != 0) && (*max_nodes < *min_nodes)) { + error("Maximum node count %d is less than" + " minimum node count %d", + *max_nodes, *min_nodes); + return false; + } + + return true; +} + +/* + * get either 1 or 2 integers for a resource count in the form of either + * (count, min-max, or '*') + * A partial error message is passed in via the 'what' param. 
+ * RET true if valid + */ +bool +get_resource_arg_range(const char *arg, const char *what, int* min, int *max, + bool isFatal) +{ + char *p; + long int result; + + if (*arg == '\0') return true; + + /* wildcard meaning every possible value in range */ + if (*arg == '*' ) { + *min = 1; + *max = INT_MAX; + return true; + } + + result = strtol(arg, &p, 10); + if (*p == 'k' || *p == 'K') { + result *= 1024; + p++; + } + + if (((*p != '\0')&&(*p != '-')) || (result <= 0L)) { + error ("Invalid numeric value \"%s\" for %s.", arg, what); + if (isFatal) exit(1); + return false; + } else if (result > INT_MAX) { + error ("Numeric argument (%ld) to big for %s.", result, what); + if (isFatal) exit(1); + return false; + } + + *min = (int) result; + + if (*p == '\0') return true; + if (*p == '-') p++; + + result = strtol(p, &p, 10); + if (*p == 'k' || *p == 'K') { + result *= 1024; + p++; + } + + if (((*p != '\0')&&(*p != '-')) || (result <= 0L)) { + error ("Invalid numeric value \"%s\" for %s.", arg, what); + if (isFatal) exit(1); + return false; + } else if (result > INT_MAX) { + error ("Numeric argument (%ld) to big for %s.", result, what); + if (isFatal) exit(1); + return false; + } + + *max = (int) result; + + return true; +} + +/* + * verify that a resource counts in arg are of a known form X, X:X, X:X:X, or + * X:X:X:X, where X is defined as either (count, min-max, or '*') + * RET true if valid + */ +bool verify_socket_core_thread_count(const char *arg, + int *min_sockets, int *max_sockets, + int *min_cores, int *max_cores, + int *min_threads, int *max_threads, + cpu_bind_type_t *cpu_bind_type) +{ + bool tmp_val,ret_val; + int i,j; + const char *cur_ptr = arg; + char buf[3][48]; /* each can hold INT64_MAX - INT64_MAX */ + buf[0][0] = '\0'; + buf[1][0] = '\0'; + buf[2][0] = '\0'; + + for (j=0;j<3;j++) { + for (i=0;i<47;i++) { + if (*cur_ptr == '\0' || *cur_ptr ==':') break; + buf[j][i] = *cur_ptr++; + } + if (*cur_ptr == '\0') break; + xassert(*cur_ptr == ':'); + buf[j][i] = '\0'; + cur_ptr++; + } + /* if cpu_bind_type doesn't already have a auto preference, choose + * the level based on the level of the -E specification + */ + if (!(*cpu_bind_type & (CPU_BIND_TO_SOCKETS | + CPU_BIND_TO_CORES | + CPU_BIND_TO_THREADS))) { + if (j == 0) { + *cpu_bind_type |= CPU_BIND_TO_SOCKETS; + } else if (j == 1) { + *cpu_bind_type |= CPU_BIND_TO_CORES; + } else if (j == 2) { + *cpu_bind_type |= CPU_BIND_TO_THREADS; + } + } + buf[j][i] = '\0'; + + ret_val = true; + tmp_val = get_resource_arg_range(&buf[0][0], "first arg of -B", + min_sockets, max_sockets, true); + ret_val = ret_val && tmp_val; + tmp_val = get_resource_arg_range(&buf[1][0], "second arg of -B", + min_cores, max_cores, true); + ret_val = ret_val && tmp_val; + tmp_val = get_resource_arg_range(&buf[2][0], "third arg of -B", + min_threads, max_threads, true); + ret_val = ret_val && tmp_val; + + return ret_val; +} + +/* + * verify that a hint is valid and convert it into the implied settings + * RET true if valid + */ +bool verify_hint(const char *arg, int *min_sockets, int *max_sockets, + int *min_cores, int *max_cores, int *min_threads, + int *max_threads, cpu_bind_type_t *cpu_bind_type) +{ + char *buf, *p, *tok; + if (!arg) { + return true; + } + + buf = xstrdup(arg); + p = buf; + /* change all ',' delimiters not followed by a digit to ';' */ + /* simplifies parsing tokens while keeping map/mask together */ + while (p[0] != '\0') { + if ((p[0] == ',') && (!isdigit(p[1]))) + p[0] = ';'; + p++; + } + + p = buf; + while ((tok = strsep(&p, ";"))) { + if 
(strcasecmp(tok, "help") == 0) { + printf( +"Application hint options:\n" +" --hint= Bind tasks according to application hints\n" +" compute_bound use all cores in each physical CPU\n" +" memory_bound use only one core in each physical CPU\n" +" [no]multithread [don't] use extra threads with in-core multi-threading\n" +" help show this help message\n"); + return 1; + } else if (strcasecmp(tok, "compute_bound") == 0) { + *min_sockets = 1; + *max_sockets = INT_MAX; + *min_cores = 1; + *max_cores = INT_MAX; + *cpu_bind_type |= CPU_BIND_TO_CORES; + } else if (strcasecmp(tok, "memory_bound") == 0) { + *min_cores = 1; + *max_cores = 1; + *cpu_bind_type |= CPU_BIND_TO_CORES; + } else if (strcasecmp(tok, "multithread") == 0) { + *min_threads = 1; + *max_threads = INT_MAX; + *cpu_bind_type |= CPU_BIND_TO_THREADS; + } else if (strcasecmp(tok, "nomultithread") == 0) { + *min_threads = 1; + *max_threads = 1; + *cpu_bind_type |= CPU_BIND_TO_THREADS; + } else { + error("unrecognized --hint argument \"%s\", see --hint=help", tok); + xfree(buf); + return 1; + } + } + + xfree(buf); + return 0; +} + +uint16_t parse_mail_type(const char *arg) +{ + uint16_t rc; + + if (strcasecmp(arg, "BEGIN") == 0) + rc = MAIL_JOB_BEGIN; + else if (strcasecmp(arg, "END") == 0) + rc = MAIL_JOB_END; + else if (strcasecmp(arg, "FAIL") == 0) + rc = MAIL_JOB_FAIL; + else if (strcasecmp(arg, "ALL") == 0) + rc = MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL; + else + rc = 0; /* failure */ + + return rc; +} +char *print_mail_type(const uint16_t type) +{ + if (type == 0) + return "NONE"; + + if (type == MAIL_JOB_BEGIN) + return "BEGIN"; + if (type == MAIL_JOB_END) + return "END"; + if (type == MAIL_JOB_FAIL) + return "FAIL"; + if (type == (MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL)) + return "ALL"; + + return "MULTIPLE"; +} + +static void +_freeF(void *data) +{ + xfree(data); +} + +static List +_create_path_list(void) +{ + List l = list_create(_freeF); + char *path = xstrdup(getenv("PATH")); + char *c, *lc; + + c = getenv("PATH"); + if (!c) { + error("No PATH environment variable"); + return l; + } + path = xstrdup(c); + c = lc = path; + + while (*c != '\0') { + if (*c == ':') { + /* nullify and push token onto list */ + *c = '\0'; + if (lc != NULL && strlen(lc) > 0) + list_append(l, xstrdup(lc)); + lc = ++c; + } else + c++; + } + + if (strlen(lc) > 0) + list_append(l, xstrdup(lc)); + + xfree(path); + + return l; +} + +char * +search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode) +{ + List l = NULL; + ListIterator i = NULL; + char *path, *fullpath = NULL; + + if ( (cmd[0] == '.' 
|| cmd[0] == '/') + && (access(cmd, access_mode) == 0 ) ) { + if (cmd[0] == '.') + xstrfmtcat(fullpath, "%s/", cwd); + xstrcat(fullpath, cmd); + goto done; + } + + l = _create_path_list(); + if (l == NULL) + return NULL; + + if (check_current_dir) + list_prepend(l, xstrdup(cwd)); + + i = list_iterator_create(l); + while ((path = list_next(i))) { + xstrfmtcat(fullpath, "%s/%s", path, cmd); + + if (access(fullpath, access_mode) == 0) + goto done; + + xfree(fullpath); + fullpath = NULL; + } + done: + if (l) + list_destroy(l); + return fullpath; +} + +char *print_commandline(const int script_argc, char **script_argv) +{ + int i; + char buf[256]; + + buf[0] = '\0'; + for (i = 0; i < script_argc; i++) + snprintf(buf, 256, "%s", script_argv[i]); + return xstrdup(buf); +} + +char *print_geometry(const uint16_t *geometry) +{ + int i; + char buf[32], *rc = NULL; + + if ((SYSTEM_DIMENSIONS == 0) + || (geometry[0] == (uint16_t)NO_VAL)) + return NULL; + + for (i=0; i<SYSTEM_DIMENSIONS; i++) { + if (i > 0) + snprintf(buf, sizeof(buf), "x%u", geometry[i]); + else + snprintf(buf, sizeof(buf), "%u", geometry[i]); + xstrcat(rc, buf); + } + + return rc; +} diff --git a/src/common/proc_args.h b/src/common/proc_args.h new file mode 100644 index 000000000..109556bad --- /dev/null +++ b/src/common/proc_args.h @@ -0,0 +1,106 @@ +/*****************************************************************************\ + * proc_args.h - helper functions for command argument processing + * $Id: opt.h 11996 2007-08-10 20:36:26Z jette $ + ***************************************************************************** + * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. + * Written by Christopher Holmes <cholmes@hp.com>, who borrowed heavily + * from existing SLURM source code, particularly src/srun/opt.c + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +\*****************************************************************************/ + +#ifndef _PROC_ARGS_H +#define _PROC_ARGS_H + + +#if HAVE_CONFIG_H +# include "config.h" +#endif + +#include <sys/types.h> +#include <unistd.h> + +#include "src/common/macros.h" /* true and false */ +#include "src/common/env.h" + + +#define format_task_dist_states(t) (t == SLURM_DIST_BLOCK) ? "block" : \ + (t == SLURM_DIST_CYCLIC) ? "cyclic" : \ + (t == SLURM_DIST_PLANE) ? "plane" : \ + (t == SLURM_DIST_CYCLIC_CYCLIC) ? "cyclic:cyclic" : \ + (t == SLURM_DIST_CYCLIC_BLOCK) ? "cyclic:block" : \ + (t == SLURM_DIST_BLOCK_CYCLIC) ? "block:cyclic" : \ + (t == SLURM_DIST_BLOCK_BLOCK) ? "block:block" : \ + (t == SLURM_DIST_ARBITRARY) ? 
"arbitrary" : \ + "unknown" + + +/* print this version of SLURM */ +void print_slurm_version(void); + +/* verify the requested distribution type */ +task_dist_states_t verify_dist_type(const char *arg, uint32_t *plane_size); + +/* verify the requested connection type */ +int verify_conn_type(const char *arg); + +/* verify the requested geometry arguments */ +int verify_geometry(const char *arg, uint16_t *geometry); + +/* return command name from its full path name */ +char * base_name(char* command); + +/* confirm and convert a str to it's presented numeric value */ +long str_to_bytes(const char *arg); + +/* verify that a node count in arg is of a known form (count or min-max) */ +bool verify_node_count(const char *arg, int *min_nodes, int *max_nodes); + +/* parse a possible range of values from the form: count, min-max, or '*' */ +bool get_resource_arg_range(const char *arg, const char *what, + int* min, int *max, bool isFatal); + +/* verify resource counts from a complex form of: X, X:X, X:X:X or X:X:X:X */ +bool verify_socket_core_thread_count(const char *arg, + int *min_sockets, int *max_sockets, + int *min_cores, int *max_cores, + int *min_threads, int *max_threads, + cpu_bind_type_t *cpu_bind_type); + +/* verify a hint and convert it into the implied settings */ +bool verify_hint(const char *arg, int *min_sockets, int *max_sockets, + int *min_cores, int *max_cores, int *min_threads, + int *max_threads, cpu_bind_type_t *cpu_bind_type); + +/* parse the mail type */ +uint16_t parse_mail_type(const char *arg); + +/* print the mail type */ +char *print_mail_type(const uint16_t type); + +/* search PATH to confirm the access of the given command */ +char *search_path(char *cwd, char *cmd, bool check_current_dir, + int access_mode); + +/* helper function for printing options */ +char *print_commandline(const int script_argc, char **script_argv); + +/* helper function for printing geometry option */ +char *print_geometry(const uint16_t *geometry); + +#endif /* !_PROC_ARGS_H */ diff --git a/src/common/read_config.c b/src/common/read_config.c index 1d9c515a4..131fb27d3 100644 --- a/src/common/read_config.c +++ b/src/common/read_config.c @@ -1,10 +1,11 @@ /*****************************************************************************\ * read_config.c - read the overall slurm configuration file ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -64,6 +65,7 @@ #include "src/common/xstring.h" #include "src/common/slurm_rlimits_info.h" #include "src/common/parse_config.h" +#include "src/common/parse_time.h" #include "src/common/slurm_selecttype_info.h" /* Instantiation of the "extern slurm_ctl_conf_t slurmcltd_conf" @@ -119,48 +121,81 @@ static void validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl); s_p_options_t slurm_conf_options[] = { + {"AccountingStorageEnforce", S_P_UINT16}, + {"AccountingStorageHost", S_P_STRING}, + {"AccountingStorageLoc", S_P_STRING}, + {"AccountingStoragePass", S_P_STRING}, + {"AccountingStoragePort", S_P_UINT32}, + {"AccountingStorageType", S_P_STRING}, + {"AccountingStorageUser", S_P_STRING}, {"AuthType", S_P_STRING}, - {"CheckpointType", S_P_STRING}, - {"CacheGroups", S_P_UINT16}, {"BackupAddr", S_P_STRING}, {"BackupController", S_P_STRING}, + {"CheckpointType", S_P_STRING}, + {"CacheGroups", S_P_UINT16}, + {"ClusterName", S_P_STRING}, {"ControlAddr", S_P_STRING}, {"ControlMachine", S_P_STRING}, - {"DisableRootJobs", S_P_UINT16}, + {"CryptoType", S_P_STRING}, + {"DefaultStorageHost", S_P_STRING}, + {"DefaultStorageLoc", S_P_STRING}, + {"DefaultStoragePass", S_P_STRING}, + {"DefaultStoragePort", S_P_UINT32}, + {"DefaultStorageType", S_P_STRING}, + {"DefaultStorageUser", S_P_STRING}, + {"DefMemPerTask", S_P_UINT32}, + {"DisableRootJobs", S_P_BOOLEAN}, {"Epilog", S_P_STRING}, + {"EpilogMsgTime", S_P_UINT32}, {"FastSchedule", S_P_UINT16}, {"FirstJobId", S_P_UINT32}, + {"GetEnvTimeout", S_P_UINT16}, {"HashBase", S_P_LONG, defunct_option}, {"HeartbeatInterval", S_P_LONG, defunct_option}, + {"HealthCheckInterval", S_P_UINT16}, + {"HealthCheckProgram", S_P_STRING}, {"InactiveLimit", S_P_UINT16}, + {"JobAcctGatherType", S_P_STRING}, + {"JobAcctFrequency", S_P_UINT16, defunct_option}, + {"JobAcctGatherFrequency", S_P_UINT16}, {"JobAcctLogFile", S_P_STRING}, - {"JobAcctFrequency", S_P_UINT16}, {"JobAcctType", S_P_STRING}, + {"JobCompHost", S_P_STRING}, {"JobCompLoc", S_P_STRING}, + {"JobCompPass", S_P_STRING}, + {"JobCompPort", S_P_UINT32}, {"JobCompType", S_P_STRING}, + {"JobCompUser", S_P_STRING}, {"JobCredentialPrivateKey", S_P_STRING}, {"JobCredentialPublicCertificate", S_P_STRING}, {"JobFileAppend", S_P_UINT16}, - {"GetEnvTimeout", S_P_UINT16}, + {"JobRequeue", S_P_UINT16}, {"KillTree", S_P_UINT16, defunct_option}, {"KillWait", S_P_UINT16}, + {"Licenses", S_P_STRING}, {"MailProg", S_P_STRING}, {"MaxJobCount", S_P_UINT16}, + {"MaxMemPerTask", S_P_UINT32}, {"MessageTimeout", S_P_UINT16}, {"MinJobAge", S_P_UINT16}, - {"MpichGmDirectSupport", S_P_LONG}, + {"MpichGmDirectSupport", S_P_LONG, defunct_option}, {"MpiDefault", S_P_STRING}, {"PluginDir", S_P_STRING}, {"PlugStackConfig", S_P_STRING}, + {"PrivateData", S_P_UINT16}, {"ProctrackType", S_P_STRING}, {"Prolog", S_P_STRING}, {"PropagatePrioProcess", S_P_UINT16}, {"PropagateResourceLimitsExcept", S_P_STRING}, {"PropagateResourceLimits", S_P_STRING}, + {"ResumeProgram", S_P_STRING}, + {"ResumeRate", S_P_UINT16}, {"ReturnToService", S_P_UINT16}, - {"SchedulerAuth", S_P_STRING}, + {"SchedulerAuth", S_P_STRING, defunct_option}, + {"SchedulerParameters", S_P_STRING}, {"SchedulerPort", S_P_UINT16}, {"SchedulerRootFilter", S_P_UINT16}, + {"SchedulerTimeSlice", S_P_UINT16}, {"SchedulerType", S_P_STRING}, {"SelectType", S_P_STRING}, {"SelectTypeParameters", S_P_STRING}, @@ -179,6 +214,11 @@ s_p_options_t slurm_conf_options[] = { {"SrunEpilog", S_P_STRING}, {"SrunProlog", S_P_STRING}, {"StateSaveLocation", S_P_STRING}, + {"SuspendExcNodes", 
S_P_STRING}, + {"SuspendExcParts", S_P_STRING}, + {"SuspendProgram", S_P_STRING}, + {"SuspendRate", S_P_UINT16}, + {"SuspendTime", S_P_LONG}, {"SwitchType", S_P_STRING}, {"TaskEpilog", S_P_STRING}, {"TaskProlog", S_P_STRING}, @@ -214,10 +254,10 @@ static int parse_nodename(void **dest, slurm_parser_enum_t type, s_p_hashtbl_t *tbl, *dflt; slurm_conf_node_t *n; static s_p_options_t _nodename_options[] = { - {"NodeHostname", S_P_STRING}, - {"NodeAddr", S_P_STRING}, {"CoresPerSocket", S_P_UINT16}, {"Feature", S_P_STRING}, + {"NodeAddr", S_P_STRING}, + {"NodeHostname", S_P_STRING}, {"Port", S_P_UINT16}, {"Procs", S_P_UINT16}, {"RealMemory", S_P_UINT32}, @@ -280,8 +320,8 @@ static int parse_nodename(void **dest, slurm_parser_enum_t type, if (!s_p_get_uint16(&n->port, "Port", tbl) && !s_p_get_uint16(&n->port, "Port", dflt)) { - /* This gets resolved in slurm_conf_get_port() - * and slurm_conf_get_addr(). For now just + /* This gets resolved in slurm_conf_get_port() + * and slurm_conf_get_addr(). For now just * leave with a value of zero */ n->port = 0; } @@ -400,11 +440,13 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type, static s_p_options_t _partition_options[] = { {"AllowGroups", S_P_STRING}, {"Default", S_P_BOOLEAN}, /* YES or NO */ + {"DisableRootJobs", S_P_BOOLEAN}, /* YES or NO */ {"Hidden", S_P_BOOLEAN}, /* YES or NO */ - {"MaxTime", S_P_UINT32}, /* INFINITE or a number */ + {"MaxTime", S_P_STRING}, {"MaxNodes", S_P_UINT32}, /* INFINITE or a number */ {"MinNodes", S_P_UINT32}, {"Nodes", S_P_STRING}, + {"Priority", S_P_UINT16}, {"RootOnly", S_P_BOOLEAN}, /* YES or NO */ {"Shared", S_P_STRING}, /* YES, NO, or FORCE */ {"State", S_P_BOOLEAN}, /* UP or DOWN */ @@ -439,13 +481,29 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type, && !s_p_get_boolean(&p->default_flag, "Default", dflt)) p->default_flag = false; + if (!s_p_get_boolean((bool *)&p->disable_root_jobs, + "DisableRootJobs", tbl)) + p->disable_root_jobs = (uint16_t)NO_VAL; + if (!s_p_get_boolean(&p->hidden_flag, "Hidden", tbl) && !s_p_get_boolean(&p->hidden_flag, "Hidden", dflt)) p->hidden_flag = false; - if (!s_p_get_uint32(&p->max_time, "MaxTime", tbl) - && !s_p_get_uint32(&p->max_time, "MaxTime", dflt)) + if (!s_p_get_string(&tmp, "MaxTime", tbl) && + !s_p_get_string(&tmp, "MaxTime", dflt)) p->max_time = INFINITE; + else { + int max_time = time_str2mins(tmp); + if ((max_time < 0) && (max_time != INFINITE)) { + error("Bad value \"%s\" for MaxTime", tmp); + destroy_partitionname(p); + s_p_hashtbl_destroy(tbl); + xfree(tmp); + return -1; + } + p->max_time = max_time; + xfree(tmp); + } if (!s_p_get_uint32(&p->max_nodes, "MaxNodes", tbl) && !s_p_get_uint32(&p->max_nodes, "MaxNodes", dflt)) @@ -470,20 +528,27 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type, && !s_p_get_boolean(&p->root_only_flag, "RootOnly", dflt)) p->root_only_flag = false; - if (!s_p_get_string(&tmp, "Shared", tbl) - && !s_p_get_string(&tmp, "Shared", dflt)) { - p->shared = SHARED_NO; - } else { + if (!s_p_get_uint16(&p->priority, "Priority", tbl) && + !s_p_get_uint16(&p->priority, "Priority", dflt)) + p->priority = 1; + + if (s_p_get_string(&tmp, "Shared", tbl) || + s_p_get_string(&tmp, "Shared", dflt)) { if (strcasecmp(tmp, "NO") == 0) - p->shared = SHARED_NO; + p->max_share = 1; #ifndef HAVE_XCPU /* Only "Shared=NO" is valid on XCPU systems */ - else if (strcasecmp(tmp, "YES") == 0) - p->shared = SHARED_YES; else if (strcasecmp(tmp, "EXCLUSIVE") == 0) - p->shared = SHARED_EXCLUSIVE; - else if 
(strcasecmp(tmp, "FORCE") == 0) - p->shared = SHARED_FORCE; + p->max_share = 0; + else if (strncasecmp(tmp, "YES:", 4) == 0) + p->max_share = strtol(&tmp[4], (char **) NULL, 10); + else if (strcasecmp(tmp, "YES") == 0) + p->max_share = 4; + else if (strncasecmp(tmp, "FORCE:", 6) == 0) { + p->max_share = strtol(&tmp[6], (char **) NULL, 10) | + SHARED_FORCE; + } else if (strcasecmp(tmp, "FORCE") == 0) + p->max_share = 4 | SHARED_FORCE; #endif else { error("Bad value \"%s\" for Shared", tmp); @@ -492,7 +557,9 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type, xfree(tmp); return -1; } - } + } else + p->max_share = 1; + xfree(tmp); if (!s_p_get_boolean(&p->state_up_flag, "State", tbl) @@ -875,8 +942,8 @@ extern uint16_t slurm_conf_get_port(const char *node_name) p = node_to_host_hashtbl[idx]; while (p) { if (strcmp(p->alias, node_name) == 0) { - uint16_t port = p->port; - if (!port) + uint16_t port; + if (!p->port) p->port = (uint16_t) conf_ptr->slurmd_port; port = p->port; slurm_conf_unlock(); @@ -998,46 +1065,60 @@ gethostname_short (char *name, size_t len) extern void free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash) { + xfree (ctl_conf_ptr->accounting_storage_host); + xfree (ctl_conf_ptr->accounting_storage_loc); + xfree (ctl_conf_ptr->accounting_storage_pass); + xfree (ctl_conf_ptr->accounting_storage_type); + xfree (ctl_conf_ptr->accounting_storage_user); xfree (ctl_conf_ptr->authtype); - xfree (ctl_conf_ptr->checkpoint_type); xfree (ctl_conf_ptr->backup_addr); xfree (ctl_conf_ptr->backup_controller); + xfree (ctl_conf_ptr->checkpoint_type); + xfree (ctl_conf_ptr->cluster_name); xfree (ctl_conf_ptr->control_addr); xfree (ctl_conf_ptr->control_machine); + xfree (ctl_conf_ptr->crypto_type); xfree (ctl_conf_ptr->epilog); - xfree (ctl_conf_ptr->job_acct_logfile); - xfree (ctl_conf_ptr->job_acct_type); + xfree (ctl_conf_ptr->health_check_program); + xfree (ctl_conf_ptr->job_acct_gather_type); + xfree (ctl_conf_ptr->job_comp_host); xfree (ctl_conf_ptr->job_comp_loc); + xfree (ctl_conf_ptr->job_comp_pass); xfree (ctl_conf_ptr->job_comp_type); + xfree (ctl_conf_ptr->job_comp_user); xfree (ctl_conf_ptr->job_credential_private_key); xfree (ctl_conf_ptr->job_credential_public_certificate); + xfree (ctl_conf_ptr->licenses); xfree (ctl_conf_ptr->mail_prog); xfree (ctl_conf_ptr->mpi_default); + xfree (ctl_conf_ptr->node_prefix); xfree (ctl_conf_ptr->plugindir); xfree (ctl_conf_ptr->plugstack); xfree (ctl_conf_ptr->proctrack_type); xfree (ctl_conf_ptr->prolog); xfree (ctl_conf_ptr->propagate_rlimits_except); xfree (ctl_conf_ptr->propagate_rlimits); + xfree (ctl_conf_ptr->resume_program); + xfree (ctl_conf_ptr->slurm_conf); xfree (ctl_conf_ptr->schedtype); xfree (ctl_conf_ptr->select_type); - xfree (ctl_conf_ptr->slurm_conf); xfree (ctl_conf_ptr->slurm_user_name); xfree (ctl_conf_ptr->slurmctld_logfile); xfree (ctl_conf_ptr->slurmctld_pidfile); xfree (ctl_conf_ptr->slurmd_logfile); xfree (ctl_conf_ptr->slurmd_pidfile); xfree (ctl_conf_ptr->slurmd_spooldir); + xfree (ctl_conf_ptr->srun_epilog); + xfree (ctl_conf_ptr->srun_prolog); xfree (ctl_conf_ptr->state_save_location); + xfree (ctl_conf_ptr->suspend_exc_nodes); + xfree (ctl_conf_ptr->suspend_exc_parts); + xfree (ctl_conf_ptr->suspend_program); xfree (ctl_conf_ptr->switch_type); - xfree (ctl_conf_ptr->tmp_fs); xfree (ctl_conf_ptr->task_epilog); - xfree (ctl_conf_ptr->task_prolog); xfree (ctl_conf_ptr->task_plugin); + xfree (ctl_conf_ptr->task_prolog); xfree (ctl_conf_ptr->tmp_fs); - xfree 
(ctl_conf_ptr->srun_prolog); - xfree (ctl_conf_ptr->srun_epilog); - xfree (ctl_conf_ptr->node_prefix); xfree (ctl_conf_ptr->unkillable_program); if (purge_node_hash) @@ -1054,44 +1135,69 @@ void init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr) { ctl_conf_ptr->last_update = time(NULL); - xfree (ctl_conf_ptr->authtype); ctl_conf_ptr->cache_groups = (uint16_t) NO_VAL; - xfree (ctl_conf_ptr->checkpoint_type); + xfree (ctl_conf_ptr->accounting_storage_host); + xfree (ctl_conf_ptr->accounting_storage_loc); + xfree (ctl_conf_ptr->accounting_storage_pass); + ctl_conf_ptr->accounting_storage_port = 0; + xfree (ctl_conf_ptr->accounting_storage_type); + xfree (ctl_conf_ptr->accounting_storage_user); + xfree (ctl_conf_ptr->authtype); xfree (ctl_conf_ptr->backup_addr); xfree (ctl_conf_ptr->backup_controller); + ctl_conf_ptr->cache_groups = 0; + xfree (ctl_conf_ptr->checkpoint_type); + xfree (ctl_conf_ptr->cluster_name); xfree (ctl_conf_ptr->control_addr); xfree (ctl_conf_ptr->control_machine); + xfree (ctl_conf_ptr->crypto_type); + ctl_conf_ptr->def_mem_per_task = 0; ctl_conf_ptr->disable_root_jobs = 0; xfree (ctl_conf_ptr->epilog); + ctl_conf_ptr->epilog_msg_time = (uint32_t) NO_VAL; ctl_conf_ptr->fast_schedule = (uint16_t) NO_VAL; ctl_conf_ptr->first_job_id = (uint32_t) NO_VAL; + ctl_conf_ptr->get_env_timeout = 0; + ctl_conf_ptr->health_check_interval = 0; + xfree(ctl_conf_ptr->health_check_program); ctl_conf_ptr->inactive_limit = (uint16_t) NO_VAL; - xfree (ctl_conf_ptr->job_acct_logfile); - ctl_conf_ptr->job_acct_freq = 0; - xfree (ctl_conf_ptr->job_acct_type); + xfree (ctl_conf_ptr->job_acct_gather_type); + ctl_conf_ptr->job_acct_gather_freq = 0; xfree (ctl_conf_ptr->job_comp_loc); + xfree (ctl_conf_ptr->job_comp_pass); + ctl_conf_ptr->job_comp_port = 0; xfree (ctl_conf_ptr->job_comp_type); + xfree (ctl_conf_ptr->job_comp_user); xfree (ctl_conf_ptr->job_credential_private_key); xfree (ctl_conf_ptr->job_credential_public_certificate); ctl_conf_ptr->job_file_append = (uint16_t) NO_VAL; + ctl_conf_ptr->job_requeue = (uint16_t) NO_VAL; ctl_conf_ptr->kill_wait = (uint16_t) NO_VAL; + xfree (ctl_conf_ptr->licenses); xfree (ctl_conf_ptr->mail_prog); ctl_conf_ptr->max_job_cnt = (uint16_t) NO_VAL; + ctl_conf_ptr->max_mem_per_task = 0; ctl_conf_ptr->min_job_age = (uint16_t) NO_VAL; xfree (ctl_conf_ptr->mpi_default); ctl_conf_ptr->msg_timeout = (uint16_t) NO_VAL; ctl_conf_ptr->next_job_id = (uint32_t) NO_VAL; + xfree (ctl_conf_ptr->node_prefix); xfree (ctl_conf_ptr->plugindir); xfree (ctl_conf_ptr->plugstack); + ctl_conf_ptr->private_data = 0; xfree (ctl_conf_ptr->proctrack_type); xfree (ctl_conf_ptr->prolog); ctl_conf_ptr->propagate_prio_process = (uint16_t) NO_VAL; - xfree (ctl_conf_ptr->propagate_rlimits_except); xfree (ctl_conf_ptr->propagate_rlimits); + xfree (ctl_conf_ptr->propagate_rlimits_except); + xfree (ctl_conf_ptr->resume_program); + ctl_conf_ptr->resume_rate = (uint16_t) NO_VAL; ctl_conf_ptr->ret2service = (uint16_t) NO_VAL; + xfree( ctl_conf_ptr->sched_params ); + ctl_conf_ptr->sched_time_slice = (uint16_t) NO_VAL; + xfree( ctl_conf_ptr->schedtype ); ctl_conf_ptr->schedport = (uint16_t) NO_VAL; ctl_conf_ptr->schedrootfltr = (uint16_t) NO_VAL; - xfree( ctl_conf_ptr->schedtype ); xfree( ctl_conf_ptr->select_type ); ctl_conf_ptr->select_type_param = (uint16_t) NO_VAL; ctl_conf_ptr->slurm_user_id = (uint16_t) NO_VAL; @@ -1107,20 +1213,25 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr) ctl_conf_ptr->slurmd_port = (uint32_t) NO_VAL; xfree (ctl_conf_ptr->slurmd_spooldir); 
ctl_conf_ptr->slurmd_timeout = (uint16_t) NO_VAL; + xfree (ctl_conf_ptr->srun_prolog); + xfree (ctl_conf_ptr->srun_epilog); xfree (ctl_conf_ptr->state_save_location); + xfree (ctl_conf_ptr->suspend_exc_nodes); + xfree (ctl_conf_ptr->suspend_exc_parts); + xfree (ctl_conf_ptr->suspend_program); + ctl_conf_ptr->suspend_rate = (uint16_t) NO_VAL; + ctl_conf_ptr->suspend_time = (uint16_t) NO_VAL; xfree (ctl_conf_ptr->switch_type); xfree (ctl_conf_ptr->task_epilog); - xfree (ctl_conf_ptr->task_prolog); xfree (ctl_conf_ptr->task_plugin); + ctl_conf_ptr->task_plugin_param = 0; + xfree (ctl_conf_ptr->task_prolog); xfree (ctl_conf_ptr->tmp_fs); - ctl_conf_ptr->wait_time = (uint16_t) NO_VAL; - xfree (ctl_conf_ptr->srun_prolog); - xfree (ctl_conf_ptr->srun_epilog); - xfree (ctl_conf_ptr->node_prefix); ctl_conf_ptr->tree_width = (uint16_t) NO_VAL; - ctl_conf_ptr->use_pam = 0; xfree (ctl_conf_ptr->unkillable_program); ctl_conf_ptr->unkillable_timeout = (uint16_t) NO_VAL; + ctl_conf_ptr->use_pam = 0; + ctl_conf_ptr->wait_time = (uint16_t) NO_VAL; _free_name_hashtbl(); _init_name_hashtbl(); @@ -1339,8 +1450,13 @@ static void validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) { char *temp_str = NULL; + long long_suspend_time; bool truth; - + char *default_storage_type = NULL, *default_storage_host = NULL; + char *default_storage_user = NULL, *default_storage_pass = NULL; + char *default_storage_loc = NULL; + uint32_t default_storage_port = 0; + if (s_p_get_string(&conf->backup_controller, "BackupController", hashtbl) && strcasecmp("localhost", conf->backup_controller) == 0) { @@ -1359,6 +1475,8 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) conf->backup_addr = xstrdup(conf->backup_controller); } + s_p_get_string(&conf->cluster_name, "ClusterName", hashtbl); + if (!s_p_get_string(&conf->control_machine, "ControlMachine", hashtbl)) fatal ("validate_and_set_defaults: " "ControlMachine not specified."); @@ -1380,6 +1498,13 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) xfree(conf->backup_controller); } + s_p_get_string(&default_storage_type, "DefaultStorageType", hashtbl); + s_p_get_string(&default_storage_host, "DefaultStorageHost", hashtbl); + s_p_get_string(&default_storage_user, "DefaultStorageUser", hashtbl); + s_p_get_string(&default_storage_pass, "DefaultStoragePass", hashtbl); + s_p_get_string(&default_storage_loc, "DefaultStorageLoc", hashtbl); + s_p_get_uint32(&default_storage_port, "DefaultStoragePort", hashtbl); + if (!s_p_get_string(&conf->job_credential_private_key, "JobCredentialPrivateKey", hashtbl)) fatal("JobCredentialPrivateKey not set"); @@ -1400,12 +1525,21 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) if (!s_p_get_string(&conf->checkpoint_type, "CheckpointType", hashtbl)) conf->checkpoint_type = xstrdup(DEFAULT_CHECKPOINT_TYPE); - if (!s_p_get_uint16(&conf->disable_root_jobs, "DisableRootJobs", - hashtbl)) + if (!s_p_get_string(&conf->crypto_type, "CryptoType", hashtbl)) + conf->crypto_type = xstrdup(DEFAULT_CRYPTO_TYPE); + + if (!s_p_get_uint32(&conf->def_mem_per_task, "DefMemPerTask", hashtbl)) + conf->def_mem_per_task = DEFAULT_MEM_PER_TASK; + + if (!s_p_get_boolean((bool *) &conf->disable_root_jobs, + "DisableRootJobs", hashtbl)) conf->disable_root_jobs = DEFAULT_DISABLE_ROOT_JOBS; s_p_get_string(&conf->epilog, "Epilog", hashtbl); + if (!s_p_get_uint32(&conf->epilog_msg_time, "EpilogMsgTime", hashtbl)) + conf->epilog_msg_time = DEFAULT_EPILOG_MSG_TIME; + if 
(!s_p_get_uint16(&conf->fast_schedule, "FastSchedule", hashtbl)) conf->fast_schedule = DEFAULT_FAST_SCHEDULE; @@ -1428,40 +1562,108 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) conf->inactive_limit = DEFAULT_INACTIVE_LIMIT; } - if (!s_p_get_string(&conf->job_acct_logfile, - "JobAcctLogFile", hashtbl)) - conf->job_acct_logfile = xstrdup(DEFAULT_JOB_ACCT_LOGFILE); - - if (!s_p_get_uint16(&conf->job_acct_freq, "JobAcctFrequency", hashtbl)) - conf->job_acct_freq = DEFAULT_JOB_ACCT_FREQ; - - if (!s_p_get_string(&conf->job_acct_type, "JobAcctType", hashtbl)) - conf->job_acct_type = xstrdup(DEFAULT_JOB_ACCT_TYPE); - - s_p_get_string(&conf->job_comp_loc, "JobCompLoc", hashtbl); - - if (!s_p_get_string(&conf->job_comp_type, "JobCompType", hashtbl)) - conf->job_comp_type = xstrdup(DEFAULT_JOB_COMP_TYPE); + if (!s_p_get_uint16(&conf->job_acct_gather_freq, + "JobAcctGatherFrequency", hashtbl)) + conf->job_acct_gather_freq = DEFAULT_JOB_ACCT_GATHER_FREQ; + + if (s_p_get_string(&conf->job_acct_gather_type, + "JobAcctType", hashtbl)) { + fatal("JobAcctType is no longer a valid parameter.\n" + "The job accounting plugin has changed to 2 different " + "plugins one for gathering and one for storing the " + "gathered information.\n" + "Please change this to JobAcctGatherType to " + "correctly work.\n" + "The major 'jobacct' is now 'jobacct_gather' and " + "'jobacct_storage' your declarations will also need " + "to change in your slurm.conf file.\n" + "Refer to the slurm.conf man page or the web " + "documentation for further explanation."); + } + + if(!s_p_get_string(&conf->job_acct_gather_type, + "JobAcctGatherType", hashtbl)) + conf->job_acct_gather_type = + xstrdup(DEFAULT_JOB_ACCT_GATHER_TYPE); + + if (!s_p_get_string(&conf->job_comp_type, "JobCompType", hashtbl)) { + if(default_storage_type) + conf->job_comp_type = + xstrdup_printf("jobcomp/%s", + default_storage_type); + else + conf->job_comp_type = xstrdup(DEFAULT_JOB_COMP_TYPE); + } + if (!s_p_get_string(&conf->job_comp_loc, "JobCompLoc", hashtbl)) { + if(default_storage_loc) + conf->job_comp_loc = xstrdup(default_storage_loc); + else + conf->job_comp_loc = xstrdup(DEFAULT_JOB_COMP_LOC); + } + if (!s_p_get_string(&conf->job_comp_host, "JobCompHost", + hashtbl)) { + if(default_storage_host) + conf->job_comp_host = + xstrdup(default_storage_host); + else + conf->job_comp_host = xstrdup(DEFAULT_STORAGE_HOST); + } + if (!s_p_get_string(&conf->job_comp_user, "JobCompUser", + hashtbl)) { + if(default_storage_user) + conf->job_comp_user = + xstrdup(default_storage_user); + else + conf->job_comp_user = xstrdup(DEFAULT_STORAGE_USER); + } + if (!s_p_get_string(&conf->job_comp_pass, "JobCompPass", + hashtbl)) { + if(default_storage_pass) + conf->job_comp_pass = + xstrdup(default_storage_pass); + } + if (!s_p_get_uint32(&conf->job_comp_port, "JobCompPort", + hashtbl)) { + if(default_storage_port) + conf->job_comp_port = default_storage_port; + else + conf->job_comp_port = DEFAULT_STORAGE_PORT; + } if (!s_p_get_uint16(&conf->job_file_append, "JobFileAppend", hashtbl)) conf->job_file_append = 0; + if (!s_p_get_uint16(&conf->job_requeue, "JobRequeue", hashtbl)) + conf->job_requeue = 1; + else if (conf->job_requeue > 1) + conf->job_requeue = 1; + if (!s_p_get_uint16(&conf->get_env_timeout, "GetEnvTimeout", hashtbl)) conf->get_env_timeout = DEFAULT_GET_ENV_TIMEOUT; + s_p_get_uint16(&conf->health_check_interval, "HealthCheckInterval", hashtbl); + s_p_get_string(&conf->health_check_program, "HealthCheckProgram", hashtbl); + if 
(!s_p_get_uint16(&conf->kill_wait, "KillWait", hashtbl)) conf->kill_wait = DEFAULT_KILL_WAIT; + s_p_get_string(&conf->licenses, "Licenses", hashtbl); + if (!s_p_get_string(&conf->mail_prog, "MailProg", hashtbl)) conf->mail_prog = xstrdup(DEFAULT_MAIL_PROG); if (!s_p_get_uint16(&conf->max_job_cnt, "MaxJobCount", hashtbl)) conf->max_job_cnt = DEFAULT_MAX_JOB_COUNT; + if (!s_p_get_uint32(&conf->max_mem_per_task, "MaxMemPerTask", hashtbl)) + conf->max_mem_per_task = DEFAULT_MAX_MEM_PER_TASK; + if (!s_p_get_uint16(&conf->msg_timeout, "MessageTimeout", hashtbl)) conf->msg_timeout = DEFAULT_MSG_TIMEOUT; - else if (conf->msg_timeout > 100) - info("WARNING: MessageTimeout is too high for effective fault-tolerance"); + else if (conf->msg_timeout > 100) { + info("WARNING: MessageTimeout is too high for effective " + "fault-tolerance"); + } if (!s_p_get_uint16(&conf->min_job_age, "MinJobAge", hashtbl)) conf->min_job_age = DEFAULT_MIN_JOB_AGE; @@ -1469,6 +1671,69 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) if (!s_p_get_string(&conf->mpi_default, "MpiDefault", hashtbl)) conf->mpi_default = xstrdup(DEFAULT_MPI_DEFAULT); + if (!s_p_get_string(&conf->accounting_storage_type, + "AccountingStorageType", hashtbl)) { + if(default_storage_type) + conf->accounting_storage_type = + xstrdup_printf("accounting_storage/%s", + default_storage_type); + else + conf->accounting_storage_type = + xstrdup(DEFAULT_ACCOUNTING_STORAGE_TYPE); + } + + if (!s_p_get_uint16(&conf->accounting_storage_enforce, + "AccountingStorageEnforce", hashtbl)) + conf->accounting_storage_enforce = DEFAULT_ACCOUNTING_ENFORCE; + + if (!s_p_get_string(&conf->accounting_storage_host, + "AccountingStorageHost", hashtbl)) { + if(default_storage_host) + conf->accounting_storage_host = + xstrdup(default_storage_host); + else + conf->accounting_storage_host = + xstrdup(DEFAULT_STORAGE_HOST); + } + + /* AccountingStorageLoc replaces JobAcctLogFile since it now represents + * the database name also depending on the storage type you + * use so we still check JobAcctLogFile for the same thing + */ + if (!s_p_get_string(&conf->accounting_storage_loc, + "AccountingStorageLoc", hashtbl) + && !s_p_get_string(&conf->accounting_storage_loc, + "JobAcctLogFile", hashtbl)) { + if(default_storage_loc) + conf->accounting_storage_loc = + xstrdup(default_storage_loc); + else + conf->accounting_storage_loc = + xstrdup(DEFAULT_STORAGE_LOC); + } + + if (!s_p_get_string(&conf->accounting_storage_user, + "AccountingStorageUser", hashtbl)) { + if(default_storage_user) + conf->accounting_storage_user = + xstrdup(default_storage_user); + else + conf->accounting_storage_user = + xstrdup(DEFAULT_STORAGE_USER); + } + if (!s_p_get_string(&conf->accounting_storage_pass, + "AccountingStoragePass", hashtbl)) { + if(default_storage_pass) + conf->accounting_storage_pass = + xstrdup(default_storage_pass); + } + if (!s_p_get_uint32(&conf->accounting_storage_port, + "AccountingStoragePort", hashtbl)) { + if(default_storage_port) + conf->accounting_storage_port = default_storage_port; + else + conf->accounting_storage_port = DEFAULT_STORAGE_PORT; + } if (!s_p_get_string(&conf->plugindir, "PluginDir", hashtbl)) conf->plugindir = xstrdup(default_plugin_path); @@ -1489,6 +1754,8 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) && (!strcmp(conf->proctrack_type,"proctrack/linuxproc"))) fatal("proctrack/linuxproc is incompatable with switch/elan"); + s_p_get_uint16(&conf->private_data, "PrivateData", hashtbl); + 
s_p_get_string(&conf->prolog, "Prolog", hashtbl); if (!s_p_get_uint16(&conf->propagate_prio_process, @@ -1518,6 +1785,12 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) if (!s_p_get_uint16(&conf->ret2service, "ReturnToService", hashtbl)) conf->ret2service = DEFAULT_RETURN_TO_SERVICE; + s_p_get_string(&conf->resume_program, "ResumeProgram", hashtbl); + if (!s_p_get_uint16(&conf->resume_rate, "ResumeRate", hashtbl)) + conf->resume_rate = DEFAULT_RESUME_RATE; + + s_p_get_string(&conf->sched_params, "SchedulerParameters", hashtbl); + if (s_p_get_uint16(&conf->schedport, "SchedulerPort", hashtbl)) { if (conf->schedport == 0) { error("SchedulerPort=0 is invalid"); @@ -1531,6 +1804,10 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) "SchedulerRootFilter", hashtbl)) conf->schedrootfltr = DEFAULT_SCHEDROOTFILTER; + if (!s_p_get_uint16(&conf->sched_time_slice, "SchedulerTimeSlice", + hashtbl)) + conf->sched_time_slice = DEFAULT_SCHED_TIME_SLICE; + if (!s_p_get_string(&conf->schedtype, "SchedulerType", hashtbl)) conf->schedtype = xstrdup(DEFAULT_SCHEDTYPE); @@ -1565,11 +1842,7 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) conf->slurm_user_name); xfree(conf->slurm_user_name); } else { - if (slurm_passwd->pw_uid > 0xffff) - error("SlurmUser numeric overflow, " - "will be fixed soon"); - else - conf->slurm_user_id = slurm_passwd->pw_uid; + conf->slurm_user_id = slurm_passwd->pw_uid; } } @@ -1617,6 +1890,16 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) "StateSaveLocation", hashtbl)) conf->state_save_location = xstrdup(DEFAULT_SAVE_STATE_LOC); + s_p_get_string(&conf->suspend_exc_nodes, "SuspendExcNodes", hashtbl); + s_p_get_string(&conf->suspend_exc_parts, "SuspendExcParts", hashtbl); + s_p_get_string(&conf->suspend_program, "SuspendProgram", hashtbl); + if (!s_p_get_uint16(&conf->suspend_rate, "SuspendRate", hashtbl)) + conf->suspend_rate = DEFAULT_SUSPEND_RATE; + if (s_p_get_long(&long_suspend_time, "SuspendTime", hashtbl)) + conf->suspend_time = long_suspend_time + 1; + else + conf->suspend_time = 0; + /* see above for switch_type, order dependent */ if (!s_p_get_string(&conf->task_plugin, "TaskPlugin", hashtbl)) @@ -1663,6 +1946,11 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl) if (!s_p_get_uint16(&conf->unkillable_timeout, "UnkillableStepTimeout", hashtbl)) conf->unkillable_timeout = DEFAULT_UNKILLABLE_TIMEOUT; + xfree(default_storage_type); + xfree(default_storage_loc); + xfree(default_storage_host); + xfree(default_storage_user); + xfree(default_storage_pass); } /* diff --git a/src/common/read_config.h b/src/common/read_config.h index fa2156dd1..8badcb691 100644 --- a/src/common/read_config.h +++ b/src/common/read_config.h @@ -5,7 +5,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Mette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -16,7 +16,7 @@ * any later version. 
* * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -48,22 +48,30 @@ extern char *default_slurm_config_file; extern char *default_plugin_path; extern char *default_plugstack; +#define DEFAULT_ACCOUNTING_ENFORCE 0 +#define DEFAULT_ACCOUNTING_STORAGE_TYPE "accounting_storage/none" #define DEFAULT_AUTH_TYPE "auth/none" #define DEFAULT_CACHE_GROUPS 0 -#define DEFAULT_DISABLE_ROOT_JOBS 0 +#define DEFAULT_CRYPTO_TYPE "crypto/openssl" +#define DEFAULT_EPILOG_MSG_TIME 2000 #define DEFAULT_FAST_SCHEDULE 1 #define DEFAULT_FIRST_JOB_ID 1 #define DEFAULT_GET_ENV_TIMEOUT 2 /* NOTE: DEFAULT_INACTIVE_LIMIT must be 0 for Blue Gene/L systems */ #define DEFAULT_INACTIVE_LIMIT 0 -#define DEFAULT_JOB_ACCT_LOGFILE "/var/log/slurm_accounting.log" -#define DEFAULT_JOB_ACCT_FREQ 30 -#define DEFAULT_JOB_ACCT_TYPE "jobacct/none" +#define DEFAULT_JOB_ACCT_GATHER_TYPE "jobacct_gather/none" +#define JOB_ACCT_GATHER_TYPE_NONE "jobacct_gather/none" +#define DEFAULT_JOB_ACCT_GATHER_FREQ 30 +#define ACCOUNTING_STORAGE_TYPE_NONE "accounting_storage/none" +#define DEFAULT_DISABLE_ROOT_JOBS 0 #define DEFAULT_JOB_COMP_TYPE "jobcomp/none" +#define DEFAULT_JOB_COMP_LOC "/var/log/slurm_jobcomp.log" #define DEFAULT_KILL_TREE 0 #define DEFAULT_KILL_WAIT 30 #define DEFAULT_MAIL_PROG "/bin/mail" -#define DEFAULT_MAX_JOB_COUNT 2000 +#define DEFAULT_MAX_JOB_COUNT 5000 +#define DEFAULT_MEM_PER_TASK 0 +#define DEFAULT_MAX_MEM_PER_TASK 0 #define DEFAULT_MIN_JOB_AGE 300 #define DEFAULT_MPI_DEFAULT "none" #define DEFAULT_MSG_TIMEOUT 10 @@ -76,9 +84,11 @@ extern char *default_plugstack; #endif #define DEFAULT_PROPAGATE_PRIO_PROCESS 0 #define DEFAULT_RETURN_TO_SERVICE 0 +#define DEFAULT_RESUME_RATE 60 #define DEFAULT_SAVE_STATE_LOC "/tmp" #define DEFAULT_SCHEDROOTFILTER 1 #define DEFAULT_SCHEDULER_PORT 7321 +#define DEFAULT_SCHED_TIME_SLICE 30 #define DEFAULT_SCHEDTYPE "sched/builtin" #ifdef HAVE_BG /* Blue Gene specific default configuration parameters */ # define DEFAULT_SELECT_TYPE "select/bluegene" @@ -90,6 +100,12 @@ extern char *default_plugstack; #define DEFAULT_SLURMD_PIDFILE "/var/run/slurmd.pid" #define DEFAULT_SLURMD_TIMEOUT 300 #define DEFAULT_SPOOLDIR "/var/spool/slurmd" +#define DEFAULT_STORAGE_HOST "localhost" +#define DEFAULT_STORAGE_LOC "/var/log/slurm_jobacct.log" +#define DEFAULT_STORAGE_USER "root" +#define DEFAULT_STORAGE_PORT 0 +#define DEFAULT_SUSPEND_RATE 60 +#define DEFAULT_SUSPEND_TIME 0 #define DEFAULT_SWITCH_TYPE "switch/none" #define DEFAULT_TASK_PLUGIN "task/none" #define DEFAULT_TMP_FS "/tmp" @@ -116,6 +132,9 @@ typedef struct slurm_conf_node { } slurm_conf_node_t; typedef struct slurm_conf_partition { + uint16_t disable_root_jobs; /* if set then user root can't run + * jobs if NO_VAL use global + * default */ char *name; /* name of the partition */ bool hidden_flag; /* 1 if hidden by default */ uint32_t max_time; /* minutes or INFINITE */ @@ -123,10 +142,10 @@ typedef struct slurm_conf_partition { uint32_t min_nodes; /* per job */ uint32_t total_nodes; /* total number of nodes in the partition */ uint32_t total_cpus; /* total number of cpus in the partition */ + uint16_t priority; /* scheduling priority for 
jobs */ bool root_only_flag;/* 1 if allocate/submit RPC can only be issued by user root */ - uint16_t shared; /* 1 if job can share a node, - 2 if sharing required */ + uint16_t max_share; /* number of jobs to gang schedule */ bool state_up_flag; /* 1 if state is up, 0 if down */ char *nodes; /* comma delimited list names of nodes */ char *allow_groups; /* comma delimited list of groups, diff --git a/src/common/safeopen.c b/src/common/safeopen.c index 3843a5a61..21e1bd8c1 100644 --- a/src/common/safeopen.c +++ b/src/common/safeopen.c @@ -1,10 +1,10 @@ /*****************************************************************************\ * safeopen.c - safer interface to open() - * $Id: safeopen.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: safeopen.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/safeopen.h b/src/common/safeopen.h index e119bee1b..3708a2657 100644 --- a/src/common/safeopen.h +++ b/src/common/safeopen.h @@ -1,10 +1,10 @@ /*****************************************************************************\ * safeopen.h - safer interface to open() - * $Id: safeopen.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: safeopen.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c new file mode 100644 index 000000000..31d94d627 --- /dev/null +++ b/src/common/slurm_accounting_storage.c @@ -0,0 +1,2029 @@ +/*****************************************************************************\ + * slurm_accounting_storage.c - account storage plugin wrapper. + * + * $Id: slurm_accounting_storage.c 10744 2007-01-11 20:09:18Z da $ + ***************************************************************************** + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Aubke <da@llnl.gov>. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#include <pthread.h> + +#include "src/common/list.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/plugin.h" +#include "src/common/plugrack.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/xstring.h" +#include "src/slurmctld/slurmctld.h" +#include "src/sacctmgr/sacctmgr.h" + +/* + * Local data + */ + +typedef struct slurm_acct_storage_ops { + void *(*get_conn) (bool make_agent, bool rollback); + int (*close_conn) (void **db_conn); + int (*commit) (void *db_conn, bool commit); + int (*add_users) (void *db_conn, uint32_t uid, + List user_list); + int (*add_coord) (void *db_conn, uint32_t uid, + char *acct, + acct_user_cond_t *user_q); + int (*add_accts) (void *db_conn, uint32_t uid, + List acct_list); + int (*add_clusters) (void *db_conn, uint32_t uid, + List cluster_list); + int (*add_associations) (void *db_conn, uint32_t uid, + List association_list); + List (*modify_users) (void *db_conn, uint32_t uid, + acct_user_cond_t *user_q, + acct_user_rec_t *user); + List (*modify_accts) (void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct); + List (*modify_clusters) (void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster); + List (*modify_associations)(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc); + List (*remove_users) (void *db_conn, uint32_t uid, + acct_user_cond_t *user_q); + List (*remove_coord) (void *db_conn, uint32_t uid, + char *acct, + acct_user_cond_t *user_q); + List (*remove_accts) (void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q); + List (*remove_clusters) (void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q); + List (*remove_associations)(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q); + List (*get_users) (void *db_conn, + acct_user_cond_t *user_q); + List (*get_accts) (void *db_conn, + acct_account_cond_t *acct_q); + List (*get_clusters) (void *db_conn, + acct_cluster_cond_t *cluster_q); + List (*get_associations) (void *db_conn, + acct_association_cond_t *assoc_q); + int (*get_usage) (void *db_conn, + void *acct_assoc, + time_t start, + time_t end); + int (*roll_usage) (void *db_conn, + time_t sent_start); + int (*node_down) (void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, + char *reason); + int (*node_up) (void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time); + int (*cluster_procs) (void *db_conn, + char *cluster, + uint32_t procs, time_t event_time); + int (*c_get_usage) (void 
*db_conn, + void *cluster_rec, + time_t start, time_t end); + int (*register_ctld) (char *cluster, uint16_t port); + int (*job_start) (void *db_conn, + struct job_record *job_ptr); + int (*job_complete) (void *db_conn, + struct job_record *job_ptr); + int (*step_start) (void *db_conn, + struct step_record *step_ptr); + int (*step_complete) (void *db_conn, + struct step_record *step_ptr); + int (*job_suspend) (void *db_conn, + struct job_record *job_ptr); + List (*get_jobs) (void *db_conn, + List selected_steps, + List selected_parts, + void *params); + void (*job_archive) (void *db_conn, + List selected_parts, void *params); + int (*update_shares_used) (void *db_conn, + List shares_used); + int (*flush_jobs) (void *db_conn, + char *cluster, + time_t event_time); +} slurm_acct_storage_ops_t; + +typedef struct slurm_acct_storage_context { + char *acct_storage_type; + plugrack_t plugin_list; + plugin_handle_t cur_plugin; + int acct_storage_errno; + slurm_acct_storage_ops_t ops; +} slurm_acct_storage_context_t; + +static slurm_acct_storage_context_t * g_acct_storage_context = NULL; +static pthread_mutex_t g_acct_storage_context_lock = + PTHREAD_MUTEX_INITIALIZER; + +/* + * Local functions + */ +static slurm_acct_storage_ops_t *_acct_storage_get_ops( + slurm_acct_storage_context_t *c); +static slurm_acct_storage_context_t *_acct_storage_context_create( + const char *acct_storage_type); +static int _acct_storage_context_destroy( + slurm_acct_storage_context_t *c); + +/* + * Locate and load the appropriate plugin + */ +static slurm_acct_storage_ops_t * _acct_storage_get_ops( + slurm_acct_storage_context_t *c) +{ + /* + * Must be synchronized with slurm_acct_storage_ops_t above. + */ + static const char *syms[] = { + "acct_storage_p_get_connection", + "acct_storage_p_close_connection", + "acct_storage_p_commit", + "acct_storage_p_add_users", + "acct_storage_p_add_coord", + "acct_storage_p_add_accts", + "acct_storage_p_add_clusters", + "acct_storage_p_add_associations", + "acct_storage_p_modify_users", + "acct_storage_p_modify_accts", + "acct_storage_p_modify_clusters", + "acct_storage_p_modify_associations", + "acct_storage_p_remove_users", + "acct_storage_p_remove_coord", + "acct_storage_p_remove_accts", + "acct_storage_p_remove_clusters", + "acct_storage_p_remove_associations", + "acct_storage_p_get_users", + "acct_storage_p_get_accts", + "acct_storage_p_get_clusters", + "acct_storage_p_get_associations", + "acct_storage_p_get_usage", + "acct_storage_p_roll_usage", + "clusteracct_storage_p_node_down", + "clusteracct_storage_p_node_up", + "clusteracct_storage_p_cluster_procs", + "clusteracct_storage_p_get_usage", + "clusteracct_storage_p_register_ctld", + "jobacct_storage_p_job_start", + "jobacct_storage_p_job_complete", + "jobacct_storage_p_step_start", + "jobacct_storage_p_step_complete", + "jobacct_storage_p_suspend", + "jobacct_storage_p_get_jobs", + "jobacct_storage_p_archive", + "acct_storage_p_update_shares_used", + "acct_storage_p_flush_jobs_on_cluster" + }; + int n_syms = sizeof( syms ) / sizeof( char * ); + + /* Get plugin list. 
*/ + if ( c->plugin_list == NULL ) { + char *plugin_dir; + c->plugin_list = plugrack_create(); + if ( c->plugin_list == NULL ) { + error( "cannot create plugin manager" ); + return NULL; + } + plugrack_set_major_type( c->plugin_list, "accounting_storage" ); + plugrack_set_paranoia( c->plugin_list, + PLUGRACK_PARANOIA_NONE, + 0 ); + plugin_dir = slurm_get_plugin_dir(); + plugrack_read_dir( c->plugin_list, plugin_dir ); + xfree(plugin_dir); + } + + c->cur_plugin = plugrack_use_by_type( c->plugin_list, + c->acct_storage_type ); + if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) { + error( "cannot find accounting_storage plugin for %s", + c->acct_storage_type ); + return NULL; + } + + /* Dereference the API. */ + if ( plugin_get_syms( c->cur_plugin, + n_syms, + syms, + (void **) &c->ops ) < n_syms ) { + error( "incomplete acct_storage plugin detected" ); + return NULL; + } + + return &c->ops; +} + +/* + * Create a acct_storage context + */ +static slurm_acct_storage_context_t *_acct_storage_context_create( + const char *acct_storage_type) +{ + slurm_acct_storage_context_t *c; + + if ( acct_storage_type == NULL ) { + debug3( "_acct_storage_context_create: no uler type" ); + return NULL; + } + + c = xmalloc( sizeof( slurm_acct_storage_context_t ) ); + c->acct_storage_type = xstrdup( acct_storage_type ); + c->plugin_list = NULL; + c->cur_plugin = PLUGIN_INVALID_HANDLE; + c->acct_storage_errno = SLURM_SUCCESS; + + return c; +} + +/* + * Destroy a acct_storage context + */ +static int _acct_storage_context_destroy(slurm_acct_storage_context_t *c) +{ + /* + * Must check return code here because plugins might still + * be loaded and active. + */ + if ( c->plugin_list ) { + if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) { + return SLURM_ERROR; + } + } + + xfree( c->acct_storage_type ); + xfree( c ); + + return SLURM_SUCCESS; +} + +extern void destroy_acct_user_rec(void *object) +{ + acct_user_rec_t *acct_user = (acct_user_rec_t *)object; + + if(acct_user) { + if(acct_user->assoc_list) + list_destroy(acct_user->assoc_list); + if(acct_user->coord_accts) + list_destroy(acct_user->coord_accts); + xfree(acct_user->default_acct); + xfree(acct_user->name); + xfree(acct_user); + } +} + +extern void destroy_acct_account_rec(void *object) +{ + acct_account_rec_t *acct_account = + (acct_account_rec_t *)object; + + if(acct_account) { + if(acct_account->assoc_list) + list_destroy(acct_account->assoc_list); + if(acct_account->coordinators) + list_destroy(acct_account->coordinators); + xfree(acct_account->description); + xfree(acct_account->name); + xfree(acct_account->organization); + xfree(acct_account); + } +} + +extern void destroy_acct_coord_rec(void *object) +{ + acct_coord_rec_t *acct_coord = + (acct_coord_rec_t *)object; + + if(acct_coord) { + xfree(acct_coord->acct_name); + xfree(acct_coord); + } +} + +extern void destroy_cluster_accounting_rec(void *object) +{ + cluster_accounting_rec_t *clusteracct_rec = + (cluster_accounting_rec_t *)object; + + if(clusteracct_rec) { + xfree(clusteracct_rec); + } +} + +extern void destroy_acct_cluster_rec(void *object) +{ + acct_cluster_rec_t *acct_cluster = + (acct_cluster_rec_t *)object; + + if(acct_cluster) { + if(acct_cluster->accounting_list) + list_destroy(acct_cluster->accounting_list); + xfree(acct_cluster->control_host); + xfree(acct_cluster->name); + xfree(acct_cluster); + } +} + +extern void destroy_acct_accounting_rec(void *object) +{ + acct_accounting_rec_t *acct_accounting = + (acct_accounting_rec_t *)object; + + if(acct_accounting) { + 
xfree(acct_accounting); + } +} + +extern void destroy_acct_association_rec(void *object) +{ + acct_association_rec_t *acct_association = + (acct_association_rec_t *)object; + + if(acct_association) { + if(acct_association->accounting_list) + list_destroy(acct_association->accounting_list); + xfree(acct_association->acct); + xfree(acct_association->cluster); + xfree(acct_association->parent_acct); + xfree(acct_association->partition); + xfree(acct_association->user); + xfree(acct_association); + } +} + +extern void destroy_acct_user_cond(void *object) +{ + acct_user_cond_t *acct_user = (acct_user_cond_t *)object; + + if(acct_user) { + destroy_acct_association_cond(acct_user->assoc_cond); + if(acct_user->def_acct_list) + list_destroy(acct_user->def_acct_list); + if(acct_user->user_list) + list_destroy(acct_user->user_list); + xfree(acct_user); + } +} + +extern void destroy_acct_account_cond(void *object) +{ + acct_account_cond_t *acct_account = + (acct_account_cond_t *)object; + + if(acct_account) { + if(acct_account->acct_list) + list_destroy(acct_account->acct_list); + destroy_acct_association_cond(acct_account->assoc_cond); + if(acct_account->description_list) + list_destroy(acct_account->description_list); + if(acct_account->organization_list) + list_destroy(acct_account->organization_list); + xfree(acct_account); + } +} + +extern void destroy_acct_cluster_cond(void *object) +{ + acct_cluster_cond_t *acct_cluster = + (acct_cluster_cond_t *)object; + + if(acct_cluster) { + if(acct_cluster->cluster_list) + list_destroy(acct_cluster->cluster_list); + xfree(acct_cluster); + } +} + +extern void destroy_acct_association_cond(void *object) +{ + acct_association_cond_t *acct_association = + (acct_association_cond_t *)object; + + if(acct_association) { + if(acct_association->acct_list) + list_destroy(acct_association->acct_list); + if(acct_association->cluster_list) + list_destroy(acct_association->cluster_list); + if(acct_association->id_list) + list_destroy(acct_association->id_list); + if(acct_association->partition_list) + list_destroy(acct_association->partition_list); + xfree(acct_association->parent_acct); + if(acct_association->user_list) + list_destroy(acct_association->user_list); + xfree(acct_association); + } +} + +extern void destroy_acct_update_object(void *object) +{ + acct_update_object_t *acct_update = + (acct_update_object_t *) object; + + if(acct_update) { + if(acct_update->objects) + list_destroy(acct_update->objects); + xfree(acct_update); + } +} + +extern void destroy_update_shares_rec(void *object) +{ + xfree(object); +} + +/****************************************************************************\ + * Pack and unpack data structures +\****************************************************************************/ +extern void pack_acct_user_rec(void *in, Buf buffer) +{ + ListIterator itr = NULL; + acct_user_rec_t *object = (acct_user_rec_t *)in; + uint32_t count = 0; + acct_coord_rec_t *coord = NULL; + acct_association_rec_t *assoc = NULL; + + if(!object) { + pack16(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + packnull(buffer); + packnull(buffer); + pack16(0, buffer); + pack32(0, buffer); + return; + } + + pack16((uint16_t)object->admin_level, buffer); + if(object->assoc_list) + count = list_count(object->assoc_list); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->assoc_list); + while((assoc = list_next(itr))) { + pack_acct_association_rec(assoc, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + 
if(object->coord_accts) + count = list_count(object->coord_accts); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->coord_accts); + while((coord = list_next(itr))) { + pack_acct_coord_rec(coord, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + packstr(object->default_acct, buffer); + packstr(object->name, buffer); + pack16((uint16_t)object->qos, buffer); + pack32(object->uid, buffer); +} + +extern int unpack_acct_user_rec(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + acct_user_rec_t *object_ptr = xmalloc(sizeof(acct_user_rec_t)); + uint32_t count = 0; + acct_coord_rec_t *coord = NULL; + acct_association_rec_t *assoc = NULL; + int i; + + *object = object_ptr; + safe_unpack16((uint16_t *)&object_ptr->admin_level, buffer); + safe_unpack32(&count, buffer); + if(count) { + object_ptr->assoc_list = + list_create(destroy_acct_association_rec); + for(i=0; i<count; i++) { + unpack_acct_association_rec((void *)&assoc, buffer); + list_append(object_ptr->assoc_list, assoc); + } + } + safe_unpack32(&count, buffer); + if(count) { + object_ptr->coord_accts = list_create(destroy_acct_coord_rec); + for(i=0; i<count; i++) { + unpack_acct_coord_rec((void *)&coord, buffer); + list_append(object_ptr->coord_accts, coord); + } + } + safe_unpackstr_xmalloc(&object_ptr->default_acct, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer); + safe_unpack16((uint16_t *)&object_ptr->qos, buffer); + safe_unpack32(&object_ptr->uid, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_user_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_update_shares_used(void *object, Buf buffer) +{ + shares_used_object_t *object_ptr = (shares_used_object_t *) object; + pack32(object_ptr->assoc_id, buffer); + pack32(object_ptr->shares_used, buffer); +} + +extern int unpack_update_shares_used(void **object, Buf buffer) +{ + shares_used_object_t *object_ptr = xmalloc(sizeof(shares_used_object_t)); + + *object = (void *) object_ptr; + safe_unpack32(&object_ptr->assoc_id, buffer); + safe_unpack32(&object_ptr->shares_used, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_update_shares_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} +extern void pack_acct_account_rec(void *in, Buf buffer) +{ + char *coord = NULL; + ListIterator itr = NULL; + uint32_t count = 0; + acct_account_rec_t *object = (acct_account_rec_t *)in; + acct_association_rec_t *assoc = NULL; + + if(!object) { + pack32(0, buffer); + pack32(0, buffer); + packnull(buffer); + packnull(buffer); + packnull(buffer); + pack16(0, buffer); + return; + } + + if(object->assoc_list) + count = list_count(object->assoc_list); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->assoc_list); + while((assoc = list_next(itr))) { + pack_acct_association_rec(assoc, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + if(object->coordinators) + count = list_count(object->coordinators); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->coordinators); + while((coord = list_next(itr))) { + packstr(coord, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + packstr(object->description, buffer); + packstr(object->name, buffer); + packstr(object->organization, buffer); + pack16((uint16_t)object->qos, buffer); +} + +extern int unpack_acct_account_rec(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + int i; + uint32_t count; + char *coord = NULL; + acct_association_rec_t 
*assoc = NULL; + acct_account_rec_t *object_ptr = xmalloc(sizeof(acct_account_rec_t)); + + *object = object_ptr; + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->assoc_list = + list_create(destroy_acct_association_rec); + for(i=0; i<count; i++) { + unpack_acct_association_rec((void *)&assoc, buffer); + list_append(object_ptr->assoc_list, assoc); + } + } + safe_unpack32(&count, buffer); + if(count) { + object_ptr->coordinators = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&coord, &uint32_tmp, buffer); + list_append(object_ptr->coordinators, coord); + } + } + safe_unpackstr_xmalloc(&object_ptr->description, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&object_ptr->organization, &uint32_tmp, buffer); + safe_unpack16((uint16_t *)&object_ptr->qos, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_account_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_coord_rec(void *in, Buf buffer) +{ + acct_coord_rec_t *object = (acct_coord_rec_t *)in; + + if(!object) { + packnull(buffer); + pack16(0, buffer); + return; + } + + packstr(object->acct_name, buffer); + pack16(object->sub_acct, buffer); +} + +extern int unpack_acct_coord_rec(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + acct_coord_rec_t *object_ptr = xmalloc(sizeof(acct_coord_rec_t)); + + *object = object_ptr; + safe_unpackstr_xmalloc(&object_ptr->acct_name, &uint32_tmp, buffer); + safe_unpack16(&object_ptr->sub_acct, buffer); + +unpack_error: + destroy_acct_coord_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_cluster_accounting_rec(void *in, Buf buffer) +{ + cluster_accounting_rec_t *object = (cluster_accounting_rec_t *)in; + + if(!object) { + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack_time(0, buffer); + pack32(0, buffer); + return; + } + + pack32(object->alloc_secs, buffer); + pack32(object->cpu_count, buffer); + pack32(object->down_secs, buffer); + pack32(object->idle_secs, buffer); + pack_time(object->period_start, buffer); + pack32(object->resv_secs, buffer); +} + +extern int unpack_cluster_accounting_rec(void **object, Buf buffer) +{ + cluster_accounting_rec_t *object_ptr = + xmalloc(sizeof(cluster_accounting_rec_t)); + + *object = object_ptr; + safe_unpack32(&object_ptr->alloc_secs, buffer); + safe_unpack32(&object_ptr->cpu_count, buffer); + safe_unpack32(&object_ptr->down_secs, buffer); + safe_unpack32(&object_ptr->idle_secs, buffer); + safe_unpack_time(&object_ptr->period_start, buffer); + safe_unpack32(&object_ptr->resv_secs, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_cluster_accounting_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_cluster_rec(void *in, Buf buffer) +{ + cluster_accounting_rec_t *acct_info = NULL; + ListIterator itr = NULL; + uint32_t count = 0; + acct_cluster_rec_t *object = (acct_cluster_rec_t *)in; + + if(!object) { + pack32(0, buffer); + packnull(buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + packnull(buffer); + return; + } + + if(object->accounting_list) + count = list_count(object->accounting_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->accounting_list); + while((acct_info = list_next(itr))) { + pack_cluster_accounting_rec(acct_info, buffer); + } + 
list_iterator_destroy(itr); + } + count = 0; + + packstr(object->control_host, buffer); + pack32(object->control_port, buffer); + pack32(object->default_fairshare, buffer); + pack32(object->default_max_cpu_secs_per_job, buffer); + pack32(object->default_max_jobs, buffer); + pack32(object->default_max_nodes_per_job, buffer); + pack32(object->default_max_wall_duration_per_job, buffer); + + packstr(object->name, buffer); +} + +extern int unpack_acct_cluster_rec(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + int i; + uint32_t count; + acct_cluster_rec_t *object_ptr = xmalloc(sizeof(acct_cluster_rec_t)); + cluster_accounting_rec_t *acct_info = NULL; + + *object = object_ptr; + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->accounting_list = + list_create(destroy_cluster_accounting_rec); + for(i=0; i<count; i++) { + unpack_cluster_accounting_rec((void *)&acct_info, + buffer); + list_append(object_ptr->accounting_list, acct_info); + } + } + safe_unpackstr_xmalloc(&object_ptr->control_host, &uint32_tmp, buffer); + safe_unpack32(&object_ptr->control_port, buffer); + safe_unpack32(&object_ptr->default_fairshare, buffer); + safe_unpack32(&object_ptr->default_max_cpu_secs_per_job, buffer); + safe_unpack32(&object_ptr->default_max_jobs, buffer); + safe_unpack32(&object_ptr->default_max_nodes_per_job, buffer); + safe_unpack32(&object_ptr->default_max_wall_duration_per_job, buffer); + safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_cluster_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_accounting_rec(void *in, Buf buffer) +{ + acct_accounting_rec_t *object = (acct_accounting_rec_t *)in; + + if(!object) { + pack_time(0, buffer); + pack32(0, buffer); + return; + } + + pack_time(object->period_start, buffer); + pack32(object->alloc_secs, buffer); +} + +extern int unpack_acct_accounting_rec(void **object, Buf buffer) +{ + acct_accounting_rec_t *object_ptr = + xmalloc(sizeof(acct_accounting_rec_t)); + + *object = object_ptr; + safe_unpack_time(&object_ptr->period_start, buffer); + safe_unpack32(&object_ptr->alloc_secs, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_accounting_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_association_rec(void *in, Buf buffer) +{ + acct_accounting_rec_t *acct_info = NULL; + ListIterator itr = NULL; + uint32_t count = 0; + acct_association_rec_t *object = (acct_association_rec_t *)in; + + if(!object) { + pack32(0, buffer); + packnull(buffer); + packnull(buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + packnull(buffer); + pack32(0, buffer); + packnull(buffer); + pack32(0, buffer); + pack32(0, buffer); + packnull(buffer); + return; + } + + if(object->accounting_list) + count = list_count(object->accounting_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->accounting_list); + while((acct_info = list_next(itr))) { + pack_acct_accounting_rec(acct_info, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + packstr(object->acct, buffer); + packstr(object->cluster, buffer); + pack32(object->fairshare, buffer); + pack32(object->id, buffer); + pack32(object->max_cpu_secs_per_job, buffer); + pack32(object->max_jobs, buffer); + pack32(object->max_nodes_per_job, buffer); + pack32(object->max_wall_duration_per_job, buffer); + packstr(object->parent_acct, buffer); + 
pack32(object->parent_id, buffer); + packstr(object->partition, buffer); + pack32(object->uid, buffer); + pack32(object->used_share, buffer); + packstr(object->user, buffer); +} + +extern int unpack_acct_association_rec(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + int i; + uint32_t count; + acct_association_rec_t *object_ptr = + xmalloc(sizeof(acct_association_rec_t)); + acct_accounting_rec_t *acct_info = NULL; + + *object = object_ptr; + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->accounting_list = + list_create(destroy_acct_accounting_rec); + for(i=0; i<count; i++) { + unpack_acct_accounting_rec((void **)&acct_info, buffer); + list_append(object_ptr->accounting_list, acct_info); + } + } + safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp, buffer); + safe_unpack32(&object_ptr->fairshare, buffer); + safe_unpack32(&object_ptr->id, buffer); + safe_unpack32(&object_ptr->max_cpu_secs_per_job, buffer); + safe_unpack32(&object_ptr->max_jobs, buffer); + safe_unpack32(&object_ptr->max_nodes_per_job, buffer); + safe_unpack32(&object_ptr->max_wall_duration_per_job, buffer); + safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp, buffer); + safe_unpack32(&object_ptr->parent_id, buffer); + safe_unpackstr_xmalloc(&object_ptr->partition, &uint32_tmp, buffer); + safe_unpack32(&object_ptr->uid, buffer); + safe_unpack32(&object_ptr->used_share, buffer); + safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer); + + //log_assoc_rec(object_ptr); + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_association_rec(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_user_cond(void *in, Buf buffer) +{ + char *tmp_info = NULL; + ListIterator itr = NULL; + acct_user_cond_t *object = (acct_user_cond_t *)in; + uint32_t count = 0; + + if(!object) { + pack16(0, buffer); + pack_acct_association_cond(NULL, buffer); + pack32(0, buffer); + pack16(0, buffer); + pack32(0, buffer); + pack16(0, buffer); + return; + } + + pack16((uint16_t)object->admin_level, buffer); + + pack_acct_association_cond(object->assoc_cond, buffer); + + if(object->def_acct_list) + count = list_count(object->def_acct_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->def_acct_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + pack16((uint16_t)object->qos, buffer); + + if(object->user_list) + count = list_count(object->user_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->user_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + pack16((uint16_t)object->with_assocs, buffer); + +} + +extern int unpack_acct_user_cond(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + int i; + uint32_t count; + acct_user_cond_t *object_ptr = xmalloc(sizeof(acct_user_cond_t)); + char *tmp_info = NULL; + + *object = object_ptr; + + safe_unpack16((uint16_t *)&object_ptr->admin_level, buffer); + + if(unpack_acct_association_cond((void **)&object_ptr->assoc_cond, + buffer) == SLURM_ERROR) + goto unpack_error; + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->def_acct_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->def_acct_list, tmp_info); + } + } + safe_unpack16((uint16_t *)&object_ptr->qos, buffer); 
+ safe_unpack32(&count, buffer); + if(count) { + object_ptr->user_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->user_list, tmp_info); + } + } + safe_unpack16((uint16_t *)&object_ptr->with_assocs, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_user_cond(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_account_cond(void *in, Buf buffer) +{ + char *tmp_info = NULL; + ListIterator itr = NULL; + acct_account_cond_t *object = (acct_account_cond_t *)in; + uint32_t count = 0; + + if(!object) { + pack32(0, buffer); + pack_acct_association_cond(NULL, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack16(0, buffer); + pack16(0, buffer); + return; + } + if(object->acct_list) + count = list_count(object->acct_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->acct_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + + pack_acct_association_cond(object->assoc_cond, buffer); + + count = 0; + if(object->description_list) + count = list_count(object->description_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->description_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + if(object->organization_list) + count = list_count(object->organization_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->organization_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + pack16((uint16_t)object->qos, buffer); + pack16((uint16_t)object->with_assocs, buffer); +} + +extern int unpack_acct_account_cond(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + int i; + uint32_t count; + acct_account_cond_t *object_ptr = xmalloc(sizeof(acct_account_cond_t)); + char *tmp_info = NULL; + + *object = object_ptr; + safe_unpack32(&count, buffer); + if(count) { + object_ptr->acct_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->acct_list, tmp_info); + } + } + + if(unpack_acct_association_cond((void **)&object_ptr->assoc_cond, + buffer) == SLURM_ERROR) + goto unpack_error; + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->description_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->description_list, tmp_info); + } + } + safe_unpack32(&count, buffer); + if(count) { + object_ptr->organization_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->organization_list, tmp_info); + } + } + safe_unpack16((uint16_t *)&object_ptr->qos, buffer); + safe_unpack16((uint16_t *)&object_ptr->with_assocs, buffer); + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_account_cond(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_cluster_cond(void *in, Buf buffer) +{ + char *tmp_info = NULL; + ListIterator itr = NULL; + acct_cluster_cond_t *object = (acct_cluster_cond_t *)in; + uint32_t count = 0; + + if(!object) { + pack32(0, buffer); + pack16(0, buffer); + return; + } + + if(object->cluster_list) + count = list_count(object->cluster_list); + + 
pack32(count, buffer); + + if(count) { + itr = list_iterator_create(object->cluster_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } +} + +extern int unpack_acct_cluster_cond(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + int i; + uint32_t count; + acct_cluster_cond_t *object_ptr = xmalloc(sizeof(acct_cluster_cond_t)); + char *tmp_info = NULL; + + *object = object_ptr; + safe_unpack32(&count, buffer); + if(count) { + object_ptr->cluster_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->cluster_list, tmp_info); + } + } + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_cluster_cond(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_association_cond(void *in, Buf buffer) +{ + char *tmp_info = NULL; + uint32_t count = 0; + + ListIterator itr = NULL; + acct_association_cond_t *object = (acct_association_cond_t *)in; + + if(!object) { + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + pack32(0, buffer); + packnull(buffer); + pack32(0, buffer); + return; + } + + if(object->acct_list) + count = list_count(object->acct_list); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->acct_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + if(object->cluster_list) + count = list_count(object->cluster_list); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->cluster_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + pack32(object->fairshare, buffer); + + if(object->id_list) + count = list_count(object->id_list); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->id_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + } + count = 0; + + pack32(object->max_cpu_secs_per_job, buffer); + pack32(object->max_jobs, buffer); + pack32(object->max_nodes_per_job, buffer); + pack32(object->max_wall_duration_per_job, buffer); + + if(object->partition_list) + count = list_count(object->partition_list); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->partition_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + count = 0; + + packstr(object->parent_acct, buffer); + + if(object->user_list) + count = list_count(object->user_list); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->user_list); + while((tmp_info = list_next(itr))) { + packstr(tmp_info, buffer); + } + list_iterator_destroy(itr); + } + count = 0; +} + +extern int unpack_acct_association_cond(void **object, Buf buffer) +{ + uint32_t uint32_tmp; + int i; + uint32_t count; + acct_association_cond_t *object_ptr = + xmalloc(sizeof(acct_association_cond_t)); + char *tmp_info = NULL; + + *object = object_ptr; + safe_unpack32(&count, buffer); + if(count) { + object_ptr->acct_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->acct_list, tmp_info); + } + } + safe_unpack32(&count, buffer); + if(count) { + object_ptr->cluster_list = list_create(slurm_destroy_char); + 
for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->cluster_list, tmp_info); + } + } + + safe_unpack32(&object_ptr->fairshare, buffer); + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->id_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->id_list, tmp_info); + } + } + + safe_unpack32(&object_ptr->max_cpu_secs_per_job, buffer); + safe_unpack32(&object_ptr->max_jobs, buffer); + safe_unpack32(&object_ptr->max_nodes_per_job, buffer); + safe_unpack32(&object_ptr->max_wall_duration_per_job, buffer); + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->partition_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->partition_list, tmp_info); + } + } + + safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp, buffer); + + safe_unpack32(&count, buffer); + if(count) { + object_ptr->user_list = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer); + list_append(object_ptr->user_list, tmp_info); + } + } + + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_association_cond(object_ptr); + *object = NULL; + return SLURM_ERROR; +} + +extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer) +{ + uint32_t count = 0; + ListIterator itr = NULL; + void *acct_object = NULL; + void (*my_function) (void *object, Buf buffer); + + pack16(object->type, buffer); + switch(object->type) { + case ACCT_MODIFY_USER: + case ACCT_ADD_USER: + case ACCT_REMOVE_USER: + my_function = pack_acct_user_rec; + break; + case ACCT_ADD_ASSOC: + case ACCT_MODIFY_ASSOC: + case ACCT_REMOVE_ASSOC: + my_function = pack_acct_association_rec; + break; + case ACCT_UPDATE_NOTSET: + default: + error("unknown type set in update_object: %d", object->type); + return; + } + if(object->objects) + count = list_count(object->objects); + + pack32(count, buffer); + if(count) { + itr = list_iterator_create(object->objects); + while((acct_object = list_next(itr))) { + (*(my_function))(acct_object, buffer); + } + list_iterator_destroy(itr); + } +} + +extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer) +{ + int i; + uint32_t count; + acct_update_object_t *object_ptr = + xmalloc(sizeof(acct_update_object_t)); + void *acct_object = NULL; + int (*my_function) (void **object, Buf buffer); + void (*my_destroy) (void *object); + + *object = object_ptr; + + safe_unpack16((uint16_t *)&object_ptr->type, buffer); + switch(object_ptr->type) { + case ACCT_MODIFY_USER: + case ACCT_ADD_USER: + case ACCT_REMOVE_USER: + my_function = unpack_acct_user_rec; + my_destroy = destroy_acct_user_rec; + break; + case ACCT_ADD_ASSOC: + case ACCT_MODIFY_ASSOC: + case ACCT_REMOVE_ASSOC: + my_function = unpack_acct_association_rec; + my_destroy = destroy_acct_association_rec; + break; + case ACCT_UPDATE_NOTSET: + default: + error("unknown type set in update_object: %d", + object_ptr->type); + goto unpack_error; + } + safe_unpack32(&count, buffer); + if(count) { + object_ptr->objects = list_create((*(my_destroy))); + for(i=0; i<count; i++) { + if(((*(my_function))(&acct_object, buffer)) + == SLURM_ERROR) + goto unpack_error; + list_append(object_ptr->objects, acct_object); + } + } + return SLURM_SUCCESS; + +unpack_error: + destroy_acct_update_object(object_ptr); + *object = NULL; + return 
SLURM_ERROR; +} + +extern char *acct_qos_str(acct_qos_level_t level) +{ + switch(level) { + case ACCT_QOS_NOTSET: + return "Not Set"; + break; + case ACCT_QOS_NORMAL: + return "Normal"; + break; + case ACCT_QOS_EXPEDITE: + return "Expedite"; + break; + case ACCT_QOS_STANDBY: + return "Standby"; + break; + case ACCT_QOS_EXEMPT: + return "Exempt"; + break; + default: + return "Unknown"; + break; + } + return "Unknown"; +} + +extern acct_qos_level_t str_2_acct_qos(char *level) +{ + if(!level) { + return ACCT_QOS_NOTSET; + } else if(!strncasecmp(level, "Normal", 1)) { + return ACCT_QOS_NORMAL; + } else if(!strncasecmp(level, "Expedite", 3)) { + return ACCT_QOS_EXPEDITE; + } else if(!strncasecmp(level, "Standby", 1)) { + return ACCT_QOS_STANDBY; + } else if(!strncasecmp(level, "Exempt", 3)) { + return ACCT_QOS_EXEMPT; + } else { + return ACCT_QOS_NOTSET; + } +} + +extern char *acct_admin_level_str(acct_admin_level_t level) +{ + switch(level) { + case ACCT_ADMIN_NOTSET: + return "Not Set"; + break; + case ACCT_ADMIN_NONE: + return "None"; + break; + case ACCT_ADMIN_OPERATOR: + return "Operator"; + break; + case ACCT_ADMIN_SUPER_USER: + return "Administrator"; + break; + default: + return "Unknown"; + break; + } + return "Unknown"; +} + +extern acct_admin_level_t str_2_acct_admin_level(char *level) +{ + if(!level) { + return ACCT_ADMIN_NOTSET; + } else if(!strncasecmp(level, "None", 1)) { + return ACCT_ADMIN_NONE; + } else if(!strncasecmp(level, "Operator", 1)) { + return ACCT_ADMIN_OPERATOR; + } else if(!strncasecmp(level, "SuperUser", 1) + || !strncasecmp(level, "Admin", 1)) { + return ACCT_ADMIN_SUPER_USER; + } else { + return ACCT_ADMIN_NOTSET; + } +} + +extern void log_assoc_rec(acct_association_rec_t *assoc_ptr) +{ + debug("association rec id : %u", assoc_ptr->id); + debug(" acct : %s", assoc_ptr->acct); + debug(" cluster : %s", assoc_ptr->cluster); + if(assoc_ptr->fairshare == INFINITE) + debug(" fairshare : NONE"); + else + debug(" fairshare : %u", + assoc_ptr->fairshare); + if(assoc_ptr->max_cpu_secs_per_job == INFINITE) + debug(" max_cpu_secs_per_job : NONE"); + else + debug(" max_cpu_secs_per_job : %d", + assoc_ptr->max_cpu_secs_per_job); + if(assoc_ptr->max_jobs == INFINITE) + debug(" max_jobs : NONE"); + else + debug(" max_jobs : %u", assoc_ptr->max_jobs); + if(assoc_ptr->max_nodes_per_job == INFINITE) + debug(" max_nodes_per_job : NONE"); + else + debug(" max_nodes_per_job : %d", + assoc_ptr->max_nodes_per_job); + if(assoc_ptr->max_wall_duration_per_job == INFINITE) + debug(" max_wall_duration_per_job : NONE"); + else + debug(" max_wall_duration_per_job : %d", + assoc_ptr->max_wall_duration_per_job); + debug(" parent_acct : %s", assoc_ptr->parent_acct); + debug(" partition : %s", assoc_ptr->partition); + debug(" user : %s(%u)", + assoc_ptr->user, assoc_ptr->uid); + debug(" used_jobs : %u", assoc_ptr->used_jobs); + debug(" used_share : %u", assoc_ptr->used_share); +} + +/* + * Initialize context for acct_storage plugin + */ +extern int slurm_acct_storage_init(char *loc) +{ + int retval = SLURM_SUCCESS; + char *acct_storage_type = NULL; + + slurm_mutex_lock( &g_acct_storage_context_lock ); + + if ( g_acct_storage_context ) + goto done; + if(loc) + slurm_set_accounting_storage_loc(loc); + + acct_storage_type = slurm_get_accounting_storage_type(); + + g_acct_storage_context = _acct_storage_context_create( + acct_storage_type); + if ( g_acct_storage_context == NULL ) { + error( "cannot create acct_storage context for %s", + acct_storage_type ); + retval = SLURM_ERROR; + goto done; + } + 
+ if ( _acct_storage_get_ops( g_acct_storage_context ) == NULL ) { + error( "cannot resolve acct_storage plugin operations" ); + _acct_storage_context_destroy( g_acct_storage_context ); + g_acct_storage_context = NULL; + retval = SLURM_ERROR; + } + +done: + slurm_mutex_unlock( &g_acct_storage_context_lock ); + xfree(acct_storage_type); + return retval; +} + +extern int slurm_acct_storage_fini(void) +{ + int rc; + + if (!g_acct_storage_context) + return SLURM_SUCCESS; + +// (*(g_acct_storage_context->ops.acct_storage_fini))(); + rc = _acct_storage_context_destroy( g_acct_storage_context ); + g_acct_storage_context = NULL; + return rc; +} + +extern void *acct_storage_g_get_connection(bool make_agent, bool rollback) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.get_conn))(make_agent, rollback); +} + +extern int acct_storage_g_close_connection(void **db_conn) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.close_conn))(db_conn); + +} + +extern int acct_storage_g_commit(void *db_conn, bool commit) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.commit))(db_conn, commit); + +} + +extern int acct_storage_g_add_users(void *db_conn, uint32_t uid, + List user_list) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.add_users)) + (db_conn, uid, user_list); +} + +extern int acct_storage_g_add_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.add_coord)) + (db_conn, uid, acct, user_q); +} + +extern int acct_storage_g_add_accounts(void *db_conn, uint32_t uid, + List acct_list) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.add_accts)) + (db_conn, uid, acct_list); +} + +extern int acct_storage_g_add_clusters(void *db_conn, uint32_t uid, + List cluster_list) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.add_clusters)) + (db_conn, uid, cluster_list); +} + +extern int acct_storage_g_add_associations(void *db_conn, uint32_t uid, + List association_list) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.add_associations)) + (db_conn, uid, association_list); +} + +extern List acct_storage_g_modify_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q, + acct_user_rec_t *user) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.modify_users)) + (db_conn, uid, user_q, user); +} + +extern List acct_storage_g_modify_accounts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.modify_accts)) + (db_conn, uid, acct_q, acct); +} + +extern List acct_storage_g_modify_clusters(void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.modify_clusters)) + (db_conn, uid, cluster_q, cluster); +} + +extern List acct_storage_g_modify_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc) +{ + if (slurm_acct_storage_init(NULL) < 0) + 
return NULL; + return (*(g_acct_storage_context->ops.modify_associations)) + (db_conn, uid, assoc_q, assoc); +} + +extern List acct_storage_g_remove_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.remove_users)) + (db_conn, uid, user_q); +} + +extern List acct_storage_g_remove_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.remove_coord)) + (db_conn, uid, acct, user_q); +} + +extern List acct_storage_g_remove_accounts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.remove_accts)) + (db_conn, uid, acct_q); +} + +extern List acct_storage_g_remove_clusters(void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.remove_clusters)) + (db_conn, uid, cluster_q); +} + +extern List acct_storage_g_remove_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.remove_associations)) + (db_conn, uid, assoc_q); +} + +extern List acct_storage_g_get_users(void *db_conn, + acct_user_cond_t *user_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.get_users))(db_conn, user_q); +} + +extern List acct_storage_g_get_accounts(void *db_conn, + acct_account_cond_t *acct_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.get_accts)) + (db_conn, acct_q); +} + +extern List acct_storage_g_get_clusters(void *db_conn, + acct_cluster_cond_t *cluster_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.get_clusters)) + (db_conn, cluster_q); +} + +extern List acct_storage_g_get_associations(void *db_conn, + acct_association_cond_t *assoc_q) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.get_associations)) + (db_conn, assoc_q); +} + +extern int acct_storage_g_get_usage(void *db_conn, + void *acct_assoc, + time_t start, time_t end) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.get_usage)) + (db_conn, acct_assoc, start, end); +} + +extern int acct_storage_g_roll_usage(void *db_conn, + time_t sent_start) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.roll_usage))(db_conn, sent_start); +} + +extern int clusteracct_storage_g_node_down(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, + char *reason) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.node_down)) + (db_conn, cluster, node_ptr, event_time, reason); +} + +extern int clusteracct_storage_g_node_up(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.node_up)) + (db_conn, cluster, node_ptr, event_time); +} + + +extern int clusteracct_storage_g_cluster_procs(void *db_conn, + char *cluster, + uint32_t procs, + time_t event_time) +{ + if 
(slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.cluster_procs)) + (db_conn, cluster, procs, event_time); +} + + +extern int clusteracct_storage_g_get_usage( + void *db_conn, void *cluster_rec, + time_t start, time_t end) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.c_get_usage)) + (db_conn, cluster_rec, start, end); +} + +extern int clusteracct_storage_g_register_ctld(char *cluster, uint16_t port) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.register_ctld))(cluster, port); +} + +/* + * load into the storage the start of a job + */ +extern int jobacct_storage_g_job_start (void *db_conn, + struct job_record *job_ptr) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.job_start))(db_conn, job_ptr); +} + +/* + * load into the storage the end of a job + */ +extern int jobacct_storage_g_job_complete (void *db_conn, + struct job_record *job_ptr) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.job_complete))(db_conn, job_ptr); +} + +/* + * load into the storage the start of a job step + */ +extern int jobacct_storage_g_step_start (void *db_conn, + struct step_record *step_ptr) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.step_start))(db_conn, step_ptr); +} + +/* + * load into the storage the end of a job step + */ +extern int jobacct_storage_g_step_complete (void *db_conn, + struct step_record *step_ptr) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.step_complete))(db_conn, step_ptr); +} + +/* + * load into the storage a suspention of a job + */ +extern int jobacct_storage_g_job_suspend (void *db_conn, + struct job_record *job_ptr) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.job_suspend))(db_conn, job_ptr); +} + + +/* + * get info from the storage + * returns List of job_rec_t * + * note List needs to be freed when called + */ +extern List jobacct_storage_g_get_jobs(void *db_conn, + List selected_steps, + List selected_parts, + void *params) +{ + if (slurm_acct_storage_init(NULL) < 0) + return NULL; + return (*(g_acct_storage_context->ops.get_jobs)) + (db_conn, selected_steps, selected_parts, params); +} + +/* + * expire old info from the storage + */ +extern void jobacct_storage_g_archive(void *db_conn, + List selected_parts, void *params) +{ + if (slurm_acct_storage_init(NULL) < 0) + return; + (*(g_acct_storage_context->ops.job_archive))(db_conn, selected_parts, params); + return; +} + +/* + * record shares used information for backup in case slurmctld restarts + * IN: account_list List of shares_used_object_t * + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_update_shares_used(void *db_conn, List acct_list) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.update_shares_used))(db_conn, + acct_list); +} + +/* + * This should be called when a cluster does a cold start to flush out + * any jobs that were running during the restart so we don't have any + * jobs in the database "running" forever since no endtime will be + * placed in there other wise. 
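The jobacct_storage_g_get_jobs() wrapper above hands ownership of the returned List to the caller, which must release it. A minimal caller sketch, not taken from the patch, assuming only the common list API from src/common/list.h and that NULL selection lists mean "no filtering":

#include "src/common/list.h"
#include "src/common/log.h"
#include "src/common/slurm_accounting_storage.h"

/* Hypothetical helper: report how many job records the storage
 * plugin returned, then release the List the wrapper allocated. */
static void _report_job_count(void *db_conn)
{
	List jobs = jobacct_storage_g_get_jobs(db_conn, NULL, NULL, NULL);

	if (jobs == NULL)
		return;		/* plugin missing or query failed */

	info("accounting storage returned %d job records",
	     list_count(jobs));
	list_destroy(jobs);	/* caller owns the returned List */
}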
+ * IN: char * = cluster name + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_flush_jobs_on_cluster( + void *db_conn, char *cluster, time_t event_time) +{ + if (slurm_acct_storage_init(NULL) < 0) + return SLURM_ERROR; + return (*(g_acct_storage_context->ops.flush_jobs)) + (db_conn, cluster, event_time); + +} + diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h new file mode 100644 index 000000000..710d0457a --- /dev/null +++ b/src/common/slurm_accounting_storage.h @@ -0,0 +1,563 @@ +/*****************************************************************************\ + * slurm_accounting_storage.h - Define accounting storage plugin functions. + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#ifndef _SLURM_ACCOUNTING_STORAGE_H +#define _SLURM_ACCOUNTING_STORAGE_H + +#include "src/common/list.h" +#include "src/slurmctld/slurmctld.h" +#include <slurm/slurm.h> +#include <slurm/slurm_errno.h> +#include <sys/types.h> +#include <pwd.h> + +typedef enum { + ACCT_ADMIN_NOTSET, + ACCT_ADMIN_NONE, + ACCT_ADMIN_OPERATOR, + ACCT_ADMIN_SUPER_USER +} acct_admin_level_t; + +typedef enum { + ACCT_QOS_NOTSET, + ACCT_QOS_NORMAL, + ACCT_QOS_EXPEDITE, + ACCT_QOS_STANDBY, + ACCT_QOS_EXEMPT +} acct_qos_level_t; + +typedef enum { + ACCT_UPDATE_NOTSET, + ACCT_ADD_USER, + ACCT_ADD_ASSOC, + ACCT_MODIFY_USER, + ACCT_MODIFY_ASSOC, + ACCT_REMOVE_USER, + ACCT_REMOVE_ASSOC +} acct_update_type_t; + +/* Association conditions used for queries of the database */ +typedef struct { + List acct_list; /* list of char * */ + List cluster_list; /* list of char * */ + uint32_t fairshare; /* fairshare number */ + List id_list; /* list of char */ + uint32_t max_cpu_secs_per_job; /* max number of cpu seconds this + * association can have per job */ + uint32_t max_jobs; /* max number of jobs this association can run + * at one time */ + uint32_t max_nodes_per_job; /* max number of nodes this + * association can allocate per job */ + uint32_t max_wall_duration_per_job; /* longest time this association + * can run a job (seconds) */ + List partition_list; /* list of char * */ + char *parent_acct; /* name of parent account */ + List user_list; /* list of char * */ +} acct_association_cond_t; + +typedef struct { + List acct_list; /* list of char * */ + acct_association_cond_t *assoc_cond; + List description_list; /* list of char * */ + List organization_list; /* list of char * */ + acct_qos_level_t qos; + uint16_t with_assocs; +} acct_account_cond_t; + +typedef struct { + List assoc_list; /* list of acct_association_rec_t *'s */ + List coordinators; /* list of char *'s */ + char *description; + char *name; + char *organization; + acct_qos_level_t qos; +} acct_account_rec_t; + +typedef struct { + uint32_t alloc_secs; /* number of cpu seconds allocated */ + time_t period_start; +} acct_accounting_rec_t; + +typedef struct acct_association_rec { + List accounting_list; /* list of acct_accounting_rec_t *'s */ + char *acct; /* account/project associated to association */ + char *cluster; /* cluster associated to association */ + uint32_t fairshare; /* fairshare number */ + uint32_t id; /* id identifing a combination of + * user-account-cluster(-partition) */ + uint32_t max_cpu_secs_per_job; /* max number of cpu seconds this + * association can have per job */ + uint32_t max_jobs; /* max number of jobs this association can run + * at one time */ + uint32_t max_nodes_per_job; /* max number of nodes this + * association can allocate per job */ + uint32_t max_wall_duration_per_job; /* longest time this + * association can run a job */ + char *parent_acct; /* name of parent account */ + struct acct_association_rec *parent_acct_ptr; /* ptr to parent acct + * set in slurmctld */ + uint32_t parent_id; /* id of parent account */ + char *partition; /* optional partition in a cluster + * associated to association */ + uint32_t uid; /* user ID */ + uint32_t used_jobs; /* count of active jobs */ + uint32_t used_share; /* measure of resource usage */ + char *user; /* user associated to association */ +} acct_association_rec_t; + +typedef struct { + List cluster_list; /* list of char * */ +} acct_cluster_cond_t; + +typedef struct { + List accounting_list; /* list 
of cluster_accounting_rec_t *'s */ + char *control_host; + uint32_t control_port; + uint32_t default_fairshare; /* fairshare number */ + uint32_t default_max_cpu_secs_per_job; /* max number of cpu seconds this + * association can have per job */ + uint32_t default_max_jobs; /* max number of jobs this association can run + * at one time */ + uint32_t default_max_nodes_per_job; /* max number of nodes this + * association can allocate per job */ + uint32_t default_max_wall_duration_per_job; /* longest time this + * association can run a job */ + char *name; + +} acct_cluster_rec_t; + +typedef struct { + char *acct_name; + uint16_t sub_acct; +} acct_coord_rec_t; + +typedef struct { + acct_admin_level_t admin_level; + acct_association_cond_t *assoc_cond; + List def_acct_list; /* list of char * */ + acct_qos_level_t qos; + List user_list; /* list of char * */ + uint16_t with_assocs; +} acct_user_cond_t; + +typedef struct { + acct_admin_level_t admin_level; + List assoc_list; /* list of acct_association_rec_t *'s */ + List coord_accts; /* list of acct_coord_rec_t *'s */ + char *default_acct; + char *name; + acct_qos_level_t qos; + uint32_t uid; +} acct_user_rec_t; + +typedef struct { + List objects; /* depending on type */ + acct_update_type_t type; +} acct_update_object_t; + +typedef struct { + uint32_t assoc_id; /* association ID */ + uint32_t shares_used; /* measure of recent usage */ +} shares_used_object_t; + +typedef struct { + uint32_t alloc_secs; /* number of cpu seconds allocated */ + uint32_t cpu_count; /* number of cpus during time period */ + uint32_t down_secs; /* number of cpu seconds down */ + uint32_t idle_secs; /* number of cpu seconds idle */ + time_t period_start; /* when this record was started */ + uint32_t resv_secs; /* number of cpu seconds reserved */ +} cluster_accounting_rec_t; + +extern void destroy_acct_user_rec(void *object); +extern void destroy_acct_account_rec(void *object); +extern void destroy_acct_coord_rec(void *object); +extern void destroy_cluster_accounting_rec(void *object); +extern void destroy_acct_cluster_rec(void *object); +extern void destroy_acct_accounting_rec(void *object); +extern void destroy_acct_association_rec(void *object); + +extern void destroy_acct_user_cond(void *object); +extern void destroy_acct_account_cond(void *object); +extern void destroy_acct_cluster_cond(void *object); +extern void destroy_acct_association_cond(void *object); + +extern void destroy_acct_update_object(void *object); +extern void destroy_update_shares_rec(void *object); + +/* pack functions */ +extern void pack_acct_user_rec(void *object, Buf buffer); +extern int unpack_acct_user_rec(void **object, Buf buffer); +extern void pack_acct_account_rec(void *object, Buf buffer); +extern int unpack_acct_account_rec(void **object, Buf buffer); +extern void pack_acct_coord_rec(void *object, Buf buffer); +extern int unpack_acct_coord_rec(void **object, Buf buffer); +extern void pack_cluster_accounting_rec(void *object, Buf buffer); +extern int unpack_cluster_accounting_rec(void **object, Buf buffer); +extern void pack_acct_cluster_rec(void *object, Buf buffer); +extern int unpack_acct_cluster_rec(void **object, Buf buffer); +extern void pack_acct_accounting_rec(void *object, Buf buffer); +extern int unpack_acct_accounting_rec(void **object, Buf buffer); +extern void pack_acct_association_rec(void *object, Buf buffer); +extern int unpack_acct_association_rec(void **object, Buf buffer); + +extern void pack_acct_user_cond(void *object, Buf buffer); +extern int 
unpack_acct_user_cond(void **object, Buf buffer); +extern void pack_acct_account_cond(void *object, Buf buffer); +extern int unpack_acct_account_cond(void **object, Buf buffer); +extern void pack_acct_cluster_cond(void *object, Buf buffer); +extern int unpack_acct_cluster_cond(void **object, Buf buffer); +extern void pack_acct_association_cond(void *object, Buf buffer); +extern int unpack_acct_association_cond(void **object, Buf buffer); + +extern void pack_acct_update_object(acct_update_object_t *object, Buf buffer); +extern int unpack_acct_update_object(acct_update_object_t **object, Buf buffer); + +extern void pack_update_shares_used(void *object, Buf buffer); +extern int unpack_update_shares_used(void **object, Buf buffer); + +extern char *acct_qos_str(acct_qos_level_t level); +extern acct_qos_level_t str_2_acct_qos(char *level); +extern char *acct_admin_level_str(acct_admin_level_t level); +extern acct_admin_level_t str_2_acct_admin_level(char *level); + +extern void log_assoc_rec(acct_association_rec_t *assoc_ptr); + +extern int slurm_acct_storage_init(char *loc); /* load the plugin */ +extern int slurm_acct_storage_fini(void); /* unload the plugin */ + +/* + * get a new connection to the storage unit + * IN: make_agent - Make an agent to manage queued requests + * IN: rollback - maintain journal of changes to permit rollback + * RET: pointer used to access db + */ +extern void *acct_storage_g_get_connection(bool make_agent, bool rollback); + +/* + * release connection to the storage unit + * IN/OUT: void ** pointer returned from + * acct_storage_g_get_connection() which will be freed. + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_close_connection(void **db_conn); + +/* + * commit or rollback changes made without closing connection + * IN: void * pointer returned from acct_storage_g_get_connection() + * IN: bool - true will commit changes false will rollback + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_commit(void *db_conn, bool commit); + +/* + * add users to accounting system + * IN: user_list List of acct_user_rec_t * + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_add_users(void *db_conn, uint32_t uid, + List user_list); + +/* + * add users as account coordinators + * IN: acct name of account + * IN: acct_user_cond_t *user_q + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_add_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q); + + +/* + * add accounts to accounting system + * IN: account_list List of acct_account_rec_t * + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_add_accounts(void *db_conn, uint32_t uid, + List acct_list); + +/* + * add clusters to accounting system + * IN: cluster_list List of acct_cluster_rec_t * + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_add_clusters(void *db_conn, uint32_t uid, + List cluster_list); + +/* + * add accts to accounting system + * IN: association_list List of acct_association_rec_t * + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_add_associations(void *db_conn, uint32_t uid, + List association_list); + +/* + * modify existing users in the accounting system + * IN: acct_user_cond_t *user_q + * IN: acct_user_rec_t *user + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_modify_users(void *db_conn, uint32_t uid, + acct_user_cond_t 
*user_q, + acct_user_rec_t *user); + +/* + * modify existing accounts in the accounting system + * IN: acct_acct_cond_t *acct_q + * IN: acct_account_rec_t *acct + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_modify_accounts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct); + +/* + * modify existing clusters in the accounting system + * IN: acct_cluster_cond_t *cluster_q + * IN: acct_cluster_rec_t *cluster + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_modify_clusters(void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster); + +/* + * modify existing associations in the accounting system + * IN: acct_association_cond_t *assoc_q + * IN: acct_association_rec_t *assoc + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_modify_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc); + +/* + * remove users from accounting system + * IN: acct_user_cond_t *user_q + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_remove_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q); + +/* + * remove users from being a coordinator of an account + * IN: acct name of acct + * IN: acct_user_cond_t *user_q + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_remove_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q); + +/* + * remove accounts from accounting system + * IN: acct_account_cond_t *acct_q + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_remove_accounts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q); + +/* + * remove clusters from accounting system + * IN: acct_cluster_cond_t *cluster_q + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_remove_clusters(void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q); + +/* + * remove associations from accounting system + * IN: acct_association_cond_t *assoc_q + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern List acct_storage_g_remove_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q); + +/* + * get info from the storage + * IN: acct_user_cond_t * + * IN: params void * + * returns List of acct_user_rec_t * + * note List needs to be freed when called + */ +extern List acct_storage_g_get_users(void *db_conn, + acct_user_cond_t *user_q); + +/* + * get info from the storage + * IN: acct_account_cond_t * + * IN: params void * + * returns List of acct_account_rec_t * + * note List needs to be freed when called + */ +extern List acct_storage_g_get_accounts(void *db_conn, + acct_account_cond_t *acct_q); + +/* + * get info from the storage + * IN: acct_cluster_cond_t * + * IN: params void * + * returns List of acct_cluster_rec_t * + * note List needs to be freed when called + */ +extern List acct_storage_g_get_clusters(void *db_conn, + acct_cluster_cond_t *cluster_q); + +/* + * get info from the storage + * IN: acct_association_cond_t * + * RET: List of acct_association_rec_t * + * note List needs to be freed when called + */ +extern List acct_storage_g_get_associations(void *db_conn, + acct_association_cond_t *assoc_q); + +/* + * get info from the storage + * IN/OUT: assoc void * (acct_association_rec_t *) with the id set + * IN: start time stamp for records >= + * IN: end time stamp for records <= + * RET: 
SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_get_usage( + void *db_conn, void *assoc, time_t start, time_t end); +/* + * roll up data in the storage + * IN: sent_start (option time to do a re-roll or start from this point) + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_roll_usage(void *db_conn, + time_t sent_start); +/* + * record shares used information for backup in case slurmctld restarts + * IN: account_list List of shares_used_object_t * + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_update_shares_used(void *db_conn, List acct_list); + +/* + * This should be called when a cluster does a cold start to flush out + * any jobs that were running during the restart so we don't have any + * jobs in the database "running" forever since no endtime will be + * placed in there other wise. + * IN: char * = cluster name + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int acct_storage_g_flush_jobs_on_cluster( + void *db_conn, char *cluster, time_t event_time); + +/*********************** CLUSTER ACCOUNTING STORAGE **************************/ + +extern int clusteracct_storage_g_node_down(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, + char *reason); + +extern int clusteracct_storage_g_node_up(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time); + +extern int clusteracct_storage_g_cluster_procs(void *db_conn, + char *cluster, + uint32_t procs, + time_t event_time); + +extern int clusteracct_storage_g_register_ctld(char *cluster, uint16_t port); + +/* + * get info from the storage + * IN/OUT: cluster_rec void * (acct_cluster_rec_t *) with the name set + * IN: start time stamp for records >= + * IN: end time stamp for records < + * IN: params void * + * RET: SLURM_SUCCESS on success SLURM_ERROR else + */ +extern int clusteracct_storage_g_get_usage( + void *db_conn, void *cluster_rec, + time_t start, time_t end); + +/* + * load into the storage the start of a job + */ +extern int jobacct_storage_g_job_start (void *db_conn, + struct job_record *job_ptr); + +/* + * load into the storage the end of a job + */ +extern int jobacct_storage_g_job_complete (void *db_conn, + struct job_record *job_ptr); + +/* + * load into the storage the start of a job step + */ +extern int jobacct_storage_g_step_start (void *db_conn, + struct step_record *step_ptr); + +/* + * load into the storage the end of a job step + */ +extern int jobacct_storage_g_step_complete (void *db_conn, + struct step_record *step_ptr); + +/* + * load into the storage a suspention of a job + */ +extern int jobacct_storage_g_job_suspend (void *db_conn, + struct job_record *job_ptr); + +/* + * get info from the storage + * returns List of jobacct_job_rec_t * + * note List needs to be freed when called + */ +extern List jobacct_storage_g_get_jobs(void *db_conn, + List selected_steps, + List selected_parts, + void *params); + +/* + * expire old info from the storage + */ +extern void jobacct_storage_g_archive(void *db_conn, + List selected_parts, + void *params); + +#endif /*_SLURM_ACCOUNTING_STORAGE_H*/ diff --git a/src/common/slurm_auth.c b/src/common/slurm_auth.c index 99d101c8f..9e2305b5d 100644 --- a/src/common/slurm_auth.c +++ b/src/common/slurm_auth.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
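Taken together, the declarations above describe a connection-oriented API: open a handle, perform the adds or modifications, then commit or roll back before closing. The sketch below shows that cycle; it is illustrative only, and the user name, account name and rollback choice are invented for the example:

#include "src/common/list.h"
#include "src/common/slurm_accounting_storage.h"
#include "src/common/xmalloc.h"
#include "src/common/xstring.h"

/* Hypothetical example: add one user under an existing account,
 * committing only if the add succeeds. */
static int _add_user_example(uint32_t my_uid)
{
	void *db_conn = acct_storage_g_get_connection(false, true);
	acct_user_rec_t *user;
	List user_list;
	int rc;

	if (db_conn == NULL)
		return SLURM_ERROR;

	user = xmalloc(sizeof(acct_user_rec_t));
	user->name         = xstrdup("alice");	  /* invented user    */
	user->default_acct = xstrdup("physics");  /* invented account */
	user->admin_level  = ACCT_ADMIN_NONE;
	user->qos          = ACCT_QOS_NORMAL;

	user_list = list_create(destroy_acct_user_rec);
	list_append(user_list, user);

	rc = acct_storage_g_add_users(db_conn, my_uid, user_list);
	acct_storage_g_commit(db_conn, (rc == SLURM_SUCCESS));

	list_destroy(user_list);
	acct_storage_g_close_connection(&db_conn);
	return rc;
}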
* Written by Jay Windley <jwindley@lnxi.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -59,11 +59,11 @@ static bool auth_dummy = false; /* for security testing */ * end of the structure. */ typedef struct slurm_auth_ops { - void * (*create) ( void *argv[] ); + void * (*create) ( void *argv[], char *auth_info ); int (*destroy) ( void *cred ); - int (*verify) ( void *cred, void *argv[] ); - uid_t (*get_uid) ( void *cred ); - gid_t (*get_gid) ( void *cred ); + int (*verify) ( void *cred, void *argv[], char *auth_info ); + uid_t (*get_uid) ( void *cred, char *auth_info ); + gid_t (*get_gid) ( void *cred, char *auth_info ); int (*pack) ( void *cred, Buf buf ); void * (*unpack) ( Buf buf ); int (*print) ( void *cred, FILE *fp ); @@ -193,7 +193,7 @@ slurm_auth_marshal_args( void *hosts, int timeout ) if ( ( hostlist_idx == -1 ) && ( timeout_idx == -1 ) ) { hostlist_idx = arg_idx_by_name( auth_args, ARG_HOST_LIST ); - timeout_idx = arg_idx_by_name( auth_args, ARG_TIMEOUT ); + timeout_idx = arg_idx_by_name( auth_args, ARG_TIMEOUT ); } argv = xmalloc( count * sizeof( void * ) ); @@ -287,17 +287,20 @@ _slurm_auth_context_destroy( slurm_auth_context_t c ) } int inline -slurm_auth_init( void ) +slurm_auth_init( char *auth_type ) { int retval = SLURM_SUCCESS; - char *auth_type = NULL; - + char *auth_type_local = NULL; + slurm_mutex_lock( &context_lock ); if ( g_context ) goto done; - auth_type = slurm_get_auth_type(); + if (auth_type == NULL) { + auth_type_local = slurm_get_auth_type(); + auth_type = auth_type_local; + } if (strcmp(auth_type, "auth/dummy") == 0) { info( "warning: %s plugin selected", auth_type); auth_dummy = true; @@ -320,7 +323,7 @@ slurm_auth_init( void ) } done: - xfree(auth_type); + xfree(auth_type_local); slurm_mutex_unlock( &context_lock ); return retval; } @@ -347,12 +350,12 @@ slurm_auth_fini( void ) */ void * -g_slurm_auth_create( void *hosts, int timeout ) +g_slurm_auth_create( void *hosts, int timeout, char *auth_info ) { void **argv; void *ret; - if ( slurm_auth_init() < 0 ) + if ( slurm_auth_init(NULL) < 0 ) return NULL; if ( auth_dummy ) @@ -362,7 +365,7 @@ g_slurm_auth_create( void *hosts, int timeout ) return NULL; } - ret = (*(g_context->ops.create))( argv ); + ret = (*(g_context->ops.create))( argv, auth_info ); xfree( argv ); return ret; } @@ -370,7 +373,7 @@ g_slurm_auth_create( void *hosts, int timeout ) int g_slurm_auth_destroy( void *cred ) { - if ( slurm_auth_init() < 0 ) + if ( slurm_auth_init(NULL) < 0 ) return SLURM_ERROR; if ( auth_dummy ) /* don't worry about leak in testing */ @@ -380,12 +383,12 @@ g_slurm_auth_destroy( void *cred ) } int -g_slurm_auth_verify( void *cred, void *hosts, int timeout ) +g_slurm_auth_verify( void *cred, void *hosts, int timeout, char *auth_info ) { int ret; void **argv; - if ( slurm_auth_init() < 0 ) + if ( slurm_auth_init(NULL) < 0 ) return SLURM_ERROR; if ( auth_dummy ) @@ -395,33 +398,33 @@ g_slurm_auth_verify( void *cred, void *hosts, int timeout ) return SLURM_ERROR; } - ret = (*(g_context->ops.verify))( cred, argv ); + ret = (*(g_context->ops.verify))( cred, argv, auth_info ); xfree( argv ); return ret; } uid_t -g_slurm_auth_get_uid( void *cred ) +g_slurm_auth_get_uid( void *cred, char *auth_info ) { - if (( slurm_auth_init() < 0 ) || auth_dummy ) + if (( slurm_auth_init(NULL) < 0 ) || auth_dummy ) return SLURM_AUTH_NOBODY; - return (*(g_context->ops.get_uid))( cred ); + return 
(*(g_context->ops.get_uid))( cred, auth_info ); } gid_t -g_slurm_auth_get_gid( void *cred ) +g_slurm_auth_get_gid( void *cred, char *auth_info ) { - if (( slurm_auth_init() < 0 ) || auth_dummy ) + if (( slurm_auth_init(NULL) < 0 ) || auth_dummy ) return SLURM_AUTH_NOBODY; - return (*(g_context->ops.get_gid))( cred ); + return (*(g_context->ops.get_gid))( cred, auth_info ); } int g_slurm_auth_pack( void *cred, Buf buf ) { - if ( slurm_auth_init() < 0 ) + if ( slurm_auth_init(NULL) < 0 ) return SLURM_ERROR; if ( auth_dummy ) @@ -433,7 +436,7 @@ g_slurm_auth_pack( void *cred, Buf buf ) void * g_slurm_auth_unpack( Buf buf ) { - if (( slurm_auth_init() < 0 ) || auth_dummy ) + if (( slurm_auth_init(NULL) < 0 ) || auth_dummy ) return NULL; return (*(g_context->ops.unpack))( buf ); @@ -442,7 +445,7 @@ g_slurm_auth_unpack( Buf buf ) int g_slurm_auth_print( void *cred, FILE *fp ) { - if ( slurm_auth_init() < 0 ) + if ( slurm_auth_init(NULL) < 0 ) return SLURM_ERROR; if ( auth_dummy ) @@ -454,7 +457,7 @@ g_slurm_auth_print( void *cred, FILE *fp ) int g_slurm_auth_errno( void *cred ) { - if (( slurm_auth_init() < 0 ) || auth_dummy ) + if (( slurm_auth_init(NULL) < 0 ) || auth_dummy ) return SLURM_ERROR; return (*(g_context->ops.sa_errno))( cred ); @@ -466,7 +469,7 @@ g_slurm_auth_errstr( int slurm_errno ) static char auth_init_msg[] = "authentication initialization failure"; char *generic; - if (( slurm_auth_init() < 0 ) || auth_dummy ) + if (( slurm_auth_init(NULL) < 0 ) || auth_dummy ) return auth_init_msg; if (( generic = (char *) slurm_auth_generic_errstr( slurm_errno ) )) diff --git a/src/common/slurm_auth.h b/src/common/slurm_auth.h index 995011506..3c5bcdee4 100644 --- a/src/common/slurm_auth.h +++ b/src/common/slurm_auth.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -136,8 +136,10 @@ int slurm_auth_context_destroy( slurm_auth_context_t ctxt ); /* * Prepare the global context. + * auth_type IN: authentication mechanism (e.g. "auth/munge") or + * NULL to select based upon slurm_get_auth_type() results */ -extern int slurm_auth_init( void ); +extern int slurm_auth_init( char *auth_type ); /* * Destroy global context, free memory. @@ -147,11 +149,12 @@ extern int slurm_auth_fini( void ); /* * Static bindings for the global authentication context. 
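The auth_info argument added throughout this API is an optional plugin-specific option string; passing NULL preserves the pre-1.3 behaviour, just as passing NULL to slurm_auth_init() falls back to slurm_get_auth_type(). A small sketch under those assumptions (the host list and timeout values here are placeholders that some plugins, e.g. auth/munge, simply ignore):

#include <sys/types.h>
#include "src/common/slurm_auth.h"

/* Hypothetical round trip: create a credential with the configured
 * auth plugin and read the authenticated uid back out of it. */
static uid_t _whoami(void)
{
	void *cred = g_slurm_auth_create(NULL, 2, NULL);
	uid_t uid;

	if (cred == NULL)
		return SLURM_AUTH_NOBODY;

	uid = g_slurm_auth_get_uid(cred, NULL);
	g_slurm_auth_destroy(cred);
	return uid;
}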
*/ -extern void *g_slurm_auth_create( void *hosts, int timeout ); +extern void * g_slurm_auth_create( void *hosts, int timeout, char *auth_info ); extern int g_slurm_auth_destroy( void *cred ); -extern int g_slurm_auth_verify( void *cred, void *hosts, int timeout ); -extern uid_t g_slurm_auth_get_uid( void *cred ); -extern gid_t g_slurm_auth_get_gid( void *cred ); +extern int g_slurm_auth_verify( void *cred, void *hosts, int timeout, + char *auth_info ); +extern uid_t g_slurm_auth_get_uid( void *cred, char *auth_info ); +extern gid_t g_slurm_auth_get_gid( void *cred, char *auth_info ); extern int g_slurm_auth_pack( void *cred, Buf buf ); /* diff --git a/src/common/slurm_cred.c b/src/common/slurm_cred.c index d71919465..ed7dd4319 100644 --- a/src/common/slurm_cred.c +++ b/src/common/slurm_cred.c @@ -1,11 +1,12 @@ /*****************************************************************************\ * src/common/slurm_cred.c - SLURM job credential functions - * $Id: slurm_cred.c 11821 2007-07-11 22:25:02Z jette $ + * $Id: slurm_cred.c 14148 2008-05-28 23:35:40Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark A. Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * Written by Morris Jette <jette1@llnl.gov>. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -47,24 +48,21 @@ #include <stdlib.h> #include <sys/time.h> -/* - * OpenSSL includes - */ -#include <openssl/evp.h> -#include <openssl/pem.h> -#include <openssl/err.h> - #if WITH_PTHREADS # include <pthread.h> #endif /* WITH_PTHREADS */ -#include "src/common/macros.h" +#include "src/common/io_hdr.h" #include "src/common/list.h" #include "src/common/log.h" +#include "src/common/macros.h" +#include "src/common/plugin.h" +#include "src/common/plugrack.h" +#include "src/common/slurm_protocol_api.h" #include "src/common/xmalloc.h" #include "src/common/xassert.h" #include "src/common/xstring.h" -#include "src/common/io_hdr.h" + #include "src/common/slurm_cred.h" @@ -106,6 +104,9 @@ enum ctx_type { SLURM_CRED_VERIFIER }; +/* + * Credential context, slurm_cred_ctx_t: + */ struct slurm_cred_context { #ifndef NDEBUG # define CRED_CTX_MAGIC 0x0c0c0c @@ -115,20 +116,19 @@ struct slurm_cred_context { pthread_mutex_t mutex; #endif enum ctx_type type; /* type of context (creator or verifier) */ - EVP_PKEY *key; /* private or public key */ + void *key; /* private or public key */ List job_list; /* List of used jobids (for verifier) */ List state_list; /* List of cred states (for verifier) */ - int expiry_window; /* expiration window for cached creds */ + int expiry_window;/* expiration window for cached creds */ - EVP_PKEY *exkey; /* Old public key if key is updated */ + void *exkey; /* Old public key if key is updated */ time_t exkey_exp; /* Old key expiration time */ }; /* - * Completion of slurm job credential type: - * + * Completion of slurm job credential type, slurm_cred_t: */ struct slurm_job_credential { #ifndef NDEBUG @@ -138,18 +138,55 @@ struct slurm_job_credential { #ifdef WITH_PTHREADS pthread_mutex_t mutex; #endif - uint32_t jobid; /* Job ID associated with this credential */ - uint32_t stepid; /* Job step ID for this 
credential */ - uid_t uid; /* user for which this cred is valid */ - time_t ctime; /* time of credential creation */ - char *nodes; /* list of hostnames for which the cred is ok*/ - uint32_t alloc_lps_cnt; /* Number of hosts in the list above */ - uint32_t *alloc_lps; /* Number of tasks on each host */ - - unsigned char *signature; /* credential signature */ - unsigned int siglen; /* signature length in bytes */ + uint32_t jobid; /* Job ID associated with this cred */ + uint32_t stepid; /* Job step ID for this credential */ + uid_t uid; /* user for which this cred is valid */ + uint32_t job_mem; /* MB of memory reserved for job */ + uint32_t task_mem; /* MB of memory reserved per task */ + time_t ctime; /* time of credential creation */ + char *nodes; /* hostnames for which the cred is ok */ + uint32_t alloc_lps_cnt;/* Number of hosts in the list above */ + uint32_t *alloc_lps; /* Number of tasks on each host */ + + char *signature; /* credential signature */ + unsigned int siglen; /* signature length in bytes */ }; +/* + * WARNING: Do not change the order of these fields or add additional + * fields at the beginning of the structure. If you do, job accounting + * plugins will stop working. If you need to add fields, add them + * at the end of the structure. + */ +typedef struct slurm_crypto_ops { + void *(*crypto_read_private_key) (const char *path); + void *(*crypto_read_public_key) (const char *path); + void (*crypto_destroy_key) (void *key); + int (*crypto_sign) (void * key, char *buffer, + int buf_size, char **sig_pp, + unsigned int *sig_size_p); + int (*crypto_verify_sign) (void * key, char *buffer, + unsigned int buf_size, + char *signature, + unsigned int sig_size); + char *(*crypto_str_error) (void); +} slurm_crypto_ops_t; + +/* + * A global cryptographic context. "Global" in the sense that there's + * only one, with static bindings. We don't export it. + */ + +typedef struct slurm_crypto_context { + char *crypto_type; + plugrack_t plugin_list; + plugin_handle_t cur_plugin; + int crypto_errno; + slurm_crypto_ops_t ops; +} slurm_crypto_context_t; + +static slurm_crypto_context_t *g_crypto_context = NULL; +static pthread_mutex_t g_crypto_context_lock = PTHREAD_MUTEX_INITIALIZER; /* @@ -180,12 +217,12 @@ static void _verifier_ctx_init(slurm_cred_ctx_t ctx); static bool _credential_replayed(slurm_cred_ctx_t ctx, slurm_cred_t cred); static bool _credential_revoked(slurm_cred_ctx_t ctx, slurm_cred_t cred); -static EVP_PKEY * _read_private_key(const char *path); -static EVP_PKEY * _read_public_key(const char *path); - static int _slurm_cred_sign(slurm_cred_ctx_t ctx, slurm_cred_t cred); static int _slurm_cred_verify_signature(slurm_cred_ctx_t ctx, slurm_cred_t c); +static int _slurm_crypto_init(void); +static int _slurm_crypto_fini(void); + static job_state_t * _job_state_unpack_one(Buf buffer); static cred_state_t * _cred_state_unpack_one(Buf buffer); @@ -201,20 +238,185 @@ static void _cred_state_pack_one(cred_state_t *s, Buf buffer); static char * timestr (const time_t *tp, char *buf, size_t n); #endif + +static slurm_crypto_context_t * +_slurm_crypto_context_create( const char *crypto_type) +{ + slurm_crypto_context_t *c; + + if ( crypto_type == NULL ) { + error( "_slurm_crypto_context_create: no crypto type" ); + return NULL; + } + + c = xmalloc( sizeof( struct slurm_crypto_context ) ); + + c->crypto_errno = SLURM_SUCCESS; + + /* Copy the job completion job completion type. 
*/ + c->crypto_type = xstrdup( crypto_type ); + if ( c->crypto_type == NULL ) { + error( "can't make local copy of crypto type" ); + xfree( c ); + return NULL; + } + + /* Plugin rack is demand-loaded on first reference. */ + c->plugin_list = NULL; + c->cur_plugin = PLUGIN_INVALID_HANDLE; + c->crypto_errno = SLURM_SUCCESS; + + return c; +} + +static int +_slurm_crypto_context_destroy( slurm_crypto_context_t *c ) +{ + /* + * Must check return code here because plugins might still + * be loaded and active. + */ + if ( c->plugin_list ) { + if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) { + return SLURM_ERROR; + } + } + + xfree( c->crypto_type ); + xfree( c ); + + return SLURM_SUCCESS; +} + +/* + * Resolve the operations from the plugin. + */ +static slurm_crypto_ops_t * +_slurm_crypto_get_ops( slurm_crypto_context_t *c ) +{ + /* + * These strings must be in the same order as the fields declared + * for slurm_crypto_ops_t. + */ + static const char *syms[] = { + "crypto_read_private_key", + "crypto_read_public_key", + "crypto_destroy_key", + "crypto_sign", + "crypto_verify_sign", + "crypto_str_error" + }; + int n_syms = sizeof( syms ) / sizeof( char * ); + int rc = 0; + /* Get the plugin list, if needed. */ + if ( c->plugin_list == NULL ) { + char *plugin_dir; + c->plugin_list = plugrack_create(); + if ( c->plugin_list == NULL ) { + error( "Unable to create a plugin manager" ); + return NULL; + } + + plugrack_set_major_type( c->plugin_list, "crypto" ); + plugrack_set_paranoia( c->plugin_list, + PLUGRACK_PARANOIA_NONE, + 0 ); + plugin_dir = slurm_get_plugin_dir(); + plugrack_read_dir( c->plugin_list, plugin_dir ); + xfree(plugin_dir); + } + + /* Find the correct plugin. */ + c->cur_plugin = + plugrack_use_by_type( c->plugin_list, c->crypto_type ); + if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) { + error( "can't find a plugin for type %s", c->crypto_type ); + return NULL; + } + + /* Dereference the API. */ + if ( (rc = plugin_get_syms( c->cur_plugin, + n_syms, + syms, + (void **) &c->ops )) < n_syms ) { + error( "incomplete crypto plugin detected only " + "got %d out of %d", + rc, n_syms); + return NULL; + } + + return &c->ops; +} + +static int _slurm_crypto_init(void) +{ + char *crypto_type = NULL; + int retval = SLURM_SUCCESS; + + slurm_mutex_lock( &g_crypto_context_lock ); + if ( g_crypto_context ) + goto done; + + crypto_type = slurm_get_crypto_type(); + g_crypto_context = _slurm_crypto_context_create( crypto_type ); + if ( g_crypto_context == NULL ) { + error( "cannot create a context for %s", crypto_type ); + retval = SLURM_ERROR; + goto done; + } + + if ( _slurm_crypto_get_ops( g_crypto_context ) == NULL ) { + error( "cannot resolve crypto plugin operations" ); + _slurm_crypto_context_destroy( g_crypto_context ); + g_crypto_context = NULL; + retval = SLURM_ERROR; + } + + done: + slurm_mutex_unlock( &g_crypto_context_lock ); + xfree(crypto_type); + + return(retval); +} + +static int _slurm_crypto_fini(void) +{ + int rc; + + if (!g_crypto_context) + return SLURM_SUCCESS; + + rc = _slurm_crypto_context_destroy(g_crypto_context); + g_crypto_context = NULL; + return rc; +} + +/* Terminate the plugin and release all memory. 
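A crypto plugin loaded through this context must export exactly the six symbols listed in syms[], with signatures matching slurm_crypto_ops_t, plus the usual plugrack identification symbols. The skeleton below is an editor's illustration of that contract, not a working plugin; the type string, version number and no-op bodies are assumptions (the release itself provides real crypto/openssl and crypto/munge plugins):

/* crypto_none.c -- hypothetical do-nothing crypto plugin skeleton */
#include <stdint.h>
#include <stdlib.h>
#include <slurm/slurm_errno.h>

/* Identification symbols required of every SLURM plugin. */
const char	plugin_name[]	= "Null crypto plugin (illustration)";
const char	plugin_type[]	= "crypto/none";
const uint32_t	plugin_version	= 100;	/* version convention assumed */

void *crypto_read_private_key(const char *path)	{ return malloc(1); }
void *crypto_read_public_key(const char *path)	{ return malloc(1); }
void  crypto_destroy_key(void *key)		{ free(key); }

int crypto_sign(void *key, char *buffer, int buf_size,
		char **sig_pp, unsigned int *sig_size_p)
{
	*sig_pp = NULL;		/* a real plugin returns a signature here */
	*sig_size_p = 0;
	return SLURM_SUCCESS;
}

int crypto_verify_sign(void *key, char *buffer, unsigned int buf_size,
		       char *signature, unsigned int sig_size)
{
	return SLURM_SUCCESS;	/* accepts everything -- illustration only */
}

char *crypto_str_error(void)
{
	return "no error";
}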
*/ +extern int slurm_crypto_fini(void) +{ + if (_slurm_crypto_fini() < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + slurm_cred_ctx_t slurm_cred_creator_ctx_create(const char *path) { slurm_cred_ctx_t ctx = NULL; xassert(path != NULL); + if (_slurm_crypto_init() < 0) + return NULL; ctx = _slurm_cred_ctx_alloc(); slurm_mutex_lock(&ctx->mutex); ctx->type = SLURM_CRED_CREATOR; - if (!(ctx->key = _read_private_key(path))) - goto fail; + ctx->key = (*(g_crypto_context->ops.crypto_read_private_key))(path); + if (!ctx->key) + goto fail; slurm_mutex_unlock(&ctx->mutex); return ctx; @@ -222,6 +424,7 @@ slurm_cred_creator_ctx_create(const char *path) fail: slurm_mutex_unlock(&ctx->mutex); slurm_cred_ctx_destroy(ctx); + error("Can not open data encryption key file %s", path); return NULL; } @@ -232,13 +435,16 @@ slurm_cred_verifier_ctx_create(const char *path) slurm_cred_ctx_t ctx = NULL; xassert(path != NULL); + if (_slurm_crypto_init() < 0) + return NULL; ctx = _slurm_cred_ctx_alloc(); slurm_mutex_lock(&ctx->mutex); ctx->type = SLURM_CRED_VERIFIER; - if (!(ctx->key = _read_public_key(path))) + ctx->key = (*(g_crypto_context->ops.crypto_read_public_key))(path); + if (!ctx->key) goto fail; _verifier_ctx_init(ctx); @@ -249,6 +455,7 @@ slurm_cred_verifier_ctx_create(const char *path) fail: slurm_mutex_unlock(&ctx->mutex); slurm_cred_ctx_destroy(ctx); + error("Can not open data encryption key file %s", path); return NULL; } @@ -258,12 +465,14 @@ slurm_cred_ctx_destroy(slurm_cred_ctx_t ctx) { if (ctx == NULL) return; + if (_slurm_crypto_init() < 0) + return; slurm_mutex_lock(&ctx->mutex); xassert(ctx->magic == CRED_CTX_MAGIC); if (ctx->key) - EVP_PKEY_free(ctx->key); + (*(g_crypto_context->ops.crypto_destroy_key))(ctx->key); if (ctx->job_list) list_destroy(ctx->job_list); if (ctx->state_list) @@ -344,6 +553,9 @@ slurm_cred_ctx_get(slurm_cred_ctx_t ctx, slurm_cred_opt_t opt, ...) 
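slurm_cred_creator_ctx_create() and slurm_cred_verifier_ctx_create() now pull their keys through the crypto plugin, so loading the plugin is implicit in context creation and slurm_crypto_fini() is the matching teardown. A brief lifecycle sketch; the key path is an assumption standing in for the JobCredentialPrivateKey configuration value:

#include "src/common/log.h"
#include "src/common/slurm_cred.h"

/* Hypothetical smoke test: load the signing key via the crypto
 * plugin, then tear everything down again. */
static void _sign_key_smoke_test(void)
{
	slurm_cred_ctx_t ctx =
		slurm_cred_creator_ctx_create("/etc/slurm/slurm.key");

	if (ctx == NULL) {
		error("could not load job credential signing key");
		return;
	}
	slurm_cred_ctx_destroy(ctx);
	slurm_crypto_fini();	/* unload the crypto plugin */
}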
int slurm_cred_ctx_key_update(slurm_cred_ctx_t ctx, const char *path) { + if (_slurm_crypto_init() < 0) + return SLURM_ERROR; + if (ctx->type == SLURM_CRED_CREATOR) return _ctx_update_private_key(ctx, path); else @@ -358,6 +570,8 @@ slurm_cred_create(slurm_cred_ctx_t ctx, slurm_cred_arg_t *arg) xassert(ctx != NULL); xassert(arg != NULL); + if (_slurm_crypto_init() < 0) + return NULL; slurm_mutex_lock(&ctx->mutex); @@ -375,12 +589,15 @@ slurm_cred_create(slurm_cred_ctx_t ctx, slurm_cred_arg_t *arg) cred->jobid = arg->jobid; cred->stepid = arg->stepid; cred->uid = arg->uid; + cred->job_mem = arg->job_mem; + cred->task_mem = arg->task_mem; cred->nodes = xstrdup(arg->hostlist); cred->alloc_lps_cnt = arg->alloc_lps_cnt; cred->alloc_lps = NULL; if (cred->alloc_lps_cnt > 0) { cred->alloc_lps = xmalloc(cred->alloc_lps_cnt * sizeof(uint32_t)); - memcpy(cred->alloc_lps, arg->alloc_lps, cred->alloc_lps_cnt * sizeof(uint32_t)); + memcpy(cred->alloc_lps, arg->alloc_lps, + cred->alloc_lps_cnt * sizeof(uint32_t)); } cred->ctime = time(NULL); @@ -419,6 +636,8 @@ slurm_cred_copy(slurm_cred_t cred) rcred->jobid = cred->jobid; rcred->stepid = cred->stepid; rcred->uid = cred->uid; + rcred->job_mem = cred->job_mem; + rcred->task_mem = cred->task_mem; rcred->nodes = xstrdup(cred->nodes); rcred->alloc_lps_cnt = cred->alloc_lps_cnt; rcred->alloc_lps = NULL; @@ -428,7 +647,10 @@ slurm_cred_copy(slurm_cred_t cred) rcred->alloc_lps_cnt * sizeof(uint32_t)); } rcred->ctime = cred->ctime; - rcred->signature = (unsigned char *)xstrdup((char *)cred->signature); + rcred->siglen = cred->siglen; + /* Assumes signature is a string, + * otherwise use xmalloc and strcpy here */ + rcred->signature = xstrdup(cred->signature); slurm_mutex_unlock(&cred->mutex); slurm_mutex_unlock(&rcred->mutex); @@ -448,16 +670,19 @@ slurm_cred_faker(slurm_cred_arg_t *arg) slurm_mutex_lock(&cred->mutex); - cred->jobid = arg->jobid; - cred->stepid = arg->stepid; - cred->uid = arg->uid; - cred->nodes = xstrdup(arg->hostlist); - cred->alloc_lps_cnt = arg->alloc_lps_cnt; - cred->alloc_lps = NULL; - if (cred->alloc_lps_cnt > 0) { - cred->alloc_lps = xmalloc(cred->alloc_lps_cnt * sizeof(uint32_t)); - memcpy(cred->alloc_lps, arg->alloc_lps, cred->alloc_lps_cnt * sizeof(uint32_t)); - } + cred->jobid = arg->jobid; + cred->stepid = arg->stepid; + cred->uid = arg->uid; + cred->job_mem = arg->job_mem; + cred->task_mem = arg->task_mem; + cred->nodes = xstrdup(arg->hostlist); + cred->alloc_lps_cnt = arg->alloc_lps_cnt; + cred->alloc_lps = NULL; + if (cred->alloc_lps_cnt > 0) { + cred->alloc_lps = xmalloc(cred->alloc_lps_cnt * sizeof(uint32_t)); + memcpy(cred->alloc_lps, arg->alloc_lps, + cred->alloc_lps_cnt * sizeof(uint32_t)); + } cred->ctime = time(NULL); cred->siglen = SLURM_IO_KEY_SIZE; @@ -494,6 +719,8 @@ slurm_cred_verify(slurm_cred_ctx_t ctx, slurm_cred_t cred, xassert(ctx != NULL); xassert(cred != NULL); xassert(arg != NULL); + if (_slurm_crypto_init() < 0) + return SLURM_ERROR; slurm_mutex_lock(&ctx->mutex); slurm_mutex_lock(&cred->mutex); @@ -532,12 +759,15 @@ slurm_cred_verify(slurm_cred_ctx_t ctx, slurm_cred_t cred, arg->jobid = cred->jobid; arg->stepid = cred->stepid; arg->uid = cred->uid; + arg->job_mem = cred->job_mem; + arg->task_mem = cred->task_mem; arg->hostlist = xstrdup(cred->nodes); arg->alloc_lps_cnt = cred->alloc_lps_cnt; arg->alloc_lps = NULL; if (arg->alloc_lps_cnt > 0) { arg->alloc_lps = xmalloc(arg->alloc_lps_cnt * sizeof(uint32_t)); - memcpy(arg->alloc_lps, cred->alloc_lps, arg->alloc_lps_cnt * sizeof(uint32_t)); + 
memcpy(arg->alloc_lps, cred->alloc_lps, + arg->alloc_lps_cnt * sizeof(uint32_t)); } slurm_mutex_unlock(&cred->mutex); @@ -746,7 +976,7 @@ slurm_cred_pack(slurm_cred_t cred, Buf buffer) slurm_cred_t slurm_cred_unpack(Buf buffer) { - uint16_t len; + uint32_t len; uint32_t tmpint; slurm_cred_t cred = NULL; char **sigp; @@ -762,6 +992,8 @@ slurm_cred_unpack(Buf buffer) safe_unpack32( &cred->stepid, buffer); safe_unpack32( &tmpint, buffer); cred->uid = tmpint; + safe_unpack32( &cred->job_mem, buffer); + safe_unpack32( &cred->task_mem, buffer); safe_unpackstr_xmalloc( &cred->nodes, &len, buffer); safe_unpack32( &cred->alloc_lps_cnt, buffer); if (cred->alloc_lps_cnt > 0) @@ -826,65 +1058,35 @@ slurm_cred_print(slurm_cred_t cred) xassert(cred->magic == CRED_MAGIC); - info("Cred: Jobid %u", cred->jobid ); - info("Cred: Stepid %u", cred->jobid ); - info("Cred: UID %lu", (u_long) cred->uid ); - info("Cred: Nodes %s", cred->nodes ); - info("Cred: alloc_lps_cnt %d", cred->alloc_lps_cnt ); - info("Cred: alloc_lps: "); - for (i=0; i<cred->alloc_lps_cnt; i++) - info("alloc_lps[%d] = %u ", i, cred->alloc_lps[i]); - info("Cred: ctime %s", ctime(&cred->ctime) ); - info("Cred: siglen %u", cred->siglen ); + info("Cred: Jobid %u", cred->jobid ); + info("Cred: Stepid %u", cred->jobid ); + info("Cred: UID %lu", (u_long) cred->uid ); + info("Cred: job_mem %u", cred->job_mem ); + info("Cred: task_mem %u", cred->task_mem ); + info("Cred: Nodes %s", cred->nodes ); + info("Cred: alloc_lps_cnt %u", cred->alloc_lps_cnt ); + info("Cred: alloc_lps: "); + for (i=0; i<cred->alloc_lps_cnt; i++) + info("alloc_lps[%d] = %u ", i, cred->alloc_lps[i]); + info("Cred: ctime %s", ctime(&cred->ctime) ); + info("Cred: siglen %u", cred->siglen ); slurm_mutex_unlock(&cred->mutex); } - -static EVP_PKEY * -_read_private_key(const char *path) +int slurm_cred_get_alloc_lps(slurm_cred_t cred, char **nodes, + uint32_t *alloc_lps_cnt, uint32_t **alloc_lps) { - FILE *fp = NULL; - EVP_PKEY *pk = NULL; - - xassert(path != NULL); - - if (!(fp = fopen(path, "r"))) { - error ("can't open key file '%s' : %m", path); - return NULL; - } - - if (!PEM_read_PrivateKey(fp, &pk, NULL, NULL)) - error ("PEM_read_PrivateKey [%s]: %m", path); - - fclose(fp); - - return pk; -} - - -static EVP_PKEY * -_read_public_key(const char *path) -{ - FILE *fp = NULL; - EVP_PKEY *pk = NULL; - - xassert(path != NULL); - - if ((fp = fopen(path, "r")) == NULL) { - error ("can't open public key '%s' : %m ", path); - return NULL; - } - - if (!PEM_read_PUBKEY(fp, &pk, NULL, NULL)) - error("PEM_read_PUBKEY[%s]: %m", path); - - fclose(fp); + if ((cred == NULL) || (nodes == NULL) || + (alloc_lps_cnt == NULL) || (alloc_lps == NULL)) + return EINVAL; - return pk; + *nodes = cred->nodes; + *alloc_lps_cnt = cred->alloc_lps_cnt; + *alloc_lps = cred->alloc_lps; + return SLURM_SUCCESS; } - static void _verifier_ctx_init(slurm_cred_ctx_t ctx) { @@ -902,12 +1104,13 @@ _verifier_ctx_init(slurm_cred_ctx_t ctx) static int _ctx_update_private_key(slurm_cred_ctx_t ctx, const char *path) { - EVP_PKEY *pk = NULL; - EVP_PKEY *tmpk = NULL; + void *pk = NULL; + void *tmpk = NULL; xassert(ctx != NULL); - if (!(pk = _read_private_key(path))) + pk = (*(g_crypto_context->ops.crypto_read_private_key))(path); + if (!pk) return SLURM_ERROR; slurm_mutex_lock(&ctx->mutex); @@ -920,7 +1123,7 @@ _ctx_update_private_key(slurm_cred_ctx_t ctx, const char *path) slurm_mutex_unlock(&ctx->mutex); - EVP_PKEY_free(tmpk); + (*(g_crypto_context->ops.crypto_destroy_key))(tmpk); return SLURM_SUCCESS; } @@ -929,11 +1132,11 
@@ _ctx_update_private_key(slurm_cred_ctx_t ctx, const char *path) static int _ctx_update_public_key(slurm_cred_ctx_t ctx, const char *path) { - EVP_PKEY *pk = NULL; + void *pk = NULL; xassert(ctx != NULL); - - if (!(pk = _read_public_key(path))) + pk = (*(g_crypto_context->ops.crypto_read_public_key))(path); + if (!pk) return SLURM_ERROR; slurm_mutex_lock(&ctx->mutex); @@ -941,8 +1144,8 @@ _ctx_update_public_key(slurm_cred_ctx_t ctx, const char *path) xassert(ctx->magic == CRED_CTX_MAGIC); xassert(ctx->type == SLURM_CRED_VERIFIER); - if (ctx->exkey) - EVP_PKEY_free(ctx->exkey); + if (ctx->exkey) + (*(g_crypto_context->ops.crypto_destroy_key))(ctx->exkey); ctx->exkey = ctx->key; ctx->key = pk; @@ -965,7 +1168,7 @@ _exkey_is_valid(slurm_cred_ctx_t ctx) if (time(NULL) > ctx->exkey_exp) { debug2("old job credential key slurmd expired"); - EVP_PKEY_free(ctx->exkey); + (*(g_crypto_context->ops.crypto_destroy_key))(ctx->exkey); ctx->exkey = NULL; return false; } @@ -978,16 +1181,12 @@ static slurm_cred_ctx_t _slurm_cred_ctx_alloc(void) { slurm_cred_ctx_t ctx = xmalloc(sizeof(*ctx)); + /* Contents initialized to zero */ slurm_mutex_init(&ctx->mutex); slurm_mutex_lock(&ctx->mutex); - ctx->key = NULL; - ctx->job_list = NULL; - ctx->state_list = NULL; ctx->expiry_window = DEFAULT_EXPIRATION_WINDOW; - - ctx->exkey = NULL; ctx->exkey_exp = (time_t) -1; xassert(ctx->magic = CRED_CTX_MAGIC); @@ -1000,28 +1199,16 @@ static slurm_cred_t _slurm_cred_alloc(void) { slurm_cred_t cred = xmalloc(sizeof(*cred)); + /* Contents initialized to zero */ slurm_mutex_init(&cred->mutex); - - cred->jobid = 0; - cred->stepid = 0; - cred->uid = (uid_t) -1; - cred->nodes = NULL; - cred->alloc_lps_cnt = 0; - cred->alloc_lps = NULL; - cred->signature = NULL; - cred->siglen = 0; + cred->uid = (uid_t) -1; xassert(cred->magic = CRED_MAGIC); return cred; } -static const char * -_ssl_error(void) -{ - return ERR_reason_error_string(ERR_get_error()); -} #ifdef EXTREME_DEBUG static void @@ -1039,89 +1226,62 @@ _print_data(char *data, int datalen) static int _slurm_cred_sign(slurm_cred_ctx_t ctx, slurm_cred_t cred) { - EVP_MD_CTX ectx; Buf buffer; - int rc = SLURM_SUCCESS; - unsigned int *lenp = &cred->siglen; - int ksize = EVP_PKEY_size(ctx->key); - - /* - * Allocate memory for signature: at most EVP_PKEY_size() bytes - */ - cred->signature = xmalloc(ksize * sizeof(unsigned char)); + int rc; buffer = init_buf(4096); _pack_cred(cred, buffer); - - EVP_SignInit(&ectx, EVP_sha1()); - EVP_SignUpdate(&ectx, get_buf_data(buffer), get_buf_offset(buffer)); - - if (!(EVP_SignFinal(&ectx, cred->signature, lenp, ctx->key))) { - ERR_print_errors_fp(log_fp()); - rc = SLURM_ERROR; - } - -#ifdef HAVE_EVP_MD_CTX_CLEANUP - /* Note: Likely memory leak if this function is absent */ - EVP_MD_CTX_cleanup(&ectx); -#endif + rc = (*(g_crypto_context->ops.crypto_sign))(ctx->key, + get_buf_data(buffer), get_buf_offset(buffer), + &cred->signature, &cred->siglen); free_buf(buffer); - return rc; + if (rc) + return SLURM_ERROR; + return SLURM_SUCCESS; } static int _slurm_cred_verify_signature(slurm_cred_ctx_t ctx, slurm_cred_t cred) { - EVP_MD_CTX ectx; Buf buffer; int rc; - unsigned char *sig = cred->signature; - int siglen = cred->siglen; + debug("Checking credential with %d bytes of sig data", cred->siglen); buffer = init_buf(4096); _pack_cred(cred, buffer); - debug("Checking credential with %d bytes of sig data", siglen); - - EVP_VerifyInit(&ectx, EVP_sha1()); - EVP_VerifyUpdate(&ectx, get_buf_data(buffer), get_buf_offset(buffer)); - - if (!(rc = 
EVP_VerifyFinal(&ectx, sig, siglen, ctx->key))) { - /* - * Check against old key if one exists and is valid - */ - if (_exkey_is_valid(ctx)) - rc = EVP_VerifyFinal(&ectx, sig, siglen, ctx->exkey); + rc = (*(g_crypto_context->ops.crypto_verify_sign))(ctx->key, + get_buf_data(buffer), get_buf_offset(buffer), + cred->signature, cred->siglen); + if (rc && _exkey_is_valid(ctx)) { + rc = (*(g_crypto_context->ops.crypto_verify_sign))(ctx->key, + get_buf_data(buffer), get_buf_offset(buffer), + cred->signature, cred->siglen); } - - if (!rc) { - ERR_load_crypto_strings(); - info("Credential signature check: %s", _ssl_error()); - rc = SLURM_ERROR; - } else - rc = SLURM_SUCCESS; - -#ifdef HAVE_EVP_MD_CTX_CLEANUP - /* Note: Likely memory leak if this function is absent */ - EVP_MD_CTX_cleanup(&ectx); -#endif free_buf(buffer); - return rc; + if (rc) { + info("Credential signature check: %s", + (*(g_crypto_context->ops.crypto_str_error))()); + return SLURM_ERROR; + } + return SLURM_SUCCESS; } static void _pack_cred(slurm_cred_t cred, Buf buffer) { - pack32( cred->jobid, buffer); - pack32( cred->stepid, buffer); - pack32((uint32_t) cred->uid, buffer); - packstr( cred->nodes, buffer); + pack32( cred->jobid, buffer); + pack32( cred->stepid, buffer); + pack32((uint32_t) cred->uid, buffer); + pack32( cred->job_mem, buffer); + pack32( cred->task_mem, buffer); + packstr( cred->nodes, buffer); pack32( cred->alloc_lps_cnt, buffer); - if (cred->alloc_lps_cnt > 0) - pack32_array( cred->alloc_lps, cred->alloc_lps_cnt, buffer); + if (cred->alloc_lps_cnt > 0) + pack32_array( cred->alloc_lps, cred->alloc_lps_cnt, buffer); pack_time( cred->ctime, buffer); } @@ -1146,7 +1306,8 @@ _credential_replayed(slurm_cred_ctx_t ctx, slurm_cred_t cred) /* * If we found a match, this credential is being replayed. */ - if (s) return true; + if (s) + return true; /* * Otherwise, save the credential state @@ -1165,8 +1326,10 @@ static char * timestr (const time_t *tp, char *buf, size_t n) struct tm tmval; #ifdef DISABLE_LOCALTIME static int disabled = 0; - if (buf == NULL) disabled=1; - if (disabled) return NULL; + if (buf == NULL) + disabled=1; + if (disabled) + return NULL; #endif if (!localtime_r (tp, &tmval)) error ("localtime_r: %m"); @@ -1179,15 +1342,15 @@ slurm_cred_handle_reissue(slurm_cred_ctx_t ctx, slurm_cred_t cred) { job_state_t *j = _find_job_state(ctx, cred->jobid); - if (j != NULL && j->revoked && cred->ctime > j->revoked) { + if (j != NULL && j->revoked && (cred->ctime > j->revoked)) { /* The credential has been reissued. Purge the - old record so that "cred" will look like a new - credential to any ensuing commands. */ + * old record so that "cred" will look like a new + * credential to any ensuing commands. */ info("reissued job credential for job %u", j->jobid); /* Setting j->expiration to zero will make - _clear_expired_job_states() remove this job credential - from the cred context. */ + * _clear_expired_job_states() remove this + * job credential from the cred context. */ j->expiration = 0; _clear_expired_job_states(ctx); } @@ -1221,7 +1384,7 @@ _credential_revoked(slurm_cred_ctx_t ctx, slurm_cred_t cred) if (cred->ctime <= j->revoked) { char buf[64]; - debug ("cred for %d revoked. expires at %s", + debug ("cred for %u revoked. 
expires at %s", j->jobid, timestr (&j->expiration, buf, 64)); return true; } @@ -1237,7 +1400,10 @@ _find_job_state(slurm_cred_ctx_t ctx, uint32_t jobid) job_state_t *j = NULL; i = list_iterator_create(ctx->job_list); - while ((j = list_next(i)) && (j->jobid != jobid)) {;} + while ((j = list_next(i))) { + if (j->jobid == jobid) + break; + } list_iterator_destroy(i); return j; } @@ -1305,7 +1471,7 @@ _clear_expired_job_states(slurm_cred_ctx_t ctx) j->jobid, timestr(&j->ctime, t1, 64), t2, t3); if (j->revoked && (now > j->expiration)) { - list_delete(i); + list_delete_item(i); } } @@ -1324,7 +1490,7 @@ _clear_expired_credential_states(slurm_cred_ctx_t ctx) while ((s = list_next(i))) { if (now > s->expiration) - list_delete(i); + list_delete_item(i); } list_iterator_destroy(i); diff --git a/src/common/slurm_cred.h b/src/common/slurm_cred.h index fd974979e..9fb47d6ab 100644 --- a/src/common/slurm_cred.h +++ b/src/common/slurm_cred.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/slurm_cred.h - SLURM job credential operations - * $Id: slurm_cred.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: slurm_cred.h 14148 2008-05-28 23:35:40Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -129,12 +129,17 @@ int slurm_cred_ctx_unpack(slurm_cred_ctx_t ctx, Buf buffer); typedef struct { uint32_t jobid; uint32_t stepid; + uint32_t job_mem; /* MB of memory reserved for job */ + uint32_t task_mem; /* MB of memory reserved per task */ uid_t uid; char *hostlist; uint32_t alloc_lps_cnt; uint32_t *alloc_lps; } slurm_cred_arg_t; +/* Terminate the plugin and release all memory. */ +int slurm_crypto_fini(void); + /* * Create a slurm credential using the values in `arg.' * The credential is signed using the creators public key. @@ -262,6 +267,11 @@ int slurm_cred_get_signature(slurm_cred_t cred, char **datap, int *len); */ void slurm_cred_print(slurm_cred_t cred); +/* + * Get count of allocated LPS (processors) by node + */ +int slurm_cred_get_alloc_lps(slurm_cred_t cred, char **nodes, + uint32_t *alloc_lps_cnt, uint32_t **alloc_lps); #ifdef DISABLE_LOCALTIME extern char * timestr (const time_t *tp, char *buf, size_t n); #endif diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c index ff413445e..4720b2189 100644 --- a/src/common/slurm_errno.c +++ b/src/common/slurm_errno.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -114,7 +114,7 @@ static slurm_errtab_t slurm_errtab[] = { { ESLURM_DEFAULT_PARTITION_NOT_SET, "No partition specified or system default partition" }, { ESLURM_ACCESS_DENIED, - "Access denied" }, + "Access/permission denied" }, { ESLURM_JOB_MISSING_REQUIRED_PARTITION_GROUP, "User's group not permitted to use this partition" }, { ESLURM_REQUESTED_NODES_NOT_IN_PARTITION, @@ -122,7 +122,7 @@ static slurm_errtab_t slurm_errtab[] = { { ESLURM_TOO_MANY_REQUESTED_CPUS, "More processors requested than permitted" }, { ESLURM_TOO_MANY_REQUESTED_NODES, - "More nodes requested than permitted" }, + "Node count specification invalid" }, { ESLURM_ERROR_ON_DESC_TO_RECORD_COPY, "Unable to create job record, try again" }, { ESLURM_JOB_MISSING_SIZE_SPECIFICATION, @@ -134,7 +134,7 @@ static slurm_errtab_t slurm_errtab[] = { { ESLURM_DUPLICATE_JOB_ID, "Duplicate job id" }, { ESLURM_PATHNAME_TOO_LONG, - "Pathname of a file or directory too long" }, + "Pathname of a file, directory or other parameter too long" }, { ESLURM_NOT_TOP_PRIORITY, "Immediate execution impossible, insufficient priority" }, { ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE, @@ -170,15 +170,23 @@ static slurm_errtab_t slurm_errtab[] = { { ESLURM_INVALID_FEATURE, "Invalid feature specification" }, { ESLURM_INVALID_AUTHTYPE_CHANGE, - "AuthType change requires restart of all SLURM daemons and commands"}, + "AuthType change requires restart of all SLURM daemons and " + "commands to take effect"}, { ESLURM_INVALID_CHECKPOINT_TYPE_CHANGE, - "Invalid change in CheckpointType requested" }, + "CheckpointType change requires restart of all SLURM daemons " + "to take effect" }, + { ESLURM_INVALID_CRYPTO_TYPE_CHANGE, + "CryptoType change requires restart of all SLURM daemons " + "to take effect" }, { ESLURM_INVALID_SCHEDTYPE_CHANGE, - "Invalid change in SchedulerType requested" }, + "SchedulerType change requires restart of the slurmctld daemon " + "to take effect" }, { ESLURM_INVALID_SELECTTYPE_CHANGE, - "Invalid change in SelectType requested" }, + "SelectType change requires restart of the slurmctld daemon " + "to take effect" }, { ESLURM_INVALID_SWITCHTYPE_CHANGE, - "SwitchType change requires restart of all SLURM daemons and jobs"}, + "SwitchType change requires restart of all SLURM daemons and " + "jobs to take effect" }, { ESLURM_FRAGMENTATION, "Immediate execution impossible, " "resources too fragmented for allocation" }, @@ -187,7 +195,7 @@ static slurm_errtab_t slurm_errtab[] = { { ESLURM_DISABLED, "Requested operation is presently disabled" }, { ESLURM_DEPENDENCY, - "Immediate execution impossible, job dependency problem"}, + "Job dependency problem" }, { ESLURM_BATCH_ONLY, "Only batch jobs are accepted or processed" }, { ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED, @@ -196,6 +204,19 @@ static slurm_errtab_t slurm_errtab[] = { "Requested more tasks than available processors" }, { ESLURM_JOB_HELD, "Job is in held state, pending scheduler release" }, + { ESLURM_INVALID_BANK_ACCOUNT, + "Invalid bank account specified" }, + { ESLURM_INVALID_TASK_MEMORY, + "Memory required by task is not available" }, + { ESLURM_INVALID_ACCOUNT, + "Job has invalid account" }, + { ESLURM_INVALID_LICENSES, + "Job has invalid license specification" }, + { ESLURM_NEED_RESTART, + "The node configuration changes that were made require restart " + "of the slurmctld daemon to take effect"}, + { ESLURM_ACCOUNTING_POLICY, + "Job violates accounting policy (the user's size and/or time limits)"}, /* slurmd error codes */ diff --git a/src/common/slurm_jobacct.c 
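[Editorial sketch, not part of the patch] The slurm_errtab additions above extend a flat table of {errnum, message} pairs that is scanned linearly when an error code is turned into text. The self-contained program below shows that table-scan strerror pattern in isolation; the numeric codes and the my_strerror name are placeholders, not SLURM's real values or API.

#include <stdio.h>

typedef struct {
	int errnum;
	const char *errmsg;
} errtab_t;

static const errtab_t errtab[] = {
	{ 1001, "Access/permission denied" },
	{ 1002, "Job violates accounting policy "
		"(the user's size and/or time limits)" },
};

/* linear scan of the table, with a fallback for unknown codes */
static const char *my_strerror(int errnum)
{
	size_t i;

	for (i = 0; i < sizeof(errtab) / sizeof(errtab[0]); i++) {
		if (errtab[i].errnum == errnum)
			return errtab[i].errmsg;
	}
	return "Unknown error";
}

int main(void)
{
	printf("%s\n", my_strerror(1002));
	return 0;
}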
b/src/common/slurm_jobacct.c deleted file mode 100644 index 41a55ff04..000000000 --- a/src/common/slurm_jobacct.c +++ /dev/null @@ -1,692 +0,0 @@ -/*****************************************************************************\ - * slurm_jobacct.c - implementation-independent job accounting logging - * functions - ***************************************************************************** - * Copyright (C) 2003 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Jay Windley <jwindley@lnxi.com>, Morris Jette <jette1@llnl.com> - * UCRL-CODE-226842. - * - * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ - -/*****************************************************************************\ - * Modification history - * - * 19 Jan 2005 by Andy Riebs <andy.riebs@hp.com> - * This file is derived from the file slurm_jobcomp.c, written by - * Morris Jette, et al. -\*****************************************************************************/ - -#include <pthread.h> -#include <stdlib.h> -#include <string.h> - -#include "src/common/macros.h" -#include "src/common/plugin.h" -#include "src/common/plugrack.h" -#include "src/common/slurm_jobacct.h" -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/slurmd/slurmstepd/slurmstepd_job.h" - - -/* - * The following global is used by the jobacct/log plugin; it must - * persist when the plugin is reloaded, so we define it here. - */ -extern FILE * JOBACCT_LOGFILE; - -/* - * WARNING: Do not change the order of these fields or add additional - * fields at the beginning of the structure. If you do, job accounting - * plugins will stop working. If you need to add fields, add them - * at the end of the structure. 
- */ -typedef struct slurm_jobacct_ops { - int (*jobacct_init_struct) (jobacctinfo_t *jobacct, - jobacct_id_t *jobacct_id); - jobacctinfo_t *(*jobacct_alloc)(jobacct_id_t *jobacct_id); - void (*jobacct_free) (jobacctinfo_t *jobacct); - int (*jobacct_setinfo) (jobacctinfo_t *jobacct, - enum jobacct_data_type type, - void *data); - int (*jobacct_getinfo) (jobacctinfo_t *jobacct, - enum jobacct_data_type type, - void *data); - void (*jobacct_aggregate) (jobacctinfo_t *dest, - jobacctinfo_t *from); - void (*jobacct_2_sacct) (sacct_t *sacct, - jobacctinfo_t *jobacct); - void (*jobacct_pack) (jobacctinfo_t *jobacct, Buf buffer); - int (*jobacct_unpack) (jobacctinfo_t **jobacct, Buf buffer); - int (*jobacct_init) (char *job_acct_log); - int (*jobacct_fini) (); - int (*jobacct_job_start) (struct job_record *job_ptr); - int (*jobacct_job_complete) (struct job_record *job_ptr); - int (*jobacct_step_start) (struct step_record *step); - int (*jobacct_step_complete) (struct step_record *step); - int (*jobacct_suspend) (struct job_record *job_ptr); - int (*jobacct_startpoll) (int frequency); - int (*jobacct_endpoll) (); - int (*jobacct_set_proctrack_container_id)(uint32_t id); - int (*jobacct_add_task) (pid_t pid, jobacct_id_t *jobacct_id); - jobacctinfo_t *(*jobacct_stat_task)(pid_t pid); - jobacctinfo_t *(*jobacct_remove_task)(pid_t pid); - void (*jobacct_suspend_poll) (); - void (*jobacct_resume_poll) (); - int (*jobacct_node_down) (struct node_record *node_ptr, - time_t event_time, char *reason); - int (*jobacct_node_up) (struct node_record *node_ptr, - time_t event_time); - int (*jobacct_cluster_procs) (uint32_t procs, time_t event_time); - -} slurm_jobacct_ops_t; - -/* - * A global job accounting context. "Global" in the sense that there's - * only one, with static bindings. We don't export it. - */ - -typedef struct slurm_jobacct_context { - char *jobacct_type; - plugrack_t plugin_list; - plugin_handle_t cur_plugin; - int jobacct_errno; - slurm_jobacct_ops_t ops; -} slurm_jobacct_context_t; - -static slurm_jobacct_context_t *g_jobacct_context = NULL; -static pthread_mutex_t g_jobacct_context_lock = PTHREAD_MUTEX_INITIALIZER; - -static int _slurm_jobacct_init(void); -static int _slurm_jobacct_fini(void); - -static slurm_jobacct_context_t * -_slurm_jobacct_context_create( const char *jobacct_type) -{ - slurm_jobacct_context_t *c; - - if ( jobacct_type == NULL ) { - error( "_slurm_jobacct_context_create: no jobacct type" ); - return NULL; - } - - c = xmalloc( sizeof( struct slurm_jobacct_context ) ); - - c->jobacct_errno = SLURM_SUCCESS; - - /* Copy the job completion job completion type. */ - c->jobacct_type = xstrdup( jobacct_type ); - if ( c->jobacct_type == NULL ) { - error( "can't make local copy of jobacct type" ); - xfree( c ); - return NULL; - } - - /* Plugin rack is demand-loaded on first reference. */ - c->plugin_list = NULL; - c->cur_plugin = PLUGIN_INVALID_HANDLE; - c->jobacct_errno = SLURM_SUCCESS; - - return c; -} - -static int -_slurm_jobacct_context_destroy( slurm_jobacct_context_t *c ) -{ - /* - * Must check return code here because plugins might still - * be loaded and active. - */ - if ( c->plugin_list ) { - if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) { - return SLURM_ERROR; - } - } - - xfree( c->jobacct_type ); - xfree( c ); - - return SLURM_SUCCESS; -} - -/* - * Resolve the operations from the plugin. 
- */ -static slurm_jobacct_ops_t * -_slurm_jobacct_get_ops( slurm_jobacct_context_t *c ) -{ - /* - * These strings must be in the same order as the fields declared - * for slurm_jobacct_ops_t. - */ - static const char *syms[] = { - "jobacct_p_init_struct", - "jobacct_p_alloc", - "jobacct_p_free", - "jobacct_p_setinfo", - "jobacct_p_getinfo", - "jobacct_p_aggregate", - "jobacct_p_2_sacct", - "jobacct_p_pack", - "jobacct_p_unpack", - "jobacct_p_init_slurmctld", - "jobacct_p_fini_slurmctld", - "jobacct_p_job_start_slurmctld", - "jobacct_p_job_complete_slurmctld", - "jobacct_p_step_start_slurmctld", - "jobacct_p_step_complete_slurmctld", - "jobacct_p_suspend_slurmctld", - "jobacct_p_startpoll", - "jobacct_p_endpoll", - "jobacct_p_set_proctrack_container_id", - "jobacct_p_add_task", - "jobacct_p_stat_task", - "jobacct_p_remove_task", - "jobacct_p_suspend_poll", - "jobacct_p_resume_poll", - "jobacct_p_node_down", - "jobacct_p_node_up", - "jobacct_p_cluster_procs" - }; - int n_syms = sizeof( syms ) / sizeof( char * ); - int rc = 0; - /* Get the plugin list, if needed. */ - if ( c->plugin_list == NULL ) { - char *plugin_dir; - c->plugin_list = plugrack_create(); - if ( c->plugin_list == NULL ) { - error( "Unable to create a plugin manager" ); - return NULL; - } - - plugrack_set_major_type( c->plugin_list, "jobacct" ); - plugrack_set_paranoia( c->plugin_list, - PLUGRACK_PARANOIA_NONE, - 0 ); - plugin_dir = slurm_get_plugin_dir(); - plugrack_read_dir( c->plugin_list, plugin_dir ); - xfree(plugin_dir); - } - - /* Find the correct plugin. */ - c->cur_plugin = - plugrack_use_by_type( c->plugin_list, c->jobacct_type ); - if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) { - error( "can't find a plugin for type %s", c->jobacct_type ); - return NULL; - } - - /* Dereference the API. 
*/ - if ( (rc = plugin_get_syms( c->cur_plugin, - n_syms, - syms, - (void **) &c->ops )) < n_syms ) { - error( "incomplete jobacct plugin detected only " - "got %d out of %d", - rc, n_syms); - return NULL; - } - - return &c->ops; -} - -static int _slurm_jobacct_init(void) -{ - char *jobacct_type = NULL; - int retval=SLURM_SUCCESS; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - goto done; - - jobacct_type = slurm_get_jobacct_type(); - g_jobacct_context = _slurm_jobacct_context_create( jobacct_type ); - if ( g_jobacct_context == NULL ) { - error( "cannot create a context for %s", jobacct_type ); - retval = SLURM_ERROR; - goto done; - } - - if ( _slurm_jobacct_get_ops( g_jobacct_context ) == NULL ) { - error( "cannot resolve job accounting plugin operations" ); - _slurm_jobacct_context_destroy( g_jobacct_context ); - g_jobacct_context = NULL; - retval = SLURM_ERROR; - } - - done: - slurm_mutex_unlock( &g_jobacct_context_lock ); - xfree(jobacct_type); - - return(retval); -} - -static int _slurm_jobacct_fini(void) -{ - int rc; - - if (!g_jobacct_context) - return SLURM_SUCCESS; - - rc = _slurm_jobacct_context_destroy(g_jobacct_context); - g_jobacct_context = NULL; - return rc; -} - -extern int jobacct_init(void) -{ - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - return SLURM_SUCCESS; -} - -extern int jobacct_g_init_struct(jobacctinfo_t *jobacct, - jobacct_id_t *jobacct_id) -{ - int retval = SLURM_SUCCESS; - - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_init_struct)) - (jobacct, jobacct_id); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern jobacctinfo_t *jobacct_g_alloc(jobacct_id_t *jobacct_id) -{ - jobacctinfo_t *jobacct = NULL; - - if (_slurm_jobacct_init() < 0) - return jobacct; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - jobacct = (*(g_jobacct_context->ops.jobacct_alloc)) - (jobacct_id); - - slurm_mutex_unlock( &g_jobacct_context_lock ); - return jobacct; -} - -extern void jobacct_g_free(jobacctinfo_t *jobacct) -{ - if (_slurm_jobacct_init() < 0) - return; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - (*(g_jobacct_context->ops.jobacct_free))(jobacct); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return; -} - -extern int jobacct_g_setinfo(jobacctinfo_t *jobacct, - enum jobacct_data_type type, void *data) -{ - int retval = SLURM_SUCCESS; - - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_setinfo)) - (jobacct, type, data); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_getinfo(jobacctinfo_t *jobacct, - enum jobacct_data_type type, void *data) -{ - int retval = SLURM_SUCCESS; - - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_getinfo)) - (jobacct, type, data); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern void jobacct_g_aggregate(jobacctinfo_t *dest, jobacctinfo_t *from) -{ - if (_slurm_jobacct_init() < 0) - return; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - (*(g_jobacct_context->ops.jobacct_aggregate))(dest, from); - slurm_mutex_unlock( &g_jobacct_context_lock ); 
- return; -} - -extern void jobacct_g_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct) -{ - if (_slurm_jobacct_init() < 0) - return; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - (*(g_jobacct_context->ops.jobacct_2_sacct))(sacct, jobacct); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return; -} - -extern void jobacct_g_pack(jobacctinfo_t *jobacct, Buf buffer) -{ - if (_slurm_jobacct_init() < 0) - return; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - (*(g_jobacct_context->ops.jobacct_pack))(jobacct, buffer); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return; -} - -extern int jobacct_g_unpack(jobacctinfo_t **jobacct, Buf buffer) -{ - int retval = SLURM_SUCCESS; - - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_unpack)) - (jobacct, buffer); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_init_slurmctld(char *job_acct_log) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_init)) - (job_acct_log); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_fini_slurmctld() -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_fini))(); - slurm_mutex_unlock( &g_jobacct_context_lock ); - - if (_slurm_jobacct_fini() < 0) - return SLURM_ERROR; - return retval; -} - -extern int jobacct_g_job_start_slurmctld(struct job_record *job_ptr) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_job_start)) - (job_ptr); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_job_complete_slurmctld(struct job_record *job_ptr) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_job_complete)) - (job_ptr); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_step_start_slurmctld(struct step_record *step_ptr) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_step_start)) - (step_ptr); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_step_complete_slurmctld(struct step_record *step_ptr) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_step_complete)) - (step_ptr); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_suspend_slurmctld(struct job_record *job_ptr) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = 
(*(g_jobacct_context->ops.jobacct_suspend)) - (job_ptr); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_startpoll(int frequency) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_startpoll)) - (frequency); - - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_endpoll() -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_endpoll))(); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_set_proctrack_container_id(uint32_t id) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops. - jobacct_set_proctrack_container_id))(id); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; - -} - -extern int jobacct_g_add_task(pid_t pid, jobacct_id_t *jobacct_id) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_add_task)) - (pid, jobacct_id); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern jobacctinfo_t *jobacct_g_stat_task(pid_t pid) -{ - jobacctinfo_t *jobacct = NULL; - if (_slurm_jobacct_init() < 0) - return jobacct; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - jobacct = (*(g_jobacct_context->ops.jobacct_stat_task))(pid); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return jobacct; -} - -extern jobacctinfo_t *jobacct_g_remove_task(pid_t pid) -{ - jobacctinfo_t *jobacct = NULL; - if (_slurm_jobacct_init() < 0) - return jobacct; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - jobacct = (*(g_jobacct_context->ops.jobacct_remove_task))(pid); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return jobacct; -} - -extern void jobacct_g_suspend_poll() -{ - if (_slurm_jobacct_init() < 0) - return; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - (*(g_jobacct_context->ops.jobacct_suspend_poll))(); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return; -} - -extern void jobacct_g_resume_poll() -{ - if (_slurm_jobacct_init() < 0) - return; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - (*(g_jobacct_context->ops.jobacct_resume_poll))(); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return; -} - -extern int jobacct_g_node_down(struct node_record *node_ptr, time_t event_time, - char *reason) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_node_down)) - (node_ptr, event_time, reason); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_node_up(struct node_record *node_ptr, time_t event_time) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context 
) - retval = (*(g_jobacct_context->ops.jobacct_node_up)) - (node_ptr, event_time); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} - -extern int jobacct_g_cluster_procs(uint32_t procs, time_t event_time) -{ - int retval = SLURM_SUCCESS; - if (_slurm_jobacct_init() < 0) - return SLURM_ERROR; - - slurm_mutex_lock( &g_jobacct_context_lock ); - if ( g_jobacct_context ) - retval = (*(g_jobacct_context->ops.jobacct_cluster_procs)) - (procs, event_time); - slurm_mutex_unlock( &g_jobacct_context_lock ); - return retval; -} diff --git a/src/common/slurm_jobacct_gather.c b/src/common/slurm_jobacct_gather.c new file mode 100644 index 000000000..2902c4a27 --- /dev/null +++ b/src/common/slurm_jobacct_gather.c @@ -0,0 +1,524 @@ +/*****************************************************************************\ + * slurm_jobacct_gather.c - implementation-independent job accounting logging + * functions + ***************************************************************************** + * Copyright (C) 2003-2007/ The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Jay Windley <jwindley@lnxi.com>, Morris Jette <jette1@llnl.com> + * LLNL-CODE-402394. + * + * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +/*****************************************************************************\ + * Modification history + * + * 19 Jan 2005 by Andy Riebs <andy.riebs@hp.com> + * This file is derived from the file slurm_jobcomp.c, written by + * Morris Jette, et al. 
+\*****************************************************************************/ + +#include <pthread.h> +#include <stdlib.h> +#include <string.h> + +#include "src/common/macros.h" +#include "src/common/plugin.h" +#include "src/common/plugrack.h" +#include "src/common/slurm_jobacct_gather.h" +#include "src/common/xmalloc.h" +#include "src/common/xstring.h" +#include "src/slurmd/slurmstepd/slurmstepd_job.h" + + +/* + * WARNING: Do not change the order of these fields or add additional + * fields at the beginning of the structure. If you do, job accounting + * plugins will stop working. If you need to add fields, add them + * at the end of the structure. + */ +typedef struct slurm_jobacct_gather_ops { + jobacctinfo_t *(*jobacct_gather_create)(jobacct_id_t *jobacct_id); + void (*jobacct_gather_destroy) (jobacctinfo_t *jobacct); + int (*jobacct_gather_setinfo) (jobacctinfo_t *jobacct, + enum jobacct_data_type type, + void *data); + int (*jobacct_gather_getinfo) (jobacctinfo_t *jobacct, + enum jobacct_data_type type, + void *data); + void (*jobacct_gather_pack) (jobacctinfo_t *jobacct, Buf buffer); + int (*jobacct_gather_unpack) (jobacctinfo_t **jobacct, Buf buffer); + void (*jobacct_gather_aggregate) (jobacctinfo_t *dest, + jobacctinfo_t *from); + int (*jobacct_gather_startpoll) (uint16_t frequency); + int (*jobacct_gather_endpoll) (); + void (*jobacct_gather_change_poll) (uint16_t frequency); + void (*jobacct_gather_suspend_poll) (); + void (*jobacct_gather_resume_poll) (); + int (*jobacct_gather_set_proctrack_container_id)(uint32_t id); + int (*jobacct_gather_add_task) (pid_t pid, jobacct_id_t *jobacct_id); + jobacctinfo_t *(*jobacct_gather_stat_task)(pid_t pid); + jobacctinfo_t *(*jobacct_gather_remove_task)(pid_t pid); + void (*jobacct_gather_2_sacct) (sacct_t *sacct, + jobacctinfo_t *jobacct); +} slurm_jobacct_gather_ops_t; + +/* + * A global job accounting context. "Global" in the sense that there's + * only one, with static bindings. We don't export it. + */ + +typedef struct slurm_jobacct_gather_context { + char *jobacct_gather_type; + plugrack_t plugin_list; + plugin_handle_t cur_plugin; + int jobacct_gather_errno; + slurm_jobacct_gather_ops_t ops; +} slurm_jobacct_gather_context_t; + +static slurm_jobacct_gather_context_t *g_jobacct_gather_context = NULL; +static pthread_mutex_t g_jobacct_gather_context_lock = PTHREAD_MUTEX_INITIALIZER; + +static int _slurm_jobacct_gather_init(void); + +static slurm_jobacct_gather_context_t * +_slurm_jobacct_gather_context_create( const char *jobacct_gather_type) +{ + slurm_jobacct_gather_context_t *c; + + if ( jobacct_gather_type == NULL ) { + error("_slurm_jobacct_gather_context_create: no jobacct type"); + return NULL; + } + + c = xmalloc( sizeof( struct slurm_jobacct_gather_context ) ); + + c->jobacct_gather_errno = SLURM_SUCCESS; + + /* Copy the job completion job completion type. */ + c->jobacct_gather_type = xstrdup( jobacct_gather_type ); + if ( c->jobacct_gather_type == NULL ) { + error( "can't make local copy of jobacct type" ); + xfree( c ); + return NULL; + } + + /* Plugin rack is demand-loaded on first reference. */ + c->plugin_list = NULL; + c->cur_plugin = PLUGIN_INVALID_HANDLE; + c->jobacct_gather_errno = SLURM_SUCCESS; + + return c; +} + +static int +_slurm_jobacct_gather_context_destroy( slurm_jobacct_gather_context_t *c ) +{ + /* + * Must check return code here because plugins might still + * be loaded and active. 
+ */ + if ( c->plugin_list ) { + if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) { + return SLURM_ERROR; + } + } + + xfree( c->jobacct_gather_type ); + xfree( c ); + + return SLURM_SUCCESS; +} + +/* + * Resolve the operations from the plugin. + */ +static slurm_jobacct_gather_ops_t * +_slurm_jobacct_gather_get_ops( slurm_jobacct_gather_context_t *c ) +{ + /* + * These strings must be in the same order as the fields declared + * for slurm_jobacct_gather_ops_t. + */ + static const char *syms[] = { + "jobacct_gather_p_create", + "jobacct_gather_p_destroy", + "jobacct_gather_p_setinfo", + "jobacct_gather_p_getinfo", + "jobacct_gather_p_pack", + "jobacct_gather_p_unpack", + "jobacct_gather_p_aggregate", + "jobacct_gather_p_startpoll", + "jobacct_gather_p_endpoll", + "jobacct_gather_p_change_poll", + "jobacct_gather_p_suspend_poll", + "jobacct_gather_p_resume_poll", + "jobacct_gather_p_set_proctrack_container_id", + "jobacct_gather_p_add_task", + "jobacct_gather_p_stat_task", + "jobacct_gather_p_remove_task", + "jobacct_gather_p_2_sacct" + }; + int n_syms = sizeof( syms ) / sizeof( char * ); + int rc = 0; + /* Get the plugin list, if needed. */ + if ( c->plugin_list == NULL ) { + char *plugin_dir; + c->plugin_list = plugrack_create(); + if ( c->plugin_list == NULL ) { + error( "Unable to create a plugin manager" ); + return NULL; + } + + plugrack_set_major_type( c->plugin_list, "jobacct_gather" ); + plugrack_set_paranoia( c->plugin_list, + PLUGRACK_PARANOIA_NONE, + 0 ); + plugin_dir = slurm_get_plugin_dir(); + plugrack_read_dir( c->plugin_list, plugin_dir ); + xfree(plugin_dir); + } + + /* Find the correct plugin. */ + c->cur_plugin = + plugrack_use_by_type( c->plugin_list, c->jobacct_gather_type ); + if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) { + error( "can't find a plugin for type %s", + c->jobacct_gather_type ); + return NULL; + } + + /* Dereference the API. 
*/ + if ( (rc = plugin_get_syms( c->cur_plugin, + n_syms, + syms, + (void **) &c->ops )) < n_syms ) { + error( "incomplete jobacct_gather plugin detected only " + "got %d out of %d", + rc, n_syms); + return NULL; + } + + return &c->ops; + +} + +static int _slurm_jobacct_gather_init(void) +{ + char *jobacct_gather_type = NULL; + int retval=SLURM_SUCCESS; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + goto done; + + jobacct_gather_type = slurm_get_jobacct_gather_type(); + g_jobacct_gather_context = _slurm_jobacct_gather_context_create( + jobacct_gather_type); + if ( g_jobacct_gather_context == NULL ) { + error( "cannot create a context for %s", jobacct_gather_type ); + retval = SLURM_ERROR; + goto done; + } + + if ( _slurm_jobacct_gather_get_ops( g_jobacct_gather_context ) + == NULL ) { + error( "cannot resolve job accounting plugin operations" ); + _slurm_jobacct_gather_context_destroy( + g_jobacct_gather_context); + g_jobacct_gather_context = NULL; + retval = SLURM_ERROR; + } + + done: + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + xfree(jobacct_gather_type); + + return(retval); +} + +extern int slurm_jobacct_gather_init(void) +{ + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +extern int slurm_jobacct_gather_fini(void) +{ + int rc; + + if (!g_jobacct_gather_context) + return SLURM_SUCCESS; + + rc = _slurm_jobacct_gather_context_destroy(g_jobacct_gather_context); + g_jobacct_gather_context = NULL; + return rc; +} + +extern jobacctinfo_t *jobacct_gather_g_create(jobacct_id_t *jobacct_id) +{ + jobacctinfo_t *jobacct = NULL; + + if (_slurm_jobacct_gather_init() < 0) + return jobacct; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + jobacct = (*(g_jobacct_gather_context-> + ops.jobacct_gather_create))(jobacct_id); + + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return jobacct; +} + +extern void jobacct_gather_g_destroy(jobacctinfo_t *jobacct) +{ + if (_slurm_jobacct_gather_init() < 0) + return; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + (*(g_jobacct_gather_context->ops.jobacct_gather_destroy)) + (jobacct); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return; +} + +extern int jobacct_gather_g_setinfo(jobacctinfo_t *jobacct, + enum jobacct_data_type type, void *data) +{ + int retval = SLURM_SUCCESS; + + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + retval = (*(g_jobacct_gather_context-> + ops.jobacct_gather_setinfo))(jobacct, type, data); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return retval; +} + +extern int jobacct_gather_g_getinfo(jobacctinfo_t *jobacct, + enum jobacct_data_type type, void *data) +{ + int retval = SLURM_SUCCESS; + + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + retval = (*(g_jobacct_gather_context-> + ops.jobacct_gather_getinfo))(jobacct, type, data); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return retval; +} + +extern void jobacct_gather_g_pack(jobacctinfo_t *jobacct, Buf buffer) +{ + if (_slurm_jobacct_gather_init() < 0) + return; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + (*(g_jobacct_gather_context->ops.jobacct_gather_pack)) + (jobacct, buffer); + slurm_mutex_unlock( 
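[Editorial sketch, not part of the patch] The comments above state the load-bearing invariant of both the removed jobacct loader and the new jobacct_gather loader: the syms[] array must list symbol names in the same order as the function-pointer fields of the ops struct, because plugin_get_syms() fills the struct positionally. The stripped-down, self-contained program below shows that technique with plain dlopen/dlsym; SLURM's plugrack layer is not reproduced, and the demo_* names and plugin path are invented for illustration.

#include <dlfcn.h>
#include <stdio.h>

typedef struct {
	int (*startpoll)(unsigned short freq);	/* must remain field #0 */
	int (*endpoll)(void);			/* must remain field #1 */
} demo_ops_t;

static const char *demo_syms[] = {
	"demo_p_startpoll",	/* resolves into field #0 */
	"demo_p_endpoll",	/* resolves into field #1 */
};

/* Fill 'ops' positionally: syms[i] lands in the i-th pointer slot,
 * exactly the assumption the ordering comments in the patch protect. */
static int resolve_ops(void *handle, demo_ops_t *ops)
{
	void **slots = (void **) ops;
	size_t i, n = sizeof(demo_syms) / sizeof(demo_syms[0]);

	for (i = 0; i < n; i++) {
		slots[i] = dlsym(handle, demo_syms[i]);
		if (!slots[i]) {
			fprintf(stderr, "incomplete plugin: missing %s\n",
				demo_syms[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	demo_ops_t ops;
	void *h = dlopen("./demo_plugin.so", RTLD_NOW);	/* hypothetical .so */

	if (!h || resolve_ops(h, &ops) < 0)
		return 1;
	ops.startpoll(30);
	ops.endpoll();
	dlclose(h);
	return 0;
}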
&g_jobacct_gather_context_lock ); + return; +} + +extern int jobacct_gather_g_unpack(jobacctinfo_t **jobacct, Buf buffer) +{ + int retval = SLURM_SUCCESS; + + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + retval = (*(g_jobacct_gather_context-> + ops.jobacct_gather_unpack))(jobacct, buffer); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return retval; +} + +extern void jobacct_gather_g_aggregate(jobacctinfo_t *dest, + jobacctinfo_t *from) +{ + if (_slurm_jobacct_gather_init() < 0) + return; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + (*(g_jobacct_gather_context->ops.jobacct_gather_aggregate)) + (dest, from); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return; +} + +extern int jobacct_gather_g_startpoll(uint16_t frequency) +{ + int retval = SLURM_SUCCESS; + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + retval = (*(g_jobacct_gather_context->ops.jobacct_gather_startpoll)) + (frequency); + + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return retval; +} + +extern int jobacct_gather_g_endpoll() +{ + int retval = SLURM_SUCCESS; + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + retval = (*(g_jobacct_gather_context->ops.jobacct_gather_endpoll))(); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return retval; +} + +extern void jobacct_gather_g_change_poll(uint16_t frequency) +{ + if (_slurm_jobacct_gather_init() < 0) + return; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + (*(g_jobacct_gather_context->ops.jobacct_gather_change_poll)) + (frequency); + + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); +} + +extern void jobacct_gather_g_suspend_poll() +{ + if (_slurm_jobacct_gather_init() < 0) + return; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + (*(g_jobacct_gather_context->ops.jobacct_gather_suspend_poll))(); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return; +} + +extern void jobacct_gather_g_resume_poll() +{ + if (_slurm_jobacct_gather_init() < 0) + return; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + (*(g_jobacct_gather_context->ops.jobacct_gather_resume_poll))(); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return; +} + +extern int jobacct_gather_g_set_proctrack_container_id(uint32_t id) +{ + int retval = SLURM_SUCCESS; + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + retval = (*(g_jobacct_gather_context->ops. 
+ jobacct_gather_set_proctrack_container_id))(id); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return retval; + +} + +extern int jobacct_gather_g_add_task(pid_t pid, jobacct_id_t *jobacct_id) +{ + int retval = SLURM_SUCCESS; + if (_slurm_jobacct_gather_init() < 0) + return SLURM_ERROR; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + retval = (*(g_jobacct_gather_context-> + ops.jobacct_gather_add_task))(pid, jobacct_id); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return retval; +} + +extern jobacctinfo_t *jobacct_gather_g_stat_task(pid_t pid) +{ + jobacctinfo_t *jobacct = NULL; + if (_slurm_jobacct_gather_init() < 0) + return jobacct; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + jobacct = (*(g_jobacct_gather_context-> + ops.jobacct_gather_stat_task))(pid); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return jobacct; +} + +extern jobacctinfo_t *jobacct_gather_g_remove_task(pid_t pid) +{ + jobacctinfo_t *jobacct = NULL; + if (_slurm_jobacct_gather_init() < 0) + return jobacct; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + jobacct = (*(g_jobacct_gather_context-> + ops.jobacct_gather_remove_task))(pid); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return jobacct; +} + +extern void jobacct_gather_g_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct) +{ + if (_slurm_jobacct_gather_init() < 0) + return; + + slurm_mutex_lock( &g_jobacct_gather_context_lock ); + if ( g_jobacct_gather_context ) + (*(g_jobacct_gather_context->ops.jobacct_gather_2_sacct)) + (sacct, jobacct); + slurm_mutex_unlock( &g_jobacct_gather_context_lock ); + return; +} diff --git a/src/common/slurm_jobacct.h b/src/common/slurm_jobacct_gather.h similarity index 50% rename from src/common/slurm_jobacct.h rename to src/common/slurm_jobacct_gather.h index a7c4ac55e..3059872e2 100644 --- a/src/common/slurm_jobacct.h +++ b/src/common/slurm_jobacct_gather.h @@ -1,11 +1,11 @@ /*****************************************************************************\ - * slurm_jobacct.h - implementation-independent job completion logging + * slurm_jobacct_gather.h - implementation-independent job completion logging * API definitions ***************************************************************************** * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.com> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. 
* @@ -47,70 +47,65 @@ \*****************************************************************************/ -#ifndef __SLURM_JOBACCT_H__ -#define __SLURM_JOBACCT_H__ +#ifndef __SLURM_JOBACCT_GATHER_H__ +#define __SLURM_JOBACCT_GATHER_H__ + +#if HAVE_CONFIG_H +# include "config.h" +# if HAVE_INTTYPES_H +# include <inttypes.h> +# else +# if HAVE_STDINT_H +# include <stdint.h> +# endif +# endif /* HAVE_INTTYPES_H */ +#else /* !HAVE_CONFIG_H */ +# include <inttypes.h> +#endif /* HAVE_CONFIG_H */ -#if HAVE_STDINT_H -# include <stdint.h> /* for uint16_t, uint32_t definitions */ -#endif -#if HAVE_INTTYPES_H -# include <inttypes.h> /* for uint16_t, uint32_t definitions */ -#endif #include <sys/resource.h> #include <sys/types.h> #include <time.h> #include <unistd.h> -#include "src/slurmd/slurmstepd/slurmstepd_job.h" -#include "src/slurmctld/slurmctld.h" -#include "src/sacct/sacct_stat.h" +#include <slurm/slurm.h> -/* common */ -extern int jobacct_init(void); /* load the plugin */ -extern int jobacct_g_init_struct(jobacctinfo_t *jobacct, - jobacct_id_t *jobacct_id); -/* must free jobacctinfo_t if not NULL */ -extern jobacctinfo_t *jobacct_g_alloc(jobacct_id_t *jobacct_id); -extern void jobacct_g_free(jobacctinfo_t *jobacct); -extern int jobacct_g_setinfo(jobacctinfo_t *jobacct, - enum jobacct_data_type type, void *data); -extern int jobacct_g_getinfo(jobacctinfo_t *jobacct, - enum jobacct_data_type type, void *data); -extern void jobacct_g_aggregate(jobacctinfo_t *dest, jobacctinfo_t *from); -extern void jobacct_g_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct); -extern void jobacct_g_pack(jobacctinfo_t *jobacct, Buf buffer); -extern int jobacct_g_unpack(jobacctinfo_t **jobacct, Buf buffer); - -/*functions used in slurmctld */ -extern int jobacct_g_init_slurmctld(char *job_acct_log); -extern int jobacct_g_fini_slurmctld(); -extern int jobacct_g_job_start_slurmctld(struct job_record *job_ptr); -extern int jobacct_g_job_complete_slurmctld(struct job_record *job_ptr); -extern int jobacct_g_step_start_slurmctld(struct step_record *step); -extern int jobacct_g_step_complete_slurmctld(struct step_record *step); -extern int jobacct_g_suspend_slurmctld(struct job_record *job_ptr); - -/*functions used in slurmstepd */ -extern int jobacct_g_startpoll(int frequency); -extern int jobacct_g_endpoll(); -extern int jobacct_g_set_proctrack_container_id(uint32_t id); -extern int jobacct_g_add_task(pid_t pid, jobacct_id_t *jobacct_id); +#include "src/common/macros.h" +#include "src/common/pack.h" +#include "src/common/list.h" +#include "src/common/xmalloc.h" +#include "src/common/jobacct_common.h" + +extern int slurm_jobacct_gather_init(void); /* load the plugin */ +extern int slurm_jobacct_gather_fini(void); /* unload the plugin */ + +extern jobacctinfo_t *jobacct_gather_g_create(jobacct_id_t *jobacct_id); +extern void jobacct_gather_g_destroy(jobacctinfo_t *jobacct); +extern int jobacct_gather_g_setinfo(jobacctinfo_t *jobacct, + enum jobacct_data_type type, void *data); +extern int jobacct_gather_g_getinfo(jobacctinfo_t *jobacct, + enum jobacct_data_type type, void *data); +extern void jobacct_gather_g_pack(jobacctinfo_t *jobacct, Buf buffer); +extern int jobacct_gather_g_unpack(jobacctinfo_t **jobacct, Buf buffer); + +extern void jobacct_gather_g_aggregate(jobacctinfo_t *dest, + jobacctinfo_t *from); + +extern void jobacct_gather_g_change_poll(uint16_t frequency); +extern int jobacct_gather_g_startpoll(uint16_t frequency); +extern int jobacct_gather_g_endpoll(); +extern void jobacct_gather_g_suspend_poll(); +extern void 
jobacct_gather_g_resume_poll(); + +extern int jobacct_gather_g_set_proctrack_container_id(uint32_t id); +extern int jobacct_gather_g_add_task(pid_t pid, jobacct_id_t *jobacct_id); /* must free jobacctinfo_t if not NULL */ -extern jobacctinfo_t *jobacct_g_stat_task(pid_t pid); +extern jobacctinfo_t *jobacct_gather_g_stat_task(pid_t pid); /* must free jobacctinfo_t if not NULL */ -extern jobacctinfo_t *jobacct_g_remove_task(pid_t pid); -extern void jobacct_g_suspend_poll(); -extern void jobacct_g_resume_poll(); - -/* functions only to be used by the gold plugin in 1.2 since we had to - * make some mods to get the plugin to work with node states also - */ -extern int jobacct_g_node_down(struct node_record *node_ptr, time_t event_time, - char *reason); -extern int jobacct_g_node_up(struct node_record *node_ptr, time_t event_time); -extern int jobacct_g_cluster_procs(uint32_t procs, time_t event_time); +extern jobacctinfo_t *jobacct_gather_g_remove_task(pid_t pid); +extern void jobacct_gather_g_2_sacct(sacct_t *sacct, jobacctinfo_t *jobacct); -#endif /*__SLURM_JOBACCT_H__*/ +#endif /*__SLURM_JOBACCT_GATHER_H__*/ diff --git a/src/common/slurm_jobcomp.c b/src/common/slurm_jobcomp.c index ce6ae16f4..3de84f64c 100644 --- a/src/common/slurm_jobcomp.c +++ b/src/common/slurm_jobcomp.c @@ -5,7 +5,7 @@ * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windley <jwindley@lnxi.com>, Morris Jette <jette1@llnl.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -65,6 +65,9 @@ typedef struct slurm_jobcomp_ops { int (*job_write) ( struct job_record *job_ptr); int (*sa_errno) ( void ); char * (*job_strerror) ( int errnum ); + List (*get_jobs) ( List selected_steps, + List selected_parts, void *params ); + void (*archive) ( List selected_parts, void *params ); } slurm_jobcomp_ops_t; @@ -146,7 +149,9 @@ _slurm_jobcomp_get_ops( slurm_jobcomp_context_t c ) "slurm_jobcomp_set_location", "slurm_jobcomp_log_record", "slurm_jobcomp_get_errno", - "slurm_jobcomp_strerror" + "slurm_jobcomp_strerror", + "slurm_jobcomp_get_jobs", + "slurm_jobcomp_archive" }; int n_syms = sizeof( syms ) / sizeof( char * ); @@ -188,6 +193,33 @@ _slurm_jobcomp_get_ops( slurm_jobcomp_context_t c ) return &c->ops; } +extern void +jobcomp_destroy_job(void *object) +{ + jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object; + if (job) { + xfree(job->partition); + xfree(job->start_time); + xfree(job->end_time); + xfree(job->uid_name); + xfree(job->gid_name); + xfree(job->nodelist); + xfree(job->jobname); + xfree(job->state); + xfree(job->timelimit); +#ifdef HAVE_BG + xfree(job->blockid); + xfree(job->connection); + xfree(job->reboot); + xfree(job->rotate); + xfree(job->geo); + xfree(job->bg_start_point); +#endif + xfree(job); + } +} + + extern int g_slurm_jobcomp_init( char *jobcomp_loc ) { @@ -281,3 +313,29 @@ g_slurm_jobcomp_strerror(int errnum) slurm_mutex_unlock( &context_lock ); return retval; } + +extern List +g_slurm_jobcomp_get_jobs(List selected_steps, + List selected_parts, void *params) +{ + slurm_mutex_lock( &context_lock ); + if ( g_context ) + return (*(g_context->ops.get_jobs)) + (selected_steps, selected_parts, params); + else + error ("slurm_jobcomp plugin context not initialized"); + slurm_mutex_unlock( &context_lock ); + return NULL ; +} + +extern void +g_slurm_jobcomp_archive(List selected_parts, void *params) 
+{ + slurm_mutex_lock( &context_lock ); + if ( g_context ) + (*(g_context->ops.archive))(selected_parts, params); + else + error ("slurm_jobcomp plugin context not initialized"); + slurm_mutex_unlock( &context_lock ); + return; +} diff --git a/src/common/slurm_jobcomp.h b/src/common/slurm_jobcomp.h index 0397e46b0..b99b7b418 100644 --- a/src/common/slurm_jobcomp.h +++ b/src/common/slurm_jobcomp.h @@ -5,7 +5,7 @@ * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.com> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -54,8 +54,35 @@ #include "src/slurmctld/slurmctld.h" +typedef struct { + uint32_t jobid; + char *partition; + char *start_time; + char *end_time; + uint32_t uid; + char *uid_name; + uint32_t gid; + char *gid_name; + uint32_t node_cnt; + char *nodelist; + char *jobname; + char *state; + char *timelimit; +#ifdef HAVE_BG + char *blockid; + char *connection; + char *reboot; + char *rotate; + uint32_t max_procs; + char *geo; + char *bg_start_point; +#endif +} jobcomp_job_rec_t; + typedef struct slurm_jobcomp_context * slurm_jobcomp_context_t; +extern void jobcomp_destroy_job(void *object); + /* initialization of job completion logging */ extern int g_slurm_jobcomp_init(char *jobcomp_loc); @@ -71,5 +98,19 @@ extern int g_slurm_jobcomp_errno(void); /* convert job completion logger specific error code to a string */ extern char *g_slurm_jobcomp_strerror(int errnum); +/* + * get info from the storage + * returns List of jobcomp_job_rec_t * + * note List needs to be freed when called + */ +extern List g_slurm_jobcomp_get_jobs(List selected_steps, + List selected_parts, + void *params); + +/* + * expire old info from the storage + */ +extern void g_slurm_jobcomp_archive(List selected_parts, void *params); + #endif /*__SLURM_JOBCOMP_H__*/ diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c index d8d87d88c..dbf3e2c3b 100644 --- a/src/common/slurm_protocol_api.c +++ b/src/common/slurm_protocol_api.c @@ -1,10 +1,11 @@ /*****************************************************************************\ * slurm_protocol_api.c - high-level slurm communication functions ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
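[Editorial sketch, not part of the patch] The jobcomp header hunk above notes that g_slurm_jobcomp_get_jobs() returns a List of jobcomp_job_rec_t records that the caller must free, and the new jobcomp_destroy_job() helper exists to release one record. A hypothetical consumer is sketched below using SLURM's list API as it appears elsewhere in this patch; dump_completed_jobs is an invented name, and it is assumed that the plugin created the List with jobcomp_destroy_job as its destructor so that list_destroy() also frees the records.

#include "src/common/slurm_jobcomp.h"
#include "src/common/list.h"
#include "src/common/log.h"

void dump_completed_jobs(List selected_steps, List selected_parts,
			 void *params)
{
	List jobs = g_slurm_jobcomp_get_jobs(selected_steps,
					     selected_parts, params);
	ListIterator itr;
	jobcomp_job_rec_t *job;

	if (!jobs)
		return;

	itr = list_iterator_create(jobs);
	while ((job = list_next(itr))) {
		info("job %u on %s finished in state %s",
		     job->jobid, job->nodelist, job->state);
	}
	list_iterator_destroy(itr);

	/* assumed: the plugin attached jobcomp_destroy_job as the list
	 * destructor, so this frees the records along with the list */
	list_destroy(jobs);
}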
@@ -69,6 +70,7 @@ #include "src/common/xstring.h" #include "src/common/log.h" #include "src/common/forward.h" +#include "src/slurmdbd/read_config.h" /* EXTERNAL VARIABLES */ @@ -85,7 +87,12 @@ static slurm_protocol_config_t *proto_conf = &proto_conf_default; static int message_timeout = -1; /* STATIC FUNCTIONS */ -static void _remap_slurmctld_errno(void); +static char *_global_auth_key(void); +static void _remap_slurmctld_errno(void); +static int _unpack_msg_uid(Buf buffer); + +/* define the slurmdbd_options flag */ +slurm_dbd_conf_t *slurmdbd_conf = NULL; /**********************************************************************\ * protocol configuration functions @@ -182,17 +189,71 @@ void slurm_api_clear_config(void) /* slurm_mutex_lock(&config_lock); */ /* } */ +/* slurm_get_def_mem_per_task + * RET DefMemPerTask value from slurm.conf + */ +uint32_t slurm_get_def_mem_per_task(void) +{ + uint32_t mem_per_task = 0; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + mem_per_task = conf->def_mem_per_task; + slurm_conf_unlock(); + } + return mem_per_task; +} + +/* slurm_get_max_mem_per_task + * RET MaxMemPerTask value from slurm.conf + */ +uint32_t slurm_get_max_mem_per_task(void) +{ + uint32_t mem_per_task = 0; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + mem_per_task = conf->max_mem_per_task; + slurm_conf_unlock(); + } + return mem_per_task; +} + +/* slurm_get_epilog_msg_time + * RET EpilogMsgTime value from slurm.conf + */ +uint32_t slurm_get_epilog_msg_time(void) +{ + uint32_t epilog_msg_time = 0; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + epilog_msg_time = conf->epilog_msg_time; + slurm_conf_unlock(); + } + return epilog_msg_time; +} + /* slurm_get_env_timeout * return default timeout for srun/sbatch --get-user-env option */ int inline slurm_get_env_timeout(void) { - int timeout; + int timeout = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - timeout = conf->get_env_timeout; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + timeout = conf->get_env_timeout; + slurm_conf_unlock(); + } return timeout; } @@ -202,12 +263,15 @@ int inline slurm_get_env_timeout(void) */ char *slurm_get_mpi_default(void) { - char *mpi_default; + char *mpi_default = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - mpi_default = xstrdup(conf->mpi_default); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + mpi_default = xstrdup(conf->mpi_default); + slurm_conf_unlock(); + } return mpi_default; } @@ -216,15 +280,19 @@ char *slurm_get_mpi_default(void) */ uint16_t slurm_get_msg_timeout(void) { - uint16_t msg_timeout; + uint16_t msg_timeout = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - msg_timeout = conf->msg_timeout; - slurm_conf_unlock(); + if(slurmdbd_conf) { + msg_timeout = slurmdbd_conf->msg_timeout; + } else { + conf = slurm_conf_lock(); + msg_timeout = conf->msg_timeout; + slurm_conf_unlock(); #ifdef MEMORY_LEAK_DEBUG - msg_timeout *= 4; + msg_timeout *= 4; #endif + } return msg_timeout; } @@ -234,12 +302,16 @@ uint16_t slurm_get_msg_timeout(void) */ char *slurm_get_plugin_dir(void) { - char *plugin_dir; + char *plugin_dir = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - plugin_dir = xstrdup(conf->plugindir); - slurm_conf_unlock(); + if(slurmdbd_conf) { + plugin_dir = xstrdup(slurmdbd_conf->plugindir); + } else { + conf = slurm_conf_lock(); + plugin_dir = 
xstrdup(conf->plugindir); + slurm_conf_unlock(); + } return plugin_dir; } @@ -249,12 +321,15 @@ char *slurm_get_plugin_dir(void) */ char *slurm_get_state_save_location(void) { - char *state_save_loc; + char *state_save_loc = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - state_save_loc = xstrdup(conf->state_save_location); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + state_save_loc = xstrdup(conf->state_save_location); + slurm_conf_unlock(); + } return state_save_loc; } @@ -264,26 +339,87 @@ char *slurm_get_state_save_location(void) */ char *slurm_get_auth_type(void) { - char *auth_type; - slurm_ctl_conf_t *conf; + char *auth_type = NULL; + slurm_ctl_conf_t *conf = NULL; - conf = slurm_conf_lock(); - auth_type = xstrdup(conf->authtype); - slurm_conf_unlock(); + if(slurmdbd_conf) { + auth_type = xstrdup(slurmdbd_conf->auth_type); + } else { + conf = slurm_conf_lock(); + auth_type = xstrdup(conf->authtype); + slurm_conf_unlock(); + } return auth_type; } +/* slurm_get_checkpoint_type + * returns the checkpoint_type from slurmctld_conf object + * RET char * - checkpoint type, MUST be xfreed by caller + */ +extern char *slurm_get_checkpoint_type(void) +{ + char *checkpoint_type = NULL; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + checkpoint_type = xstrdup(conf->checkpoint_type); + slurm_conf_unlock(); + } + return checkpoint_type; +} + +/* slurm_get_cluster_name + * returns the cluster name from slurmctld_conf object + * RET char * - cluster name, MUST be xfreed by caller + */ +char *slurm_get_cluster_name(void) +{ + char *name = NULL; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + name = xstrdup(conf->cluster_name); + slurm_conf_unlock(); + } + return name; +} + +/* slurm_get_crypto_type + * returns the crypto_type from slurmctld_conf object + * RET char * - crypto type, MUST be xfreed by caller + */ +extern char *slurm_get_crypto_type(void) +{ + char *crypto_type = NULL; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + crypto_type = xstrdup(conf->crypto_type); + slurm_conf_unlock(); + } + return crypto_type; +} + /* slurm_get_propagate_prio_process * return the PropagatePrioProcess flag from slurmctld_conf object */ extern uint16_t slurm_get_propagate_prio_process(void) { - uint16_t propagate_prio; + uint16_t propagate_prio = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - propagate_prio = conf->propagate_prio_process; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + propagate_prio = conf->propagate_prio_process; + slurm_conf_unlock(); + } return propagate_prio; } @@ -292,12 +428,15 @@ extern uint16_t slurm_get_propagate_prio_process(void) */ extern uint16_t slurm_get_fast_schedule(void) { - uint16_t fast_val; + uint16_t fast_val = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - fast_val = conf->fast_schedule; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + fast_val = conf->fast_schedule; + slurm_conf_unlock(); + } return fast_val; } @@ -309,26 +448,32 @@ extern int slurm_set_tree_width(uint16_t tree_width) { slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - if (tree_width == 0) { - error("can't have span count of 0"); - return SLURM_ERROR; + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + if (tree_width == 0) { + error("can't have span count of 0"); + return SLURM_ERROR; + } + conf->tree_width = 
tree_width; + slurm_conf_unlock(); } - conf->tree_width = tree_width; - slurm_conf_unlock(); - return SLURM_SUCCESS; + return 0; } /* slurm_get_tree_width * returns the value of tree_width in slurmctld_conf object */ extern uint16_t slurm_get_tree_width(void) { - uint16_t tree_width; + uint16_t tree_width = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - tree_width = conf->tree_width; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + tree_width = conf->tree_width; + slurm_conf_unlock(); + } return tree_width; } @@ -341,85 +486,369 @@ extern int slurm_set_auth_type(char *auth_type) { slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - xfree(conf->authtype); - conf->authtype = xstrdup(auth_type); - slurm_conf_unlock(); + if(slurmdbd_conf) { + xfree(slurmdbd_conf->auth_type); + slurmdbd_conf->auth_type = xstrdup(auth_type); + } else { + conf = slurm_conf_lock(); + xfree(conf->authtype); + conf->authtype = xstrdup(auth_type); + slurm_conf_unlock(); + } return 0; } -/* slurm_get_jobacct_loc - * returns the job accounting loc from the slurmctld_conf object - * RET char * - job accounting loc, MUST be xfreed by caller +/* slurm_get_health_check_program + * get health_check_program from slurmctld_conf object from slurmctld_conf object + * RET char * - health_check_program, MUST be xfreed by caller */ -char *slurm_get_jobacct_loc(void) +char *slurm_get_health_check_program(void) { - char *jobacct_logfile; + char *health_check_program = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - jobacct_logfile = xstrdup(conf->job_acct_logfile); - slurm_conf_unlock(); - return jobacct_logfile; + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + health_check_program = xstrdup(conf->health_check_program); + slurm_conf_unlock(); + } + return health_check_program; } -/* slurm_get_jobacct_freq - * returns the job accounting poll frequency from the slurmctld_conf object - * RET int - job accounting frequency +/* slurm_get_accounting_storage_type + * returns the accounting storage type from slurmctld_conf object + * RET char * - accounting storage type, MUST be xfreed by caller */ -uint16_t slurm_get_jobacct_freq(void) +char *slurm_get_accounting_storage_type(void) { - uint16_t freq; + char *accounting_type; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - freq = conf->job_acct_freq; - slurm_conf_unlock(); - return freq; + if(slurmdbd_conf) { + accounting_type = xstrdup(slurmdbd_conf->storage_type); + } else { + conf = slurm_conf_lock(); + accounting_type = xstrdup(conf->accounting_storage_type); + slurm_conf_unlock(); + } + return accounting_type; + } -/* slurm_get_jobacct_type +/* slurm_get_accounting_storage_user + * returns the storage user from slurmctld_conf object + * RET char * - storage user, MUST be xfreed by caller + */ +char *slurm_get_accounting_storage_user(void) +{ + char *storage_user; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + storage_user = xstrdup(slurmdbd_conf->storage_user); + } else { + conf = slurm_conf_lock(); + storage_user = xstrdup(conf->accounting_storage_user); + slurm_conf_unlock(); + } + return storage_user; +} + +/* slurm_get_accounting_storage_host + * returns the storage host from slurmctld_conf object + * RET char * - storage host, MUST be xfreed by caller + */ +char *slurm_get_accounting_storage_host(void) +{ + char *storage_host; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + storage_host = xstrdup(slurmdbd_conf->storage_host); + } else { + conf = slurm_conf_lock(); + storage_host = 
xstrdup(conf->accounting_storage_host); + slurm_conf_unlock(); + } + return storage_host; +} + +/* slurm_get_accounting_storage_loc + * returns the storage location from slurmctld_conf object + * RET char * - storage location, MUST be xfreed by caller + */ +char *slurm_get_accounting_storage_loc(void) +{ + char *storage_loc; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + storage_loc = xstrdup(slurmdbd_conf->storage_loc); + } else { + conf = slurm_conf_lock(); + storage_loc = xstrdup(conf->accounting_storage_loc); + slurm_conf_unlock(); + } + return storage_loc; +} + +/* slurm_set_accounting_storage_loc + * IN: char *loc (name of file or database) + * RET 0 or error code + */ +int slurm_set_accounting_storage_loc(char *loc) +{ + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + xfree(slurmdbd_conf->storage_loc); + slurmdbd_conf->storage_loc = xstrdup(loc); + } else { + conf = slurm_conf_lock(); + xfree(conf->accounting_storage_loc); + conf->accounting_storage_loc = xstrdup(loc); + slurm_conf_unlock(); + } + return 0; +} + +/* slurm_get_accounting_storage_pass + * returns the storage password from slurmctld_conf object + * RET char * - storage password, MUST be xfreed by caller + */ +char *slurm_get_accounting_storage_pass(void) +{ + char *storage_pass; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + storage_pass = xstrdup(slurmdbd_conf->storage_pass); + } else { + conf = slurm_conf_lock(); + storage_pass = xstrdup(conf->accounting_storage_pass); + slurm_conf_unlock(); + } + return storage_pass; +} + +/* _global_auth_key + * returns the storage password from slurmctld_conf or slurmdbd_conf object + * cache value in local buffer for best performance + * RET char * - storage password + */ +static char *_global_auth_key(void) +{ + static bool loaded_storage_pass = false; + static char storage_pass[512] = "\0"; + slurm_ctl_conf_t *conf; + + if(loaded_storage_pass) + return storage_pass; + + if(slurmdbd_conf) { + if(slurmdbd_conf->storage_pass) { + if(strlen(slurmdbd_conf->storage_pass) > + sizeof(storage_pass)) + fatal("StoragePass is too long"); + strncpy(storage_pass, slurmdbd_conf->storage_pass, + sizeof(storage_pass)); + } + } else { + conf = slurm_conf_lock(); + if(conf->accounting_storage_pass) { + if(strlen(conf->accounting_storage_pass) > + sizeof(storage_pass)) + fatal("AccountingStoragePass is too long"); + strncpy(storage_pass, conf->accounting_storage_pass, + sizeof(storage_pass)); + } + slurm_conf_unlock(); + } + loaded_storage_pass = true; + return storage_pass; +} + +/* slurm_get_accounting_storage_port + * returns the storage port from slurmctld_conf object + * RET uint32_t - storage port + */ +uint32_t slurm_get_accounting_storage_port(void) +{ + uint32_t storage_port; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + storage_port = slurmdbd_conf->storage_port; + } else { + conf = slurm_conf_lock(); + storage_port = conf->accounting_storage_port; + slurm_conf_unlock(); + } + return storage_port; + +} + +/* slurm_get_jobacct_gather_type * returns the job accounting type from the slurmctld_conf object * RET char * - job accounting type, MUST be xfreed by caller */ -char *slurm_get_jobacct_type(void) +char *slurm_get_jobacct_gather_type(void) { - char *jobacct_type; + char *jobacct_type = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - jobacct_type = xstrdup(conf->job_acct_type); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + jobacct_type = xstrdup(conf->job_acct_gather_type); + slurm_conf_unlock(); + } return 
jobacct_type; } +/* slurm_get_jobacct_freq + * returns the job accounting poll frequency from the slurmctld_conf object + * RET int - job accounting frequency + */ +uint16_t slurm_get_jobacct_gather_freq(void) +{ + uint16_t freq = 0; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + freq = conf->job_acct_gather_freq; + slurm_conf_unlock(); + } + return freq; +} + /* slurm_get_jobcomp_type * returns the job completion logger type from slurmctld_conf object * RET char * - job completion type, MUST be xfreed by caller */ char *slurm_get_jobcomp_type(void) { - char *jobcomp_type; + char *jobcomp_type = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - jobcomp_type = xstrdup(conf->job_comp_type); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + jobcomp_type = xstrdup(conf->job_comp_type); + slurm_conf_unlock(); + } return jobcomp_type; } +/* slurm_get_jobcomp_loc + * returns the job completion loc from slurmctld_conf object + * RET char * - job completion location, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_loc(void) +{ + char *jobcomp_loc = 0; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + jobcomp_loc = xstrdup(conf->job_comp_loc); + slurm_conf_unlock(); + } + return jobcomp_loc; +} + +/* slurm_get_jobcomp_user + * returns the storage user from slurmctld_conf object + * RET char * - storage user, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_user(void) +{ + char *storage_user = NULL; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + storage_user = xstrdup(conf->job_comp_user); + slurm_conf_unlock(); + } + return storage_user; +} + +/* slurm_get_jobcomp_host + * returns the storage host from slurmctld_conf object + * RET char * - storage host, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_host(void) +{ + char *storage_host = NULL; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + storage_host = xstrdup(conf->job_comp_host); + slurm_conf_unlock(); + } + return storage_host; +} + +/* slurm_get_jobcomp_pass + * returns the storage password from slurmctld_conf object + * RET char * - storage password, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_pass(void) +{ + char *storage_pass = NULL; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + storage_pass = xstrdup(conf->job_comp_pass); + slurm_conf_unlock(); + } + return storage_pass; +} + +/* slurm_get_jobcomp_port + * returns the storage port from slurmctld_conf object + * RET uint32_t - storage port + */ +uint32_t slurm_get_jobcomp_port(void) +{ + uint32_t storage_port = 0; + slurm_ctl_conf_t *conf; + + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + storage_port = conf->job_comp_port; + slurm_conf_unlock(); + } + return storage_port; + +} + /* slurm_get_proctrack_type * get ProctrackType from slurmctld_conf object * RET char * - proctrack type, MUST be xfreed by caller */ char *slurm_get_proctrack_type(void) { - char *proctrack_type; + char *proctrack_type = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - proctrack_type = xstrdup(conf->proctrack_type); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + proctrack_type = xstrdup(conf->proctrack_type); + slurm_conf_unlock(); + } return proctrack_type; } @@ -429,12 +858,15 @@ char *slurm_get_proctrack_type(void) */ uint16_t slurm_get_slurmd_port(void) { - 
uint16_t slurmd_port; + uint16_t slurmd_port = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - slurmd_port = conf->slurmd_port; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + slurmd_port = conf->slurmd_port; + slurm_conf_unlock(); + } return slurmd_port; } @@ -444,12 +876,16 @@ uint16_t slurm_get_slurmd_port(void) */ uint32_t slurm_get_slurm_user_id(void) { - uint32_t slurm_uid; + uint32_t slurm_uid = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - slurm_uid = conf->slurm_user_id; - slurm_conf_unlock(); + if(slurmdbd_conf) { + slurm_uid = slurmdbd_conf->slurm_user_id; + } else { + conf = slurm_conf_lock(); + slurm_uid = conf->slurm_user_id; + slurm_conf_unlock(); + } return slurm_uid; } @@ -457,24 +893,30 @@ uint32_t slurm_get_slurm_user_id(void) * RET uint16_t - Value of SchedulerRootFilter */ extern uint16_t slurm_get_root_filter(void) { - uint16_t root_filter; + uint16_t root_filter = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - root_filter = conf->schedrootfltr; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + root_filter = conf->schedrootfltr; + slurm_conf_unlock(); + } return root_filter; } /* slurm_get_sched_port * RET uint16_t - Value of SchedulerPort */ extern uint16_t slurm_get_sched_port(void) { - uint16_t port; + uint16_t port = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - port = conf->schedport; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + port = conf->schedport; + slurm_conf_unlock(); + } return port; } @@ -484,12 +926,15 @@ extern uint16_t slurm_get_sched_port(void) */ char *slurm_get_sched_type(void) { - char *sched_type; + char *sched_type = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - sched_type = xstrdup(conf->schedtype); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + sched_type = xstrdup(conf->schedtype); + slurm_conf_unlock(); + } return sched_type; } @@ -499,12 +944,15 @@ char *slurm_get_sched_type(void) */ char *slurm_get_select_type(void) { - char *select_type; + char *select_type = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - select_type = xstrdup(conf->select_type); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + select_type = xstrdup(conf->select_type); + slurm_conf_unlock(); + } return select_type; } @@ -514,7 +962,7 @@ char *slurm_get_select_type(void) */ char *slurm_get_switch_type(void) { - char *switch_type; + char *switch_type = NULL; slurm_ctl_conf_t *conf; conf = slurm_conf_lock(); @@ -529,12 +977,15 @@ char *slurm_get_switch_type(void) */ uint16_t slurm_get_wait_time(void) { - uint16_t wait_time; + uint16_t wait_time = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - wait_time = conf->wait_time; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + wait_time = conf->wait_time; + slurm_conf_unlock(); + } return wait_time; } @@ -544,12 +995,15 @@ uint16_t slurm_get_wait_time(void) */ char *slurm_get_srun_prolog(void) { - char *prolog; + char *prolog = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - prolog = xstrdup(conf->srun_prolog); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + prolog = xstrdup(conf->srun_prolog); + slurm_conf_unlock(); + } return prolog; } @@ -559,12 +1013,15 @@ char *slurm_get_srun_prolog(void) */ char *slurm_get_srun_epilog(void) { - char *epilog; + char *epilog = NULL; 
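Nearly every getter rewritten in this file follows the same shape: when the process is slurmdbd (slurmdbd_conf is non-NULL) the value comes from the DBD configuration, or stays at its zero default if the field has no DBD equivalent; otherwise slurm.conf is locked, the field copied out, and the lock released. Below is a minimal, self-contained sketch of that fallback pattern; the stand-in structs and the missing mutex are assumptions, not SLURM's real slurm_conf_lock()/slurm_conf_unlock() API.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-ins for the two configuration sources. */
struct dbd_conf  { uint16_t msg_timeout; };
struct ctld_conf { uint16_t msg_timeout; };

static struct dbd_conf  *dbd_conf  = NULL;   /* non-NULL only inside slurmdbd */
static struct ctld_conf  ctld_conf = { 10 }; /* normally read under a mutex   */

/* Prefer the slurmdbd configuration when present, otherwise fall back to
 * slurm.conf; fields with no DBD equivalent keep their zero default. */
static uint16_t get_msg_timeout(void)
{
	uint16_t timeout = 0;

	if (dbd_conf)
		timeout = dbd_conf->msg_timeout;
	else
		timeout = ctld_conf.msg_timeout;  /* lock/unlock elided here */
	return timeout;
}

int main(void)
{
	printf("msg_timeout = %u\n", (unsigned) get_msg_timeout());
	return 0;
}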
slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - epilog = xstrdup(conf->srun_epilog); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + epilog = xstrdup(conf->srun_epilog); + slurm_conf_unlock(); + } return epilog; } @@ -572,12 +1029,15 @@ char *slurm_get_srun_epilog(void) * RET task_epilog name, must be xfreed by caller */ char *slurm_get_task_epilog(void) { - char *task_epilog; + char *task_epilog = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - task_epilog = xstrdup(conf->task_epilog); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + task_epilog = xstrdup(conf->task_epilog); + slurm_conf_unlock(); + } return task_epilog; } @@ -585,12 +1045,15 @@ char *slurm_get_task_epilog(void) * RET task_prolog name, must be xfreed by caller */ char *slurm_get_task_prolog(void) { - char *task_prolog; + char *task_prolog = NULL; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - task_prolog = xstrdup(conf->task_prolog); - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + task_prolog = xstrdup(conf->task_prolog); + slurm_conf_unlock(); + } return task_prolog; } @@ -598,7 +1061,7 @@ char *slurm_get_task_prolog(void) * RET task_plugin name, must be xfreed by caller */ char *slurm_get_task_plugin(void) { - char *task_plugin; + char *task_plugin = NULL; slurm_ctl_conf_t *conf; conf = slurm_conf_lock(); @@ -610,12 +1073,15 @@ char *slurm_get_task_plugin(void) /* slurm_get_task_plugin_param */ uint16_t slurm_get_task_plugin_param(void) { - uint16_t task_plugin_param; + uint16_t task_plugin_param = 0; slurm_ctl_conf_t *conf; - conf = slurm_conf_lock(); - task_plugin_param = conf->task_plugin_param; - slurm_conf_unlock(); + if(slurmdbd_conf) { + } else { + conf = slurm_conf_lock(); + task_plugin_param = conf->task_plugin_param; + slurm_conf_unlock(); + } return task_plugin_param; } @@ -756,12 +1222,19 @@ slurm_fd slurm_open_controller_conn_spec(enum controller_id dest) debug3("Error: Unable to set default config"); return SLURM_ERROR; } - - addr = (dest == PRIMARY_CONTROLLER) ? 
- &proto_conf->primary_controller : - &proto_conf->secondary_controller; - if (!addr) return SLURM_ERROR; + if (dest == PRIMARY_CONTROLLER) + addr = &proto_conf->primary_controller; + else { /* (dest == SECONDARY_CONTROLLER) */ + slurm_ctl_conf_t *conf; + addr = NULL; + conf = slurm_conf_lock(); + if (conf->backup_addr) + addr = &proto_conf->secondary_controller; + slurm_conf_unlock(); + if (!addr) + return SLURM_ERROR; + } rc = slurm_open_msg_conn(addr); if (rc == -1) @@ -864,6 +1337,9 @@ int slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout) } if (check_header_version(&header) < 0) { + int uid = _unpack_msg_uid(buffer); + error("Invalid Protocol Version %u from uid=%d", + header.version, uid); free_buf(buffer); rc = SLURM_PROTOCOL_VERSION_ERROR; goto total_return; @@ -891,7 +1367,11 @@ int slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout) rc = ESLURM_PROTOCOL_INCOMPLETE_PACKET; goto total_return; } - rc = g_slurm_auth_verify( auth_cred, NULL, 2 ); + if(header.flags & SLURM_GLOBAL_AUTH_KEY) { + rc = g_slurm_auth_verify( auth_cred, NULL, 2, + _global_auth_key() ); + } else + rc = g_slurm_auth_verify( auth_cred, NULL, 2, NULL ); if (rc != SLURM_SUCCESS) { error( "authentication: %s ", @@ -906,6 +1386,7 @@ int slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout) * Unpack message body */ msg->msg_type = header.msg_type; + msg->flags = header.flags; if ( (header.body_length > remaining_buf(buffer)) || (unpack_msg(msg, buffer) != SLURM_SUCCESS) ) { @@ -1014,6 +1495,9 @@ List slurm_receive_msgs(slurm_fd fd, int steps, int timeout) } if(check_header_version(&header) < 0) { + int uid = _unpack_msg_uid(buffer); + error("Invalid Protocol Version %u from uid=%d", + header.version, uid); free_buf(buffer); rc = SLURM_PROTOCOL_VERSION_ERROR; goto total_return; @@ -1041,7 +1525,11 @@ List slurm_receive_msgs(slurm_fd fd, int steps, int timeout) rc = ESLURM_PROTOCOL_INCOMPLETE_PACKET; goto total_return; } - rc = g_slurm_auth_verify( auth_cred, NULL, 2 ); + if(header.flags & SLURM_GLOBAL_AUTH_KEY) { + rc = g_slurm_auth_verify( auth_cred, NULL, 2, + _global_auth_key() ); + } else + rc = g_slurm_auth_verify( auth_cred, NULL, 2, NULL ); if(rc != SLURM_SUCCESS) { error("authentication: %s ", @@ -1056,6 +1544,7 @@ List slurm_receive_msgs(slurm_fd fd, int steps, int timeout) * Unpack message body */ msg.msg_type = header.msg_type; + msg.flags = header.flags; if((header.body_length > remaining_buf(buffer)) || (unpack_msg(&msg, buffer) != SLURM_SUCCESS)) { @@ -1098,6 +1587,21 @@ total_return: } +/* try to determine the UID associated with a message with different + * message header version, return -1 if we can't tell */ +static int _unpack_msg_uid(Buf buffer) +{ + int uid = -1; + void *auth_cred = NULL; + + if ((auth_cred = g_slurm_auth_unpack(buffer)) == NULL) + return uid; + uid = (int) g_slurm_auth_get_uid(auth_cred, NULL); + g_slurm_auth_destroy(auth_cred); + + return uid; +} + /* * NOTE: memory is allocated for the returned msg and the returned list * both must be freed at some point using the slurm_free_functions @@ -1174,6 +1678,9 @@ int slurm_receive_msg_and_forward(slurm_fd fd, slurm_addr *orig_addr, } if (check_header_version(&header) < 0) { + int uid = _unpack_msg_uid(buffer); + error("Invalid Protocol Version %u from uid=%d", + header.version, uid); free_buf(buffer); rc = SLURM_PROTOCOL_VERSION_ERROR; goto total_return; @@ -1238,7 +1745,11 @@ int slurm_receive_msg_and_forward(slurm_fd fd, slurm_addr *orig_addr, rc = ESLURM_PROTOCOL_INCOMPLETE_PACKET; goto total_return; } 
- rc = g_slurm_auth_verify( auth_cred, NULL, 2 ); + if(header.flags & SLURM_GLOBAL_AUTH_KEY) { + rc = g_slurm_auth_verify( auth_cred, NULL, 2, + _global_auth_key() ); + } else + rc = g_slurm_auth_verify( auth_cred, NULL, 2, NULL ); if (rc != SLURM_SUCCESS) { error( "authentication: %s ", @@ -1253,6 +1764,7 @@ int slurm_receive_msg_and_forward(slurm_fd fd, slurm_addr *orig_addr, * Unpack message body */ msg->msg_type = header.msg_type; + msg->flags = header.flags; if ( (header.body_length > remaining_buf(buffer)) || (unpack_msg(msg, buffer) != SLURM_SUCCESS) ) { @@ -1321,11 +1833,16 @@ int slurm_send_node_msg(slurm_fd fd, slurm_msg_t * msg) Buf buffer; int rc; void * auth_cred; + uint16_t auth_flags = SLURM_PROTOCOL_NO_FLAGS; /* * Initialize header with Auth credential and message type. */ - auth_cred = g_slurm_auth_create(NULL, 2); + if (msg->flags & SLURM_GLOBAL_AUTH_KEY) { + auth_flags = SLURM_GLOBAL_AUTH_KEY; + auth_cred = g_slurm_auth_create(NULL, 2, _global_auth_key()); + } else + auth_cred = g_slurm_auth_create(NULL, 2, NULL); if (auth_cred == NULL) { error("authentication: %s", g_slurm_auth_errstr(g_slurm_auth_errno(NULL)) ); @@ -1338,7 +1855,7 @@ int slurm_send_node_msg(slurm_fd fd, slurm_msg_t * msg) } forward_wait(msg); - init_header(&header, msg, SLURM_PROTOCOL_NO_FLAGS); + init_header(&header, msg, msg->flags); /* * Pack header into buffer for transmission @@ -1660,11 +2177,11 @@ int slurm_unpack_slurm_addr_no_alloc(slurm_addr * slurm_address, * returns - SLURM error code */ void slurm_pack_slurm_addr_array(slurm_addr * slurm_address, - uint16_t size_val, Buf buffer) + uint32_t size_val, Buf buffer) { int i = 0; - uint16_t nl = htons(size_val); - pack16((uint16_t)nl, buffer); + uint32_t nl = htonl(size_val); + pack32(nl, buffer); for (i = 0; i < size_val; i++) { slurm_pack_slurm_addr(slurm_address + i, buffer); @@ -1680,14 +2197,14 @@ void slurm_pack_slurm_addr_array(slurm_addr * slurm_address, * returns - SLURM error code */ int slurm_unpack_slurm_addr_array(slurm_addr ** slurm_address, - uint16_t * size_val, Buf buffer) + uint32_t * size_val, Buf buffer) { int i = 0; - uint16_t nl; + uint32_t nl; *slurm_address = NULL; - safe_unpack16(&nl, buffer); - *size_val = ntohs(nl); + safe_unpack32(&nl, buffer); + *size_val = ntohl(nl); *slurm_address = xmalloc((*size_val) * sizeof(slurm_addr)); for (i = 0; i < *size_val; i++) { @@ -1728,6 +2245,7 @@ int slurm_send_rc_msg(slurm_msg_t *msg, int rc) } rc_msg.return_code = rc; + slurm_msg_t_init(&resp_msg); resp_msg.address = msg->address; resp_msg.msg_type = RESPONSE_SLURM_RC; resp_msg.data = &rc_msg; diff --git a/src/common/slurm_protocol_api.h b/src/common/slurm_protocol_api.h index 10c19e124..8c2fbabef 100644 --- a/src/common/slurm_protocol_api.h +++ b/src/common/slurm_protocol_api.h @@ -3,9 +3,10 @@ * definitions ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
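The address-array pack/unpack helpers above widen the element count from 16 to 32 bits; both ends of the connection must agree on the width and byte order or the unpack reads garbage. The following self-contained snippet round-trips a 32-bit count in network byte order; the plain memcpy into a byte buffer is only a stand-in for SLURM's Buf/pack32 machinery, not the real API.

#include <arpa/inet.h>   /* htonl, ntohl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[4];
	uint32_t count = 70000;          /* would overflow a uint16_t count */
	uint32_t wire  = htonl(count);   /* host -> network byte order      */

	memcpy(buf, &wire, sizeof(wire));        /* "pack"   */

	uint32_t back;
	memcpy(&back, buf, sizeof(back));        /* "unpack" */
	printf("unpacked count = %u\n", (unsigned) ntohl(back));
	return 0;
}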
@@ -96,6 +97,21 @@ int inline slurm_set_api_config(slurm_protocol_config_t * protocol_conf); */ inline slurm_protocol_config_t *slurm_get_api_config(); +/* slurm_get_def_mem_per_task + * RET DefMemPerTask value from slurm.conf + */ +uint32_t slurm_get_def_mem_per_task(void); + +/* slurm_get_max_mem_per_task + * RET MaxMemPerTask value from slurm.conf + */ +uint32_t slurm_get_max_mem_per_task(void); + +/* slurm_get_epilog_msg_time + * RET EpilogMsgTime value from slurm.conf + */ +uint32_t slurm_get_epilog_msg_time(void); + /* slurm_get_env_timeout * return default timeout for srun/sbatch --get-user-env option */ @@ -129,6 +145,24 @@ int inline slurm_api_set_default_config(); * execute this only at program termination to free all memory */ void inline slurm_api_clear_config(void); +/* slurm_get_health_check_program + * get health_check_program from slurmctld_conf object from slurmctld_conf object + * RET char * - health_check_program, MUST be xfreed by caller + */ +char *slurm_get_health_check_program(void); + +/* slurm_get_slurmdbd_addr + * get slurm_dbd_addr from slurmctld_conf object from slurmctld_conf object + * RET char * - slurmdbd_addr, MUST be xfreed by caller + */ +char *slurm_get_slurmdbd_addr(void); + +/* slurm_get_slurmdbd_port + * get slurm_dbd_port from slurmctld_conf object from slurmctld_conf object + * RET uint16_t - dbd_port + */ +uint16_t slurm_get_slurmdbd_port(void); + /* slurm_get_plugin_dir * get plugin directory from slurmctld_conf object from slurmctld_conf object * RET char * - plugin directory, MUST be xfreed by caller @@ -154,6 +188,24 @@ extern char *slurm_get_auth_type(void); */ extern int slurm_set_auth_type(char *auth_type); +/* slurm_get_checkpoint_type + * returns the checkpoint_type from slurmctld_conf object + * RET char * - checkpoint type, MUST be xfreed by caller + */ +extern char *slurm_get_checkpoint_type(void); + +/* slurm_get_cluster_name + * returns the cluster name from slurmctld_conf object + * RET char * - cluster name, MUST be xfreed by caller + */ +char *slurm_get_cluster_name(void); + +/* slurm_get_crypto_type + * returns the crypto_type from slurmctld_conf object + * RET char * - crypto type, MUST be xfreed by caller + */ +extern char *slurm_get_crypto_type(void); + /* slurm_get_fast_schedule * returns the value of fast_schedule in slurmctld_conf object */ @@ -169,23 +221,59 @@ extern int slurm_set_tree_width(uint16_t tree_width); */ extern uint16_t slurm_get_tree_width(void); -/* slurm_get_jobacct_loc - * returns the job accounting loc from slurmctld_conf object - * RET char * - job accounting location, MUST be xfreed by caller +/* slurm_get_accounting_storage_type + * returns the accounting storage type from slurmctld_conf object + * RET char * - accounting storage type, MUST be xfreed by caller */ -char *slurm_get_jobacct_loc(void); +char *slurm_get_accounting_storage_type(void); -/* slurm_get_jobacct_freq - * returns the job accounting poll frequency from the slurmctld_conf object - * RET int - job accounting frequency +/* slurm_get_accounting_storage_user + * returns the storage user from slurmctld_conf object + * RET char * - storage user, MUST be xfreed by caller */ -uint16_t slurm_get_jobacct_freq(void); +char *slurm_get_accounting_storage_user(void); -/* slurm_get_jobacct_type +/* slurm_get_accounting_storage_host + * returns the storage host from slurmctld_conf object + * RET char * - storage host, MUST be xfreed by caller + */ +char *slurm_get_accounting_storage_host(void); + +/* slurm_get_accounting_storage_pass + * returns the 
storage password from slurmctld_conf object + * RET char * - storage location, MUST be xfreed by caller + */ +char *slurm_get_accounting_storage_loc(void); + +/* slurm_set_accounting_storage_loc + * IN: char *loc (name of file or database) + * RET 0 or error code + */ +int slurm_set_accounting_storage_loc(char *loc); + +/* slurm_get_accounting_storage_pass + * returns the storage password from slurmctld_conf object + * RET char * - storage password, MUST be xfreed by caller + */ +char *slurm_get_accounting_storage_pass(void); + +/* slurm_get_accounting_storage_port + * returns the storage port from slurmctld_conf object + * RET uint32_t - storage port + */ +uint32_t slurm_get_accounting_storage_port(void); + +/* slurm_get_jobacct_gather_type * returns the job accounting type from slurmctld_conf object * RET char * - job accounting type, MUST be xfreed by caller */ -char *slurm_get_jobacct_type(void); +char *slurm_get_jobacct_gather_type(void); + +/* slurm_get_jobacct_gather_freq + * returns the job accounting poll frequency from the slurmctld_conf object + * RET int - job accounting frequency + */ +uint16_t slurm_get_jobacct_gather_freq(void); /* slurm_get_jobcomp_type * returns the job completion logger type from slurmctld_conf object @@ -193,6 +281,36 @@ char *slurm_get_jobacct_type(void); */ char *slurm_get_jobcomp_type(void); +/* slurm_get_jobcomp_loc + * returns the job completion loc from slurmctld_conf object + * RET char * - job completion location, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_loc(void); + +/* slurm_get_jobcomp_user + * returns the storage user from slurmctld_conf object + * RET char * - storage user, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_user(void); + +/* slurm_get_jobcomp_host + * returns the storage host from slurmctld_conf object + * RET char * - storage host, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_host(void); + +/* slurm_get_jobcomp_pass + * returns the storage password from slurmctld_conf object + * RET char * - storage password, MUST be xfreed by caller + */ +char *slurm_get_jobcomp_pass(void); + +/* slurm_get_jobcomp_port + * returns the storage port from slurmctld_conf object + * RET uint32_t - storage port + */ +uint32_t slurm_get_jobcomp_port(void); + /* slurm_get_propagate_prio_process * return the PropagatePrioProcess flag from slurmctld_conf object */ @@ -607,7 +725,7 @@ int inline slurm_unpack_slurm_addr_no_alloc(slurm_addr * slurm_address, * returns - SLURM error code */ void inline slurm_pack_slurm_addr_array(slurm_addr * slurm_address, - uint16_t size_val, Buf buffer); + uint32_t size_val, Buf buffer); /* slurm_unpack_slurm_addr_array * unpacks an array of slurm_addrs from a buffer * OUT slurm_address - slurm_addr to unpack to @@ -616,7 +734,7 @@ void inline slurm_pack_slurm_addr_array(slurm_addr * slurm_address, * returns - SLURM error code */ int inline slurm_unpack_slurm_addr_array(slurm_addr ** slurm_address, - uint16_t * size_val, Buf buffer); + uint32_t * size_val, Buf buffer); /**********************************************************************\ * simplified communication routines diff --git a/src/common/slurm_protocol_common.h b/src/common/slurm_protocol_common.h index e9308e824..11efc3867 100644 --- a/src/common/slurm_protocol_common.h +++ b/src/common/slurm_protocol_common.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, et. al. 
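The _global_auth_key() helper added earlier in this patch caches the storage password in a static buffer so that repeated message signing does not re-read the configuration. Here is a minimal sketch of that load-once pattern; the getenv() source is a placeholder assumption (the real code copies AccountingStoragePass/StoragePass out of the parsed config), and like the original it assumes first use happens before additional threads need the key.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

/* Return a pointer to a key loaded at most once per process. */
static const char *global_auth_key(void)
{
	static bool loaded = false;
	static char key[512] = "";

	if (!loaded) {
		const char *src = getenv("STORAGE_PASS");  /* placeholder source */
		if (src) {
			if (strlen(src) >= sizeof(key)) {
				fprintf(stderr, "storage pass too long\n");
				exit(1);
			}
			strncpy(key, src, sizeof(key) - 1);
		}
		loaded = true;
	}
	return key;
}

int main(void)
{
	printf("key = \"%s\"\n", global_auth_key());
	return 0;
}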
- * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -66,6 +66,7 @@ /* used to set flags to empty */ #define SLURM_PROTOCOL_NO_FLAGS 0 +#define SLURM_GLOBAL_AUTH_KEY 1 #if MONGO_IMPLEMENTATION # include <src/common/slurm_protocol_mongo_common.h> diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c index d7d27d4a5..c513b3f8b 100644 --- a/src/common/slurm_protocol_defs.c +++ b/src/common/slurm_protocol_defs.c @@ -2,13 +2,12 @@ * slurm_protocol_defs.c - functions for initializing and releasing * storage for RPC data structures. these are the functions used by * the slurm daemons directly, not for user client use. - * - * $Id: slurm_protocol_defs.c 12088 2007-08-22 18:02:24Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -50,14 +49,15 @@ #include <stdio.h> #include "src/common/log.h" -#include "src/common/slurm_jobacct.h" #include "src/common/node_select.h" +#include "src/common/slurm_accounting_storage.h" #include "src/common/slurm_cred.h" #include "src/common/slurm_protocol_defs.h" #include "src/common/switch.h" #include "src/common/xmalloc.h" #include "src/common/job_options.h" #include "src/common/forward.h" +#include "src/common/slurm_jobacct_gather.h" static void _free_all_job_info (job_info_msg_t *msg); @@ -104,6 +104,12 @@ extern void slurm_msg_t_copy(slurm_msg_t *dest, slurm_msg_t *src) return; } +extern void slurm_destroy_char(void *object) +{ + char *tmp = (char *)object; + xfree(tmp); +} + void slurm_free_last_update_msg(last_update_msg_t * msg) { @@ -184,6 +190,7 @@ void slurm_free_job_desc_msg(job_desc_msg_t * msg) xfree(msg->environment[i]); xfree(msg->environment); xfree(msg->features); + xfree(msg->licenses); xfree(msg->mail_user); xfree(msg->name); xfree(msg->partition); @@ -197,11 +204,11 @@ void slurm_free_job_desc_msg(job_desc_msg_t * msg) xfree(msg->in); xfree(msg->out); xfree(msg->work_dir); - xfree(msg->alloc_resp_hostname); - xfree(msg->other_hostname); xfree(msg->account); xfree(msg->network); xfree(msg->comment); + xfree(msg->dependency); + xfree(msg->resp_host); xfree(msg->blrtsimage); xfree(msg->linuximage); xfree(msg->mloaderimage); @@ -268,6 +275,10 @@ void slurm_free_job_info_members(job_info_t * job) xfree(job->exc_node_inx); xfree(job->network); xfree(job->comment); + xfree(job->dependency); + xfree(job->work_dir); + xfree(job->command); + xfree(job->licenses); } } @@ -275,8 +286,10 @@ void slurm_free_node_registration_status_msg( slurm_node_registration_status_msg_t * msg) { if (msg) { - xfree(msg->node_name); + xfree(msg->arch); xfree(msg->job_id); + xfree(msg->node_name); + xfree(msg->os); xfree(msg->step_id); if (msg->startup) switch_g_free_node_info(&msg->switch_nodeinfo); @@ -321,6 +334,7 @@ void slurm_free_job_step_create_request_msg(job_step_create_request_msg_t * xfree(msg->name); xfree(msg->network); xfree(msg->node_list); + xfree(msg->ckpt_path); xfree(msg); } } @@ -421,6 +435,8 @@ void 
slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg) xfree(msg->task_epilog); xfree(msg->complete_nodelist); + xfree(msg->ckpt_path); + if (msg->switch_job) switch_free_jobinfo(msg->switch_job); @@ -432,9 +448,6 @@ void slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg) void slurm_free_task_user_managed_io_stream_msg(task_user_managed_io_msg_t *msg) { - if (msg == NULL) - return; - xfree(msg); } @@ -469,6 +482,11 @@ void slurm_free_kill_tasks_msg(kill_tasks_msg_t * msg) xfree(msg); } +void slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg) +{ + xfree(msg); +} + void slurm_free_epilog_complete_msg(epilog_complete_msg_t * msg) { if (msg) { @@ -534,6 +552,14 @@ void inline slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg) } } +void inline slurm_free_checkpoint_task_comp_msg(checkpoint_task_comp_msg_t *msg) +{ + if (msg) { + xfree(msg->error_msg); + xfree(msg); + } +} + void inline slurm_free_checkpoint_resp_msg(checkpoint_resp_msg_t *msg) { if (msg) { @@ -568,6 +594,10 @@ extern char *job_reason_string(enum job_state_reason inx) return "JobHeld"; case WAIT_TIME: return "BeginTime"; + case WAIT_LICENSES: + return "Licenses"; + case WAIT_ASSOC_LIMIT: + return "AssociationLimit"; case FAIL_DOWN_PARTITION: return "PartitionDown"; case FAIL_DOWN_NODE: @@ -584,6 +614,8 @@ extern char *job_reason_string(enum job_state_reason inx) return "TimeLimit"; case FAIL_INACTIVE_LIMIT: return "InactiveLimit"; + case FAIL_BANK_ACCOUNT: + return "InvalidBankAccount"; default: return "?"; } @@ -597,6 +629,14 @@ void inline slurm_free_get_kvs_msg(kvs_get_msg_t *msg) } } +void inline slurm_free_will_run_response_msg(will_run_response_msg_t *msg) +{ + if (msg) { + xfree(msg->node_list); + xfree(msg); + } +} + char *job_state_string(enum job_states inx) { if (inx & JOB_COMPLETING) @@ -653,8 +693,9 @@ char *job_state_string_compact(enum job_states inx) char *node_state_string(enum node_states inx) { - bool drain_flag = (inx & NODE_STATE_DRAIN); bool comp_flag = (inx & NODE_STATE_COMPLETING); + bool drain_flag = (inx & NODE_STATE_DRAIN); + bool fail_flag = (inx & NODE_STATE_FAIL); bool no_resp_flag = (inx & NODE_STATE_NO_RESPOND); bool power_flag = (inx & NODE_STATE_POWER_SAVE); @@ -671,6 +712,17 @@ char *node_state_string(enum node_states inx) return "DRAINED"; } } + if (fail_flag) { + if (comp_flag || (inx == NODE_STATE_ALLOCATED)) { + if (no_resp_flag) + return "FAILING*"; + return "FAILING"; + } else { + if (no_resp_flag) + return "FAIL*"; + return "FAIL"; + } + } if (inx == NODE_STATE_DOWN) { if (no_resp_flag) return "DOWN*"; @@ -705,8 +757,9 @@ char *node_state_string(enum node_states inx) char *node_state_string_compact(enum node_states inx) { - bool drain_flag = (inx & NODE_STATE_DRAIN); bool comp_flag = (inx & NODE_STATE_COMPLETING); + bool drain_flag = (inx & NODE_STATE_DRAIN); + bool fail_flag = (inx & NODE_STATE_FAIL); bool no_resp_flag = (inx & NODE_STATE_NO_RESPOND); bool power_flag = (inx & NODE_STATE_POWER_SAVE); @@ -723,6 +776,17 @@ char *node_state_string_compact(enum node_states inx) return "DRAIN"; } } + if (fail_flag) { + if (comp_flag || (inx == NODE_STATE_ALLOCATED)) { + if (no_resp_flag) + return "FAILG*"; + return "FAILG"; + } else { + if (no_resp_flag) + return "FAIL*"; + return "FAIL"; + } + } if (inx == NODE_STATE_DOWN) { if (no_resp_flag) return "DOWN*"; @@ -834,37 +898,62 @@ void slurm_free_submit_response_response_msg(submit_response_msg_t * msg) void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr) { if (config_ptr) { + 
xfree(config_ptr->accounting_storage_host); + xfree(config_ptr->accounting_storage_loc); + xfree(config_ptr->accounting_storage_pass); + xfree(config_ptr->accounting_storage_type); + xfree(config_ptr->accounting_storage_user); xfree(config_ptr->authtype); xfree(config_ptr->backup_addr); xfree(config_ptr->backup_controller); + xfree(config_ptr->checkpoint_type); + xfree(config_ptr->cluster_name); xfree(config_ptr->control_addr); xfree(config_ptr->control_machine); + xfree(config_ptr->crypto_type); xfree(config_ptr->epilog); - xfree(config_ptr->job_acct_logfile); - xfree(config_ptr->job_acct_type); + xfree(config_ptr->health_check_program); + xfree(config_ptr->job_acct_gather_type); + xfree(config_ptr->job_comp_host); xfree(config_ptr->job_comp_loc); + xfree(config_ptr->job_comp_pass); xfree(config_ptr->job_comp_type); + xfree(config_ptr->job_comp_user); xfree(config_ptr->job_credential_private_key); xfree(config_ptr->job_credential_public_certificate); + xfree(config_ptr->licenses); xfree(config_ptr->mail_prog); xfree(config_ptr->mpi_default); + xfree(config_ptr->node_prefix); xfree(config_ptr->plugindir); + xfree(config_ptr->plugstack); xfree(config_ptr->proctrack_type); xfree(config_ptr->prolog); + xfree(config_ptr->propagate_rlimits); + xfree(config_ptr->propagate_rlimits_except); + xfree(config_ptr->resume_program); + xfree(config_ptr->sched_params); + xfree(config_ptr->schedtype); + xfree(config_ptr->select_type); + xfree(config_ptr->slurm_conf); xfree(config_ptr->slurm_user_name); xfree(config_ptr->slurmctld_pidfile); xfree(config_ptr->slurmctld_logfile); xfree(config_ptr->slurmd_logfile); xfree(config_ptr->slurmd_pidfile); xfree(config_ptr->slurmd_spooldir); - xfree(config_ptr->slurm_conf); - xfree(config_ptr->state_save_location); xfree(config_ptr->srun_epilog); xfree(config_ptr->srun_prolog); + xfree(config_ptr->state_save_location); + xfree(config_ptr->suspend_exc_nodes); + xfree(config_ptr->suspend_exc_parts); + xfree(config_ptr->suspend_program); + xfree(config_ptr->switch_type); xfree(config_ptr->task_epilog); - xfree(config_ptr->task_prolog); xfree(config_ptr->task_plugin); + xfree(config_ptr->task_prolog); xfree(config_ptr->tmp_fs); + xfree(config_ptr->unkillable_program); xfree(config_ptr); } } @@ -948,6 +1037,7 @@ static void _slurm_free_job_step_info_members (job_step_info_t * msg) if (msg != NULL) { xfree(msg->partition); xfree(msg->nodes); + xfree(msg->ckpt_path); } } @@ -984,7 +1074,9 @@ static void _slurm_free_node_info_members(node_info_t * node) { if (node) { xfree(node->name); + xfree(node->arch); xfree(node->features); + xfree(node->os); xfree(node->reason); } } @@ -1033,12 +1125,9 @@ static void _slurm_free_partition_info_members(partition_info_t * part) extern void slurm_free_file_bcast_msg(file_bcast_msg_t *msg) { - int i; - if (msg) { xfree(msg->fname); - for (i=0; i<FILE_BLOCKS; i++) - xfree(msg->block[i]); + xfree(msg->block); xfree(msg); } } @@ -1046,7 +1135,7 @@ extern void slurm_free_file_bcast_msg(file_bcast_msg_t *msg) extern void slurm_free_step_complete_msg(step_complete_msg_t *msg) { if (msg) { - jobacct_g_free(msg->jobacct); + jobacct_gather_g_destroy(msg->jobacct); xfree(msg); } } @@ -1054,7 +1143,7 @@ extern void slurm_free_step_complete_msg(step_complete_msg_t *msg) extern void slurm_free_stat_jobacct_msg(stat_jobacct_msg_t *msg) { if (msg) { - jobacct_g_free(msg->jobacct); + jobacct_gather_g_destroy(msg->jobacct); xfree(msg); } } @@ -1076,6 +1165,21 @@ void inline slurm_free_trigger_msg(trigger_info_msg_t *msg) xfree(msg); } +void 
slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg) +{ + xfree(msg); +} + + +void inline slurm_free_accounting_update_msg(accounting_update_msg_t *msg) +{ + if(msg) { + if(msg->update_list) + list_destroy(msg->update_list); + xfree(msg); + } +} + extern int slurm_free_msg_data(slurm_msg_type_t type, void *data) { switch(type) { @@ -1143,6 +1247,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data) case REQUEST_CHECKPOINT_COMP: slurm_free_checkpoint_comp_msg(data); break; + case REQUEST_CHECKPOINT_TASK_COMP: + slurm_free_checkpoint_task_comp_msg(data); + break; case REQUEST_SUSPEND: slurm_free_suspend_msg(data); break; @@ -1171,6 +1278,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data) case REQUEST_TERMINATE_TASKS: slurm_free_kill_tasks_msg(data); break; + case REQUEST_CHECKPOINT_TASKS: + slurm_free_checkpoint_tasks_msg(data); + break; case REQUEST_KILL_TIMELIMIT: slurm_free_timelimit_msg(data); break; @@ -1198,6 +1308,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data) case RESPONSE_SLURM_RC: slurm_free_return_code_msg(data); break; + case REQUEST_SET_DEBUG_LEVEL: + slurm_free_set_debug_level_msg(data); + break; case SLURM_SUCCESS: case REQUEST_PING: case REQUEST_RECONFIGURE: @@ -1205,9 +1318,12 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data) case REQUEST_SHUTDOWN_IMMEDIATE: case RESPONSE_FORWARD_FAILED: case REQUEST_DAEMON_STATUS: + case REQUEST_HEALTH_CHECK: /* No body to free */ break; - + case ACCOUNTING_UPDATE_MSG: + slurm_free_accounting_update_msg(data); + break; default: error("invalid type trying to be freed %u", type); break; @@ -1248,3 +1364,10 @@ extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data) return rc; } +void inline slurm_free_job_notify_msg(job_notify_msg_t * msg) +{ + if (msg) { + xfree(msg->message); + xfree(msg); + } +} diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h index 94fab224f..799f83ea8 100644 --- a/src/common/slurm_protocol_defs.h +++ b/src/common/slurm_protocol_defs.h @@ -1,12 +1,12 @@ /****************************************************************************\ * slurm_protocol_defs.h - definitions used for RPCs * - * $Id: slurm_protocol_defs.h 12088 2007-08-22 18:02:24Z jette $ + * $Id: slurm_protocol_defs.h 13755 2008-04-01 19:12:53Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
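slurm_free_msg_data() above gains new cases so every RPC body is released through one type-dispatched switch. A compact stand-alone sketch of that idea follows, with two invented message types; the names are illustrative only, not SLURM's.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef enum { MSG_NOTIFY, MSG_RC } msg_type_t;

typedef struct { char *text; } notify_msg_t;
typedef struct { int rc; }     rc_msg_t;

/* Free an RPC body based on its message type; unknown types are an error. */
static int free_msg_data(msg_type_t type, void *data)
{
	switch (type) {
	case MSG_NOTIFY: {
		notify_msg_t *m = data;
		free(m->text);   /* release owned members first */
		free(m);
		break;
	}
	case MSG_RC:
		free(data);
		break;
	default:
		fprintf(stderr, "invalid type %d\n", (int) type);
		return -1;
	}
	return 0;
}

int main(void)
{
	notify_msg_t *n = malloc(sizeof(*n));
	n->text = strdup("job 42 requeued");
	free_msg_data(MSG_NOTIFY, n);

	rc_msg_t *r = malloc(sizeof(*r));
	r->rc = 0;
	free_msg_data(MSG_RC, r);
	return 0;
}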
@@ -64,6 +64,7 @@ #include "src/common/job_options.h" #include "src/common/slurm_step_layout.h" #include "src/common/xassert.h" +//#include "src/common/slurm_jobacct_common.h" #define MAX_SLURM_NAME 64 #define FORWARD_INIT 0xfffe @@ -93,7 +94,9 @@ typedef enum { RESPONSE_SHUTDOWN, REQUEST_PING, REQUEST_CONTROL, - + REQUEST_SET_DEBUG_LEVEL, + REQUEST_HEALTH_CHECK, + REQUEST_BUILD_INFO = 2001, RESPONSE_BUILD_INFO, REQUEST_JOB_INFO, @@ -141,6 +144,7 @@ typedef enum { REQUEST_JOB_READY, RESPONSE_JOB_READY, REQUEST_JOB_END_TIME, + REQUEST_JOB_NOTIFY, REQUEST_JOB_STEP_CREATE = 5001, RESPONSE_JOB_STEP_CREATE, @@ -153,6 +157,7 @@ typedef enum { REQUEST_CHECKPOINT, RESPONSE_CHECKPOINT, REQUEST_CHECKPOINT_COMP, + REQUEST_CHECKPOINT_TASK_COMP, RESPONSE_CHECKPOINT_COMP, REQUEST_SUSPEND, RESPONSE_SUSPEND, @@ -171,6 +176,7 @@ typedef enum { RESPONSE_LAUNCH_TASKS, MESSAGE_TASK_EXIT, REQUEST_SIGNAL_TASKS, + REQUEST_CHECKPOINT_TASKS, REQUEST_TERMINATE_TASKS, REQUEST_REATTACH_TASKS, RESPONSE_REATTACH_TASKS, @@ -196,7 +202,10 @@ typedef enum { RESPONSE_SLURM_RC = 8001, - RESPONSE_FORWARD_FAILED = 9001 + RESPONSE_FORWARD_FAILED = 9001, + + ACCOUNTING_UPDATE_MSG = 10001, + } slurm_msg_type_t; typedef enum { @@ -254,6 +263,7 @@ typedef struct slurm_protocol_config { typedef struct slurm_msg { slurm_msg_type_t msg_type; + uint16_t flags; slurm_addr address; slurm_fd conn_fd; void *auth_cred; @@ -286,6 +296,12 @@ typedef struct job_step_kill_msg { uint16_t batch_flag; } job_step_kill_msg_t; +typedef struct job_notify_msg { + uint32_t job_id; + uint32_t job_step_id; /* currently not used */ + char * message; +} job_notify_msg_t; + typedef struct job_id_msg { uint32_t job_id; } job_id_msg_t; @@ -356,6 +372,13 @@ typedef struct kill_tasks_msg { uint32_t signal; } kill_tasks_msg_t; +typedef struct checkpoint_tasks_msg { + uint32_t job_id; + uint32_t job_step_id; + uint32_t signal; + time_t timestamp; +} checkpoint_tasks_msg_t; + typedef struct epilog_complete_msg { uint32_t job_id; uint32_t return_code; @@ -371,6 +394,10 @@ typedef struct last_update_msg { time_t last_update; } last_update_msg_t; +typedef struct set_debug_level_msg { + uint32_t debug_level; +} set_debug_level_msg_t; + typedef struct job_step_specs { uint32_t job_id; /* job ID */ uint32_t user_id; /* user the job runs as */ @@ -382,10 +409,16 @@ typedef struct job_step_specs { uint16_t plane_size; /* plane size when task_dist = SLURM_DIST_PLANE */ uint16_t port; /* port to contact initiating srun */ + uint16_t ckpt_interval; /* checkpoint creation interval (minutes) */ + uint16_t exclusive; /* 1 if CPUs not shared with other steps */ + uint16_t immediate; /* 1 if allocate to run or fail immediately, + * 0 if to be queued awaiting resources */ + uint16_t mem_per_task; /* MB memory required per task, 0=no limit */ char *host; /* host to contact initiating srun */ char *node_list; /* list of required nodes */ char *network; /* network use spec */ char *name; /* name of the job step, default "" */ + char *ckpt_path; /* path to store checkpoint image files */ uint8_t overcommit; /* flag, 1 to allow overcommit of processors, 0 to disallow overcommit. 
default is 0 */ } job_step_create_request_msg_t; @@ -406,9 +439,11 @@ typedef struct launch_tasks_request_msg { uint32_t nprocs; /* number of processes in this job step */ uint32_t uid; uint32_t gid; + uint32_t job_mem; /* MB of memory reserved by job, 0 if no limit */ + uint32_t task_mem; /* MB of memory reserved per task, 0 if no limit */ uint16_t *tasks_to_launch; - uint16_t envc; - uint16_t argc; + uint32_t envc; + uint32_t argc; uint16_t multi_prog; uint16_t *cpus_allocated; uint16_t max_sockets; @@ -438,6 +473,9 @@ typedef struct launch_tasks_request_msg { uint16_t user_managed_io; /* 0 for "normal" IO, 1 for "user manged" IO */ + uint8_t open_mode; /* stdout/err append or truncate */ + uint8_t pty; /* use pseudo tty */ + uint16_t acctg_freq; /* accounting polling interval */ /********** START "normal" IO only options **********/ /* These options are ignored if user_managed_io is 1 */ @@ -458,6 +496,7 @@ typedef struct launch_tasks_request_msg { switch_jobinfo_t switch_job; /* switch credential for the job */ job_options_t options; /* Arbitrary job options */ char *complete_nodelist; + char *ckpt_path; /* checkpoint path */ } launch_tasks_request_msg_t; typedef struct task_user_managed_io_msg { @@ -477,8 +516,11 @@ typedef struct return_code_msg { * the event of some launch failure or race condition preventing slurmd * from getting the MPIRUN_PARTITION at that time. It is needed for * the job epilog. */ +#define SIG_NODE_FAIL 998 /* Dummy signal value to signify node failure */ +#define SIG_FAILURE 999 /* Dummy signal value to signify sys failure */ typedef struct kill_job_msg { uint32_t job_id; + uint16_t job_state; uint32_t job_uid; time_t time; /* slurmctld's time of request */ char *nodes; @@ -531,14 +573,17 @@ typedef struct batch_job_launch_msg { char *in; /* pathname of stdin */ char *out; /* pathname of stdout */ char *work_dir; /* full pathname of working directory */ - uint16_t argc; + uint32_t argc; char **argv; - uint16_t envc; /* element count in environment */ + uint32_t envc; /* element count in environment */ char **environment; /* environment variables to set for job, * name=value pairs, one per line */ select_jobinfo_t select_jobinfo; /* opaque data type */ slurm_cred_t cred; - uint16_t overcommit; /* if resources being over subscribed */ + uint8_t open_mode; /* stdout/err append or truncate */ + uint8_t overcommit; /* if resources being over subscribed */ + uint16_t acctg_freq; /* accounting polling interval */ + uint32_t job_mem; /* memory limit for job */ } batch_job_launch_msg_t; typedef struct job_id_request_msg { @@ -550,35 +595,13 @@ typedef struct job_id_response_msg { uint32_t return_code; /* slurm return code */ } job_id_response_msg_t; -typedef struct srun_ping_msg { - uint32_t job_id; /* slurm job_id */ - uint32_t step_id; /* step_id or NO_VAL */ -} srun_ping_msg_t; - -typedef struct srun_job_complete_msg { - uint32_t job_id; /* slurm job_id */ - uint32_t step_id; /* step_id or NO_VAL */ -} srun_job_complete_msg_t; - typedef struct srun_exec_msg { uint32_t job_id; /* slurm job_id */ uint32_t step_id; /* step_id or NO_VAL */ - uint16_t argc; /* argument count */ + uint32_t argc; /* argument count */ char ** argv; /* program arguments */ } srun_exec_msg_t; -typedef struct srun_node_fail_msg { - uint32_t job_id; /* slurm job_id */ - uint32_t step_id; /* step_id or NO_VAL */ - char *nodelist; /* name of failed node(s) */ -} srun_node_fail_msg_t; - -typedef struct srun_timeout_msg { - uint32_t job_id; /* slurm job_id */ - uint32_t step_id; /* step_id or 
NO_VAL */ - time_t timeout; /* when job scheduled to be killed */ -} srun_timeout_msg_t; - typedef struct checkpoint_msg { uint16_t op; /* checkpoint operation, see enum check_opts */ uint16_t data; /* operation specific data */ @@ -594,6 +617,15 @@ typedef struct checkpoint_comp_msg { char * error_msg; /* error message on failure */ } checkpoint_comp_msg_t; +typedef struct checkpoint_task_comp_msg { + uint32_t job_id; /* slurm job_id */ + uint32_t step_id; /* slurm step_id */ + uint32_t task_id; /* task id */ + time_t begin_time; /* time checkpoint began */ + uint32_t error_code; /* error code on failure */ + char * error_msg; /* error message on failure */ +} checkpoint_task_comp_msg_t; + typedef struct checkpoint_resp_msg { time_t event_time; /* time of checkpoint start/finish */ uint32_t error_code; /* error code on failure */ @@ -605,11 +637,6 @@ typedef struct suspend_msg { uint32_t job_id; /* slurm job_id */ } suspend_msg_t; -typedef struct srun_user_msg { - uint32_t job_id; /* slurm job_id */ - char *msg; /* message to user's srun */ -} srun_user_msg_t; - typedef struct kvs_get_msg { uint16_t task_id; /* job step's task id */ uint16_t size; /* count of tasks in job */ @@ -617,7 +644,6 @@ typedef struct kvs_get_msg { char * hostname; /* hostname to be sent the kvs data */ } kvs_get_msg_t; -#define FILE_BLOCKS 8 typedef struct file_bcast_msg { char *fname; /* name of the destination file */ uint16_t block_no; /* block number of this data */ @@ -628,8 +654,8 @@ typedef struct file_bcast_msg { uint32_t gid; /* group for destination file */ time_t atime; /* last access time for destination file */ time_t mtime; /* last modification time for dest file */ - uint32_t block_len[FILE_BLOCKS];/* length of this data block */ - char *block[FILE_BLOCKS]; /* data for this block, 64k max */ + uint32_t block_len; /* length of this data block */ + char *block; /* data for this block */ } file_bcast_msg_t; typedef struct multi_core_data { @@ -655,18 +681,32 @@ typedef struct multi_core_data { uint16_t plane_size; /* plane size when task_dist = SLURM_DIST_PLANE */ } multi_core_data_t; +typedef struct pty_winsz { + uint16_t cols; + uint16_t rows; +} pty_winsz_t; + +typedef struct will_run_response_msg { + uint32_t job_id; + uint32_t proc_cnt; + time_t start_time; + char *node_list; +} will_run_response_msg_t; + /*****************************************************************************\ * Slurm API Message Types \*****************************************************************************/ typedef struct slurm_node_registration_status_msg { time_t timestamp; char *node_name; + char *arch; + char *os; uint16_t cpus; uint16_t sockets; uint16_t cores; uint16_t threads; - uint32_t real_memory_size; - uint32_t temporary_disk_space; + uint32_t real_memory; + uint32_t tmp_disk; uint32_t job_count; /* number of associate job_id's */ uint32_t *job_id; /* IDs of running job (if any) */ uint16_t *step_id; /* IDs of running job steps (if any) */ @@ -675,6 +715,15 @@ typedef struct slurm_node_registration_status_msg { switch_node_info_t switch_nodeinfo; /* set only if startup != 0 */ } slurm_node_registration_status_msg_t; + +/*****************************************************************************\ + * ACCOUNTING PUSHS +\*****************************************************************************/ + +typedef struct { + List update_list; /* of type acct_update_object_t *'s */ +} accounting_update_msg_t; + typedef struct slurm_ctl_conf slurm_ctl_conf_info_msg_t; 
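node_state_string() earlier in the patch layers flag bits (DRAIN, FAIL, COMPLETING, NO_RESPOND) on top of a base state and picks a display string from the combination, which is how the new FAILING/FAIL strings appear. A tiny self-contained version of that flag-decoding style is shown below; the bit values are made up for the example and do not match the real constants in slurm.h.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative values only; the real constants live in slurm.h. */
#define STATE_IDLE       0x0001
#define STATE_ALLOCATED  0x0002
#define FLAG_FAIL        0x0100
#define FLAG_NO_RESPOND  0x0200

static const char *node_state_str(uint16_t state)
{
	bool fail     = state & FLAG_FAIL;
	bool no_resp  = state & FLAG_NO_RESPOND;
	uint16_t base = state & 0x00ff;   /* strip flag bits */

	if (fail) {
		if (base == STATE_ALLOCATED)
			return no_resp ? "FAILING*" : "FAILING";
		return no_resp ? "FAIL*" : "FAIL";
	}
	if (base == STATE_ALLOCATED)
		return no_resp ? "ALLOCATED*" : "ALLOCATED";
	return no_resp ? "IDLE*" : "IDLE";
}

int main(void)
{
	printf("%s\n", node_state_str(STATE_ALLOCATED | FLAG_FAIL));
	printf("%s\n", node_state_str(STATE_IDLE | FLAG_NO_RESPOND));
	return 0;
}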
/*****************************************************************************\ * SLURM MESSAGE INITIALIZATION @@ -696,7 +745,10 @@ extern void slurm_msg_t_init (slurm_msg_t *msg); */ extern void slurm_msg_t_copy(slurm_msg_t *dest, slurm_msg_t *src); +extern void slurm_destroy_char(void *object); + /* free message functions */ +void inline slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg); void inline slurm_free_last_update_msg(last_update_msg_t * msg); void inline slurm_free_return_code_msg(return_code_msg_t * msg); void inline slurm_free_job_alloc_info_msg(job_alloc_info_msg_t * msg); @@ -705,6 +757,7 @@ void inline slurm_free_job_step_info_request_msg( job_step_info_request_msg_t *msg); void inline slurm_free_node_info_request_msg(node_info_request_msg_t *msg); void inline slurm_free_part_info_request_msg(part_info_request_msg_t *msg); +void inline slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg); #define slurm_free_timelimit_msg(msg) \ slurm_free_kill_job_msg(msg) @@ -764,6 +817,7 @@ void inline slurm_free_srun_timeout_msg(srun_timeout_msg_t * msg); void inline slurm_free_srun_user_msg(srun_user_msg_t * msg); void inline slurm_free_checkpoint_msg(checkpoint_msg_t *msg); void inline slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg); +void inline slurm_free_checkpoint_task_comp_msg(checkpoint_task_comp_msg_t *msg); void inline slurm_free_checkpoint_resp_msg(checkpoint_resp_msg_t *msg); void inline slurm_free_suspend_msg(suspend_msg_t *msg); void slurm_free_resource_allocation_response_msg ( @@ -780,11 +834,16 @@ void slurm_free_job_step_info_response_msg( void slurm_free_node_info_msg(node_info_msg_t * msg); void slurm_free_partition_info_msg(partition_info_msg_t * msg); void slurm_free_get_kvs_msg(kvs_get_msg_t *msg); +void slurm_free_will_run_response_msg(will_run_response_msg_t *msg); void inline slurm_free_file_bcast_msg(file_bcast_msg_t *msg); void inline slurm_free_step_complete_msg(step_complete_msg_t *msg); void inline slurm_free_stat_jobacct_msg(stat_jobacct_msg_t *msg); void inline slurm_free_node_select_msg( node_info_select_request_msg_t *msg); +void inline slurm_free_job_notify_msg(job_notify_msg_t * msg); + +void inline slurm_free_accounting_update_msg(accounting_update_msg_t *msg); + extern int slurm_free_msg_data(slurm_msg_type_t type, void *data); extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data); diff --git a/src/common/slurm_protocol_interface.h b/src/common/slurm_protocol_interface.h index 64bffe2b5..036f89db8 100644 --- a/src/common/slurm_protocol_interface.h +++ b/src/common/slurm_protocol_interface.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Chris Dunlap <cdunlap@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/slurm_protocol_mongo_common.h b/src/common/slurm_protocol_mongo_common.h index 666a25cdb..60e5ef42c 100644 --- a/src/common/slurm_protocol_mongo_common.h +++ b/src/common/slurm_protocol_mongo_common.h @@ -6,7 +6,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. 
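A recurring change in the receive paths of this patch is that the header's flags now travel with the message (msg->flags = header.flags) and the SLURM_GLOBAL_AUTH_KEY bit selects which credential key is used for verification. The sketch below shows flag-driven key selection only; select_key() and the key strings are illustrative assumptions, not SLURM's auth plugin API.

#include <stdio.h>
#include <stdint.h>

#define PROTOCOL_NO_FLAGS  0x0000
#define GLOBAL_AUTH_KEY    0x0001   /* mirrors the flag bit added in the patch */

/* Pick the verification key from the message-header flags. */
static const char *select_key(uint16_t flags)
{
	if (flags & GLOBAL_AUTH_KEY)
		return "shared-cluster-key";   /* illustrative value */
	return NULL;                           /* default per-user credential */
}

int main(void)
{
	uint16_t flags = GLOBAL_AUTH_KEY;
	const char *key = select_key(flags);
	printf("verify with %s\n", key ? key : "default credential");

	flags = PROTOCOL_NO_FLAGS;
	key = select_key(flags);
	printf("verify with %s\n", key ? key : "default credential");
	return 0;
}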
* For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c index 9a67b46f5..a0abfea97 100644 --- a/src/common/slurm_protocol_pack.c +++ b/src/common/slurm_protocol_pack.c @@ -1,12 +1,11 @@ /****************************************************************************\ * slurm_protocol_pack.c - functions to pack and unpack structures for RPCs - * - * $Id: slurm_protocol_pack.c 12814 2007-12-11 23:28:22Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -50,7 +49,8 @@ #include "src/common/bitstring.h" #include "src/common/log.h" #include "src/common/node_select.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/common/pack.h" #include "src/common/slurm_auth.h" #include "src/common/slurm_cred.h" @@ -165,6 +165,9 @@ static int _unpack_task_user_managed_io_stream_msg(task_user_managed_io_msg_t ** static void _pack_cancel_tasks_msg(kill_tasks_msg_t * msg, Buf buffer); static int _unpack_cancel_tasks_msg(kill_tasks_msg_t ** msg_ptr, Buf buffer); +static void _pack_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg, Buf buffer); +static int _unpack_checkpoint_tasks_msg(checkpoint_tasks_msg_t ** msg_ptr, Buf buffer); + static void _pack_launch_tasks_response_msg(launch_tasks_response_msg_t * msg, Buf buffer); static int _unpack_launch_tasks_response_msg(launch_tasks_response_msg_t ** @@ -188,7 +191,7 @@ static int _unpack_task_exit_msg(task_exit_msg_t ** msg_ptr, Buf buffer); static void _pack_job_alloc_info_msg(job_alloc_info_msg_t * job_desc_ptr, Buf buffer); -static int +static int _unpack_job_alloc_info_msg(job_alloc_info_msg_t **job_desc_buffer_ptr, Buf buffer); @@ -253,9 +256,9 @@ static void _pack_last_update_msg(last_update_msg_t * msg, Buf buffer); static int _unpack_last_update_msg(last_update_msg_t ** msg, Buf buffer); static void _pack_slurm_addr_array(slurm_addr * slurm_address, - uint16_t size_val, Buf buffer); + uint32_t size_val, Buf buffer); static int _unpack_slurm_addr_array(slurm_addr ** slurm_address, - uint16_t * size_val, Buf buffer); + uint32_t * size_val, Buf buffer); static void _pack_ret_list(List ret_list, uint16_t size_val, Buf buffer); static int _unpack_ret_list(List *ret_list, uint16_t size_val, Buf buffer); @@ -300,6 +303,10 @@ static void _pack_checkpoint_comp(checkpoint_comp_msg_t *msg, Buf buffer); static int _unpack_checkpoint_comp(checkpoint_comp_msg_t **msg_ptr, Buf buffer); +static void _pack_checkpoint_task_comp(checkpoint_task_comp_msg_t *msg, Buf buffer); +static int _unpack_checkpoint_task_comp(checkpoint_task_comp_msg_t **msg_ptr, + Buf buffer); + static void _pack_suspend_msg(suspend_msg_t *msg, Buf buffer); static int _unpack_suspend_msg(suspend_msg_t **msg_ptr, Buf buffer); @@ -326,6 +333,21 @@ static int _unpack_trigger_msg(trigger_info_msg_t ** msg_ptr , Buf buffer ); static void _pack_slurmd_status(slurmd_status_t *msg, Buf buffer); static int _unpack_slurmd_status(slurmd_status_t **msg_ptr, Buf buffer); +static void 
_pack_job_notify(job_notify_msg_t *msg, Buf buffer); +static int _unpack_job_notify(job_notify_msg_t **msg_ptr, Buf buffer); + +static void _pack_set_debug_level_msg(set_debug_level_msg_t * msg, Buf buffer); +static int _unpack_set_debug_level_msg(set_debug_level_msg_t ** msg_ptr, + Buf buffer); + +static void _pack_will_run_response_msg(will_run_response_msg_t *msg, Buf buffer); +static int _unpack_will_run_response_msg(will_run_response_msg_t ** msg_ptr, + Buf buffer); + +static void _pack_accounting_update_msg(accounting_update_msg_t *msg, Buf buffer); +static int _unpack_accounting_update_msg(accounting_update_msg_t **msg, + Buf buffer); + /* pack_header * packs a slurm protocol header that proceeds every slurm message * IN header - the header structure to pack @@ -363,7 +385,8 @@ pack_header(header_t * header, Buf buffer) int unpack_header(header_t * header, Buf buffer) { - uint16_t uint16_tmp = 0; + uint16_t uint16_tmp; + uint32_t uint32_tmp = 0; memset(header, 0, sizeof(header_t)); forward_init(&header->forward, NULL); @@ -376,7 +399,7 @@ unpack_header(header_t * header, Buf buffer) safe_unpack16(&header->forward.cnt, buffer); if (header->forward.cnt > 0) { safe_unpackstr_xmalloc(&header->forward.nodelist, - &uint16_tmp, buffer); + &uint32_tmp, buffer); safe_unpack32(&header->forward.timeout, buffer); } @@ -462,6 +485,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer) case REQUEST_PING: case REQUEST_CONTROL: case REQUEST_DAEMON_STATUS: + case REQUEST_HEALTH_CHECK: /* Message contains no body/information */ break; case REQUEST_SHUTDOWN: @@ -473,11 +497,14 @@ pack_msg(slurm_msg_t const *msg, Buf buffer) break; case RESPONSE_JOB_ALLOCATION_INFO_LITE: case RESPONSE_RESOURCE_ALLOCATION: - case RESPONSE_JOB_WILL_RUN: _pack_resource_allocation_response_msg ((resource_allocation_response_msg_t *) msg->data, buffer); break; + case RESPONSE_JOB_WILL_RUN: + _pack_will_run_response_msg((will_run_response_msg_t *) + msg->data, buffer); + break; case RESPONSE_JOB_ALLOCATION_INFO: _pack_job_alloc_info_response_msg( (job_alloc_info_response_msg_t *) @@ -521,6 +548,10 @@ pack_msg(slurm_msg_t const *msg, Buf buffer) _pack_cancel_tasks_msg((kill_tasks_msg_t *) msg->data, buffer); break; + case REQUEST_CHECKPOINT_TASKS: + _pack_checkpoint_tasks_msg((checkpoint_tasks_msg_t *) msg->data, + buffer); + break; case REQUEST_JOB_STEP_INFO: _pack_job_step_info_req_msg((job_step_info_request_msg_t *) msg->data, buffer); @@ -645,6 +676,10 @@ pack_msg(slurm_msg_t const *msg, Buf buffer) _pack_checkpoint_comp((checkpoint_comp_msg_t *)msg->data, buffer); break; + case REQUEST_CHECKPOINT_TASK_COMP: + _pack_checkpoint_task_comp((checkpoint_task_comp_msg_t *)msg->data, + buffer); + break; case RESPONSE_CHECKPOINT: case RESPONSE_CHECKPOINT_COMP: _pack_checkpoint_resp_msg((checkpoint_resp_msg_t *)msg->data, @@ -689,6 +724,18 @@ pack_msg(slurm_msg_t const *msg, Buf buffer) case RESPONSE_SLURMD_STATUS: _pack_slurmd_status((slurmd_status_t *) msg->data, buffer); break; + case REQUEST_JOB_NOTIFY: + _pack_job_notify((job_notify_msg_t *) msg->data, buffer); + break; + case REQUEST_SET_DEBUG_LEVEL: + _pack_set_debug_level_msg( + (set_debug_level_msg_t *)msg->data, buffer); + break; + case ACCOUNTING_UPDATE_MSG: + _pack_accounting_update_msg( + (accounting_update_msg_t *)msg->data, + buffer); + break; default: debug("No pack method for msg type %u", msg->msg_type); return EINVAL; @@ -766,6 +813,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer) case REQUEST_PING: case REQUEST_CONTROL: case REQUEST_DAEMON_STATUS: + case 
REQUEST_HEALTH_CHECK: /* Message contains no body/information */ break; case REQUEST_SHUTDOWN: @@ -778,11 +826,14 @@ unpack_msg(slurm_msg_t * msg, Buf buffer) break; case RESPONSE_JOB_ALLOCATION_INFO_LITE: case RESPONSE_RESOURCE_ALLOCATION: - case RESPONSE_JOB_WILL_RUN: rc = _unpack_resource_allocation_response_msg( (resource_allocation_response_msg_t **) & (msg->data), buffer); break; + case RESPONSE_JOB_WILL_RUN: + rc = _unpack_will_run_response_msg((will_run_response_msg_t **) + &(msg->data), buffer); + break; case RESPONSE_JOB_ALLOCATION_INFO: rc = _unpack_job_alloc_info_response_msg( (job_alloc_info_response_msg_t **) @@ -829,6 +880,10 @@ unpack_msg(slurm_msg_t * msg, Buf buffer) rc = _unpack_cancel_tasks_msg((kill_tasks_msg_t **) & (msg->data), buffer); break; + case REQUEST_CHECKPOINT_TASKS: + rc = _unpack_checkpoint_tasks_msg((checkpoint_tasks_msg_t **) & + (msg->data), buffer); + break; case REQUEST_JOB_STEP_INFO: rc = _unpack_job_step_info_req_msg( (job_step_info_request_msg_t **) @@ -969,6 +1024,10 @@ unpack_msg(slurm_msg_t * msg, Buf buffer) rc = _unpack_checkpoint_comp((checkpoint_comp_msg_t **) & msg->data, buffer); break; + case REQUEST_CHECKPOINT_TASK_COMP: + rc = _unpack_checkpoint_task_comp((checkpoint_task_comp_msg_t **) + & msg->data, buffer); + break; case RESPONSE_CHECKPOINT: case RESPONSE_CHECKPOINT_COMP: rc = _unpack_checkpoint_resp_msg((checkpoint_resp_msg_t **) @@ -1020,6 +1079,19 @@ unpack_msg(slurm_msg_t * msg, Buf buffer) rc = _unpack_slurmd_status((slurmd_status_t **) &msg->data, buffer); break; + case REQUEST_JOB_NOTIFY: + rc = _unpack_job_notify((job_notify_msg_t **) + &msg->data, buffer); + break; + case REQUEST_SET_DEBUG_LEVEL: + rc = _unpack_set_debug_level_msg( + (set_debug_level_msg_t **)&(msg->data), buffer); + break; + case ACCOUNTING_UPDATE_MSG: + _unpack_accounting_update_msg( + (accounting_update_msg_t **)&msg->data, + buffer); + break; default: debug("No unpack method for msg type %u", msg->msg_type); return EINVAL; @@ -1045,7 +1117,7 @@ _pack_update_node_msg(update_node_msg_t * msg, Buf buffer) static int _unpack_update_node_msg(update_node_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; update_node_msg_t *tmp_ptr; /* alloc memory for structure */ @@ -1053,10 +1125,10 @@ _unpack_update_node_msg(update_node_msg_t ** msg, Buf buffer) tmp_ptr = xmalloc(sizeof(update_node_msg_t)); *msg = tmp_ptr; - safe_unpackstr_xmalloc(&tmp_ptr->node_names, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->node_names, &uint32_tmp, buffer); safe_unpack16(&tmp_ptr->node_state, buffer); - safe_unpackstr_xmalloc(&tmp_ptr->features, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&tmp_ptr->reason, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->features, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->reason, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: @@ -1078,12 +1150,14 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t * pack_time(msg->timestamp, buffer); pack32((uint32_t)msg->status, buffer); packstr(msg->node_name, buffer); + packstr(msg->arch, buffer); + packstr(msg->os, buffer); pack16((uint32_t)msg->cpus, buffer); pack16((uint32_t)msg->sockets, buffer); pack16((uint32_t)msg->cores, buffer); pack16((uint32_t)msg->threads, buffer); - pack32((uint32_t)msg->real_memory_size, buffer); - pack32((uint32_t)msg->temporary_disk_space, buffer); + pack32((uint32_t)msg->real_memory, buffer); + pack32((uint32_t)msg->tmp_disk, buffer); pack32((uint32_t)msg->job_count, buffer); for (i = 0; 
i < msg->job_count; i++) { pack32((uint32_t)msg->job_id[i], buffer); @@ -1100,7 +1174,7 @@ static int _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; int i; slurm_node_registration_status_msg_t *node_reg_ptr; @@ -1113,13 +1187,15 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t safe_unpack_time(&node_reg_ptr->timestamp, buffer); /* load the data values */ safe_unpack32(&node_reg_ptr->status, buffer); - safe_unpackstr_xmalloc(&node_reg_ptr->node_name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&node_reg_ptr->node_name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node_reg_ptr->arch, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node_reg_ptr->os, &uint32_tmp, buffer); safe_unpack16(&node_reg_ptr->cpus, buffer); safe_unpack16(&node_reg_ptr->sockets, buffer); safe_unpack16(&node_reg_ptr->cores, buffer); safe_unpack16(&node_reg_ptr->threads, buffer); - safe_unpack32(&node_reg_ptr->real_memory_size, buffer); - safe_unpack32(&node_reg_ptr->temporary_disk_space, buffer); + safe_unpack32(&node_reg_ptr->real_memory, buffer); + safe_unpack32(&node_reg_ptr->tmp_disk, buffer); safe_unpack32(&node_reg_ptr->job_count, buffer); node_reg_ptr->job_id = xmalloc(sizeof(uint32_t) * node_reg_ptr->job_count); @@ -1141,8 +1217,10 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t return SLURM_SUCCESS; unpack_error: - xfree(node_reg_ptr->node_name); + xfree(node_reg_ptr->arch); xfree(node_reg_ptr->job_id); + xfree(node_reg_ptr->node_name); + xfree(node_reg_ptr->os); xfree(node_reg_ptr->step_id); switch_g_free_node_info(&node_reg_ptr->switch_nodeinfo); xfree(node_reg_ptr); @@ -1175,7 +1253,6 @@ static int _unpack_resource_allocation_response_msg(resource_allocation_response_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; uint32_t uint32_tmp; resource_allocation_response_msg_t *tmp_ptr; @@ -1187,7 +1264,7 @@ _unpack_resource_allocation_response_msg(resource_allocation_response_msg_t /* load the data values */ safe_unpack32(&tmp_ptr->error_code, buffer); safe_unpack32(&tmp_ptr->job_id, buffer); - safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp, buffer); safe_unpack16(&tmp_ptr->num_cpu_groups, buffer); if (tmp_ptr->num_cpu_groups > 0) { @@ -1251,7 +1328,6 @@ static int _unpack_job_alloc_info_response_msg(job_alloc_info_response_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; uint32_t uint32_tmp; job_alloc_info_response_msg_t *tmp_ptr; @@ -1263,7 +1339,7 @@ _unpack_job_alloc_info_response_msg(job_alloc_info_response_msg_t ** msg, /* load the data values */ safe_unpack32(&tmp_ptr->error_code, buffer); safe_unpack32(&tmp_ptr->job_id, buffer); - safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp, buffer); safe_unpack16(&tmp_ptr->num_cpu_groups, buffer); if (tmp_ptr->num_cpu_groups > 0) { @@ -1285,9 +1361,9 @@ _unpack_job_alloc_info_response_msg(job_alloc_info_response_msg_t ** msg, safe_unpack32(&tmp_ptr->node_cnt, buffer); if (tmp_ptr->node_cnt > 0) { if (_unpack_slurm_addr_array(&(tmp_ptr->node_addr), - &uint16_tmp, buffer)) + &uint32_tmp, buffer)) goto unpack_error; - if (uint16_tmp != tmp_ptr->node_cnt) + if (uint32_tmp != tmp_ptr->node_cnt) goto unpack_error; } else tmp_ptr->node_addr = NULL; @@ -1374,11 +1450,11 @@ unpack_error: static int _unpack_node_info_members(node_info_t * node, Buf buffer) { - uint16_t 
uint16_tmp; + uint32_t uint32_tmp; xassert(node != NULL); - safe_unpackstr_xmalloc(&node->name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&node->name, &uint32_tmp, buffer); safe_unpack16(&node->node_state, buffer); safe_unpack16(&node->cpus, buffer); safe_unpack16(&node->sockets, buffer); @@ -1390,14 +1466,18 @@ _unpack_node_info_members(node_info_t * node, Buf buffer) safe_unpack32(&node->weight, buffer); safe_unpack16(&node->used_cpus, buffer); - safe_unpackstr_xmalloc(&node->features, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&node->reason, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&node->arch, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node->features, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node->os, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node->reason, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: xfree(node->name); + xfree(node->arch); xfree(node->features); + xfree(node->os); xfree(node->reason); return SLURM_ERROR; } @@ -1423,16 +1503,17 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer) packstr(msg->name, buffer); packstr(msg->nodes, buffer); - pack16(msg-> hidden, buffer); + pack16(msg-> hidden, buffer); + pack16(msg-> max_share, buffer); + pack16(msg-> priority, buffer); pack16(msg-> root_only, buffer); - pack16(msg-> shared, buffer); pack16(msg-> state_up, buffer); } static int _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; update_part_msg_t *tmp_ptr; xassert(msg != NULL); @@ -1441,18 +1522,19 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer) tmp_ptr = xmalloc(sizeof(update_part_msg_t)); *msg = tmp_ptr; - safe_unpackstr_xmalloc(&tmp_ptr->allow_groups, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->allow_groups, &uint32_tmp, buffer); safe_unpack16(&tmp_ptr->default_part, buffer); safe_unpack32(&tmp_ptr->max_time, buffer); safe_unpack32(&tmp_ptr->max_nodes, buffer); safe_unpack32(&tmp_ptr->min_nodes, buffer); - safe_unpackstr_xmalloc(&tmp_ptr->name, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&tmp_ptr->nodes, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->nodes, &uint32_tmp, buffer); - safe_unpack16(&tmp_ptr->hidden, buffer); + safe_unpack16(&tmp_ptr->hidden, buffer); + safe_unpack16(&tmp_ptr->max_share, buffer); + safe_unpack16(&tmp_ptr->priority, buffer); safe_unpack16(&tmp_ptr->root_only, buffer); - safe_unpack16(&tmp_ptr->shared, buffer); - safe_unpack16(&tmp_ptr->state_up, buffer); + safe_unpack16(&tmp_ptr->state_up, buffer); return SLURM_SUCCESS; unpack_error: @@ -1475,7 +1557,7 @@ _pack_delete_partition_msg(delete_part_msg_t * msg, Buf buffer) static int _unpack_delete_partition_msg(delete_part_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; delete_part_msg_t *tmp_ptr; xassert(msg != NULL); @@ -1484,7 +1566,7 @@ _unpack_delete_partition_msg(delete_part_msg_t ** msg, Buf buffer) tmp_ptr = xmalloc(sizeof(delete_part_msg_t)); *msg = tmp_ptr; - safe_unpackstr_xmalloc(&tmp_ptr->name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: @@ -1510,11 +1592,16 @@ _pack_job_step_create_request_msg(job_step_create_request_msg_t pack16(msg->task_dist, buffer); pack16(msg->plane_size, buffer); pack16(msg->port, buffer); + pack16(msg->ckpt_interval, buffer); + pack16(msg->exclusive, buffer); + pack16(msg->immediate, buffer); + pack16(msg->mem_per_task, buffer); 
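	/*
	 * Illustrative note, not part of the patch: slurm_protocol_pack.c
	 * keeps each _pack_ and _unpack_ routine pair symmetric, so every
	 * field added here must be read back in the same order and at the
	 * same width by _unpack_job_step_create_request_msg() below; a
	 * hypothetical field packed with pack16(msg->foo, buffer) would be
	 * restored with safe_unpack16(&tmp_ptr->foo, buffer).  The recurring
	 * uint16_tmp to uint32_tmp change follows the same rule, since
	 * safe_unpackstr_xmalloc() now reports string lengths through a
	 * uint32_t out-parameter instead of a uint16_t.
	 */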
packstr(msg->host, buffer); packstr(msg->name, buffer); packstr(msg->network, buffer); packstr(msg->node_list, buffer); + packstr(msg->ckpt_path, buffer); pack8(msg->overcommit, buffer); } @@ -1523,7 +1610,7 @@ static int _unpack_job_step_create_request_msg(job_step_create_request_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; job_step_create_request_msg_t *tmp_ptr; /* alloc memory for structure */ @@ -1541,11 +1628,16 @@ _unpack_job_step_create_request_msg(job_step_create_request_msg_t ** msg, safe_unpack16(&(tmp_ptr->task_dist), buffer); safe_unpack16(&(tmp_ptr->plane_size), buffer); safe_unpack16(&(tmp_ptr->port), buffer); + safe_unpack16(&(tmp_ptr->ckpt_interval), buffer); + safe_unpack16(&(tmp_ptr->exclusive), buffer); + safe_unpack16(&(tmp_ptr->immediate), buffer); + safe_unpack16(&(tmp_ptr->mem_per_task), buffer); - safe_unpackstr_xmalloc(&(tmp_ptr->host), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(tmp_ptr->name), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(tmp_ptr->network), &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&(tmp_ptr->node_list), &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&(tmp_ptr->host), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(tmp_ptr->name), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(tmp_ptr->network), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(tmp_ptr->node_list), &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&(tmp_ptr->ckpt_path), &uint32_tmp, buffer); safe_unpack8(&(tmp_ptr->overcommit), buffer); @@ -1566,8 +1658,9 @@ _pack_kill_job_msg(kill_job_msg_t * msg, Buf buffer) { xassert(msg != NULL); - pack32((uint32_t)msg->job_id, buffer); - pack32((uint32_t)msg->job_uid, buffer); + pack32(msg->job_id, buffer); + pack16(msg->job_state, buffer); + pack32(msg->job_uid, buffer); pack_time(msg->time, buffer); packstr(msg->nodes, buffer); select_g_pack_jobinfo(msg->select_jobinfo, buffer); @@ -1576,7 +1669,7 @@ _pack_kill_job_msg(kill_job_msg_t * msg, Buf buffer) static int _unpack_kill_job_msg(kill_job_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; kill_job_msg_t *tmp_ptr; /* alloc memory for structure */ @@ -1585,9 +1678,10 @@ _unpack_kill_job_msg(kill_job_msg_t ** msg, Buf buffer) *msg = tmp_ptr; safe_unpack32(&(tmp_ptr->job_id), buffer); + safe_unpack16(&(tmp_ptr->job_state), buffer); safe_unpack32(&(tmp_ptr->job_uid), buffer); safe_unpack_time(&(tmp_ptr->time), buffer); - safe_unpackstr_xmalloc(&(tmp_ptr->nodes), &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&(tmp_ptr->nodes), &uint32_tmp, buffer); if (select_g_alloc_jobinfo (&tmp_ptr->select_jobinfo) || select_g_unpack_jobinfo(tmp_ptr->select_jobinfo, buffer)) goto unpack_error; @@ -1648,7 +1742,7 @@ static int _unpack_epilog_comp_msg(epilog_complete_msg_t ** msg, Buf buffer) { epilog_complete_msg_t *tmp_ptr; - uint16_t uint16_tmp; + uint32_t uint32_tmp; /* alloc memory for structure */ xassert(msg); @@ -1657,7 +1751,7 @@ _unpack_epilog_comp_msg(epilog_complete_msg_t ** msg, Buf buffer) safe_unpack32(&(tmp_ptr->job_id), buffer); safe_unpack32(&(tmp_ptr->return_code), buffer); - safe_unpackstr_xmalloc(& (tmp_ptr->node_name), &uint16_tmp, buffer); + safe_unpackstr_xmalloc(& (tmp_ptr->node_name), &uint32_tmp, buffer); if (switch_g_alloc_node_info(&tmp_ptr->switch_nodeinfo) || switch_g_unpack_node_info(tmp_ptr->switch_nodeinfo, buffer)) goto unpack_error; @@ -1789,10 +1883,10 @@ unpack_error: static int _unpack_partition_info_members(partition_info_t * part, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; char 
*node_inx_str = NULL; - safe_unpackstr_xmalloc(&part->name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&part->name, &uint32_tmp, buffer); if (part->name == NULL) part->name = xmalloc(1); /* part->name = "" implicit */ safe_unpack32(&part->max_time, buffer); @@ -1803,14 +1897,16 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer) safe_unpack32(&part->total_cpus, buffer); safe_unpack16(&part->default_part, buffer); + safe_unpack16(&part->disable_root_jobs, buffer); safe_unpack16(&part->hidden, buffer); safe_unpack16(&part->root_only, buffer); - safe_unpack16(&part->shared, buffer); + safe_unpack16(&part->max_share, buffer); + safe_unpack16(&part->priority, buffer); safe_unpack16(&part->state_up, buffer); - safe_unpackstr_xmalloc(&part->allow_groups, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&part->nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&node_inx_str, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&part->allow_groups, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&part->nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer); if (node_inx_str == NULL) part->node_inx = bitfmt2int(""); else { @@ -1880,21 +1976,23 @@ unpack_error: static int _unpack_job_step_info_members(job_step_info_t * step, Buf buffer) { - uint16_t uint16_tmp = 0; + uint32_t uint32_tmp = 0; char *node_inx_str; safe_unpack32(&step->job_id, buffer); safe_unpack16(&step->step_id, buffer); + safe_unpack16(&step->ckpt_interval, buffer); safe_unpack32(&step->user_id, buffer); safe_unpack32(&step->num_tasks, buffer); safe_unpack_time(&step->start_time, buffer); safe_unpack_time(&step->run_time, buffer); - safe_unpackstr_xmalloc(&step->partition, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&step->nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&step->name, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&step->network, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&node_inx_str, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&step->partition, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&step->nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&step->name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&step->network, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&step->ckpt_path, &uint32_tmp, buffer); if (node_inx_str == NULL) step->node_inx = bitfmt2int(""); else { @@ -1987,7 +2085,6 @@ unpack_error: static int _unpack_job_info_members(job_info_t * job, Buf buffer) { - uint16_t uint16_tmp; uint32_t uint32_tmp; char *node_inx_str; multi_core_data_t *mc_ptr; @@ -2010,21 +2107,21 @@ _unpack_job_info_members(job_info_t * job, Buf buffer) safe_unpack_time(&job->pre_sus_time, buffer); safe_unpack32(&job->priority, buffer); - safe_unpackstr_xmalloc(&job->nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job->partition, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job->account, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job->network, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job->comment, &uint16_tmp, buffer); - safe_unpack32(&job->dependency, buffer); - safe_unpack32(&job->exit_code, buffer); + safe_unpackstr_xmalloc(&job->nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->partition, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->account, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->network, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->comment, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->licenses, &uint32_tmp, buffer); + 
safe_unpack32(&job->exit_code, buffer); safe_unpack16(&job->num_cpu_groups, buffer); safe_unpack32_array(&job->cpus_per_node, &uint32_tmp, buffer); safe_unpack32_array(&job->cpu_count_reps, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&job->name, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job->alloc_node, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&node_inx_str, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job->name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->alloc_node, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer); if (node_inx_str == NULL) job->node_inx = bitfmt2int(""); else { @@ -2038,10 +2135,14 @@ _unpack_job_info_members(job_info_t * job, Buf buffer) goto unpack_error; /*** unpack default job details ***/ - safe_unpackstr_xmalloc(&job->features, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job->features, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->work_dir, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->dependency, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job->command, &uint32_tmp, buffer); + safe_unpack32(&job->num_nodes, buffer); safe_unpack32(&job->max_nodes, buffer); - + safe_unpack16(&job->requeue, buffer); /*** unpack pending job details ***/ safe_unpack16(&job->shared, buffer); @@ -2050,19 +2151,18 @@ _unpack_job_info_members(job_info_t * job, Buf buffer) safe_unpack16(&job->job_min_procs, buffer); safe_unpack32(&job->job_min_memory, buffer); - safe_unpack32(&job->job_max_memory, buffer); safe_unpack32(&job->job_min_tmp_disk, buffer); - safe_unpackstr_xmalloc(&job->req_nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&node_inx_str, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job->req_nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer); if (node_inx_str == NULL) job->req_node_inx = bitfmt2int(""); else { job->req_node_inx = bitfmt2int(node_inx_str); xfree(node_inx_str); } - safe_unpackstr_xmalloc(&job->exc_nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&node_inx_str, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job->exc_nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer); if (node_inx_str == NULL) job->exc_node_inx = bitfmt2int(""); else { @@ -2093,17 +2193,24 @@ unpack_error: xfree(job->nodes); xfree(job->partition); xfree(job->account); + xfree(job->network); + xfree(job->comment); + xfree(job->dependency); + xfree(job->cpus_per_node); + xfree(job->cpu_count_reps); xfree(job->name); xfree(job->alloc_node); xfree(job->node_inx); select_g_free_jobinfo(&job->select_jobinfo); xfree(job->features); + xfree(job->work_dir); + xfree(job->command); + xfree(job->licenses); xfree(job->req_nodes); xfree(job->req_node_inx); xfree(job->exc_nodes); xfree(job->exc_node_inx); - xfree(job->network); - xfree(job->comment); + return SLURM_ERROR; } @@ -2111,80 +2218,142 @@ static void _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer) { pack_time(build_ptr->last_update, buffer); + + pack16(build_ptr->accounting_storage_enforce, buffer); + packstr(build_ptr->accounting_storage_host, buffer); + packstr(build_ptr->accounting_storage_loc, buffer); + packstr(build_ptr->accounting_storage_pass, buffer); + pack32(build_ptr->accounting_storage_port, buffer); + packstr(build_ptr->accounting_storage_type, buffer); + packstr(build_ptr->accounting_storage_user, buffer); + packstr(build_ptr->authtype, buffer); + packstr(build_ptr->backup_addr, buffer); packstr(build_ptr->backup_controller, buffer); - 
pack16((uint16_t)build_ptr->cache_groups, buffer); + pack_time(build_ptr->boot_time, buffer); + + pack16(build_ptr->cache_groups, buffer); packstr(build_ptr->checkpoint_type, buffer); + packstr(build_ptr->cluster_name, buffer); packstr(build_ptr->control_addr, buffer); packstr(build_ptr->control_machine, buffer); + packstr(build_ptr->crypto_type, buffer); + + pack32(build_ptr->def_mem_per_task, buffer); + pack16(build_ptr->disable_root_jobs, buffer); + packstr(build_ptr->epilog, buffer); - pack16((uint16_t)build_ptr->fast_schedule, buffer); - pack32((uint32_t)build_ptr->first_job_id, buffer); - pack16((uint16_t)build_ptr->inactive_limit, buffer); - packstr(build_ptr->job_acct_logfile, buffer); - pack16(build_ptr->job_acct_freq, buffer); - packstr(build_ptr->job_acct_type, buffer); + pack32(build_ptr->epilog_msg_time, buffer); + + pack16(build_ptr->fast_schedule, buffer); + pack32(build_ptr->first_job_id, buffer); + + pack16(build_ptr->get_env_timeout, buffer); + + pack16(build_ptr->health_check_interval, buffer); + packstr(build_ptr->health_check_program, buffer); + + pack16(build_ptr->inactive_limit, buffer); + + pack16(build_ptr->job_acct_gather_freq, buffer); + packstr(build_ptr->job_acct_gather_type, buffer); + + packstr(build_ptr->job_comp_host, buffer); packstr(build_ptr->job_comp_loc, buffer); + packstr(build_ptr->job_comp_pass, buffer); + pack32((uint32_t)build_ptr->job_comp_port, buffer); packstr(build_ptr->job_comp_type, buffer); - pack16((uint16_t)build_ptr->kill_wait, buffer); + packstr(build_ptr->job_comp_user, buffer); + + packstr(build_ptr->job_credential_private_key, buffer); + packstr(build_ptr->job_credential_public_certificate, buffer); + pack16(build_ptr->job_file_append, buffer); + pack16(build_ptr->job_requeue, buffer); + + pack16(build_ptr->kill_wait, buffer); + + packstr(build_ptr->licenses, buffer); + packstr(build_ptr->mail_prog, buffer); - pack16((uint16_t)build_ptr->max_job_cnt, buffer); - pack16((uint16_t)build_ptr->min_job_age, buffer); + pack16(build_ptr->max_job_cnt, buffer); + pack32(build_ptr->max_mem_per_task, buffer); + pack16(build_ptr->min_job_age, buffer); packstr(build_ptr->mpi_default, buffer); - pack16((uint16_t)build_ptr->msg_timeout, buffer); - pack32((uint32_t)build_ptr->next_job_id, buffer); + pack16(build_ptr->msg_timeout, buffer); + + pack32(build_ptr->next_job_id, buffer); + packstr(build_ptr->node_prefix, buffer); + packstr(build_ptr->plugindir, buffer); packstr(build_ptr->plugstack, buffer); + pack16(build_ptr->private_data, buffer); packstr(build_ptr->proctrack_type, buffer); packstr(build_ptr->prolog, buffer); pack16(build_ptr->propagate_prio_process, buffer); packstr(build_ptr->propagate_rlimits, buffer); packstr(build_ptr->propagate_rlimits_except, buffer); - pack16((uint16_t)build_ptr->ret2service, buffer); - pack16((uint16_t)build_ptr->schedport, buffer); - pack16((uint16_t)build_ptr->schedrootfltr, buffer); + + packstr(build_ptr->resume_program, buffer); + pack16(build_ptr->resume_rate, buffer); + pack16(build_ptr->ret2service, buffer); + + packstr(build_ptr->sched_params, buffer); + pack16(build_ptr->schedport, buffer); + pack16(build_ptr->schedrootfltr, buffer); + pack16(build_ptr->sched_time_slice, buffer); packstr(build_ptr->schedtype, buffer); packstr(build_ptr->select_type, buffer); - pack16((uint16_t)build_ptr->select_type_param, buffer); - pack32((uint32_t)build_ptr->slurm_user_id, buffer); + pack16(build_ptr->select_type_param, buffer); + + packstr(build_ptr->slurm_conf, buffer); + pack32(build_ptr->slurm_user_id, buffer); 
packstr(build_ptr->slurm_user_name, buffer); - pack16((uint16_t)build_ptr->slurmctld_debug, buffer); + + pack16(build_ptr->slurmctld_debug, buffer); packstr(build_ptr->slurmctld_logfile, buffer); packstr(build_ptr->slurmctld_pidfile, buffer); - pack32((uint32_t)build_ptr->slurmctld_port, buffer); - pack16((uint16_t)build_ptr->slurmctld_timeout, buffer); - pack16((uint16_t)build_ptr->slurmd_debug, buffer); + pack32(build_ptr->slurmctld_port, buffer); + pack16(build_ptr->slurmctld_timeout, buffer); + + pack16(build_ptr->slurmd_debug, buffer); packstr(build_ptr->slurmd_logfile, buffer); packstr(build_ptr->slurmd_pidfile, buffer); #ifndef MULTIPLE_SLURMD - pack32((uint32_t)build_ptr->slurmd_port, buffer); + pack32(build_ptr->slurmd_port, buffer); #endif packstr(build_ptr->slurmd_spooldir, buffer); - pack16((uint16_t)build_ptr->slurmd_timeout, buffer); - packstr(build_ptr->slurm_conf, buffer); + pack16(build_ptr->slurmd_timeout, buffer); + + packstr(build_ptr->srun_epilog, buffer); + packstr(build_ptr->srun_prolog, buffer); packstr(build_ptr->state_save_location, buffer); + packstr(build_ptr->suspend_exc_nodes, buffer); + packstr(build_ptr->suspend_exc_parts, buffer); + packstr(build_ptr->suspend_program, buffer); + pack16(build_ptr->suspend_rate, buffer); + pack16(build_ptr->suspend_time, buffer); packstr(build_ptr->switch_type, buffer); + packstr(build_ptr->task_epilog, buffer); packstr(build_ptr->task_prolog, buffer); packstr(build_ptr->task_plugin, buffer); pack16(build_ptr->task_plugin_param, buffer); packstr(build_ptr->tmp_fs, buffer); - pack16((uint16_t)build_ptr->wait_time, buffer); - packstr(build_ptr->job_credential_private_key, buffer); - packstr(build_ptr->job_credential_public_certificate, buffer); - packstr(build_ptr->srun_prolog, buffer); - packstr(build_ptr->srun_epilog, buffer); - packstr(build_ptr->node_prefix, buffer); - pack16((uint16_t)build_ptr->tree_width, buffer); + pack16(build_ptr->tree_width, buffer); + pack16(build_ptr->use_pam, buffer); + packstr(build_ptr->unkillable_program, buffer); + pack16(build_ptr->unkillable_timeout, buffer); + + pack16(build_ptr->wait_time, buffer); } static int _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t ** build_buffer_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; slurm_ctl_conf_info_msg_t *build_ptr; /* alloc memory for structure */ @@ -2194,115 +2363,204 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t ** /* load the data values */ /* unpack timestamp of snapshot */ safe_unpack_time(&build_ptr->last_update, buffer); - safe_unpackstr_xmalloc(&build_ptr->authtype, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->backup_addr, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->backup_controller, &uint16_tmp, + + safe_unpack16(&build_ptr->accounting_storage_enforce, buffer); + safe_unpackstr_xmalloc(&build_ptr->accounting_storage_host, + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->accounting_storage_loc, + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->accounting_storage_pass, + &uint32_tmp, buffer); + safe_unpack32(&build_ptr->accounting_storage_port, buffer); + safe_unpackstr_xmalloc(&build_ptr->accounting_storage_type, + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->accounting_storage_user, + &uint32_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->authtype, &uint32_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->backup_addr, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->backup_controller, &uint32_tmp, buffer); + 
safe_unpack_time(&build_ptr->boot_time, buffer); + safe_unpack16(&build_ptr->cache_groups, buffer); - safe_unpackstr_xmalloc(&build_ptr->checkpoint_type, &uint16_tmp, + safe_unpackstr_xmalloc(&build_ptr->checkpoint_type, &uint32_tmp, + buffer); + safe_unpackstr_xmalloc(&build_ptr->cluster_name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->control_addr, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->control_machine, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->control_addr, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->control_machine, &uint16_tmp, + safe_unpackstr_xmalloc(&build_ptr->crypto_type, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->epilog, &uint16_tmp, buffer); + + safe_unpack32(&build_ptr->def_mem_per_task, buffer); + safe_unpack16(&build_ptr->disable_root_jobs, buffer); + + safe_unpackstr_xmalloc(&build_ptr->epilog, &uint32_tmp, buffer); + safe_unpack32(&build_ptr->epilog_msg_time, buffer); + safe_unpack16(&build_ptr->fast_schedule, buffer); safe_unpack32(&build_ptr->first_job_id, buffer); + + safe_unpack16(&build_ptr->get_env_timeout, buffer); + + safe_unpack16(&build_ptr->health_check_interval, buffer); + safe_unpackstr_xmalloc(&build_ptr->health_check_program, + &uint32_tmp, buffer); + safe_unpack16(&build_ptr->inactive_limit, buffer); - safe_unpackstr_xmalloc(&build_ptr->job_acct_logfile, &uint16_tmp, - buffer); - safe_unpack16(&build_ptr->job_acct_freq, buffer); - safe_unpackstr_xmalloc(&build_ptr->job_acct_type, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->job_comp_loc, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->job_comp_type, &uint16_tmp, buffer); + + safe_unpack16(&build_ptr->job_acct_gather_freq, buffer); + safe_unpackstr_xmalloc(&build_ptr->job_acct_gather_type, + &uint32_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->job_comp_host, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->job_comp_loc, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->job_comp_pass, &uint32_tmp, buffer); + safe_unpack32(&build_ptr->job_comp_port, buffer); + safe_unpackstr_xmalloc(&build_ptr->job_comp_type, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->job_comp_user, &uint32_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->job_credential_private_key, + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr-> + job_credential_public_certificate, + &uint32_tmp, buffer); + safe_unpack16(&build_ptr->job_file_append, buffer); + safe_unpack16(&build_ptr->job_requeue, buffer); + safe_unpack16(&build_ptr->kill_wait, buffer); - safe_unpackstr_xmalloc(&build_ptr->mail_prog, &uint16_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->licenses, &uint32_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->mail_prog, &uint32_tmp, buffer); safe_unpack16(&build_ptr->max_job_cnt, buffer); + safe_unpack32(&build_ptr->max_mem_per_task, buffer); safe_unpack16(&build_ptr->min_job_age, buffer); - safe_unpackstr_xmalloc(&build_ptr->mpi_default, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->mpi_default, &uint32_tmp, buffer); safe_unpack16(&build_ptr->msg_timeout, buffer); + safe_unpack32(&build_ptr->next_job_id, buffer); - safe_unpackstr_xmalloc(&build_ptr->plugindir, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->plugstack, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->proctrack_type, &uint16_tmp, + safe_unpackstr_xmalloc(&build_ptr->node_prefix, &uint32_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->plugindir, &uint32_tmp, buffer); 
+ safe_unpackstr_xmalloc(&build_ptr->plugstack, &uint32_tmp, buffer); + safe_unpack16(&build_ptr->private_data, buffer); + safe_unpackstr_xmalloc(&build_ptr->proctrack_type, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->prolog, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->prolog, &uint32_tmp, buffer); safe_unpack16(&build_ptr->propagate_prio_process, buffer); safe_unpackstr_xmalloc(&build_ptr->propagate_rlimits, - &uint16_tmp, buffer); + &uint32_tmp, buffer); safe_unpackstr_xmalloc(&build_ptr->propagate_rlimits_except, - &uint16_tmp, buffer); + &uint32_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->resume_program, + &uint32_tmp, buffer); + safe_unpack16(&build_ptr->resume_rate, buffer); safe_unpack16(&build_ptr->ret2service, buffer); + + safe_unpackstr_xmalloc(&build_ptr->sched_params, &uint32_tmp, buffer); safe_unpack16(&build_ptr->schedport, buffer); safe_unpack16(&build_ptr->schedrootfltr, buffer); - safe_unpackstr_xmalloc(&build_ptr->schedtype, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->select_type, &uint16_tmp, buffer); + safe_unpack16(&build_ptr->sched_time_slice, buffer); + safe_unpackstr_xmalloc(&build_ptr->schedtype, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->select_type, &uint32_tmp, buffer); safe_unpack16(&build_ptr->select_type_param, buffer); + + safe_unpackstr_xmalloc(&build_ptr->slurm_conf, + &uint32_tmp, buffer); safe_unpack32(&build_ptr->slurm_user_id, buffer); safe_unpackstr_xmalloc(&build_ptr->slurm_user_name, - &uint16_tmp, buffer); + &uint32_tmp, buffer); + safe_unpack16(&build_ptr->slurmctld_debug, buffer); safe_unpackstr_xmalloc(&build_ptr->slurmctld_logfile, - &uint16_tmp, buffer); + &uint32_tmp, buffer); safe_unpackstr_xmalloc(&build_ptr->slurmctld_pidfile, - &uint16_tmp, buffer); + &uint32_tmp, buffer); safe_unpack32(&build_ptr->slurmctld_port, buffer); safe_unpack16(&build_ptr->slurmctld_timeout, buffer); + safe_unpack16(&build_ptr->slurmd_debug, buffer); - safe_unpackstr_xmalloc(&build_ptr->slurmd_logfile, &uint16_tmp, + safe_unpackstr_xmalloc(&build_ptr->slurmd_logfile, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->slurmd_pidfile, &uint16_tmp, + safe_unpackstr_xmalloc(&build_ptr->slurmd_pidfile, &uint32_tmp, buffer); #ifndef MULTIPLE_SLURMD safe_unpack32(&build_ptr->slurmd_port, buffer); #endif - safe_unpackstr_xmalloc(&build_ptr->slurmd_spooldir, &uint16_tmp, + safe_unpackstr_xmalloc(&build_ptr->slurmd_spooldir, &uint32_tmp, buffer); safe_unpack16(&build_ptr->slurmd_timeout, buffer); - safe_unpackstr_xmalloc(&build_ptr->slurm_conf, &uint16_tmp, buffer); + + safe_unpackstr_xmalloc(&build_ptr->srun_epilog, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->srun_prolog, &uint32_tmp, buffer); safe_unpackstr_xmalloc(&build_ptr->state_save_location, - &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->switch_type, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->task_epilog, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->task_prolog, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->task_plugin, &uint16_tmp, buffer); + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->suspend_exc_nodes, + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->suspend_exc_parts, + &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->suspend_program, + &uint32_tmp, buffer); + safe_unpack16(&build_ptr->suspend_rate, buffer); + safe_unpack16(&build_ptr->suspend_time, buffer); + safe_unpackstr_xmalloc(&build_ptr->switch_type, &uint32_tmp, 
buffer); + + safe_unpackstr_xmalloc(&build_ptr->task_epilog, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->task_prolog, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->task_plugin, &uint32_tmp, buffer); safe_unpack16(&build_ptr->task_plugin_param, buffer); - safe_unpackstr_xmalloc(&build_ptr->tmp_fs, &uint16_tmp, buffer); - safe_unpack16(&build_ptr->wait_time, buffer); - safe_unpackstr_xmalloc(&build_ptr->job_credential_private_key, - &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr-> - job_credential_public_certificate, - &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->srun_prolog, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->srun_epilog, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&build_ptr->node_prefix, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&build_ptr->tmp_fs, &uint32_tmp, buffer); safe_unpack16(&build_ptr->tree_width, buffer); + safe_unpack16(&build_ptr->use_pam, buffer); + safe_unpackstr_xmalloc(&build_ptr->unkillable_program, + &uint32_tmp, buffer); + safe_unpack16(&build_ptr->unkillable_timeout, buffer); + + safe_unpack16(&build_ptr->wait_time, buffer); return SLURM_SUCCESS; unpack_error: + xfree(build_ptr->accounting_storage_host); + xfree(build_ptr->accounting_storage_loc); + xfree(build_ptr->accounting_storage_pass); + xfree(build_ptr->accounting_storage_type); + xfree(build_ptr->accounting_storage_user); xfree(build_ptr->authtype); xfree(build_ptr->backup_addr); xfree(build_ptr->backup_controller); xfree(build_ptr->checkpoint_type); + xfree(build_ptr->cluster_name); xfree(build_ptr->control_addr); xfree(build_ptr->control_machine); + xfree(build_ptr->crypto_type); xfree(build_ptr->epilog); - xfree(build_ptr->job_acct_logfile); - xfree(build_ptr->job_acct_type); + xfree(build_ptr->health_check_program); + xfree(build_ptr->job_acct_gather_type); xfree(build_ptr->job_comp_loc); + xfree(build_ptr->job_comp_pass); xfree(build_ptr->job_comp_type); + xfree(build_ptr->job_comp_user); xfree(build_ptr->job_credential_private_key); xfree(build_ptr->job_credential_public_certificate); + xfree(build_ptr->health_check_program); + xfree(build_ptr->licenses); xfree(build_ptr->mail_prog); xfree(build_ptr->mpi_default); + xfree(build_ptr->node_prefix); xfree(build_ptr->plugindir); xfree(build_ptr->plugstack); xfree(build_ptr->proctrack_type); xfree(build_ptr->prolog); xfree(build_ptr->propagate_rlimits); xfree(build_ptr->propagate_rlimits_except); + xfree(build_ptr->resume_program); + xfree(build_ptr->sched_params); xfree(build_ptr->schedtype); xfree(build_ptr->select_type); xfree(build_ptr->slurm_conf); @@ -2312,15 +2570,19 @@ unpack_error: xfree(build_ptr->slurmd_logfile); xfree(build_ptr->slurmd_pidfile); xfree(build_ptr->slurmd_spooldir); + xfree(build_ptr->srun_epilog); + xfree(build_ptr->srun_prolog); xfree(build_ptr->state_save_location); + xfree(build_ptr->suspend_exc_nodes); + xfree(build_ptr->suspend_exc_parts); + xfree(build_ptr->suspend_program); xfree(build_ptr->switch_type); + xfree(build_ptr->node_prefix); xfree(build_ptr->task_epilog); xfree(build_ptr->task_prolog); xfree(build_ptr->task_plugin); xfree(build_ptr->tmp_fs); - xfree(build_ptr->srun_prolog); - xfree(build_ptr->srun_epilog); - xfree(build_ptr->node_prefix); + xfree(build_ptr->unkillable_program); xfree(build_ptr); *build_buffer_ptr = NULL; return SLURM_ERROR; @@ -2351,17 +2613,19 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer) pack16(job_desc_ptr->job_min_cores, buffer); pack16(job_desc_ptr->job_min_threads, buffer); 
pack32(job_desc_ptr->job_min_memory, buffer); - pack32(job_desc_ptr->job_max_memory, buffer); pack32(job_desc_ptr->job_min_tmp_disk, buffer); packstr(job_desc_ptr->partition, buffer); pack32(job_desc_ptr->priority, buffer); - pack32(job_desc_ptr->dependency, buffer); + packstr(job_desc_ptr->dependency, buffer); packstr(job_desc_ptr->account, buffer); packstr(job_desc_ptr->comment, buffer); pack16(job_desc_ptr->nice, buffer); - pack16(job_desc_ptr->overcommit, buffer); - pack32(job_desc_ptr->num_tasks, buffer); + + pack8(job_desc_ptr->open_mode, buffer); + pack8(job_desc_ptr->overcommit, buffer); + pack16(job_desc_ptr->acctg_freq, buffer); + pack32(job_desc_ptr->num_tasks, buffer); packstr(job_desc_ptr->req_nodes, buffer); packstr(job_desc_ptr->exc_nodes, buffer); @@ -2376,7 +2640,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer) packstr(job_desc_ptr->work_dir, buffer); pack16(job_desc_ptr->immediate, buffer); - pack16(job_desc_ptr->no_requeue, buffer); + pack16(job_desc_ptr->requeue, buffer); pack16(job_desc_ptr->shared, buffer); pack16(job_desc_ptr->cpus_per_task, buffer); pack16(job_desc_ptr->ntasks_per_node, buffer); @@ -2397,12 +2661,11 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer) pack32(job_desc_ptr->group_id, buffer); pack16(job_desc_ptr->alloc_resp_port, buffer); - packstr(job_desc_ptr->alloc_resp_hostname, buffer); pack16(job_desc_ptr->other_port, buffer); - packstr(job_desc_ptr->other_hostname, buffer); packstr(job_desc_ptr->network, buffer); pack_time(job_desc_ptr->begin_time, buffer); + packstr(job_desc_ptr->licenses, buffer); pack16(job_desc_ptr->mail_type, buffer); packstr(job_desc_ptr->mail_user, buffer); if(job_desc_ptr->select_jobinfo) @@ -2459,7 +2722,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer) static int _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; job_desc_msg_t *job_desc_ptr; /* alloc memory for structure */ @@ -2471,43 +2734,45 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer) safe_unpack16(&job_desc_ptr->task_dist, buffer); safe_unpack16(&job_desc_ptr->plane_size, buffer); safe_unpack16(&job_desc_ptr->kill_on_node_fail, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->features, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->features, &uint32_tmp, buffer); safe_unpack32(&job_desc_ptr->job_id, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->name, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->alloc_node, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->alloc_node, &uint32_tmp, buffer); safe_unpack32(&job_desc_ptr->alloc_sid, buffer); safe_unpack16(&job_desc_ptr->job_min_procs, buffer); safe_unpack16(&job_desc_ptr->job_min_sockets, buffer); safe_unpack16(&job_desc_ptr->job_min_cores, buffer); safe_unpack16(&job_desc_ptr->job_min_threads, buffer); safe_unpack32(&job_desc_ptr->job_min_memory, buffer); - safe_unpack32(&job_desc_ptr->job_max_memory, buffer); safe_unpack32(&job_desc_ptr->job_min_tmp_disk, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->partition, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->partition, &uint32_tmp, buffer); safe_unpack32(&job_desc_ptr->priority, buffer); - safe_unpack32(&job_desc_ptr->dependency, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->account, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->comment, &uint16_tmp, buffer); + 
safe_unpackstr_xmalloc(&job_desc_ptr->dependency, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->account, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->comment, &uint32_tmp, buffer); safe_unpack16(&job_desc_ptr->nice, buffer); - safe_unpack16(&job_desc_ptr->overcommit, buffer); - safe_unpack32(&job_desc_ptr->num_tasks, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->req_nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->exc_nodes, &uint16_tmp, buffer); + safe_unpack8(&job_desc_ptr->open_mode, buffer); + safe_unpack8(&job_desc_ptr->overcommit, buffer); + safe_unpack16(&job_desc_ptr->acctg_freq, buffer); + safe_unpack32(&job_desc_ptr->num_tasks, buffer); + + safe_unpackstr_xmalloc(&job_desc_ptr->req_nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->exc_nodes, &uint32_tmp, buffer); safe_unpackstr_array(&job_desc_ptr->environment, &job_desc_ptr->env_size, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->script, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->script, &uint32_tmp, buffer); safe_unpackstr_array(&job_desc_ptr->argv, &job_desc_ptr->argc, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->err, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->in, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->out, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->work_dir, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->err, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->in, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->out, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->work_dir, &uint32_tmp, buffer); safe_unpack16(&job_desc_ptr->immediate, buffer); - safe_unpack16(&job_desc_ptr->no_requeue, buffer); + safe_unpack16(&job_desc_ptr->requeue, buffer); safe_unpack16(&job_desc_ptr->shared, buffer); safe_unpack16(&job_desc_ptr->cpus_per_task, buffer); safe_unpack16(&job_desc_ptr->ntasks_per_node, buffer); @@ -2528,16 +2793,13 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer) safe_unpack32(&job_desc_ptr->group_id, buffer); safe_unpack16(&job_desc_ptr->alloc_resp_port, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->alloc_resp_hostname, - &uint16_tmp, buffer); safe_unpack16(&job_desc_ptr->other_port, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->other_hostname, - &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->network, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->network, &uint32_tmp, buffer); safe_unpack_time(&job_desc_ptr->begin_time, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->licenses, &uint32_tmp, buffer); safe_unpack16(&job_desc_ptr->mail_type, buffer); - safe_unpackstr_xmalloc(&job_desc_ptr->mail_user, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&job_desc_ptr->mail_user, &uint32_tmp, buffer); if (select_g_alloc_jobinfo (&job_desc_ptr->select_jobinfo) || select_g_unpack_jobinfo(job_desc_ptr->select_jobinfo, buffer)) @@ -2555,11 +2817,15 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer) return SLURM_SUCCESS; unpack_error: - select_g_free_jobinfo(&job_desc_ptr->select_jobinfo); + xfree(job_desc_ptr->features); xfree(job_desc_ptr->name); xfree(job_desc_ptr->partition); + xfree(job_desc_ptr->dependency); + xfree(job_desc_ptr->account); + xfree(job_desc_ptr->comment); xfree(job_desc_ptr->req_nodes); + xfree(job_desc_ptr->exc_nodes); xfree(job_desc_ptr->environment); xfree(job_desc_ptr->script); xfree(job_desc_ptr->argv); @@ -2567,10 
+2833,10 @@ unpack_error: xfree(job_desc_ptr->in); xfree(job_desc_ptr->out); xfree(job_desc_ptr->work_dir); - xfree(job_desc_ptr->alloc_resp_hostname); - xfree(job_desc_ptr->other_hostname); xfree(job_desc_ptr->network); + xfree(job_desc_ptr->licenses); xfree(job_desc_ptr->mail_user); + select_g_free_jobinfo(&job_desc_ptr->select_jobinfo); xfree(job_desc_ptr); *job_desc_buffer_ptr = NULL; return SLURM_ERROR; @@ -2732,14 +2998,14 @@ _unpack_reattach_tasks_response_msg(reattach_tasks_response_msg_t ** msg_ptr, Buf buffer) { uint32_t ntasks; - uint16_t uint16_tmp; + uint32_t uint32_tmp; reattach_tasks_response_msg_t *msg = xmalloc(sizeof(*msg)); int i; xassert(msg_ptr != NULL); *msg_ptr = msg; - safe_unpackstr_xmalloc(&msg->node_name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->node_name, &uint32_tmp, buffer); safe_unpack32(&msg->return_code, buffer); safe_unpack32(&msg->ntasks, buffer); safe_unpack32_array(&msg->gtids, &ntasks, buffer); @@ -2748,7 +3014,7 @@ _unpack_reattach_tasks_response_msg(reattach_tasks_response_msg_t ** msg_ptr, goto unpack_error; msg->executable_names = (char **)xmalloc(sizeof(char *) * msg->ntasks); for (i = 0; i < msg->ntasks; i++) { - safe_unpackstr_xmalloc(&(msg->executable_names[i]), &uint16_tmp, + safe_unpackstr_xmalloc(&(msg->executable_names[i]), &uint32_tmp, buffer); } return SLURM_SUCCESS; @@ -2809,7 +3075,6 @@ static int _unpack_launch_tasks_response_msg(launch_tasks_response_msg_t ** msg_ptr, Buf buffer) { - uint16_t uint16_tmp; uint32_t uint32_tmp; launch_tasks_response_msg_t *msg; @@ -2818,7 +3083,7 @@ _unpack_launch_tasks_response_msg(launch_tasks_response_msg_t ** *msg_ptr = msg; safe_unpack32(&msg->return_code, buffer); - safe_unpackstr_xmalloc(&msg->node_name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->node_name, &uint32_tmp, buffer); safe_unpack32(&msg->count_of_pids, buffer); safe_unpack32_array(&msg->local_pids, &uint32_tmp, buffer); if (msg->count_of_pids != uint32_tmp) @@ -2848,6 +3113,8 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer) pack32(msg->nprocs, buffer); pack32(msg->uid, buffer); pack32(msg->gid, buffer); + pack32(msg->job_mem, buffer); + pack32(msg->task_mem, buffer); pack32(msg->nnodes, buffer); pack16(msg->max_sockets, buffer); @@ -2897,13 +3164,17 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer) switch_pack_jobinfo(msg->switch_job, buffer); job_options_pack(msg->options, buffer); packstr(msg->complete_nodelist, buffer); + + pack8(msg->open_mode, buffer); + pack8(msg->pty, buffer); + pack16(msg->acctg_freq, buffer); + packstr(msg->ckpt_path, buffer); } static int _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t ** msg_ptr, Buf buffer) { - uint16_t uint16_tmp; uint32_t uint32_tmp; launch_tasks_request_msg_t *msg; int i=0; @@ -2917,6 +3188,8 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t ** safe_unpack32(&msg->nprocs, buffer); safe_unpack32(&msg->uid, buffer); safe_unpack32(&msg->gid, buffer); + safe_unpack32(&msg->job_mem, buffer); + safe_unpack32(&msg->task_mem, buffer); safe_unpack32(&msg->nnodes, buffer); safe_unpack16(&msg->max_sockets, buffer); @@ -2952,19 +3225,19 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t ** } slurm_unpack_slurm_addr_no_alloc(&msg->orig_addr, buffer); safe_unpackstr_array(&msg->env, &msg->envc, buffer); - safe_unpackstr_xmalloc(&msg->cwd, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->cwd, &uint32_tmp, buffer); safe_unpack16(&msg->cpu_bind_type, buffer); - 
safe_unpackstr_xmalloc(&msg->cpu_bind, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->cpu_bind, &uint32_tmp, buffer); safe_unpack16(&msg->mem_bind_type, buffer); - safe_unpackstr_xmalloc(&msg->mem_bind, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->mem_bind, &uint32_tmp, buffer); safe_unpackstr_array(&msg->argv, &msg->argc, buffer); safe_unpack16(&msg->task_flags, buffer); safe_unpack16(&msg->multi_prog, buffer); safe_unpack16(&msg->user_managed_io, buffer); if (msg->user_managed_io == 0) { - safe_unpackstr_xmalloc(&msg->ofname, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg->efname, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg->ifname, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->ofname, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg->efname, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg->ifname, &uint32_tmp, buffer); safe_unpack8(&msg->buffered_stdio, buffer); safe_unpack16(&msg->num_io_port, buffer); if (msg->num_io_port > 0) { @@ -2974,8 +3247,8 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t ** safe_unpack16(&msg->io_port[i], buffer); } } - safe_unpackstr_xmalloc(&msg->task_prolog, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg->task_epilog, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->task_prolog, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg->task_epilog, &uint32_tmp, buffer); safe_unpack16(&msg->slurmd_debug, buffer); switch_alloc_jobinfo(&msg->switch_job); @@ -2989,7 +3262,12 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t ** error("Unable to unpack extra job options: %m"); goto unpack_error; } - safe_unpackstr_xmalloc(&msg->complete_nodelist, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->complete_nodelist, &uint32_tmp, buffer); + + safe_unpack8(&msg->open_mode, buffer); + safe_unpack8(&msg->pty, buffer); + safe_unpack16(&msg->acctg_freq, buffer); + safe_unpackstr_xmalloc(&msg->ckpt_path, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: @@ -3053,6 +3331,35 @@ unpack_error: return SLURM_ERROR; } +static void +_pack_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg, Buf buffer) +{ + pack32((uint32_t)msg->job_id, buffer); + pack32((uint32_t)msg->job_step_id, buffer); + pack32((uint32_t)msg->signal, buffer); + pack_time((time_t)msg->timestamp, buffer); +} + +static int +_unpack_checkpoint_tasks_msg(checkpoint_tasks_msg_t ** msg_ptr, Buf buffer) +{ + checkpoint_tasks_msg_t *msg; + + msg = xmalloc(sizeof(checkpoint_tasks_msg_t)); + *msg_ptr = msg; + + safe_unpack32(&msg->job_id, buffer); + safe_unpack32(&msg->job_step_id, buffer); + safe_unpack32(&msg->signal, buffer); + safe_unpack_time(&msg->timestamp, buffer); + return SLURM_SUCCESS; + +unpack_error: + xfree(msg); + *msg_ptr = NULL; + return SLURM_ERROR; +} + static void _pack_shutdown_msg(shutdown_msg_t * msg, Buf buffer) { @@ -3159,7 +3466,7 @@ _unpack_complete_batch_script_msg( complete_batch_script_msg_t ** msg_ptr, Buf buffer) { complete_batch_script_msg_t *msg; - uint16_t uint16_tmp; + uint32_t uint32_tmp; msg = xmalloc(sizeof(complete_batch_script_msg_t)); *msg_ptr = msg; @@ -3167,7 +3474,7 @@ _unpack_complete_batch_script_msg( safe_unpack32(&msg->job_id, buffer); safe_unpack32(&msg->job_rc, buffer); safe_unpack32(&msg->slurm_rc, buffer); - safe_unpackstr_xmalloc(&msg->node_name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->node_name, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: @@ -3183,7 +3490,7 @@ _pack_stat_jobacct_msg(stat_jobacct_msg_t * msg, Buf buffer) pack32((uint32_t)msg->return_code, 
buffer); pack32((uint32_t)msg->step_id, buffer); pack32((uint32_t)msg->num_tasks, buffer); - jobacct_g_pack(msg->jobacct, buffer); + jobacct_gather_g_pack(msg->jobacct, buffer); } @@ -3199,7 +3506,7 @@ _unpack_stat_jobacct_msg(stat_jobacct_msg_t ** msg_ptr, Buf buffer) safe_unpack32(&msg->return_code, buffer); safe_unpack32(&msg->step_id, buffer); safe_unpack32(&msg->num_tasks, buffer); - if (jobacct_g_unpack(&msg->jobacct, buffer) != SLURM_SUCCESS) + if (jobacct_gather_g_unpack(&msg->jobacct, buffer) != SLURM_SUCCESS) goto unpack_error; return SLURM_SUCCESS; @@ -3248,7 +3555,7 @@ _pack_step_complete_msg(step_complete_msg_t * msg, Buf buffer) pack32((uint32_t)msg->range_first, buffer); pack32((uint32_t)msg->range_last, buffer); pack32((uint32_t)msg->step_rc, buffer); - jobacct_g_pack(msg->jobacct, buffer); + jobacct_gather_g_pack(msg->jobacct, buffer); } static int @@ -3264,7 +3571,7 @@ _unpack_step_complete_msg(step_complete_msg_t ** msg_ptr, Buf buffer) safe_unpack32(&msg->range_first, buffer); safe_unpack32(&msg->range_last, buffer); safe_unpack32(&msg->step_rc, buffer); - if (jobacct_g_unpack(&msg->jobacct, buffer) != SLURM_SUCCESS) + if (jobacct_gather_g_unpack(&msg->jobacct, buffer) != SLURM_SUCCESS) goto unpack_error; return SLURM_SUCCESS; @@ -3406,14 +3713,14 @@ unpack_error: static void _pack_slurm_addr_array(slurm_addr * slurm_address, - uint16_t size_val, Buf buffer) + uint32_t size_val, Buf buffer) { slurm_pack_slurm_addr_array(slurm_address, size_val, buffer); } static int _unpack_slurm_addr_array(slurm_addr ** slurm_address, - uint16_t * size_val, Buf buffer) + uint32_t * size_val, Buf buffer) { return slurm_unpack_slurm_addr_array(slurm_address, size_val, buffer); } @@ -3444,8 +3751,9 @@ static int _unpack_ret_list(List *ret_list, uint16_t size_val, Buf buffer) { - int i = 0, j = 0; - uint16_t nl = 0, uint16_tmp; + int i = 0; + uint16_t uint16_tmp; + uint32_t uint32_tmp; ret_data_info_t *ret_data_info = NULL; slurm_msg_t msg; *ret_list = list_create(destroy_data_info); @@ -3458,7 +3766,7 @@ _unpack_ret_list(List *ret_list, safe_unpack16(&uint16_tmp, buffer); ret_data_info->type = (slurm_msg_type_t)uint16_tmp; safe_unpackstr_xmalloc(&ret_data_info->node_name, - &uint16_tmp, buffer); + &uint32_tmp, buffer); msg.msg_type = ret_data_info->type; if (unpack_msg(&msg, buffer) != SLURM_SUCCESS) goto unpack_error; @@ -3470,7 +3778,7 @@ _unpack_ret_list(List *ret_list, unpack_error: if (ret_data_info && ret_data_info->type) { error("_unpack_ret_list: message type %u, record %d of %u", - ret_data_info->type, j, nl); + ret_data_info->type, i, size_val); } list_destroy(*ret_list); *ret_list = NULL; @@ -3482,14 +3790,18 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer) { xassert(msg != NULL); - pack32((uint32_t)msg->job_id, buffer); - pack32((uint32_t)msg->step_id, buffer); - pack32((uint32_t)msg->uid, buffer); - pack32((uint32_t)msg->gid, buffer); - pack32((uint32_t)msg->nprocs, buffer); + pack32(msg->job_id, buffer); + pack32(msg->step_id, buffer); + pack32(msg->uid, buffer); + pack32(msg->gid, buffer); + pack32(msg->nprocs, buffer); + + pack8(msg->open_mode, buffer); + pack8(msg->overcommit, buffer); + + pack16(msg->acctg_freq, buffer); + pack16(msg->num_cpu_groups, buffer); - pack16((uint16_t)msg->overcommit, buffer); - pack16((uint16_t)msg->num_cpu_groups, buffer); pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer); pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer); @@ -3501,12 +3813,14 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t 
* msg, Buf buffer) packstr(msg->in, buffer); packstr(msg->out, buffer); - pack16((uint16_t)msg->argc, buffer); + pack32(msg->argc, buffer); packstr_array(msg->argv, msg->argc, buffer); - pack16((uint16_t)msg->envc, buffer); + pack32(msg->envc, buffer); packstr_array(msg->environment, msg->envc, buffer); + pack32(msg->job_mem, buffer); + slurm_cred_pack(msg->cred, buffer); select_g_pack_jobinfo(msg->select_jobinfo, buffer); @@ -3515,7 +3829,6 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer) static int _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer) { - uint16_t uint16_tmp; uint32_t uint32_tmp; batch_job_launch_msg_t *launch_msg_ptr; @@ -3529,8 +3842,12 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer) safe_unpack32(&launch_msg_ptr->gid, buffer); safe_unpack32(&launch_msg_ptr->nprocs, buffer); - safe_unpack16(&launch_msg_ptr->overcommit, buffer); + safe_unpack8(&launch_msg_ptr->open_mode, buffer); + safe_unpack8(&launch_msg_ptr->overcommit, buffer); + + safe_unpack16(&launch_msg_ptr->acctg_freq, buffer); safe_unpack16(&launch_msg_ptr->num_cpu_groups, buffer); + safe_unpack32_array((uint32_t **) &(launch_msg_ptr->cpus_per_node), &uint32_tmp, buffer); @@ -3542,23 +3859,24 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer) if (launch_msg_ptr->num_cpu_groups != uint32_tmp) goto unpack_error; - safe_unpackstr_xmalloc(&launch_msg_ptr->nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&launch_msg_ptr->script, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&launch_msg_ptr->work_dir, &uint16_tmp, - buffer); + safe_unpackstr_xmalloc(&launch_msg_ptr->nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&launch_msg_ptr->script, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&launch_msg_ptr->work_dir, &uint32_tmp, buffer); - safe_unpackstr_xmalloc(&launch_msg_ptr->err, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&launch_msg_ptr->in, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&launch_msg_ptr->out, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&launch_msg_ptr->err, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&launch_msg_ptr->in, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&launch_msg_ptr->out, &uint32_tmp, buffer); - safe_unpack16(&launch_msg_ptr->argc, buffer); + safe_unpack32(&launch_msg_ptr->argc, buffer); safe_unpackstr_array(&launch_msg_ptr->argv, &launch_msg_ptr->argc, buffer); - safe_unpack16(&launch_msg_ptr->envc, buffer); + safe_unpack32(&launch_msg_ptr->envc, buffer); safe_unpackstr_array(&launch_msg_ptr->environment, &launch_msg_ptr->envc, buffer); + safe_unpack32(&launch_msg_ptr->job_mem, buffer); + if (!(launch_msg_ptr->cred = slurm_cred_unpack(buffer))) goto unpack_error; @@ -3703,7 +4021,7 @@ _pack_srun_node_fail_msg(srun_node_fail_msg_t * msg, Buf buffer) static int _unpack_srun_node_fail_msg(srun_node_fail_msg_t ** msg_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; srun_node_fail_msg_t * msg; xassert ( msg_ptr != NULL ); @@ -3712,7 +4030,7 @@ _unpack_srun_node_fail_msg(srun_node_fail_msg_t ** msg_ptr, Buf buffer) safe_unpack32(&msg->job_id , buffer ) ; safe_unpack32(&msg->step_id , buffer ) ; - safe_unpackstr_xmalloc ( & msg->nodelist, &uint16_tmp, buffer); + safe_unpackstr_xmalloc ( & msg->nodelist, &uint32_tmp, buffer); return SLURM_SUCCESS; @@ -3791,7 +4109,7 @@ _pack_srun_user_msg(srun_user_msg_t * msg, Buf buffer) static int _unpack_srun_user_msg(srun_user_msg_t ** msg_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; srun_user_msg_t * 
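Several of the launch-message hunks unpack a pair of parallel arrays (cpus_per_node / cpu_count_reps) and reject the stream if the element count returned by safe_unpack32_array does not match the count field unpacked just before it. A condensed sketch of that validation pattern, with illustrative names that are not definitions from this patch:

static int _unpack_cpu_groups(uint16_t *num_cpu_groups,
                              uint32_t **cpus_per_node, Buf buffer)
{
    uint32_t cnt = 0;

    safe_unpack16(num_cpu_groups, buffer);
    safe_unpack32_array(cpus_per_node, &cnt, buffer);
    if (cnt != *num_cpu_groups)     /* stream is inconsistent: reject it */
        goto unpack_error;
    return SLURM_SUCCESS;

unpack_error:
    xfree(*cpus_per_node);
    *cpus_per_node = NULL;
    return SLURM_ERROR;
}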
msg_user; xassert ( msg_ptr != NULL ); @@ -3799,7 +4117,7 @@ _unpack_srun_user_msg(srun_user_msg_t ** msg_ptr, Buf buffer) *msg_ptr = msg_user; safe_unpack32(&msg_user->job_id, buffer); - safe_unpackstr_xmalloc(&msg_user->msg, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg_user->msg, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: @@ -3880,7 +4198,7 @@ _pack_checkpoint_comp(checkpoint_comp_msg_t *msg, Buf buffer) static int _unpack_checkpoint_comp(checkpoint_comp_msg_t **msg_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; checkpoint_comp_msg_t * msg; xassert ( msg_ptr != NULL ); @@ -3890,7 +4208,45 @@ _unpack_checkpoint_comp(checkpoint_comp_msg_t **msg_ptr, Buf buffer) safe_unpack32(& msg -> job_id , buffer ) ; safe_unpack32(& msg -> step_id , buffer ) ; safe_unpack32(& msg -> error_code , buffer ) ; - safe_unpackstr_xmalloc ( & msg -> error_msg, & uint16_tmp , buffer ) ; + safe_unpackstr_xmalloc ( & msg -> error_msg, & uint32_tmp , buffer ) ; + safe_unpack_time ( & msg -> begin_time , buffer ) ; + return SLURM_SUCCESS; + +unpack_error: + *msg_ptr = NULL; + xfree (msg->error_msg); + xfree (msg); + return SLURM_ERROR; +} + +static void +_pack_checkpoint_task_comp(checkpoint_task_comp_msg_t *msg, Buf buffer) +{ + xassert ( msg != NULL ); + + pack32((uint32_t)msg -> job_id, buffer ) ; + pack32((uint32_t)msg -> step_id, buffer ) ; + pack32((uint32_t)msg -> task_id, buffer ) ; + pack32((uint32_t)msg -> error_code, buffer ) ; + packstr ( msg -> error_msg, buffer ) ; + pack_time ( msg -> begin_time, buffer ) ; +} + +static int +_unpack_checkpoint_task_comp(checkpoint_task_comp_msg_t **msg_ptr, Buf buffer) +{ + uint32_t uint32_tmp; + checkpoint_task_comp_msg_t * msg; + xassert ( msg_ptr != NULL ); + + msg = xmalloc ( sizeof (checkpoint_task_comp_msg_t) ); + *msg_ptr = msg ; + + safe_unpack32(& msg -> job_id , buffer ) ; + safe_unpack32(& msg -> step_id , buffer ) ; + safe_unpack32(& msg -> task_id , buffer ) ; + safe_unpack32(& msg -> error_code , buffer ) ; + safe_unpackstr_xmalloc ( & msg -> error_msg, & uint32_tmp , buffer ) ; safe_unpack_time ( & msg -> begin_time , buffer ) ; return SLURM_SUCCESS; @@ -3915,7 +4271,7 @@ static int _unpack_checkpoint_resp_msg(checkpoint_resp_msg_t **msg_ptr, Buf buffer) { checkpoint_resp_msg_t * msg; - uint16_t uint16_tmp; + uint32_t uint32_tmp; xassert ( msg_ptr != NULL ); msg = xmalloc ( sizeof (checkpoint_resp_msg_t) ) ; @@ -3923,7 +4279,7 @@ _unpack_checkpoint_resp_msg(checkpoint_resp_msg_t **msg_ptr, Buf buffer) safe_unpack_time ( & msg -> event_time, buffer ) ; safe_unpack32(& msg -> error_code , buffer ) ; - safe_unpackstr_xmalloc ( & msg -> error_msg, & uint16_tmp , buffer ) ; + safe_unpackstr_xmalloc ( & msg -> error_msg, & uint32_tmp , buffer ) ; return SLURM_SUCCESS; unpack_error: @@ -3934,12 +4290,9 @@ unpack_error: static void _pack_file_bcast(file_bcast_msg_t * msg , Buf buffer ) { - int buf_size = 1024, i; xassert ( msg != NULL ); - for (i=0; i<FILE_BLOCKS; i++) - buf_size += msg->block_len[i]; - grow_buf(buffer, buf_size); + grow_buf(buffer, msg->block_len); pack16 ( msg->block_no, buffer ); pack16 ( msg->last_block, buffer ); @@ -3953,16 +4306,13 @@ static void _pack_file_bcast(file_bcast_msg_t * msg , Buf buffer ) pack_time ( msg->mtime, buffer ); packstr ( msg->fname, buffer ); - for (i=0; i<FILE_BLOCKS; i++) { - pack32 ( msg->block_len[i], buffer ); - packmem ( msg->block[i], msg->block_len[i], buffer ); - } + pack32 ( msg->block_len, buffer ); + packmem ( msg->block, msg->block_len, buffer ); } static int 
_unpack_file_bcast(file_bcast_msg_t ** msg_ptr , Buf buffer ) { - int i; - uint16_t uint16_tmp; + uint32_t uint32_tmp; file_bcast_msg_t *msg ; xassert ( msg_ptr != NULL ); @@ -3981,19 +4331,16 @@ static int _unpack_file_bcast(file_bcast_msg_t ** msg_ptr , Buf buffer ) safe_unpack_time ( & msg->atime, buffer ); safe_unpack_time ( & msg->mtime, buffer ); - safe_unpackstr_xmalloc ( & msg->fname, &uint16_tmp, buffer ); - for (i=0; i<FILE_BLOCKS; i++) { - safe_unpack32 ( & msg->block_len[i], buffer ); - safe_unpackmem_xmalloc ( & msg->block[i], &uint16_tmp , buffer ) ; - if ( uint16_tmp != msg->block_len[i] ) - goto unpack_error; - } + safe_unpackstr_xmalloc ( & msg->fname, &uint32_tmp, buffer ); + safe_unpack32 ( & msg->block_len, buffer ); + safe_unpackmem_xmalloc ( & msg->block, &uint32_tmp , buffer ) ; + if ( uint32_tmp != msg->block_len ) + goto unpack_error; return SLURM_SUCCESS; unpack_error: xfree( msg -> fname ); - for (i=0; i<FILE_BLOCKS; i++) - xfree( msg -> block[i] ); + xfree( msg -> block ); xfree( msg ); *msg_ptr = NULL; return SLURM_ERROR; @@ -4006,7 +4353,7 @@ static void _pack_trigger_msg(trigger_info_msg_t *msg , Buf buffer) pack32(msg->record_count, buffer); for (i=0; i<msg->record_count; i++) { pack32 (msg->trigger_array[i].trig_id, buffer); - pack8 (msg->trigger_array[i].res_type, buffer); + pack16 (msg->trigger_array[i].res_type, buffer); packstr(msg->trigger_array[i].res_id, buffer); pack16 (msg->trigger_array[i].trig_type, buffer); pack16 (msg->trigger_array[i].offset, buffer); @@ -4018,7 +4365,7 @@ static void _pack_trigger_msg(trigger_info_msg_t *msg , Buf buffer) static int _unpack_trigger_msg(trigger_info_msg_t ** msg_ptr , Buf buffer) { int i; - uint16_t uint16_tmp; + uint32_t uint32_tmp; trigger_info_msg_t *msg = xmalloc(sizeof(trigger_info_msg_t)); safe_unpack32 (&msg->record_count, buffer); @@ -4026,14 +4373,14 @@ static int _unpack_trigger_msg(trigger_info_msg_t ** msg_ptr , Buf buffer) msg->record_count); for (i=0; i<msg->record_count; i++) { safe_unpack32(&msg->trigger_array[i].trig_id, buffer); - safe_unpack8 (&msg->trigger_array[i].res_type, buffer); + safe_unpack16(&msg->trigger_array[i].res_type, buffer); safe_unpackstr_xmalloc(&msg->trigger_array[i].res_id, - &uint16_tmp, buffer); + &uint32_tmp, buffer); safe_unpack16(&msg->trigger_array[i].trig_type, buffer); safe_unpack16(&msg->trigger_array[i].offset, buffer); safe_unpack32(&msg->trigger_array[i].user_id, buffer); safe_unpackstr_xmalloc(&msg->trigger_array[i].program, - &uint16_tmp, buffer); + &uint32_tmp, buffer); } *msg_ptr = msg; return SLURM_SUCCESS; @@ -4053,11 +4400,11 @@ static void _pack_kvs_host_rec(struct kvs_hosts *msg_ptr, Buf buffer) static int _unpack_kvs_host_rec(struct kvs_hosts *msg_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; safe_unpack16(&msg_ptr->task_id, buffer); safe_unpack16(&msg_ptr->port, buffer); - safe_unpackstr_xmalloc(&msg_ptr->hostname, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg_ptr->hostname, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: @@ -4078,21 +4425,21 @@ static void _pack_kvs_rec(struct kvs_comm *msg_ptr, Buf buffer) } static int _unpack_kvs_rec(struct kvs_comm **msg_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; int i; struct kvs_comm *msg; msg = xmalloc(sizeof(struct kvs_comm)); *msg_ptr = msg; - safe_unpackstr_xmalloc(&msg->kvs_name, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->kvs_name, &uint32_tmp, buffer); safe_unpack16(&msg->kvs_cnt, buffer); msg->kvs_keys = xmalloc(sizeof(char *) * 
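The file_bcast message now carries one data block per RPC instead of a fixed FILE_BLOCKS array: the pack side grows the buffer up front so packmem() has room for the payload, and the unpack side cross-checks the stored length against the number of bytes actually recovered. A sketch of both halves reduced to the block handling only; the helper names are illustrative assumptions:

static void _pack_block(char *block, uint32_t block_len, Buf buffer)
{
    grow_buf(buffer, block_len);        /* reserve room for the payload */
    pack32(block_len, buffer);          /* explicit length ... */
    packmem(block, block_len, buffer);  /* ... then the bytes */
}

static int _unpack_block(char **block, uint32_t *block_len, Buf buffer)
{
    uint32_t mem_len = 0;

    *block = NULL;
    *block_len = 0;
    safe_unpack32(block_len, buffer);
    safe_unpackmem_xmalloc(block, &mem_len, buffer);
    if (mem_len != *block_len)          /* lengths must agree */
        goto unpack_error;
    return SLURM_SUCCESS;

unpack_error:
    xfree(*block);
    *block = NULL;
    return SLURM_ERROR;
}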
msg->kvs_cnt); msg->kvs_values = xmalloc(sizeof(char *) * msg->kvs_cnt); for (i=0; i<msg->kvs_cnt; i++) { safe_unpackstr_xmalloc(&msg->kvs_keys[i], - &uint16_tmp, buffer); + &uint32_tmp, buffer); safe_unpackstr_xmalloc(&msg->kvs_values[i], - &uint16_tmp, buffer); + &uint32_tmp, buffer); } return SLURM_SUCCESS; @@ -4167,7 +4514,7 @@ static void _pack_kvs_get(kvs_get_msg_t *msg_ptr, Buf buffer) static int _unpack_kvs_get(kvs_get_msg_t **msg_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; kvs_get_msg_t *msg; msg = xmalloc(sizeof(struct kvs_get_msg)); @@ -4175,7 +4522,7 @@ static int _unpack_kvs_get(kvs_get_msg_t **msg_ptr, Buf buffer) safe_unpack16(&msg->task_id, buffer); safe_unpack16(&msg->size, buffer); safe_unpack16(&msg->port, buffer); - safe_unpackstr_xmalloc(&msg->hostname, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->hostname, &uint32_tmp, buffer); return SLURM_SUCCESS; unpack_error: @@ -4271,7 +4618,7 @@ static void _pack_slurmd_status(slurmd_status_t *msg, Buf buffer) static int _unpack_slurmd_status(slurmd_status_t **msg_ptr, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; slurmd_status_t *msg; xassert(msg_ptr); @@ -4291,10 +4638,10 @@ static int _unpack_slurmd_status(slurmd_status_t **msg_ptr, Buf buffer) safe_unpack32(&msg->actual_tmp_disk, buffer); safe_unpack32(&msg->pid, buffer); - safe_unpackstr_xmalloc(&msg->hostname, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg->slurmd_logfile, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg->step_list, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg->version, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&msg->hostname, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg->slurmd_logfile, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg->step_list, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg->version, &uint32_tmp, buffer); *msg_ptr = msg; return SLURM_SUCCESS; @@ -4309,6 +4656,141 @@ unpack_error: return SLURM_ERROR; } +static void _pack_job_notify(job_notify_msg_t *msg, Buf buffer) +{ + xassert(msg); + + pack32(msg->job_id, buffer); + pack32(msg->job_step_id, buffer); + packstr(msg->message, buffer); +} + +static int _unpack_job_notify(job_notify_msg_t **msg_ptr, Buf buffer) +{ + uint32_t uint32_tmp; + job_notify_msg_t *msg; + + xassert(msg_ptr); + + msg = xmalloc(sizeof(job_notify_msg_t)); + + safe_unpack32(&msg->job_id, buffer); + safe_unpack32(&msg->job_step_id, buffer); + safe_unpackstr_xmalloc(&msg->message, &uint32_tmp, buffer); + + *msg_ptr = msg; + return SLURM_SUCCESS; + +unpack_error: + xfree(msg->message); + xfree(msg); + *msg_ptr = NULL; + return SLURM_ERROR; +} + +static void +_pack_set_debug_level_msg(set_debug_level_msg_t * msg, Buf buffer) +{ + pack32(msg->debug_level, buffer); +} + +static int +_unpack_set_debug_level_msg(set_debug_level_msg_t ** msg_ptr, Buf buffer) +{ + set_debug_level_msg_t *msg; + + msg = xmalloc(sizeof(set_debug_level_msg_t)); + *msg_ptr = msg; + + safe_unpack32(&msg->debug_level, buffer); + return SLURM_SUCCESS; + + unpack_error: + xfree(msg); + *msg_ptr = NULL; + return SLURM_ERROR; +} + +static void +_pack_will_run_response_msg(will_run_response_msg_t *msg, Buf buffer) +{ + pack32(msg->job_id, buffer); + pack32(msg->proc_cnt, buffer); + pack_time(msg->start_time, buffer); + packstr(msg->node_list, buffer); +} + +static int +_unpack_will_run_response_msg(will_run_response_msg_t ** msg_ptr, Buf buffer) +{ + will_run_response_msg_t *msg; + uint32_t uint32_tmp; + + msg = xmalloc(sizeof(will_run_response_msg_t)); + 
safe_unpack32(&msg->job_id, buffer); + safe_unpack32(&msg->proc_cnt, buffer); + safe_unpack_time(&msg->start_time, buffer); + safe_unpackstr_xmalloc(&msg->node_list, &uint32_tmp, buffer); + *msg_ptr = msg; + return SLURM_SUCCESS; + + unpack_error: + xfree(msg->node_list); + xfree(msg); + *msg_ptr = NULL; + return SLURM_ERROR; +} + +static void _pack_accounting_update_msg(accounting_update_msg_t *msg, + Buf buffer) +{ + uint32_t count = 0; + ListIterator itr = NULL; + acct_update_object_t *rec = NULL; + + if(msg->update_list) + count = list_count(msg->update_list); + + pack32(count, buffer); + + if(count) { + itr = list_iterator_create(msg->update_list); + while((rec = list_next(itr))) { + pack_acct_update_object(rec, buffer); + } + list_iterator_destroy(itr); + } +} + +static int _unpack_accounting_update_msg(accounting_update_msg_t **msg, + Buf buffer) +{ + uint32_t count = 0; + int i = 0; + accounting_update_msg_t *msg_ptr = + xmalloc(sizeof(accounting_update_msg_t)); + acct_update_object_t *rec = NULL; + + *msg = msg_ptr; + + safe_unpack32(&count, buffer); + msg_ptr->update_list = list_create(destroy_acct_update_object); + for(i=0; i<count; i++) { + if((unpack_acct_update_object(&rec, buffer)) == SLURM_ERROR) + goto unpack_error; + list_append(msg_ptr->update_list, rec); + } + + return SLURM_SUCCESS; + +unpack_error: + if(msg_ptr->update_list) + list_destroy(msg_ptr->update_list); + xfree(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + /* template void pack_ ( * msg , Buf buffer ) { @@ -4322,7 +4804,7 @@ unpack_error: int unpack_ ( ** msg_ptr , Buf buffer ) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; * msg ; xassert ( msg_ptr != NULL ); @@ -4333,7 +4815,7 @@ unpack_error: safe_unpack16( & msg -> , buffer ) ; safe_unpack32( & msg -> , buffer ) ; safe_unpack_time ( & msg -> , buffer ) ; - safe_unpackstr_xmalloc ( & msg -> x, & uint16_tmp , buffer ) ; + safe_unpackstr_xmalloc ( & msg -> x, & uint32_tmp , buffer ) ; return SLURM_SUCCESS; unpack_error: diff --git a/src/common/slurm_protocol_pack.h b/src/common/slurm_protocol_pack.h index dd710790e..aae531ae0 100644 --- a/src/common/slurm_protocol_pack.h +++ b/src/common/slurm_protocol_pack.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/slurm_protocol_socket_common.h b/src/common/slurm_protocol_socket_common.h index ae927579b..1f0c9be3a 100644 --- a/src/common/slurm_protocol_socket_common.h +++ b/src/common/slurm_protocol_socket_common.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
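The new accounting_update message encodes a List as a 32-bit element count followed by the packed elements, and the unpack side rebuilds the list and appends each record as it is recovered. The same pattern applied to a hypothetical element type; elem_t, _pack_elem, _unpack_elem and _destroy_elem are assumptions for illustration:

static void _pack_elem_list(List l, Buf buffer)
{
    uint32_t count = l ? list_count(l) : 0;
    ListIterator itr;
    elem_t *e;

    pack32(count, buffer);              /* element count first */
    if (!count)
        return;
    itr = list_iterator_create(l);
    while ((e = list_next(itr)))
        _pack_elem(e, buffer);          /* then each element in order */
    list_iterator_destroy(itr);
}

static int _unpack_elem_list(List *l, Buf buffer)
{
    uint32_t count = 0;
    int i;
    elem_t *e = NULL;

    *l = NULL;
    safe_unpack32(&count, buffer);
    *l = list_create(_destroy_elem);
    for (i = 0; i < count; i++) {
        if (_unpack_elem(&e, buffer) != SLURM_SUCCESS)
            goto unpack_error;
        list_append(*l, e);
    }
    return SLURM_SUCCESS;

unpack_error:
    if (*l)
        list_destroy(*l);
    *l = NULL;
    return SLURM_ERROR;
}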
diff --git a/src/common/slurm_protocol_socket_implementation.c b/src/common/slurm_protocol_socket_implementation.c index 023f377b6..ea2f01cd4 100644 --- a/src/common/slurm_protocol_socket_implementation.c +++ b/src/common/slurm_protocol_socket_implementation.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * slurm_protocol_socket_implementation.c - slurm communications interfaces * based upon sockets. - * $Id: slurm_protocol_socket_implementation.c 12827 2007-12-14 22:29:30Z da $ + * $Id: slurm_protocol_socket_implementation.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -466,8 +466,11 @@ slurm_fd _slurm_open_stream(slurm_addr *addr, bool retry) int retry_cnt; slurm_fd fd; - if ( (addr->sin_family == 0) || (addr->sin_port == 0) ) - return SLURM_SOCKET_ERROR; + if ( (addr->sin_family == 0) || (addr->sin_port == 0) ) { + error("Error connecting, bad data: family = %u, port = %u", + addr->sin_family, addr->sin_port); + return SLURM_SOCKET_ERROR; + } for (retry_cnt=0; ; retry_cnt++) { int rc; @@ -614,7 +617,7 @@ again: rc = poll(&ufds, 1, 5000); /* poll failed */ if (errno == EINTR) { /* NOTE: connect() is non-interruptible in Linux */ - debug3("_slurm_connect poll failed: %m"); + debug2("_slurm_connect poll failed: %m"); goto again; } else error("_slurm_connect poll failed: %m"); diff --git a/src/common/slurm_protocol_util.c b/src/common/slurm_protocol_util.c index ada81c9e4..d2d8c4c06 100644 --- a/src/common/slurm_protocol_util.c +++ b/src/common/slurm_protocol_util.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -53,10 +53,9 @@ */ int check_header_version(header_t * header) { - if (header->version != SLURM_PROTOCOL_VERSION) { - debug("Invalid Protocol Version %d", header->version); + if (header->version != SLURM_PROTOCOL_VERSION) slurm_seterrno_ret(SLURM_PROTOCOL_VERSION_ERROR); - } + return SLURM_PROTOCOL_SUCCESS; } diff --git a/src/common/slurm_protocol_util.h b/src/common/slurm_protocol_util.h index 3838e8df0..b8698de0b 100644 --- a/src/common/slurm_protocol_util.h +++ b/src/common/slurm_protocol_util.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/slurm_resource_info.c b/src/common/slurm_resource_info.c index 6dfd58ddc..572ba109a 100644 --- a/src/common/slurm_resource_info.c +++ b/src/common/slurm_resource_info.c @@ -4,7 +4,7 @@ ***************************************************************************** * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. 
* Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -56,12 +56,12 @@ * given this number given the number of cpus_per_task and * maximum sockets, cores, threads. Note that the value of * cpus is the lowest-level logical processor (LLLP). - * IN mxsockets - Job requested max sockets - * IN mxcores - Job requested max cores - * IN mxthreads - Job requested max threads - * IN minsockets - Job requested min sockets - * IN mincores - Job requested min cores - * IN cpuspertask - Job requested cpus per task + * IN max_sockets - Job requested max sockets + * IN max_cores - Job requested max cores + * IN max_threads - Job requested max threads + * IN min_sockets - Job requested min sockets + * IN min_cores - Job requested min cores + * IN cpus_per_task - Job requested cpus per task * IN ntaskspernode - number of tasks per node * IN ntaskspersocket- number of tasks per socket * IN ntaskspercore - number of tasks per core @@ -69,18 +69,17 @@ * IN/OUT sockets - Available socket count * IN/OUT cores - Available core count * IN/OUT threads - Available thread count - * IN alloc_sockets - Allocated socket count to other jobs - * IN alloc_lps - Allocated cpu count to other jobs + * IN alloc_cores - Allocated cores (per socket) count to other jobs * IN cr_type - Consumable Resource type * * Note: used in both the select/{linear,cons_res} plugins. */ -int slurm_get_avail_procs(const uint16_t mxsockets, - const uint16_t mxcores, - const uint16_t mxthreads, - const uint16_t minsockets, - const uint16_t mincores, - const uint16_t cpuspertask, +int slurm_get_avail_procs(const uint16_t max_sockets, + const uint16_t max_cores, + const uint16_t max_threads, + const uint16_t min_sockets, + const uint16_t min_cores, + uint16_t cpus_per_task, const uint16_t ntaskspernode, const uint16_t ntaskspersocket, const uint16_t ntaskspercore, @@ -88,21 +87,14 @@ int slurm_get_avail_procs(const uint16_t mxsockets, uint16_t *sockets, uint16_t *cores, uint16_t *threads, - const uint16_t alloc_sockets, const uint16_t *alloc_cores, - const uint16_t alloc_lps, const select_type_plugin_info_t cr_type, uint32_t job_id, char *name) { uint16_t avail_cpus = 0, max_cpus = 0; + uint16_t allocated_cpus = 0, allocated_cores = 0, allocated_sockets = 0; uint16_t max_avail_cpus = 0xffff; /* for alloc_* accounting */ - uint16_t max_sockets = mxsockets; - uint16_t max_cores = mxcores; - uint16_t max_threads = mxthreads; - uint16_t min_sockets = minsockets; - uint16_t min_cores = mincores; - uint16_t cpus_per_task = cpuspertask; int i; /* pick defaults for any unspecified items */ @@ -114,6 +106,11 @@ int slurm_get_avail_procs(const uint16_t mxsockets, *cores = 1; if (*sockets <= 0) *sockets = *cpus / *cores / *threads; + for (i = 0 ; alloc_cores && i < *sockets; i++) { + allocated_cores += alloc_cores[i]; + if (alloc_cores[i]) + allocated_sockets++; + } #if(DEBUG) info("get_avail_procs %u %s MAX User_ sockets %u cores %u threads %u", job_id, name, max_sockets, max_cores, max_threads); @@ -121,39 +118,33 @@ int slurm_get_avail_procs(const uint16_t mxsockets, job_id, name, min_sockets, min_cores); info("get_avail_procs %u %s HW_ sockets %u cores %u threads %u", job_id, name, *sockets, *cores, *threads); - info("get_avail_procs %u %s Ntask node %u sockets %u core %u", - job_id, name, ntaskspernode, ntaskspersocket, + info("get_avail_procs %u %s Ntask node %u sockets %u core %u", + 
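slurm_get_avail_procs() now receives only the per-socket alloc_cores[] array and derives the aggregate counts itself: total allocated cores, the number of sockets with at least one allocated core, and allocated CPUs at thread granularity. A small illustrative helper (not part of the patch) showing that derivation:

static void _count_allocated(const uint16_t *alloc_cores, uint16_t sockets,
                             uint16_t threads, uint16_t *alloc_cores_tot,
                             uint16_t *alloc_sockets, uint16_t *alloc_cpus)
{
    int i;

    *alloc_cores_tot = 0;
    *alloc_sockets = 0;
    for (i = 0; alloc_cores && (i < sockets); i++) {
        *alloc_cores_tot += alloc_cores[i];
        if (alloc_cores[i])             /* any used core marks the socket */
            (*alloc_sockets)++;
    }
    /* CPUs are counted at thread granularity, as in the hunk above */
    *alloc_cpus = *alloc_cores_tot * threads;
}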
job_id, name, ntaskspernode, ntaskspersocket, ntaskspercore); - info("get_avail_procs %u %s cr_type %d cpus %u Allocated sockets %u lps %u", - job_id, name, cr_type, *cpus, alloc_sockets, alloc_lps); - if (((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) - && (alloc_lps != 0)) { - for (i = 0; i < *sockets; i++) - info("get_avail_procs %u %s alloc_cores[%d] = %u", - job_id, name, i, alloc_cores[i]); - } + info("get_avail_procs %u %s cr_type %d cpus %u alloc_ c %u s %u", + job_id, name, cr_type, *cpus, allocated_cores, + allocated_sockets); + for (i = 0; alloc_cores && i < *sockets; i++) + info("get_avail_procs %u %s alloc_cores[%d] = %u", + job_id, name, i, alloc_cores[i]); #endif - + allocated_cpus = allocated_cores * (*threads); switch(cr_type) { /* For the following CR types, nodes have no notion of socket, core, and thread. Only one level of logical processors */ + case SELECT_TYPE_INFO_NONE: + /* Default for select/linear */ case CR_CPU: case CR_CPU_MEMORY: - case CR_MEMORY: - switch(cr_type) { - case CR_CPU: - case CR_CPU_MEMORY: - if (*cpus >= alloc_lps) - *cpus -= alloc_lps; - else { - *cpus = 0; - error("cons_res: *cpus underflow"); - } - break; - default: - break; + + if (*cpus >= allocated_cpus) + *cpus -= allocated_cpus; + else { + *cpus = 0; + error("cons_res: *cpus underflow"); } + case CR_MEMORY: /*** compute an overall maximum cpu count honoring ntasks* ***/ max_cpus = *cpus; if (ntaskspernode > 0) { @@ -164,19 +155,21 @@ int slurm_get_avail_procs(const uint16_t mxsockets, /* For all other types, nodes contain sockets, cores, and threads */ case CR_CORE: case CR_CORE_MEMORY: - if (*cpus >= alloc_lps) - *cpus -= alloc_lps; + if (*cpus >= allocated_cpus) + *cpus -= allocated_cpus; else { *cpus = 0; error("cons_res: *cpus underflow"); } - if (alloc_lps > 0) { + if (allocated_cores > 0) { max_avail_cpus = 0; int tmp_diff = 0; for (i=0; i<*sockets; i++) { tmp_diff = *cores - alloc_cores[i]; - if (min_cores <= tmp_diff) + if (min_cores <= tmp_diff) { + tmp_diff *= (*threads); max_avail_cpus += tmp_diff; + } } } @@ -220,14 +213,14 @@ int slurm_get_avail_procs(const uint16_t mxsockets, case CR_SOCKET: case CR_SOCKET_MEMORY: default: - if (*sockets >= alloc_sockets) - *sockets -= alloc_sockets; /* sockets count */ + if (*sockets >= allocated_sockets) + *sockets -= allocated_sockets; /* sockets count */ else { *sockets = 0; error("cons_res: *sockets underflow"); } - if (*cpus >= alloc_lps) - *cpus -= alloc_lps; + if (*cpus >= allocated_cpus) + *cpus -= allocated_cpus; else { *cpus = 0; error("cons_res: *cpus underflow"); diff --git a/src/common/slurm_resource_info.h b/src/common/slurm_resource_info.h index 330c876f0..25733ff8e 100644 --- a/src/common/slurm_resource_info.h +++ b/src/common/slurm_resource_info.h @@ -4,7 +4,7 @@ ***************************************************************************** * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
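For CR_CORE / CR_CORE_MEMORY the upper bound on usable CPUs is now computed per socket: a socket contributes only if its free cores can still satisfy min_cores, and its contribution is now also scaled by the thread count. A condensed illustrative version of that computation; the helper and its parameters are assumptions, not the patch's code:

static uint16_t _max_avail_cpus(const uint16_t *alloc_cores, uint16_t sockets,
                                uint16_t cores, uint16_t threads,
                                uint16_t min_cores)
{
    uint16_t avail = 0;
    int i, free_cores;

    for (i = 0; i < sockets; i++) {
        free_cores = cores - alloc_cores[i];
        if (free_cores >= min_cores)    /* socket still usable by this job */
            avail += free_cores * threads;
    }
    return avail;
}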
@@ -62,9 +62,7 @@ int slurm_get_avail_procs(const uint16_t mxsockets, uint16_t *sockets, uint16_t *cores, uint16_t *threads, - const uint16_t alloc_sockets, const uint16_t *alloc_cores, - const uint16_t alloc_lps, const select_type_plugin_info_t cr_type, uint32_t job_id, char *name); diff --git a/src/common/slurm_selecttype_info.c b/src/common/slurm_selecttype_info.c index 969adf08d..baa5213f4 100644 --- a/src/common/slurm_selecttype_info.c +++ b/src/common/slurm_selecttype_info.c @@ -4,7 +4,7 @@ * * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/slurm_selecttype_info.h b/src/common/slurm_selecttype_info.h index b1b046dbf..f602ca136 100644 --- a/src/common/slurm_selecttype_info.h +++ b/src/common/slurm_selecttype_info.h @@ -4,7 +4,7 @@ * * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/slurm_step_layout.c b/src/common/slurm_step_layout.c index e49e01dc4..4d7f27256 100644 --- a/src/common/slurm_step_layout.c +++ b/src/common/slurm_step_layout.c @@ -6,7 +6,7 @@ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. * Written by Chris Holmes, <cholmes@hp.com>, who borrowed heavily * from other parts of SLURM. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -88,7 +88,7 @@ static int _task_layout_hostfile(slurm_step_layout_t *step_layout, slurm_step_layout_t *slurm_step_layout_create( const char *tlist, uint32_t *cpus_per_node, uint32_t *cpu_count_reps, - uint16_t num_hosts, + uint32_t num_hosts, uint32_t num_tasks, uint16_t task_dist, uint16_t plane_size) @@ -154,7 +154,7 @@ slurm_step_layout_t *fake_slurm_step_layout_create( const char *tlist, uint32_t *cpus_per_node, uint32_t *cpu_count_reps, - uint16_t node_cnt, + uint32_t node_cnt, uint32_t task_cnt) { uint32_t cpn = 1; @@ -287,7 +287,7 @@ extern void pack_slurm_step_layout(slurm_step_layout_t *step_layout, if(!i) return; packstr(step_layout->node_list, buffer); - pack16(step_layout->node_cnt, buffer); + pack32(step_layout->node_cnt, buffer); pack32(step_layout->task_cnt, buffer); /* slurm_pack_slurm_addr_array(step_layout->node_addr, */ /* step_layout->node_cnt, buffer); */ @@ -301,7 +301,7 @@ extern void pack_slurm_step_layout(slurm_step_layout_t *step_layout, extern int unpack_slurm_step_layout(slurm_step_layout_t **layout, Buf buffer) { uint16_t uint16_tmp; - uint32_t num_tids; + uint32_t num_tids, uint32_tmp; slurm_step_layout_t *step_layout = NULL; int i; @@ -316,17 +316,17 @@ extern int unpack_slurm_step_layout(slurm_step_layout_t **layout, Buf buffer) step_layout->node_cnt = 0; step_layout->tids = NULL; step_layout->tasks = NULL; - safe_unpackstr_xmalloc(&step_layout->node_list, &uint16_tmp, buffer); - safe_unpack16(&step_layout->node_cnt, buffer); + safe_unpackstr_xmalloc(&step_layout->node_list, &uint32_tmp, buffer); + safe_unpack32(&step_layout->node_cnt, buffer); safe_unpack32(&step_layout->task_cnt, buffer); /* if (slurm_unpack_slurm_addr_array(&(step_layout->node_addr), */ -/* &uint16_tmp, buffer)) */ +/* &uint32_tmp, buffer)) */ /* goto unpack_error; */ -/* if (uint16_tmp != step_layout->node_cnt) */ +/* if (uint32_tmp != step_layout->node_cnt) */ /* goto unpack_error; */ - step_layout->tasks = xmalloc(sizeof(uint16_t) * step_layout->node_cnt); + step_layout->tasks = xmalloc(sizeof(uint32_t) * step_layout->node_cnt); step_layout->tids = xmalloc(sizeof(uint32_t *) * step_layout->node_cnt); for(i = 0; i < step_layout->node_cnt; i++) { @@ -415,7 +415,7 @@ static int _init_task_layout(slurm_step_layout_t *step_layout, i = hostlist_count(hl); if(step_layout->node_cnt > i) step_layout->node_cnt = i; - debug("laying out the %d tasks on %d hosts %s\n", + debug("laying out the %u tasks on %u hosts %s\n", step_layout->task_cnt, step_layout->node_cnt, step_layout->node_list); if(step_layout->node_cnt < 1) { diff --git a/src/common/slurm_step_layout.h b/src/common/slurm_step_layout.h index 2a03b6205..efc8b13b9 100644 --- a/src/common/slurm_step_layout.h +++ b/src/common/slurm_step_layout.h @@ -6,7 +6,7 @@ * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. * Written by Chris Holmes, <cholmes@hp.com>, who borrowed heavily * from other parts of SLURM. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
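With node_cnt widened to 32 bits the step-layout code still clamps it to the number of hosts actually present in the node list before sizing its per-node arrays, as _init_task_layout does above. A standalone sketch of that clamp, assuming the hostlist API from src/common/hostlist.h (hostlist_create / hostlist_count / hostlist_destroy):

static uint32_t _clamp_node_cnt(uint32_t node_cnt, const char *node_list)
{
    hostlist_t hl = hostlist_create(node_list);
    int hosts = hostlist_count(hl);

    if (node_cnt > (uint32_t) hosts)    /* never claim more nodes than hosts */
        node_cnt = hosts;
    hostlist_destroy(hl);
    return node_cnt;
}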
@@ -63,7 +63,7 @@ extern slurm_step_layout_t *slurm_step_layout_create(const char *tlist, uint32_t *cpus_per_node, uint32_t *cpu_count_reps, - uint16_t node_cnt, + uint32_t node_cnt, uint32_t task_cnt, uint16_t task_dist, uint16_t plane_size); @@ -86,7 +86,7 @@ extern slurm_step_layout_t *fake_slurm_step_layout_create( const char *tlist, uint32_t *cpus_per_node, uint32_t *cpu_count_reps, - uint16_t node_cnt, + uint32_t node_cnt, uint32_t task_cnt); /* copys structure for step layout */ diff --git a/src/common/slurm_xlator.h b/src/common/slurm_xlator.h index 8e38ed87b..4513785d1 100644 --- a/src/common/slurm_xlator.h +++ b/src/common/slurm_xlator.h @@ -30,7 +30,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -179,7 +179,7 @@ #define list_insert slurm_list_insert #define list_find slurm_list_find #define list_remove slurm_list_remove -#define list_delete slurm_list_delete +#define list_delete_item slurm_list_delete_item #define list_install_fork_handlers slurm_list_install_fork_handlers /* log.[ch] functions */ diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c new file mode 100644 index 000000000..7cec1b496 --- /dev/null +++ b/src/common/slurmdbd_defs.c @@ -0,0 +1,2299 @@ +/****************************************************************************\ + * slurmdbd_defs.c - functions for use with Slurm DBD RPCs + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#if HAVE_CONFIG_H +# include "config.h" +# if HAVE_INTTYPES_H +# include <inttypes.h> +# else +# if HAVE_STDINT_H +# include <stdint.h> +# endif +# endif /* HAVE_INTTYPES_H */ +#else /* !HAVE_CONFIG_H */ +# include <inttypes.h> +#endif /* HAVE_CONFIG_H */ + +#include <arpa/inet.h> +#include <fcntl.h> +#include <pthread.h> +#include <stdio.h> +#include <syslog.h> +#include <sys/poll.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <time.h> +#include <unistd.h> + +#include "slurm/slurm_errno.h" +#include "src/common/fd.h" +#include "src/common/pack.h" +#include "src/common/slurmdbd_defs.h" +#include "src/common/slurm_auth.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurm_jobacct_gather.h" +#include "src/common/xmalloc.h" +#include "src/common/xsignal.h" +#include "src/common/xstring.h" + +#define DBD_MAGIC 0xDEAD3219 +#define MAX_AGENT_QUEUE 10000 +#define MAX_DBD_MSG_LEN 16384 + +static pthread_mutex_t agent_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t agent_cond = PTHREAD_COND_INITIALIZER; +static List agent_list = (List) NULL; +static pthread_t agent_tid = 0; +static time_t agent_shutdown = 0; + +static pthread_mutex_t slurmdbd_lock = PTHREAD_MUTEX_INITIALIZER; +static slurm_fd slurmdbd_fd = -1; +static char * slurmdbd_auth_info = NULL; +static bool rollback_started = 0; + +static void * _agent(void *x); +static void _agent_queue_del(void *x); +static void _close_slurmdbd_fd(void); +static void _create_agent(void); +static bool _fd_readable(slurm_fd fd); +static int _fd_writeable(slurm_fd fd); +static int _get_return_code(void); +static Buf _load_dbd_rec(int fd); +static void _load_dbd_state(void); +static void _open_slurmdbd_fd(void); +static int _purge_job_start_req(void); +static Buf _recv_msg(void); +static void _reopen_slurmdbd_fd(void); +static int _save_dbd_rec(int fd, Buf buffer); +static void _save_dbd_state(void); +static int _send_init_msg(void); +static int _send_fini_msg(void); +static int _send_msg(Buf buffer); +static void _sig_handler(int signal); +static void _shutdown_agent(void); +static void _slurmdbd_packstr(void *str, Buf buffer); +static int _slurmdbd_unpackstr(void **str, Buf buffer); +static int _tot_wait (struct timeval *start_time); + +/**************************************************************************** + * Socket open/close/read/write functions + ****************************************************************************/ + +/* Open a socket connection to SlurmDbd + * auth_info IN - alternate authentication key + * make_agent IN - make agent to process RPCs if set + * rollback IN - keep journal and permit rollback if set + * Returns SLURM_SUCCESS or an error code */ +extern int slurm_open_slurmdbd_conn(char *auth_info, bool make_agent, + bool rollback) +{ + slurm_mutex_lock(&agent_lock); + if (make_agent && ((agent_tid == 0) || (agent_list == NULL))) + _create_agent(); + slurm_mutex_unlock(&agent_lock); + + slurm_mutex_lock(&slurmdbd_lock); + xfree(slurmdbd_auth_info); + if (auth_info) + slurmdbd_auth_info = xstrdup(auth_info); + + rollback_started = rollback; + + if (slurmdbd_fd < 0) + _open_slurmdbd_fd(); + slurm_mutex_unlock(&slurmdbd_lock); + + return SLURM_SUCCESS; +} + +/* Close the SlurmDBD socket connection */ +extern int slurm_close_slurmdbd_conn(void) +{ + /* NOTE: agent_lock not needed for _shutdown_agent() */ + _shutdown_agent(); + + if (rollback_started) { + 
if (_send_fini_msg() != SLURM_SUCCESS) + error("slurmdbd: Sending fini msg: %m"); + else + debug("slurmdbd: Sent fini msg"); + } + + slurm_mutex_lock(&slurmdbd_lock); + _close_slurmdbd_fd(); + xfree(slurmdbd_auth_info); + slurm_mutex_unlock(&slurmdbd_lock); + + return SLURM_SUCCESS; +} + +/* Send an RPC to the SlurmDBD and wait for the return code reply. + * The RPC will not be queued if an error occurs. + * Returns SLURM_SUCCESS or an error code */ +extern int slurm_send_slurmdbd_recv_rc_msg(slurmdbd_msg_t *req, int *resp_code) +{ + int rc; + slurmdbd_msg_t *resp; + + xassert(req); + xassert(resp_code); + + resp = xmalloc(sizeof(slurmdbd_msg_t)); + rc = slurm_send_recv_slurmdbd_msg(req, resp); + if (rc != SLURM_SUCCESS) { + ; /* error message already sent */ + } else if (resp->msg_type != DBD_RC) { + error("slurmdbd: response is type DBD_RC: %d", resp->msg_type); + rc = SLURM_ERROR; + } else { /* resp->msg_type == DBD_RC */ + dbd_rc_msg_t *msg = resp->data; + *resp_code = msg->return_code; + if(msg->return_code != SLURM_SUCCESS) + error("slurmdbd(%d): from %u: %s", msg->return_code, + msg->sent_type, msg->comment); + slurmdbd_free_rc_msg(msg); + } + xfree(resp); + + return rc; +} + +/* Send an RPC to the SlurmDBD and wait for an arbitrary reply message. + * The RPC will not be queued if an error occurs. + * The "resp" message must be freed by the caller. + * Returns SLURM_SUCCESS or an error code */ +extern int slurm_send_recv_slurmdbd_msg(slurmdbd_msg_t *req, + slurmdbd_msg_t *resp) +{ + int rc = SLURM_SUCCESS; + Buf buffer; + + xassert(req); + xassert(resp); + + slurm_mutex_lock(&slurmdbd_lock); + if (slurmdbd_fd < 0) { + /* Either slurm_open_slurmdbd_conn() was not executed or + * the connection to Slurm DBD has been closed */ + _open_slurmdbd_fd(); + if (slurmdbd_fd < 0) { + slurm_mutex_unlock(&slurmdbd_lock); + return SLURM_ERROR; + } + } + + buffer = pack_slurmdbd_msg(req); + + rc = _send_msg(buffer); + free_buf(buffer); + if (rc != SLURM_SUCCESS) { + error("slurmdbd: Sending message type %u", req->msg_type); + slurm_mutex_unlock(&slurmdbd_lock); + return SLURM_ERROR; + } + + buffer = _recv_msg(); + if (buffer == NULL) { + error("slurmdbd: Getting response to message type %u", + req->msg_type); + slurm_mutex_unlock(&slurmdbd_lock); + return SLURM_ERROR; + } + + rc = unpack_slurmdbd_msg(resp, buffer); + + free_buf(buffer); + slurm_mutex_unlock(&slurmdbd_lock); + return rc; +} + +/* Send an RPC to the SlurmDBD. Do not wait for the reply. The RPC + * will be queued and processed later if the SlurmDBD is not responding. 
+ * NOTE: slurm_open_slurmdbd_conn() must have been called with make_agent set + * + * Returns SLURM_SUCCESS or an error code */ +extern int slurm_send_slurmdbd_msg(slurmdbd_msg_t *req) +{ + Buf buffer; + int cnt, rc = SLURM_SUCCESS; + static time_t syslog_time = 0; + + + buffer = pack_slurmdbd_msg(req); + + slurm_mutex_lock(&agent_lock); + if ((agent_tid == 0) || (agent_list == NULL)) { + _create_agent(); + if ((agent_tid == 0) || (agent_list == NULL)) { + slurm_mutex_unlock(&agent_lock); + free_buf(buffer); + return SLURM_ERROR; + } + } + cnt = list_count(agent_list); + if ((cnt >= (MAX_AGENT_QUEUE / 2)) && + (difftime(time(NULL), syslog_time) > 120)) { + /* Record critical error every 120 seconds */ + syslog_time = time(NULL); + error("slurmdbd: agent queue filling, RESTART SLURMDBD NOW"); + syslog(LOG_CRIT, "*** RESTART SLURMDBD NOW ***"); + } + if (cnt == (MAX_AGENT_QUEUE - 1)) + cnt -= _purge_job_start_req(); + if (cnt < MAX_AGENT_QUEUE) { + if (list_enqueue(agent_list, buffer) == NULL) + fatal("list_enqueue: memory allocation failure"); + } else { + error("slurmdbd: agent queue is full, discarding request"); + rc = SLURM_ERROR; + } + slurm_mutex_unlock(&agent_lock); + pthread_cond_broadcast(&agent_cond); + return rc; +} + +/* Open a connection to the Slurm DBD and set slurmdbd_fd */ +static void _open_slurmdbd_fd() +{ + slurm_addr dbd_addr; + uint16_t slurmdbd_port; + char * slurmdbd_host; + + if (slurmdbd_fd >= 0) { + debug("Attempt to re-open slurmdbd socket"); + return; + } + + slurmdbd_host = slurm_get_accounting_storage_host(); + slurmdbd_port = slurm_get_accounting_storage_port(); + if ((slurmdbd_host == NULL) || (slurmdbd_port == 0)) { + error("Invalid SlurmDbd address %s:%u", + slurmdbd_host, slurmdbd_port); + xfree(slurmdbd_host); + return; + } + + slurm_set_addr(&dbd_addr, slurmdbd_port, slurmdbd_host); + if (dbd_addr.sin_port == 0) + error("Unable to locate SlurmDBD host %s:%u", + slurmdbd_host, slurmdbd_port); + else { + slurmdbd_fd = slurm_open_msg_conn(&dbd_addr); + if (slurmdbd_fd < 0) + error("slurmdbd: slurm_open_msg_conn: %m"); + else { + fd_set_nonblocking(slurmdbd_fd); + if (_send_init_msg() != SLURM_SUCCESS) + error("slurmdbd: Sending DdbInit msg: %m"); + else + debug("slurmdbd: Sent DbdInit msg"); + } + } + xfree(slurmdbd_host); +} + +extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req) +{ + Buf buffer = init_buf(MAX_DBD_MSG_LEN); + pack16(req->msg_type, buffer); + + switch (req->msg_type) { + case DBD_ADD_ACCOUNTS: + case DBD_ADD_ASSOCS: + case DBD_ADD_CLUSTERS: + case DBD_ADD_USERS: + case DBD_GOT_ACCOUNTS: + case DBD_GOT_ASSOCS: + case DBD_GOT_CLUSTERS: + case DBD_GOT_JOBS: + case DBD_GOT_LIST: + case DBD_GOT_USERS: + case DBD_UPDATE_SHARES_USED: + slurmdbd_pack_list_msg( + req->msg_type, (dbd_list_msg_t *)req->data, buffer); + break; + case DBD_ADD_ACCOUNT_COORDS: + case DBD_REMOVE_ACCOUNT_COORDS: + slurmdbd_pack_acct_coord_msg((dbd_acct_coord_msg_t *)req->data, + buffer); + break; + case DBD_CLUSTER_PROCS: + case DBD_FLUSH_JOBS: + slurmdbd_pack_cluster_procs_msg( + (dbd_cluster_procs_msg_t *)req->data, buffer); + break; + case DBD_GET_ACCOUNTS: + case DBD_GET_ASSOCS: + case DBD_GET_CLUSTERS: + case DBD_GET_USERS: + case DBD_REMOVE_ACCOUNTS: + case DBD_REMOVE_ASSOCS: + case DBD_REMOVE_CLUSTERS: + case DBD_REMOVE_USERS: + slurmdbd_pack_cond_msg( + req->msg_type, (dbd_cond_msg_t *)req->data, buffer); + break; + case DBD_GET_ASSOC_USAGE: + case DBD_GOT_ASSOC_USAGE: + case DBD_GET_CLUSTER_USAGE: + case DBD_GOT_CLUSTER_USAGE: + slurmdbd_pack_usage_msg( + 
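Taken together, the new slurmdbd_defs.c exposes a small client API: open the connection (optionally starting the agent thread), send RPCs either synchronously (waiting for the DBD_RC reply) or asynchronously (queued for the agent), and close the connection. A hedged usage sketch only; the caller name and the payload passed as a void pointer are assumptions, and the payload's layout for DBD_NODE_STATE is not shown in this excerpt:

static int _report_to_dbd(void *node_state_payload)
{
    slurmdbd_msg_t req;
    int rc, resp_code = SLURM_SUCCESS;

    /* auth_info NULL = default key; make_agent = 1 starts the queueing
     * agent; rollback = 0 skips the fini handshake on close */
    if (slurm_open_slurmdbd_conn(NULL, 1, 0) != SLURM_SUCCESS)
        return SLURM_ERROR;

    req.msg_type = DBD_NODE_STATE;      /* payload layout is assumed here */
    req.data = node_state_payload;

    /* synchronous form: waits for the DBD_RC reply and checks its code */
    rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code);
    if ((rc != SLURM_SUCCESS) || (resp_code != SLURM_SUCCESS))
        error("slurmdbd: node state update failed");

    /* queued form, shown only to illustrate the agent path: the request
     * is retried later if the daemon is unreachable */
    (void) slurm_send_slurmdbd_msg(&req);

    return slurm_close_slurmdbd_conn();
}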
req->msg_type, (dbd_usage_msg_t *)req->data, + buffer); + break; + case DBD_GET_JOBS: + slurmdbd_pack_get_jobs_msg( + (dbd_get_jobs_msg_t *)req->data, buffer); + break; + case DBD_INIT: + slurmdbd_pack_init_msg((dbd_init_msg_t *)req->data, buffer, + slurmdbd_auth_info); + break; + case DBD_FINI: + slurmdbd_pack_fini_msg((dbd_fini_msg_t *)req->data, buffer); + break; + case DBD_JOB_COMPLETE: + slurmdbd_pack_job_complete_msg((dbd_job_comp_msg_t *)req->data, + buffer); + break; + case DBD_JOB_START: + slurmdbd_pack_job_start_msg((dbd_job_start_msg_t *)req->data, + buffer); + break; + case DBD_JOB_START_RC: + slurmdbd_pack_job_start_rc_msg( + (dbd_job_start_rc_msg_t *)req->data, buffer); + break; + case DBD_JOB_SUSPEND: + slurmdbd_pack_job_suspend_msg( + (dbd_job_suspend_msg_t *)req->data, buffer); + break; + case DBD_MODIFY_ACCOUNTS: + case DBD_MODIFY_ASSOCS: + case DBD_MODIFY_CLUSTERS: + case DBD_MODIFY_USERS: + slurmdbd_pack_modify_msg( + req->msg_type, (dbd_modify_msg_t *)req->data, buffer); + break; + case DBD_NODE_STATE: + slurmdbd_pack_node_state_msg( + (dbd_node_state_msg_t *)req->data, buffer); + break; + case DBD_RC: + slurmdbd_pack_rc_msg((dbd_rc_msg_t *)req->data, buffer); + break; + case DBD_STEP_COMPLETE: + slurmdbd_pack_step_complete_msg( + (dbd_step_comp_msg_t *)req->data, buffer); + break; + case DBD_STEP_START: + slurmdbd_pack_step_start_msg((dbd_step_start_msg_t *)req->data, + buffer); + break; + case DBD_REGISTER_CTLD: + slurmdbd_pack_register_ctld_msg((dbd_register_ctld_msg_t *) + req->data, buffer); + break; + case DBD_ROLL_USAGE: + slurmdbd_pack_roll_usage_msg((dbd_roll_usage_msg_t *) + req->data, buffer); + break; + default: + error("slurmdbd: Invalid message type %u", req->msg_type); + free_buf(buffer); + return NULL; + } + return buffer; +} + +extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer) +{ + int rc = SLURM_SUCCESS; + + safe_unpack16(&resp->msg_type, buffer); + + switch (resp->msg_type) { + case DBD_ADD_ACCOUNTS: + case DBD_ADD_ASSOCS: + case DBD_ADD_CLUSTERS: + case DBD_ADD_USERS: + case DBD_GOT_ACCOUNTS: + case DBD_GOT_ASSOCS: + case DBD_GOT_CLUSTERS: + case DBD_GOT_JOBS: + case DBD_GOT_LIST: + case DBD_GOT_USERS: + case DBD_UPDATE_SHARES_USED: + rc = slurmdbd_unpack_list_msg( + resp->msg_type, (dbd_list_msg_t **)&resp->data, buffer); + break; + case DBD_ADD_ACCOUNT_COORDS: + case DBD_REMOVE_ACCOUNT_COORDS: + rc = slurmdbd_unpack_acct_coord_msg( + (dbd_acct_coord_msg_t **)&resp->data, buffer); + break; + case DBD_CLUSTER_PROCS: + case DBD_FLUSH_JOBS: + rc = slurmdbd_unpack_cluster_procs_msg( + (dbd_cluster_procs_msg_t **)&resp->data, buffer); + break; + case DBD_GET_ACCOUNTS: + case DBD_GET_ASSOCS: + case DBD_GET_CLUSTERS: + case DBD_GET_USERS: + case DBD_REMOVE_ACCOUNTS: + case DBD_REMOVE_ASSOCS: + case DBD_REMOVE_CLUSTERS: + case DBD_REMOVE_USERS: + rc = slurmdbd_unpack_cond_msg( + resp->msg_type, (dbd_cond_msg_t **)&resp->data, buffer); + break; + case DBD_GET_ASSOC_USAGE: + case DBD_GOT_ASSOC_USAGE: + case DBD_GET_CLUSTER_USAGE: + case DBD_GOT_CLUSTER_USAGE: + rc = slurmdbd_unpack_usage_msg( + resp->msg_type, (dbd_usage_msg_t **)&resp->data, + buffer); + break; + case DBD_GET_JOBS: + rc = slurmdbd_unpack_get_jobs_msg( + (dbd_get_jobs_msg_t **)&resp->data, buffer); + break; + case DBD_INIT: + rc = slurmdbd_unpack_init_msg((dbd_init_msg_t **)&resp->data, + buffer, + slurmdbd_auth_info); + break; + case DBD_FINI: + rc = slurmdbd_unpack_fini_msg((dbd_fini_msg_t **)&resp->data, + buffer); + break; + case DBD_JOB_COMPLETE: + rc = 
slurmdbd_unpack_job_complete_msg( + (dbd_job_comp_msg_t **)&resp->data, buffer); + break; + case DBD_JOB_START: + rc = slurmdbd_unpack_job_start_msg( + (dbd_job_start_msg_t **)&resp->data, buffer); + break; + case DBD_JOB_START_RC: + rc = slurmdbd_unpack_job_start_rc_msg( + (dbd_job_start_rc_msg_t **)&resp->data, buffer); + break; + case DBD_JOB_SUSPEND: + rc = slurmdbd_unpack_job_suspend_msg( + (dbd_job_suspend_msg_t **)&resp->data, buffer); + break; + case DBD_MODIFY_ACCOUNTS: + case DBD_MODIFY_ASSOCS: + case DBD_MODIFY_CLUSTERS: + case DBD_MODIFY_USERS: + rc = slurmdbd_unpack_modify_msg( + resp->msg_type, (dbd_modify_msg_t **)&resp->data, + buffer); + break; + case DBD_NODE_STATE: + rc = slurmdbd_unpack_node_state_msg( + (dbd_node_state_msg_t **)&resp->data, buffer); + break; + case DBD_RC: + rc = slurmdbd_unpack_rc_msg((dbd_rc_msg_t **)&resp->data, + buffer); + break; + case DBD_STEP_COMPLETE: + rc = slurmdbd_unpack_step_complete_msg( + (dbd_step_comp_msg_t **)&resp->data, buffer); + break; + case DBD_STEP_START: + rc = slurmdbd_unpack_step_start_msg( + (dbd_step_start_msg_t **)&resp->data, buffer); + break; + case DBD_REGISTER_CTLD: + rc = slurmdbd_unpack_register_ctld_msg( + (dbd_register_ctld_msg_t **)&resp->data, buffer); + break; + case DBD_ROLL_USAGE: + rc = slurmdbd_unpack_roll_usage_msg( + (dbd_roll_usage_msg_t **)&resp->data, buffer); + break; + default: + error("slurmdbd: Invalid message type %u", resp->msg_type); + return SLURM_ERROR; + } + return rc; + +unpack_error: + return SLURM_ERROR; +} + +static int _send_init_msg(void) +{ + int rc; + Buf buffer; + dbd_init_msg_t req; + + buffer = init_buf(1024); + pack16((uint16_t) DBD_INIT, buffer); + req.rollback = rollback_started; + req.version = SLURMDBD_VERSION; + slurmdbd_pack_init_msg(&req, buffer, slurmdbd_auth_info); + + rc = _send_msg(buffer); + free_buf(buffer); + if (rc != SLURM_SUCCESS) { + error("slurmdbd: Sending DBD_INIT message"); + return rc; + } + + rc = _get_return_code(); + return rc; +} + +static int _send_fini_msg(void) +{ + Buf buffer; + dbd_fini_msg_t req; + + buffer = init_buf(1024); + pack16((uint16_t) DBD_FINI, buffer); + req.commit = 0; + req.close_conn = 1; + slurmdbd_pack_fini_msg(&req, buffer); + + _send_msg(buffer); + free_buf(buffer); + + return SLURM_SUCCESS; +} + +/* Close the SlurmDbd connection */ +static void _close_slurmdbd_fd(void) +{ + if (slurmdbd_fd >= 0) { + close(slurmdbd_fd); + slurmdbd_fd = -1; + } +} + +/* Reopen the Slurm DBD connection due to some error */ +static void _reopen_slurmdbd_fd(void) +{ + info("slurmdbd: reopening connection"); + _close_slurmdbd_fd(); + _open_slurmdbd_fd(); +} + +static int _send_msg(Buf buffer) +{ + uint32_t msg_size, nw_size; + char *msg; + ssize_t msg_wrote; + int rc, retry_cnt = 0; + + if (slurmdbd_fd < 0) + return EAGAIN; + + rc =_fd_writeable(slurmdbd_fd); + if (rc == -1) { + re_open: /* SlurmDBD shutdown, try to reopen a connection now */ + if (retry_cnt++ > 3) + return EAGAIN; + _reopen_slurmdbd_fd(); + rc = _fd_writeable(slurmdbd_fd); + } + if (rc < 1) + return EAGAIN; + + msg_size = get_buf_offset(buffer); + nw_size = htonl(msg_size); + msg_wrote = write(slurmdbd_fd, &nw_size, sizeof(nw_size)); + if (msg_wrote != sizeof(nw_size)) + return EAGAIN; + + msg = get_buf_data(buffer); + while (msg_size > 0) { + rc = _fd_writeable(slurmdbd_fd); + if (rc == -1) + goto re_open; + if (rc < 1) + return EAGAIN; + msg_wrote = write(slurmdbd_fd, msg, msg_size); + if (msg_wrote <= 0) + return EAGAIN; + msg += msg_wrote; + msg_size -= msg_wrote; + } + + return 
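_send_msg() frames every RPC the same way: a 4-byte message length in network byte order, then the payload, written in a loop because the descriptor may accept only part of the data at a time. A condensed sketch of the write side of that framing, without the _fd_writeable()/reconnect handling; the helper name is an assumption:

static int _write_framed(int fd, const char *payload, uint32_t len)
{
    uint32_t nw_len = htonl(len);       /* length prefix, network order */
    const char *p = payload;
    ssize_t n;

    if (write(fd, &nw_len, sizeof(nw_len)) != sizeof(nw_len))
        return SLURM_ERROR;
    while (len > 0) {                   /* loop until the payload is out */
        n = write(fd, p, len);
        if (n <= 0)
            return SLURM_ERROR;
        p += n;
        len -= n;
    }
    return SLURM_SUCCESS;
}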
SLURM_SUCCESS; +} + +static int _get_return_code(void) +{ + Buf buffer; + uint16_t msg_type; + dbd_rc_msg_t *msg; + dbd_job_start_rc_msg_t *js_msg; + int rc = SLURM_ERROR; + + buffer = _recv_msg(); + if (buffer == NULL) + return rc; + + safe_unpack16(&msg_type, buffer); + switch(msg_type) { + case DBD_JOB_START_RC: + if (slurmdbd_unpack_job_start_rc_msg(&js_msg, buffer) + == SLURM_SUCCESS) { + rc = js_msg->return_code; + slurmdbd_free_job_start_rc_msg(js_msg); + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_JOB_START_RC is %d", rc); + } else + error("slurmdbd: unpack message error"); + break; + case DBD_RC: + if (slurmdbd_unpack_rc_msg(&msg, buffer) == SLURM_SUCCESS) { + rc = msg->return_code; + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_RC is %d from %u: %s", + rc, msg->sent_type, msg->comment); + slurmdbd_free_rc_msg(msg); + } else + error("slurmdbd: unpack message error"); + break; + default: + error("slurmdbd: bad message type %d != DBD_RC", msg_type); + } + +unpack_error: + free_buf(buffer); + return rc; +} + +static Buf _recv_msg(void) +{ + uint32_t msg_size, nw_size; + char *msg; + ssize_t msg_read, offset; + Buf buffer; + + if (slurmdbd_fd < 0) + return NULL; + + if (!_fd_readable(slurmdbd_fd)) + return NULL; + msg_read = read(slurmdbd_fd, &nw_size, sizeof(nw_size)); + if (msg_read != sizeof(nw_size)) + return NULL; + msg_size = ntohl(nw_size); + if ((msg_size < 2) || (msg_size > 1000000)) { + error("slurmdbd: Invalid msg_size (%u)"); + return NULL; + } + + msg = xmalloc(msg_size); + offset = 0; + while (msg_size > offset) { + if (!_fd_readable(slurmdbd_fd)) + break; /* problem with this socket */ + msg_read = read(slurmdbd_fd, (msg + offset), + (msg_size - offset)); + if (msg_read <= 0) { + error("slurmdbd: read: %m"); + break; + } + offset += msg_read; + } + if (msg_size != offset) { + if (agent_shutdown == 0) { + error("slurmdbd: only read %d of %d bytes", + offset, msg_size); + } /* else in shutdown mode */ + xfree(msg); + return NULL; + } + + buffer = create_buf(msg, msg_size); + if (buffer == NULL) + fatal("create_buf: malloc failure"); + return buffer; +} + +/* Return time in msec since "start time" */ +static int _tot_wait (struct timeval *start_time) +{ + struct timeval end_time; + int msec_delay; + + gettimeofday(&end_time, NULL); + msec_delay = (end_time.tv_sec - start_time->tv_sec ) * 1000; + msec_delay += ((end_time.tv_usec - start_time->tv_usec + 500) / 1000); + return msec_delay; +} + +/* Wait until a file is readable, + * RET false if can not be read */ +static bool _fd_readable(slurm_fd fd) +{ + struct pollfd ufds; + static int msg_timeout = -1; + int rc, time_left; + struct timeval tstart; + + if (msg_timeout == -1) + msg_timeout = slurm_get_msg_timeout() * 1000; + + ufds.fd = fd; + ufds.events = POLLIN; + gettimeofday(&tstart, NULL); + while (agent_shutdown == 0) { + time_left = msg_timeout - _tot_wait(&tstart); + rc = poll(&ufds, 1, time_left); + if (rc == -1) { + if ((errno == EINTR) || (errno == EAGAIN)) + continue; + error("poll: %m"); + return false; + } + if (rc == 0) + return false; + if (ufds.revents & POLLHUP) { + debug2("SlurmDBD connection closed"); + return false; + } + if (ufds.revents & POLLNVAL) { + error("SlurmDBD connection is invalid"); + return false; + } + if (ufds.revents & POLLERR) { + error("SlurmDBD connection experienced an error"); + return false; + } + if ((ufds.revents & POLLIN) == 0) { + error("SlurmDBD connection %d events %d", + fd, ufds.revents); + return false; + } + /* revents == POLLIN */ + return true; + } + return 
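The matching read side (_recv_msg above) recovers the 4-byte length, sanity-checks it before allocating, then reads until the full payload has arrived. A simplified illustrative version; note that it passes msg_size to the error() format string, which the hunk above expects but omits:

static char *_read_framed(int fd, uint32_t *msg_size)
{
    uint32_t nw_size, offset = 0;
    ssize_t n;
    char *msg;

    if (read(fd, &nw_size, sizeof(nw_size)) != sizeof(nw_size))
        return NULL;
    *msg_size = ntohl(nw_size);
    if ((*msg_size < 2) || (*msg_size > 1000000)) {
        error("invalid msg_size (%u)", *msg_size);
        return NULL;
    }
    msg = xmalloc(*msg_size);
    while (offset < *msg_size) {        /* read until complete */
        n = read(fd, msg + offset, *msg_size - offset);
        if (n <= 0) {
            xfree(msg);
            return NULL;
        }
        offset += n;
    }
    return msg;
}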
false; +} + +/* Wait until a file is writable, + * RET 1 if file can be written now, + * 0 if can not be written to within 5 seconds + * -1 if file has been closed POLLHUP + */ +static int _fd_writeable(slurm_fd fd) +{ + struct pollfd ufds; + int msg_timeout = 5000; + int rc, time_left; + struct timeval tstart; + + ufds.fd = fd; + ufds.events = POLLOUT; + gettimeofday(&tstart, NULL); + while (agent_shutdown == 0) { + time_left = msg_timeout - _tot_wait(&tstart); + rc = poll(&ufds, 1, time_left); + if (rc == -1) { + if ((errno == EINTR) || (errno == EAGAIN)) + continue; + error("poll: %m"); + return -1; + } + if (rc == 0) + return 0; + if (ufds.revents & POLLHUP) { + debug2("SlurmDBD connection is closed"); + return -1; + } + if (ufds.revents & POLLNVAL) { + error("SlurmDBD connection is invalid"); + return 0; + } + if (ufds.revents & POLLERR) { + error("SlurmDBD connection experienced an error: %m"); + return 0; + } + if ((ufds.revents & POLLOUT) == 0) { + error("SlurmDBD connection %d events %d", + fd, ufds.revents); + return 0; + } + /* revents == POLLOUT */ + return 1; + } + return 0; +} + +/**************************************************************************** + * Functions for agent to manage queue of pending message for the Slurm DBD + ****************************************************************************/ +static void _create_agent(void) +{ + if (agent_list == NULL) { + agent_list = list_create(_agent_queue_del); + if (agent_list == NULL) + fatal("list_create: malloc failure"); + _load_dbd_state(); + } + + if (agent_tid == 0) { + pthread_attr_t agent_attr; + slurm_attr_init(&agent_attr); + if (pthread_create(&agent_tid, &agent_attr, _agent, NULL) || + (agent_tid == 0)) + fatal("pthread_create: %m"); + } +} + +static void _agent_queue_del(void *x) +{ + Buf buffer = (Buf) x; + free_buf(buffer); +} + +static void _shutdown_agent(void) +{ + int i; + + if (agent_tid) { + agent_shutdown = time(NULL); + for (i=0; i<50; i++) { /* up to 5 secs total */ + pthread_cond_broadcast(&agent_cond); + usleep(100000); /* 0.1 sec per try */ + if (pthread_kill(agent_tid, SIGUSR1)) + break; + + } + /* On rare occasions agent thread may not end quickly, + * perhaps due to communication problems with slurmdbd. + * Cancel it and join before returning or we could remove + * and leave the agent without valid data */ + if (pthread_kill(agent_tid, 0) == 0) { + error("slurmdbd: agent failed to shutdown gracefully"); + error("slurmdbd: unable to save pending requests"); + pthread_cancel(agent_tid); + } + pthread_join(agent_tid, NULL); + agent_tid = 0; + } +} + +static void _slurmdbd_packstr(void *str, Buf buffer) +{ + packstr((char *)str, buffer); +} + +static int _slurmdbd_unpackstr(void **str, Buf buffer) +{ + uint32_t uint32_tmp; + safe_unpackstr_xmalloc((char **)str, &uint32_tmp, buffer); + return SLURM_SUCCESS; +unpack_error: + return SLURM_ERROR; +} + +static void *_agent(void *x) +{ + int cnt, rc; + Buf buffer; + struct timespec abs_time; + static time_t fail_time = 0; + int sigarray[] = {SIGUSR1, 0}; + + /* Prepare to catch SIGUSR1 to interrupt pending + * I/O and terminate in a timely fashion. 
*/ + xsignal(SIGUSR1, _sig_handler); + xsignal_unblock(sigarray); + + while (agent_shutdown == 0) { + + slurm_mutex_lock(&slurmdbd_lock); + if ((slurmdbd_fd < 0) && + (difftime(time(NULL), fail_time) >= 10)) { + /* The connection to Slurm DBD is not open */ + _open_slurmdbd_fd(); + if (slurmdbd_fd < 0) + fail_time = time(NULL); + } + + slurm_mutex_lock(&agent_lock); + if (agent_list && slurmdbd_fd) + cnt = list_count(agent_list); + else + cnt = 0; + if ((cnt == 0) || (slurmdbd_fd < 0) || + (fail_time && (difftime(time(NULL), fail_time) < 10))) { + slurm_mutex_unlock(&slurmdbd_lock); + abs_time.tv_sec = time(NULL) + 10; + abs_time.tv_nsec = 0; + rc = pthread_cond_timedwait(&agent_cond, &agent_lock, + &abs_time); + slurm_mutex_unlock(&agent_lock); + continue; + } else if ((cnt > 0) && ((cnt % 50) == 0)) + info("slurmdbd: agent queue size %u", cnt); + /* Leave item on the queue until processing complete */ + if (agent_list) + buffer = (Buf) list_peek(agent_list); + else + buffer = NULL; + slurm_mutex_unlock(&agent_lock); + if (buffer == NULL) { + slurm_mutex_unlock(&slurmdbd_lock); + continue; + } + + /* NOTE: agent_lock is clear here, so we can add more + * requests to the queue while waiting for this RPC to + * complete. */ + rc = _send_msg(buffer); + if (rc != SLURM_SUCCESS) { + if (agent_shutdown) + break; + error("slurmdbd: Failure sending message"); + } else { + rc = _get_return_code(); + if (rc == EAGAIN) { + if (agent_shutdown) + break; + error("slurmdbd: Failure with " + "message need to resend"); + } + } + slurm_mutex_unlock(&slurmdbd_lock); + + slurm_mutex_lock(&agent_lock); + if (agent_list && (rc == SLURM_SUCCESS)) { + buffer = (Buf) list_dequeue(agent_list); + free_buf(buffer); + fail_time = 0; + } else { + fail_time = time(NULL); + } + slurm_mutex_unlock(&agent_lock); + } + + slurm_mutex_lock(&agent_lock); + _save_dbd_state(); + if (agent_list) { + list_destroy(agent_list); + agent_list = NULL; + } + slurm_mutex_unlock(&agent_lock); + return NULL; +} + +static void _save_dbd_state(void) +{ + char *dbd_fname; + Buf buffer; + int fd, rc, wrote = 0; + + dbd_fname = slurm_get_state_save_location(); + xstrcat(dbd_fname, "/dbd.messages"); + fd = open(dbd_fname, O_WRONLY | O_CREAT | O_TRUNC, 0600); + if (fd < 0) { + error("slurmdbd: Creating state save file %s", dbd_fname); + } else if (agent_list) { + while ((buffer = list_dequeue(agent_list))) { + rc = _save_dbd_rec(fd, buffer); + free_buf(buffer); + if (rc != SLURM_SUCCESS) + break; + wrote++; + } + } + if (fd >= 0) { + verbose("slurmdbd: saved %d pending RPCs", wrote); + (void) close(fd); + } + xfree(dbd_fname); +} + +static void _load_dbd_state(void) +{ + char *dbd_fname; + Buf buffer; + int fd, recovered = 0; + + dbd_fname = slurm_get_state_save_location(); + xstrcat(dbd_fname, "/dbd.messages"); + fd = open(dbd_fname, O_RDONLY); + if (fd < 0) { + error("slurmdbd: Opening state save file %s", dbd_fname); + } else { + while (1) { + buffer = _load_dbd_rec(fd); + if (buffer == NULL) + break; + if (list_enqueue(agent_list, buffer) == NULL) + fatal("slurmdbd: list_enqueue, no memory"); + recovered++; + } + } + if (fd >= 0) { + verbose("slurmdbd: recovered %d pending RPCs", recovered); + (void) close(fd); + (void) unlink(dbd_fname); /* clear save state */ + } + xfree(dbd_fname); +} + +static int _save_dbd_rec(int fd, Buf buffer) +{ + ssize_t size, wrote; + uint32_t msg_size = get_buf_offset(buffer); + uint32_t magic = DBD_MAGIC; + char *msg = get_buf_data(buffer); + + size = sizeof(msg_size); + wrote = write(fd, &msg_size, size); + if 
(wrote != size) { + error("slurmdbd: state save error: %m"); + return SLURM_ERROR; + } + + wrote = 0; + while (wrote < msg_size) { + wrote = write(fd, msg, msg_size); + if (wrote > 0) { + msg += wrote; + msg_size -= wrote; + } else if ((wrote == -1) && (errno == EINTR)) + continue; + else { + error("slurmdbd: state save error: %m"); + return SLURM_ERROR; + } + } + + size = sizeof(magic); + wrote = write(fd, &magic, size); + if (wrote != size) { + error("slurmdbd: state save error: %m"); + return SLURM_ERROR; + } + + return SLURM_SUCCESS; +} + +static Buf _load_dbd_rec(int fd) +{ + ssize_t size, rd_size; + uint32_t msg_size, magic; + char *msg; + Buf buffer; + + size = sizeof(msg_size); + rd_size = read(fd, &msg_size, size); + if (rd_size == 0) + return (Buf) NULL; + if (rd_size != size) { + error("slurmdbd: state recover error: %m"); + return (Buf) NULL; + } + if (msg_size > MAX_DBD_MSG_LEN) { + error("slurmdbd: state recover error, msg_size=%u", msg_size); + return (Buf) NULL; + } + + buffer = init_buf((int) msg_size); + if (buffer == NULL) + fatal("slurmdbd: create_buf malloc failure"); + set_buf_offset(buffer, msg_size); + msg = get_buf_data(buffer); + size = msg_size; + while (size) { + rd_size = read(fd, msg, size); + if (rd_size > 0) { + msg += rd_size; + size -= rd_size; + } else if ((rd_size == -1) && (errno == EINTR)) + continue; + else { + error("slurmdbd: state recover error: %m"); + free_buf(buffer); + return (Buf) NULL; + } + } + + size = sizeof(magic); + rd_size = read(fd, &magic, size); + if ((rd_size != size) || (magic != DBD_MAGIC)) { + error("slurmdbd: state recover error"); + free_buf(buffer); + return (Buf) NULL; + } + + return buffer; +} + +static void _sig_handler(int signal) +{ +} + +/* Purge queued job/step start records from the agent queue + * RET number of records purged */ +static int _purge_job_start_req(void) +{ + int purged = 0; + ListIterator iter; + uint16_t msg_type; + uint32_t offset; + Buf buffer; + + iter = list_iterator_create(agent_list); + while ((buffer = list_next(iter))) { + offset = get_buf_offset(buffer); + if (offset < 2) + continue; + set_buf_offset(buffer, 0); + unpack16(&msg_type, buffer); + set_buf_offset(buffer, offset); + if ((msg_type == DBD_JOB_START) || + (msg_type == DBD_STEP_START)) { + list_remove(iter); + purged++; + } + } + list_iterator_destroy(iter); + info("slurmdbd: purge %d job/step start records", purged); + return purged; +} + +/****************************************************************************\ + * Free data structures +\****************************************************************************/ +void inline slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg) +{ + if(msg) { + xfree(msg->acct); + destroy_acct_user_cond(msg->cond); + xfree(msg); + } +} +void inline slurmdbd_free_cluster_procs_msg(dbd_cluster_procs_msg_t *msg) +{ + if (msg) { + xfree(msg->cluster_name); + xfree(msg); + } +} + +void inline slurmdbd_free_cond_msg(slurmdbd_msg_type_t type, + dbd_cond_msg_t *msg) +{ + void (*my_destroy) (void *object); + + if (msg) { + switch(type) { + case DBD_GET_ACCOUNTS: + case DBD_REMOVE_ACCOUNTS: + my_destroy = destroy_acct_account_cond; + break; + case DBD_GET_ASSOCS: + case DBD_REMOVE_ASSOCS: + my_destroy = destroy_acct_association_cond; + break; + case DBD_GET_CLUSTERS: + case DBD_REMOVE_CLUSTERS: + my_destroy = destroy_acct_cluster_cond; + break; + case DBD_GET_USERS: + case DBD_REMOVE_USERS: + my_destroy = destroy_acct_user_cond; + break; + default: + fatal("Unknown cond type"); + return; + } + 
if(msg->cond) + (*(my_destroy))(msg->cond); + xfree(msg); + } +} + +void inline slurmdbd_free_get_jobs_msg(dbd_get_jobs_msg_t *msg) +{ + if (msg) { + xfree(msg->cluster_name); + if(msg->selected_steps) + list_destroy(msg->selected_steps); + if(msg->selected_parts) + list_destroy(msg->selected_parts); + xfree(msg->user); + xfree(msg); + } +} + +void inline slurmdbd_free_init_msg(dbd_init_msg_t *msg) +{ + xfree(msg); +} + +void inline slurmdbd_free_fini_msg(dbd_fini_msg_t *msg) +{ + xfree(msg); +} + +void inline slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg) +{ + if (msg) { + xfree(msg->nodes); + xfree(msg); + } +} + +void inline slurmdbd_free_job_start_msg(dbd_job_start_msg_t *msg) +{ + if (msg) { + xfree(msg->account); + xfree(msg->block_id); + xfree(msg->name); + xfree(msg->nodes); + xfree(msg->partition); + xfree(msg); + } +} + +void inline slurmdbd_free_job_start_rc_msg(dbd_job_start_rc_msg_t *msg) +{ + xfree(msg); +} + +void inline slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg) +{ + xfree(msg); +} + +void inline slurmdbd_free_list_msg(dbd_list_msg_t *msg) +{ + if (msg) { + if(msg->my_list) + list_destroy(msg->my_list); + xfree(msg); + } +} + +void inline slurmdbd_free_modify_msg(slurmdbd_msg_type_t type, + dbd_modify_msg_t *msg) +{ + void (*destroy_cond) (void *object); + void (*destroy_rec) (void *object); + + if (msg) { + switch(type) { + case DBD_MODIFY_ACCOUNTS: + destroy_cond = destroy_acct_account_cond; + destroy_rec = destroy_acct_account_rec; + break; + case DBD_MODIFY_ASSOCS: + destroy_cond = destroy_acct_association_cond; + destroy_rec = destroy_acct_association_rec; + break; + case DBD_MODIFY_CLUSTERS: + destroy_cond = destroy_acct_cluster_cond; + destroy_rec = destroy_acct_cluster_rec; + break; + case DBD_MODIFY_USERS: + destroy_cond = destroy_acct_user_cond; + destroy_rec = destroy_acct_user_rec; + break; + default: + fatal("Unknown modify type"); + return; + } + + if(msg->cond) + (*(destroy_cond))(msg->cond); + if(msg->rec) + (*(destroy_rec))(msg->rec); + xfree(msg); + } +} + +void inline slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg) +{ + if (msg) { + xfree(msg->cluster_name); + xfree(msg->hostlist); + xfree(msg->reason); + xfree(msg); + } +} + +void inline slurmdbd_free_rc_msg(dbd_rc_msg_t *msg) +{ + if(msg) { + xfree(msg->comment); + xfree(msg); + } +} + +void inline slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg) +{ + if(msg) { + xfree(msg->cluster_name); + xfree(msg); + } +} + +void inline slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg) +{ + xfree(msg); +} + +void inline slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg) +{ + if (msg) { + xfree(msg->jobacct); + xfree(msg); + } +} + +void inline slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg) +{ + if (msg) { + xfree(msg->name); + xfree(msg->nodes); + xfree(msg); + } +} + +void inline slurmdbd_free_usage_msg(slurmdbd_msg_type_t type, + dbd_usage_msg_t *msg) +{ + void (*destroy_rec) (void *object); + if (msg) { + switch(type) { + case DBD_GET_ASSOC_USAGE: + case DBD_GOT_ASSOC_USAGE: + destroy_rec = destroy_acct_association_rec; + break; + case DBD_GET_CLUSTER_USAGE: + case DBD_GOT_CLUSTER_USAGE: + destroy_rec = destroy_acct_cluster_rec; + break; + default: + fatal("Unknown usuage type"); + return; + } + + if(msg->rec) + (*(destroy_rec))(msg->rec); + xfree(msg); + } +} + +/****************************************************************************\ + * Pack and unpack data structures 
+\****************************************************************************/ +void inline +slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg, Buf buffer) +{ + packstr(msg->acct, buffer); + pack_acct_user_cond(msg->cond, buffer); +} + +int inline +slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg, Buf buffer) +{ + uint32_t uint32_tmp; + dbd_acct_coord_msg_t *msg_ptr = xmalloc(sizeof(dbd_acct_coord_msg_t)); + *msg = msg_ptr; + + safe_unpackstr_xmalloc(&msg_ptr->acct, &uint32_tmp, buffer); + if(unpack_acct_user_cond((void *)&msg_ptr->cond, buffer) == SLURM_ERROR) + goto unpack_error; + + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_acct_coord_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_cluster_procs_msg(dbd_cluster_procs_msg_t *msg, Buf buffer) +{ + packstr(msg->cluster_name, buffer); + pack32(msg->proc_count, buffer); + pack_time(msg->event_time, buffer); +} + +int inline +slurmdbd_unpack_cluster_procs_msg(dbd_cluster_procs_msg_t **msg, Buf buffer) +{ + dbd_cluster_procs_msg_t *msg_ptr; + uint32_t uint32_tmp; + + msg_ptr = xmalloc(sizeof(dbd_cluster_procs_msg_t)); + *msg = msg_ptr; + safe_unpackstr_xmalloc(&msg_ptr->cluster_name, &uint32_tmp, buffer); + safe_unpack32(&msg_ptr->proc_count, buffer); + safe_unpack_time(&msg_ptr->event_time, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_cluster_procs_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline slurmdbd_pack_cond_msg(slurmdbd_msg_type_t type, + dbd_cond_msg_t *msg, Buf buffer) +{ + void (*my_function) (void *object, Buf buffer); + + switch(type) { + case DBD_GET_ACCOUNTS: + case DBD_REMOVE_ACCOUNTS: + my_function = pack_acct_account_cond; + break; + case DBD_GET_ASSOCS: + case DBD_REMOVE_ASSOCS: + my_function = pack_acct_association_cond; + break; + case DBD_GET_CLUSTERS: + case DBD_REMOVE_CLUSTERS: + my_function = pack_acct_cluster_cond; + break; + case DBD_GET_USERS: + case DBD_REMOVE_USERS: + my_function = pack_acct_user_cond; + break; + default: + fatal("Unknown pack type"); + return; + } + + (*(my_function))(msg->cond, buffer); +} + +int inline slurmdbd_unpack_cond_msg(slurmdbd_msg_type_t type, + dbd_cond_msg_t **msg, Buf buffer) +{ + dbd_cond_msg_t *msg_ptr = NULL; + int (*my_function) (void **object, Buf buffer); + + switch(type) { + case DBD_GET_ACCOUNTS: + case DBD_REMOVE_ACCOUNTS: + my_function = unpack_acct_account_cond; + break; + case DBD_GET_ASSOCS: + case DBD_REMOVE_ASSOCS: + my_function = unpack_acct_association_cond; + break; + case DBD_GET_CLUSTERS: + case DBD_REMOVE_CLUSTERS: + my_function = unpack_acct_cluster_cond; + break; + case DBD_GET_USERS: + case DBD_REMOVE_USERS: + my_function = unpack_acct_user_cond; + break; + default: + fatal("Unknown unpack type"); + return SLURM_ERROR; + } + + msg_ptr = xmalloc(sizeof(dbd_cond_msg_t)); + *msg = msg_ptr; + + if((*(my_function))(&msg_ptr->cond, buffer) == SLURM_ERROR) + goto unpack_error; + + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_cond_msg(type, msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg, Buf buffer) +{ + uint32_t i = 0; + ListIterator itr = NULL; + jobacct_selected_step_t *job = NULL; + char *part = NULL; + + packstr(msg->cluster_name, buffer); + + pack16(msg->completion, buffer); + + pack32(msg->gid, buffer); + + pack_time(msg->last_update, buffer); + + if(msg->selected_steps) + i = list_count(msg->selected_steps); + + pack32(i, buffer); + if(i) { + itr = list_iterator_create(msg->selected_steps); + while((job = 
list_next(itr))) { + pack_jobacct_selected_step(job, buffer); + } + list_iterator_destroy(itr); + } + + i = 0; + if(msg->selected_parts) + i = list_count(msg->selected_parts); + + pack32(i, buffer); + if(i) { + itr = list_iterator_create(msg->selected_parts); + while((part = list_next(itr))) { + packstr(part, buffer); + } + list_iterator_destroy(itr); + } + packstr(msg->user, buffer); +} + +int inline slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg, Buf buffer) +{ + int i; + uint32_t count = 0; + uint32_t uint32_tmp; + dbd_get_jobs_msg_t *msg_ptr; + jobacct_selected_step_t *job = NULL; + char *part = NULL; + + msg_ptr = xmalloc(sizeof(dbd_get_jobs_msg_t)); + *msg = msg_ptr; + + safe_unpackstr_xmalloc(&msg_ptr->cluster_name, &uint32_tmp, buffer); + + safe_unpack16(&msg_ptr->completion, buffer); + + safe_unpack32(&msg_ptr->gid, buffer); + + safe_unpack_time(&msg_ptr->last_update, buffer); + + safe_unpack32(&count, buffer); + if(count) { + msg_ptr->selected_steps = + list_create(destroy_jobacct_selected_step); + for(i=0; i<count; i++) { + unpack_jobacct_selected_step(&job, buffer); + list_append(msg_ptr->selected_steps, job); + } + } + safe_unpack32(&count, buffer); + if(count) { + msg_ptr->selected_parts = list_create(slurm_destroy_char); + for(i=0; i<count; i++) { + safe_unpackstr_xmalloc(&part, &uint32_tmp, buffer); + list_append(msg_ptr->selected_parts, part); + } + } + + safe_unpackstr_xmalloc(&msg_ptr->user, &uint32_tmp, buffer); + + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_get_jobs_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_init_msg(dbd_init_msg_t *msg, Buf buffer, char *auth_info) +{ + int rc; + void *auth_cred; + + pack16(msg->rollback, buffer); + pack16(msg->version, buffer); + auth_cred = g_slurm_auth_create(NULL, 2, auth_info); + if (auth_cred == NULL) { + error("Creating authentication credential: %s", + g_slurm_auth_errstr(g_slurm_auth_errno(NULL))); + } else { + rc = g_slurm_auth_pack(auth_cred, buffer); + (void) g_slurm_auth_destroy(auth_cred); + if (rc) { + error("Packing authentication credential: %s", + g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred))); + } + } +} + +int inline +slurmdbd_unpack_init_msg(dbd_init_msg_t **msg, Buf buffer, char *auth_info) +{ + void *auth_cred; + + dbd_init_msg_t *msg_ptr = xmalloc(sizeof(dbd_init_msg_t)); + *msg = msg_ptr; + + safe_unpack16(&msg_ptr->rollback, buffer); + safe_unpack16(&msg_ptr->version, buffer); + auth_cred = g_slurm_auth_unpack(buffer); + if (auth_cred == NULL) { + error("Unpacking authentication credential: %s", + g_slurm_auth_errstr(g_slurm_auth_errno(NULL))); + goto unpack_error; + } + msg_ptr->uid = g_slurm_auth_get_uid(auth_cred, auth_info); + g_slurm_auth_destroy(auth_cred); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_init_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_fini_msg(dbd_fini_msg_t *msg, Buf buffer) +{ + pack16(msg->close_conn, buffer); + pack16(msg->commit, buffer); +} + +int inline +slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg, Buf buffer) +{ + dbd_fini_msg_t *msg_ptr = xmalloc(sizeof(dbd_fini_msg_t)); + *msg = msg_ptr; + + safe_unpack16(&msg_ptr->close_conn, buffer); + safe_unpack16(&msg_ptr->commit, buffer); + + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_fini_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg, Buf buffer) +{ + pack32(msg->assoc_id, buffer); + pack32(msg->db_index, buffer); + 
pack_time(msg->end_time, buffer); + pack32(msg->exit_code, buffer); + pack32(msg->job_id, buffer); + pack16(msg->job_state, buffer); + packstr(msg->nodes, buffer); + pack_time(msg->start_time, buffer); + pack_time(msg->submit_time, buffer); +} + +int inline +slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg, Buf buffer) +{ + uint32_t uint32_tmp; + dbd_job_comp_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_comp_msg_t)); + *msg = msg_ptr; + safe_unpack32(&msg_ptr->assoc_id, buffer); + safe_unpack32(&msg_ptr->db_index, buffer); + safe_unpack_time(&msg_ptr->end_time, buffer); + safe_unpack32(&msg_ptr->exit_code, buffer); + safe_unpack32(&msg_ptr->job_id, buffer); + safe_unpack16(&msg_ptr->job_state, buffer); + safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer); + safe_unpack_time(&msg_ptr->start_time, buffer); + safe_unpack_time(&msg_ptr->submit_time, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_job_complete_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_job_start_msg(dbd_job_start_msg_t *msg, Buf buffer) +{ + packstr(msg->account, buffer); + pack32(msg->alloc_cpus, buffer); + pack32(msg->assoc_id, buffer); + packstr(msg->block_id, buffer); + pack32(msg->db_index, buffer); + pack_time(msg->eligible_time, buffer); + pack32(msg->gid, buffer); + pack32(msg->job_id, buffer); + pack16(msg->job_state, buffer); + packstr(msg->name, buffer); + packstr(msg->nodes, buffer); + packstr(msg->partition, buffer); + pack32(msg->priority, buffer); + pack32(msg->req_cpus, buffer); + pack_time(msg->start_time, buffer); + pack_time(msg->submit_time, buffer); + pack32(msg->uid, buffer); +} + +int inline +slurmdbd_unpack_job_start_msg(dbd_job_start_msg_t **msg, Buf buffer) +{ + uint32_t uint32_tmp; + dbd_job_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_start_msg_t)); + *msg = msg_ptr; + safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer); + safe_unpack32(&msg_ptr->alloc_cpus, buffer); + safe_unpack32(&msg_ptr->assoc_id, buffer); + safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer); + safe_unpack32(&msg_ptr->db_index, buffer); + safe_unpack_time(&msg_ptr->eligible_time, buffer); + safe_unpack32(&msg_ptr->gid, buffer); + safe_unpack32(&msg_ptr->job_id, buffer); + safe_unpack16(&msg_ptr->job_state, buffer); + safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg_ptr->partition, &uint32_tmp, buffer); + safe_unpack32(&msg_ptr->priority, buffer); + safe_unpack32(&msg_ptr->req_cpus, buffer); + safe_unpack_time(&msg_ptr->start_time, buffer); + safe_unpack_time(&msg_ptr->submit_time, buffer); + safe_unpack32(&msg_ptr->uid, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_job_start_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_job_start_rc_msg(dbd_job_start_rc_msg_t *msg, Buf buffer) +{ + pack32(msg->db_index, buffer); + pack32(msg->return_code, buffer); +} + +int inline +slurmdbd_unpack_job_start_rc_msg(dbd_job_start_rc_msg_t **msg, Buf buffer) +{ + dbd_job_start_rc_msg_t *msg_ptr = + xmalloc(sizeof(dbd_job_start_rc_msg_t)); + *msg = msg_ptr; + safe_unpack32(&msg_ptr->db_index, buffer); + safe_unpack32(&msg_ptr->return_code, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_job_start_rc_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg, Buf buffer) +{ + pack32(msg->assoc_id, buffer); + 
pack32(msg->db_index, buffer); + pack32(msg->job_id, buffer); + pack16(msg->job_state, buffer); + pack_time(msg->submit_time, buffer); + pack_time(msg->suspend_time, buffer); +} + +int inline +slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg, Buf buffer) +{ + dbd_job_suspend_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_suspend_msg_t)); + *msg = msg_ptr; + safe_unpack32(&msg_ptr->assoc_id, buffer); + safe_unpack32(&msg_ptr->db_index, buffer); + safe_unpack32(&msg_ptr->job_id, buffer); + safe_unpack16(&msg_ptr->job_state, buffer); + safe_unpack_time(&msg_ptr->submit_time, buffer); + safe_unpack_time(&msg_ptr->suspend_time, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_job_suspend_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline slurmdbd_pack_list_msg(slurmdbd_msg_type_t type, + dbd_list_msg_t *msg, Buf buffer) +{ + uint32_t count = 0; + ListIterator itr = NULL; + void *object = NULL; + void (*my_function) (void *object, Buf buffer); + + switch(type) { + case DBD_ADD_ACCOUNTS: + case DBD_GOT_ACCOUNTS: + my_function = pack_acct_account_rec; + break; + case DBD_ADD_ASSOCS: + case DBD_GOT_ASSOCS: + my_function = pack_acct_association_rec; + break; + case DBD_ADD_CLUSTERS: + case DBD_GOT_CLUSTERS: + my_function = pack_acct_cluster_rec; + break; + case DBD_GOT_JOBS: + my_function = pack_jobacct_job_rec; + break; + case DBD_GOT_LIST: + my_function = _slurmdbd_packstr; + break; + case DBD_ADD_USERS: + case DBD_GOT_USERS: + my_function = pack_acct_user_rec; + break; + case DBD_UPDATE_SHARES_USED: + my_function = pack_update_shares_used; + break; + default: + fatal("Unknown pack type"); + return; + } + if(msg->my_list) { + count = list_count(msg->my_list); + pack32(count, buffer); + } else { + // to let user know there wasn't a list (error) + pack32((uint32_t)-1, buffer); + } + if(count) { + itr = list_iterator_create(msg->my_list); + while((object = list_next(itr))) { + (*(my_function))(object, buffer); + } + list_iterator_destroy(itr); + } +} + +int inline slurmdbd_unpack_list_msg(slurmdbd_msg_type_t type, + dbd_list_msg_t **msg, Buf buffer) +{ + int i; + uint32_t count; + dbd_list_msg_t *msg_ptr = NULL; + void *object = NULL; + int (*my_function) (void **object, Buf buffer); + void (*my_destroy) (void *object); + + switch(type) { + case DBD_ADD_ACCOUNTS: + case DBD_GOT_ACCOUNTS: + my_function = unpack_acct_account_rec; + my_destroy = destroy_acct_account_rec; + break; + case DBD_ADD_ASSOCS: + case DBD_GOT_ASSOCS: + my_function = unpack_acct_association_rec; + my_destroy = destroy_acct_association_rec; + break; + case DBD_ADD_CLUSTERS: + case DBD_GOT_CLUSTERS: + my_function = unpack_acct_cluster_rec; + my_destroy = destroy_acct_cluster_rec; + break; + case DBD_GOT_JOBS: + my_function = unpack_jobacct_job_rec; + my_destroy = destroy_jobacct_job_rec; + break; + case DBD_GOT_LIST: + my_function = _slurmdbd_unpackstr; + my_destroy = slurm_destroy_char; + break; + case DBD_ADD_USERS: + case DBD_GOT_USERS: + my_function = unpack_acct_user_rec; + my_destroy = destroy_acct_user_rec; + break; + case DBD_UPDATE_SHARES_USED: + my_function = unpack_update_shares_used; + my_destroy = destroy_update_shares_rec; + break; + default: + fatal("Unknown unpack type"); + return SLURM_ERROR; + } + + msg_ptr = xmalloc(sizeof(dbd_list_msg_t)); + *msg = msg_ptr; + safe_unpack32(&count, buffer); + if((int)count > -1) { + /* here we are looking to make the list if -1 or + higher than 0. If -1 we don't want to have the + list be NULL meaning an error occured. 
+ */ + msg_ptr->my_list = list_create((*(my_destroy))); + for(i=0; i<count; i++) { + if(((*(my_function))(&object, buffer)) == SLURM_ERROR) + goto unpack_error; + list_append(msg_ptr->my_list, object); + } + } + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_list_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline slurmdbd_pack_modify_msg(slurmdbd_msg_type_t type, + dbd_modify_msg_t *msg, Buf buffer) +{ + void (*my_cond) (void *object, Buf buffer); + void (*my_rec) (void *object, Buf buffer); + + switch(type) { + case DBD_MODIFY_ACCOUNTS: + my_cond = pack_acct_account_cond; + my_rec = pack_acct_account_rec; + break; + case DBD_MODIFY_ASSOCS: + my_cond = pack_acct_association_cond; + my_rec = pack_acct_association_rec; + break; + case DBD_MODIFY_CLUSTERS: + my_cond = pack_acct_cluster_cond; + my_rec = pack_acct_cluster_rec; + break; + case DBD_MODIFY_USERS: + my_cond = pack_acct_user_cond; + my_rec = pack_acct_user_rec; + break; + default: + fatal("Unknown pack type"); + return; + } + (*(my_cond))(msg->cond, buffer); + (*(my_rec))(msg->rec, buffer); +} + +int inline slurmdbd_unpack_modify_msg(slurmdbd_msg_type_t type, + dbd_modify_msg_t **msg, Buf buffer) +{ + dbd_modify_msg_t *msg_ptr = NULL; + int (*my_cond) (void **object, Buf buffer); + int (*my_rec) (void **object, Buf buffer); + + msg_ptr = xmalloc(sizeof(dbd_modify_msg_t)); + *msg = msg_ptr; + + switch(type) { + case DBD_MODIFY_ACCOUNTS: + my_cond = unpack_acct_account_cond; + my_rec = unpack_acct_account_rec; + break; + case DBD_MODIFY_ASSOCS: + my_cond = unpack_acct_association_cond; + my_rec = unpack_acct_association_rec; + break; + case DBD_MODIFY_CLUSTERS: + my_cond = unpack_acct_cluster_cond; + my_rec = unpack_acct_cluster_rec; + break; + case DBD_MODIFY_USERS: + my_cond = unpack_acct_user_cond; + my_rec = unpack_acct_user_rec; + break; + default: + fatal("Unknown unpack type"); + return SLURM_ERROR; + } + + if((*(my_cond))(&msg_ptr->cond, buffer) == SLURM_ERROR) + goto unpack_error; + if((*(my_rec))(&msg_ptr->rec, buffer) == SLURM_ERROR) + goto unpack_error; + + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_modify_msg(type, msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg, Buf buffer) +{ + packstr(msg->cluster_name, buffer); + pack32(msg->cpu_count, buffer); + packstr(msg->hostlist, buffer); + packstr(msg->reason, buffer); + pack16(msg->new_state, buffer); + pack_time(msg->event_time, buffer); +} + +int inline +slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg, Buf buffer) +{ + dbd_node_state_msg_t *msg_ptr; + uint32_t uint32_tmp; + + msg_ptr = xmalloc(sizeof(dbd_node_state_msg_t)); + *msg = msg_ptr; + safe_unpackstr_xmalloc(&msg_ptr->cluster_name, &uint32_tmp, buffer); + safe_unpack32(&msg_ptr->cpu_count, buffer); + safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg_ptr->reason, &uint32_tmp, buffer); + safe_unpack16(&msg_ptr->new_state, buffer); + safe_unpack_time(&msg_ptr->event_time, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_node_state_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg, Buf buffer) +{ + packstr(msg->comment, buffer); + pack32(msg->return_code, buffer); + pack16(msg->sent_type, buffer); +} + +int inline +slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg, Buf buffer) +{ + uint32_t uint32_tmp; + dbd_rc_msg_t *msg_ptr = xmalloc(sizeof(dbd_rc_msg_t)); + *msg = msg_ptr; + 
safe_unpackstr_xmalloc(&msg_ptr->comment, &uint32_tmp, buffer); + safe_unpack32(&msg_ptr->return_code, buffer); + safe_unpack16(&msg_ptr->sent_type, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_rc_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg, Buf buffer) +{ + packstr(msg->cluster_name, buffer); + pack16(msg->port, buffer); +} + +int inline +slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg, Buf buffer) +{ + uint32_t uint32_tmp; + dbd_register_ctld_msg_t *msg_ptr = xmalloc( + sizeof(dbd_register_ctld_msg_t)); + *msg = msg_ptr; + safe_unpackstr_xmalloc(&msg_ptr->cluster_name, &uint32_tmp, buffer); + safe_unpack16(&msg_ptr->port, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_register_ctld_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg, Buf buffer) +{ + pack_time(msg->start, buffer); +} + +int inline +slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg, Buf buffer) +{ + dbd_roll_usage_msg_t *msg_ptr = xmalloc(sizeof(dbd_roll_usage_msg_t)); + + *msg = msg_ptr; + safe_unpack_time(&msg_ptr->start, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_roll_usage_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg, Buf buffer) +{ + pack32(msg->assoc_id, buffer); + pack32(msg->db_index, buffer); + pack_time(msg->end_time, buffer); + jobacct_common_pack((struct jobacctinfo *)msg->jobacct, buffer); + pack32(msg->job_id, buffer); + pack32(msg->req_uid, buffer); + pack_time(msg->start_time, buffer); + pack_time(msg->job_submit_time, buffer); + pack32(msg->step_id, buffer); + pack32(msg->total_procs, buffer); +} + +int inline +slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg, Buf buffer) +{ + dbd_step_comp_msg_t *msg_ptr = xmalloc(sizeof(dbd_step_comp_msg_t)); + *msg = msg_ptr; + safe_unpack32(&msg_ptr->assoc_id, buffer); + safe_unpack32(&msg_ptr->db_index, buffer); + safe_unpack_time(&msg_ptr->end_time, buffer); + jobacct_common_unpack((struct jobacctinfo **)&msg_ptr->jobacct, buffer); + safe_unpack32(&msg_ptr->job_id, buffer); + safe_unpack32(&msg_ptr->req_uid, buffer); + safe_unpack_time(&msg_ptr->start_time, buffer); + safe_unpack_time(&msg_ptr->job_submit_time, buffer); + safe_unpack32(&msg_ptr->step_id, buffer); + safe_unpack32(&msg_ptr->total_procs, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_step_complete_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline +slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, Buf buffer) +{ + pack32(msg->assoc_id, buffer); + pack32(msg->db_index, buffer); + pack32(msg->job_id, buffer); + packstr(msg->name, buffer); + packstr(msg->nodes, buffer); + pack_time(msg->start_time, buffer); + pack_time(msg->job_submit_time, buffer); + pack32(msg->step_id, buffer); + pack32(msg->total_procs, buffer); +} + +int inline +slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg, Buf buffer) +{ + uint32_t uint32_tmp; + dbd_step_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_step_start_msg_t)); + *msg = msg_ptr; + safe_unpack32(&msg_ptr->assoc_id, buffer); + safe_unpack32(&msg_ptr->db_index, buffer); + safe_unpack32(&msg_ptr->job_id, buffer); + safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer); + safe_unpack_time(&msg_ptr->start_time, buffer); + 
safe_unpack_time(&msg_ptr->job_submit_time, buffer); + safe_unpack32(&msg_ptr->step_id, buffer); + safe_unpack32(&msg_ptr->total_procs, buffer); + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_step_start_msg(msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + +void inline slurmdbd_pack_usage_msg(slurmdbd_msg_type_t type, + dbd_usage_msg_t *msg, Buf buffer) +{ + void (*my_rec) (void *object, Buf buffer); + + switch(type) { + case DBD_GET_ASSOC_USAGE: + case DBD_GOT_ASSOC_USAGE: + my_rec = pack_acct_association_rec; + break; + case DBD_GET_CLUSTER_USAGE: + case DBD_GOT_CLUSTER_USAGE: + my_rec = pack_acct_cluster_rec; + break; + default: + fatal("Unknown pack type"); + return; + } + + (*(my_rec))(msg->rec, buffer); + pack_time(msg->start, buffer); + pack_time(msg->end, buffer); +} + +int inline slurmdbd_unpack_usage_msg(slurmdbd_msg_type_t type, + dbd_usage_msg_t **msg, Buf buffer) +{ + dbd_usage_msg_t *msg_ptr = NULL; + int (*my_rec) (void **object, Buf buffer); + + msg_ptr = xmalloc(sizeof(dbd_usage_msg_t)); + *msg = msg_ptr; + + switch(type) { + case DBD_GET_ASSOC_USAGE: + case DBD_GOT_ASSOC_USAGE: + my_rec = unpack_acct_association_rec; + break; + case DBD_GET_CLUSTER_USAGE: + case DBD_GOT_CLUSTER_USAGE: + my_rec = unpack_acct_cluster_rec; + break; + default: + fatal("Unknown pack type"); + return SLURM_ERROR; + } + + if((*(my_rec))(&msg_ptr->rec, buffer) == SLURM_ERROR) + goto unpack_error; + + unpack_time(&msg_ptr->start, buffer); + unpack_time(&msg_ptr->end, buffer); + + + return SLURM_SUCCESS; + +unpack_error: + slurmdbd_free_usage_msg(type, msg_ptr); + *msg = NULL; + return SLURM_ERROR; +} + diff --git a/src/common/slurmdbd_defs.h b/src/common/slurmdbd_defs.h new file mode 100644 index 000000000..9e3ff3f7a --- /dev/null +++ b/src/common/slurmdbd_defs.h @@ -0,0 +1,424 @@ +/****************************************************************************\ + * slurmdbd_defs.h - definitions used for Slurm DBD RPCs + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef _SLURMDBD_DEFS_H +#define _SLURMDBD_DEFS_H + +#if HAVE_CONFIG_H +# include "config.h" +# if HAVE_INTTYPES_H +# include <inttypes.h> +# else +# if HAVE_STDINT_H +# include <stdint.h> +# endif +# endif /* HAVE_INTTYPES_H */ +#else /* !HAVE_CONFIG_H */ +# include <inttypes.h> +#endif /* HAVE_CONFIG_H */ + +#include <slurm/slurm.h> + +#include "src/common/pack.h" +#include "src/common/list.h" +#include "src/common/slurm_accounting_storage.h" + +/* Increment SLURMDBD_VERSION if any of the RPCs change */ +#define SLURMDBD_VERSION 01 + +/* SLURM DBD message types */ +typedef enum { + DBD_INIT = 1400, /* Connection initialization */ + DBD_FINI, /* Connection finalization */ + DBD_ADD_ACCOUNTS, /* Add new account to the mix */ + DBD_ADD_ACCOUNT_COORDS, /* Add new coordinatior to an account */ + DBD_ADD_ASSOCS, /* Add new association to the mix */ + DBD_ADD_CLUSTERS, /* Add new cluster to the mix */ + DBD_ADD_USERS, /* Add new user to the mix */ + DBD_CLUSTER_PROCS, /* Record total processors on cluster */ + DBD_FLUSH_JOBS, /* End jobs that are still running + * when a controller is restarted. */ + DBD_GET_ACCOUNTS, /* Get account information */ + DBD_GET_ASSOCS, /* Get assocation information */ + DBD_GET_ASSOC_USAGE, /* Get assoc usage information */ + DBD_GET_CLUSTERS, /* Get account information */ + DBD_GET_CLUSTER_USAGE, /* Get cluster usage information */ + DBD_GET_JOBS, /* Get job information */ + DBD_GET_USERS, /* Get account information */ + DBD_GOT_ACCOUNTS, /* Response to DBD_GET_ACCOUNTS */ + DBD_GOT_ASSOCS, /* Response to DBD_GET_ASSOCS */ + DBD_GOT_ASSOC_USAGE, /* Response to DBD_GET_ASSOC_USAGE */ + DBD_GOT_CLUSTERS, /* Response to DBD_GET_CLUSTERS */ + DBD_GOT_CLUSTER_USAGE, /* Response to DBD_GET_CLUSTER_USAGE */ + DBD_GOT_JOBS, /* Response to DBD_GET_JOBS */ + DBD_GOT_LIST, /* Response to DBD_MODIFY/REMOVE MOVE_* */ + DBD_GOT_USERS, /* Response to DBD_GET_USERS */ + DBD_JOB_COMPLETE, /* Record job completion */ + DBD_JOB_START, /* Record job starting */ + DBD_JOB_START_RC, /* return db_index from job insertion */ + DBD_JOB_SUSPEND, /* Record job suspension */ + DBD_MODIFY_ACCOUNTS, /* Modify existing account */ + DBD_MODIFY_ASSOCS, /* Modify existing association */ + DBD_MODIFY_CLUSTERS, /* Modify existing cluster */ + DBD_MODIFY_USERS, /* Modify existing user */ + DBD_NODE_STATE, /* Record node state transition */ + DBD_RC, /* Return code from operation */ + DBD_REGISTER_CTLD, /* Register a slurmctld's comm port */ + DBD_REMOVE_ACCOUNTS, /* Remove existing account */ + DBD_REMOVE_ACCOUNT_COORDS,/* Remove existing coordinatior from + * an account */ + DBD_REMOVE_ASSOCS, /* Remove existing association */ + DBD_REMOVE_CLUSTERS, /* Remove existing cluster */ + DBD_REMOVE_USERS, /* Remove existing user */ + DBD_ROLL_USAGE, /* Roll up usage */ + DBD_STEP_COMPLETE, /* Record step completion */ + DBD_STEP_START, /* Record step starting */ + DBD_UPDATE_SHARES_USED /* Record current share usage */ +} slurmdbd_msg_type_t; + +/*****************************************************************************\ + * Slurm DBD protocol data structures +\*****************************************************************************/ + +typedef 
struct slurmdbd_msg { + uint16_t msg_type; /* see slurmdbd_msg_type_t above */ + void * data; /* pointer to a message type below */ +} slurmdbd_msg_t; + +typedef struct { + char *acct; + acct_user_cond_t *cond; +} dbd_acct_coord_msg_t; + +typedef struct dbd_cluster_procs_msg { + char *cluster_name; /* name of cluster */ + uint32_t proc_count; /* total processor count */ + time_t event_time; /* time of transition */ +} dbd_cluster_procs_msg_t; + +typedef struct { + void *cond; /* this could be anything based on the type types + * are defined in slurm_accounting_storage.h + * *_cond_t */ +} dbd_cond_msg_t; + +typedef struct { + time_t start; +} dbd_roll_usage_msg_t; + +typedef struct { + void *rec; + time_t start; + time_t end; +} dbd_usage_msg_t; + +typedef struct dbd_get_jobs_msg { + char *cluster_name; /* name of cluster to query */ + uint16_t completion; /* get job completion records instead + * of accounting record */ + uint32_t gid; /* group id */ + time_t last_update; /* time of latest info */ + List selected_steps; /* List of jobacct_selected_step_t *'s */ + List selected_parts; /* List of char *'s */ + char *user; /* user name */ +} dbd_get_jobs_msg_t; + +typedef struct dbd_init_msg { + uint16_t rollback; /* to allow rollbacks or not */ + uint16_t version; /* protocol version */ + uint32_t uid; /* UID originating connection, + * filled by authtentication plugin*/ +} dbd_init_msg_t; + +typedef struct dbd_fini_msg { + uint16_t close_conn; /* to close connection 1, 0 will keep + connection open */ + uint16_t commit; /* to rollback(0) or commit(1) changes */ +} dbd_fini_msg_t; + +typedef struct dbd_job_comp_msg { + uint32_t assoc_id; /* accounting association id needed to + * find job record in db */ + uint32_t db_index; /* index into the db for this job */ + time_t end_time; /* job termintation time */ + uint32_t exit_code; /* job exit code or signal */ + uint32_t job_id; /* job ID */ + uint16_t job_state; /* job state */ + char * nodes; /* hosts allocated to the job */ + time_t start_time; /* job start time */ + time_t submit_time; /* job submit time needed to find job + * record in db */ +} dbd_job_comp_msg_t; + +typedef struct dbd_job_start_msg { + char * account; /* Account name for those not running + * with associations */ + uint32_t alloc_cpus; /* count of allocated processors */ + uint32_t assoc_id; /* accounting association id */ + char * block_id; /* Bluegene block id */ + uint32_t db_index; /* index into the db for this job */ + time_t eligible_time; /* time job becomes eligible to run */ + uint32_t gid; /* group ID */ + uint32_t job_id; /* job ID */ + uint16_t job_state; /* job state */ + char * name; /* job name */ + char * nodes; /* hosts allocated to the job */ + char * partition; /* partition job is running on */ + uint32_t priority; /* job priority */ + uint32_t req_cpus; /* count of req processors */ + time_t start_time; /* job start time */ + time_t submit_time; /* job submit time */ + uint32_t uid; /* user ID if associations are being used */ +} dbd_job_start_msg_t; + +typedef struct dbd_job_start_rc_msg { + uint32_t db_index; /* db_index */ + uint32_t return_code; +} dbd_job_start_rc_msg_t; + +typedef struct dbd_job_suspend_msg { + uint32_t assoc_id; /* accounting association id needed + * to find job record in db */ + uint32_t db_index; /* index into the db for this job */ + uint32_t job_id; /* job ID needed to find job record + * in db */ + uint16_t job_state; /* job state */ + time_t submit_time; /* job submit time needed to find job record + * in db */ + time_t 
suspend_time; /* job suspend or resume time */ +} dbd_job_suspend_msg_t; + +typedef struct { + List my_list; /* this list could be of any type as long as it + * is handled correctly on both ends */ +} dbd_list_msg_t; + +typedef struct { + void *cond; + void *rec; +} dbd_modify_msg_t; + +#define DBD_NODE_STATE_DOWN 1 +#define DBD_NODE_STATE_UP 2 +typedef struct dbd_node_state_msg { + char *cluster_name; /* name of cluster */ + uint32_t cpu_count; /* number of cpus on node */ + time_t event_time; /* time of transition */ + char *hostlist; /* name of hosts */ + uint16_t new_state; /* new state of host, see DBD_NODE_STATE_* */ + char *reason; /* explanation for the node's state */ +} dbd_node_state_msg_t; + +typedef struct dbd_rc_msg { + char * comment; /* reason for failure */ + uint32_t return_code; + uint16_t sent_type; /* type of message this is in response to */ +} dbd_rc_msg_t; + +typedef struct dbd_register_ctld_msg { + char *cluster_name; /* name of cluster */ + uint16_t port; /* slurmctld's comm port */ +} dbd_register_ctld_msg_t; + +typedef struct dbd_step_comp_msg { + uint32_t assoc_id; /* accounting association id */ + uint32_t db_index; /* index into the db for this job */ + time_t end_time; /* job termintation time */ + jobacctinfo_t *jobacct; /* status info */ + uint32_t job_id; /* job ID */ + uint32_t req_uid; /* requester user ID */ + time_t start_time; /* step start time */ + time_t job_submit_time;/* job submit time needed to find job record + * in db */ + uint32_t step_id; /* step ID */ + uint32_t total_procs; /* count of allocated processors */ +} dbd_step_comp_msg_t; + +typedef struct dbd_step_start_msg { + uint32_t assoc_id; /* accounting association id */ + uint32_t db_index; /* index into the db for this job */ + uint32_t job_id; /* job ID */ + char * name; /* step name */ + char * nodes; /* hosts allocated to the step */ + time_t start_time; /* step start time */ + time_t job_submit_time;/* job submit time needed to find job record + * in db */ + uint32_t step_id; /* step ID */ + uint32_t total_procs; /* count of allocated processors */ +} dbd_step_start_msg_t; + +/*****************************************************************************\ + * Slurm DBD message processing functions +\*****************************************************************************/ + +/* Open a socket connection to SlurmDbd + * auth_info IN - alternate authentication key + * make_agent IN - make agent to process RPCs if set + * rollback IN - keep journal and permit rollback if set + * Returns SLURM_SUCCESS or an error code */ +extern int slurm_open_slurmdbd_conn(char *auth_info, bool make_agent, + bool rollback); + +/* Close the SlurmDBD socket connection */ +extern int slurm_close_slurmdbd_conn(); + +/* Send an RPC to the SlurmDBD. Do not wait for the reply. The RPC + * will be queued and processed later if the SlurmDBD is not responding. + * NOTE: slurm_open_slurmdbd_conn() must have been called with make_agent set + * + * Returns SLURM_SUCCESS or an error code */ +extern int slurm_send_slurmdbd_msg(slurmdbd_msg_t *req); + +/* Send an RPC to the SlurmDBD and wait for an arbitrary reply message. + * The RPC will not be queued if an error occurs. + * The "resp" message must be freed by the caller. + * Returns SLURM_SUCCESS or an error code */ +extern int slurm_send_recv_slurmdbd_msg(slurmdbd_msg_t *req, + slurmdbd_msg_t *resp); + +/* Send an RPC to the SlurmDBD and wait for the return code reply. + * The RPC will not be queued if an error occurs. 
+ * Returns SLURM_SUCCESS or an error code */ +extern int slurm_send_slurmdbd_recv_rc_msg(slurmdbd_msg_t *req, int *rc); + +extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req); +extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp, Buf buffer); +/*****************************************************************************\ + * Free various SlurmDBD message structures +\*****************************************************************************/ +void inline slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg); +void inline slurmdbd_free_cluster_procs_msg(dbd_cluster_procs_msg_t *msg); +void inline slurmdbd_free_cond_msg(slurmdbd_msg_type_t type, + dbd_cond_msg_t *msg); +void inline slurmdbd_free_get_jobs_msg(dbd_get_jobs_msg_t *msg); +void inline slurmdbd_free_init_msg(dbd_init_msg_t *msg); +void inline slurmdbd_free_fini_msg(dbd_fini_msg_t *msg); +void inline slurmdbd_free_job_complete_msg(dbd_job_comp_msg_t *msg); +void inline slurmdbd_free_job_start_msg(dbd_job_start_msg_t *msg); +void inline slurmdbd_free_job_start_rc_msg(dbd_job_start_rc_msg_t *msg); +void inline slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg); +void inline slurmdbd_free_list_msg(dbd_list_msg_t *msg); +void inline slurmdbd_free_modify_msg(slurmdbd_msg_type_t type, + dbd_modify_msg_t *msg); +void inline slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg); +void inline slurmdbd_free_rc_msg(dbd_rc_msg_t *msg); +void inline slurmdbd_free_register_ctld_msg(dbd_register_ctld_msg_t *msg); +void inline slurmdbd_free_roll_usage_msg(dbd_roll_usage_msg_t *msg); +void inline slurmdbd_free_step_complete_msg(dbd_step_comp_msg_t *msg); +void inline slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg); +void inline slurmdbd_free_usage_msg(slurmdbd_msg_type_t type, + dbd_usage_msg_t *msg); + +/*****************************************************************************\ + * Pack various SlurmDBD message structures into a buffer +\*****************************************************************************/ +void inline slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_cluster_procs_msg(dbd_cluster_procs_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_cond_msg(slurmdbd_msg_type_t type, + dbd_cond_msg_t *msg, Buf buffer); +void inline slurmdbd_pack_get_jobs_msg(dbd_get_jobs_msg_t *msg, Buf buffer); +void inline slurmdbd_pack_init_msg(dbd_init_msg_t *msg, Buf buffer, + char *auth_info); +void inline slurmdbd_pack_fini_msg(dbd_fini_msg_t *msg, Buf buffer); +void inline slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_job_start_msg(dbd_job_start_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_job_start_rc_msg(dbd_job_start_rc_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_list_msg(slurmdbd_msg_type_t type, + dbd_list_msg_t *msg, Buf buffer); +void inline slurmdbd_pack_modify_msg(slurmdbd_msg_type_t type, + dbd_modify_msg_t *msg, Buf buffer); +void inline slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_rc_msg(dbd_rc_msg_t *msg, Buf buffer); +void inline slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg, Buf buffer); +void inline slurmdbd_pack_step_complete_msg(dbd_step_comp_msg_t *msg, + Buf buffer); +void inline slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, + Buf 
buffer); +void inline slurmdbd_pack_usage_msg(slurmdbd_msg_type_t type, + dbd_usage_msg_t *msg, Buf buffer); + +/*****************************************************************************\ + * Unpack various SlurmDBD message structures from a buffer +\*****************************************************************************/ +int inline slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_cluster_procs_msg(dbd_cluster_procs_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_cond_msg(slurmdbd_msg_type_t type, + dbd_cond_msg_t **msg, Buf buffer); +int inline slurmdbd_unpack_get_jobs_msg(dbd_get_jobs_msg_t **msg, Buf buffer); +int inline slurmdbd_unpack_init_msg(dbd_init_msg_t **msg, Buf buffer, + char *auth_info); +int inline slurmdbd_unpack_fini_msg(dbd_fini_msg_t **msg, Buf buffer); +int inline slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_job_start_msg(dbd_job_start_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_job_start_rc_msg(dbd_job_start_rc_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_list_msg(slurmdbd_msg_type_t type, + dbd_list_msg_t **msg, Buf buffer); +int inline slurmdbd_unpack_modify_msg(slurmdbd_msg_type_t type, + dbd_modify_msg_t **msg, Buf buffer); +int inline slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_rc_msg(dbd_rc_msg_t **msg, Buf buffer); +int inline slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_step_complete_msg(dbd_step_comp_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg, + Buf buffer); +int inline slurmdbd_unpack_usage_msg(slurmdbd_msg_type_t type, + dbd_usage_msg_t **msg, + Buf buffer); + +#endif /* !_SLURMDBD_DEFS_H */ diff --git a/src/common/stepd_api.c b/src/common/stepd_api.c index 57946193e..88374de26 100644 --- a/src/common/stepd_api.c +++ b/src/common/stepd_api.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/stepd_api.c - slurmstepd message API - * $Id: stepd_api.c 12808 2007-12-11 17:25:08Z jette $ + * $Id: stepd_api.c 13695 2008-03-21 21:28:17Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -58,7 +58,7 @@ #include "src/common/pack.h" #include "src/common/slurm_auth.h" #include "src/common/slurm_cred.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/common/list.h" #include "src/common/slurm_protocol_api.h" #include "src/common/read_config.h" @@ -211,7 +211,7 @@ stepd_connect(const char *directory, const char *nodename, buffer = init_buf(0); /* Create an auth credential */ - auth_cred = g_slurm_auth_create(NULL, 2); + auth_cred = g_slurm_auth_create(NULL, 2, NULL); if (auth_cred == NULL) { error("Creating authentication credential: %s", g_slurm_auth_errstr(g_slurm_auth_errno(NULL))); @@ -289,6 +289,7 @@ stepd_get_info(int fd) safe_read(fd, &info->jobid, sizeof(uint32_t)); safe_read(fd, &info->stepid, sizeof(uint32_t)); safe_read(fd, &info->nodeid, sizeof(uint32_t)); + safe_read(fd, &info->job_mem_limit, sizeof(uint32_t)); return info; rwfail: @@ -315,6 +316,26 @@ rwfail: return -1; } +/* + * Send a checkpoint request to all tasks of a job step. + */ +int +stepd_checkpoint(int fd, int signal, time_t timestamp) +{ + int req = REQUEST_CHECKPOINT_TASKS; + int rc; + + safe_write(fd, &req, sizeof(int)); + safe_write(fd, &signal, sizeof(int)); + safe_write(fd, ×tamp, sizeof(time_t)); + + /* Receive the return code */ + safe_read(fd, &rc, sizeof(int)); + return rc; + rwfail: + return -1; +} + /* * Send a signal to a single task in a job step. */ @@ -647,21 +668,21 @@ rwfail: return (pid_t)-1; } -/* - * Suspend execution of the job step. Only root or SlurmUser is - * authorized to use this call. - * - * Returns SLURM_SUCCESS is successful. On error returns SLURM_ERROR - * and sets errno. - */ int -stepd_suspend(int fd) +_step_suspend_write(int fd) { int req = REQUEST_STEP_SUSPEND; - int rc; - int errnum = 0; safe_write(fd, &req, sizeof(int)); + return 0; +rwfail: + return -1; +} + +int +_step_suspend_read(int fd) +{ + int rc, errnum = 0; /* Receive the return code and errno */ safe_read(fd, &rc, sizeof(int)); @@ -673,6 +694,43 @@ rwfail: return -1; } + +/* + * Suspend execution of the job step. Only root or SlurmUser is + * authorized to use this call. Since this activity includes a 'sleep 1' + * in the slurmstepd, initiate the the "suspend" in parallel + * + * Returns SLURM_SUCCESS is successful. On error returns SLURM_ERROR + * and sets errno. + */ +int +stepd_suspend(int *fd, int size, uint32_t jobid) +{ + int i; + int rc = 0; + + for (i = 0; i < size; i++) { + debug2("Suspending job %u cached step count %d", jobid, i); + if (_step_suspend_write(fd[i]) < 0) { + debug(" suspend send failed: job %u (%d): %m", + jobid, i); + close(fd[i]); + fd[i] = -1; + rc = -1; + } + } + for (i = 0; i < size; i++) { + if (fd[i] == -1) + continue; + if (_step_suspend_read(fd[i]) < 0) { + debug(" resume failed for cached step count %d: %m", + i); + rc = -1; + } + } + return rc; +} + /* * Resume execution of the job step that has been suspended by a * call to stepd_suspend(). 
Only root or SlurmUser is @@ -743,7 +801,7 @@ stepd_completion(int fd, step_complete_msg_t *sent) safe_write(fd, &sent->range_first, sizeof(int)); safe_write(fd, &sent->range_last, sizeof(int)); safe_write(fd, &sent->step_rc, sizeof(int)); - jobacct_g_setinfo(sent->jobacct, JOBACCT_DATA_PIPE, &fd); + jobacct_gather_g_setinfo(sent->jobacct, JOBACCT_DATA_PIPE, &fd); /* Receive the return code and errno */ safe_read(fd, &rc, sizeof(int)); safe_read(fd, &errnum, sizeof(int)); @@ -771,15 +829,16 @@ stepd_stat_jobacct(int fd, stat_jobacct_msg_t *sent, stat_jobacct_msg_t *resp) safe_write(fd, &req, sizeof(int)); /* Receive the jobacct struct and return */ - resp->jobacct = jobacct_g_alloc(NULL); + resp->jobacct = jobacct_gather_g_create(NULL); + + rc = jobacct_gather_g_getinfo(resp->jobacct, JOBACCT_DATA_PIPE, &fd); - rc = jobacct_g_getinfo(resp->jobacct, JOBACCT_DATA_PIPE, &fd); safe_read(fd, &tasks, sizeof(int)); resp->num_tasks = tasks; return rc; rwfail: error("an error occured %d", rc); - jobacct_g_free(resp->jobacct); + jobacct_gather_g_destroy(resp->jobacct); resp->jobacct = NULL; return rc; } diff --git a/src/common/stepd_api.h b/src/common/stepd_api.h index 230a39651..a160324ea 100644 --- a/src/common/stepd_api.h +++ b/src/common/stepd_api.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/stepd_api.h - slurmstepd message API - * $Id: stepd_api.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: stepd_api.h 13695 2008-03-21 21:28:17Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -44,6 +44,7 @@ #include "slurm/slurm.h" #include "src/common/list.h" #include "src/common/slurm_protocol_defs.h" +#include "src/common/io_hdr.h" typedef struct step_location { uint32_t jobid; @@ -83,6 +84,7 @@ typedef struct { uint32_t jobid; uint32_t stepid; uint32_t nodeid; + uint32_t job_mem_limit; /* job's memory limit, MB */ } slurmstepd_info_t; typedef struct { @@ -131,6 +133,11 @@ slurmstepd_info_t *stepd_get_info(int fd); */ int stepd_signal(int fd, int signal); +/* + * Send a checkpoint request to all tasks of a job step. + */ +int stepd_checkpoint(int fd, int signal, time_t timestamp); + /* * Send a signal to a single task in a job step. */ @@ -190,7 +197,7 @@ pid_t stepd_daemon_pid(int fd); * Returns SLURM_SUCCESS is successful. On error returns SLURM_ERROR * and sets errno. */ -int stepd_suspend(int fd); +int stepd_suspend(int *fd, int size, uint32_t jobid); /* * Resume execution of the job step that has been suspended by a @@ -212,7 +219,7 @@ int stepd_completion(int fd, step_complete_msg_t *sent); /* * * Returns SLURM_SUCCESS on success or SLURM_ERROR on error. - * resp recieves a jobacctinfo_t which must be freed if SUCCESS. + * resp receives a jobacctinfo_t which must be freed if SUCCESS. */ int stepd_stat_jobacct(int fd, stat_jobacct_msg_t *sent, stat_jobacct_msg_t *resp); diff --git a/src/common/switch.c b/src/common/switch.c index 15d3cf321..e3cc7fdf6 100644 --- a/src/common/switch.c +++ b/src/common/switch.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/switch.h b/src/common/switch.h index 94280364b..ec574710c 100644 --- a/src/common/switch.h +++ b/src/common/switch.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/timers.c b/src/common/timers.c index 06cdec28d..3f34a4598 100644 --- a/src/common/timers.c +++ b/src/common/timers.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/timers.h b/src/common/timers.h index 0404c4e67..b580270b6 100644 --- a/src/common/timers.h +++ b/src/common/timers.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/uid.c b/src/common/uid.c index de6f3ef58..299c98406 100644 --- a/src/common/uid.c +++ b/src/common/uid.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/uid.c - uid/gid lookup utility functions - * $Id: uid.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: uid.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/uid.h b/src/common/uid.h index cfc82dbf5..a7dabb2ca 100644 --- a/src/common/uid.h +++ b/src/common/uid.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/uid.h - uid/gid lookup utility functions - * $Id: uid.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: uid.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
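Aside on the stepd_api hunks above: the patch replaces the old single-connection stepd_suspend(int fd) with stepd_suspend(int *fd, int size, uint32_t jobid), which first writes the suspend request to every cached slurmstepd connection and only then collects the replies, so the per-step 'sleep 1' in slurmstepd overlaps across steps, and it adds stepd_checkpoint(fd, signal, timestamp). The following is a minimal caller sketch, illustrative only and not part of the patch; the function name suspend_then_checkpoint and the fds/nfds/signal parameters are assumed to be supplied by the caller, e.g. from earlier stepd_connect() calls.

/* Illustrative sketch (not in the patch): suspend every cached step of a
 * job with the new array-based API, then ask the first step to checkpoint
 * all of its tasks. */
#include <time.h>
#include "src/common/stepd_api.h"

static int suspend_then_checkpoint(int *fds, int nfds, uint32_t jobid,
				   int signal)
{
	/* stepd_suspend() now does one write pass over all fds, then one
	 * read pass; it returns SLURM_SUCCESS (0) only if every step
	 * acknowledged the suspend, otherwise SLURM_ERROR */
	if (stepd_suspend(fds, nfds, jobid) < 0)
		return -1;

	/* forward a checkpoint request (signal plus timestamp) to all
	 * tasks of the first cached step; the timestamp value here is
	 * just a placeholder chosen by the caller */
	return stepd_checkpoint(fds[0], signal, time(NULL));
}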
diff --git a/src/common/unsetenv.c b/src/common/unsetenv.c index a888df44a..5fd8b4cb4 100644 --- a/src/common/unsetenv.c +++ b/src/common/unsetenv.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/unsetenv.h b/src/common/unsetenv.h index 10db70a8e..4d98de3ab 100644 --- a/src/common/unsetenv.h +++ b/src/common/unsetenv.h @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/xassert.c b/src/common/xassert.c index 33b91cafe..2b2d2362c 100644 --- a/src/common/xassert.c +++ b/src/common/xassert.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * xassert.c - replacement for assert which sends error to log instead * of stderr - * $Id: xassert.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: xassert.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/xassert.h b/src/common/xassert.h index 45b8bb338..2e5e6fb44 100644 --- a/src/common/xassert.h +++ b/src/common/xassert.h @@ -2,12 +2,12 @@ * xassert.h: assert type macro with configurable handling * If NDEBUG is defined, do nothing. * If not, and expression is zero, log an error message and abort. - * $Id: xassert.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: xassert.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/xmalloc.c b/src/common/xmalloc.c index 41a617718..30a2075b7 100644 --- a/src/common/xmalloc.c +++ b/src/common/xmalloc.c @@ -2,13 +2,13 @@ * xmalloc.c - enhanced malloc routines * Started with Jim Garlick's xmalloc and tied into slurm log facility. * Also added ability to print file, line, and function of caller. - * $Id: xmalloc.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: xmalloc.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick1@llnl.gov> and * Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/xmalloc.h b/src/common/xmalloc.h index 690f9c13a..4d8c796ac 100644 --- a/src/common/xmalloc.h +++ b/src/common/xmalloc.h @@ -3,13 +3,13 @@ * - default: never return if errors are encountered. * - attempt to report file, line, and calling function on assertion failure * - use configurable slurm log facility for reporting errors - * $Id: xmalloc.h 11400 2007-04-24 18:50:38Z da $ + * $Id: xmalloc.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick1@llnl.gov> and * Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/xsignal.c b/src/common/xsignal.c index 81fa3c317..fcc5e577f 100644 --- a/src/common/xsignal.c +++ b/src/common/xsignal.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/xsignal.h b/src/common/xsignal.h index 6734648c1..ad66edb77 100644 --- a/src/common/xsignal.h +++ b/src/common/xsignal.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/common/xsignal.h - POSIX signal wrapper functions - * $Id: xsignal.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: xsignal.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/common/xstring.c b/src/common/xstring.c index 765ed7928..ec6652814 100644 --- a/src/common/xstring.c +++ b/src/common/xstring.c @@ -7,7 +7,7 @@ * Written by Jim Garlick <garlick@llnl.gov> * Mark Grondona <grondona@llnl.gov>, et al. * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -78,6 +78,7 @@ strong_alias(_xstrftimecat, slurm_xstrftimecat); strong_alias(_xstrfmtcat, slurm_xstrfmtcat); strong_alias(_xmemcat, slurm_xmemcat); strong_alias(xstrdup, slurm_xstrdup); +strong_alias(xstrdup_printf, slurm_xstrdup_printf); strong_alias(xstrndup, slurm_xstrndup); strong_alias(xbasename, slurm_xbasename); strong_alias(_xstrsubstitute, slurm_xstrsubstitute); @@ -196,6 +197,9 @@ void _xstrftimecat(char **buf, const char *fmt) */ int _xstrfmtcat(char **str, const char *fmt, ...) { + /* This code is the same as xstrdup_printf, but couldn't + * figure out how to pass the ... so just copied the code + */ /* Start out with a size of 100 bytes. 
*/ int n, size = 100; char *p = NULL; @@ -288,6 +292,38 @@ char * xstrdup(const char *str) return result; } +/* + * Give me a copy of the string as if it were printf. + * fmt (IN) format of string and args if any + * RETURN copy of formated string + */ +char *xstrdup_printf(const char *fmt, ...) +{ + /* Start out with a size of 100 bytes. */ + int n, size = 100; + char *p = NULL; + va_list ap; + + if((p = xmalloc(size)) == NULL) + return NULL; + while(1) { + /* Try to print in the allocated space. */ + va_start(ap, fmt); + n = vsnprintf(p, size, fmt, ap); + va_end (ap); + /* If that worked, return the string. */ + if (n > -1 && n < size) + return p; + /* Else try again with more space. */ + if (n > -1) /* glibc 2.1 */ + size = n + 1; /* precisely what is needed */ + else /* glibc 2.0 */ + size *= 2; /* twice the old size */ + if ((p = xrealloc(p, size)) == NULL) + return NULL; + } +} + /* * Duplicate at most "n" characters of a string. * str (IN) string to duplicate diff --git a/src/common/xstring.h b/src/common/xstring.h index b29056c42..0345b5adb 100644 --- a/src/common/xstring.h +++ b/src/common/xstring.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -98,6 +98,11 @@ void _xmemcat(char **str, char *start, char *end); */ char *xstrdup(const char *str); +/* +** strdup formatted which uses xmalloc routines +*/ +char *xstrdup_printf(const char *fmt, ...); + /* ** strndup which uses xmalloc routines */ diff --git a/src/database/Makefile.am b/src/database/Makefile.am new file mode 100644 index 000000000..c1538b051 --- /dev/null +++ b/src/database/Makefile.am @@ -0,0 +1,39 @@ +# Makefile for database library + +AUTOMAKE_OPTIONS = foreign + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +if HAVE_OPENSSL + +noinst_LTLIBRARIES = \ + libslurm_mysql.la \ + libslurm_pgsql.la \ + libslurm_gold.la + +libslurm_gold_la_SOURCES = gold_interface.c gold_interface.h \ + base64.c base64.h +libslurm_gold_la_LIBADD = $(SSL_LIBS) +libslurm_gold_la_LDFLAGS = $(LIB_LDFLAGS) $(SSL_LDFLAGS) +libslurm_gold_la_CFLAGS = $(SSL_CPPFLAGS) + +else + +noinst_LTLIBRARIES = \ + libslurm_mysql.la \ + libslurm_pgsql.la + +endif + +libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h +libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h + +libslurm_mysql_la_LIBADD = $(MYSQL_LIBS) +libslurm_pgsql_la_LIBADD = $(PGSQL_LIBS) + +libslurm_mysql_la_LDFLAGS = $(LIB_LDFLAGS) +libslurm_pgsql_la_LDFLAGS = $(LIB_LDFLAGS) + +libslurm_mysql_la_CFLAGS = $(MYSQL_CFLAGS) +libslurm_pgsql_la_CFLAGS = $(PGSQL_CFLAGS) + diff --git a/src/database/Makefile.in b/src/database/Makefile.in new file mode 100644 index 000000000..2861f5f4e --- /dev/null +++ b/src/database/Makefile.in @@ -0,0 +1,604 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for database library + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/database +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +LTLIBRARIES = $(noinst_LTLIBRARIES) +am__DEPENDENCIES_1 = +@HAVE_OPENSSL_TRUE@libslurm_gold_la_DEPENDENCIES = \ +@HAVE_OPENSSL_TRUE@ $(am__DEPENDENCIES_1) +am__libslurm_gold_la_SOURCES_DIST = gold_interface.c gold_interface.h \ + base64.c base64.h +@HAVE_OPENSSL_TRUE@am_libslurm_gold_la_OBJECTS = \ +@HAVE_OPENSSL_TRUE@ libslurm_gold_la-gold_interface.lo \ +@HAVE_OPENSSL_TRUE@ libslurm_gold_la-base64.lo +libslurm_gold_la_OBJECTS = $(am_libslurm_gold_la_OBJECTS) +libslurm_gold_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(libslurm_gold_la_CFLAGS) \ + $(CFLAGS) $(libslurm_gold_la_LDFLAGS) $(LDFLAGS) -o $@ +@HAVE_OPENSSL_TRUE@am_libslurm_gold_la_rpath = +libslurm_mysql_la_DEPENDENCIES = $(am__DEPENDENCIES_1) +am_libslurm_mysql_la_OBJECTS = libslurm_mysql_la-mysql_common.lo +libslurm_mysql_la_OBJECTS = $(am_libslurm_mysql_la_OBJECTS) +libslurm_mysql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(libslurm_mysql_la_CFLAGS) $(CFLAGS) \ + $(libslurm_mysql_la_LDFLAGS) $(LDFLAGS) -o $@ +@HAVE_OPENSSL_FALSE@am_libslurm_mysql_la_rpath = +@HAVE_OPENSSL_TRUE@am_libslurm_mysql_la_rpath = +libslurm_pgsql_la_DEPENDENCIES = $(am__DEPENDENCIES_1) +am_libslurm_pgsql_la_OBJECTS = libslurm_pgsql_la-pgsql_common.lo +libslurm_pgsql_la_OBJECTS = $(am_libslurm_pgsql_la_OBJECTS) 
+libslurm_pgsql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(libslurm_pgsql_la_CFLAGS) $(CFLAGS) \ + $(libslurm_pgsql_la_LDFLAGS) $(LDFLAGS) -o $@ +@HAVE_OPENSSL_FALSE@am_libslurm_pgsql_la_rpath = +@HAVE_OPENSSL_TRUE@am_libslurm_pgsql_la_rpath = +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(libslurm_gold_la_SOURCES) $(libslurm_mysql_la_SOURCES) \ + $(libslurm_pgsql_la_SOURCES) +DIST_SOURCES = $(am__libslurm_gold_la_SOURCES_DIST) \ + $(libslurm_mysql_la_SOURCES) $(libslurm_pgsql_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ 
+SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +@HAVE_OPENSSL_FALSE@noinst_LTLIBRARIES = \ +@HAVE_OPENSSL_FALSE@ libslurm_mysql.la \ +@HAVE_OPENSSL_FALSE@ libslurm_pgsql.la + +@HAVE_OPENSSL_TRUE@noinst_LTLIBRARIES = \ +@HAVE_OPENSSL_TRUE@ libslurm_mysql.la \ +@HAVE_OPENSSL_TRUE@ libslurm_pgsql.la \ +@HAVE_OPENSSL_TRUE@ libslurm_gold.la + +@HAVE_OPENSSL_TRUE@libslurm_gold_la_SOURCES = gold_interface.c gold_interface.h \ +@HAVE_OPENSSL_TRUE@ base64.c base64.h + +@HAVE_OPENSSL_TRUE@libslurm_gold_la_LIBADD = $(SSL_LIBS) +@HAVE_OPENSSL_TRUE@libslurm_gold_la_LDFLAGS = $(LIB_LDFLAGS) $(SSL_LDFLAGS) +@HAVE_OPENSSL_TRUE@libslurm_gold_la_CFLAGS = $(SSL_CPPFLAGS) +libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h +libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h +libslurm_mysql_la_LIBADD = $(MYSQL_LIBS) +libslurm_pgsql_la_LIBADD = $(PGSQL_LIBS) +libslurm_mysql_la_LDFLAGS = $(LIB_LDFLAGS) +libslurm_pgsql_la_LDFLAGS = $(LIB_LDFLAGS) +libslurm_mysql_la_CFLAGS = $(MYSQL_CFLAGS) +libslurm_pgsql_la_CFLAGS = $(PGSQL_CFLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/database/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign 
src/database/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +libslurm_gold.la: $(libslurm_gold_la_OBJECTS) $(libslurm_gold_la_DEPENDENCIES) + $(libslurm_gold_la_LINK) $(am_libslurm_gold_la_rpath) $(libslurm_gold_la_OBJECTS) $(libslurm_gold_la_LIBADD) $(LIBS) +libslurm_mysql.la: $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_DEPENDENCIES) + $(libslurm_mysql_la_LINK) $(am_libslurm_mysql_la_rpath) $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_LIBADD) $(LIBS) +libslurm_pgsql.la: $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_DEPENDENCIES) + $(libslurm_pgsql_la_LINK) $(am_libslurm_pgsql_la_rpath) $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_gold_la-base64.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_gold_la-gold_interface.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_mysql_la-mysql_common.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/libslurm_pgsql_la-pgsql_common.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +libslurm_gold_la-gold_interface.lo: gold_interface.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -MT libslurm_gold_la-gold_interface.lo -MD -MP -MF $(DEPDIR)/libslurm_gold_la-gold_interface.Tpo -c -o libslurm_gold_la-gold_interface.lo `test -f 'gold_interface.c' || echo '$(srcdir)/'`gold_interface.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libslurm_gold_la-gold_interface.Tpo $(DEPDIR)/libslurm_gold_la-gold_interface.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='gold_interface.c' object='libslurm_gold_la-gold_interface.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -c -o libslurm_gold_la-gold_interface.lo `test -f 'gold_interface.c' || echo '$(srcdir)/'`gold_interface.c + +libslurm_gold_la-base64.lo: base64.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -MT libslurm_gold_la-base64.lo -MD -MP -MF $(DEPDIR)/libslurm_gold_la-base64.Tpo -c -o libslurm_gold_la-base64.lo `test -f 'base64.c' || echo '$(srcdir)/'`base64.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libslurm_gold_la-base64.Tpo $(DEPDIR)/libslurm_gold_la-base64.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='base64.c' object='libslurm_gold_la-base64.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_gold_la_CFLAGS) $(CFLAGS) -c -o libslurm_gold_la-base64.lo `test -f 'base64.c' || echo '$(srcdir)/'`base64.c + +libslurm_mysql_la-mysql_common.lo: mysql_common.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_mysql_la_CFLAGS) $(CFLAGS) -MT libslurm_mysql_la-mysql_common.lo -MD -MP -MF $(DEPDIR)/libslurm_mysql_la-mysql_common.Tpo -c -o libslurm_mysql_la-mysql_common.lo `test -f 'mysql_common.c' || echo '$(srcdir)/'`mysql_common.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/libslurm_mysql_la-mysql_common.Tpo $(DEPDIR)/libslurm_mysql_la-mysql_common.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='mysql_common.c' object='libslurm_mysql_la-mysql_common.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_mysql_la_CFLAGS) $(CFLAGS) -c -o libslurm_mysql_la-mysql_common.lo `test -f 'mysql_common.c' || echo '$(srcdir)/'`mysql_common.c + +libslurm_pgsql_la-pgsql_common.lo: pgsql_common.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_pgsql_la_CFLAGS) $(CFLAGS) -MT libslurm_pgsql_la-pgsql_common.lo -MD -MP -MF $(DEPDIR)/libslurm_pgsql_la-pgsql_common.Tpo -c -o libslurm_pgsql_la-pgsql_common.lo `test -f 'pgsql_common.c' || echo '$(srcdir)/'`pgsql_common.c +@am__fastdepCC_TRUE@ mv -f 
$(DEPDIR)/libslurm_pgsql_la-pgsql_common.Tpo $(DEPDIR)/libslurm_pgsql_la-pgsql_common.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pgsql_common.c' object='libslurm_pgsql_la-pgsql_common.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(libslurm_pgsql_la_CFLAGS) $(CFLAGS) -c -o libslurm_pgsql_la-pgsql_common.lo `test -f 'pgsql_common.c' || echo '$(srcdir)/'`pgsql_common.c + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am 
+install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-noinstLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/src/plugins/jobacct/gold/base64.c b/src/database/base64.c similarity index 97% rename from src/plugins/jobacct/gold/base64.c rename to src/database/base64.c index 803f2b053..92a8d8446 100644 --- a/src/plugins/jobacct/gold/base64.c +++ b/src/database/base64.c @@ -74,7 +74,7 @@ extern unsigned char *encode_base64(const unsigned char* in_str, rlen++; /* for the eol */ ret_str = xmalloc(sizeof(unsigned char) * rlen); - debug2("encoding %s", in_str); + debug4("encoding %s", in_str); while (in_len--) { char_array_3[i++] = *(in_str++); @@ -111,7 +111,7 @@ extern unsigned char *encode_base64(const unsigned char* in_str, } - debug2("encoded %s", ret_str); + debug4("encoded %s", ret_str); return ret_str; } @@ -136,7 +136,7 @@ extern unsigned char *decode_base64(const unsigned char *in_str) int rlen = in_len * 3 / 4; /* always enough, but sometimes too * much */ - debug2("decoding %s", in_str); + debug4("decoding %s", in_str); ret_str = xmalloc(sizeof(unsigned char) * rlen); memset(ret_str, 0, rlen); @@ -193,7 +193,7 @@ extern unsigned char *decode_base64(const unsigned char *in_str) ret_str[pos++] = char_array_3[j]; } - debug2("decoded %s", ret_str); + debug4("decoded %s", ret_str); return ret_str; } diff --git a/src/plugins/jobacct/gold/base64.h b/src/database/base64.h similarity index 100% rename from src/plugins/jobacct/gold/base64.h rename to src/database/base64.h diff --git a/src/plugins/jobacct/gold/gold_interface.c b/src/database/gold_interface.c similarity index 90% rename from src/plugins/jobacct/gold/gold_interface.c rename to src/database/gold_interface.c index 8ca66f540..f39ad68a0 100644 --- a/src/plugins/jobacct/gold/gold_interface.c +++ b/src/database/gold_interface.c @@ -44,10 +44,28 @@ #include "src/common/slurm_protocol_interface.h" #include "src/common/slurm_protocol_api.h" +#include "src/common/uid.h" #define MAX_RETRY 5 -static char *gold_machine = NULL; +/* This should be updated to match the gold_object_t enum */ +char *GOLD_OBJECT_STR[] = { + "Account", + "User", + "Project", + "Machine", + "Job", + "RoleUser", + "EventLog", + "MachineHourUsage", + "MachineDayUsage", + "MachineMonthUsage", + "AccountHourUsage", + "AccountDayUsage", + "AccountMonthUsage", + NULL +}; + static char *gold_key = NULL; static char *gold_host = NULL; static uint16_t gold_port = 0; @@ -120,8 +138,8 @@ static gold_response_entry_t *_create_response_entry(char *object, name_val->name = _get_return_name(gold_msg, i); name_val->value = _get_return_value(gold_msg, i); - debug3("got %s = %s", name_val->name, name_val->value); - list_push(resp_entry->name_val, name_val); + debug4("got %s = %s", name_val->name, name_val->value); + list_append(resp_entry->name_val, name_val); } (*i)++; } @@ -181,20 +199,22 @@ static int _end_communication(slurm_fd gold_fd) return rc; } -extern int init_gold(char *machine, char *keyfile, char *host, uint16_t port) +extern int init_gold(char *keyfile, char *host, uint16_t port) { int fp; char key[256]; int i, bytes_read; - if(!keyfile || !host || !machine) { - error("init_gold: Either no keyfile or host or machine given"); + if(!keyfile || !host) { + error("init_gold: Either no keyfile or host given"); return SLURM_ERROR; } fp = open(keyfile, O_RDONLY); - bytes_read = read(fp, key, sizeof(key)); - if ( bytes_read == -1) { + if (fp < 0) + fatal("Error opening gold keyfile (%s): %m\n", keyfile); + bytes_read = read(fp, key, sizeof(key) - 1); + if (bytes_read == -1) { fatal("Error reading hash key from keyfile (%s): %m\n", keyfile); } @@ -210,7 +230,6 @@ 
extern int init_gold(char *machine, char *keyfile, char *host, uint16_t port) /* Close the file */ close(fp); //debug4("got the tolken as %s\n", key); - gold_machine = xstrdup(machine); gold_key = xstrdup(key); gold_host = xstrdup(host); gold_port = port; @@ -222,7 +241,6 @@ extern int init_gold(char *machine, char *keyfile, char *host, uint16_t port) extern int fini_gold() { gold_init = 0; - xfree(gold_machine); xfree(gold_key); xfree(gold_host); @@ -268,27 +286,29 @@ extern int gold_request_add_assignment(gold_request_t *gold_request, gold_name_value_t *name_val = xmalloc(sizeof(gold_name_value_t)); name_val->name = xstrdup(name); name_val->value = xstrdup(value); - list_push(gold_request->assignments, name_val); + list_append(gold_request->assignments, name_val); return SLURM_SUCCESS; } extern int gold_request_add_condition(gold_request_t *gold_request, char *name, char *value, - gold_operator_t op) + gold_operator_t op, + int or_statement) { gold_name_value_t *name_val = xmalloc(sizeof(gold_name_value_t)); name_val->name = xstrdup(name); name_val->value = xstrdup(value); name_val->op = op; - list_push(gold_request->conditions, name_val); + name_val->or_statement = or_statement; + list_append(gold_request->conditions, name_val); return SLURM_SUCCESS; } extern int gold_request_add_selection(gold_request_t *gold_request, char *name) { - list_push(gold_request->selections, xstrdup(name)); + list_append(gold_request->selections, xstrdup(name)); return SLURM_SUCCESS; } @@ -326,34 +346,12 @@ extern gold_response_t *get_gold_response(gold_request_t *gold_request) if(!timeout) timeout = (slurm_get_msg_timeout() * 1000); - - switch(gold_request->object) { - case GOLD_OBJECT_ACCOUNT: - object = GOLD_OBJECT_ACCOUNT_STR; - break; - case GOLD_OBJECT_USER: - object = GOLD_OBJECT_USER_STR; - break; - case GOLD_OBJECT_PROJECT: - object = GOLD_OBJECT_PROJECT_STR; - break; - case GOLD_OBJECT_MACHINE: - object = GOLD_OBJECT_MACHINE_STR; - break; - case GOLD_OBJECT_JOB: - object = GOLD_OBJECT_JOB_STR; - break; - case GOLD_OBJECT_EVENT: - object = GOLD_OBJECT_EVENT_STR; - break; - case GOLD_OBJECT_ROLEUSER: - object = GOLD_OBJECT_ROLEUSER_STR; - break; - default: + if(gold_request->object >= GOLD_OBJECT_COUNT) { error("get_gold_response: " "unsupported object %d", gold_request->object); return NULL; } + object = GOLD_OBJECT_STR[gold_request->object]; switch(gold_request->action) { case GOLD_ACTION_QUERY: @@ -394,6 +392,8 @@ extern gold_response_t *get_gold_response(gold_request_t *gold_request) itr = list_iterator_create(gold_request->conditions); while((name_val = list_next(itr))) { + xstrfmtcat(innerds, "<Where name=\"%s\"", name_val->name); + if(name_val->op != GOLD_OPERATOR_NONE) { char *op = NULL; switch (name_val->op) { @@ -418,20 +418,23 @@ extern gold_response_t *get_gold_response(gold_request_t *gold_request) list_iterator_destroy(itr); return NULL; } - xstrfmtcat(innerds, - "<Where name=\"%s\" op=\"%s\">%s</Where>", - name_val->name, op, name_val->value); - } else { - xstrfmtcat(innerds, "<Where name=\"%s\">%s</Where>", - name_val->name, name_val->value); - } + + xstrfmtcat(innerds, " op=\"%s\"", op); + } + + if(name_val->or_statement == 1) + xstrfmtcat(innerds, " conj=\"Or\" groups=\"-1\""); + else if (name_val->or_statement == 2) + xstrfmtcat(innerds, " conj=\"And\" groups=\"+1\""); + + xstrfmtcat(innerds, ">%s</Where>", name_val->value); } list_iterator_destroy(itr); xstrfmtcat(gold_request->body, "<Body><Request action=\"%s\" actor=\"%s\">" "<Object>%s</Object>", - action, "slurm", object); + 
action, uid_to_string(geteuid()), object); if(innerds) { xstrcat(gold_request->body, innerds); xfree(innerds); @@ -477,7 +480,7 @@ extern gold_response_t *get_gold_response(gold_request_t *gold_request) goto error; } - debug2("sending %d '%s'", rc, gold_msg); + debug3("sending %d '%s'", rc, gold_msg); xstrcat(gold_msg, "0\r\n"); rc = _slurm_send_timeout(gold_fd, gold_msg, strlen(gold_msg), @@ -518,7 +521,7 @@ extern gold_response_t *get_gold_response(gold_request_t *gold_request) tmp_buff[i] = '\0'; ret_len = xstrntol(tmp_buff, NULL, i, 16); - debug3("got size %d", ret_len); + debug4("got size %d", ret_len); gold_msg = xmalloc(ret_len+1); @@ -528,7 +531,7 @@ extern gold_response_t *get_gold_response(gold_request_t *gold_request) goto error; } - debug2("got back '%s'", gold_msg); + debug3("got back '%s'", gold_msg); if(_slurm_recv_timeout(gold_fd, tmp_buff, 3, 0, timeout) < 0) { error("get_gold_response: " "couldn't get the end of the message"); @@ -559,7 +562,7 @@ extern gold_response_t *get_gold_response(gold_request_t *gold_request) } else if(!strncmp(gold_msg+i, object, strlen(object))) { gold_response_entry_t *resp_entry = _create_response_entry(object, gold_msg, &i); - list_push(gold_response->entries, resp_entry); + list_append(gold_response->entries, resp_entry); } i++; } diff --git a/src/plugins/jobacct/gold/gold_interface.h b/src/database/gold_interface.h similarity index 82% rename from src/plugins/jobacct/gold/gold_interface.h rename to src/database/gold_interface.h index 5ee82ef9d..0ed77a11b 100644 --- a/src/plugins/jobacct/gold/gold_interface.h +++ b/src/database/gold_interface.h @@ -64,29 +64,45 @@ #define GOLD_ACTION_MODIFY_STR "Modify" #define GOLD_ACTION_DELETE_STR "Delete" -#define GOLD_OBJECT_ACCOUNT_STR "Account" +#define GOLD_OBJECT_ACCT_STR "Account" #define GOLD_OBJECT_USER_STR "User" #define GOLD_OBJECT_PROJECT_STR "Project" #define GOLD_OBJECT_MACHINE_STR "Machine" #define GOLD_OBJECT_JOB_STR "Job" #define GOLD_OBJECT_ROLEUSER_STR "RoleUser" #define GOLD_OBJECT_EVENT_STR "EventLog" +#define GOLD_OBJECT_MACHINE_HOUR_STR "MachineHourUsage" +#define GOLD_OBJECT_MACHINE_DAY_STR "MachineDayUsage" +#define GOLD_OBJECT_MACHINE_MONTH_STR "MachineMonthUsage" +#define GOLD_OBJECT_ACCT_HOUR_STR "AccountHourUsage" +#define GOLD_OBJECT_ACCT_DAY_STR "AccountDayUsage" +#define GOLD_OBJECT_ACCT_MONTH_STR "AccountMonthUsage" typedef enum { GOLD_ACTION_QUERY, GOLD_ACTION_CREATE, GOLD_ACTION_MODIFY, - GOLD_ACTION_DELETE + GOLD_ACTION_DELETE, + GOLD_ACTION_COUNT } gold_action_t; +/* When changing this you would also make GOLD_OBJECT_STR match + * defined in gold_interface.c */ typedef enum { - GOLD_OBJECT_ACCOUNT, + GOLD_OBJECT_ACCT, GOLD_OBJECT_USER, GOLD_OBJECT_PROJECT, GOLD_OBJECT_MACHINE, GOLD_OBJECT_JOB, GOLD_OBJECT_ROLEUSER, - GOLD_OBJECT_EVENT + GOLD_OBJECT_EVENT, + GOLD_OBJECT_MACHINE_HOUR_USAGE, + GOLD_OBJECT_MACHINE_DAY_USAGE, + GOLD_OBJECT_MACHINE_MONTH_USAGE, + GOLD_OBJECT_ACCT_HOUR_USAGE, + GOLD_OBJECT_ACCT_DAY_USAGE, + GOLD_OBJECT_ACCT_MONTH_USAGE, + GOLD_OBJECT_COUNT } gold_object_t; typedef enum { @@ -94,13 +110,15 @@ typedef enum { GOLD_OPERATOR_G, GOLD_OPERATOR_GE, GOLD_OPERATOR_L, - GOLD_OPERATOR_LE + GOLD_OPERATOR_LE, + GOLD_OPERATOR_COUNT } gold_operator_t; typedef struct { char *name; char *value; gold_operator_t op; + int or_statement; // 0 for nothing 1 for or last 2 for or next } gold_name_value_t; typedef struct { @@ -125,8 +143,9 @@ typedef struct { int rc; } gold_response_t; +extern char *GOLD_OBJECT_STR[]; -extern int init_gold(char *machine, char 
*keyfile, char *host, uint16_t port); +extern int init_gold(char *keyfile, char *host, uint16_t port); extern int fini_gold(); extern gold_request_t *create_gold_request(gold_object_t object, @@ -137,7 +156,8 @@ extern int gold_request_add_assignment(gold_request_t *gold_request, char *name, char *value); extern int gold_request_add_condition(gold_request_t *gold_request, char *name, char *value, - gold_operator_t op); + gold_operator_t op, + int or_statement); extern int gold_request_add_selection(gold_request_t *gold_request, char *name); extern gold_response_t *get_gold_response(gold_request_t *gold_request); diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c new file mode 100644 index 000000000..24e4de9ec --- /dev/null +++ b/src/database/mysql_common.c @@ -0,0 +1,428 @@ +/*****************************************************************************\ + * mysql_common.c - common functions for the the mysql storage plugin. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ + +#include "mysql_common.h" +#include "src/common/xmalloc.h" +#include "src/common/timers.h" +#include "src/common/slurm_protocol_api.h" + +pthread_mutex_t mysql_lock = PTHREAD_MUTEX_INITIALIZER; + +#ifdef HAVE_MYSQL + +static int _clear_results(MYSQL *mysql_db) +{ + MYSQL_RES *result = NULL; + int rc = 0; + do { + /* did current statement return data? */ + if((result = mysql_store_result(mysql_db))) + mysql_free_result(result); + + /* more results? 
-1 = no, >0 = error, 0 = yes (keep looping) */ + if ((rc = mysql_next_result(mysql_db)) > 0) + error("Could not execute statement %d %s\n", + mysql_errno(mysql_db), + mysql_error(mysql_db)); + } while (rc == 0); + + return SLURM_SUCCESS; +} + +static MYSQL_RES *_get_first_result(MYSQL *mysql_db) +{ + MYSQL_RES *result = NULL; + int rc = 0; + do { + /* did current statement return data? */ + if((result = mysql_store_result(mysql_db))) + return result; + + /* more results? -1 = no, >0 = error, 0 = yes (keep looping) */ + if ((rc = mysql_next_result(mysql_db)) > 0) + debug3("error: Could not execute statement %d\n", rc); + + } while (rc == 0); + + return NULL; +} + +static MYSQL_RES *_get_last_result(MYSQL *mysql_db) +{ + MYSQL_RES *result = NULL; + MYSQL_RES *last_result = NULL; + int rc = 0; + do { + /* did current statement return data? */ + if((result = mysql_store_result(mysql_db))) { + if(last_result) + mysql_free_result(last_result); + last_result = result; + } + /* more results? -1 = no, >0 = error, 0 = yes (keep looping) */ + if ((rc = mysql_next_result(mysql_db)) > 0) + debug3("error: Could not execute statement %d\n", rc); + } while (rc == 0); + + return last_result; +} + +static int _mysql_make_table_current(MYSQL *mysql_db, char *table_name, + storage_field_t *fields) +{ + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + int i = 0; + List columns = NULL; + ListIterator itr = NULL; + char *col = NULL; + DEF_TIMERS; + + query = xstrdup_printf("show columns from %s", table_name); + + if(!(result = mysql_db_query_ret(mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + columns = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + col = xstrdup(row[0]); //Field + list_append(columns, col); + } + mysql_free_result(result); + itr = list_iterator_create(columns); + query = xstrdup_printf("alter table %s", table_name); + START_TIMER; + while(fields[i].name) { + int found = 0; + list_iterator_reset(itr); + while((col = list_next(itr))) { + if(!strcmp(col, fields[i].name)) { + xstrfmtcat(query, " modify %s %s,", + fields[i].name, + fields[i].options); + list_delete_item(itr); + found = 1; + break; + } + } + if(!found) { + info("adding column %s after %s", fields[i].name, + fields[i-1].name); + xstrfmtcat(query, " add %s %s after %s,", + fields[i].name, + fields[i].options, + fields[i-1].name); + } + + i++; + } + list_iterator_destroy(itr); + list_destroy(columns); + query[strlen(query)-1] = ';'; + if(mysql_db_query(mysql_db, query)) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + END_TIMER2("make table current"); + return SLURM_SUCCESS; +} + +static int _create_db(char *db_name, mysql_db_info_t *db_info) +{ + char create_line[50]; + MYSQL *mysql_db = NULL; + +// slurm_mutex_lock(&mysql_lock); + if(!(mysql_db = mysql_init(mysql_db))) + fatal("mysql_init failed: %s", mysql_error(mysql_db)); + + if(mysql_real_connect(mysql_db, db_info->host, db_info->user, + db_info->pass, NULL, db_info->port, NULL, 0)) { + snprintf(create_line, sizeof(create_line), + "create database %s", db_name); + if(mysql_query(mysql_db, create_line)) { + fatal("mysql_real_query failed: %d %s\n%s", + mysql_errno(mysql_db), + mysql_error(mysql_db), create_line); + } + mysql_close_db_connection(&mysql_db); + } else { + info("Connection failed to host = %s " + "user = %s pass = %s port = %u", + db_info->host, db_info->user, + db_info->pass, db_info->port); + slurm_mutex_unlock(&mysql_lock); + fatal("mysql_real_connect failed: %d %s\n", 
+		      mysql_errno(mysql_db),
+		      mysql_error(mysql_db));
+	}
+//	slurm_mutex_unlock(&mysql_lock);
+	return SLURM_SUCCESS;
+}
+
+extern int *destroy_mysql_db_info(mysql_db_info_t *db_info)
+{
+	if(db_info) {
+		xfree(db_info->host);
+		xfree(db_info->user);
+		xfree(db_info->pass);
+		xfree(db_info);
+	}
+	return SLURM_SUCCESS;
+}
+
+extern int mysql_get_db_connection(MYSQL **mysql_db, char *db_name,
+				   mysql_db_info_t *db_info)
+{
+	int rc = SLURM_SUCCESS;
+	bool storage_init = false;
+
+	if(!(*mysql_db = mysql_init(*mysql_db)))
+		fatal("mysql_init failed: %s", mysql_error(*mysql_db));
+	else {
+#ifdef MYSQL_OPT_RECONNECT
+{
+	my_bool reconnect = 1;
+	/* make sure reconnect is on */
+	mysql_options(*mysql_db, MYSQL_OPT_RECONNECT, &reconnect);
+}
+#endif
+		while(!storage_init) {
+			if(!mysql_real_connect(*mysql_db, db_info->host,
+					       db_info->user, db_info->pass,
+					       db_name, db_info->port,
+					       NULL, CLIENT_MULTI_STATEMENTS)) {
+				if(mysql_errno(*mysql_db) == ER_BAD_DB_ERROR) {
+					debug("Database %s not created. "
+					      "Creating", db_name);
+					_create_db(db_name, db_info);
+				} else {
+					fatal("mysql_real_connect failed: "
+					      "%d %s",
+					      mysql_errno(*mysql_db),
+					      mysql_error(*mysql_db));
+				}
+			} else {
+				storage_init = true;
+			}
+		}
+	}
+	return rc;
+}
+
+extern int mysql_close_db_connection(MYSQL **mysql_db)
+{
+	if(mysql_db && *mysql_db) {
+		if(mysql_thread_safe())
+			mysql_thread_end();
+		mysql_close(*mysql_db);
+		*mysql_db = NULL;
+	}
+
+	return SLURM_SUCCESS;
+}
+
+extern int mysql_cleanup()
+{
+	debug3("starting mysql cleaning up");
+
+#ifdef mysql_library_end
+	mysql_library_end();
+#else
+	mysql_server_end();
+#endif
+
+	debug3("finished mysql cleaning up");
+	return SLURM_SUCCESS;
+}
+
+extern int mysql_db_query(MYSQL *mysql_db, char *query)
+{
+	if(!mysql_db)
+		fatal("You haven't inited this storage yet.");
+	slurm_mutex_lock(&mysql_lock);
+
+	/* clear out the old results so we don't get a 2014 error */
+	_clear_results(mysql_db);
+//try_again:
+	if(mysql_query(mysql_db, query)) {
+		/* if(mysql_errno(mysql_db) == CR_SERVER_GONE_ERROR) { */
+/*		/\* FIX ME: this means the connection went away *\/ */
+/*		} */
+
+		error("mysql_query failed: %d %s\n%s",
+		      mysql_errno(mysql_db),
+		      mysql_error(mysql_db), query);
+		errno = mysql_errno(mysql_db);
+		slurm_mutex_unlock(&mysql_lock);
+		return SLURM_ERROR;
+	}
+	slurm_mutex_unlock(&mysql_lock);
+
+	return SLURM_SUCCESS;
+}
+
+extern int mysql_db_ping(MYSQL *mysql_db)
+{
+	/* clear out the old results so we don't get a 2014 error */
+	_clear_results(mysql_db);
+	return mysql_ping(mysql_db);
+}
+
+extern int mysql_db_commit(MYSQL *mysql_db)
+{
+	//slurm_mutex_lock(&mysql_lock);
+
+	/* clear out the old results so we don't get a 2014 error */
+	_clear_results(mysql_db);
+	if(mysql_commit(mysql_db)) {
+		error("mysql_commit failed: %d %s",
+		      mysql_errno(mysql_db),
+		      mysql_error(mysql_db));
+		errno = mysql_errno(mysql_db);
+		//slurm_mutex_unlock(&mysql_lock);
+		return SLURM_ERROR;
+	}
+	//slurm_mutex_unlock(&mysql_lock);
+
+	return SLURM_SUCCESS;
+}
+
+extern int mysql_db_rollback(MYSQL *mysql_db)
+{
+	//slurm_mutex_lock(&mysql_lock);
+
+	/* clear out the old results so we don't get a 2014 error */
+	_clear_results(mysql_db);
+	if(mysql_rollback(mysql_db)) {
+		error("mysql_rollback failed: %d %s",
+		      mysql_errno(mysql_db),
+		      mysql_error(mysql_db));
+		errno = mysql_errno(mysql_db);
+		//slurm_mutex_unlock(&mysql_lock);
+		return SLURM_ERROR;
+	}
+	//mysql_db_query(mysql_db, "unlock tables;");
+	//slurm_mutex_unlock(&mysql_lock);
+
+	return SLURM_SUCCESS;
+
+}
+
+extern MYSQL_RES *mysql_db_query_ret(MYSQL *mysql_db, char *query, bool last)
+{
+	MYSQL_RES *result = NULL;
+
+	if(mysql_db_query(mysql_db, query) != SLURM_ERROR) {
+		if(last)
+			result = _get_last_result(mysql_db);
+		else
+			result = _get_first_result(mysql_db);
+		if(!result && mysql_field_count(mysql_db)) {
+			/* should have returned data */
+			error("We should have gotten a result: %s",
+			      mysql_error(mysql_db));
+		}
+	}
+
+	return result;
+}
+
+extern int mysql_insert_ret_id(MYSQL *mysql_db, char *query)
+{
+	int new_id = 0;
+
+	if(mysql_db_query(mysql_db, query) != SLURM_ERROR) {
+		new_id = mysql_insert_id(mysql_db);
+		if(!new_id) {
+			/* should have new id */
+			error("We should have gotten a new id: %s",
+			      mysql_error(mysql_db));
+		}
+	}
+
+	return new_id;
+
+}
+
+extern int mysql_db_create_table(MYSQL *mysql_db, char *table_name,
+				 storage_field_t *fields, char *ending)
+{
+	char *query = NULL;
+	int i = 0;
+	storage_field_t *first_field = fields;
+
+	if(!fields || !fields->name) {
+		error("Not creating an empty table");
+		return SLURM_ERROR;
+	}
+
+	query = xstrdup_printf("create table if not exists %s (%s %s",
+			       table_name, fields->name, fields->options);
+	i=1;
+	fields++;
+
+	while(fields && fields->name) {
+		xstrfmtcat(query, ", %s %s", fields->name, fields->options);
+		fields++;
+		i++;
+	}
+	xstrcat(query, ending);
+
+	/* make sure we can do a rollback */
+	xstrcat(query, " engine='innodb'");
+
+	if(mysql_db_query(mysql_db, query) == SLURM_ERROR) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+
+	return _mysql_make_table_current(mysql_db, table_name, first_field);
+}
+
+
+#endif
+
diff --git a/src/database/mysql_common.h b/src/database/mysql_common.h
new file mode 100644
index 000000000..69acb54c4
--- /dev/null
+++ b/src/database/mysql_common.h
@@ -0,0 +1,98 @@
+/*****************************************************************************\
+ * mysql_common.h - common functions for the mysql storage plugin.
+ *****************************************************************************
+ *
+ * Copyright (C) 2004-2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Danny Auble <da@llnl.gov>
+ *
+ * This file is part of SLURM, a resource management program.
+ * For details, see <http://www.llnl.gov/linux/slurm/>.
+ *
+ * SLURM is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * In addition, as a special exception, the copyright holders give permission
+ * to link the code of portions of this program with the OpenSSL library under
+ * certain conditions as described in each individual source file, and
+ * distribute linked combinations including the two. You must obey the GNU
+ * General Public License in all respects for all of the code used other than
+ * OpenSSL. If you modify file(s) with this exception, you may extend this
+ * exception to your version of the file(s), but you are not obligated to do
+ * so. If you do not wish to do so, delete this exception statement from your
+ * version. If you delete this exception statement from all source files in
+ * the program, then also delete it here.
+ *
+ * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with SLURM; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * This file is patterned after jobcomp_linux.c, written by Morris Jette and
+ * Copyright (C) 2002 The Regents of the University of California.
+\*****************************************************************************/
+#ifndef _HAVE_MYSQL_COMMON_H
+#define _HAVE_MYSQL_COMMON_H
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+
+#include <stdio.h>
+#include <slurm/slurm_errno.h>
+#include "src/common/list.h"
+#include "src/common/xstring.h"
+
+#ifdef HAVE_MYSQL
+#include <mysql.h>
+#include <mysqld_error.h>
+
+typedef struct {
+	uint32_t port;
+	char *host;
+	char *user;
+	char *pass;
+} mysql_db_info_t;
+
+typedef struct {
+	char *name;
+	char *options;
+} storage_field_t;
+
+extern pthread_mutex_t mysql_lock;
+
+extern int *destroy_mysql_db_info(mysql_db_info_t *db_info);
+
+extern int mysql_get_db_connection(MYSQL **mysql_db, char *db_name,
+				   mysql_db_info_t *db_info);
+extern int mysql_close_db_connection(MYSQL **mysql_db);
+extern int mysql_cleanup();
+extern int mysql_db_query(MYSQL *mysql_db, char *query);
+extern int mysql_db_ping(MYSQL *mysql_db);
+extern int mysql_db_commit(MYSQL *mysql_db);
+extern int mysql_db_rollback(MYSQL *mysql_db);
+
+extern MYSQL_RES *mysql_db_query_ret(MYSQL *mysql_db, char *query, bool last);
+
+extern int mysql_insert_ret_id(MYSQL *mysql_db, char *query);
+
+extern int mysql_db_create_table(MYSQL *mysql_db, char *table_name,
+				 storage_field_t *fields, char *ending);
+
+
+#endif
+
+#endif
diff --git a/src/database/pgsql_common.c b/src/database/pgsql_common.c
new file mode 100644
index 000000000..819836e83
--- /dev/null
+++ b/src/database/pgsql_common.c
@@ -0,0 +1,390 @@
+/*****************************************************************************\
+ * pgsql_common.c - common functions for the pgsql storage plugin.
+ *****************************************************************************
+ *
+ * Copyright (C) 2004-2007 The Regents of the University of California.
+ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ * Written by Danny Auble <da@llnl.gov>
+ *
+ * This file is part of SLURM, a resource management program.
+ * For details, see <http://www.llnl.gov/linux/slurm/>.
+ *
+ * SLURM is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * In addition, as a special exception, the copyright holders give permission
+ * to link the code of portions of this program with the OpenSSL library under
+ * certain conditions as described in each individual source file, and
+ * distribute linked combinations including the two. You must obey the GNU
+ * General Public License in all respects for all of the code used other than
+ * OpenSSL. If you modify file(s) with this exception, you may extend this
+ * exception to your version of the file(s), but you are not obligated to do
+ * so. If you do not wish to do so, delete this exception statement from your
+ * version.
If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ + +#include "pgsql_common.h" +#include <stdlib.h> + +pthread_mutex_t pgsql_lock = PTHREAD_MUTEX_INITIALIZER; + +#ifdef HAVE_PGSQL + +extern int *destroy_pgsql_db_info(pgsql_db_info_t *db_info) +{ + if(db_info) { + xfree(db_info->host); + xfree(db_info->user); + xfree(db_info->pass); + xfree(db_info); + } + return SLURM_SUCCESS; +} + +extern int _create_db(char *db_name, pgsql_db_info_t *db_info) +{ + char create_line[50]; + PGconn *pgsql_db = NULL; + char *connect_line = xstrdup_printf("dbname = 'postgres'" + " host = '%s'" + " port = '%u'" + " user = '%s'" + " password = '%s'", + db_info->host, + db_info->port, + db_info->user, + db_info->pass); + + pgsql_db = PQconnectdb(connect_line); + + if (PQstatus(pgsql_db) == CONNECTION_OK) { + PGresult *result = NULL; + snprintf(create_line, sizeof(create_line), + "create database %s", db_name); + result = PQexec(pgsql_db, create_line); + if (PQresultStatus(result) != PGRES_COMMAND_OK) { + fatal("PQexec failed: %d %s\n%s", + PQresultStatus(result), PQerrorMessage(pgsql_db), create_line); + } + PQclear(result); + pgsql_close_db_connection(&pgsql_db); + } else { + info("Connection failed to %s", connect_line); + fatal("Status was: %d %s", + PQstatus(pgsql_db), PQerrorMessage(pgsql_db)); + } + xfree(connect_line); + + return SLURM_SUCCESS; +} + +extern int pgsql_get_db_connection(PGconn **pgsql_db, char *db_name, + pgsql_db_info_t *db_info) +{ + int rc = SLURM_SUCCESS; + bool storage_init = false; + char *connect_line = xstrdup_printf("dbname = '%s'" + " host = '%s'" + " port = '%u'" + " user = '%s'" + " password = '%s'", + db_name, + db_info->host, + db_info->port, + db_info->user, + db_info->pass); + + while(!storage_init) { + *pgsql_db = PQconnectdb(connect_line); + + if(PQstatus(*pgsql_db) != CONNECTION_OK) { + if(!strcmp(PQerrorMessage(*pgsql_db), + "no password supplied")) { + PQfinish(*pgsql_db); + fatal("This Postgres connection needs " + "a password. It doesn't appear to " + "like blank ones"); + } + + info("Database %s not created. 
Creating", db_name); + pgsql_close_db_connection(pgsql_db); + _create_db(db_name, db_info); + } else { + storage_init = true; + } + } + xfree(connect_line); + return rc; +} + +extern int pgsql_close_db_connection(PGconn **pgsql_db) +{ + if(pgsql_db && *pgsql_db) { + PQfinish(*pgsql_db); + *pgsql_db = NULL; + } + return SLURM_SUCCESS; +} + +extern int pgsql_db_query(PGconn *pgsql_db, char *query) +{ + PGresult *result = NULL; + + if(!pgsql_db) + fatal("You haven't inited this storage yet."); + + if(!(result = pgsql_db_query_ret(pgsql_db, query))) + return SLURM_ERROR; + + PQclear(result); + return SLURM_SUCCESS; +} + +extern int pgsql_db_commit(PGconn *pgsql_db) +{ + return pgsql_db_query(pgsql_db, "COMMIT WORK"); +} + +extern int pgsql_db_rollback(PGconn *pgsql_db) +{ + return pgsql_db_query(pgsql_db, "ROLLBACK WORK"); + +} + +extern PGresult *pgsql_db_query_ret(PGconn *pgsql_db, char *query) +{ + PGresult *result = NULL; + + if(!pgsql_db) + fatal("You haven't inited this storage yet."); + + result = PQexec(pgsql_db, query); + + if(PQresultStatus(result) != PGRES_COMMAND_OK + && PQresultStatus(result) != PGRES_TUPLES_OK) { + error("PQexec failed: %d %s", PQresultStatus(result), + PQerrorMessage(pgsql_db)); + info("query was %s", query); + PQclear(result); + return NULL; + } + return result; +} + +extern int pgsql_insert_ret_id(PGconn *pgsql_db, char *sequence_name, + char *query) +{ + int new_id = 0; + PGresult *result = NULL; + + slurm_mutex_lock(&pgsql_lock); + if(pgsql_db_query(pgsql_db, query) != SLURM_ERROR) { + char *new_query = xstrdup_printf( + "select last_value from %s", sequence_name); + + if((result = pgsql_db_query_ret(pgsql_db, new_query))) { + new_id = atoi(PQgetvalue(result, 0, 0)); + PQclear(result); + } + xfree(new_query); + if(!new_id) { + /* should have new id */ + error("We should have gotten a new id: %s", + PQerrorMessage(pgsql_db)); + } + } + slurm_mutex_unlock(&pgsql_lock); + + return new_id; + +} + +extern int pgsql_db_create_table(PGconn *pgsql_db, + char *table_name, storage_field_t *fields, + char *ending) +{ + char *query = NULL; + char *tmp = NULL; + char *next = NULL; + int i = 0; + + query = xstrdup_printf("create table %s (", table_name); + i=0; + while(fields && fields->name) { + next = xstrdup_printf(" %s %s", + fields->name, + fields->options); + if(i) + xstrcat(tmp, ","); + xstrcat(tmp, next); + xfree(next); + fields++; + i++; + } + xstrcat(query, tmp); + xfree(tmp); + xstrcat(query, ending); + + if(pgsql_db_query(pgsql_db, query) == SLURM_ERROR) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + return SLURM_SUCCESS; +} + +extern int pgsql_db_make_table_current(PGconn *pgsql_db, char *table_name, + storage_field_t *fields) +{ + char *query = NULL, *opt_part = NULL, *temp_char = NULL; + char *type = NULL; + int not_null = 0; + char *default_str = NULL; + char* original_ptr = NULL; + int i = 0; + PGresult *result = NULL; + List columns = NULL; + ListIterator itr = NULL; + char *col = NULL; + + DEF_TIMERS; + + query = xstrdup_printf("select column_name from " + "information_schema.columns where " + "table_name='%s'", table_name); + + if(!(result = pgsql_db_query_ret(pgsql_db, query))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + columns = list_create(slurm_destroy_char); + for (i = 0; i < PQntuples(result); i++) { + col = xstrdup(PQgetvalue(result, i, 0)); //column_name + list_append(columns, col); + } + PQclear(result); + itr = list_iterator_create(columns); + query = xstrdup_printf("alter table %s", table_name); + 
START_TIMER; + i=0; + while(fields[i].name) { + int found = 0; + if(!strcmp("serial", fields[i].options)) { + i++; + continue; + } + opt_part = xstrdup(fields[i].options); + original_ptr = opt_part; + opt_part = strtok_r(opt_part, " ", &temp_char); + if(opt_part) { + type = xstrdup(opt_part); + opt_part = temp_char; + opt_part = strtok_r(opt_part, " ", &temp_char); + while(opt_part) { + if(!strcmp("not null", opt_part)) { + not_null = 1; + opt_part = temp_char; + opt_part = strtok_r(opt_part, + " ", &temp_char); + } else if(!strcmp("default", opt_part)){ + opt_part = temp_char; + opt_part = strtok_r(opt_part, + " ", &temp_char); + default_str = xstrdup(opt_part); + } + if(opt_part) { + opt_part = temp_char; + opt_part = strtok_r(opt_part, + " ", &temp_char); + } + } + } else { + type = xstrdup(fields[i].options); + } + xfree(original_ptr); + list_iterator_reset(itr); + while((col = list_next(itr))) { + if(!strcmp(col, fields[i].name)) { + list_delete_item(itr); + found = 1; + break; + } + } + + temp_char = NULL; + if(!found) { + info("adding column %s", fields[i].name); + if(default_str) + xstrfmtcat(temp_char, + " default %s", default_str); + + if(not_null) + xstrcat(temp_char, " not null"); + + xstrfmtcat(query, + " add %s %s", + fields[i].name, type); + if(temp_char) + xstrcat(query, temp_char); + xstrcat(query, ","); + } else { + if(default_str) + xstrfmtcat(temp_char, + " alter %s set default %s,", + fields[i].name, default_str); + else + xstrfmtcat(temp_char, + " alter %s drop default,", + fields[i].name); + + if(not_null) + xstrfmtcat(temp_char, + " alter %s set not null,", + fields[i].name); + else + xstrfmtcat(temp_char, + " alter %s drop not null,", + fields[i].name); + xstrfmtcat(query, " alter %s type %s,%s", + fields[i].name, type, temp_char); + } + xfree(temp_char); + xfree(default_str); + xfree(type); + + i++; + } + list_iterator_destroy(itr); + list_destroy(columns); + query[strlen(query)-1] = ';'; + + if(pgsql_db_query(pgsql_db, query)) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + END_TIMER2("make table current"); + return SLURM_SUCCESS; +} + + +#endif + diff --git a/src/database/pgsql_common.h b/src/database/pgsql_common.h new file mode 100644 index 000000000..cc48e8c5f --- /dev/null +++ b/src/database/pgsql_common.h @@ -0,0 +1,100 @@ +/*****************************************************************************\ + * pgsql_common.h - common functions for the the pgsql storage plugin. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ +#ifndef _HAVE_PGSQL_COMMON_H +#define _HAVE_PGSQL_COMMON_H + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_STDINT_H +# include <stdint.h> +#endif +#if HAVE_INTTYPES_H +# include <inttypes.h> +#endif + +#include <stdio.h> +#include <slurm/slurm_errno.h> +#include "src/slurmctld/slurmctld.h" +#include "src/common/xstring.h" + +#ifdef HAVE_PGSQL +#include <libpq-fe.h> + +typedef struct { + uint32_t port; + char *host; + char *user; + char *pass; +} pgsql_db_info_t; + +typedef struct { + char *name; + char *options; +} storage_field_t; + +extern pthread_mutex_t pgsql_lock; + +extern int *destroy_pgsql_db_info(pgsql_db_info_t *db_info); + +extern int pgsql_get_db_connection(PGconn **pgsql_db, char *db_name, + pgsql_db_info_t *db_info); + +extern int pgsql_close_db_connection(PGconn **pgsql_db); + +extern int pgsql_db_query(PGconn *pgsql_db, char *query); +extern int pgsql_db_commit(PGconn *pgsql_db); +extern int pgsql_db_rollback(PGconn *pgsql_db); + +extern PGresult *pgsql_db_query_ret(PGconn *pgsql_db, char *query); + +extern int pgsql_insert_ret_id(PGconn *pgsql_db, + char *sequence_name, char *query); + +extern int pgsql_db_create_table(PGconn *pgsql_db, + char *table_name, storage_field_t *fields, + char *ending); + +extern int pgsql_db_make_table_current(PGconn *pgsql_db, char *table_name, + storage_field_t *fields); +#endif + +#endif diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am index 25088e401..a8e77ec42 100644 --- a/src/plugins/Makefile.am +++ b/src/plugins/Makefile.am @@ -1,2 +1 @@ - -SUBDIRS = auth checkpoint jobacct jobcomp mpi proctrack sched select switch task +SUBDIRS = accounting_storage auth checkpoint crypto jobacct_gather jobcomp mpi proctrack sched select switch task diff --git a/src/plugins/Makefile.in b/src/plugins/Makefile.in index 67192790a..97b1039e4 100644 --- a/src/plugins/Makefile.in +++ b/src/plugins/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -99,6 +101,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -112,10 +115,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -135,7 +141,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -146,6 +155,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -161,6 +172,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -176,6 +188,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -232,7 +245,7 @@ target_os = @target_os@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -SUBDIRS = auth checkpoint jobacct jobcomp mpi proctrack sched select switch task +SUBDIRS = accounting_storage auth checkpoint crypto jobacct_gather jobcomp mpi proctrack sched select switch task all: all-recursive .SUFFIXES: @@ -347,8 +360,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -373,8 +386,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -384,13 +397,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; 
else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/accounting_storage/Makefile.am b/src/plugins/accounting_storage/Makefile.am new file mode 100644 index 000000000..c4879b80a --- /dev/null +++ b/src/plugins/accounting_storage/Makefile.am @@ -0,0 +1,3 @@ +# Makefile for storage plugins + +SUBDIRS = filetxt gold mysql pgsql none slurmdbd diff --git a/src/plugins/accounting_storage/Makefile.in b/src/plugins/accounting_storage/Makefile.in new file mode 100644 index 000000000..251b0d160 --- /dev/null +++ b/src/plugins/accounting_storage/Makefile.in @@ -0,0 +1,565 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for storage plugins +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/accounting_storage +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive 
check-recursive dvi-recursive \ + html-recursive info-recursive install-data-recursive \ + install-dvi-recursive install-exec-recursive \ + install-html-recursive install-info-recursive \ + install-pdf-recursive install-ps-recursive install-recursive \ + installcheck-recursive installdirs-recursive pdf-recursive \ + ps-recursive uninstall-recursive +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = 
@abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +SUBDIRS = filetxt gold mysql pgsql none slurmdbd +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/plugins/accounting_storage/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu src/plugins/accounting_storage/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +# This directory's subdirectories are mostly independent; you can cd +# into them and run `make' without going through this Makefile. +# To change the values of `make' variables: instead of editing Makefiles, +# (1) if the variable is set in `config.status', edit `config.status' +# (which will cause the Makefiles to be regenerated when you run `make'); +# (2) otherwise, pass the desired values on the `make' command line. 
+$(RECURSIVE_TARGETS): + @failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +$(RECURSIVE_CLEAN_TARGETS): + @failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + rev=''; for subdir in $$list; do \ + if test "$$subdir" = "."; then :; else \ + rev="$$subdir $$rev"; \ + fi; \ + done; \ + rev="$$rev ."; \ + target=`echo $@ | sed s/-recursive//`; \ + for subdir in $$rev; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done && test -z "$$fail" +tags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ + done +ctags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ + done + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done + list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + distdir=`$(am__cd) $(distdir) && pwd`; \ + top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ + (cd $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$top_distdir" \ + distdir="$$distdir/$$subdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may 
require special tools to rebuild." +clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-exec-am: + +install-html: install-html-recursive + +install-info: install-info-recursive + +install-man: + +install-pdf: install-pdf-recursive + +install-ps: install-ps-recursive + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ + install-strip + +.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ + all all-am check check-am clean clean-generic clean-libtool \ + ctags ctags-recursive distclean distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ + uninstall uninstall-am + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/accounting_storage/filetxt/Makefile.am b/src/plugins/accounting_storage/filetxt/Makefile.am new file mode 100644 index 000000000..4ea567fb1 --- /dev/null +++ b/src/plugins/accounting_storage/filetxt/Makefile.am @@ -0,0 +1,14 @@ +# Makefile for accounting_storage/filetxt plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = accounting_storage_filetxt.la + +accounting_storage_filetxt_la_SOURCES = accounting_storage_filetxt.c \ + filetxt_jobacct_process.c filetxt_jobacct_process.h +accounting_storage_filetxt_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) + diff --git a/src/plugins/accounting_storage/filetxt/Makefile.in b/src/plugins/accounting_storage/filetxt/Makefile.in new file mode 100644 index 000000000..672f6b887 --- /dev/null +++ b/src/plugins/accounting_storage/filetxt/Makefile.in @@ -0,0 +1,559 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# Makefile for accounting_storage/filetxt plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/accounting_storage/filetxt +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +accounting_storage_filetxt_la_LIBADD = +am_accounting_storage_filetxt_la_OBJECTS = \ + accounting_storage_filetxt.lo filetxt_jobacct_process.lo +accounting_storage_filetxt_la_OBJECTS = \ + $(am_accounting_storage_filetxt_la_OBJECTS) +accounting_storage_filetxt_la_LINK = $(LIBTOOL) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(AM_CFLAGS) $(CFLAGS) \ + $(accounting_storage_filetxt_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(accounting_storage_filetxt_la_SOURCES) +DIST_SOURCES = $(accounting_storage_filetxt_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES 
= $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ 
+docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = accounting_storage_filetxt.la +accounting_storage_filetxt_la_SOURCES = accounting_storage_filetxt.c \ + filetxt_jobacct_process.c filetxt_jobacct_process.h + +accounting_storage_filetxt_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/accounting_storage/filetxt/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/accounting_storage/filetxt/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +accounting_storage_filetxt.la: $(accounting_storage_filetxt_la_OBJECTS) $(accounting_storage_filetxt_la_DEPENDENCIES) + $(accounting_storage_filetxt_la_LINK) -rpath $(pkglibdir) $(accounting_storage_filetxt_la_OBJECTS) $(accounting_storage_filetxt_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_filetxt.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/filetxt_jobacct_process.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) 
$(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to 
use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c new file mode 100644 index 000000000..1e95e1bae --- /dev/null +++ b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c @@ -0,0 +1,830 @@ +/*****************************************************************************\ + * accounting_storage_filetxt.c - account interface to filetxt. + * + * $Id: accounting_storage_filetxt.c 13061 2008-01-22 21:23:56Z da $ + ***************************************************************************** + * Copyright (C) 2004-2008 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include <strings.h> +#include "src/common/slurm_accounting_storage.h" +#include "filetxt_jobacct_process.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobacct/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. 
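
The block comment above spells out the contract every SLURM plugin honours: export plugin_name, plugin_type and plugin_version, and make sure the type string carries the prefix the loading subsystem expects. As a rough, self-contained sketch of that contract (this is not SLURM's loader and not code from this patch; the function name and prefix check are invented for illustration, build with -ldl):

    /* Illustration only: resolve and sanity-check the three mandatory
     * plugin symbols described above.  Build: cc inspect_plugin.c -ldl */
    #include <dlfcn.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int inspect_plugin(const char *path, const char *wanted_prefix)
    {
            void *h = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
            if (!h) {
                    fprintf(stderr, "dlopen failed: %s\n", dlerror());
                    return -1;
            }
            /* plugin_name/plugin_type are char arrays, so dlsym() hands
             * back a pointer to the string data itself. */
            const char *name = (const char *) dlsym(h, "plugin_name");
            const char *type = (const char *) dlsym(h, "plugin_type");
            const uint32_t *ver = (const uint32_t *) dlsym(h, "plugin_version");
            if (!name || !type || !ver) {
                    fprintf(stderr, "missing a mandatory plugin symbol\n");
                    dlclose(h);
                    return -1;
            }
            /* Accept only plugins whose type carries the expected prefix,
             * e.g. "accounting_storage/". */
            if (strncmp(type, wanted_prefix, strlen(wanted_prefix)) != 0) {
                    fprintf(stderr, "unexpected plugin type: %s\n", type);
                    dlclose(h);
                    return -1;
            }
            printf("%s (type %s, version %u)\n", name, type, (unsigned) *ver);
            dlclose(h);
            return 0;
    }

    int main(int argc, char **argv)
    {
            if (argc != 2) {
                    fprintf(stderr, "usage: %s <plugin.so>\n", argv[0]);
                    return 1;
            }
            return inspect_plugin(argv[1], "accounting_storage/") ? 1 : 0;
    }

In practice a site never loads this object by hand: the type string "accounting_storage/filetxt" declared just below is what gets matched when slurm.conf selects the backend (the keyword names AccountingStorageType and AccountingStorageLoc are an assumption for this release; the log location itself is read through slurm_get_accounting_storage_loc() in init() further down).
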
+ */ +const char plugin_name[] = "Accounting storage FileTxt plugin"; +const char plugin_type[] = "accounting_storage/filetxt"; +const uint32_t plugin_version = 100; +static FILE * LOGFILE; +static int LOGFILE_FD; +static pthread_mutex_t logfile_lock = PTHREAD_MUTEX_INITIALIZER; +static int storage_init; +/* Format of the JOB_STEP record */ +const char *_jobstep_format = +"%d " +"%u " /* stepid */ +"%d " /* completion status */ +"%u " /* completion code */ +"%u " /* nprocs */ +"%u " /* number of cpus */ +"%u " /* elapsed seconds */ +"%u " /* total cputime seconds */ +"%u " /* total cputime microseconds */ +"%u " /* user seconds */ +"%u " /* user microseconds */ +"%u " /* system seconds */ +"%u " /* system microseconds */ +"%u " /* max rss */ +"%u " /* max ixrss */ +"%u " /* max idrss */ +"%u " /* max isrss */ +"%u " /* max minflt */ +"%u " /* max majflt */ +"%u " /* max nswap */ +"%u " /* total inblock */ +"%u " /* total outblock */ +"%u " /* total msgsnd */ +"%u " /* total msgrcv */ +"%u " /* total nsignals */ +"%u " /* total nvcsw */ +"%u " /* total nivcsw */ +"%u " /* max vsize */ +"%u " /* max vsize task */ +"%.2f " /* ave vsize */ +"%u " /* max rss */ +"%u " /* max rss task */ +"%.2f " /* ave rss */ +"%u " /* max pages */ +"%u " /* max pages task */ +"%.2f " /* ave pages */ +"%.2f " /* min cpu */ +"%u " /* min cpu task */ +"%.2f " /* ave cpu */ +"%s " /* step process name */ +"%s " /* step node names */ +"%u " /* max vsize node */ +"%u " /* max rss node */ +"%u " /* max pages node */ +"%u " /* min cpu node */ +"%s " /* account */ +"%u"; /* requester user id */ + +/* + * Print the record to the log file. + */ + +static int _print_record(struct job_record *job_ptr, + time_t time, char *data) +{ + static int rc=SLURM_SUCCESS; + char *block_id = NULL; + if(!job_ptr->details) { + error("job_acct: job=%u doesn't exist", job_ptr->job_id); + return SLURM_ERROR; + } + debug2("_print_record, job=%u, \"%s\"", + job_ptr->job_id, data); +#ifdef HAVE_BG + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_BLOCK_ID, + &block_id); + +#endif + if(!block_id) + block_id = xstrdup("-"); + + slurm_mutex_lock( &logfile_lock ); + + if (fprintf(LOGFILE, + "%u %s %d %d %u %u %s - %s\n", + job_ptr->job_id, job_ptr->partition, + (int)job_ptr->details->submit_time, (int)time, + job_ptr->user_id, job_ptr->group_id, block_id, data) + < 0) + rc=SLURM_ERROR; +#ifdef HAVE_FDATASYNC + fdatasync(LOGFILE_FD); +#endif + slurm_mutex_unlock( &logfile_lock ); + xfree(block_id); + + return rc; +} + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. 
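
To make the on-disk layout concrete: every line _print_record() writes starts with the common header -- job id, partition, submit time, event time, uid, gid, block id and a literal "-" -- followed by the event-specific data string, whose first element is the numeric record-type code. A purely illustrative JOB_START line (all values invented; <JOB_START> stands in for the numeric code defined elsewhere in SLURM) would read:

    1234 debug 1215000000 1215000060 5001 5001 - - <JOB_START> my_batch_job 1 100 16 tux[0-3] chemistry

Because the reader splits records on single spaces, jobacct_storage_p_job_start() below replaces any whitespace in the job name with underscores before the line is written.
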
+ */ +extern int init ( void ) +{ + static int first = 1; + char *log_file = NULL; + int rc = SLURM_SUCCESS; + mode_t prot = 0600; + struct stat statbuf; + + if(first) { + debug2("jobacct_init() called"); + log_file = slurm_get_accounting_storage_loc(); + if(!log_file) + log_file = xstrdup(DEFAULT_STORAGE_LOC); + slurm_mutex_lock( &logfile_lock ); + if (LOGFILE) + fclose(LOGFILE); + + if (*log_file != '/') + fatal("JobAcctLogfile must specify an " + "absolute pathname"); + if (stat(log_file, &statbuf)==0)/* preserve current file mode */ + prot = statbuf.st_mode; + LOGFILE = fopen(log_file, "a"); + if (LOGFILE == NULL) { + error("open %s: %m", log_file); + storage_init = 0; + xfree(log_file); + slurm_mutex_unlock( &logfile_lock ); + return SLURM_ERROR; + } else + chmod(log_file, prot); + + xfree(log_file); + + if (setvbuf(LOGFILE, NULL, _IOLBF, 0)) + error("setvbuf() failed"); + LOGFILE_FD = fileno(LOGFILE); + slurm_mutex_unlock( &logfile_lock ); + storage_init = 1; + /* since this can be loaded from many different places + only tell us once. */ + verbose("%s loaded", plugin_name); + first = 0; + } else { + debug4("%s loaded", plugin_name); + } + return rc; +} + + +extern int fini ( void ) +{ + if (LOGFILE) + fclose(LOGFILE); + return SLURM_SUCCESS; +} + +extern void * acct_storage_p_get_connection(bool make_agent, bool rollback) +{ + return NULL; +} + +extern int acct_storage_p_close_connection(void **db_conn) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_commit(void *db_conn, bool commit) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_users(void *db_conn, uint32_t uid, + List user_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_accts(void *db_conn, uint32_t uid, + List acct_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid, + List cluster_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid, + List association_list) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q, + acct_user_rec_t *user) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_accts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid, + acct_account_cond_t *cluster_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q) +{ + return SLURM_SUCCESS; +} + +extern List 
acct_storage_p_get_users(void *db_conn, + acct_user_cond_t *user_q) +{ + return NULL; +} + +extern List acct_storage_p_get_accts(void *db_conn, + acct_account_cond_t *acct_q) +{ + return NULL; +} + +extern List acct_storage_p_get_clusters(void *db_conn, + acct_account_cond_t *cluster_q) +{ + return NULL; +} + +extern List acct_storage_p_get_associations(void *db_conn, + acct_association_cond_t *assoc_q) +{ + return NULL; +} + +extern int acct_storage_p_get_usage(void *db_conn, + acct_association_rec_t *acct_assoc, + time_t start, time_t end) +{ + int rc = SLURM_SUCCESS; + + return rc; +} + +extern int acct_storage_p_roll_usage(void *db_conn, + time_t sent_start) +{ + int rc = SLURM_SUCCESS; + + return rc; +} + +extern int clusteracct_storage_p_node_down(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, char *reason) +{ + return SLURM_SUCCESS; +} +extern int clusteracct_storage_p_node_up(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_register_ctld(char *cluster, + uint16_t port) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_cluster_procs(void *db_conn, + char *cluster, + uint32_t procs, + time_t event_time) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_get_usage( + void *db_conn, + acct_cluster_rec_t *cluster_rec, time_t start, time_t end) +{ + + return SLURM_SUCCESS; +} + +/* + * load into the storage the start of a job + */ +extern int jobacct_storage_p_job_start(void *db_conn, + struct job_record *job_ptr) +{ + int i, + rc=SLURM_SUCCESS, + tmp; + char buf[BUFFER_SIZE], *jname, *account, *nodes; + long priority; + int track_steps = 0; + + if(!storage_init) { + debug("jobacct init was not called or it failed"); + return SLURM_ERROR; + } + + debug2("jobacct_job_start() called"); + + if (job_ptr->start_time == 0) { + /* This function is called when a job becomes elligible to run + * in order to record reserved time (a measure of system + * over-subscription). We only use this with database + * plugins. */ + return rc; + } + + priority = (job_ptr->priority == NO_VAL) ? 
+ -1L : (long) job_ptr->priority; + + if (job_ptr->name && (tmp = strlen(job_ptr->name))) { + jname = xmalloc(++tmp); + for (i=0; i<tmp; i++) { + if (isspace(job_ptr->name[i])) + jname[i]='_'; + else + jname[i]=job_ptr->name[i]; + } + } else { + jname = xstrdup("allocation"); + track_steps = 1; + } + + if (job_ptr->account && job_ptr->account[0]) + account = job_ptr->account; + else + account = "(null)"; + if (job_ptr->nodes && job_ptr->nodes[0]) + nodes = job_ptr->nodes; + else + nodes = "(null)"; + + if(job_ptr->batch_flag) + track_steps = 1; + + job_ptr->requid = -1; /* force to -1 for sacct to know this + * hasn't been set yet */ + + tmp = snprintf(buf, BUFFER_SIZE, + "%d %s %d %ld %u %s %s", + JOB_START, jname, + track_steps, priority, job_ptr->total_procs, + nodes, account); + + rc = _print_record(job_ptr, job_ptr->start_time, buf); + + xfree(jname); + return rc; +} + +/* + * load into the storage the end of a job + */ +extern int jobacct_storage_p_job_complete(void *db_conn, + struct job_record *job_ptr) +{ + char buf[BUFFER_SIZE]; + if(!storage_init) { + debug("jobacct init was not called or it failed"); + return SLURM_ERROR; + } + + debug2("jobacct_job_complete() called"); + if (job_ptr->end_time == 0) { + debug("jobacct: job %u never started", job_ptr->job_id); + return SLURM_ERROR; + } + /* leave the requid as a %d since we want to see if it is -1 + in sacct */ + snprintf(buf, BUFFER_SIZE, "%d %d %d %u %u", + JOB_TERMINATED, + (int) (job_ptr->end_time - job_ptr->start_time), + job_ptr->job_state & (~JOB_COMPLETING), + job_ptr->requid, job_ptr->exit_code); + + return _print_record(job_ptr, job_ptr->end_time, buf); +} + +/* + * load into the storage the start of a job step + */ +extern int jobacct_storage_p_step_start(void *db_conn, + struct step_record *step_ptr) +{ + char buf[BUFFER_SIZE]; + int cpus = 0; + char node_list[BUFFER_SIZE]; +#ifdef HAVE_BG + char *ionodes = NULL; +#endif + float float_tmp = 0; + char *account; + + if(!storage_init) { + debug("jobacct init was not called or it failed"); + return SLURM_ERROR; + } + +#ifdef HAVE_BG + cpus = step_ptr->job_ptr->num_procs; + select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + &ionodes); + if(ionodes) { + snprintf(node_list, BUFFER_SIZE, + "%s[%s]", step_ptr->job_ptr->nodes, ionodes); + xfree(ionodes); + } else + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + +#else + if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) { + cpus = step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", step_ptr->job_ptr->nodes); + } else { + cpus = step_ptr->step_layout->task_cnt; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->step_layout->node_list); + } +#endif + if (step_ptr->job_ptr->account && step_ptr->job_ptr->account[0]) + account = step_ptr->job_ptr->account; + else + account = "(null)"; + + step_ptr->job_ptr->requid = -1; /* force to -1 for sacct to know this + * hasn't been set yet */ + + snprintf(buf, BUFFER_SIZE, _jobstep_format, + JOB_STEP, + step_ptr->step_id, /* stepid */ + JOB_RUNNING, /* completion status */ + 0, /* completion code */ + cpus, /* number of tasks */ + cpus, /* number of cpus */ + 0, /* elapsed seconds */ + 0, /* total cputime seconds */ + 0, /* total cputime seconds */ + 0, /* user seconds */ + 0,/* user microseconds */ + 0, /* system seconds */ + 0,/* system microsecs */ + 0, /* max rss */ + 0, /* max ixrss */ + 0, /* max idrss */ + 0, /* max isrss */ + 0, /* max minflt */ + 0, /* max majflt */ + 0, /* max nswap */ + 0, /* 
total inblock */ + 0, /* total outblock */ + 0, /* total msgsnd */ + 0, /* total msgrcv */ + 0, /* total nsignals */ + 0, /* total nvcsw */ + 0, /* total nivcsw */ + 0, /* max vsize */ + 0, /* max vsize task */ + float_tmp, /* ave vsize */ + 0, /* max rss */ + 0, /* max rss task */ + float_tmp, /* ave rss */ + 0, /* max pages */ + 0, /* max pages task */ + float_tmp, /* ave pages */ + float_tmp, /* min cpu */ + 0, /* min cpu task */ + float_tmp, /* ave cpu */ + step_ptr->name, /* step exe name */ + node_list, /* name of nodes step running on */ + 0, /* max vsize node */ + 0, /* max rss node */ + 0, /* max pages node */ + 0, /* min cpu node */ + account, + step_ptr->job_ptr->requid); /* requester user id */ + + return _print_record(step_ptr->job_ptr, step_ptr->start_time, buf); +} + +/* + * load into the storage the end of a job step + */ +extern int jobacct_storage_p_step_complete(void *db_conn, + struct step_record *step_ptr) +{ + char buf[BUFFER_SIZE]; + time_t now; + int elapsed; + int comp_status; + int cpus = 0; + char node_list[BUFFER_SIZE]; + struct jobacctinfo *jobacct = (struct jobacctinfo *)step_ptr->jobacct; + struct jobacctinfo dummy_jobacct; +#ifdef HAVE_BG + char *ionodes = NULL; +#endif + float ave_vsize = 0, ave_rss = 0, ave_pages = 0; + float ave_cpu = 0, ave_cpu2 = 0; + char *account; + + if(!storage_init) { + debug("jobacct init was not called or it failed"); + return SLURM_ERROR; + } + + now = time(NULL); + + if (jobacct == NULL) { + /* JobAcctGather=jobacct_gather/none, no data to process */ + bzero(&dummy_jobacct, sizeof(dummy_jobacct)); + jobacct = &dummy_jobacct; + } + + if ((elapsed=now-step_ptr->start_time)<0) + elapsed=0; /* For *very* short jobs, if clock is wrong */ + if (step_ptr->exit_code) + comp_status = JOB_FAILED; + else + comp_status = JOB_COMPLETE; + +#ifdef HAVE_BG + cpus = step_ptr->job_ptr->num_procs; + select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + &ionodes); + if(ionodes) { + snprintf(node_list, BUFFER_SIZE, + "%s[%s]", step_ptr->job_ptr->nodes, ionodes); + xfree(ionodes); + } else + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + +#else + if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) { + cpus = step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", step_ptr->job_ptr->nodes); + + } else { + cpus = step_ptr->step_layout->task_cnt; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->step_layout->node_list); + } +#endif + /* figure out the ave of the totals sent */ + if(cpus > 0) { + ave_vsize = jobacct->tot_vsize; + ave_vsize /= cpus; + ave_rss = jobacct->tot_rss; + ave_rss /= cpus; + ave_pages = jobacct->tot_pages; + ave_pages /= cpus; + ave_cpu = jobacct->tot_cpu; + ave_cpu /= cpus; + ave_cpu /= 100; + } + + if(jobacct->min_cpu != (uint32_t)NO_VAL) { + ave_cpu2 = jobacct->min_cpu; + ave_cpu2 /= 100; + } + + if (step_ptr->job_ptr->account && step_ptr->job_ptr->account[0]) + account = step_ptr->job_ptr->account; + else + account = "(null)"; + + snprintf(buf, BUFFER_SIZE, _jobstep_format, + JOB_STEP, + step_ptr->step_id, /* stepid */ + comp_status, /* completion status */ + step_ptr->exit_code, /* completion code */ + cpus, /* number of tasks */ + cpus, /* number of cpus */ + elapsed, /* elapsed seconds */ + /* total cputime seconds */ + jobacct->user_cpu_sec + + jobacct->sys_cpu_sec, + /* total cputime seconds */ + jobacct->user_cpu_usec + + jobacct->sys_cpu_usec, + jobacct->user_cpu_sec, /* user seconds */ + jobacct->user_cpu_usec,/* user microseconds */ + 
jobacct->sys_cpu_sec, /* system seconds */ + jobacct->sys_cpu_usec,/* system microsecs */ + 0, /* max rss */ + 0, /* max ixrss */ + 0, /* max idrss */ + 0, /* max isrss */ + 0, /* max minflt */ + 0, /* max majflt */ + 0, /* max nswap */ + 0, /* total inblock */ + 0, /* total outblock */ + 0, /* total msgsnd */ + 0, /* total msgrcv */ + 0, /* total nsignals */ + 0, /* total nvcsw */ + 0, /* total nivcsw */ + jobacct->max_vsize, /* max vsize */ + jobacct->max_vsize_id.taskid, /* max vsize node */ + ave_vsize, /* ave vsize */ + jobacct->max_rss, /* max vsize */ + jobacct->max_rss_id.taskid, /* max rss node */ + ave_rss, /* ave rss */ + jobacct->max_pages, /* max pages */ + jobacct->max_pages_id.taskid, /* max pages node */ + ave_pages, /* ave pages */ + ave_cpu2, /* min cpu */ + jobacct->min_cpu_id.taskid, /* min cpu node */ + ave_cpu, /* ave cpu */ + step_ptr->name, /* step exe name */ + node_list, /* name of nodes step running on */ + jobacct->max_vsize_id.nodeid, /* max vsize task */ + jobacct->max_rss_id.nodeid, /* max rss task */ + jobacct->max_pages_id.nodeid, /* max pages task */ + jobacct->min_cpu_id.nodeid, /* min cpu task */ + account, + step_ptr->job_ptr->requid); /* requester user id */ + + return _print_record(step_ptr->job_ptr, now, buf); +} + +/* + * load into the storage a suspention of a job + */ +extern int jobacct_storage_p_suspend(void *db_conn, + struct job_record *job_ptr) +{ + char buf[BUFFER_SIZE]; + static time_t now = 0; + static time_t temp = 0; + int elapsed; + if(!storage_init) { + debug("jobacct init was not called or it failed"); + return SLURM_ERROR; + } + + /* tell what time has passed */ + if(!now) + now = job_ptr->start_time; + temp = now; + now = time(NULL); + + if ((elapsed=now-temp) < 0) + elapsed=0; /* For *very* short jobs, if clock is wrong */ + + /* here we are really just going for a marker in time to tell when + * the process was suspended or resumed (check job state), we don't + * really need to keep track of anything else */ + snprintf(buf, BUFFER_SIZE, "%d %d %d", + JOB_SUSPEND, + elapsed, + job_ptr->job_state & (~JOB_COMPLETING));/* job status */ + + return _print_record(job_ptr, now, buf); +} + +/* + * get info from the storage + * in/out job_list List of job_rec_t * + * note List needs to be freed when called + */ +extern List jobacct_storage_p_get_jobs(void *db_conn, + List selected_steps, + List selected_parts, + void *params) +{ + return filetxt_jobacct_process_get_jobs(selected_steps, selected_parts, + params); +} + +/* + * expire old info from the storage + */ +extern void jobacct_storage_p_archive(void *db_conn, + List selected_parts, + void *params) +{ + filetxt_jobacct_process_archive(selected_parts, params); + return; +} + +extern int acct_storage_p_update_shares_used(void *db_conn, + List shares_used) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_flush_jobs_on_cluster( + void *db_conn, char *cluster, time_t event_time) +{ + /* put end times for a clean start */ + return SLURM_SUCCESS; +} diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c new file mode 100644 index 000000000..a9d0f7470 --- /dev/null +++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c @@ -0,0 +1,1450 @@ +/*****************************************************************************\ + * filetxt_jobacct_process.c - functions the processing of + * information from the filetxt jobacct + * storage. 
+ ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ +#include <stdlib.h> +#include <ctype.h> +#include <sys/stat.h> + +#include "src/common/xstring.h" +#include "src/common/xmalloc.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/jobacct_common.h" +#include "src/slurmctld/slurmctld.h" +#include "src/slurmdbd/read_config.h" +/* Map field names to positions */ + +/* slurmd uses "(uint32_t) -2" to track data for batch allocations + * which have no logical jobsteps. 
*/ +#define BATCH_JOB_TIMESTAMP 0 +#define EXPIRE_READ_LENGTH 10 +#define MAX_RECORD_FIELDS 100 + +typedef struct expired_rec { /* table of expired jobs */ + uint32_t job; + time_t job_submit; + char *line; +} expired_rec_t; + +typedef struct header { + uint32_t jobnum; + char *partition; + char *blockid; + time_t job_submit; + time_t timestamp; + uint32_t uid; + uint32_t gid; + uint16_t rec_type; +} filetxt_header_t; + +typedef struct { + uint32_t job_start_seen, /* useful flags */ + job_step_seen, + job_terminated_seen, + jobnum_superseded; /* older jobnum was reused */ + filetxt_header_t header; + uint16_t show_full; + char *nodes; + char *jobname; + uint16_t track_steps; + int32_t priority; + uint32_t ncpus; + uint32_t ntasks; + enum job_states status; + int32_t exitcode; + uint32_t elapsed; + time_t end; + uint32_t tot_cpu_sec; + uint32_t tot_cpu_usec; + struct rusage rusage; + sacct_t sacct; + List steps; + char *account; + uint32_t requid; +} filetxt_job_rec_t; + +typedef struct { + filetxt_header_t header; + uint32_t stepnum; /* job's step number */ + char *nodes; + char *stepname; + enum job_states status; + int32_t exitcode; + uint32_t ntasks; + uint32_t ncpus; + uint32_t elapsed; + time_t end; + uint32_t tot_cpu_sec; + uint32_t tot_cpu_usec; + struct rusage rusage; + sacct_t sacct; + char *account; + uint32_t requid; +} filetxt_step_rec_t; + +/* Fields common to all records */ +enum { F_JOB = 0, + F_PARTITION, + F_JOB_SUBMIT, + F_TIMESTAMP, + F_UID, + F_GID, + F_BLOCKID, + F_RESERVED2, + F_RECTYPE, + HEADER_LENGTH +}; + +/* JOB_START fields */ +enum { F_JOBNAME = HEADER_LENGTH, + F_TRACK_STEPS, + F_PRIORITY, + F_NCPUS, + F_NODES, + F_JOB_ACCOUNT, + JOB_START_LENGTH +}; + +/* JOB_STEP fields */ +enum { F_JOBSTEP = HEADER_LENGTH, + F_STATUS, + F_EXITCODE, + F_NTASKS, + F_STEPNCPUS, + F_ELAPSED, + F_CPU_SEC, + F_CPU_USEC, + F_USER_SEC, + F_USER_USEC, + F_SYS_SEC, + F_SYS_USEC, + F_RSS, + F_IXRSS, + F_IDRSS, + F_ISRSS, + F_MINFLT, + F_MAJFLT, + F_NSWAP, + F_INBLOCKS, + F_OUBLOCKS, + F_MSGSND, + F_MSGRCV, + F_NSIGNALS, + F_NVCSW, + F_NIVCSW, + F_MAX_VSIZE, + F_MAX_VSIZE_TASK, + F_AVE_VSIZE, + F_MAX_RSS, + F_MAX_RSS_TASK, + F_AVE_RSS, + F_MAX_PAGES, + F_MAX_PAGES_TASK, + F_AVE_PAGES, + F_MIN_CPU, + F_MIN_CPU_TASK, + F_AVE_CPU, + F_STEPNAME, + F_STEPNODES, + F_MAX_VSIZE_NODE, + F_MAX_RSS_NODE, + F_MAX_PAGES_NODE, + F_MIN_CPU_NODE, + F_STEP_ACCOUNT, + F_STEP_REQUID, + JOB_STEP_LENGTH +}; + +/* JOB_TERM / JOB_SUSPEND fields */ +enum { F_TOT_ELAPSED = HEADER_LENGTH, + F_TERM_STATUS, + F_JOB_REQUID, + F_JOB_EXITCODE, + JOB_TERM_LENGTH +}; + +static void _destroy_exp(void *object) +{ + expired_rec_t *exp_rec = (expired_rec_t *)object; + if(exp_rec) { + xfree(exp_rec->line); + xfree(exp_rec); + } +} + +static void _free_filetxt_header(void *object) +{ + filetxt_header_t *header = (filetxt_header_t *)object; + if(header) { + xfree(header->partition); +#ifdef HAVE_BG + xfree(header->blockid); +#endif + } +} + +static void _destroy_filetxt_job_rec(void *object) +{ + filetxt_job_rec_t *job = (filetxt_job_rec_t *)object; + if (job) { + if(job->steps) + list_destroy(job->steps); + _free_filetxt_header(&job->header); + xfree(job->jobname); + xfree(job->account); + xfree(job->nodes); + xfree(job); + } +} + +static void _destroy_filetxt_step_rec(void *object) +{ + filetxt_step_rec_t *step = (filetxt_step_rec_t *)object; + if (step) { + _free_filetxt_header(&step->header); + xfree(step->stepname); + xfree(step->nodes); + xfree(step->account); + xfree(step); + } +} + +static jobacct_step_rec_t 
*_create_jobacct_step_rec( + filetxt_step_rec_t *filetxt_step) +{ + jobacct_step_rec_t *jobacct_step = create_jobacct_step_rec(); + + jobacct_step->jobid = filetxt_step->header.jobnum; + jobacct_step->elapsed = filetxt_step->elapsed; + jobacct_step->end = filetxt_step->header.timestamp; + jobacct_step->exitcode = filetxt_step->exitcode; + jobacct_step->ncpus = filetxt_step->ncpus; + jobacct_step->nodes = xstrdup(filetxt_step->nodes); + jobacct_step->requid = filetxt_step->requid; + memcpy(&jobacct_step->sacct, &filetxt_step->sacct, sizeof(sacct_t)); + jobacct_step->start = filetxt_step->header.timestamp - + jobacct_step->elapsed; + jobacct_step->state = filetxt_step->status; + jobacct_step->stepid = filetxt_step->stepnum; + jobacct_step->stepname = xstrdup(filetxt_step->stepname); + jobacct_step->sys_cpu_sec = filetxt_step->rusage.ru_stime.tv_sec; + jobacct_step->sys_cpu_usec = filetxt_step->rusage.ru_stime.tv_usec; + jobacct_step->tot_cpu_sec = filetxt_step->tot_cpu_sec; + jobacct_step->tot_cpu_usec = filetxt_step->tot_cpu_usec; + jobacct_step->user_cpu_sec = filetxt_step->rusage.ru_utime.tv_sec; + jobacct_step->user_cpu_usec = filetxt_step->rusage.ru_utime.tv_usec; + + return jobacct_step; +} + +static jobacct_job_rec_t *_create_jobacct_job_rec( + filetxt_job_rec_t *filetxt_job) +{ + jobacct_job_rec_t *jobacct_job = create_jobacct_job_rec(); + ListIterator itr = NULL; + filetxt_step_rec_t *filetxt_step = NULL; + + jobacct_job->associd = 0; + jobacct_job->account = xstrdup(filetxt_job->account); + jobacct_job->blockid = xstrdup(filetxt_job->header.blockid); + jobacct_job->cluster = NULL; + jobacct_job->elapsed = filetxt_job->elapsed; + jobacct_job->eligible = filetxt_job->header.job_submit; + jobacct_job->end = filetxt_job->header.timestamp; + jobacct_job->exitcode = filetxt_job->exitcode; + jobacct_job->gid = filetxt_job->header.gid; + jobacct_job->jobid = filetxt_job->header.jobnum; + jobacct_job->jobname = xstrdup(filetxt_job->jobname); + jobacct_job->partition = xstrdup(filetxt_job->header.partition); + jobacct_job->req_cpus = filetxt_job->ncpus; + jobacct_job->alloc_cpus = filetxt_job->ncpus; + jobacct_job->nodes = xstrdup(filetxt_job->nodes); + jobacct_job->priority = filetxt_job->priority; + jobacct_job->requid = filetxt_job->requid; + memcpy(&jobacct_job->sacct, &filetxt_job->sacct, sizeof(sacct_t)); + jobacct_job->start = filetxt_job->header.timestamp - + jobacct_job->elapsed; + jobacct_job->state = filetxt_job->status; + + jobacct_job->steps = list_create(destroy_jobacct_step_rec); + if(filetxt_job->steps) { + itr = list_iterator_create(filetxt_job->steps); + while((filetxt_step = list_next(itr))) { + list_append(jobacct_job->steps, + _create_jobacct_step_rec(filetxt_step)); + } + list_iterator_destroy(itr); + } + jobacct_job->submit = filetxt_job->header.job_submit; + + jobacct_job->sys_cpu_sec = filetxt_job->rusage.ru_stime.tv_sec; + jobacct_job->sys_cpu_usec = filetxt_job->rusage.ru_stime.tv_usec; + jobacct_job->tot_cpu_sec = filetxt_job->tot_cpu_sec; + jobacct_job->tot_cpu_usec = filetxt_job->tot_cpu_usec; + jobacct_job->track_steps = filetxt_job->track_steps; + jobacct_job->uid = filetxt_job->header.uid; + jobacct_job->user = NULL; + jobacct_job->user_cpu_sec = filetxt_job->rusage.ru_utime.tv_sec; + jobacct_job->user_cpu_usec = filetxt_job->rusage.ru_utime.tv_usec; + + return jobacct_job; +} + +static filetxt_job_rec_t *_create_filetxt_job_rec(filetxt_header_t header) +{ + filetxt_job_rec_t *job = xmalloc(sizeof(filetxt_job_rec_t)); + memcpy(&job->header, &header, 
sizeof(filetxt_header_t)); + memset(&job->rusage, 0, sizeof(struct rusage)); + memset(&job->sacct, 0, sizeof(sacct_t)); + job->sacct.min_cpu = (float)NO_VAL; + job->job_start_seen = 0; + job->job_step_seen = 0; + job->job_terminated_seen = 0; + job->jobnum_superseded = 0; + job->jobname = NULL; + job->status = JOB_PENDING; + job->nodes = NULL; + job->jobname = NULL; + job->exitcode = 0; + job->priority = 0; + job->ntasks = 0; + job->ncpus = 0; + job->elapsed = 0; + job->tot_cpu_sec = 0; + job->tot_cpu_usec = 0; + job->steps = list_create(_destroy_filetxt_step_rec); + job->nodes = NULL; + job->track_steps = 0; + job->account = NULL; + job->requid = -1; + + return job; +} + +static filetxt_step_rec_t *_create_filetxt_step_rec(filetxt_header_t header) +{ + filetxt_step_rec_t *step = xmalloc(sizeof(filetxt_job_rec_t)); + memcpy(&step->header, &header, sizeof(filetxt_header_t)); + memset(&step->rusage, 0, sizeof(struct rusage)); + memset(&step->sacct, 0, sizeof(sacct_t)); + step->stepnum = (uint32_t)NO_VAL; + step->nodes = NULL; + step->stepname = NULL; + step->status = NO_VAL; + step->exitcode = NO_VAL; + step->ntasks = (uint32_t)NO_VAL; + step->ncpus = (uint32_t)NO_VAL; + step->elapsed = (uint32_t)NO_VAL; + step->tot_cpu_sec = (uint32_t)NO_VAL; + step->tot_cpu_usec = (uint32_t)NO_VAL; + step->account = NULL; + step->requid = -1; + + return step; +} + +/* prefix_filename() -- insert a filename prefix into a path + * + * IN: path = fully-qualified path+file name + * prefix = the prefix to insert into the file name + * RETURNS: pointer to the updated path+file name + */ + +static char *_prefix_filename(char *path, char *prefix) { + char *out; + int i, + plen; + + plen = strlen(path); + out = xmalloc(plen+strlen(prefix)+1); + for (i=plen-1; i>=0; i--) + if (path[i]=='/') { + break; + } + i++; + *out = 0; + strncpy(out, path, i); + out[i] = 0; + strcat(out, prefix); + strcat(out, path+i); + return(out); +} + +/* _open_log_file() -- find the current or specified log file, and open it + * + * IN: Nothing + * RETURNS: Nothing + * + * Side effects: + * - Sets opt_filein to the current system accounting log unless + * the user specified another file. 
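
A quick worked example of _prefix_filename() above, with an invented path:

    char *old_name = _prefix_filename("/var/log/slurm/accounting", ".old.");
    /* old_name is now "/var/log/slurm/.old.accounting"; xfree() it when done. */

This is how the .old. and .new. companion files used by the --expire pass further down get their names (the .expired file, by contrast, is formed by appending a plain suffix to the log name).
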
+ */ + +static FILE *_open_log_file(char *logfile) +{ + FILE *fd = fopen(logfile, "r"); + if (fd == NULL) { + perror(logfile); + exit(1); + } + return fd; +} + +static char *_convert_type(int rec_type) +{ + switch(rec_type) { + case JOB_START: + return "JOB_START"; + case JOB_STEP: + return "JOB_STEP"; + case JOB_TERMINATED: + return "JOB_TERMINATED"; + default: + return "UNKNOWN"; + } +} + +static int _cmp_jrec(const void *a1, const void *a2) { + expired_rec_t *j1 = (expired_rec_t *) a1; + expired_rec_t *j2 = (expired_rec_t *) a2; + + if (j1->job < j2->job) + return -1; + else if (j1->job == j2->job) { + if(j1->job_submit == j2->job_submit) + return 0; + else + return 1; + } + return 1; +} + +static void _show_rec(char *f[]) +{ + int i; + fprintf(stderr, "rec>"); + for (i=0; f[i]; i++) + fprintf(stderr, " %s", f[i]); + fprintf(stderr, "\n"); + return; +} + +static void _do_fdump(char* f[], int lc) +{ + int i=0, j=0; + char **type; + char *header[] = {"job", /* F_JOB */ + "partition", /* F_PARTITION */ + "job_submit", /* F_JOB_SUBMIT */ + "timestamp", /* F_TIMESTAMP */ + "uid", /* F_UIDGID */ + "gid", /* F_UIDGID */ + "BlockID", /* F_BLOCKID */ + "reserved-2",/* F_RESERVED1 */ + "recordType",/* F_RECTYPE */ + NULL}; + + char *start[] = {"jobName", /* F_JOBNAME */ + "TrackSteps", /* F_TRACK_STEPS */ + "priority", /* F_PRIORITY */ + "ncpus", /* F_NCPUS */ + "nodeList", /* F_NODES */ + "account", /* F_JOB_ACCOUNT */ + NULL}; + + char *step[] = {"jobStep", /* F_JOBSTEP */ + "status", /* F_STATUS */ + "exitcode", /* F_EXITCODE */ + "ntasks", /* F_NTASKS */ + "ncpus", /* F_STEPNCPUS */ + "elapsed", /* F_ELAPSED */ + "cpu_sec", /* F_CPU_SEC */ + "cpu_usec", /* F_CPU_USEC */ + "user_sec", /* F_USER_SEC */ + "user_usec", /* F_USER_USEC */ + "sys_sec", /* F_SYS_SEC */ + "sys_usec", /* F_SYS_USEC */ + "rss", /* F_RSS */ + "ixrss", /* F_IXRSS */ + "idrss", /* F_IDRSS */ + "isrss", /* F_ISRSS */ + "minflt", /* F_MINFLT */ + "majflt", /* F_MAJFLT */ + "nswap", /* F_NSWAP */ + "inblocks", /* F_INBLOCKS */ + "oublocks", /* F_OUTBLOCKS */ + "msgsnd", /* F_MSGSND */ + "msgrcv", /* F_MSGRCV */ + "nsignals", /* F_NSIGNALS */ + "nvcsw", /* F_VCSW */ + "nivcsw", /* F_NIVCSW */ + "max_vsize", /* F_MAX_VSIZE */ + "max_vsize_task", /* F_MAX_VSIZE_TASK */ + "ave_vsize", /* F_AVE_VSIZE */ + "max_rss", /* F_MAX_RSS */ + "max_rss_task", /* F_MAX_RSS_TASK */ + "ave_rss", /* F_AVE_RSS */ + "max_pages", /* F_MAX_PAGES */ + "max_pages_task", /* F_MAX_PAGES_TASK */ + "ave_pages", /* F_AVE_PAGES */ + "min_cputime", /* F_MIN_CPU */ + "min_cputime_task", /* F_MIN_CPU_TASK */ + "ave_cputime", /* F_AVE_RSS */ + "StepName", /* F_STEPNAME */ + "StepNodes", /* F_STEPNODES */ + "max_vsize_node", /* F_MAX_VSIZE_NODE */ + "max_rss_node", /* F_MAX_RSS_NODE */ + "max_pages_node", /* F_MAX_PAGES_NODE */ + "min_cputime_node", /* F_MIN_CPU_NODE */ + "account", /* F_STEP_ACCOUNT */ + "requid", /* F_STEP_REQUID */ + NULL}; + + char *suspend[] = {"Suspend/Run time", /* F_TOT_ELAPSED */ + "status", /* F_STATUS */ + NULL}; + + char *term[] = {"totElapsed", /* F_TOT_ELAPSED */ + "status", /* F_STATUS */ + "requid", /* F_JOB_REQUID */ + "exitcode", /* F_EXITCODE */ + NULL}; + + i = atoi(f[F_RECTYPE]); + printf("\n------- Line %d %s -------\n", lc, _convert_type(i)); + + for(j=0; j < HEADER_LENGTH; j++) + printf("%12s: %s\n", header[j], f[j]); + switch(i) { + case JOB_START: + type = start; + j = JOB_START_LENGTH; + break; + case JOB_STEP: + type = step; + j = JOB_STEP_LENGTH; + break; + case JOB_SUSPEND: + type = suspend; + j = JOB_TERM_LENGTH; 
+ case JOB_TERMINATED: + type = term; + j = JOB_TERM_LENGTH; + break; + default: + while(f[j]) { + printf(" Field[%02d]: %s\n", j, f[j]); + j++; + } + return; + } + + for(i=HEADER_LENGTH; i < j; i++) + printf("%12s: %s\n", type[i-HEADER_LENGTH], f[i]); +} + +static filetxt_job_rec_t *_find_job_record(List job_list, + filetxt_header_t header, + int type) +{ + filetxt_job_rec_t *job = NULL; + ListIterator itr = list_iterator_create(job_list); + + while((job = (filetxt_job_rec_t *)list_next(itr)) != NULL) { + if (job->header.jobnum == header.jobnum) { + if(job->header.job_submit == 0 && type == JOB_START) { + list_remove(itr); + _destroy_filetxt_job_rec(job); + job = NULL; + break; + } + + if(job->header.job_submit == BATCH_JOB_TIMESTAMP) { + job->header.job_submit = header.job_submit; + break; + } + + if(job->header.job_submit == header.job_submit) + break; + else { + /* If we're looking for a later + * record with this job number, we + * know that this one is an older, + * duplicate record. + * We assume that the newer record + * will be created if it doesn't + * already exist. */ + job->jobnum_superseded = 1; + } + } + } + list_iterator_destroy(itr); + return job; +} + +static int _remove_job_record(List job_list, uint32_t jobnum) +{ + filetxt_job_rec_t *job = NULL; + int rc = SLURM_ERROR; + ListIterator itr = list_iterator_create(job_list); + + while((job = (filetxt_job_rec_t *)list_next(itr)) != NULL) { + if (job->header.jobnum == jobnum) { + list_remove(itr); + _destroy_filetxt_job_rec(job); + rc = SLURM_SUCCESS; + } + } + list_iterator_destroy(itr); + return rc; +} + +static filetxt_step_rec_t *_find_step_record(filetxt_job_rec_t *job, + long stepnum) +{ + filetxt_step_rec_t *step = NULL; + ListIterator itr = NULL; + + if(!list_count(job->steps)) + return step; + + itr = list_iterator_create(job->steps); + while((step = (filetxt_step_rec_t *)list_next(itr)) != NULL) { + if (step->stepnum == stepnum) + break; + } + list_iterator_destroy(itr); + return step; +} + +static int _parse_header(char *f[], filetxt_header_t *header) +{ + header->jobnum = atoi(f[F_JOB]); + header->partition = xstrdup(f[F_PARTITION]); + header->job_submit = atoi(f[F_JOB_SUBMIT]); + header->timestamp = atoi(f[F_TIMESTAMP]); + header->uid = atoi(f[F_UID]); + header->gid = atoi(f[F_GID]); + header->blockid = xstrdup(f[F_BLOCKID]); + + return SLURM_SUCCESS; +} + +static int _parse_line(char *f[], void **data, int len) +{ + int i = atoi(f[F_RECTYPE]); + filetxt_job_rec_t **job = (filetxt_job_rec_t **)data; + filetxt_step_rec_t **step = (filetxt_step_rec_t **)data; + filetxt_header_t header; + _parse_header(f, &header); + + switch(i) { + case JOB_START: + *job = _create_filetxt_job_rec(header); + (*job)->jobname = xstrdup(f[F_JOBNAME]); + (*job)->track_steps = atoi(f[F_TRACK_STEPS]); + (*job)->priority = atoi(f[F_PRIORITY]); + (*job)->ncpus = atoi(f[F_NCPUS]); + (*job)->nodes = xstrdup(f[F_NODES]); + for (i=0; (*job)->nodes[i]; i++) { /* discard trailing <CR> */ + if (isspace((*job)->nodes[i])) + (*job)->nodes[i] = '\0'; + } + if (!strcmp((*job)->nodes, "(null)")) { + xfree((*job)->nodes); + (*job)->nodes = xstrdup("(unknown)"); + } + if (len > F_JOB_ACCOUNT) { + (*job)->account = xstrdup(f[F_JOB_ACCOUNT]); + for (i=0; (*job)->account[i]; i++) { + /* discard trailing <CR> */ + if (isspace((*job)->account[i])) + (*job)->account[i] = '\0'; + } + } + break; + case JOB_STEP: + *step = _create_filetxt_step_rec(header); + (*step)->stepnum = atoi(f[F_JOBSTEP]); + (*step)->status = atoi(f[F_STATUS]); + (*step)->exitcode = 
atoi(f[F_EXITCODE]); + (*step)->ntasks = atoi(f[F_NTASKS]); + (*step)->ncpus = atoi(f[F_STEPNCPUS]); + (*step)->elapsed = atoi(f[F_ELAPSED]); + (*step)->tot_cpu_sec = atoi(f[F_CPU_SEC]); + (*step)->tot_cpu_usec = atoi(f[F_CPU_USEC]); + (*step)->rusage.ru_utime.tv_sec = atoi(f[F_USER_SEC]); + (*step)->rusage.ru_utime.tv_usec = atoi(f[F_USER_USEC]); + (*step)->rusage.ru_stime.tv_sec = atoi(f[F_SYS_SEC]); + (*step)->rusage.ru_stime.tv_usec = atoi(f[F_SYS_USEC]); + (*step)->rusage.ru_maxrss = atoi(f[F_RSS]); + (*step)->rusage.ru_ixrss = atoi(f[F_IXRSS]); + (*step)->rusage.ru_idrss = atoi(f[F_IDRSS]); + (*step)->rusage.ru_isrss = atoi(f[F_ISRSS]); + (*step)->rusage.ru_minflt = atoi(f[F_MINFLT]); + (*step)->rusage.ru_majflt = atoi(f[F_MAJFLT]); + (*step)->rusage.ru_nswap = atoi(f[F_NSWAP]); + (*step)->rusage.ru_inblock = atoi(f[F_INBLOCKS]); + (*step)->rusage.ru_oublock = atoi(f[F_OUBLOCKS]); + (*step)->rusage.ru_msgsnd = atoi(f[F_MSGSND]); + (*step)->rusage.ru_msgrcv = atoi(f[F_MSGRCV]); + (*step)->rusage.ru_nsignals = atoi(f[F_NSIGNALS]); + (*step)->rusage.ru_nvcsw = atoi(f[F_NVCSW]); + (*step)->rusage.ru_nivcsw = atoi(f[F_NIVCSW]); + (*step)->sacct.max_vsize = atoi(f[F_MAX_VSIZE]) * 1024; + if(len > F_STEPNODES) { + (*step)->sacct.max_vsize_id.taskid = + atoi(f[F_MAX_VSIZE_TASK]); + (*step)->sacct.ave_vsize = atof(f[F_AVE_VSIZE]) * 1024; + (*step)->sacct.max_rss = atoi(f[F_MAX_RSS]) * 1024; + (*step)->sacct.max_rss_id.taskid = + atoi(f[F_MAX_RSS_TASK]); + (*step)->sacct.ave_rss = atof(f[F_AVE_RSS]) * 1024; + (*step)->sacct.max_pages = atoi(f[F_MAX_PAGES]); + (*step)->sacct.max_pages_id.taskid = + atoi(f[F_MAX_PAGES_TASK]); + (*step)->sacct.ave_pages = atof(f[F_AVE_PAGES]); + (*step)->sacct.min_cpu = atof(f[F_MIN_CPU]); + (*step)->sacct.min_cpu_id.taskid = + atoi(f[F_MIN_CPU_TASK]); + (*step)->sacct.ave_cpu = atof(f[F_AVE_CPU]); + (*step)->stepname = xstrdup(f[F_STEPNAME]); + (*step)->nodes = xstrdup(f[F_STEPNODES]); + } else { + (*step)->sacct.max_vsize_id.taskid = (uint16_t)NO_VAL; + (*step)->sacct.ave_vsize = (float)NO_VAL; + (*step)->sacct.max_rss = (uint32_t)NO_VAL; + (*step)->sacct.max_rss_id.taskid = (uint16_t)NO_VAL; + (*step)->sacct.ave_rss = (float)NO_VAL; + (*step)->sacct.max_pages = (uint32_t)NO_VAL; + (*step)->sacct.max_pages_id.taskid = (uint16_t)NO_VAL; + (*step)->sacct.ave_pages = (float)NO_VAL; + (*step)->sacct.min_cpu = (uint32_t)NO_VAL; + (*step)->sacct.min_cpu_id.taskid = (uint16_t)NO_VAL; + (*step)->sacct.ave_cpu = (float)NO_VAL; + (*step)->stepname = NULL; + (*step)->nodes = NULL; + } + if(len > F_MIN_CPU_NODE) { + (*step)->sacct.max_vsize_id.nodeid = + atoi(f[F_MAX_VSIZE_NODE]); + (*step)->sacct.max_rss_id.nodeid = + atoi(f[F_MAX_RSS_NODE]); + (*step)->sacct.max_pages_id.nodeid = + atoi(f[F_MAX_PAGES_NODE]); + (*step)->sacct.min_cpu_id.nodeid = + atoi(f[F_MIN_CPU_NODE]); + } else { + (*step)->sacct.max_vsize_id.nodeid = + (uint32_t)NO_VAL; + (*step)->sacct.max_rss_id.nodeid = + (uint32_t)NO_VAL; + (*step)->sacct.max_pages_id.nodeid = + (uint32_t)NO_VAL; + (*step)->sacct.min_cpu_id.nodeid = + (uint32_t)NO_VAL; + } + if(len > F_STEP_ACCOUNT) + (*step)->account = xstrdup(f[F_STEP_ACCOUNT]); + if(len > F_STEP_REQUID) + (*step)->requid = atoi(f[F_STEP_REQUID]); + break; + case JOB_SUSPEND: + case JOB_TERMINATED: + *job = _create_filetxt_job_rec(header); + (*job)->elapsed = atoi(f[F_TOT_ELAPSED]); + (*job)->status = atoi(f[F_STATUS]); + if(len > F_JOB_REQUID) + (*job)->requid = atoi(f[F_JOB_REQUID]); + if(len > F_JOB_EXITCODE) + (*job)->exitcode = atoi(f[F_JOB_EXITCODE]); + 
break; + default: + printf("UNKOWN TYPE %d",i); + break; + } + return SLURM_SUCCESS; +} + +static void _process_start(List job_list, char *f[], int lc, + int show_full, int len) +{ + filetxt_job_rec_t *job = NULL; + filetxt_job_rec_t *temp = NULL; + + _parse_line(f, (void **)&temp, len); + job = _find_job_record(job_list, temp->header, JOB_START); + if (job) { /* Hmmm... that's odd */ + printf("job->header.job_submit = %d", + (int)job->header.job_submit); + if(job->header.job_submit == 0) + _remove_job_record(job_list, job->header.jobnum); + else { + fprintf(stderr, + "Conflicting JOB_START for job %u at" + " line %d -- ignoring it\n", + job->header.jobnum, lc); + _destroy_filetxt_job_rec(temp); + return; + } + } + + job = temp; + job->show_full = show_full; + list_append(job_list, job); + job->job_start_seen = 1; + +} + +static void _process_step(List job_list, char *f[], int lc, + int show_full, int len, + sacct_parameters_t *params) +{ + filetxt_job_rec_t *job = NULL; + + filetxt_step_rec_t *step = NULL; + filetxt_step_rec_t *temp = NULL; + + _parse_line(f, (void **)&temp, len); + + job = _find_job_record(job_list, temp->header, JOB_STEP); + + if (temp->stepnum == -2) { + _destroy_filetxt_step_rec(temp); + return; + } + if (!job) { /* fake it for now */ + job = _create_filetxt_job_rec(temp->header); + job->jobname = xstrdup("(unknown)"); + if (params->opt_verbose > 1) + fprintf(stderr, + "Note: JOB_STEP record %u.%u preceded " + "JOB_START record at line %d\n", + temp->header.jobnum, temp->stepnum, lc); + } + job->show_full = show_full; + + if ((step = _find_step_record(job, temp->stepnum))) { + + if (temp->status == JOB_RUNNING) { + _destroy_filetxt_step_rec(temp); + return;/* if "R" record preceded by F or CD; unusual */ + } + if (step->status != JOB_RUNNING) { /* if not JOB_RUNNING */ + fprintf(stderr, + "Conflicting JOB_STEP record for " + "jobstep %u.%u at line %d " + "-- ignoring it\n", + step->header.jobnum, + step->stepnum, lc); + _destroy_filetxt_step_rec(temp); + return; + } + step->status = temp->status; + step->exitcode = temp->exitcode; + step->ntasks = temp->ntasks; + step->ncpus = temp->ncpus; + step->elapsed = temp->elapsed; + step->tot_cpu_sec = temp->tot_cpu_sec; + step->tot_cpu_usec = temp->tot_cpu_usec; + job->requid = temp->requid; + step->requid = temp->requid; + memcpy(&step->rusage, &temp->rusage, sizeof(struct rusage)); + memcpy(&step->sacct, &temp->sacct, sizeof(sacct_t)); + xfree(step->stepname); + step->stepname = xstrdup(temp->stepname); + step->end = temp->header.timestamp; + _destroy_filetxt_step_rec(temp); + goto got_step; + } + step = temp; + temp = NULL; + list_append(job->steps, step); + if(list_count(job->steps) > 1) + job->track_steps = 1; + if(job->header.timestamp == 0) + job->header.timestamp = step->header.timestamp; + job->job_step_seen = 1; + job->ntasks += step->ntasks; + if(!job->nodes || !strcmp(job->nodes, "(unknown)")) { + xfree(job->nodes); + job->nodes = xstrdup(step->nodes); + } + +got_step: + + + if (job->job_terminated_seen == 0) { /* If the job is still running, + this is the most recent + status */ + if ( job->exitcode == 0 ) + job->exitcode = step->exitcode; + job->status = JOB_RUNNING; + job->elapsed = step->header.timestamp - job->header.timestamp; + } +} + +static void _process_suspend(List job_list, char *f[], int lc, + int show_full, int len) +{ + filetxt_job_rec_t *job = NULL; + filetxt_job_rec_t *temp = NULL; + + _parse_line(f, (void **)&temp, len); + job = _find_job_record(job_list, temp->header, JOB_SUSPEND); + if (!job) { 
/* fake it for now */ + job = _create_filetxt_job_rec(temp->header); + job->jobname = xstrdup("(unknown)"); + } + + job->show_full = show_full; + if (job->status == JOB_SUSPENDED) + job->elapsed -= temp->elapsed; + + //job->header.timestamp = temp->header.timestamp; + job->status = temp->status; + _destroy_filetxt_job_rec(temp); +} + +static void _process_terminated(List job_list, char *f[], int lc, + int show_full, int len, + sacct_parameters_t *params) +{ + filetxt_job_rec_t *job = NULL; + filetxt_job_rec_t *temp = NULL; + + _parse_line(f, (void **)&temp, len); + job = _find_job_record(job_list, temp->header, JOB_TERMINATED); + if (!job) { /* fake it for now */ + job = _create_filetxt_job_rec(temp->header); + job->jobname = xstrdup("(unknown)"); + if (params->opt_verbose > 1) + fprintf(stderr, "Note: JOB_TERMINATED record for job " + "%u preceded " + "other job records at line %d\n", + temp->header.jobnum, lc); + } else if (job->job_terminated_seen) { + if (temp->status == JOB_NODE_FAIL) { + /* multiple node failures - extra TERMINATED records */ + if (params->opt_verbose > 1) + fprintf(stderr, + "Note: Duplicate JOB_TERMINATED " + "record (nf) for job %u at " + "line %d\n", + temp->header.jobnum, lc); + /* JOB_TERMINATED/NF records may be preceded + * by a JOB_TERMINATED/CA record; NF is much + * more interesting. + */ + job->status = temp->status; + goto finished; + } + + fprintf(stderr, + "Conflicting JOB_TERMINATED record (%s) for " + "job %u at line %d -- ignoring it\n", + job_state_string(temp->status), + job->header.jobnum, lc); + goto finished; + } + job->job_terminated_seen = 1; + job->elapsed = temp->elapsed; + job->end = temp->header.timestamp; + job->status = temp->status; + job->requid = temp->requid; + job->exitcode = temp->exitcode; + if(list_count(job->steps) > 1) + job->track_steps = 1; + job->show_full = show_full; + +finished: + _destroy_filetxt_job_rec(temp); +} + +extern List filetxt_jobacct_process_get_jobs(List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ + char line[BUFFER_SIZE]; + char *f[MAX_RECORD_FIELDS+1]; /* End list with null entry and, + possibly, more data than we + expected */ + char *fptr; + int i; + FILE *fd = NULL; + int lc = 0; + int rec_type = -1; + filetxt_job_rec_t *filetxt_job = NULL; + jobacct_selected_step_t *selected_step = NULL; + char *selected_part = NULL; + ListIterator itr = NULL; + int show_full = 0; + List ret_job_list = list_create(destroy_jobacct_job_rec); + List job_list = list_create(_destroy_filetxt_job_rec); + + if(slurmdbd_conf) { + params->opt_filein = slurm_get_accounting_storage_loc(); + } + + fd = _open_log_file(params->opt_filein); + + while (fgets(line, BUFFER_SIZE, fd)) { + lc++; + fptr = line; /* break the record into NULL- + terminated strings */ + for (i = 0; i < MAX_RECORD_FIELDS; i++) { + f[i] = fptr; + fptr = strstr(fptr, " "); + if (fptr == NULL) { + fptr = strstr(f[i], "\n"); + if (fptr) + *fptr = 0; + break; + } else + *fptr++ = 0; + } + f[++i] = 0; + + if(i < HEADER_LENGTH) { + continue; + } + + rec_type = atoi(f[F_RECTYPE]); + + if (list_count(selected_steps)) { + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if (strcmp(selected_step->job, f[F_JOB])) + continue; + /* job matches; does the step? 
*/ + if(selected_step->step == NULL) { + show_full = 1; + list_iterator_destroy(itr); + goto foundjob; + } else if (rec_type != JOB_STEP + || !strcmp(f[F_JOBSTEP], + selected_step->step)) { + list_iterator_destroy(itr); + goto foundjob; + } + } + list_iterator_destroy(itr); + continue; /* no match */ + } else { + show_full = 1; + } + foundjob: + + if (list_count(selected_parts)) { + itr = list_iterator_create(selected_parts); + while((selected_part = list_next(itr))) + if (!strcasecmp(f[F_PARTITION], + selected_part)) { + list_iterator_destroy(itr); + goto foundp; + } + list_iterator_destroy(itr); + continue; /* no match */ + } + foundp: + + if (params->opt_fdump) { + _do_fdump(f, lc); + continue; + } + + /* Build suitable tables with all the data */ + switch(rec_type) { + case JOB_START: + if(i < F_JOB_ACCOUNT) { + printf("Bad data on a Job Start\n"); + _show_rec(f); + } else + _process_start(job_list, f, lc, show_full, i); + break; + case JOB_STEP: + if(i < F_MAX_VSIZE) { + printf("Bad data on a Step entry\n"); + _show_rec(f); + } else + _process_step(job_list, f, lc, show_full, i, + params); + break; + case JOB_SUSPEND: + if(i < F_JOB_REQUID) { + printf("Bad data on a Suspend entry\n"); + _show_rec(f); + } else + _process_suspend(job_list, f, lc, + show_full, i); + break; + case JOB_TERMINATED: + if(i < F_JOB_REQUID) { + printf("Bad data on a Job Term\n"); + _show_rec(f); + } else + _process_terminated(job_list, f, lc, + show_full, i, params); + break; + default: + if (params->opt_verbose > 1) + fprintf(stderr, + "Invalid record at line %d of " + "input file\n", + lc); + if (params->opt_verbose > 2) + _show_rec(f); + break; + } + } + + if (ferror(fd)) { + perror(params->opt_filein); + exit(1); + } + fclose(fd); + + itr = list_iterator_create(job_list); + while((filetxt_job = list_next(itr))) { + list_append(ret_job_list, _create_jobacct_job_rec(filetxt_job)); + } + list_iterator_destroy(itr); + list_destroy(job_list); + + if(slurmdbd_conf) { + xfree(params->opt_filein); + } + return ret_job_list; +} + +extern void filetxt_jobacct_process_archive(List selected_parts, + sacct_parameters_t *params) +{ + char line[BUFFER_SIZE], + *f[EXPIRE_READ_LENGTH], + *fptr = NULL, + *logfile_name = NULL, + *old_logfile_name = NULL; + int file_err=0, + new_file, + i = 0; + expired_rec_t *exp_rec = NULL; + expired_rec_t *exp_rec2 = NULL; + List keep_list = list_create(_destroy_exp); + List exp_list = list_create(_destroy_exp); + List other_list = list_create(_destroy_exp); + struct stat statbuf; + mode_t prot = 0600; + uid_t uid; + gid_t gid; + FILE *expired_logfile = NULL, + *new_logfile = NULL; + FILE *fd = NULL; + int lc=0; + int rec_type = -1; + ListIterator itr = NULL; + ListIterator itr2 = NULL; + char *temp = NULL; + + /* Figure out our expiration date */ + time_t expiry; + + if(slurmdbd_conf) { + params->opt_filein = slurm_get_accounting_storage_loc(); + } + + expiry = time(NULL)-params->opt_expire; + if (params->opt_verbose) + fprintf(stderr, "Purging jobs completed prior to %d\n", + (int)expiry); + + /* Open the current or specified logfile, or quit */ + fd = _open_log_file(params->opt_filein); + if (stat(params->opt_filein, &statbuf)) { + perror("stat'ing logfile"); + goto finished; + } + if ((statbuf.st_mode & S_IFLNK) == S_IFLNK) { + fprintf(stderr, "%s is a symbolic link; --expire requires " + "a hard-linked file name\n", params->opt_filein); + goto finished; + } + if (!(statbuf.st_mode & S_IFREG)) { + fprintf(stderr, "%s is not a regular file; --expire " + "only works on accounting log 
files\n", + params->opt_filein); + goto finished; + } + prot = statbuf.st_mode & 0777; + gid = statbuf.st_gid; + uid = statbuf.st_uid; + old_logfile_name = _prefix_filename(params->opt_filein, ".old."); + if (stat(old_logfile_name, &statbuf)) { + if (errno != ENOENT) { + fprintf(stderr,"Error checking for %s: ", + old_logfile_name); + perror(""); + goto finished; + } + } else { + fprintf(stderr, "Warning! %s exists -- please remove " + "or rename it before proceeding\n", + old_logfile_name); + goto finished; + } + + /* create our initial buffer */ + while (fgets(line, BUFFER_SIZE, fd)) { + lc++; + fptr = line; /* break the record into NULL- + terminated strings */ + exp_rec = xmalloc(sizeof(expired_rec_t)); + exp_rec->line = xstrdup(line); + + for (i = 0; i < EXPIRE_READ_LENGTH; i++) { + f[i] = fptr; + fptr = strstr(fptr, " "); + if (fptr == NULL) + break; + else + *fptr++ = 0; + } + + exp_rec->job = atoi(f[F_JOB]); + exp_rec->job_submit = atoi(f[F_JOB_SUBMIT]); + + rec_type = atoi(f[F_RECTYPE]); + /* Odd, but complain some other time */ + if (rec_type == JOB_TERMINATED) { + if (expiry < atoi(f[F_TIMESTAMP])) { + list_append(keep_list, exp_rec); + continue; + } + if (list_count(selected_parts)) { + itr = list_iterator_create(selected_parts); + while((temp = list_next(itr))) + if(!strcasecmp(f[F_PARTITION], temp)) + break; + list_iterator_destroy(itr); + if(!temp) { + list_append(keep_list, exp_rec); + continue; + } /* no match */ + } + list_append(exp_list, exp_rec); + if (params->opt_verbose > 2) + fprintf(stderr, "Selected: %8d %d\n", + exp_rec->job, + (int)exp_rec->job_submit); + } else { + list_append(other_list, exp_rec); + } + } + if (!list_count(exp_list)) { + printf("No job records were purged.\n"); + goto finished; + } + logfile_name = xmalloc(strlen(params->opt_filein)+sizeof(".expired")); + sprintf(logfile_name, "%s.expired", params->opt_filein); + new_file = stat(logfile_name, &statbuf); + if ((expired_logfile = fopen(logfile_name, "a"))==NULL) { + fprintf(stderr, "Error while opening %s", + logfile_name); + perror(""); + xfree(logfile_name); + goto finished; + } + + if (new_file) { /* By default, the expired file looks like the log */ + chmod(logfile_name, prot); + chown(logfile_name, uid, gid); + } + xfree(logfile_name); + + logfile_name = _prefix_filename(params->opt_filein, ".new."); + if ((new_logfile = fopen(logfile_name, "w"))==NULL) { + fprintf(stderr, "Error while opening %s", + logfile_name); + perror(""); + fclose(expired_logfile); + goto finished; + } + chmod(logfile_name, prot); /* preserve file protection */ + chown(logfile_name, uid, gid); /* and ownership */ + /* Use line buffering to allow us to safely write + * to the log file at the same time as slurmctld. 
*/ + if (setvbuf(new_logfile, NULL, _IOLBF, 0)) { + perror("setvbuf()"); + fclose(expired_logfile); + goto finished2; + } + + list_sort(exp_list, (ListCmpF) _cmp_jrec); + list_sort(keep_list, (ListCmpF) _cmp_jrec); + + if (params->opt_verbose > 2) { + fprintf(stderr, "--- contents of exp_list ---"); + itr = list_iterator_create(exp_list); + while((exp_rec = list_next(itr))) { + if (!(i%5)) + fprintf(stderr, "\n"); + else + fprintf(stderr, "\t"); + fprintf(stderr, "%d", exp_rec->job); + } + fprintf(stderr, "\n---- end of exp_list ---\n"); + list_iterator_destroy(itr); + } + /* write the expired file */ + itr = list_iterator_create(exp_list); + while((exp_rec = list_next(itr))) { + itr2 = list_iterator_create(other_list); + while((exp_rec2 = list_next(itr2))) { + if((exp_rec2->job != exp_rec->job) + || (exp_rec2->job_submit != exp_rec->job_submit)) + continue; + if (fputs(exp_rec2->line, expired_logfile)<0) { + perror("writing expired_logfile"); + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + fclose(expired_logfile); + goto finished2; + } + list_remove(itr2); + _destroy_exp(exp_rec2); + } + list_iterator_destroy(itr2); + if (fputs(exp_rec->line, expired_logfile)<0) { + perror("writing expired_logfile"); + list_iterator_destroy(itr); + fclose(expired_logfile); + goto finished2; + } + } + list_iterator_destroy(itr); + fclose(expired_logfile); + + /* write the new log */ + itr = list_iterator_create(keep_list); + while((exp_rec = list_next(itr))) { + itr2 = list_iterator_create(other_list); + while((exp_rec2 = list_next(itr2))) { + if(exp_rec2->job != exp_rec->job) + continue; + if (fputs(exp_rec2->line, new_logfile)<0) { + perror("writing keep_logfile"); + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + goto finished2; + } + list_remove(itr2); + _destroy_exp(exp_rec2); + } + list_iterator_destroy(itr2); + if (fputs(exp_rec->line, new_logfile)<0) { + perror("writing keep_logfile"); + list_iterator_destroy(itr); + goto finished2; + } + } + list_iterator_destroy(itr); + + if (rename(params->opt_filein, old_logfile_name)) { + perror("renaming logfile to .old."); + goto finished2; + } + if (rename(logfile_name, params->opt_filein)) { + perror("renaming new logfile"); + /* undo it? 
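+		 * (Editorial note: try to move the ".old." copy back to the
+		 * original name so an accounting log remains in place; if even
+		 * that rename fails, tell the administrator where the data
+		 * ended up.)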
*/ + if (!rename(old_logfile_name, params->opt_filein)) + fprintf(stderr, "Please correct the problem " + "and try again"); + else + fprintf(stderr, "SEVERE ERROR: Current accounting " + "log may have been renamed %s;\n" + "please rename it to \"%s\" if necessary, " + "and try again\n", + old_logfile_name, params->opt_filein); + goto finished2; + } + fflush(new_logfile); /* Flush the buffers before forking */ + fflush(fd); + + file_err = slurm_reconfigure(); + if (file_err) { + file_err = 1; + fprintf(stderr, "Error: Attempt to reconfigure " + "SLURM failed.\n"); + if (rename(old_logfile_name, params->opt_filein)) { + perror("renaming logfile from .old."); + goto finished2; + } + + } + if (fseek(fd, 0, SEEK_CUR)) { /* clear EOF */ + perror("looking for late-arriving records"); + goto finished2; + } + while (fgets(line, BUFFER_SIZE, fd)) { + if (fputs(line, new_logfile)<0) { + perror("writing final records"); + goto finished2; + } + } + + printf("%d jobs expired.\n", list_count(exp_list)); +finished2: + fclose(new_logfile); + if (!file_err) { + if (unlink(old_logfile_name) == -1) + error("Unable to unlink old logfile %s: %m", + old_logfile_name); + } +finished: + if(slurmdbd_conf) { + xfree(params->opt_filein); + } + fclose(fd); + list_destroy(exp_list); + list_destroy(keep_list); + list_destroy(other_list); + xfree(old_logfile_name); + xfree(logfile_name); +} diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h new file mode 100644 index 000000000..a5ba22def --- /dev/null +++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h @@ -0,0 +1,55 @@ +/*****************************************************************************\ + * filetxt_jobacct_process.h - functions the processing of + * information from the filetxt jobacct + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ + +#ifndef _HAVE_FILETXT_JOBACCT_PROCESS_H +#define _HAVE_FILETXT_JOBACCT_PROCESS_H + +#include "src/common/jobacct_common.h" +#include "src/slurmdbd/read_config.h" + +extern List filetxt_jobacct_process_get_jobs(List selected_steps, + List selected_parts, + sacct_parameters_t *params); +extern void filetxt_jobacct_process_archive(List selected_parts, + sacct_parameters_t *params); + +#endif diff --git a/src/plugins/accounting_storage/gold/Makefile.am b/src/plugins/accounting_storage/gold/Makefile.am new file mode 100644 index 000000000..cd5495140 --- /dev/null +++ b/src/plugins/accounting_storage/gold/Makefile.am @@ -0,0 +1,31 @@ +# Makefile for accounting_storage/gold plugin + +AUTOMAKE_OPTIONS = foreign + +if HAVE_OPENSSL +gold_lib = accounting_storage_gold.la +else +gold_lib = +endif + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = $(gold_lib) +if HAVE_OPENSSL +accounting_storage_gold_la_SOURCES = accounting_storage_gold.c +accounting_storage_gold_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) + +# Add libcommon to provide some symbols that are not +# available in slurmctld (create_jobacct_job_rec) + +accounting_storage_gold_la_LIBADD = \ + $(top_builddir)/src/database/libslurm_gold.la +accounting_storage_gold_la_DEPENDENCIES = \ + $(top_builddir)/src/database/libslurm_gold.la + +else +EXTRA_accounting_storage_gold_la_SOURCES = accounting_storage_gold.c +endif + diff --git a/src/plugins/accounting_storage/gold/Makefile.in b/src/plugins/accounting_storage/gold/Makefile.in new file mode 100644 index 000000000..c70269ee4 --- /dev/null +++ b/src/plugins/accounting_storage/gold/Makefile.in @@ -0,0 +1,575 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# Makefile for accounting_storage/gold plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/accounting_storage/gold +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +am__accounting_storage_gold_la_SOURCES_DIST = \ + accounting_storage_gold.c +@HAVE_OPENSSL_TRUE@am_accounting_storage_gold_la_OBJECTS = \ +@HAVE_OPENSSL_TRUE@ accounting_storage_gold.lo +am__EXTRA_accounting_storage_gold_la_SOURCES_DIST = \ + accounting_storage_gold.c +accounting_storage_gold_la_OBJECTS = \ + $(am_accounting_storage_gold_la_OBJECTS) +accounting_storage_gold_la_LINK = $(LIBTOOL) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(AM_CFLAGS) $(CFLAGS) $(accounting_storage_gold_la_LDFLAGS) \ + $(LDFLAGS) -o $@ +@HAVE_OPENSSL_TRUE@am_accounting_storage_gold_la_rpath = -rpath \ +@HAVE_OPENSSL_TRUE@ $(pkglibdir) +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) 
$(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(accounting_storage_gold_la_SOURCES) \ + $(EXTRA_accounting_storage_gold_la_SOURCES) +DIST_SOURCES = $(am__accounting_storage_gold_la_SOURCES_DIST) \ + $(am__EXTRA_accounting_storage_gold_la_SOURCES_DIST) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ 
+am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +@HAVE_OPENSSL_FALSE@gold_lib = +@HAVE_OPENSSL_TRUE@gold_lib = accounting_storage_gold.la +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = $(gold_lib) +@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_SOURCES = accounting_storage_gold.c +@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) + +# Add libcommon to provide some symbols that are not +# available in slurmctld (create_jobacct_job_rec) +@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_LIBADD = \ +@HAVE_OPENSSL_TRUE@ $(top_builddir)/src/database/libslurm_gold.la + +@HAVE_OPENSSL_TRUE@accounting_storage_gold_la_DEPENDENCIES = \ +@HAVE_OPENSSL_TRUE@ $(top_builddir)/src/database/libslurm_gold.la + +@HAVE_OPENSSL_FALSE@EXTRA_accounting_storage_gold_la_SOURCES = accounting_storage_gold.c +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/accounting_storage/gold/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/accounting_storage/gold/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +accounting_storage_gold.la: $(accounting_storage_gold_la_OBJECTS) $(accounting_storage_gold_la_DEPENDENCIES) + $(accounting_storage_gold_la_LINK) $(am_accounting_storage_gold_la_rpath) $(accounting_storage_gold_la_OBJECTS) $(accounting_storage_gold_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_gold.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + 
+mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/accounting_storage/gold/accounting_storage_gold.c b/src/plugins/accounting_storage/gold/accounting_storage_gold.c new file mode 100644 index 000000000..1697520cd --- /dev/null +++ b/src/plugins/accounting_storage/gold/accounting_storage_gold.c @@ -0,0 +1,3259 @@ +/*****************************************************************************\ + * accounting_storage_gold.c - accounting interface to gold. + * + * $Id: accounting_gold.c 13061 2008-01-22 21:23:56Z da $ + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include <stdlib.h> +#include <ctype.h> +#include <sys/stat.h> +#include <pwd.h> + + +#include "src/common/xmalloc.h" +#include "src/common/list.h" +#include "src/common/xstring.h" +#include "src/common/uid.h" +#include <src/common/parse_time.h> + +#include "src/slurmctld/slurmctld.h" +#include "src/slurmd/slurmd/slurmd.h" +#include "src/slurmdbd/read_config.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/jobacct_common.h" + +#include "src/database/gold_interface.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobacct/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. 
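+ * (Editorial note: as an example of the naming scheme described above, the
+ * plugin_type declared just below is "accounting_storage/gold", where
+ * "accounting_storage" is the application and "gold" is the method; the
+ * plugin_version of 100 presumably encodes version 1.0 under the
+ * major-times-100 convention suggested here.)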
+ */ +const char plugin_name[] = "Accounting storage GOLD plugin"; +const char plugin_type[] = "accounting_storage/gold"; +const uint32_t plugin_version = 100; + +static List local_association_list = NULL; + +static int _add_edit_job(struct job_record *job_ptr, gold_object_t action); +static int _check_for_job(uint32_t jobid, time_t submit); +static List _get_association_list_from_response(gold_response_t *gold_response); +/* static int _get_cluster_accounting_list_from_response( */ +/* gold_response_t *gold_response, */ +/* acct_cluster_rec_t *cluster_rec); */ +/* static int _get_acct_accounting_list_from_response( */ +/* gold_response_t *gold_response, */ +/* acct_association_rec_t *account_rec); */ +static List _get_user_list_from_response(gold_response_t *gold_response); +static List _get_acct_list_from_response(gold_response_t *gold_response); +static List _get_cluster_list_from_response(gold_response_t *gold_response); +static int _remove_association_accounting(List id_list); + + +static int _add_edit_job(struct job_record *job_ptr, gold_object_t action) +{ + gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB, + action); + gold_response_t *gold_response = NULL; + char tmp_buff[50]; + int rc = SLURM_ERROR; + char *jname = NULL; + char *nodes = "(null)"; + + if(!gold_request) + return rc; + + if (job_ptr->nodes && job_ptr->nodes[0]) + nodes = job_ptr->nodes; + + +//info("total procs is %d", job_ptr->total_procs); + if(action == GOLD_ACTION_CREATE) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->job_id); + gold_request_add_assignment(gold_request, "JobId", tmp_buff); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + (int)job_ptr->details->submit_time); + gold_request_add_assignment(gold_request, "SubmitTime", + tmp_buff); + } else if (action == GOLD_ACTION_MODIFY) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->job_id); + gold_request_add_condition(gold_request, "JobId", tmp_buff, + GOLD_OPERATOR_NONE, 0); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + (int)job_ptr->details->submit_time); + gold_request_add_condition(gold_request, "SubmitTime", + tmp_buff, + GOLD_OPERATOR_NONE, 0); + } else { + destroy_gold_request(gold_request); + error("_add_edit_job: bad action given %d", action); + return rc; + } + + if (job_ptr->name && job_ptr->name[0]) { + int i; + jname = xmalloc(strlen(job_ptr->name) + 1); + for (i=0; job_ptr->name[i]; i++) { + if (isalnum(job_ptr->name[i])) + jname[i] = job_ptr->name[i]; + else + jname[i] = '_'; + } + } else + jname = xstrdup("allocation"); + + gold_request_add_assignment(gold_request, "JobName", jname); + xfree(jname); + + gold_request_add_assignment(gold_request, "Partition", + job_ptr->partition); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + job_ptr->total_procs); + gold_request_add_assignment(gold_request, "RequestedCPUCount", + tmp_buff); + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + job_ptr->total_procs); + gold_request_add_assignment(gold_request, "AllocatedCPUCount", + tmp_buff); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + (int)job_ptr->details->begin_time); + gold_request_add_assignment(gold_request, "EligibleTime", + tmp_buff); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->assoc_id); + gold_request_add_assignment(gold_request, "GoldAccountId", tmp_buff); + + gold_request_add_assignment(gold_request, "NodeList", nodes); + + if(job_ptr->job_state >= JOB_COMPLETE) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + (int)job_ptr->end_time); + gold_request_add_assignment(gold_request, "EndTime", + 
tmp_buff); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + (int)job_ptr->exit_code); + gold_request_add_assignment(gold_request, "ExitCode", + tmp_buff); + } + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + (int)job_ptr->start_time); + gold_request_add_assignment(gold_request, "StartTime", tmp_buff); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + job_ptr->job_state & (~JOB_COMPLETING)); + gold_request_add_assignment(gold_request, "State", tmp_buff); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("_add_edit_job: no response received"); + return rc; + } + + if(!gold_response->rc) + rc = SLURM_SUCCESS; + else { + if(gold_response->rc == 720) + error("gold_response has non-zero rc(%d): " + "NOT PRINTING MESSAGE: this was a parser error", + gold_response->rc); + else + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + } + destroy_gold_response(gold_response); + + return rc; +} + +static int _check_for_job(uint32_t jobid, time_t submit) +{ + gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB, + GOLD_ACTION_QUERY); + gold_response_t *gold_response = NULL; + char tmp_buff[50]; + int rc = 0; + + if(!gold_request) + return rc; + + gold_request_add_selection(gold_request, "JobId"); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", jobid); + gold_request_add_condition(gold_request, "JobId", tmp_buff, + GOLD_OPERATOR_NONE, 0); + + snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)submit); + gold_request_add_condition(gold_request, "SubmitTime", tmp_buff, + GOLD_OPERATOR_NONE, 0); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("_check_for_job: no response received"); + return 0; + } + + if(gold_response->entry_cnt > 0) + rc = 1; + destroy_gold_response(gold_response); + + return rc; +} + +static List _get_association_list_from_response(gold_response_t *gold_response) +{ + ListIterator itr = NULL; + ListIterator itr2 = NULL; + List association_list = NULL; + acct_association_rec_t *acct_rec = NULL; + gold_response_entry_t *resp_entry = NULL; + gold_name_value_t *name_val = NULL; + + association_list = list_create(destroy_acct_association_rec); + + itr = list_iterator_create(gold_response->entries); + while((resp_entry = list_next(itr))) { + acct_rec = xmalloc(sizeof(acct_association_rec_t)); + + itr2 = list_iterator_create(resp_entry->name_val); + while((name_val = list_next(itr2))) { + if(!strcmp(name_val->name, "Id")) { + acct_rec->id = + atoi(name_val->value); + } else if(!strcmp(name_val->name, + "FairShare")) { + acct_rec->fairshare = + atoi(name_val->value); + } else if(!strcmp(name_val->name, + "MaxJobs")) { + acct_rec->max_jobs = + atoi(name_val->value); + } else if(!strcmp(name_val->name, + "MaxNodesPerJob")) { + acct_rec->max_nodes_per_job = + atoi(name_val->value); + } else if(!strcmp(name_val->name, + "MaxWallDurationPerJob")) { + acct_rec->max_wall_duration_per_job = + atoi(name_val->value); + } else if(!strcmp(name_val->name, + "MaxProcSecondsPerJob")) { + acct_rec->max_cpu_secs_per_job = + atoi(name_val->value); + } else if(!strcmp(name_val->name, + "User")) { + if(strcmp(name_val->name, "NONE")) + acct_rec->user = + xstrdup(name_val->value); + } else if(!strcmp(name_val->name, + "Project")) { + acct_rec->acct = + xstrdup(name_val->value); + } else if(!strcmp(name_val->name, + "Machine")) { + acct_rec->cluster = + xstrdup(name_val->value); + } else { 
+ error("Unknown name val of '%s' = '%s'", + name_val->name, name_val->value); + } + } + list_iterator_destroy(itr2); + list_append(association_list, acct_rec); + } + list_iterator_destroy(itr); + + return association_list; +} + +/* static int _get_cluster_accounting_list_from_response( */ +/* gold_response_t *gold_response, */ +/* acct_cluster_rec_t *cluster_rec) */ +/* { */ +/* ListIterator itr = NULL; */ +/* ListIterator itr2 = NULL; */ +/* cluster_accounting_rec_t *clusteracct_rec = NULL; */ +/* gold_response_entry_t *resp_entry = NULL; */ +/* gold_name_value_t *name_val = NULL; */ + +/* if(gold_response->entry_cnt <= 0) { */ +/* debug2("_get_list_from_response: No entries given"); */ +/* return SLURM_ERROR; */ +/* } */ +/* if(!cluster_rec->accounting_list) */ +/* cluster_rec->accounting_list = */ +/* list_create(destroy_cluster_accounting_rec); */ + +/* itr = list_iterator_create(gold_response->entries); */ +/* while((resp_entry = list_next(itr))) { */ +/* clusteracct_rec = xmalloc(sizeof(cluster_accounting_rec_t)); */ +/* itr2 = list_iterator_create(resp_entry->name_val); */ +/* while((name_val = list_next(itr2))) { */ +/* if(!strcmp(name_val->name, "CPUCount")) { */ +/* clusteracct_rec->cpu_count = */ +/* atoi(name_val->value); */ +/* } else if(!strcmp(name_val->name, */ +/* "PeriodStart")) { */ +/* clusteracct_rec->period_start = */ +/* atoi(name_val->value); */ +/* } else if(!strcmp(name_val->name, */ +/* "IdleCPUSeconds")) { */ +/* clusteracct_rec->idle_secs = */ +/* atoi(name_val->value); */ +/* } else if(!strcmp(name_val->name, */ +/* "DownCPUSeconds")) { */ +/* clusteracct_rec->down_secs = */ +/* atoi(name_val->value); */ +/* } else if(!strcmp(name_val->name, */ +/* "AllocatedCPUSeconds")) { */ +/* clusteracct_rec->alloc_secs = */ +/* atoi(name_val->value); */ +/* } else if(!strcmp(name_val->name, */ +/* "ReservedCPUSeconds")) { */ +/* clusteracct_rec->resv_secs = */ +/* atoi(name_val->value); */ +/* } else { */ +/* error("Unknown name val of '%s' = '%s'", */ +/* name_val->name, name_val->value); */ +/* } */ +/* } */ +/* list_iterator_destroy(itr2); */ +/* list_append(cluster_rec->accounting_list, clusteracct_rec); */ +/* } */ +/* list_iterator_destroy(itr); */ + +/* return SLURM_SUCCESS; */ +/* } */ + +/* static int _get_acct_accounting_list_from_response( */ +/* gold_response_t *gold_response, */ +/* acct_association_rec_t *acct_rec) */ +/* { */ +/* ListIterator itr = NULL; */ +/* ListIterator itr2 = NULL; */ +/* acct_accounting_rec_t *accounting_rec = NULL; */ +/* gold_response_entry_t *resp_entry = NULL; */ +/* gold_name_value_t *name_val = NULL; */ + +/* if(!acct_rec->accounting_list) */ +/* acct_rec->accounting_list = */ +/* list_create(destroy_acct_accounting_rec); */ + +/* itr = list_iterator_create(gold_response->entries); */ +/* while((resp_entry = list_next(itr))) { */ +/* accounting_rec = xmalloc(sizeof(acct_accounting_rec_t)); */ + +/* itr2 = list_iterator_create(resp_entry->name_val); */ +/* while((name_val = list_next(itr2))) { */ +/* if(!strcmp(name_val->name, "PeriodStart")) { */ +/* accounting_rec->period_start = */ +/* atoi(name_val->value); */ +/* } else if(!strcmp(name_val->name, */ +/* "AllocatedCPUSeconds")) { */ +/* accounting_rec->alloc_secs = */ +/* atoi(name_val->value); */ +/* } else { */ +/* error("Unknown name val of '%s' = '%s'", */ +/* name_val->name, name_val->value); */ +/* } */ +/* } */ +/* list_iterator_destroy(itr2); */ +/* list_append(acct_rec->accounting_list, accounting_rec); */ +/* } */ +/* list_iterator_destroy(itr); */ + +/* return 
SLURM_SUCCESS; */ + +/* } */ + +static List _get_user_list_from_response(gold_response_t *gold_response) +{ + ListIterator itr = NULL; + ListIterator itr2 = NULL; + List user_list = NULL; + acct_user_rec_t *user_rec = NULL; + gold_response_entry_t *resp_entry = NULL; + gold_name_value_t *name_val = NULL; + + user_list = list_create(destroy_acct_user_rec); + + itr = list_iterator_create(gold_response->entries); + while((resp_entry = list_next(itr))) { + user_rec = xmalloc(sizeof(acct_user_rec_t)); + + itr2 = list_iterator_create(resp_entry->name_val); + while((name_val = list_next(itr2))) { + if(!strcmp(name_val->name, "Name")) { + user_rec->name = + xstrdup(name_val->value); + } else if(!strcmp(name_val->name, "Expedite")) { + user_rec->qos = + atoi(name_val->value)+1; + } else if(!strcmp(name_val->name, "DefaultProject")) { + user_rec->default_acct = + xstrdup(name_val->value); + } else { + error("Unknown name val of '%s' = '%s'", + name_val->name, name_val->value); + } + } + list_iterator_destroy(itr2); + list_append(user_list, user_rec); + } + list_iterator_destroy(itr); + + return user_list; +} + +static List _get_acct_list_from_response(gold_response_t *gold_response) +{ + ListIterator itr = NULL; + ListIterator itr2 = NULL; + List acct_list = NULL; + acct_account_rec_t *acct_rec = NULL; + gold_response_entry_t *resp_entry = NULL; + gold_name_value_t *name_val = NULL; + + acct_list = list_create(destroy_acct_account_rec); + + itr = list_iterator_create(gold_response->entries); + while((resp_entry = list_next(itr))) { + acct_rec = xmalloc(sizeof(acct_account_rec_t)); + + itr2 = list_iterator_create(resp_entry->name_val); + while((name_val = list_next(itr2))) { + if(!strcmp(name_val->name, "Expedite")) { + acct_rec->qos = + atoi(name_val->value)+1; + } else if(!strcmp(name_val->name, + "Name")) { + acct_rec->name = + xstrdup(name_val->value); + } else if(!strcmp(name_val->name, + "Organization")) { + acct_rec->organization = + xstrdup(name_val->value); + } else if(!strcmp(name_val->name, + "Description")) { + acct_rec->description = + xstrdup(name_val->value); + } else { + error("Unknown name val of '%s' = '%s'", + name_val->name, name_val->value); + } + } + list_iterator_destroy(itr2); + list_append(acct_list, acct_rec); + } + list_iterator_destroy(itr); + + return acct_list; +} + +static List _get_cluster_list_from_response(gold_response_t *gold_response) +{ + ListIterator itr = NULL; + ListIterator itr2 = NULL; + List cluster_list = NULL; + acct_cluster_rec_t *cluster_rec = NULL; + gold_response_entry_t *resp_entry = NULL; + gold_name_value_t *name_val = NULL; + + cluster_list = list_create(destroy_acct_cluster_rec); + + itr = list_iterator_create(gold_response->entries); + while((resp_entry = list_next(itr))) { + cluster_rec = xmalloc(sizeof(acct_cluster_rec_t)); + + itr2 = list_iterator_create(resp_entry->name_val); + while((name_val = list_next(itr2))) { + if(!strcmp(name_val->name, + "Name")) { + cluster_rec->name = + xstrdup(name_val->value); + } else { + error("Unknown name val of '%s' = '%s'", + name_val->name, name_val->value); + } + } + list_iterator_destroy(itr2); + list_append(cluster_list, cluster_rec); + } + list_iterator_destroy(itr); + + return cluster_list; +} + +static int _remove_association_accounting(List id_list) +{ + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + int set = 0; + ListIterator itr = NULL; + + gold_request = create_gold_request(GOLD_OBJECT_ACCT_HOUR_USAGE, + 
GOLD_ACTION_DELETE); + if(!gold_request) { + error("couldn't create gold_request"); + rc = SLURM_ERROR; + return rc; + } + + if(id_list && list_count(id_list)) { + itr = list_iterator_create(id_list); + if(list_count(id_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Acct", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + gold_response = get_gold_response(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_associations: " + "no response received"); + destroy_gold_request(gold_request); + rc = SLURM_ERROR; + return rc; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_request(gold_request); + destroy_gold_response(gold_response); + rc = SLURM_ERROR; + return rc; + } + + destroy_gold_response(gold_response); + + gold_request->object = GOLD_OBJECT_ACCT_DAY_USAGE; + gold_response = get_gold_response(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_associations: " + "no response received"); + destroy_gold_request(gold_request); + rc = SLURM_ERROR; + return rc; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_request(gold_request); + destroy_gold_response(gold_response); + rc = SLURM_ERROR; + return rc; + } + destroy_gold_response(gold_response); + + gold_request->object = GOLD_OBJECT_ACCT_MONTH_USAGE; + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_associations: " + "no response received"); + destroy_gold_request(gold_request); + rc = SLURM_ERROR; + return rc; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + rc = SLURM_ERROR; + } + + destroy_gold_response(gold_response); + + + return rc; +} + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + char *keyfile = NULL; + char *host = NULL; + uint32_t port = 0; + struct stat statbuf; + + if(!(keyfile = slurm_get_accounting_storage_pass()) + || strlen(keyfile) < 1) { + keyfile = xstrdup("/etc/gold/auth_key"); + debug2("No keyfile specified with AcctStoragePass, " + "gold using default %s", keyfile); + } + + + if(stat(keyfile, &statbuf)) { + fatal("Can't stat key file %s. 
" + "To run acct_storage/gold you have to set " + "your gold keyfile as " + "AcctStoragePass in your slurm.conf", keyfile); + } + + + if(!(host = slurm_get_accounting_storage_host())) { + host = xstrdup("localhost"); + debug2("No host specified with AcctStorageHost, " + "gold using default %s", host); + } + + if(!(port = slurm_get_accounting_storage_port())) { + port = 7112; + debug2("No port specified with AcctStoragePort, " + "gold using default %u", port); + } + + debug2("connecting to gold with keyfile='%s' for %s(%d)", + keyfile, host, port); + + init_gold(keyfile, host, port); + + xfree(keyfile); + xfree(host); + + verbose("%s loaded", plugin_name); + return SLURM_SUCCESS; +} + +extern int fini ( void ) +{ + if(local_association_list) + list_destroy(local_association_list); + fini_gold(); + return SLURM_SUCCESS; +} + +extern void * acct_storage_p_get_connection(bool make_agent, bool rollback) +{ + return NULL; +} + +extern int acct_storage_p_close_connection(void **db_conn) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_commit(void *db_conn, bool commit) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_users(void *db_conn, + List user_list) +{ + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + acct_user_rec_t *object = NULL; + char tmp_buff[50]; + + itr = list_iterator_create(user_list); + while((object = list_next(itr))) { + if(!object->name || !object->default_acct) { + error("We need a user name and " + "default acct to add."); + rc = SLURM_ERROR; + continue; + } + gold_request = create_gold_request(GOLD_OBJECT_USER, + GOLD_ACTION_CREATE); + if(!gold_request) { + error("couldn't create gold_request"); + rc = SLURM_ERROR; + break; + } + gold_request_add_assignment(gold_request, "Name", + object->name); + gold_request_add_assignment(gold_request, "DefaultProject", + object->default_acct); + + if(object->qos != ACCT_QOS_NOTSET) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + object->qos-1); + gold_request_add_assignment(gold_request, "Expedite", + tmp_buff); + } + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_add_users: " + "no response received"); + rc = SLURM_ERROR; + break; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + rc = SLURM_ERROR; + break; + } + destroy_gold_response(gold_response); + } + list_iterator_destroy(itr); + + return rc; +} + +extern int acct_storage_p_add_coord(void *db_conn, + char *acct, + acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_accts(void *db_conn, + List acct_list) +{ + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + acct_account_rec_t *object = NULL; + char tmp_buff[50]; + + itr = list_iterator_create(acct_list); + while((object = list_next(itr))) { + if(!object->name || !object->description + || !object->organization) { + error("We need a acct name, description, and " + "organization to add one."); + rc = SLURM_ERROR; + continue; + } + gold_request = create_gold_request(GOLD_OBJECT_PROJECT, + GOLD_ACTION_CREATE); + if(!gold_request) { + error("couldn't create gold_request"); + rc = SLURM_ERROR; + break; + } + gold_request_add_assignment(gold_request, "Name", + object->name); + 
gold_request_add_assignment(gold_request, "Description", + object->description); + gold_request_add_assignment(gold_request, "Organization", + object->organization); + if(object->qos != ACCT_QOS_NOTSET) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + object->qos-1); + gold_request_add_assignment(gold_request, "Expedite", + tmp_buff); + } + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_add_accts: " + "no response received"); + rc = SLURM_ERROR; + break; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + rc = SLURM_ERROR; + break; + } + destroy_gold_response(gold_response); + } + list_iterator_destroy(itr); + + return rc; +} + +extern int acct_storage_p_add_clusters(void *db_conn, + List cluster_list) +{ + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + acct_cluster_rec_t *object = NULL; + + itr = list_iterator_create(cluster_list); + while((object = list_next(itr))) { + if(!object->name) { + error("We need a cluster name to add."); + rc = SLURM_ERROR; + continue; + } + gold_request = create_gold_request(GOLD_OBJECT_MACHINE, + GOLD_ACTION_CREATE); + if(!gold_request) { + error("couldn't create gold_request"); + rc = SLURM_ERROR; + break; + } + gold_request_add_assignment(gold_request, "Name", + object->name); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_add_clusters: " + "no response received"); + rc = SLURM_ERROR; + break; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + rc = SLURM_ERROR; + break; + } + destroy_gold_response(gold_response); + } + list_iterator_destroy(itr); + + return rc; +} + +extern int acct_storage_p_add_associations(void *db_conn, + List association_list) +{ + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + acct_association_rec_t *object = NULL; + char tmp_buff[50]; + + itr = list_iterator_create(association_list); + while((object = list_next(itr))) { + if(!object->cluster || !object->acct) { + error("We need a association cluster and " + "acct to add one."); + rc = SLURM_ERROR; + continue; + } + gold_request = create_gold_request(GOLD_OBJECT_ACCT, + GOLD_ACTION_CREATE); + if(!gold_request) { + error("couldn't create gold_request"); + rc = SLURM_ERROR; + break; + } + if(object->user) { + gold_request_add_assignment(gold_request, "User", + object->user); + snprintf(tmp_buff, sizeof(tmp_buff), + "%s on %s for %s", + object->acct, + object->cluster, + object->user); + } else if(object->parent_acct) + snprintf(tmp_buff, sizeof(tmp_buff), + "%s of %s on %s", + object->acct, + object->parent_acct, + object->cluster); + else + snprintf(tmp_buff, sizeof(tmp_buff), + "%s on %s", + object->acct, + object->cluster); + + gold_request_add_assignment(gold_request, "Name", tmp_buff); + + gold_request_add_assignment(gold_request, "Project", + object->acct); + gold_request_add_assignment(gold_request, "Machine", + object->cluster); + + if(object->fairshare) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + object->fairshare); + 
gold_request_add_assignment(gold_request, "FairShare", + tmp_buff); + } + + if(object->max_jobs) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + object->max_jobs); + gold_request_add_assignment(gold_request, "MaxJobs", + tmp_buff); + } + + if(object->max_nodes_per_job) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + object->max_nodes_per_job); + gold_request_add_assignment(gold_request, + "MaxNodesPerJob", + tmp_buff); + } + + if(object->max_wall_duration_per_job) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + object->max_wall_duration_per_job); + gold_request_add_assignment(gold_request, + "MaxWallDurationPerJob", + tmp_buff); + } + + if(object->max_cpu_secs_per_job) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + object->max_cpu_secs_per_job); + gold_request_add_assignment(gold_request, + "MaxProcSecondsPerJob", + tmp_buff); + } + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_add_associations: " + "no response received"); + rc = SLURM_ERROR; + break; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + rc = SLURM_ERROR; + break; + } + destroy_gold_response(gold_response); + } + list_iterator_destroy(itr); + + return rc; +} + +extern int acct_storage_p_get_assoc_id(void *db_conn, + acct_association_rec_t *assoc) +{ + ListIterator itr = NULL; + acct_association_rec_t * found_assoc = NULL; + acct_association_rec_t * ret_assoc = NULL; + + if(!local_association_list) + local_association_list = acct_storage_g_get_associations(NULL, + NULL); + + if((!assoc->cluster && !assoc->acct) && !assoc->id) { + error("acct_storage_p_get_assoc_id: " + "You need to supply a cluster and account name to get " + "an association."); + return SLURM_ERROR; + } + + itr = list_iterator_create(local_association_list); + while((found_assoc = list_next(itr))) { + if(assoc->id) { + if(assoc->id == found_assoc->id) { + ret_assoc = found_assoc; + break; + } + continue; + } else { + if((!found_assoc->acct + || strcasecmp(assoc->acct, + found_assoc->acct)) + || (!assoc->cluster + || strcasecmp(assoc->cluster, + found_assoc->cluster)) + || (assoc->user + && (!found_assoc->user + || strcasecmp(assoc->user, + found_assoc->user))) + || (!assoc->user && found_assoc->user + && strcasecmp("none", + found_assoc->user))) + continue; + if(assoc->partition + && (!assoc->partition + || strcasecmp(assoc->partition, + found_assoc->partition))) { + ret_assoc = found_assoc; + continue; + } + } + ret_assoc = found_assoc; + break; + } + list_iterator_destroy(itr); + + if(!ret_assoc) + return SLURM_ERROR; + + assoc->id = ret_assoc->id; + if(!assoc->user) + assoc->user = ret_assoc->user; + if(!assoc->acct) + assoc->acct = ret_assoc->acct; + if(!assoc->cluster) + assoc->cluster = ret_assoc->cluster; + if(!assoc->partition) + assoc->partition = ret_assoc->partition; + + return SLURM_SUCCESS; +} + +extern int acct_storage_p_validate_assoc_id(void *db_conn, + uint32_t assoc_id) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_users(void *db_conn, + acct_user_cond_t *user_q, + acct_user_rec_t *user) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char *object = NULL; + char tmp_buff[50]; + int set = 0; + + if(!user_q) { + error("acct_storage_p_modify_users: " + "we need conditions to modify"); + 
return NULL; + } + + if(!user) { + error("acct_storage_p_modify_users: " + "we need something to change"); + return NULL; + } + + gold_request = create_gold_request(GOLD_OBJECT_USER, + GOLD_ACTION_MODIFY); + if(!gold_request) { + error("acct_storage_p_modify_users: " + "couldn't create gold_request"); + return NULL; + } + + if(user_q->user_list && list_count(user_q->user_list)) { + itr = list_iterator_create(user_q->user_list); + if(list_count(user_q->user_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(user_q->def_acct_list && list_count(user_q->def_acct_list)) { + itr = list_iterator_create(user_q->def_acct_list); + if(list_count(user_q->def_acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, + "DefaultProject", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(user->default_acct) + gold_request_add_assignment(gold_request, + "DefaultProject", + user->default_acct); + + if(user->qos != ACCT_QOS_NOTSET) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + user->qos-1); + gold_request_add_assignment(gold_request, "Expedite", + tmp_buff); + } + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_users: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + } + + destroy_gold_response(gold_response); + + return NULL; +} + +extern List acct_storage_p_modify_user_admin_level(void *db_conn, + acct_user_cond_t *user_q) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char *object = NULL; + int set = 0; + + if(!user_q || user_q->admin_level == ACCT_ADMIN_NOTSET) { + error("acct_storage_p_modify_users: " + "we need conditions to modify"); + return NULL; + } + + if(user_q->admin_level == ACCT_ADMIN_NONE) + gold_request = create_gold_request(GOLD_OBJECT_ROLEUSER, + GOLD_ACTION_DELETE); + else + gold_request = create_gold_request(GOLD_OBJECT_ROLEUSER, + GOLD_ACTION_CREATE); + + if(!gold_request) { + error("couldn't create gold_request"); + return NULL; + } + + if(user_q->admin_level == ACCT_ADMIN_NONE) { + gold_request_add_condition(gold_request, + "Role", + "SystemAdmin", + GOLD_OPERATOR_NONE, 2); + + gold_request_add_condition(gold_request, + "Role", + "Operator", + GOLD_OPERATOR_NONE, 1); + } else if(user_q->admin_level == ACCT_ADMIN_SUPER_USER) + gold_request_add_assignment(gold_request, + "Role", + "SystemAdmin"); + else if(user_q->admin_level == ACCT_ADMIN_OPERATOR) + gold_request_add_assignment(gold_request, + "Role", + "Operator"); + else { + error("acct_storage_p_modify_user_admin_level: " + "unknown admin level %d", user_q->admin_level); + return NULL; + } + + if(user_q->user_list && list_count(user_q->user_list)) { + itr = list_iterator_create(user_q->user_list); + if(list_count(user_q->user_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(user_q->def_acct_list && list_count(user_q->def_acct_list)) { + itr 
= list_iterator_create(user_q->def_acct_list); + if(list_count(user_q->def_acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, + "DefaultProject", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_users: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + + } + destroy_gold_response(gold_response); + + return NULL; +} + +extern List acct_storage_p_modify_accts(void *db_conn, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char tmp_buff[50]; + int set = 0; + char *object = NULL; + + if(!acct_q) { + error("acct_storage_p_modify_accts: " + "we need conditions to modify"); + return NULL; + } + + if(!acct) { + error("acct_storage_p_modify_accts: " + "we need something to change"); + return NULL; + } + + gold_request = create_gold_request(GOLD_OBJECT_ACCT, + GOLD_ACTION_MODIFY); + if(!gold_request) { + error("couldn't create gold_request"); + return NULL; + } + + if(acct_q->acct_list && list_count(acct_q->acct_list)) { + itr = list_iterator_create(acct_q->acct_list); + if(list_count(acct_q->acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(acct_q->description_list + && list_count(acct_q->description_list)) { + itr = list_iterator_create(acct_q->description_list); + if(list_count(acct_q->description_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Description", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(acct_q->organization_list + && list_count(acct_q->organization_list)) { + itr = list_iterator_create(acct_q->organization_list); + if(list_count(acct_q->organization_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Organization", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(acct->description) + gold_request_add_assignment(gold_request, + "Description", + acct->description); + if(acct->organization) + gold_request_add_assignment(gold_request, + "Organization", + acct->organization); + + if(acct->qos != ACCT_QOS_NOTSET) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + acct->qos-1); + gold_request_add_assignment(gold_request, "Expedite", + tmp_buff); + } + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_accts: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + + } + + destroy_gold_response(gold_response); + + return NULL; +} + +extern List acct_storage_p_modify_clusters(void *db_conn, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster) +{ + return SLURM_SUCCESS; +} + +extern 
List acct_storage_p_modify_associations(void *db_conn, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char tmp_buff[50]; + char *object = NULL; + int set = 0; + + if(!assoc_q) { + error("acct_storage_p_modify_associations: " + "we need conditions to modify"); + return NULL; + } + + if(!assoc) { + error("acct_storage_p_modify_associations: " + "we need something to change"); + return NULL; + } + + gold_request = create_gold_request(GOLD_OBJECT_ACCT, + GOLD_ACTION_MODIFY); + if(!gold_request) { + error("couldn't create gold_request"); + return NULL; + } + + if(assoc_q->id_list && list_count(assoc_q->id_list)) { + itr = list_iterator_create(assoc_q->id_list); + if(list_count(assoc_q->id_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Id", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->user_list && list_count(assoc_q->user_list)) { + itr = list_iterator_create(assoc_q->user_list); + if(list_count(assoc_q->user_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "User", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->acct_list && list_count(assoc_q->acct_list)) { + itr = list_iterator_create(assoc_q->acct_list); + if(list_count(assoc_q->acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Project", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) { + itr = list_iterator_create(assoc_q->cluster_list); + if(list_count(assoc_q->cluster_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Machine", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc->fairshare) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + assoc->fairshare); + gold_request_add_assignment(gold_request, "Fairshare", + tmp_buff); + } + + if(assoc->max_jobs) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + assoc->max_jobs); + gold_request_add_assignment(gold_request, "MaxJobs", + tmp_buff); + } + + if(assoc->max_nodes_per_job) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + assoc->max_nodes_per_job); + gold_request_add_assignment(gold_request, + "MaxNodesPerJob", + tmp_buff); + } + + if(assoc->max_wall_duration_per_job) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + assoc->max_wall_duration_per_job); + gold_request_add_assignment(gold_request, + "MaxWallDurationPerJob", + tmp_buff); + } + + if(assoc->max_cpu_secs_per_job) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + assoc->max_cpu_secs_per_job); + gold_request_add_assignment(gold_request, + "MaxProcSecondsPerJob", + tmp_buff); + } + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_associations: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + + } + destroy_gold_response(gold_response); + + return NULL; +} + +extern List 
acct_storage_p_remove_users(void *db_conn, + acct_user_cond_t *user_q) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char *object = NULL; + int set = 0; + + if(!user_q) { + error("acct_storage_p_remove_users: " + "we need conditions to remove"); + return NULL; + } + + gold_request = create_gold_request(GOLD_OBJECT_USER, + GOLD_ACTION_DELETE); + if(!gold_request) { + error("acct_storage_p_remove_users: " + "couldn't create gold_request"); + return NULL; + } + + if(user_q->user_list && list_count(user_q->user_list)) { + itr = list_iterator_create(user_q->user_list); + if(list_count(user_q->user_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(user_q->def_acct_list && list_count(user_q->def_acct_list)) { + itr = list_iterator_create(user_q->def_acct_list); + if(list_count(user_q->def_acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, + "DefaultProject", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_remove_users: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("acct_storage_p_remove_users: " + "gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + + } + destroy_gold_response(gold_response); + + return NULL; +} + +extern List acct_storage_p_remove_coord(void *db_conn, + char *acct, + acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_accts(void *db_conn, + acct_account_cond_t *acct_q) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char *object = NULL; + int set = 0; + + if(!acct_q) { + error("acct_storage_p_remove_accts: " + "we need conditions to remove"); + return NULL; + } + + gold_request = create_gold_request(GOLD_OBJECT_PROJECT, + GOLD_ACTION_DELETE); + if(!gold_request) { + error("acct_storage_p_remove_accts: " + "couldn't create gold_request"); + return NULL; + } + + if(acct_q->acct_list && list_count(acct_q->acct_list)) { + itr = list_iterator_create(acct_q->acct_list); + if(list_count(acct_q->acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(acct_q->description_list + && list_count(acct_q->description_list)) { + itr = list_iterator_create(acct_q->description_list); + if(list_count(acct_q->description_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Description", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(acct_q->organization_list + && list_count(acct_q->organization_list)) { + itr = list_iterator_create(acct_q->organization_list); + if(list_count(acct_q->organization_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Organization", + object, + GOLD_OPERATOR_NONE, set); 
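
The other idiom repeated in each of these functions is the send-and-check sequence: call get_gold_response(), destroy the request, bail out if no response arrived, copy a non-zero response code into errno and log it, then destroy the response. A hypothetical wrapper like the sketch below captures that sequence, again assuming this plugin's gold declarations (and <errno.h>) are in scope; it only covers the callers that discard the response entries, since some paths still need to walk them.

/* Hypothetical wrapper around the send/check/cleanup sequence used by the
 * modify and remove paths that do not inspect the response entries.
 * Consumes the request either way, mirroring the surrounding code. */
static int _issue_and_check(gold_request_t *gold_request, const char *caller)
{
	gold_response_t *gold_response = get_gold_response(gold_request);

	destroy_gold_request(gold_request);

	if (!gold_response) {
		error("%s: no response received", caller);
		return SLURM_ERROR;
	}

	if (gold_response->rc) {
		error("%s: gold_response has non-zero rc(%d): %s",
		      caller, gold_response->rc, gold_response->message);
		errno = gold_response->rc;
		destroy_gold_response(gold_response);
		return SLURM_ERROR;
	}

	destroy_gold_response(gold_response);
	return SLURM_SUCCESS;
}
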
+ set = 1; + } + list_iterator_destroy(itr); + } + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_remove_accts: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("acct_storage_p_remove_accts: " + "gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + + } + destroy_gold_response(gold_response); + + return NULL; +} + +extern List acct_storage_p_remove_clusters(void *db_conn, + acct_cluster_cond_t *cluster_q) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char *object = NULL; + int set = 0; + + if(!cluster_q) { + error("acct_storage_p_modify_clusters: " + "we need conditions to modify"); + return NULL; + } + + gold_request = create_gold_request(GOLD_OBJECT_MACHINE, + GOLD_ACTION_DELETE); + if(!gold_request) { + error("acct_storage_p_remove_clusters: " + "couldn't create gold_request"); + return NULL; + } + + if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) { + itr = list_iterator_create(cluster_q->cluster_list); + if(list_count(cluster_q->cluster_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_remove_clusters: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("acct_storage_p_remove_clusters: " + "gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + return NULL; + } + destroy_gold_response(gold_response); + + gold_request = create_gold_request(GOLD_OBJECT_MACHINE_HOUR_USAGE, + GOLD_ACTION_DELETE); + if(!gold_request) { + error("acct_storage_p_remove_clusters: " + "couldn't create gold_request"); + return NULL; + } + + if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) { + itr = list_iterator_create(cluster_q->cluster_list); + if(list_count(cluster_q->cluster_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Machine", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_remove_clusters: " + "no response received"); + destroy_gold_request(gold_request); + return NULL; + } + + if(gold_response->rc) { + error("acct_storage_p_remove_clusters: " + "gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_request(gold_request); + destroy_gold_response(gold_response); + return NULL; + } + destroy_gold_response(gold_response); + + gold_request->object = GOLD_OBJECT_MACHINE_DAY_USAGE; + gold_response = get_gold_response(gold_request); + if(!gold_response) { + error("acct_storage_p_remove_clusters: " + "no response received"); + destroy_gold_request(gold_request); + return NULL; + } + + if(gold_response->rc) { + error("acct_storage_p_remove_clusters: " + "gold_response has non-zero rc(%d): %s", + gold_response->rc, + 
gold_response->message); + errno = gold_response->rc; + destroy_gold_request(gold_request); + destroy_gold_response(gold_response); + return NULL; + } + + destroy_gold_response(gold_response); + + gold_request->object = GOLD_OBJECT_MACHINE_MONTH_USAGE; + gold_response = get_gold_response(gold_request); + if(!gold_response) { + error("acct_storage_p_remove_clusters: " + "no response received"); + destroy_gold_request(gold_request); + return NULL; + } + + if(gold_response->rc) { + error("acct_storage_p_remove_clusters: " + "gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + + } + + destroy_gold_request(gold_request); + destroy_gold_response(gold_response); + + return NULL; +} + +extern List acct_storage_p_remove_associations(void *db_conn, + acct_association_cond_t *assoc_q) +{ + ListIterator itr = NULL; +// int rc = SLURM_SUCCESS; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char *object = NULL; + int set = 0; + + if(!assoc_q) { + error("acct_storage_p_remove_associations: " + "we need conditions to remove"); + return NULL; + } + + gold_request = create_gold_request(GOLD_OBJECT_ACCT, + GOLD_ACTION_DELETE); + if(!gold_request) { + error("couldn't create gold_request"); + return NULL; + } + + if(assoc_q->id_list && list_count(assoc_q->id_list)) { + itr = list_iterator_create(assoc_q->id_list); + if(list_count(assoc_q->id_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Id", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->user_list && list_count(assoc_q->user_list)) { + itr = list_iterator_create(assoc_q->user_list); + if(list_count(assoc_q->user_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "User", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->acct_list && list_count(assoc_q->acct_list)) { + itr = list_iterator_create(assoc_q->acct_list); + if(list_count(assoc_q->acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Project", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) { + itr = list_iterator_create(assoc_q->cluster_list); + if(list_count(assoc_q->cluster_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Machine", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_modify_associations: " + "no response received"); + return NULL; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + + } + + if(gold_response->entry_cnt > 0) { + ListIterator itr = NULL; + ListIterator itr2 = NULL; + gold_response_entry_t *resp_entry = NULL; + gold_name_value_t *name_val = NULL; + List id_list = list_create(slurm_destroy_char); + + itr = list_iterator_create(gold_response->entries); + while((resp_entry = list_next(itr))) { + itr2 = list_iterator_create( + resp_entry->name_val); + while((name_val = list_next(itr2))) { + 
if(!strcmp(name_val->name, "Id")) { + list_push(id_list, name_val->value); + break; + } + } + list_iterator_destroy(itr2); + } + list_iterator_destroy(itr); + _remove_association_accounting(id_list); + list_destroy(id_list); + } else { + debug3("no associations found"); + } + destroy_gold_response(gold_response); + + return NULL; +} + +extern List acct_storage_p_get_users(void *db_conn, + acct_user_cond_t *user_q) +{ + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + List user_list = NULL; + ListIterator itr = NULL; + char *object = NULL; + int set = 0; + char tmp_buff[50]; + + gold_request = create_gold_request(GOLD_OBJECT_USER, + GOLD_ACTION_QUERY); + + if(!gold_request) + return NULL; + + if(!user_q) + goto empty; + + if(user_q->user_list && list_count(user_q->user_list)) { + itr = list_iterator_create(user_q->user_list); + if(list_count(user_q->user_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(user_q->def_acct_list && list_count(user_q->def_acct_list)) { + itr = list_iterator_create(user_q->def_acct_list); + if(list_count(user_q->def_acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, + "DefaultProject", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(user_q->qos != ACCT_QOS_NOTSET) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + user_q->qos-1); + gold_request_add_condition(gold_request, "Expedite", + tmp_buff, + GOLD_OPERATOR_NONE, 0); + } + +empty: + gold_request_add_condition(gold_request, "Active", + "True", + GOLD_OPERATOR_NONE, + 0); + + gold_request_add_condition(gold_request, "Special", + "False", + GOLD_OPERATOR_NONE, + 0); + + gold_request_add_selection(gold_request, "Name"); + gold_request_add_selection(gold_request, "DefaultProject"); + gold_request_add_selection(gold_request, "Expedite"); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_get_users: no response received"); + return NULL; + } + + user_list = _get_user_list_from_response(gold_response); + + destroy_gold_response(gold_response); + + return user_list; +} + +extern List acct_storage_p_get_accts(void *db_conn, + acct_account_cond_t *acct_q) +{ + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + List acct_list = NULL; + ListIterator itr = NULL; + int set = 0; + char *object = NULL; + char tmp_buff[50]; + + + gold_request = create_gold_request(GOLD_OBJECT_PROJECT, + GOLD_ACTION_QUERY); + if(!gold_request) + return NULL; + + if(!acct_q) + goto empty; + + if(acct_q->acct_list && list_count(acct_q->acct_list)) { + itr = list_iterator_create(acct_q->acct_list); + if(list_count(acct_q->acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(acct_q->description_list + && list_count(acct_q->description_list)) { + itr = list_iterator_create(acct_q->description_list); + if(list_count(acct_q->description_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Description", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + 
list_iterator_destroy(itr); + } + + if(acct_q->organization_list + && list_count(acct_q->organization_list)) { + itr = list_iterator_create(acct_q->organization_list); + if(list_count(acct_q->organization_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Organization", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(acct_q->qos != ACCT_QOS_NOTSET) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + acct_q->qos-1); + gold_request_add_condition(gold_request, "Expedite", + tmp_buff, + GOLD_OPERATOR_NONE, 0); + } +empty: + gold_request_add_condition(gold_request, "Active", + "True", + GOLD_OPERATOR_NONE, + 0); + + gold_request_add_condition(gold_request, "Special", + "False", + GOLD_OPERATOR_NONE, + 0); + + gold_request_add_selection(gold_request, "Name"); + gold_request_add_selection(gold_request, "Organization"); + gold_request_add_selection(gold_request, "Description"); + gold_request_add_selection(gold_request, "Expedite"); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_get_accts: no response received"); + return NULL; + } + + acct_list = _get_acct_list_from_response(gold_response); + + destroy_gold_response(gold_response); + + return acct_list; +} + +extern List acct_storage_p_get_clusters(void *db_conn, + acct_cluster_cond_t *cluster_q) +{ + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + List cluster_list = NULL; + ListIterator itr = NULL; + int set = 0; + char *object = NULL; + + + gold_request = create_gold_request(GOLD_OBJECT_MACHINE, + GOLD_ACTION_QUERY); + if(!gold_request) + return NULL; + + if(!cluster_q) + goto empty; + + if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) { + itr = list_iterator_create(cluster_q->cluster_list); + if(list_count(cluster_q->cluster_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Name", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + +empty: + gold_request_add_condition(gold_request, "Active", + "True", + GOLD_OPERATOR_NONE, + 0); + + gold_request_add_condition(gold_request, "Special", + "False", + GOLD_OPERATOR_NONE, + 0); + + gold_request_add_selection(gold_request, "Name"); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_get_clusters: no response received"); + return NULL; + } + + cluster_list = _get_cluster_list_from_response(gold_response); + + destroy_gold_response(gold_response); + + return cluster_list; +} + +extern List acct_storage_p_get_associations(void *db_conn, + acct_association_cond_t *assoc_q) +{ + + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + List association_list = NULL; + ListIterator itr = NULL; + int set = 0; + char *object = NULL; + + gold_request = create_gold_request(GOLD_OBJECT_ACCT, + GOLD_ACTION_QUERY); + + if(!gold_request) + return NULL; + + if(!assoc_q) + goto empty; + + if(assoc_q->id_list && list_count(assoc_q->id_list)) { + itr = list_iterator_create(assoc_q->id_list); + if(list_count(assoc_q->id_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Id", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + 
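
The acct_storage_p_get_users/_accts/_clusters functions above all follow the same shape: optional filters, the fixed Active=True / Special=False conditions, a selection list, and a _get_*_list_from_response() conversion that returns a List owned by the caller. A small usage sketch is shown below, assuming the plugin's List API and the acct_user_rec_t fields visible in this file (name, default_acct); the function name and the direct call into the plugin entry point are illustrative only.

/* Illustrative consumer of acct_storage_p_get_users(): the returned List
 * belongs to the caller and must be destroyed when done. */
void print_users(void)
{
	List user_list = acct_storage_p_get_users(NULL, NULL); /* no filter */
	ListIterator itr = NULL;
	acct_user_rec_t *user = NULL;

	if (!user_list)
		return;

	itr = list_iterator_create(user_list);
	while ((user = list_next(itr)))
		info("user %s (default account %s)",
		     user->name, user->default_acct);
	list_iterator_destroy(itr);

	list_destroy(user_list);
}
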
if(assoc_q->user_list && list_count(assoc_q->user_list)) { + itr = list_iterator_create(assoc_q->user_list); + if(list_count(assoc_q->user_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "User", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->acct_list && list_count(assoc_q->acct_list)) { + itr = list_iterator_create(assoc_q->acct_list); + if(list_count(assoc_q->acct_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Project", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) { + itr = list_iterator_create(assoc_q->cluster_list); + if(list_count(assoc_q->cluster_list) > 1) + set = 2; + else + set = 0; + + while((object = list_next(itr))) { + gold_request_add_condition(gold_request, "Machine", + object, + GOLD_OPERATOR_NONE, set); + set = 1; + } + list_iterator_destroy(itr); + } + +empty: + gold_request_add_selection(gold_request, "Id"); + gold_request_add_selection(gold_request, "User"); + gold_request_add_selection(gold_request, "Project"); + gold_request_add_selection(gold_request, "Machine"); + gold_request_add_selection(gold_request, "Parent"); + gold_request_add_selection(gold_request, "FairShare"); + gold_request_add_selection(gold_request, "MaxJobs"); + gold_request_add_selection(gold_request, "MaxNodesPerJob"); + gold_request_add_selection(gold_request, "MaxWallDurationPerJob"); + gold_request_add_selection(gold_request, "MaxProcSecondsPerJob"); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("acct_storage_p_get_associations: " + "no response received"); + return NULL; + } + + association_list = _get_association_list_from_response(gold_response); + + destroy_gold_response(gold_response); + + return association_list; +} + +extern int acct_storage_p_get_usage(void *db_conn, + acct_association_rec_t *acct_assoc, + time_t start, time_t end) +{ + int rc = SLURM_ERROR; +/* gold_request_t *gold_request = NULL; */ +/* gold_response_t *gold_response = NULL; */ +/* char tmp_buff[50]; */ +/* gold_object_t g_object; */ +/* char *req_cpu_type = NULL; */ + +/* if(!acct_assoc || acct_assoc->id) { */ +/* error("acct_storage_p_get_usage: " */ +/* "We need an id to go off to query off of"); */ +/* return rc; */ +/* } */ + +/* switch(type) { */ +/* case ACCT_USAGE_HOUR: */ +/* g_object = GOLD_OBJECT_ACCT_HOUR_USAGE; */ +/* req_cpu_type = "AllocatedCPUSeconds"; */ +/* break; */ +/* case ACCT_USAGE_DAY: */ +/* g_object = GOLD_OBJECT_ACCT_DAY_USAGE; */ +/* req_cpu_type = "AllocatedCPUSeconds"; */ +/* break; */ +/* case ACCT_USAGE_MONTH: */ +/* g_object = GOLD_OBJECT_ACCT_MONTH_USAGE; */ +/* req_cpu_type = "AllocatedCPUHours"; */ +/* break; */ +/* default: */ +/* error("Unknown usage type"); */ +/* return rc; */ +/* } */ +/* gold_request = create_gold_request( */ +/* g_object, GOLD_ACTION_QUERY); */ + +/* if(!gold_request) */ +/* return rc; */ + +/* snprintf(tmp_buff, sizeof(tmp_buff), "%u", acct_assoc->id); */ +/* gold_request_add_condition(gold_request, "Acct", tmp_buff, */ +/* GOLD_OPERATOR_NONE, 0); */ + +/* if(start) { */ +/* snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)start); */ +/* gold_request_add_condition(gold_request, "PeriodStart", */ +/* tmp_buff, */ +/* GOLD_OPERATOR_GE, 0); */ +/* } */ +/* if(end) { */ +/* 
snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)end); */ +/* gold_request_add_condition(gold_request, "PeriodStart", */ +/* tmp_buff, */ +/* GOLD_OPERATOR_L, 0); */ +/* } */ + +/* gold_request_add_selection(gold_request, "PeriodStart"); */ +/* gold_request_add_selection(gold_request, req_cpu_type); */ + +/* gold_response = get_gold_response(gold_request); */ +/* destroy_gold_request(gold_request); */ + +/* if(!gold_response) { */ +/* error("acct_storage_p_get_usage: " */ +/* "no response received"); */ +/* return rc; */ +/* } */ + +/* rc = _get_acct_accounting_list_from_response( */ +/* gold_response, acct_assoc); */ + +/* destroy_gold_response(gold_response); */ + + return rc; +} + +extern int acct_storage_p_roll_usage(void *db_conn, + time_t sent_start) +{ + int rc = SLURM_ERROR; + /* FIX ME: This doesn't do anything now */ +/* gold_request_t *gold_request = NULL; */ +/* gold_response_t *gold_response = NULL; */ +/* char tmp_buff[50]; */ + +/* if(!acct_assoc || acct_assoc->id) { */ +/* error("acct_storage_p_roll_usage: " */ +/* "We need an id to go off to query off of"); */ +/* return rc; */ +/* } */ + +/* switch(type) { */ +/* case ACCT_USAGE_HOUR: */ +/* g_object = GOLD_OBJECT_ACCT_HOUR_USAGE; */ +/* req_cpu_type = "AllocatedCPUSecs"; */ +/* break; */ +/* case ACCT_USAGE_DAY: */ +/* g_object = GOLD_OBJECT_ACCT_DAY_USAGE; */ +/* req_cpu_type = "AllocatedCPUSecs"; */ +/* break; */ +/* case ACCT_USAGE_MONTH: */ +/* g_object = GOLD_OBJECT_ACCT_MONTH_USAGE; */ +/* req_cpu_type = "AllocatedCPUHours"; */ +/* break; */ +/* default: */ +/* error("Unknown usage type"); */ +/* return rc; */ +/* } */ +/* gold_request = create_gold_request( */ +/* GOLD_OBJECT_ACCT_DAY_USAGE, GOLD_ACTION_QUERY); */ + +/* if(!gold_request) */ +/* return rc; */ + +/* snprintf(tmp_buff, sizeof(tmp_buff), "%u", acct_assoc->id); */ +/* gold_request_add_condition(gold_request, "Acct", tmp_buff, */ +/* GOLD_OPERATOR_NONE, 0); */ + +/* if(start) { */ +/* snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)start); */ +/* gold_request_add_condition(gold_request, "PeriodStart", */ +/* tmp_buff, */ +/* GOLD_OPERATOR_GE, 0); */ +/* } */ +/* if(end) { */ +/* snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)end); */ +/* gold_request_add_condition(gold_request, "PeriodStart", */ +/* tmp_buff, */ +/* GOLD_OPERATOR_L, 0); */ +/* } */ + +/* gold_request_add_selection(gold_request, "PeriodStart"); */ +/* gold_request_add_selection(gold_request, "AllocatedCPUSecs"); */ + +/* gold_response = get_gold_response(gold_request); */ +/* destroy_gold_request(gold_request); */ + +/* if(!gold_response) { */ +/* error("acct_storage_p_get_daily_usage: " */ +/* "no response received"); */ +/* return rc; */ +/* } */ + +/* rc = _get_acct_accounting_list_from_response( */ +/* gold_response, acct_assoc); */ + +/* destroy_gold_response(gold_response); */ + + return rc; +} + +extern int clusteracct_storage_p_node_down(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, + char *reason) +{ + uint16_t cpus; + int rc = SLURM_ERROR; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char tmp_buff[50]; + char *my_reason; + + if (slurmctld_conf.fast_schedule && !slurmdbd_conf) + cpus = node_ptr->config_ptr->cpus; + else + cpus = node_ptr->cpus; + + if (reason) + my_reason = reason; + else + my_reason = node_ptr->reason; + +#if _DEBUG + slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff)); + info("cluster_acct_down: %s at %s with %u cpus due to %s", + node_ptr->name, tmp_buff, cpus, reason); 
+#endif + /* If the node was already down end that record since the + * reason will most likely be different + */ + + gold_request = create_gold_request(GOLD_OBJECT_EVENT, + GOLD_ACTION_MODIFY); + if(!gold_request) + return rc; + + gold_request_add_condition(gold_request, "Machine", cluster, + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "EndTime", "0", + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "Name", node_ptr->name, + GOLD_OPERATOR_NONE, 0); + + snprintf(tmp_buff, sizeof(tmp_buff), "%d", ((int)event_time - 1)); + gold_request_add_assignment(gold_request, "EndTime", tmp_buff); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("clusteracct_storage_p_node_down: no response received"); + return rc; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + return rc; + } + destroy_gold_response(gold_response); + + /* now add the new one */ + gold_request = create_gold_request(GOLD_OBJECT_EVENT, + GOLD_ACTION_CREATE); + if(!gold_request) + return rc; + + gold_request_add_assignment(gold_request, "Machine", cluster); + snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)event_time); + gold_request_add_assignment(gold_request, "StartTime", tmp_buff); + gold_request_add_assignment(gold_request, "Name", node_ptr->name); + snprintf(tmp_buff, sizeof(tmp_buff), "%u", cpus); + gold_request_add_assignment(gold_request, "CPUCount", tmp_buff); + gold_request_add_assignment(gold_request, "Reason", my_reason); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("clusteracct_p_node_down: no response received"); + return rc; + } + + if(!gold_response->rc) + rc = SLURM_SUCCESS; + else { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + } + destroy_gold_response(gold_response); + + return rc; +} + +extern int clusteracct_storage_p_node_up(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time) +{ + int rc = SLURM_ERROR; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char tmp_buff[50]; + +#if _DEBUG + slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff)); + info("cluster_acct_up: %s at %s", node_ptr->name, tmp_buff); +#endif + + gold_request = create_gold_request(GOLD_OBJECT_EVENT, + GOLD_ACTION_MODIFY); + if(!gold_request) + return rc; + + gold_request_add_condition(gold_request, "Machine", cluster, + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "EndTime", "0", + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "Name", node_ptr->name, + GOLD_OPERATOR_NONE, 0); + + snprintf(tmp_buff, sizeof(tmp_buff), "%d", ((int)event_time - 1)); + gold_request_add_assignment(gold_request, "EndTime", tmp_buff); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("clusteracct_p_node_up: no response received"); + return rc; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + return rc; + } + rc = SLURM_SUCCESS; + destroy_gold_response(gold_response); + + + return rc; +} + +extern int 
clusteracct_storage_p_register_ctld(char *cluster, + uint16_t port) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_cluster_procs(void *db_conn, + char *cluster, + uint32_t procs, + time_t event_time) +{ + static uint32_t last_procs = -1; + gold_request_t *gold_request = NULL; + gold_response_t *gold_response = NULL; + char tmp_buff[50]; + int rc = SLURM_ERROR; + bool no_modify = 0; + + if (procs == last_procs) { + debug3("we have the same procs as before no need to " + "query the database."); + return SLURM_SUCCESS; + } + last_procs = procs; + + /* Record the processor count */ +#if _DEBUG + slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff)); + info("cluster_acct_procs: %s has %u total CPUs at %s", + cluster, procs, tmp_buff); +#endif + + /* get the last known one */ + gold_request = create_gold_request(GOLD_OBJECT_EVENT, + GOLD_ACTION_QUERY); + if(!gold_request) + return rc; + gold_request_add_condition(gold_request, "Machine", cluster, + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "EndTime", "0", + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "Name", "NULL", + GOLD_OPERATOR_NONE, 0); + + gold_request_add_selection(gold_request, "CPUCount"); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("clusteracct_p_cluster_procs: no response received"); + return rc; + } + + if(gold_response->entry_cnt > 0) { + gold_response_entry_t *resp_entry = + list_pop(gold_response->entries); + gold_name_value_t *name_val = list_pop(resp_entry->name_val); + + if(procs == atoi(name_val->value)) { + debug("System hasn't changed since last entry"); + destroy_gold_name_value(name_val); + destroy_gold_response_entry(resp_entry); + destroy_gold_response(gold_response); + return SLURM_SUCCESS; + } else { + debug("System has changed from %s cpus to %d", + name_val->value, procs); + } + + destroy_gold_name_value(name_val); + destroy_gold_response_entry(resp_entry); + } else { + debug("We don't have an entry for this machine " + "most likely a first time running."); + no_modify = 1; + } + + destroy_gold_response(gold_response); + + if(no_modify) { + gold_request = create_gold_request(GOLD_OBJECT_EVENT, + GOLD_ACTION_MODIFY); + if(!gold_request) + return rc; + + gold_request_add_condition(gold_request, "Machine", + cluster, + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "EndTime", "0", + GOLD_OPERATOR_NONE, 0); + gold_request_add_condition(gold_request, "Name", "NULL", + GOLD_OPERATOR_NONE, 0); + + snprintf(tmp_buff, sizeof(tmp_buff), "%d", + ((int)event_time - 1)); + gold_request_add_assignment(gold_request, "EndTime", tmp_buff); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("jobacct_p_cluster_procs: no response received"); + return rc; + } + + if(gold_response->rc) { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + destroy_gold_response(gold_response); + return rc; + } + destroy_gold_response(gold_response); + } + + /* now add the new one */ + gold_request = create_gold_request(GOLD_OBJECT_EVENT, + GOLD_ACTION_CREATE); + if(!gold_request) + return rc; + + gold_request_add_assignment(gold_request, "Machine", cluster); + snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)event_time); + gold_request_add_assignment(gold_request, "StartTime", tmp_buff); + snprintf(tmp_buff, sizeof(tmp_buff), "%u", procs); 
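
clusteracct_storage_p_node_down(), _node_up(), and _cluster_procs() all maintain the gold Event table the same way: any still-open row for the node (or for the whole cluster, Name "NULL") is closed by setting its EndTime to event_time - 1, and a fresh row is then created with the new StartTime, CPUCount, and reason. The self-contained sketch below models that close-then-create bookkeeping against a plain in-memory record so the idea is visible without the gold plumbing; it illustrates the pattern only and is not the plugin's storage.

#include <stdio.h>
#include <time.h>

/* Toy stand-in for one row of the gold Event table. */
struct event_row {
	time_t start_time;
	time_t end_time;     /* 0 means "still open" */
	unsigned int cpus;
	const char *reason;
};

/* Close any open row at event_time - 1, then open a new one.  This is the
 * same two-step update node_down/node_up/cluster_procs perform via gold. */
static void record_event(struct event_row *row, time_t event_time,
			 unsigned int cpus, const char *reason)
{
	if (row->start_time != 0 && row->end_time == 0) {
		row->end_time = event_time - 1;   /* close the previous state */
		printf("closed previous row at %ld\n", (long)row->end_time);
	}

	/* start the replacement row */
	row->start_time = event_time;
	row->end_time = 0;
	row->cpus = cpus;
	row->reason = reason;
}

int main(void)
{
	struct event_row row = { time(NULL) - 3600, 0, 128, "up" };

	record_event(&row, time(NULL), 96, "node down: bad memory");
	printf("open row: %u cpus, reason \"%s\"\n", row.cpus, row.reason);
	return 0;
}
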
+ gold_request_add_assignment(gold_request, "CPUCount", tmp_buff); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("clusteracct_p_cluster_procs: no response received"); + return rc; + } + + if(!gold_response->rc) + rc = SLURM_SUCCESS; + else { + error("gold_response has non-zero rc(%d): %s", + gold_response->rc, + gold_response->message); + errno = gold_response->rc; + } + destroy_gold_response(gold_response); + + return rc; +} + +extern int clusteracct_storage_p_get_usage( + void *db_conn, + acct_cluster_rec_t *cluster_rec, time_t start, + time_t end) +{ + int rc = SLURM_ERROR; +/* gold_request_t *gold_request = NULL; */ +/* gold_response_t *gold_response = NULL; */ +/* char tmp_buff[50]; */ +/* gold_object_t g_object; */ +/* char *alloc_cpu = NULL; */ +/* char *idle_cpu = NULL; */ +/* char *down_cpu = NULL; */ +/* char *resv_cpu = NULL; */ + +/* if(!cluster_rec || !cluster_rec->name) { */ +/* error("clusteracct_storage_p_get_hourly_usage:" */ +/* "no cluster name given to query."); */ +/* return rc; */ +/* } */ +/* switch(type) { */ +/* case ACCT_USAGE_HOUR: */ +/* g_object = GOLD_OBJECT_MACHINE_HOUR_USAGE; */ +/* alloc_cpu = "AllocatedCPUSeconds"; */ +/* idle_cpu = "IdleCPUSeconds"; */ +/* down_cpu = "DownCPUSeconds"; */ +/* resv_cpu = "ReservedCPUSeconds"; */ +/* break; */ +/* case ACCT_USAGE_DAY: */ +/* g_object = GOLD_OBJECT_MACHINE_DAY_USAGE; */ +/* alloc_cpu = "AllocatedCPUSeconds"; */ +/* idle_cpu = "IdleCPUSeconds"; */ +/* down_cpu = "DownCPUSeconds"; */ +/* resv_cpu = "ReservedCPUSeconds"; */ +/* break; */ +/* case ACCT_USAGE_MONTH: */ +/* g_object = GOLD_OBJECT_MACHINE_MONTH_USAGE; */ +/* alloc_cpu = "AllocatedCPUHours"; */ +/* idle_cpu = "IdleCPUHours"; */ +/* down_cpu = "DownCPUHours"; */ +/* resv_cpu = "ReservedCPUHours"; */ +/* break; */ +/* default: */ +/* error("Unknown usage type"); */ +/* return rc; */ +/* } */ +/* /\* get the last known one *\/ */ +/* gold_request = create_gold_request(GOLD_OBJECT_MACHINE_HOUR_USAGE, */ +/* GOLD_ACTION_QUERY); */ +/* if(!gold_request) */ +/* return rc; */ + +/* gold_request_add_condition(gold_request, "Machine", cluster_rec->name, */ +/* GOLD_OPERATOR_NONE, 0); */ +/* if(start) { */ +/* snprintf(tmp_buff, sizeof(tmp_buff), "%d", (int)start); */ +/* gold_request_add_condition(gold_request, "PeriodStart", */ +/* tmp_buff, */ +/* GOLD_OPERATOR_GE, 0); */ +/* } */ +/* if(end) { */ +/* snprintf(tmp_buff, sizeof(tmp_buff), "%u", (int)end); */ +/* gold_request_add_condition(gold_request, "PeriodStart", */ +/* tmp_buff, */ +/* GOLD_OPERATOR_L, 0); */ +/* } */ + +/* gold_request_add_selection(gold_request, "CPUCount"); */ +/* gold_request_add_selection(gold_request, "PeriodStart"); */ +/* gold_request_add_selection(gold_request, idle_cpu); */ +/* gold_request_add_selection(gold_request, down_cpu); */ +/* gold_request_add_selection(gold_request, alloc_cpu); */ +/* gold_request_add_selection(gold_request, resv_cpu); */ + +/* gold_response = get_gold_response(gold_request); */ +/* destroy_gold_request(gold_request); */ + +/* if(!gold_response) { */ +/* error("clusteracct_p_get_hourly_usage: no response received"); */ +/* return rc; */ +/* } */ + +/* if(gold_response->entry_cnt > 0) { */ +/* rc = _get_cluster_accounting_list_from_response( */ +/* gold_response, cluster_rec); */ +/* } else { */ +/* debug("We don't have an entry for this machine for this time"); */ +/* } */ +/* destroy_gold_response(gold_response); */ + + return rc; +} + +extern int 
jobacct_storage_p_job_start(void *db_conn, + struct job_record *job_ptr) +{ + gold_object_t action = GOLD_ACTION_CREATE; + + if(_check_for_job(job_ptr->job_id, job_ptr->details->submit_time)) { + debug3("It looks like this job is already in GOLD."); + action = GOLD_ACTION_MODIFY; + } + + return _add_edit_job(job_ptr, action); +} + +extern int jobacct_storage_p_job_complete(void *db_conn, + struct job_record *job_ptr) +{ + gold_object_t action = GOLD_ACTION_MODIFY; + + if(!_check_for_job(job_ptr->job_id, job_ptr->details->submit_time)) { + error("Couldn't find this job entry. " + "This shouldn't happen, we are going to create one."); + action = GOLD_ACTION_CREATE; + } + + return _add_edit_job(job_ptr, action); +} + +extern int jobacct_storage_p_step_start(void *db_conn, + struct step_record *step) +{ + gold_object_t action = GOLD_ACTION_MODIFY; + + if(!_check_for_job(step->job_ptr->job_id, + step->job_ptr->details->submit_time)) { + error("Couldn't find this job entry. " + "This shouldn't happen, we are going to create one."); + action = GOLD_ACTION_CREATE; + } + + return _add_edit_job(step->job_ptr, action); + +} + +extern int jobacct_storage_p_step_complete(void *db_conn, + struct step_record *step) +{ + return SLURM_SUCCESS; +} + +extern int jobacct_storage_p_suspend(void *db_conn, + struct job_record *job_ptr) +{ + return SLURM_SUCCESS; +} + +/* + * get info from the storage + * returns List of jobacct_job_rec_t * + * note List needs to be freed when called + */ +extern List jobacct_storage_p_get_jobs(void *db_conn, + List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ + gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB, + GOLD_ACTION_QUERY); + gold_response_t *gold_response = NULL; + gold_response_entry_t *resp_entry = NULL; + gold_name_value_t *name_val = NULL; + char tmp_buff[50]; + int set = 0; + char *selected_part = NULL; + jobacct_selected_step_t *selected_step = NULL; + jobacct_job_rec_t *job = NULL; + ListIterator itr = NULL; + ListIterator itr2 = NULL; + List job_list = NULL; + + if(!gold_request) + return NULL; + + + if(selected_steps && list_count(selected_steps)) { + itr = list_iterator_create(selected_steps); + if(list_count(selected_steps) > 1) + set = 2; + else + set = 0; + while((selected_step = list_next(itr))) { + snprintf(tmp_buff, sizeof(tmp_buff), "%u", + selected_step->jobid); + gold_request_add_condition(gold_request, "JobId", + tmp_buff, + GOLD_OPERATOR_NONE, + set); + set = 1; + } + list_iterator_destroy(itr); + } + + if(selected_parts && list_count(selected_parts)) { + if(list_count(selected_parts) > 1) + set = 2; + else + set = 0; + itr = list_iterator_create(selected_parts); + while((selected_part = list_next(itr))) { + gold_request_add_condition(gold_request, "Partition", + selected_part, + GOLD_OPERATOR_NONE, + set); + set = 1; + } + list_iterator_destroy(itr); + } + + gold_request_add_selection(gold_request, "JobId"); + gold_request_add_selection(gold_request, "GoldAccountId"); + gold_request_add_selection(gold_request, "Partition"); + gold_request_add_selection(gold_request, "RequestedCPUCount"); + gold_request_add_selection(gold_request, "AllocatedCPUCount"); + gold_request_add_selection(gold_request, "NodeList"); + gold_request_add_selection(gold_request, "JobName"); + gold_request_add_selection(gold_request, "SubmitTime"); + gold_request_add_selection(gold_request, "EligibleTime"); + gold_request_add_selection(gold_request, "StartTime"); + gold_request_add_selection(gold_request, "EndTime"); + 
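
jobacct_storage_p_job_start(), _job_complete(), and _step_start() above all reduce to the same upsert decision: ask _check_for_job() (a helper defined earlier in this file) whether a record keyed by job id and submit time already exists in gold, then hand either GOLD_ACTION_CREATE or GOLD_ACTION_MODIFY to _add_edit_job(). A compact, self-contained sketch of that decision follows, with the existence check stubbed out; exists_in_gold() and the enum are illustrative stand-ins, not the plugin's symbols.

#include <stdio.h>

enum gold_action { ACTION_CREATE, ACTION_MODIFY };

/* Stand-in for this plugin's _check_for_job() helper. */
static int exists_in_gold(unsigned int job_id, long submit_time)
{
	(void)job_id;
	(void)submit_time;
	return 0;	/* pretend the record is not there yet */
}

static enum gold_action start_action(unsigned int job_id, long submit_time)
{
	/* job start: normally CREATE, but fall back to MODIFY if a record
	 * for this (job_id, submit_time) pair is already present. */
	return exists_in_gold(job_id, submit_time) ? ACTION_MODIFY
						   : ACTION_CREATE;
}

static enum gold_action complete_action(unsigned int job_id, long submit_time)
{
	/* job completion / step start: normally MODIFY an existing record,
	 * but CREATE one if it unexpectedly is not there. */
	return exists_in_gold(job_id, submit_time) ? ACTION_MODIFY
						   : ACTION_CREATE;
}

int main(void)
{
	printf("job start  -> %s\n",
	       start_action(1234, 1000000L) == ACTION_CREATE ? "CREATE" : "MODIFY");
	printf("job finish -> %s\n",
	       complete_action(1234, 1000000L) == ACTION_CREATE ? "CREATE" : "MODIFY");
	return 0;
}
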
gold_request_add_selection(gold_request, "Suspended"); + gold_request_add_selection(gold_request, "State"); + gold_request_add_selection(gold_request, "ExitCode"); + gold_request_add_selection(gold_request, "QoS"); + + gold_response = get_gold_response(gold_request); + destroy_gold_request(gold_request); + + if(!gold_response) { + error("_check_for_job: no response received"); + return NULL; + } + + job_list = list_create(destroy_jobacct_job_rec); + if(gold_response->entry_cnt > 0) { + itr = list_iterator_create(gold_response->entries); + while((resp_entry = list_next(itr))) { + job = create_jobacct_job_rec(); + itr2 = list_iterator_create(resp_entry->name_val); + while((name_val = list_next(itr2))) { + if(!strcmp(name_val->name, "JobId")) { + job->jobid = atoi(name_val->value); + } else if(!strcmp(name_val->name, + "GoldAccountId")) { + acct_association_rec_t account_rec; + memset(&account_rec, 0, + sizeof(acct_association_rec_t)); + account_rec.id = atoi(name_val->value); + /* FIX ME: We need to get the + * parts of the association from + * gold here + */ + /* if(acct_storage_p_get_assoc_id( */ +/* db_conn, */ +/* &account_rec) == SLURM_ERROR) */ +/* error("no assoc found for " */ +/* "id %u", */ +/* account_rec.id); */ + + if(account_rec.cluster) { + if(params->opt_cluster && + strcmp(params->opt_cluster, + account_rec. + cluster)) { + destroy_jobacct_job_rec( + job); + job = NULL; + break; + } + job->cluster = + xstrdup(account_rec. + cluster); + } + + if(account_rec.user) { + struct passwd *passwd_ptr = + getpwnam(account_rec. + user); + job->user = xstrdup(account_rec. + user); + if(passwd_ptr) { + job->uid = + passwd_ptr-> + pw_uid; + job->gid = + passwd_ptr-> + pw_gid; + } + } + if(account_rec.acct) + job->account = + xstrdup(account_rec. + acct); + } else if(!strcmp(name_val->name, + "Partition")) { + job->partition = + xstrdup(name_val->value); + } else if(!strcmp(name_val->name, + "RequestedCPUCount")) { + job->req_cpus = atoi(name_val->value); + } else if(!strcmp(name_val->name, + "AllocatedCPUCount")) { + job->alloc_cpus = atoi(name_val->value); + } else if(!strcmp(name_val->name, "NodeList")) { + job->nodes = xstrdup(name_val->value); + } else if(!strcmp(name_val->name, "JobName")) { + job->jobname = xstrdup(name_val->value); + } else if(!strcmp(name_val->name, + "SubmitTime")) { + job->submit = atoi(name_val->value); + } else if(!strcmp(name_val->name, + "EligibleTime")) { + job->eligible = atoi(name_val->value); + } else if(!strcmp(name_val->name, + "StartTime")) { + job->start = atoi(name_val->value); + } else if(!strcmp(name_val->name, "EndTime")) { + job->end = atoi(name_val->value); + } else if(!strcmp(name_val->name, + "Suspended")) { + job->suspended = atoi(name_val->value); + } else if(!strcmp(name_val->name, "State")) { + job->state = atoi(name_val->value); + } else if(!strcmp(name_val->name, "ExitCode")) { + job->exitcode = atoi(name_val->value); + } else if(!strcmp(name_val->name, "QoS")) { + job->qos = atoi(name_val->value); + } + } + list_iterator_destroy(itr2); + + if(!job) + continue; + + job->show_full = 1; + job->track_steps = 0; + job->priority = 0; + + if (!job->nodes) + job->nodes = xstrdup("(unknown)"); + + list_append(job_list, job); + } + list_iterator_destroy(itr); + } + destroy_gold_response(gold_response); + + return job_list; +} + +/* + * expire old info from the storage + */ +extern void jobacct_storage_p_archive(void *db_conn, + List selected_parts, + void *params) +{ + info("not implemented"); + + return; +} + +extern int 
acct_storage_p_update_shares_used(void *db_conn, + List shares_used) +{ + return SLURM_SUCCESS; +} diff --git a/src/plugins/accounting_storage/mysql/Makefile.am b/src/plugins/accounting_storage/mysql/Makefile.am new file mode 100644 index 000000000..a34ba8aa8 --- /dev/null +++ b/src/plugins/accounting_storage/mysql/Makefile.am @@ -0,0 +1,21 @@ +# Makefile for accounting_storage/mysql plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = accounting_storage_mysql.la + +# Mysql storage plugin. +accounting_storage_mysql_la_SOURCES = accounting_storage_mysql.c \ + mysql_jobacct_process.c mysql_jobacct_process.h \ + mysql_rollup.c mysql_rollup.h +accounting_storage_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +accounting_storage_mysql_la_CFLAGS = $(MYSQL_CFLAGS) +accounting_storage_mysql_la_LIBADD = \ + $(top_builddir)/src/database/libslurm_mysql.la $(MYSQL_LIBS) +accounting_storage_mysql_la_DEPENDENCIES = \ + $(top_builddir)/src/database/libslurm_mysql.la + diff --git a/src/plugins/accounting_storage/mysql/Makefile.in b/src/plugins/accounting_storage/mysql/Makefile.in new file mode 100644 index 000000000..2a69a97b1 --- /dev/null +++ b/src/plugins/accounting_storage/mysql/Makefile.in @@ -0,0 +1,593 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
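The Makefile.am above builds the MySQL accounting plugin as a libtool module that exports the generic plugin symbols (plugin_name, plugin_type, plugin_version) described later in accounting_storage_mysql.c. As a minimal sketch only, not part of this patch, the snippet below shows how a standalone tool could dlopen such a module and verify those symbols; the helper name check_accounting_plugin and the use of POSIX dlfcn are assumptions made for illustration.

#include <dlfcn.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: open a plugin .so and check the symbols that
 * every SLURM plugin is expected to export.  Build with -ldl. */
static int check_accounting_plugin(const char *path)
{
	void *handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
	const char *name, *type;
	uint32_t *version;

	if (!handle) {
		fprintf(stderr, "dlopen(%s): %s\n", path, dlerror());
		return -1;
	}
	name    = (const char *) dlsym(handle, "plugin_name");
	type    = (const char *) dlsym(handle, "plugin_type");
	version = (uint32_t *) dlsym(handle, "plugin_version");

	/* accounting storage plugins advertise a type with this prefix */
	if (!name || !type || !version
	    || strncmp(type, "accounting_storage/", 19)) {
		dlclose(handle);
		return -1;
	}
	printf("%s (%s) version %u\n", name, type, (unsigned) *version);
	dlclose(handle);
	return 0;
}

int main(int argc, char **argv)
{
	return (argc == 2 && check_accounting_plugin(argv[1]) == 0) ? 0 : 1;
}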
+ +@SET_MAKE@ + +# Makefile for accounting_storage/mysql plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/accounting_storage/mysql +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am_accounting_storage_mysql_la_OBJECTS = \ + accounting_storage_mysql_la-accounting_storage_mysql.lo \ + accounting_storage_mysql_la-mysql_jobacct_process.lo \ + accounting_storage_mysql_la-mysql_rollup.lo +accounting_storage_mysql_la_OBJECTS = \ + $(am_accounting_storage_mysql_la_OBJECTS) +accounting_storage_mysql_la_LINK = $(LIBTOOL) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) \ + $(accounting_storage_mysql_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(accounting_storage_mysql_la_SOURCES) 
+DIST_SOURCES = $(accounting_storage_mysql_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ 
+build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = accounting_storage_mysql.la + +# Mysql storage plugin. +accounting_storage_mysql_la_SOURCES = accounting_storage_mysql.c \ + mysql_jobacct_process.c mysql_jobacct_process.h \ + mysql_rollup.c mysql_rollup.h + +accounting_storage_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +accounting_storage_mysql_la_CFLAGS = $(MYSQL_CFLAGS) +accounting_storage_mysql_la_LIBADD = \ + $(top_builddir)/src/database/libslurm_mysql.la $(MYSQL_LIBS) + +accounting_storage_mysql_la_DEPENDENCIES = \ + $(top_builddir)/src/database/libslurm_mysql.la + +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/accounting_storage/mysql/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/accounting_storage/mysql/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +accounting_storage_mysql.la: $(accounting_storage_mysql_la_OBJECTS) $(accounting_storage_mysql_la_DEPENDENCIES) + $(accounting_storage_mysql_la_LINK) -rpath $(pkglibdir) $(accounting_storage_mysql_la_OBJECTS) $(accounting_storage_mysql_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-accounting_storage_mysql.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-mysql_jobacct_process.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-mysql_rollup.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo 
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +accounting_storage_mysql_la-accounting_storage_mysql.lo: accounting_storage_mysql.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_mysql_la-accounting_storage_mysql.lo -MD -MP -MF $(DEPDIR)/accounting_storage_mysql_la-accounting_storage_mysql.Tpo -c -o accounting_storage_mysql_la-accounting_storage_mysql.lo `test -f 'accounting_storage_mysql.c' || echo '$(srcdir)/'`accounting_storage_mysql.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/accounting_storage_mysql_la-accounting_storage_mysql.Tpo $(DEPDIR)/accounting_storage_mysql_la-accounting_storage_mysql.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='accounting_storage_mysql.c' object='accounting_storage_mysql_la-accounting_storage_mysql.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_mysql_la-accounting_storage_mysql.lo `test -f 'accounting_storage_mysql.c' || echo '$(srcdir)/'`accounting_storage_mysql.c + +accounting_storage_mysql_la-mysql_jobacct_process.lo: mysql_jobacct_process.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_mysql_la-mysql_jobacct_process.lo -MD -MP -MF $(DEPDIR)/accounting_storage_mysql_la-mysql_jobacct_process.Tpo -c -o accounting_storage_mysql_la-mysql_jobacct_process.lo `test -f 'mysql_jobacct_process.c' || echo '$(srcdir)/'`mysql_jobacct_process.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/accounting_storage_mysql_la-mysql_jobacct_process.Tpo $(DEPDIR)/accounting_storage_mysql_la-mysql_jobacct_process.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='mysql_jobacct_process.c' object='accounting_storage_mysql_la-mysql_jobacct_process.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_mysql_la-mysql_jobacct_process.lo `test -f 'mysql_jobacct_process.c' || echo '$(srcdir)/'`mysql_jobacct_process.c + +accounting_storage_mysql_la-mysql_rollup.lo: mysql_rollup.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_mysql_la-mysql_rollup.lo -MD -MP -MF $(DEPDIR)/accounting_storage_mysql_la-mysql_rollup.Tpo -c -o accounting_storage_mysql_la-mysql_rollup.lo `test -f 'mysql_rollup.c' || echo '$(srcdir)/'`mysql_rollup.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/accounting_storage_mysql_la-mysql_rollup.Tpo 
$(DEPDIR)/accounting_storage_mysql_la-mysql_rollup.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='mysql_rollup.c' object='accounting_storage_mysql_la-mysql_rollup.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_mysql_la-mysql_rollup.lo `test -f 'mysql_rollup.c' || echo '$(srcdir)/'`mysql_rollup.c + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + 
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c new file mode 100644 index 000000000..21aeaf65d --- /dev/null +++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c @@ -0,0 +1,5468 @@ +/*****************************************************************************\ + * accounting_storage_mysql.c - accounting interface to mysql. + * + * $Id: accounting_storage_mysql.c 13061 2008-01-22 21:23:56Z da $ + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. 
+ * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + ***************************************************************************** + * Notes on mysql configuration + * Assumes mysql is installed as user root + * Assumes SlurmUser is configured as user slurm + * # mysqladmin create <db_name> + * The <db_name> goes into slurmdbd.conf as StorageLoc + * # mysql --user=root -p + * mysql> GRANT ALL ON *.* TO 'slurm'@'localhost' IDENTIFIED BY PASSWORD 'pw'; + * mysql> GRANT SELECT, INSERT ON *.* TO 'slurm'@'localhost'; +\*****************************************************************************/ + +#include <strings.h> +#include "mysql_jobacct_process.h" +#include "mysql_rollup.h" +#include "src/common/slurmdbd_defs.h" +#include "src/common/slurm_auth.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "accounting_storage" for SLURM job completion + * logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "accounting_storage/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. 
Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. + */ +const char plugin_name[] = "Accounting storage MYSQL plugin"; +const char plugin_type[] = "accounting_storage/mysql"; +const uint32_t plugin_version = 100; + +#ifdef HAVE_MYSQL + +static mysql_db_info_t *mysql_db_info = NULL; +static char *mysql_db_name = NULL; + +#define DEFAULT_ACCT_DB "slurm_acct_db" +#define DELETE_SEC_BACK 86400 + +char *acct_coord_table = "acct_coord_table"; +char *acct_table = "acct_table"; +char *assoc_day_table = "assoc_day_usage_table"; +char *assoc_hour_table = "assoc_hour_usage_table"; +char *assoc_month_table = "assoc_month_usage_table"; +char *assoc_table = "assoc_table"; +char *cluster_day_table = "cluster_day_usage_table"; +char *cluster_hour_table = "cluster_hour_usage_table"; +char *cluster_month_table = "cluster_month_usage_table"; +char *cluster_table = "cluster_table"; +char *event_table = "cluster_event_table"; +char *job_table = "job_table"; +char *step_table = "step_table"; +char *txn_table = "txn_table"; +char *user_table = "user_table"; +char *last_ran_table = "last_ran_table"; +char *suspend_table = "suspend_table"; + +extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit); + +extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn, + uint32_t uid, + List association_list); +extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn, + acct_association_cond_t *assoc_q); + +/* This function will take the object given and free it later so it + * needed to be removed from a list if in one before + */ +static int _addto_update_list(List update_list, acct_update_type_t type, + void *object) +{ + acct_update_object_t *update_object = NULL; + ListIterator itr = NULL; + if(!update_list) { + error("no update list given"); + return SLURM_ERROR; + } + + itr = list_iterator_create(update_list); + while((update_object = list_next(itr))) { + if(update_object->type == type) + break; + } + list_iterator_destroy(itr); + + if(update_object) { + list_append(update_object->objects, object); + return SLURM_SUCCESS; + } + update_object = xmalloc(sizeof(acct_update_object_t)); + + list_append(update_list, update_object); + + update_object->type = type; + + switch(type) { + case ACCT_MODIFY_USER: + case ACCT_ADD_USER: + case ACCT_REMOVE_USER: + update_object->objects = list_create(destroy_acct_user_rec); + break; + case ACCT_ADD_ASSOC: + case ACCT_MODIFY_ASSOC: + case ACCT_REMOVE_ASSOC: + update_object->objects = list_create( + destroy_acct_association_rec); + break; + case ACCT_UPDATE_NOTSET: + default: + error("unknown type set in update_object: %d", type); + return SLURM_ERROR; + } + list_append(update_object->objects, object); + return SLURM_SUCCESS; +} + +static int _move_account(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt, + char *cluster, + char *id, char *parent) +{ +/* + tested sql... 
+ + SELECT @parLeft := lft from assoc_table where cluster='name' && acct='new parent' && user=''; + + SELECT @oldLeft := lft, @oldRight := rgt, @myWidth := (rgt - lft + 1), @myDiff := (@parLeft+1) - lft FROM assoc_table WHERE id = 'account id'; + + update assoc_table set deleted = deleted + 2, lft = lft + @myDiff, rgt = rgt + @myDiff WHERE lft BETWEEN @oldLeft AND @oldRight; + + UPDATE assoc_table SET rgt = rgt + @myWidth WHERE rgt > @parLeft && deleted < 2; + UPDATE assoc_table SET lft = lft + @myWidth WHERE lft > @parLeft && deleted < 2; + + UPDATE assoc_table SET rgt = rgt - @myWidth WHERE (@myDiff < 0 && rgt > @oldRight && deleted < 2) || (@myDiff >= 0 && rgt > @oldLeft); + UPDATE assoc_table SET lft = lft - @myWidth WHERE (@myDiff < 0 && lft > @oldRight && deleted < 2) || (@myDiff >= 0 && lft > @oldLeft); + + update assoc_table set deleted = deleted - 2 WHERE deleted > 1; + + update assoc_table set parent_acct='new parent' where id = 'account id'; + +*/ + int rc = SLURM_SUCCESS; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + int par_left = 0; + int diff = 0; + int width = 0; + char *query = xstrdup_printf( + "SELECT lft from %s " + "where cluster='%s' && acct='%s' && user='';", + assoc_table, + cluster, parent); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + if(!(row = mysql_fetch_row(result))) { + error("no row"); + mysql_free_result(result); + return SLURM_ERROR; + } + par_left = atoi(row[0]); + mysql_free_result(result); + + width = (rgt - lft + 1); + diff = ((par_left + 1) - lft); + + xstrfmtcat(query, + "update %s set deleted = deleted + 2, " + "lft = lft + %d, rgt = rgt + %d " + "WHERE lft BETWEEN %u AND %u;", + assoc_table, diff, diff, lft, rgt); + + xstrfmtcat(query, + "UPDATE %s SET rgt = rgt + %d WHERE " + "rgt > %d && deleted < 2;" + "UPDATE %s SET lft = lft + %d WHERE " + "lft > %d && deleted < 2;", + assoc_table, width, + par_left, + assoc_table, width, + par_left); + + xstrfmtcat(query, + "UPDATE %s SET rgt = rgt - %d WHERE " + "(%d < 0 && rgt > %u && deleted < 2) " + "|| (%d >= 0 && rgt > %u);" + "UPDATE %s SET lft = lft - %d WHERE " + "(%d < 0 && lft > %u && deleted < 2) " + "|| (%d >= 0 && lft > %u);", + assoc_table, width, + diff, rgt, + diff, lft, + assoc_table, width, + diff, rgt, + diff, lft); + + xstrfmtcat(query, + "update %s set deleted = deleted - 2 WHERE deleted > 1;", + assoc_table); + xstrfmtcat(query, + "update %s set parent_acct='%s' where id = %s;", + assoc_table, parent, id); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + + return rc; +} + +static int _move_parent(mysql_conn_t *mysql_conn, uint32_t lft, uint32_t rgt, + char *cluster, + char *id, char *old_parent, char *new_parent) +{ + MYSQL_RES *result = NULL; + MYSQL_ROW row; + char *query = NULL; + int rc = SLURM_SUCCESS; + List assoc_list = NULL; + ListIterator itr = NULL; + acct_association_rec_t *assoc = NULL; + + /* first we need to see if we are + * going to make a child of this + * account the new parent. 
If so we + * need to move that child to this + * accounts parent and then do the move + */ + query = xstrdup_printf( + "select id, lft, rgt from %s where lft between %d and %d " + "&& acct='%s' && user='' order by lft;", + assoc_table, lft, rgt, + new_parent); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = + mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + if((row = mysql_fetch_row(result))) { + debug4("%s(%s) %s,%s is a child of %s", + new_parent, row[0], row[1], row[2], id); + rc = _move_account(mysql_conn, atoi(row[1]), atoi(row[2]), + cluster, row[0], old_parent); + } + + mysql_free_result(result); + + if(rc == SLURM_ERROR) + return rc; + + /* now move the one we wanted to move in the first place */ + rc = _move_account(mysql_conn, lft, rgt, cluster, id, new_parent); + + if(rc == SLURM_ERROR) + return rc; + + /* now we need to send the update of the new parents and + * limits, so just to be safe, send the whole tree + */ + assoc_list = acct_storage_p_get_associations(mysql_conn, NULL); + /* NOTE: you can not use list_pop, or list_push + anywhere either, since mysql is + exporting something of the same type as a macro, + which messes everything up (my_list.h is the bad boy). + So we are just going to delete each item as it + comes out since we are moving it to the update_list. + */ + itr = list_iterator_create(assoc_list); + while((assoc = list_next(itr))) { + if(_addto_update_list(mysql_conn->update_list, + ACCT_MODIFY_ASSOC, + assoc) == SLURM_SUCCESS) + list_remove(itr); + } + list_iterator_destroy(itr); + list_destroy(assoc_list); + return rc; +} + +static int _last_affected_rows(MYSQL *mysql_db) +{ + int status=0, rows=0; + MYSQL_RES *result = NULL; + + do { + result = mysql_store_result(mysql_db); + if (result) + mysql_free_result(result); + else + if (mysql_field_count(mysql_db) == 0) { + status = mysql_affected_rows(mysql_db); + if(status > 0) + rows = status; + } + if ((status = mysql_next_result(mysql_db)) > 0) + debug3("Could not execute statement\n"); + } while (status == 0); + + return rows; +} + +static int _modify_common(mysql_conn_t *mysql_conn, + uint16_t type, + time_t now, + char *user_name, + char *table, + char *cond_char, + char *vals) +{ + char *query = NULL; + int rc = SLURM_SUCCESS; + + xstrfmtcat(query, + "update %s set mod_time=%d%s " + "where deleted=0 && %s;", + table, now, vals, + cond_char); + xstrfmtcat(query, + "insert into %s " + "(timestamp, action, name, actor, info) " + "values (%d, %d, \"%s\", '%s', \"%s\");", + txn_table, + now, type, cond_char, user_name, vals); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + + if(rc != SLURM_SUCCESS) { + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_flush(mysql_conn->update_list); + + return SLURM_ERROR; + } + + return SLURM_SUCCESS; +} +static int _modify_unset_users(mysql_conn_t *mysql_conn, + acct_association_rec_t *assoc, + char *acct, + uint32_t lft, uint32_t rgt, + List ret_list) +{ + MYSQL_RES *result = NULL; + MYSQL_ROW row; + char *query = NULL, *object = NULL; + int i; + + char *assoc_req_inx[] = { + "id", + "user", + "acct", + "cluster", + "partition", + "max_jobs", + "max_nodes_per_job", + "max_wall_duration_per_job", + "max_cpu_secs_per_job", + "lft", + "rgt" + }; + + enum { + ASSOC_ID, + ASSOC_USER, + ASSOC_ACCT, + ASSOC_CLUSTER, + ASSOC_PART, + ASSOC_MJ, + ASSOC_MNPJ, + ASSOC_MWPJ, + 
ASSOC_MCPJ, + ASSOC_LFT, + ASSOC_RGT, + ASSOC_COUNT + }; + + if(!ret_list || !acct) + return SLURM_ERROR; + + for(i=0; i<ASSOC_COUNT; i++) { + if(i) + xstrcat(object, ", "); + xstrcat(object, assoc_req_inx[i]); + } + + query = xstrdup_printf("select distinct %s from %s where deleted=0 " + "&& lft between %d and %d && " + "((user = '' && parent_acct = '%s') || " + "(user != '' && acct = '%s')) " + "order by lft;", + object, assoc_table, lft, rgt, acct, acct); +/* query = xstrdup_printf("select distinct %s from %s where deleted=0 " */ +/* "&& lft between %d and %d and user != ''" */ +/* "order by lft;", */ +/* object, assoc_table, lft, rgt); */ + xfree(object); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = + mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + while((row = mysql_fetch_row(result))) { + acct_association_rec_t *mod_assoc = NULL; + int modified = 0; + + mod_assoc = xmalloc(sizeof(acct_association_rec_t)); + mod_assoc->id = atoi(row[ASSOC_ID]); + + if(!row[ASSOC_MJ] && assoc->max_jobs != (uint32_t)NO_VAL) { + mod_assoc->max_jobs = assoc->max_jobs; + modified = 1; + } else + mod_assoc->max_jobs = (uint32_t)NO_VAL; + + if(!row[ASSOC_MNPJ] && + assoc->max_nodes_per_job != (uint32_t)NO_VAL) { + mod_assoc->max_nodes_per_job = + assoc->max_nodes_per_job; + modified = 1; + } else + mod_assoc->max_nodes_per_job = (uint32_t)NO_VAL; + + + if(!row[ASSOC_MWPJ] && + assoc->max_wall_duration_per_job != (uint32_t)NO_VAL) { + mod_assoc->max_wall_duration_per_job = + assoc->max_wall_duration_per_job; + modified = 1; + } else + mod_assoc->max_wall_duration_per_job = (uint32_t)NO_VAL; + + if(!row[ASSOC_MCPJ] && + assoc->max_cpu_secs_per_job != (uint32_t)NO_VAL) { + mod_assoc->max_cpu_secs_per_job = + assoc->max_cpu_secs_per_job; + modified = 1; + } else + mod_assoc->max_cpu_secs_per_job = (uint32_t)NO_VAL; + + + if(modified) { + if(!row[ASSOC_USER][0]) { + _modify_unset_users(mysql_conn, + mod_assoc, + row[ASSOC_ACCT], + atoi(row[ASSOC_LFT]), + atoi(row[ASSOC_RGT]), + ret_list); + destroy_acct_association_rec(mod_assoc); + continue; + } + + mod_assoc->fairshare = (uint32_t)NO_VAL; + if(row[ASSOC_PART][0]) { + // see if there is a partition name + object = xstrdup_printf( + "C = %-10s A = %-20s U = %-9s P = %s", + row[ASSOC_CLUSTER], row[ASSOC_ACCT], + row[ASSOC_USER], row[ASSOC_PART]); + } else { + object = xstrdup_printf( + "C = %-10s A = %-20s U = %-9s", + row[ASSOC_CLUSTER], + row[ASSOC_ACCT], + row[ASSOC_USER]); + } + + list_append(ret_list, object); + + if(_addto_update_list(mysql_conn->update_list, + ACCT_MODIFY_ASSOC, + mod_assoc) != SLURM_SUCCESS) + error("couldn't add to the update list"); + } else { + xfree(mod_assoc); + } + } + mysql_free_result(result); + + return SLURM_SUCCESS; +} + + + +/* Every option in assoc_char should have a 't1.' infront of it. 
*/ +static int _remove_common(mysql_conn_t *mysql_conn, + uint16_t type, + time_t now, + char *user_name, + char *table, + char *name_char, + char *assoc_char) +{ + int rc = SLURM_SUCCESS; + char *query = NULL; + char *loc_assoc_char = NULL; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + time_t day_old = now - DELETE_SEC_BACK; + + /* we want to remove completely all that is less than a day old */ + if(table != assoc_table) { + query = xstrdup_printf("delete from %s where creation_time>%d " + "&& (%s);", + table, day_old, name_char); + } + + xstrfmtcat(query, + "update %s set mod_time=%d, deleted=1 " + "where deleted=0 && (%s);", + table, now, name_char); + xstrfmtcat(query, + "insert into %s (timestamp, action, name, actor) " + "values (%d, %d, \"%s\", '%s');", + txn_table, + now, type, name_char, user_name); + + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_flush(mysql_conn->update_list); + + return SLURM_ERROR; + } + + /* mark deleted=1 or remove completely the + accounting tables + */ + if(table != assoc_table) { + if(!assoc_char) { + error("no assoc_char"); + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_destroy(mysql_conn->update_list); + mysql_conn->update_list = + list_create(destroy_acct_update_object); + return SLURM_ERROR; + } + + /* If we are doing this on an assoc_table we have + already done this, so don't */ +/* query = xstrdup_printf("select lft, rgt " */ +/* "from %s as t2 where %s order by lft;", */ +/* assoc_table, assoc_char); */ + query = xstrdup_printf("select distinct t1.id " + "from %s as t1, %s as t2 " + "where %s && t1.lft between " + "t2.lft and t2.rgt;", + assoc_table, assoc_table, assoc_char); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_destroy(mysql_conn->update_list); + mysql_conn->update_list = + list_create(destroy_acct_update_object); + return SLURM_ERROR; + } + xfree(query); + + rc = 0; + loc_assoc_char = NULL; + while((row = mysql_fetch_row(result))) { + acct_association_rec_t *rem_assoc = NULL; + if(!rc) { + xstrfmtcat(loc_assoc_char, "id=%s", row[0]); + rc = 1; + } else { + xstrfmtcat(loc_assoc_char, + " || id=%s", row[0]); + } + rem_assoc = xmalloc(sizeof(acct_association_rec_t)); + rem_assoc->id = atoi(row[0]); + if(_addto_update_list(mysql_conn->update_list, + ACCT_REMOVE_ASSOC, + rem_assoc) != SLURM_SUCCESS) + error("couldn't add to the update list"); + } + mysql_free_result(result); + } else + loc_assoc_char = assoc_char; + +/* query = xstrdup_printf( */ +/* "delete t2 from %s as t2, %s as t1 where t1.creation_time>%d && (%s);" */ +/* "delete t2 from %s as t2, %s as t1 where t1.creation_time>%d && (%s);" */ +/* "delete t2 from %s as t2, %s as t1 where t1.creation_time>%d && (%s);", */ +/* assoc_day_table, assoc_table, day_old, loc_assoc_char, */ +/* assoc_hour_table, assoc_table, day_old, loc_assoc_char, */ +/* assoc_month_table, assoc_table, day_old, loc_assoc_char); */ + query = xstrdup_printf( + "delete from %s where creation_time>%d && (%s);" + "delete from %s where creation_time>%d && (%s);" + "delete from %s where creation_time>%d && (%s);", + assoc_day_table, day_old, loc_assoc_char, + assoc_hour_table, day_old, 
loc_assoc_char, + assoc_month_table, day_old, loc_assoc_char); + xstrfmtcat(query, + "update %s set mod_time=%d, deleted=1 where (%s);" + "update %s set mod_time=%d, deleted=1 where (%s);" + "update %s set mod_time=%d, deleted=1 where (%s);", + assoc_day_table, now, loc_assoc_char, + assoc_hour_table, now, loc_assoc_char, + assoc_month_table, now, loc_assoc_char); + + debug3("%d query\n%s %d", mysql_conn->conn, query, strlen(query)); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_flush(mysql_conn->update_list); + return SLURM_ERROR; + } + + /* remove completely all the associations for this added in the last + * day, since they are most likely nothing we really wanted in + * the first place. + */ + query = xstrdup_printf("select id from %s as t1 where " + "creation_time>%d && (%s);", + assoc_table, day_old, loc_assoc_char); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_flush(mysql_conn->update_list); + return SLURM_ERROR; + } + xfree(query); + + /* we have to do this one at a time since the lft's and rgt's + change */ + while((row = mysql_fetch_row(result))) { + MYSQL_RES *result2 = NULL; + MYSQL_ROW row2; + + xstrfmtcat(query, + "SELECT lft, rgt, (rgt - lft + 1) " + "FROM %s WHERE id = %s;", + assoc_table, row[0]); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result2 = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + rc = SLURM_ERROR; + break; + } + xfree(query); + if(!(row2 = mysql_fetch_row(result2))) { + mysql_free_result(result2); + continue; + } + + xstrfmtcat(query, + "delete quick from %s where lft between " + "%s AND %s;", + assoc_table, + row2[0], row2[1]); + xstrfmtcat(query, + "UPDATE %s SET rgt = rgt - %s WHERE " + "rgt > %s;" + "UPDATE %s SET lft = lft - %s WHERE " + "lft > %s;", + assoc_table, row2[2], + row2[1], + assoc_table, row2[2], row2[1]); + + mysql_free_result(result2); + + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("couldn't remove assoc"); + break; + } + } + mysql_free_result(result); + if(rc == SLURM_ERROR) { + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_flush(mysql_conn->update_list); + return rc; + } + + if(table == assoc_table) + return SLURM_SUCCESS; + + /* now update the associations themselves that are still around */ + query = xstrdup_printf("update %s as t1 set mod_time=%d, deleted=1 " + "where deleted=0 && (%s);", + assoc_table, now, + loc_assoc_char); + xfree(loc_assoc_char); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_flush(mysql_conn->update_list); + } + + return rc; +} + +static int _get_db_index(MYSQL *acct_mysql_db, + time_t submit, uint32_t jobid, uint32_t associd) +{ + MYSQL_RES *result = NULL; + MYSQL_ROW row; + int db_index = -1; + char *query = xstrdup_printf("select id from %s where " + "submit=%d and jobid=%u and associd=%u", + job_table, (int)submit, jobid, associd); + + if(!(result = mysql_db_query_ret(acct_mysql_db, query, 
0))) { + xfree(query); + return -1; + } + xfree(query); + + row = mysql_fetch_row(result); + if(!row) { + mysql_free_result(result); + error("We can't get a db_index for this combo, " + "submit=%d and jobid=%u and associd=%u.", + (int)submit, jobid, associd); + return -1; + } + db_index = atoi(row[0]); + mysql_free_result(result); + + return db_index; +} + +static mysql_db_info_t *_mysql_acct_create_db_info() +{ + mysql_db_info_t *db_info = xmalloc(sizeof(mysql_db_info_t)); + db_info->port = slurm_get_accounting_storage_port(); + if(!db_info->port) + db_info->port = 3306; + db_info->host = slurm_get_accounting_storage_host(); + db_info->user = slurm_get_accounting_storage_user(); + db_info->pass = slurm_get_accounting_storage_pass(); + return db_info; +} + +static int _mysql_acct_check_tables(MYSQL *acct_mysql_db) +{ + int rc = SLURM_SUCCESS; + storage_field_t acct_coord_table_fields[] = { + { "deleted", "tinyint default 0" }, + { "acct", "tinytext not null" }, + { "user", "tinytext not null" }, + { NULL, NULL} + }; + + storage_field_t acct_table_fields[] = { + { "creation_time", "int unsigned not null" }, + { "mod_time", "int unsigned default 0 not null" }, + { "deleted", "tinyint default 0" }, + { "name", "tinytext not null" }, + { "description", "text not null" }, + { "organization", "text not null" }, + { "qos", "smallint default 1 not null" }, + { NULL, NULL} + }; + + storage_field_t assoc_table_fields[] = { + { "creation_time", "int unsigned not null" }, + { "mod_time", "int unsigned default 0 not null" }, + { "deleted", "tinyint default 0" }, + { "id", "int not null auto_increment" }, + { "user", "tinytext not null default ''" }, + { "acct", "tinytext not null" }, + { "cluster", "tinytext not null" }, + { "partition", "tinytext not null default ''" }, + { "parent_acct", "tinytext not null default ''" }, + { "lft", "int not null" }, + { "rgt", "int not null" }, + { "fairshare", "int default 1 not null" }, + { "max_jobs", "int default NULL" }, + { "max_nodes_per_job", "int default NULL" }, + { "max_wall_duration_per_job", "int default NULL" }, + { "max_cpu_secs_per_job", "int default NULL" }, + { NULL, NULL} + }; + + storage_field_t assoc_usage_table_fields[] = { + { "creation_time", "int unsigned not null" }, + { "mod_time", "int unsigned default 0 not null" }, + { "deleted", "tinyint default 0" }, + { "id", "int not null" }, + { "period_start", "int unsigned not null" }, + { "alloc_cpu_secs", "bigint default 0" }, + { NULL, NULL} + }; + + storage_field_t cluster_table_fields[] = { + { "creation_time", "int unsigned not null" }, + { "mod_time", "int unsigned default 0 not null" }, + { "deleted", "tinyint default 0" }, + { "name", "tinytext not null" }, + { "control_host", "tinytext not null default ''" }, + { "control_port", "mediumint not null default 0" }, + { NULL, NULL} + }; + + storage_field_t cluster_usage_table_fields[] = { + { "creation_time", "int unsigned not null" }, + { "mod_time", "int unsigned default 0 not null" }, + { "deleted", "tinyint default 0" }, + { "cluster", "tinytext not null" }, + { "period_start", "int unsigned not null" }, + { "cpu_count", "int default 0" }, + { "alloc_cpu_secs", "bigint default 0" }, + { "down_cpu_secs", "bigint default 0" }, + { "idle_cpu_secs", "bigint default 0" }, + { "resv_cpu_secs", "bigint default 0" }, + { "over_cpu_secs", "bigint default 0" }, + { NULL, NULL} + }; + + storage_field_t event_table_fields[] = { + { "node_name", "tinytext default '' not null" }, + { "cluster", "tinytext not null" }, + { "cpu_count", "int not null" }, 
+ { "period_start", "int unsigned not null" }, + { "period_end", "int unsigned default 0 not null" }, + { "reason", "tinytext not null" }, + { NULL, NULL} + }; + + storage_field_t job_table_fields[] = { + { "id", "int not null auto_increment" }, + { "jobid", "mediumint unsigned not null" }, + { "associd", "mediumint unsigned not null" }, + { "uid", "smallint unsigned not null" }, + { "gid", "smallint unsigned not null" }, + { "partition", "tinytext not null" }, + { "blockid", "tinytext" }, + { "account", "tinytext" }, + { "eligible", "int unsigned default 0 not null" }, + { "submit", "int unsigned default 0 not null" }, + { "start", "int unsigned default 0 not null" }, + { "end", "int unsigned default 0 not null" }, + { "suspended", "int unsigned default 0 not null" }, + { "name", "tinytext not null" }, + { "track_steps", "tinyint not null" }, + { "state", "smallint not null" }, + { "comp_code", "int default 0 not null" }, + { "priority", "int unsigned not null" }, + { "req_cpus", "mediumint unsigned not null" }, + { "alloc_cpus", "mediumint unsigned not null" }, + { "nodelist", "text" }, + { "kill_requid", "smallint default -1 not null" }, + { "qos", "smallint default 0" }, + { NULL, NULL} + }; + + storage_field_t last_ran_table_fields[] = { + { "hourly_rollup", "int unsigned default 0 not null" }, + { "daily_rollup", "int unsigned default 0 not null" }, + { "monthly_rollup", "int unsigned default 0 not null" }, + { NULL, NULL} + }; + + storage_field_t step_table_fields[] = { + { "id", "int not null" }, + { "stepid", "smallint not null" }, + { "start", "int unsigned default 0 not null" }, + { "end", "int unsigned default 0 not null" }, + { "suspended", "int unsigned default 0 not null" }, + { "name", "text not null" }, + { "nodelist", "text not null" }, + { "state", "smallint not null" }, + { "kill_requid", "smallint default -1 not null" }, + { "comp_code", "int default 0 not null" }, + { "cpus", "mediumint unsigned not null" }, + { "user_sec", "int unsigned default 0 not null" }, + { "user_usec", "int unsigned default 0 not null" }, + { "sys_sec", "int unsigned default 0 not null" }, + { "sys_usec", "int unsigned default 0 not null" }, + { "max_vsize", "mediumint unsigned default 0 not null" }, + { "max_vsize_task", "smallint unsigned default 0 not null" }, + { "max_vsize_node", "mediumint unsigned default 0 not null" }, + { "ave_vsize", "float default 0.0 not null" }, + { "max_rss", "mediumint unsigned default 0 not null" }, + { "max_rss_task", "smallint unsigned default 0 not null" }, + { "max_rss_node", "mediumint unsigned default 0 not null" }, + { "ave_rss", "float default 0.0 not null" }, + { "max_pages", "mediumint unsigned default 0 not null" }, + { "max_pages_task", "smallint unsigned default 0 not null" }, + { "max_pages_node", "mediumint unsigned default 0 not null" }, + { "ave_pages", "float default 0.0 not null" }, + { "min_cpu", "mediumint unsigned default 0 not null" }, + { "min_cpu_task", "smallint unsigned default 0 not null" }, + { "min_cpu_node", "mediumint unsigned default 0 not null" }, + { "ave_cpu", "float default 0.0 not null" }, + { NULL, NULL} + }; + + storage_field_t suspend_table_fields[] = { + { "id", "int not null" }, + { "associd", "mediumint not null" }, + { "start", "int unsigned default 0 not null" }, + { "end", "int unsigned default 0 not null" }, + { NULL, NULL} + }; + + storage_field_t txn_table_fields[] = { + { "id", "int not null auto_increment" }, + { "timestamp", "int unsigned default 0 not null" }, + { "action", "smallint not null" }, + { "name", 
"tinytext not null" }, + { "actor", "tinytext not null" }, + { "info", "text" }, + { NULL, NULL} + }; + + storage_field_t user_table_fields[] = { + { "creation_time", "int unsigned not null" }, + { "mod_time", "int unsigned default 0 not null" }, + { "deleted", "tinyint default 0" }, + { "name", "tinytext not null" }, + { "default_acct", "tinytext not null" }, + { "qos", "smallint default 1 not null" }, + { "admin_level", "smallint default 1 not null" }, + { NULL, NULL} + }; + + char *get_parent_proc = + "drop procedure if exists get_parent_limits; " + "create procedure get_parent_limits(" + "my_table text, acct text, cluster text) " + "begin " + "set @par_id = NULL; " + "set @mj = NULL; " + "set @mnpj = NULL; " + "set @mwpj = NULL; " + "set @mcpj = NULL; " + "set @my_acct = acct; " + "REPEAT " + "set @s = 'select '; " + "if @par_id is NULL then set @s = CONCAT(" + "@s, '@par_id := id, '); " + "end if; " + "if @mj is NULL then set @s = CONCAT(" + "@s, '@mj := max_jobs, '); " + "end if; " + "if @mnpj is NULL then set @s = CONCAT(" + "@s, '@mnpj := max_nodes_per_job, ') ;" + "end if; " + "if @mwpj is NULL then set @s = CONCAT(" + "@s, '@mwpj := max_wall_duration_per_job, '); " + "end if; " + "if @mcpj is NULL then set @s = CONCAT(" + "@s, '@mcpj := max_cpu_secs_per_job, '); " + "end if; " + "set @s = concat(@s, ' @my_acct := parent_acct from ', " + "my_table, ' where acct = \"', @my_acct, '\" && " + "cluster = \"', cluster, '\" && user=\"\"'); " + "prepare query from @s; " + "execute query; " + "deallocate prepare query; " + "UNTIL (@mj != -1 && @mnpj != -1 && @mwpj != -1 " + "&& @mcpj != -1) || @my_acct = '' END REPEAT; " + "END;"; + + if(mysql_db_create_table(acct_mysql_db, acct_coord_table, + acct_coord_table_fields, + ", primary key (acct(20), user(20)))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, acct_table, acct_table_fields, + ", primary key (name(20)))") == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, assoc_day_table, + assoc_usage_table_fields, + ", primary key (id, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, assoc_hour_table, + assoc_usage_table_fields, + ", primary key (id, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, assoc_month_table, + assoc_usage_table_fields, + ", primary key (id, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, assoc_table, assoc_table_fields, + ", primary key (id), " + " unique index (user(20), acct(20), " + "cluster(20), partition(20)))" +/* " unique index (lft), " */ +/* " unique index (rgt))" */) + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, cluster_day_table, + cluster_usage_table_fields, + ", primary key (cluster(20), period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, cluster_hour_table, + cluster_usage_table_fields, + ", primary key (cluster(20), period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, cluster_month_table, + cluster_usage_table_fields, + ", primary key (cluster(20), period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, cluster_table, + cluster_table_fields, + ", primary key (name(20)))") == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, event_table, + event_table_fields, + ", primary key 
(node_name(20), cluster(20), " + "period_start))") == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, job_table, job_table_fields, + ", primary key (id), " + "unique index (jobid, associd, submit))") + == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, last_ran_table, + last_ran_table_fields, + ")") == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, step_table, + step_table_fields, + ", primary key (id, stepid))") == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, suspend_table, + suspend_table_fields, + ")") == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, txn_table, txn_table_fields, + ", primary key (id))") == SLURM_ERROR) + return SLURM_ERROR; + + if(mysql_db_create_table(acct_mysql_db, user_table, user_table_fields, + ", primary key (name(20)))") == SLURM_ERROR) + return SLURM_ERROR; + + rc = mysql_db_query(acct_mysql_db, get_parent_proc); + + return rc; +} +#endif + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + static int first = 1; + int rc = SLURM_SUCCESS; +#ifdef HAVE_MYSQL + MYSQL *acct_mysql_db = NULL; + char *location = NULL; +#else + fatal("No MySQL database was found on the machine. " + "Please check the configure log and run again."); +#endif + + /* since this can be loaded from many different places + only tell us once. */ + if(!first) + return SLURM_SUCCESS; + + first = 0; + +#ifdef HAVE_MYSQL + mysql_db_info = _mysql_acct_create_db_info(); + + location = slurm_get_accounting_storage_loc(); + if(!location) + mysql_db_name = xstrdup(DEFAULT_ACCT_DB); + else { + int i = 0; + while(location[i]) { + if(location[i] == '.' 
|| location[i] == '/') { + debug("%s doesn't look like a database " + "name using %s", + location, DEFAULT_ACCT_DB); + break; + } + i++; + } + if(location[i]) { + mysql_db_name = xstrdup(DEFAULT_ACCT_DB); + xfree(location); + } else + mysql_db_name = location; + } + + debug2("mysql_connect() called for db %s", mysql_db_name); + + mysql_get_db_connection(&acct_mysql_db, mysql_db_name, mysql_db_info); + + rc = _mysql_acct_check_tables(acct_mysql_db); + + mysql_close_db_connection(&acct_mysql_db); +#endif + + if(rc == SLURM_SUCCESS) + verbose("%s loaded", plugin_name); + else + verbose("%s failed", plugin_name); + + return rc; +} + +extern int fini ( void ) +{ +#ifdef HAVE_MYSQL + destroy_mysql_db_info(mysql_db_info); + xfree(mysql_db_name); + mysql_cleanup(); + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern void *acct_storage_p_get_connection(bool make_agent, bool rollback) +{ +#ifdef HAVE_MYSQL + mysql_conn_t *mysql_conn = xmalloc(sizeof(mysql_conn_t)); + static int conn = 0; + if(!mysql_db_info) + init(); + + debug2("acct_storage_p_get_connection: request new connection"); + + mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info); + mysql_conn->rollback = rollback; + if(rollback) { + mysql_autocommit(mysql_conn->acct_mysql_db, 0); + } + mysql_conn->conn = conn++; + mysql_conn->update_list = list_create(destroy_acct_update_object); + return (void *)mysql_conn; +#else + return NULL; +#endif +} + +extern int acct_storage_p_close_connection(mysql_conn_t **mysql_conn) +{ +#ifdef HAVE_MYSQL + + if(!mysql_conn || !(*mysql_conn)) + return SLURM_SUCCESS; + + acct_storage_p_commit((*mysql_conn), 0); + mysql_close_db_connection(&(*mysql_conn)->acct_mysql_db); + list_destroy((*mysql_conn)->update_list); + xfree((*mysql_conn)); + + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit) +{ +#ifdef HAVE_MYSQL + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + debug4("got %d commits", list_count(mysql_conn->update_list)); + + if(mysql_conn->rollback) { + if(!commit) { + if(mysql_db_rollback(mysql_conn->acct_mysql_db)) + error("rollback failed"); + } else { + if(mysql_db_commit(mysql_conn->acct_mysql_db)) + error("commit failed"); + } + } + + if(commit && list_count(mysql_conn->update_list)) { + int rc; + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + accounting_update_msg_t msg; + slurm_msg_t req; + slurm_msg_t resp; + ListIterator itr = NULL; + acct_update_object_t *object = NULL; + + slurm_msg_t_init(&req); + slurm_msg_t_init(&resp); + + memset(&msg, 0, sizeof(accounting_update_msg_t)); + msg.update_list = mysql_conn->update_list; + + xstrfmtcat(query, "select control_host, control_port from %s " + "where deleted=0 && control_port != 0", + cluster_table); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + goto skip; + } + xfree(query); + while((row = mysql_fetch_row(result))) { + info("sending to %s(%s)", row[0], row[1]); + slurm_set_addr_char(&req.address, atoi(row[1]), row[0]); + req.msg_type = ACCOUNTING_UPDATE_MSG; + req.flags = SLURM_GLOBAL_AUTH_KEY; + req.data = &msg; + + rc = 
slurm_send_recv_node_msg(&req, &resp, 0); + if ((rc != 0) || !resp.auth_cred) { + error("update cluster: %m to %s(%s)", + row[0], row[1]); + if (resp.auth_cred) + g_slurm_auth_destroy(resp.auth_cred); + rc = SLURM_ERROR; + } + if (resp.auth_cred) + g_slurm_auth_destroy(resp.auth_cred); + + switch (resp.msg_type) { + case RESPONSE_SLURM_RC: + rc = ((return_code_msg_t *)resp.data)-> + return_code; + slurm_free_return_code_msg(resp.data); + break; + default: + break; + } + //info("got rc of %d", rc); + } + mysql_free_result(result); + skip: + /* NOTE: you can not use list_pop, or list_push + anywhere either, since mysql is + exporting something of the same type as a macro, + which messes everything up (my_list.h is the bad boy). + So we are just going to delete each item as it + comes out. + */ + itr = list_iterator_create(mysql_conn->update_list); + while((object = list_next(itr))) { + if(!object->objects || !list_count(object->objects)) { + list_delete_item(itr); + continue; + } + switch(object->type) { + case ACCT_MODIFY_USER: + case ACCT_ADD_USER: + case ACCT_REMOVE_USER: + rc = assoc_mgr_update_local_users(object); + break; + case ACCT_ADD_ASSOC: + case ACCT_MODIFY_ASSOC: + case ACCT_REMOVE_ASSOC: + rc = assoc_mgr_update_local_assocs(object); + break; + case ACCT_UPDATE_NOTSET: + default: + error("unknown type set in " + "update_object: %d", + object->type); + break; + } + list_delete_item(itr); + } + list_iterator_destroy(itr); + } + list_flush(mysql_conn->update_list); + + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid, + List user_list) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + acct_user_rec_t *object = NULL; + char *cols = NULL, *vals = NULL, *query = NULL, *txn_query = NULL; + struct passwd *pw = NULL; + time_t now = time(NULL); + char *user = NULL; + char *extra = NULL; + int affect_rows = 0; + List assoc_list = list_create(destroy_acct_association_rec); + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + if((pw=getpwuid(uid))) { + user = pw->pw_name; + } + + itr = list_iterator_create(user_list); + while((object = list_next(itr))) { + if(!object->name || !object->default_acct) { + error("We need a user name and " + "default acct to add."); + rc = SLURM_ERROR; + continue; + } + xstrcat(cols, "creation_time, mod_time, name, default_acct"); + xstrfmtcat(vals, "%d, %d, '%s', '%s'", + now, now, object->name, object->default_acct); + xstrfmtcat(extra, ", default_acct='%s'", object->default_acct); + if(object->qos != ACCT_QOS_NOTSET) { + xstrcat(cols, ", qos"); + xstrfmtcat(vals, ", %u", object->qos); + xstrfmtcat(extra, ", qos=%u", object->qos); + } + + if(object->admin_level != ACCT_ADMIN_NOTSET) { + xstrcat(cols, ", admin_level"); + xstrfmtcat(vals, ", %u", object->admin_level); + } + + query = xstrdup_printf( + "insert into %s (%s) values (%s) " + "on duplicate key update deleted=0, mod_time=%d %s;", + user_table, cols, vals, + now, extra); + + xfree(cols); + xfree(vals); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add user %s", object->name); + xfree(extra); + 
continue; + } + + affect_rows = _last_affected_rows(mysql_conn->acct_mysql_db); + if(!affect_rows) { + debug("nothing changed"); + xfree(extra); + continue; + } + + if(_addto_update_list(mysql_conn->update_list, ACCT_ADD_USER, + object) == SLURM_SUCCESS) + list_remove(itr); + + + if(txn_query) + xstrfmtcat(txn_query, + ", (%d, %u, '%s', '%s', \"%s\")", + now, DBD_ADD_USERS, object->name, + user, extra); + else + xstrfmtcat(txn_query, + "insert into %s " + "(timestamp, action, name, actor, info) " + "values (%d, %u, '%s', '%s', \"%s\")", + txn_table, + now, DBD_ADD_USERS, object->name, + user, extra); + xfree(extra); + + if(!object->assoc_list) + continue; + + list_transfer(assoc_list, object->assoc_list); + } + list_iterator_destroy(itr); + + if(rc != SLURM_ERROR) { + if(txn_query) { + xstrcat(txn_query, ";"); + rc = mysql_db_query(mysql_conn->acct_mysql_db, + txn_query); + xfree(txn_query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add txn"); + rc = SLURM_SUCCESS; + } + } + } else + xfree(txn_query); + + if(list_count(assoc_list)) { + if(acct_storage_p_add_associations(mysql_conn, uid, assoc_list) + == SLURM_ERROR) { + error("Problem adding user associations"); + rc = SLURM_ERROR; + } + } + list_destroy(assoc_list); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_add_coord(mysql_conn_t *mysql_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ +#ifdef HAVE_MYSQL + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid, + List acct_list) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + acct_account_rec_t *object = NULL; + char *cols = NULL, *vals = NULL, *query = NULL, *txn_query = NULL; + struct passwd *pw = NULL; + time_t now = time(NULL); + char *user = NULL; + char *extra = NULL; + int affect_rows = 0; + List assoc_list = list_create(destroy_acct_association_rec); + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + if((pw=getpwuid(uid))) { + user = pw->pw_name; + } + + itr = list_iterator_create(acct_list); + while((object = list_next(itr))) { + if(!object->name || !object->description + || !object->organization) { + error("We need an account name, description, and " + "organization to add. 
%s %s %s", + object->name, object->description, + object->organization); + rc = SLURM_ERROR; + continue; + } + xstrcat(cols, "creation_time, mod_time, name, " + "description, organization"); + xstrfmtcat(vals, "%d, %d, '%s', '%s', '%s'", + now, now, object->name, + object->description, object->organization); + xstrfmtcat(extra, ", description='%s', organization='%s'", + object->description, object->organization); + + if(object->qos != ACCT_QOS_NOTSET) { + xstrcat(cols, ", qos"); + xstrfmtcat(vals, ", %u", object->qos); + xstrfmtcat(extra, ", qos=%u", object->qos); + } + + query = xstrdup_printf( + "insert into %s (%s) values (%s) " + "on duplicate key update deleted=0, mod_time=%d %s;", + acct_table, cols, vals, + now, extra); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(cols); + xfree(vals); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add acct"); + xfree(extra); + continue; + } + affect_rows = _last_affected_rows(mysql_conn->acct_mysql_db); +/* debug3("affected %d", affect_rows); */ + + if(!affect_rows) { + debug3("nothing changed"); + xfree(extra); + continue; + } + + if(txn_query) + xstrfmtcat(txn_query, + ", (%d, %u, '%s', '%s', \"%s\")", + now, DBD_ADD_ACCOUNTS, object->name, + user, extra); + else + xstrfmtcat(txn_query, + "insert into %s " + "(timestamp, action, name, actor, info) " + "values (%d, %u, '%s', '%s', \"%s\")", + txn_table, + now, DBD_ADD_ACCOUNTS, object->name, + user, extra); + xfree(extra); + + if(!object->assoc_list) + continue; + + list_transfer(assoc_list, object->assoc_list); + } + list_iterator_destroy(itr); + + if(rc != SLURM_ERROR) { + if(txn_query) { + xstrcat(txn_query, ";"); + rc = mysql_db_query(mysql_conn->acct_mysql_db, + txn_query); + xfree(txn_query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add txn"); + rc = SLURM_SUCCESS; + } + } + } else + xfree(txn_query); + + if(list_count(assoc_list)) { + if(acct_storage_p_add_associations(mysql_conn, uid, assoc_list) + == SLURM_ERROR) { + error("Problem adding user associations"); + rc = SLURM_ERROR; + } + } + list_destroy(assoc_list); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid, + List cluster_list) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + acct_cluster_rec_t *object = NULL; + char *cols = NULL, *vals = NULL, *extra = NULL, *query = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user = NULL; + int affect_rows = 0; + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + if((pw=getpwuid(uid))) { + user = pw->pw_name; + } + + itr = list_iterator_create(cluster_list); + while((object = list_next(itr))) { + if(!object->name) { + error("We need a cluster name to add."); + rc = SLURM_ERROR; + continue; + } + + xstrcat(cols, "creation_time, mod_time, acct, cluster"); + xstrfmtcat(vals, "%d, %d, 'root', '%s'", + now, now, object->name); + xstrfmtcat(extra, ", mod_time=%d", now); + + if((int)object->default_fairshare >= 0) { + xstrcat(cols, ", fairshare"); + xstrfmtcat(vals, ", %u", object->default_fairshare); + xstrfmtcat(extra, ", fairshare=%u", + 
object->default_fairshare); + } else if ((int)object->default_fairshare == -1) { + xstrcat(cols, ", fairshare"); + xstrfmtcat(vals, ", NULL"); + xstrfmtcat(extra, ", fairshare=NULL"); + } + + if((int)object->default_max_cpu_secs_per_job >= 0) { + xstrcat(cols, ", max_cpu_secs_per_job"); + xstrfmtcat(vals, ", %u", + object->default_max_cpu_secs_per_job); + xstrfmtcat(extra, ", max_cpu_secs_per_job=%u", + object->default_max_cpu_secs_per_job); + } else if((int)object->default_max_cpu_secs_per_job == -1) { + xstrcat(cols, ", max_cpu_secs_per_job"); + xstrfmtcat(vals, ", NULL"); + xstrfmtcat(extra, ", max_cpu_secs_per_job=NULL"); + } + + if((int)object->default_max_jobs >= 0) { + xstrcat(cols, ", max_jobs"); + xstrfmtcat(vals, ", %u", object->default_max_jobs); + xstrfmtcat(extra, ", max_jobs=%u", + object->default_max_jobs); + } else if((int)object->default_max_jobs == -1) { + xstrcat(cols, ", max_jobs"); + xstrfmtcat(vals, ", NULL"); + xstrfmtcat(extra, ", max_jobs=NULL"); + } + + if((int)object->default_max_nodes_per_job >= 0) { + xstrcat(cols, ", max_nodes_per_job"); + xstrfmtcat(vals, ", %u", + object->default_max_nodes_per_job); + xstrfmtcat(extra, ", max_nodes_per_job=%u", + object->default_max_nodes_per_job); + } else if((int)object->default_max_nodes_per_job == -1) { + xstrcat(cols, ", max_nodes_per_job"); + xstrfmtcat(vals, ", NULL"); + xstrfmtcat(extra, ", max_nodes_per_job=NULL"); + } + + if((int)object->default_max_wall_duration_per_job >= 0) { + xstrcat(cols, ", max_wall_duration_per_job"); + xstrfmtcat(vals, ", %u", + object->default_max_wall_duration_per_job); + xstrfmtcat(extra, ", max_wall_duration_per_job=%u", + object->default_max_wall_duration_per_job); + } else if((int)object->default_max_wall_duration_per_job + == -1) { + xstrcat(cols, ", max_wall_duration_per_job"); + xstrfmtcat(vals, ", NULL"); + xstrfmtcat(extra, ", max_duration_per_job=NULL"); + } + + xstrfmtcat(query, + "insert into %s (creation_time, mod_time, name) " + "values (%d, %d, '%s') " + "on duplicate key update deleted=0, mod_time=%d;", + cluster_table, + now, now, object->name, + now); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add cluster %s", object->name); + xfree(extra); + xfree(cols); + xfree(vals); + continue; + } + + affect_rows = _last_affected_rows(mysql_conn->acct_mysql_db); + + if(!affect_rows) { + debug2("nothing changed %d", affect_rows); + xfree(extra); + xfree(cols); + xfree(vals); + continue; + } + + xstrfmtcat(query, + "SELECT @MyMax := coalesce(max(rgt), 0) FROM %s " + "FOR UPDATE;", + assoc_table); + xstrfmtcat(query, + "insert into %s (%s, lft, rgt) " + "values (%s, @MyMax+1, @MyMax+2) " + "on duplicate key update deleted=0, " + "id=LAST_INSERT_ID(id)%s;", + assoc_table, cols, + vals, + extra); + + xfree(cols); + xfree(vals); + debug3("%d query\n%s", mysql_conn->conn, query); + + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + + if(rc != SLURM_SUCCESS) { + error("Couldn't add cluster root assoc"); + xfree(extra); + continue; + } + xstrfmtcat(query, + "insert into %s " + "(timestamp, action, name, actor, info) " + "values (%d, %u, '%s', '%s', \"%s\");", + txn_table, + now, DBD_ADD_CLUSTERS, object->name, user, extra); + xfree(extra); + debug4("query\n%s",query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add txn"); + } + } + list_iterator_destroy(itr); + + 
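
The root association for a newly added cluster is appended at the right-hand edge of the nested-set tree kept in the association table: @MyMax is read as the current maximum rgt (0 for an empty table), the FOR UPDATE read is there to serialize concurrent registrations, and the new row simply occupies (@MyMax+1, @MyMax+2). Below is a minimal standalone sketch of those two statements issued through the raw MySQL C API; the connection parameters, database name, and the literal table name are placeholders, and the plugin's own wrappers (mysql_db_query and friends) are not used.

    #include <stdio.h>
    #include <mysql/mysql.h>

    /* Hypothetical helper: run one statement and drain any result set so the
     * connection is ready for the next query. */
    static int run(MYSQL *db, const char *sql)
    {
            if (mysql_query(db, sql) != 0) {
                    fprintf(stderr, "mysql_query: %s\n", mysql_error(db));
                    return -1;
            }
            MYSQL_RES *res = mysql_store_result(db);
            if (res)
                    mysql_free_result(res);
            return 0;
    }

    int main(void)
    {
            MYSQL *db = mysql_init(NULL);
            if (!db)
                    return 1;
            /* placeholder credentials and database; adjust for a real setup */
            if (!mysql_real_connect(db, "localhost", "slurm", "password",
                                    "slurm_acct_db", 0, NULL, 0))
                    return 1;
            /* reserve the right edge of the tree, then append the root assoc */
            run(db, "SELECT @MyMax := coalesce(max(rgt), 0) FROM assoc_table FOR UPDATE");
            run(db, "INSERT INTO assoc_table (creation_time, mod_time, acct, cluster, "
                    "lft, rgt) VALUES (UNIX_TIMESTAMP(), UNIX_TIMESTAMP(), 'root', "
                    "'newcluster', @MyMax+1, @MyMax+2)");
            mysql_close(db);
            return 0;
    }
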
return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn, + uint32_t uid, + List association_list) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + int rc = SLURM_SUCCESS; + int i=0; + acct_association_rec_t *object = NULL; + char *cols = NULL, *vals = NULL, *txn_query = NULL, + *extra = NULL, *query = NULL, *update = NULL; + char *parent = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user = NULL; + char *tmp_char = NULL; + int assoc_id = 0; + int incr = 0, my_left = 0; + int affect_rows = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + char *old_parent = NULL, *old_cluster = NULL; + char *massoc_req_inx[] = { + "id", + "parent_acct", + "lft", + "rgt", + "deleted" + }; + + enum { + MASSOC_ID, + MASSOC_PACCT, + MASSOC_LFT, + MASSOC_RGT, + MASSOC_DELETED, + MASSOC_COUNT + }; + + if(!association_list) { + error("No association list given"); + return SLURM_ERROR; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + if((pw=getpwuid(uid))) { + user = pw->pw_name; + } + + itr = list_iterator_create(association_list); + while((object = list_next(itr))) { + if(!object->cluster || !object->acct) { + error("We need a association cluster and " + "acct to add one."); + rc = SLURM_ERROR; + continue; + } + + if(object->parent_acct) { + parent = object->parent_acct; + } else if(object->user) { + parent = object->acct; + } else { + parent = "root"; + } + + xstrcat(cols, "creation_time, mod_time, cluster, acct"); + xstrfmtcat(vals, "%d, %d, '%s', '%s'", + now, now, object->cluster, object->acct); + xstrfmtcat(update, "where id>=0 && cluster='%s' && acct='%s'", + object->cluster, object->acct); + + xstrfmtcat(extra, ", mod_time=%d", now); + if(!object->user) { + xstrcat(cols, ", parent_acct"); + xstrfmtcat(vals, ", '%s'", parent); + xstrfmtcat(extra, ", parent_acct='%s'", parent); + xstrfmtcat(update, " && user=''"); + } + + if(object->user) { + xstrcat(cols, ", user"); + xstrfmtcat(vals, ", '%s'", object->user); + xstrfmtcat(extra, ", user='%s'", object->user); + xstrfmtcat(update, " && user='%s'", + object->user); + + if(object->partition) { + xstrcat(cols, ", partition"); + xstrfmtcat(vals, ", '%s'", object->partition); + xstrfmtcat(extra, ", partition='%s'", + object->partition); + xstrfmtcat(update, " && partition='%s'", + object->partition); + } + } + + if((int)object->fairshare >= 0) { + xstrcat(cols, ", fairshare"); + xstrfmtcat(vals, ", %d", object->fairshare); + xstrfmtcat(extra, ", fairshare=%d", + object->fairshare); + } + + if((int)object->max_jobs >= 0) { + xstrcat(cols, ", max_jobs"); + xstrfmtcat(vals, ", %d", object->max_jobs); + xstrfmtcat(extra, ", max_jobs=%d", + object->max_jobs); + } + + if((int)object->max_nodes_per_job >= 0) { + xstrcat(cols, ", max_nodes_per_job"); + xstrfmtcat(vals, ", %d", object->max_nodes_per_job); + xstrfmtcat(extra, ", max_nodes_per_job=%d", + object->max_nodes_per_job); + } + + if((int)object->max_wall_duration_per_job >= 0) { + xstrcat(cols, ", max_wall_duration_per_job"); + xstrfmtcat(vals, ", %d", + object->max_wall_duration_per_job); + xstrfmtcat(extra, ", max_wall_duration_per_job=%d", + object->max_wall_duration_per_job); + } + + 
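
Each association is described by parallel strings assembled piece by piece: cols/vals feed a fresh INSERT, extra becomes the tail used to resurrect a soft-deleted row, and update is the WHERE clause of the duplicate lookup. The following is a minimal sketch, with made-up values and plain snprintf() in place of the xstrfmtcat() helpers, of what the two resulting statements end up looking like for a hypothetical user association.

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            long now = (long) time(NULL);
            char cols[256], vals[256], extra[256], update[256];

            /* hypothetical association: user bob under acct physics on cluster tux */
            snprintf(cols, sizeof(cols),
                     "creation_time, mod_time, cluster, acct, user, fairshare");
            snprintf(vals, sizeof(vals), "%ld, %ld, 'tux', 'physics', 'bob', 1",
                     now, now);
            snprintf(extra, sizeof(extra),
                     ", mod_time=%ld, user='bob', fairshare=1", now);
            snprintf(update, sizeof(update),
                     "where id>=0 && cluster='tux' && acct='physics' && user='bob'");

            /* fresh-insert path (lft/rgt come from the nested-set bookkeeping) */
            printf("insert into assoc_table (%s, lft, rgt, deleted) "
                   "values (%s, 7, 8, 2);\n", cols, vals);
            /* resurrect path, used when the SELECT ... FOR UPDATE finds a deleted row */
            printf("update assoc_table set deleted=0, id=LAST_INSERT_ID(id)%s %s;\n",
                   extra, update);
            return 0;
    }
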
if((int)object->max_cpu_secs_per_job >= 0) { + xstrcat(cols, ", max_cpu_secs_per_job"); + xstrfmtcat(vals, ", %d", object->max_cpu_secs_per_job); + xstrfmtcat(extra, ", max_cpu_secs_per_job=%d", + object->max_cpu_secs_per_job); + } + + for(i=0; i<MASSOC_COUNT; i++) { + if(i) + xstrcat(tmp_char, ", "); + xstrcat(tmp_char, massoc_req_inx[i]); + } + + xstrfmtcat(query, + "select distinct %s from %s %s order by lft " + "FOR UPDATE;", + tmp_char, assoc_table, update); + xfree(tmp_char); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + xfree(cols); + xfree(vals); + xfree(extra); + xfree(update); + error("couldn't query the database"); + rc = SLURM_ERROR; + break; + } + xfree(query); + + assoc_id = 0; + if(!(row = mysql_fetch_row(result))) { + if(!old_parent || !old_cluster + || strcasecmp(parent, old_parent) + || strcasecmp(object->cluster, old_cluster)) { + char *sel_query = xstrdup_printf( + "SELECT lft FROM %s WHERE " + "acct = '%s' and cluster = '%s' " + "and user = '' order by lft;", + assoc_table, + parent, object->cluster); + MYSQL_RES *sel_result = NULL; + + if(incr) { + char *up_query = xstrdup_printf( + "UPDATE %s SET rgt = rgt+%d " + "WHERE rgt > %d && deleted < 2;" + "UPDATE %s SET lft = lft+%d " + "WHERE lft > %d " + "&& deleted < 2;" + "UPDATE %s SET deleted = 0 " + "WHERE deleted = 2;", + assoc_table, incr, + my_left, + assoc_table, incr, + my_left, + assoc_table); + debug3("%d query\n%s", mysql_conn->conn, + up_query); + rc = mysql_db_query( + mysql_conn->acct_mysql_db, + up_query); + xfree(up_query); + if(rc != SLURM_SUCCESS) { + error("Couldn't do update"); + xfree(cols); + xfree(vals); + xfree(update); + xfree(extra); + xfree(sel_query); + break; + } + } + + debug3("%d query\n%s", mysql_conn->conn, + sel_query); + if(!(sel_result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, + sel_query, 0))) { + xfree(cols); + xfree(vals); + xfree(update); + xfree(extra); + xfree(sel_query); + rc = SLURM_ERROR; + break; + } + + if(!(row = mysql_fetch_row(sel_result))) { + error("Couldn't get left from query\n", + sel_query); + mysql_free_result(sel_result); + xfree(cols); + xfree(vals); + xfree(update); + xfree(extra); + xfree(sel_query); + rc = SLURM_ERROR; + break; + } + xfree(sel_query); + + my_left = atoi(row[0]); + mysql_free_result(sel_result); + //info("left is %d", my_left); + xfree(old_parent); + xfree(old_cluster); + old_parent = xstrdup(parent); + old_cluster = xstrdup(object->cluster); + incr = 0; + } + incr += 2; + xstrfmtcat(query, + "insert into %s (%s, lft, rgt, deleted) " + "values (%s, %d, %d, 2);", + assoc_table, cols, + vals, my_left+(incr-1), my_left+incr); + + /* definantly works but slow */ +/* xstrfmtcat(query, */ +/* "SELECT @myLeft := lft FROM %s WHERE " */ +/* "acct = '%s' " */ +/* "and cluster = '%s' and user = '';", */ +/* assoc_table, */ +/* parent, */ +/* object->cluster); */ +/* xstrfmtcat(query, */ +/* "UPDATE %s SET rgt = rgt+2 " */ +/* "WHERE rgt > @myLeft;" */ +/* "UPDATE %s SET lft = lft+2 " */ +/* "WHERE lft > @myLeft;", */ +/* assoc_table, */ +/* assoc_table); */ +/* xstrfmtcat(query, */ +/* "insert into %s (%s, lft, rgt) " */ +/* "values (%s, @myLeft+1, @myLeft+2);", */ +/* assoc_table, cols, */ +/* vals); */ + } else if(!atoi(row[MASSOC_DELETED])) { + debug("This account was added already"); + xfree(cols); + xfree(vals); + xfree(update); + mysql_free_result(result); + xfree(extra); + continue; + } else { + assoc_id = atoi(row[MASSOC_ID]); + 
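
When the duplicate lookup above finds no row, new associations are placed with standard nested-set arithmetic: read the parent's lft (my_left), give each new node lft = my_left+(incr-1) and rgt = my_left+incr while flagging it deleted=2, then run one batched UPDATE that shifts every older boundary greater than my_left by incr and flips the placeholders back to deleted=0. Here is a small in-memory sketch of the same arithmetic on toy data, no database involved.

    #include <stdio.h>

    struct node { const char *name; int lft, rgt; };

    int main(void)
    {
            /* toy tree: root(1,6) -> physics(2,5) -> alice(3,4) */
            struct node t[8] = {
                    { "root", 1, 6 }, { "physics", 2, 5 }, { "alice", 3, 4 },
            };
            int n = 3;
            int my_left = 2;        /* lft of the parent account "physics" */
            int incr = 2;           /* one new child == two units of space */

            /* new child sits immediately inside the parent's left edge */
            t[n].name = "bob";
            t[n].lft = my_left + (incr - 1);
            t[n].rgt = my_left + incr;
            n++;

            /* shift every older boundary right of my_left to make room,
             * mirroring "UPDATE ... SET rgt = rgt+incr WHERE rgt > my_left"
             * and the matching lft update; the new node (deleted=2 in the
             * real code) is skipped, hence i < n - 1 */
            for (int i = 0; i < n - 1; i++) {
                    if (t[i].rgt > my_left)
                            t[i].rgt += incr;
                    if (t[i].lft > my_left)
                            t[i].lft += incr;
            }

            for (int i = 0; i < n; i++)
                    printf("%-8s lft=%d rgt=%d\n", t[i].name, t[i].lft, t[i].rgt);
            return 0;
    }
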
if(object->parent_acct + && strcasecmp(object->parent_acct, + row[MASSOC_PACCT])) { + + /* We need to move the parent! */ + if(_move_parent(mysql_conn, + atoi(row[MASSOC_LFT]), + atoi(row[MASSOC_RGT]), + object->cluster, + row[MASSOC_ID], + row[MASSOC_PACCT], + object->parent_acct) + == SLURM_ERROR) + continue; + } + + + affect_rows = 2; + xstrfmtcat(query, + "update %s set deleted=0, " + "id=LAST_INSERT_ID(id)%s %s;", + assoc_table, + extra, update); + } + mysql_free_result(result); + + xfree(cols); + xfree(vals); + xfree(update); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add assoc"); + xfree(extra); + break; + } + /* see if this was an insert or update. On an update + * the assoc_id will already be set + */ + if(!assoc_id) { + affect_rows = _last_affected_rows( + mysql_conn->acct_mysql_db); + assoc_id = mysql_insert_id(mysql_conn->acct_mysql_db); + //info("last id was %d", assoc_id); + } + + object->id = assoc_id; + + if(_addto_update_list(mysql_conn->update_list, ACCT_ADD_ASSOC, + object) == SLURM_SUCCESS) { + list_remove(itr); + } + + if(txn_query) + xstrfmtcat(txn_query, + ", (%d, %d, '%d', '%s', \"%s\")", + now, DBD_ADD_ASSOCS, assoc_id, user, extra); + else + xstrfmtcat(txn_query, + "insert into %s " + "(timestamp, action, name, actor, info) " + "values (%d, %d, '%d', '%s', \"%s\")", + txn_table, + now, DBD_ADD_ASSOCS, assoc_id, user, extra); + xfree(extra); + } + list_iterator_destroy(itr); + if(incr) { + char *up_query = xstrdup_printf( + "UPDATE %s SET rgt = rgt+%d " + "WHERE rgt > %d && deleted < 2;" + "UPDATE %s SET lft = lft+%d " + "WHERE lft > %d " + "&& deleted < 2;" + "UPDATE %s SET deleted = 0 " + "WHERE deleted = 2;", + assoc_table, incr, + my_left, + assoc_table, incr, + my_left, + assoc_table); + debug3("%d query\n%s", mysql_conn->conn, up_query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, up_query); + xfree(up_query); + if(rc != SLURM_SUCCESS) + error("Couldn't do update 2"); + + } + + if(rc != SLURM_ERROR) { + if(txn_query) { + xstrcat(txn_query, ";"); + rc = mysql_db_query(mysql_conn->acct_mysql_db, + txn_query); + xfree(txn_query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add txn"); + rc = SLURM_SUCCESS; + } + } + } else + xfree(txn_query); + + xfree(old_parent); + xfree(old_cluster); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid, + acct_user_cond_t *user_q, + acct_user_rec_t *user) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *vals = NULL, *extra = NULL, *query = NULL, *name_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user_name = NULL; + int set = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + if(!user_q) { + error("we need something to change"); + return NULL; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(!mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info)) + return NULL; + } + + if((pw=getpwuid(uid))) { + user_name = pw->pw_name; + } + + xstrcat(extra, "where deleted=0"); + if(user_q->user_list && list_count(user_q->user_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(user_q->user_list); + while((object = list_next(itr))) { 
+ if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(user_q->def_acct_list && list_count(user_q->def_acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(user_q->def_acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "default_acct='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(user_q->qos != ACCT_QOS_NOTSET) { + xstrfmtcat(extra, " && qos=%u", user_q->qos); + } + + if(user_q->admin_level != ACCT_ADMIN_NOTSET) { + xstrfmtcat(extra, " && admin_level=%u", user_q->admin_level); + } + + if(user->default_acct) + xstrfmtcat(vals, ", default_acct='%s'", user->default_acct); + + if(user->qos != ACCT_QOS_NOTSET) + xstrfmtcat(vals, ", qos=%u", user->qos); + + if(user->admin_level != ACCT_ADMIN_NOTSET) + xstrfmtcat(vals, ", admin_level=%u", user->admin_level); + + if(!extra || !vals) { + error("Nothing to change"); + return NULL; + } + query = xstrdup_printf("select name from %s %s;", user_table, extra); + xfree(extra); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + rc = 0; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + object = xstrdup(row[0]); + list_append(ret_list, object); + if(!rc) { + xstrfmtcat(name_char, "(name='%s'", object); + rc = 1; + } else { + xstrfmtcat(name_char, " || name='%s'", object); + } + } + mysql_free_result(result); + + if(!list_count(ret_list)) { + debug3("didn't effect anything"); + xfree(vals); + return ret_list; + } + xstrcat(name_char, ")"); + + if(_modify_common(mysql_conn, DBD_MODIFY_USERS, now, + user_name, user_table, name_char, vals) + == SLURM_ERROR) { + error("Couldn't modify users"); + list_destroy(ret_list); + ret_list = NULL; + } + + xfree(name_char); + xfree(vals); + + return ret_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_modify_accts(mysql_conn_t *mysql_conn, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *vals = NULL, *extra = NULL, *query = NULL, *name_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user = NULL; + int set = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + if(!acct_q) { + error("we need something to change"); + return NULL; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + if((pw=getpwuid(uid))) { + user = pw->pw_name; + } + + xstrcat(extra, "where deleted=0"); + if(acct_q->acct_list && list_count(acct_q->acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->description_list && list_count(acct_q->description_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->description_list); + 
while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "description='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->organization_list && list_count(acct_q->organization_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->organization_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "organization='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->qos != ACCT_QOS_NOTSET) { + xstrfmtcat(extra, " && qos=%u", acct_q->qos); + } + + if(acct->description) + xstrfmtcat(vals, ", description='%s'", acct->description); + if(acct->organization) + xstrfmtcat(vals, ", organization='%u'", acct->organization); + if(acct->qos != ACCT_QOS_NOTSET) + xstrfmtcat(vals, ", qos='%u'", acct->qos); + + if(!extra || !vals) { + error("Nothing to change"); + return NULL; + } + + query = xstrdup_printf("select name from %s %s;", acct_table, extra); + xfree(extra); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + xfree(vals); + return NULL; + } + xfree(query); + + rc = 0; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + object = xstrdup(row[0]); + list_append(ret_list, object); + if(!rc) { + xstrfmtcat(name_char, "(name='%s'", object); + rc = 1; + } else { + xstrfmtcat(name_char, " || name='%s'", object); + } + + } + mysql_free_result(result); + + if(!list_count(ret_list)) { + debug3("didn't effect anything"); + xfree(vals); + return ret_list; + } + xstrcat(name_char, ")"); + + if(_modify_common(mysql_conn, DBD_MODIFY_ACCOUNTS, now, + user, acct_table, name_char, vals) + == SLURM_ERROR) { + error("Couldn't modify accounts"); + list_destroy(ret_list); + ret_list = NULL; + } + + xfree(name_char); + xfree(vals); + + return ret_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn, + uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *vals = NULL, *extra = NULL, *query = NULL, + *name_char = NULL, *assoc_char= NULL, *send_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user = NULL; + int set = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + /* If you need to alter the default values of the cluster use + * modify_associations since this is used only for registering + * the controller when it loads + */ + + if(!cluster_q) { + error("we need something to change"); + return NULL; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + if((pw=getpwuid(uid))) { + user = pw->pw_name; + } + + xstrcat(extra, "where deleted=0"); + if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(cluster_q->cluster_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + 
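
All of the modify, remove, and get paths fold their condition lists into the query the same way: start from "where deleted=0" and, per list, append " && (col='v1' || col='v2' ...)". A minimal sketch of that builder over a plain string array follows, using snprintf()/strcat() instead of the List iterator and xstrfmtcat().

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *names[] = { "alice", "bob", "carol" };
            int count = 3;
            char extra[512] = "where deleted=0";
            int set = 0;

            strcat(extra, " && (");
            for (int i = 0; i < count; i++) {
                    char piece[64];
                    if (set)
                            strcat(extra, " || ");
                    /* same per-item shape as xstrfmtcat(extra, "name='%s'", object) */
                    snprintf(piece, sizeof(piece), "name='%s'", names[i]);
                    strcat(extra, piece);
                    set = 1;
            }
            strcat(extra, ")");

            printf("select name from user_table %s;\n", extra);
            return 0;
    }
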
list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + + if(cluster->control_host) { + xstrfmtcat(vals, ", control_host='%s'", cluster->control_host); + } + if(cluster->control_port) { + xstrfmtcat(vals, ", control_port=%u", cluster->control_port); + } + + if(!vals) { + error("Nothing to change"); + return NULL; + } + + xstrfmtcat(query, "select name from %s %s;", cluster_table, extra); + xfree(extra); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + xfree(vals); + error("no result given for %s", extra); + return NULL; + } + xfree(query); + + rc = 0; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + object = xstrdup(row[0]); + list_append(ret_list, object); + if(!rc) { + xstrfmtcat(name_char, "name='%s'", object); + rc = 1; + } else { + xstrfmtcat(name_char, " || name='%s'", object); + } + } + mysql_free_result(result); + + if(!list_count(ret_list)) { + debug3("didn't effect anything"); + xfree(vals); + return ret_list; + } + + if(vals) { + send_char = xstrdup_printf("(%s)", name_char); + if(_modify_common(mysql_conn, DBD_MODIFY_CLUSTERS, now, + user, cluster_table, send_char, vals) + == SLURM_ERROR) { + error("Couldn't modify cluster 1"); + list_destroy(ret_list); + ret_list = NULL; + goto end_it; + } + } + +end_it: + xfree(name_char); + xfree(assoc_char); + xfree(vals); + xfree(send_char); + + return ret_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_modify_associations(mysql_conn_t *mysql_conn, + uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *vals = NULL, *extra = NULL, *query = NULL, *name_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user = NULL; + int set = 0, i = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + char *massoc_req_inx[] = { + "id", + "acct", + "parent_acct", + "cluster", + "user", + "partition", + "lft", + "rgt" + }; + + enum { + MASSOC_ID, + MASSOC_ACCT, + MASSOC_PACCT, + MASSOC_CLUSTER, + MASSOC_USER, + MASSOC_PART, + MASSOC_LFT, + MASSOC_RGT, + MASSOC_COUNT + }; + + if(!assoc_q) { + error("we need something to change"); + return NULL; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + if((pw=getpwuid(uid))) { + user = pw->pw_name; + } + + if(assoc_q->acct_list && list_count(assoc_q->acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "acct='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->cluster_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "cluster='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->user_list && list_count(assoc_q->user_list)) { + set = 0; + xstrcat(extra, 
" && ("); + itr = list_iterator_create(assoc_q->user_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "user='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } else { + info("no user specified"); + xstrcat(extra, " && user = '' "); + } + + if(assoc_q->id_list && list_count(assoc_q->id_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->id_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "id=%s", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->parent_acct) { + xstrfmtcat(extra, " && parent_acct='%s'", assoc_q->parent_acct); + } + + if((int)assoc->fairshare >= 0) + xstrfmtcat(vals, ", fairshare=%u", assoc->fairshare); + else if((int)assoc->fairshare == -1) + xstrfmtcat(vals, ", fairshare=1"); + + if((int)assoc->max_cpu_secs_per_job >= 0) + xstrfmtcat(vals, ", max_cpu_secs_per_job=%u", + assoc->max_cpu_secs_per_job); + else if((int)assoc->max_cpu_secs_per_job == -1) + xstrfmtcat(vals, ", max_cpu_secs_per_job=NULL"); + + if((int)assoc->max_jobs >= 0) + xstrfmtcat(vals, ", max_jobs=%u", assoc->max_jobs); + else if((int)assoc->max_jobs == -1) + xstrfmtcat(vals, ", max_jobs=NULL"); + + if((int)assoc->max_nodes_per_job >= 0) + xstrfmtcat(vals, ", max_nodes_per_job=%u", + assoc->max_nodes_per_job); + else if((int)assoc->max_nodes_per_job == -1) + xstrfmtcat(vals, ", max_nodes_per_job=NULL"); + + if((int)assoc->max_wall_duration_per_job >= 0) + xstrfmtcat(vals, ", max_wall_duration_per_job=%u", + assoc->max_wall_duration_per_job); + else if((int)assoc->max_wall_duration_per_job == -1) + xstrfmtcat(vals, ", max_wall_duration_per_job=NULL"); + + if(!extra || (!vals && !assoc->parent_acct)) { + error("Nothing to change"); + return NULL; + } + + for(i=0; i<MASSOC_COUNT; i++) { + if(i) + xstrcat(object, ", "); + xstrcat(object, massoc_req_inx[i]); + } + + query = xstrdup_printf("select distinct %s from %s where deleted=0%s " + "order by lft FOR UPDATE;", + object, assoc_table, extra); + xfree(object); + xfree(extra); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + rc = SLURM_SUCCESS; + set = 0; + extra = NULL; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + acct_association_rec_t *mod_assoc = NULL; + int account_type=0; +/* MYSQL_RES *result2 = NULL; */ +/* MYSQL_ROW row2; */ + if(row[MASSOC_PART][0]) { + // see if there is a partition name + object = xstrdup_printf( + "C = %-10s A = %-20s U = %-9s P = %s", + row[MASSOC_CLUSTER], row[MASSOC_ACCT], + row[MASSOC_USER], row[MASSOC_PART]); + } else if(row[MASSOC_USER][0]){ + object = xstrdup_printf( + "C = %-10s A = %-20s U = %-9s", + row[MASSOC_CLUSTER], row[MASSOC_ACCT], + row[MASSOC_USER]); + } else { + if(row[MASSOC_PACCT][0]) { + object = xstrdup_printf( + "C = %-10s A = %s of %s", + row[MASSOC_CLUSTER], row[MASSOC_ACCT], + row[MASSOC_PACCT]); + } else { + object = xstrdup_printf( + "C = %-10s A = %s", + row[MASSOC_CLUSTER], row[MASSOC_ACCT]); + } + if(assoc->parent_acct) { + if(!strcasecmp(row[MASSOC_ACCT], + assoc->parent_acct)) { + error("You can't make an account be a " + "child of it's self"); + xfree(object); + continue; + } + + if(_move_parent(mysql_conn, + atoi(row[MASSOC_LFT]), + atoi(row[MASSOC_RGT]), + row[MASSOC_CLUSTER], + row[MASSOC_ID], + 
row[MASSOC_PACCT], + assoc->parent_acct) + == SLURM_ERROR) + break; + } + account_type = 1; + } + list_append(ret_list, object); + + if(!set) { + xstrfmtcat(name_char, "(id=%s", row[MASSOC_ID]); + set = 1; + } else { + xstrfmtcat(name_char, " || id=%s", row[MASSOC_ID]); + } + + mod_assoc = xmalloc(sizeof(acct_association_rec_t)); + mod_assoc->id = atoi(row[MASSOC_ID]); + + mod_assoc->max_cpu_secs_per_job = assoc->max_cpu_secs_per_job; + mod_assoc->fairshare = assoc->fairshare; + mod_assoc->max_jobs = assoc->max_jobs; + mod_assoc->max_nodes_per_job = assoc->max_nodes_per_job; + mod_assoc->max_wall_duration_per_job = + assoc->max_wall_duration_per_job; + if(!row[MASSOC_USER][0]) + mod_assoc->parent_acct = xstrdup(assoc->parent_acct); + + if(_addto_update_list(mysql_conn->update_list, + ACCT_MODIFY_ASSOC, + mod_assoc) != SLURM_SUCCESS) + error("couldn't add to the update list"); + if(account_type) { + _modify_unset_users(mysql_conn, + mod_assoc, + row[MASSOC_ACCT], + atoi(row[MASSOC_LFT]), + atoi(row[MASSOC_RGT]), + ret_list); + } + } + mysql_free_result(result); + + if(assoc->parent_acct) { + if(rc != SLURM_SUCCESS) { + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_destroy(mysql_conn->update_list); + mysql_conn->update_list = + list_create(destroy_acct_update_object); + list_destroy(ret_list); + xfree(vals); + return NULL; + } + } + + + if(!list_count(ret_list)) { + debug3("didn't effect anything"); + xfree(vals); + return ret_list; + } + xstrcat(name_char, ")"); + + if(vals) { + if(_modify_common(mysql_conn, DBD_MODIFY_ASSOCS, now, + user, assoc_table, name_char, vals) + == SLURM_ERROR) { + error("Couldn't modify associations"); + list_destroy(ret_list); + ret_list = NULL; + goto end_it; + } + } + +end_it: + xfree(name_char); + xfree(vals); + + return ret_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid, + acct_user_cond_t *user_q) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *extra = NULL, *query = NULL, + *name_char = NULL, *assoc_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user_name = NULL; + int set = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + if(!user_q) { + error("we need something to change"); + return NULL; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + if((pw=getpwuid(uid))) { + user_name = pw->pw_name; + } + + xstrcat(extra, "where deleted=0"); + + if(user_q->user_list && list_count(user_q->user_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(user_q->user_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(user_q->def_acct_list && list_count(user_q->def_acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(user_q->def_acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "default_acct='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + 
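
The remove functions share a two-step shape: first SELECT the matching names, so the caller gets back a List naming everything that will be touched and the name_char/assoc_char predicates can be built, then hand those predicates to _remove_common(). That helper is defined earlier in the file and is not visible in this hunk; judging from the deleted=0 filters used everywhere, the deleted=1 updates applied to the cluster usage tables below, and the txn_table inserts built by the add functions, it presumably soft-deletes the rows and records the action in txn_table. What follows is a hedged sketch of the kind of SQL that implies for a single user; the action code and the literal statements are assumptions, not the helper's actual body.

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            long now = (long) time(NULL);
            int action = 13;        /* placeholder for the DBD_REMOVE_USERS value */

            /* presumed soft delete: the row stays so old jobs still resolve */
            printf("update user_table set mod_time=%ld, deleted=1 "
                   "where (name='alice');\n", now);
            /* presumed audit record, matching the txn_table columns above */
            printf("insert into txn_table (timestamp, action, name, actor, info) "
                   "values (%ld, %d, 'alice', 'root', \"name='alice'\");\n",
                   now, action);
            return 0;
    }
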
if(user_q->qos != ACCT_QOS_NOTSET) { + xstrfmtcat(extra, " && qos=%u", user_q->qos); + } + + if(user_q->admin_level != ACCT_ADMIN_NOTSET) { + xstrfmtcat(extra, " && admin_level=%u", user_q->admin_level); + } + + if(!extra) { + error("Nothing to remove"); + return NULL; + } + + query = xstrdup_printf("select name from %s %s;", user_table, extra); + xfree(extra); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + rc = 0; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + char *object = xstrdup(row[0]); + list_append(ret_list, object); + if(!rc) { + xstrfmtcat(name_char, "name='%s'", object); + xstrfmtcat(assoc_char, "t2.user='%s'", object); + rc = 1; + } else { + xstrfmtcat(name_char, " || name='%s'", object); + xstrfmtcat(assoc_char, " || t2.user='%s'", object); + } + } + mysql_free_result(result); + + if(!list_count(ret_list)) { + debug3("didn't effect anything"); + return ret_list; + } + + if(_remove_common(mysql_conn, DBD_REMOVE_USERS, now, + user_name, user_table, name_char, assoc_char) + == SLURM_ERROR) { + list_destroy(ret_list); + xfree(name_char); + xfree(assoc_char); + return NULL; + } + xfree(name_char); + xfree(assoc_char); + + return ret_list; + +#else + return NULL; +#endif +} + +extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ +#ifdef HAVE_MYSQL + return NULL; +#else + return NULL; +#endif +} + +extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid, + acct_account_cond_t *acct_q) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *extra = NULL, *query = NULL, + *name_char = NULL, *assoc_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user_name = NULL; + int set = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + if(!acct_q) { + error("we need something to change"); + return NULL; + } + + if((pw=getpwuid(uid))) { + user_name = pw->pw_name; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + xstrcat(extra, "where deleted=0"); + if(acct_q->acct_list && list_count(acct_q->acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->description_list && list_count(acct_q->description_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->description_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "description='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->organization_list && list_count(acct_q->organization_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->organization_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "organization='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + 
xstrcat(extra, ")"); + } + + if(acct_q->qos != ACCT_QOS_NOTSET) { + xstrfmtcat(extra, " && qos=%u", acct_q->qos); + } + + if(!extra) { + error("Nothing to remove"); + return NULL; + } + + query = xstrdup_printf("select name from %s %s;", acct_table, extra); + xfree(extra); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + rc = 0; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + char *object = xstrdup(row[0]); + list_append(ret_list, object); + if(!rc) { + xstrfmtcat(name_char, "name='%s'", object); + xstrfmtcat(assoc_char, "t2.acct='%s'", object); + rc = 1; + } else { + xstrfmtcat(name_char, " || name='%s'", object); + xstrfmtcat(assoc_char, " || t2.acct='%s'", object); + } + } + mysql_free_result(result); + + if(!list_count(ret_list)) { + debug3("didn't effect anything"); + return ret_list; + } + + if(_remove_common(mysql_conn, DBD_REMOVE_ACCOUNTS, now, + user_name, acct_table, name_char, assoc_char) + == SLURM_ERROR) { + list_destroy(ret_list); + xfree(name_char); + xfree(assoc_char); + return NULL; + } + xfree(name_char); + xfree(assoc_char); + + return ret_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn, + uint32_t uid, + acct_cluster_cond_t *cluster_q) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *extra = NULL, *query = NULL, + *name_char = NULL, *assoc_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user_name = NULL; + int set = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + int day_old = now - DELETE_SEC_BACK; + + if(!cluster_q) { + error("we need something to change"); + return NULL; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + if((pw=getpwuid(uid))) { + user_name = pw->pw_name; + } + xstrcat(extra, "where deleted=0"); + if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(cluster_q->cluster_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(!extra) { + error("Nothing to remove"); + return NULL; + } + + query = xstrdup_printf("select name from %s %s;", cluster_table, extra); + xfree(extra); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + rc = 0; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + char *object = xstrdup(row[0]); + list_append(ret_list, object); + if(!rc) { + xstrfmtcat(name_char, "name='%s'", object); + xstrfmtcat(extra, "t2.cluster='%s'", object); + xstrfmtcat(assoc_char, "cluster='%s'", object); + rc = 1; + } else { + xstrfmtcat(name_char, " || name='%s'", object); + xstrfmtcat(extra, " || t2.cluster='%s'", object); + xstrfmtcat(assoc_char, " || cluster='%s'", object); + } + } + mysql_free_result(result); + + if(!list_count(ret_list)) { + debug3("didn't effect anything\n%s", query); + xfree(query); + return ret_list; 
+ } + xfree(query); + + /* if this is a cluster update the machine usage tables as well */ + query = xstrdup_printf("delete from %s where creation_time>%d && (%s);" + "delete from %s where creation_time>%d && (%s);" + "delete from %s where creation_time>%d && (%s);", + cluster_day_table, day_old, assoc_char, + cluster_hour_table, day_old, assoc_char, + cluster_month_table, day_old, assoc_char); + xstrfmtcat(query, + "update %s set mod_time=%d, deleted=1 where (%s);" + "update %s set mod_time=%d, deleted=1 where (%s);" + "update %s set mod_time=%d, deleted=1 where (%s);", + cluster_day_table, now, assoc_char, + cluster_hour_table, now, assoc_char, + cluster_month_table, now, assoc_char); + xfree(assoc_char); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + if(mysql_conn->rollback) { + mysql_db_rollback(mysql_conn->acct_mysql_db); + } + list_flush(mysql_conn->update_list); + list_destroy(ret_list); + xfree(name_char); + xfree(extra); + return NULL; + } + + assoc_char = xstrdup_printf("t2.acct='root' && (%s)", extra); + xfree(extra); + + if(_remove_common(mysql_conn, DBD_REMOVE_CLUSTERS, now, + user_name, cluster_table, name_char, assoc_char) + == SLURM_ERROR) { + list_destroy(ret_list); + xfree(name_char); + xfree(assoc_char); + return NULL; + } + xfree(name_char); + xfree(assoc_char); + + return ret_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_remove_associations(mysql_conn_t *mysql_conn, + uint32_t uid, + acct_association_cond_t *assoc_q) +{ +#ifdef HAVE_MYSQL + ListIterator itr = NULL; + List ret_list = NULL; + int rc = SLURM_SUCCESS; + char *object = NULL; + char *extra = NULL, *query = NULL, + *name_char = NULL, *assoc_char = NULL; + time_t now = time(NULL); + struct passwd *pw = NULL; + char *user_name = NULL; + int set = 0, i = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + /* if this changes you will need to edit the corresponding + * enum below also t1 is step_table */ + char *rassoc_req_inx[] = { + "id", + "acct", + "parent_acct", + "cluster", + "user", + "partition" + }; + + enum { + RASSOC_ID, + RASSOC_ACCT, + RASSOC_PACCT, + RASSOC_CLUSTER, + RASSOC_USER, + RASSOC_PART, + RASSOC_COUNT + }; + + if(!assoc_q) { + error("we need something to change"); + return NULL; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + xstrcat(extra, "where id>0 && deleted=0"); + + if((pw=getpwuid(uid))) { + user_name = pw->pw_name; + } + + if(assoc_q->acct_list && list_count(assoc_q->acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "acct='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->cluster_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "cluster='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->user_list && 
list_count(assoc_q->user_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->user_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "user='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->id_list && list_count(assoc_q->id_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->id_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "id=%s", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->parent_acct) { + xstrfmtcat(extra, " && parent_acct='%s'", + assoc_q->parent_acct); + } + + for(i=0; i<RASSOC_COUNT; i++) { + if(i) + xstrcat(object, ", "); + xstrcat(object, rassoc_req_inx[i]); + } + + query = xstrdup_printf("select lft, rgt from %s %s order by lft " + "FOR UPDATE;", + assoc_table, extra); + xfree(extra); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + rc = 0; + while((row = mysql_fetch_row(result))) { + if(!rc) { + xstrfmtcat(name_char, "lft between %s and %s", + row[0], row[1]); + rc = 1; + } else { + xstrfmtcat(name_char, " || lft between %s and %s", + row[0], row[1]); + } + } + mysql_free_result(result); + + if(!name_char) { + debug3("didn't effect anything\n%s", query); + xfree(query); + return ret_list; + } + + xfree(query); + query = xstrdup_printf("select distinct %s " + "from %s where (%s) order by lft;", + object, + assoc_table, name_char); + xfree(extra); + xfree(object); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + xfree(name_char); + return NULL; + } + + rc = 0; + ret_list = list_create(slurm_destroy_char); + while((row = mysql_fetch_row(result))) { + acct_association_rec_t *rem_assoc = NULL; + + if(row[RASSOC_PART][0]) { + // see if there is a partition name + object = xstrdup_printf( + "C = %-10s A = %-10s U = %-9s P = %s", + row[RASSOC_CLUSTER], row[RASSOC_ACCT], + row[RASSOC_USER], row[RASSOC_PART]); + } else if(row[RASSOC_USER][0]){ + object = xstrdup_printf( + "C = %-10s A = %-10s U = %-9s", + row[RASSOC_CLUSTER], row[RASSOC_ACCT], + row[RASSOC_USER]); + } else { + if(row[RASSOC_PACCT][0]) { + object = xstrdup_printf( + "C = %-10s A = %s of %s", + row[RASSOC_CLUSTER], row[RASSOC_ACCT], + row[RASSOC_PACCT]); + } else { + object = xstrdup_printf( + "C = %-10s A = %s", + row[RASSOC_CLUSTER], row[RASSOC_ACCT]); + } + } + list_append(ret_list, object); + if(!rc) { + xstrfmtcat(assoc_char, "id=%s", row[RASSOC_ID]); + rc = 1; + } else { + xstrfmtcat(assoc_char, " || id=%s", row[RASSOC_ID]); + } + + rem_assoc = xmalloc(sizeof(acct_association_rec_t)); + rem_assoc->id = atoi(row[RASSOC_ID]); + if(_addto_update_list(mysql_conn->update_list, + ACCT_REMOVE_ASSOC, + rem_assoc) != SLURM_SUCCESS) + error("couldn't add to the update list"); + + } + mysql_free_result(result); + + if(_remove_common(mysql_conn, DBD_REMOVE_ASSOCS, now, + user_name, assoc_table, name_char, assoc_char) + == SLURM_ERROR) { + list_destroy(ret_list); + xfree(name_char); + xfree(assoc_char); + return NULL; + } + xfree(name_char); + xfree(assoc_char); + + return ret_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn, + acct_user_cond_t *user_q) +{ +#ifdef HAVE_MYSQL + char *query = NULL; + char *extra = NULL; + char 
*tmp = NULL; + List user_list = NULL; + ListIterator itr = NULL; + char *object = NULL; + int set = 0; + int i=0; + MYSQL_RES *result = NULL, *coord_result = NULL; + MYSQL_ROW row, coord_row; + + /* if this changes you will need to edit the corresponding enum */ + char *user_req_inx[] = { + "name", + "default_acct", + "qos", + "admin_level" + }; + enum { + USER_REQ_NAME, + USER_REQ_DA, + USER_REQ_EX, + USER_REQ_AL, + USER_REQ_COUNT + }; + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + xstrcat(extra, "where deleted=0"); + + if(!user_q) + goto empty; + + if(user_q->user_list && list_count(user_q->user_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(user_q->user_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(user_q->def_acct_list && list_count(user_q->def_acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(user_q->def_acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "default_acct='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(user_q->qos != ACCT_QOS_NOTSET) { + if(extra) + xstrfmtcat(extra, " && qos=%u", user_q->qos); + else + xstrfmtcat(extra, " where qos=%u", + user_q->qos); + + } + + if(user_q->admin_level != ACCT_ADMIN_NOTSET) { + if(extra) + xstrfmtcat(extra, " && admin_level=%u", + user_q->admin_level); + else + xstrfmtcat(extra, " where admin_level=%u", + user_q->admin_level); + } +empty: + + xfree(tmp); + xstrfmtcat(tmp, "%s", user_req_inx[i]); + for(i=1; i<USER_REQ_COUNT; i++) { + xstrfmtcat(tmp, ", %s", user_req_inx[i]); + } + + query = xstrdup_printf("select %s from %s %s", tmp, user_table, extra); + xfree(tmp); + xfree(extra); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + user_list = list_create(destroy_acct_user_rec); + + while((row = mysql_fetch_row(result))) { + acct_user_rec_t *user = xmalloc(sizeof(acct_user_rec_t)); + struct passwd *passwd_ptr = NULL; + list_append(user_list, user); + + user->name = xstrdup(row[USER_REQ_NAME]); + user->default_acct = xstrdup(row[USER_REQ_DA]); + user->admin_level = atoi(row[USER_REQ_AL]); + user->qos = atoi(row[USER_REQ_EX]); + + passwd_ptr = getpwnam(user->name); + if(passwd_ptr) + user->uid = passwd_ptr->pw_uid; + + user->coord_accts = list_create(destroy_acct_coord_rec); + query = xstrdup_printf("select acct from %s where user='%s' " + "&& deleted=0", + acct_coord_table, user->name); + + if(!(coord_result = + mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + continue; + } + xfree(query); + + while((coord_row = mysql_fetch_row(coord_result))) { + acct_coord_rec_t *coord = + xmalloc(sizeof(acct_coord_rec_t)); + list_append(user->coord_accts, coord); + coord->acct_name = xstrdup(coord_row[0]); + coord->sub_acct = 0; + } + mysql_free_result(coord_result); + /* FIX ME: ADD SUB projects here from assoc list lft + * rgt */ + + if(user_q && user_q->with_assocs) { 
+ acct_association_cond_t *assoc_q = NULL; + if(!user_q->assoc_cond) { + user_q->assoc_cond = xmalloc( + sizeof(acct_association_cond_t)); + } + assoc_q = user_q->assoc_cond; + if(assoc_q->user_list) + list_destroy(assoc_q->user_list); + + assoc_q->user_list = list_create(NULL); + list_append(assoc_q->user_list, user->name); + user->assoc_list = acct_storage_p_get_associations( + mysql_conn, assoc_q); + list_destroy(assoc_q->user_list); + assoc_q->user_list = NULL; + } + } + mysql_free_result(result); + + return user_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn, + acct_account_cond_t *acct_q) +{ +#ifdef HAVE_MYSQL + char *query = NULL; + char *extra = NULL; + char *tmp = NULL; + List acct_list = NULL; + ListIterator itr = NULL; + char *object = NULL; + int set = 0; + int i=0; + MYSQL_RES *result = NULL, *coord_result = NULL; + MYSQL_ROW row, coord_row; + + /* if this changes you will need to edit the corresponding enum */ + char *acct_req_inx[] = { + "name", + "description", + "qos", + "organization" + }; + enum { + ACCT_REQ_NAME, + ACCT_REQ_DESC, + ACCT_REQ_QOS, + ACCT_REQ_ORG, + ACCT_REQ_COUNT + }; + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + xstrcat(extra, "where deleted=0"); + if(!acct_q) + goto empty; + + if(acct_q->acct_list && list_count(acct_q->acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->description_list && list_count(acct_q->description_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->description_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "description='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->organization_list && list_count(acct_q->organization_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(acct_q->organization_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "organization='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(acct_q->qos != ACCT_QOS_NOTSET) { + if(extra) + xstrfmtcat(extra, " && qos=%u", acct_q->qos); + else + xstrfmtcat(extra, " where qos=%u", + acct_q->qos); + } + +empty: + + xfree(tmp); + xstrfmtcat(tmp, "%s", acct_req_inx[i]); + for(i=1; i<ACCT_REQ_COUNT; i++) { + xstrfmtcat(tmp, ", %s", acct_req_inx[i]); + } + + query = xstrdup_printf("select %s from %s %s", tmp, acct_table, extra); + xfree(tmp); + xfree(extra); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + acct_list = list_create(destroy_acct_account_rec); + + while((row = mysql_fetch_row(result))) { + acct_account_rec_t *acct = xmalloc(sizeof(acct_account_rec_t)); + list_append(acct_list, acct); + + acct->name = xstrdup(row[ACCT_REQ_NAME]); + acct->description = 
xstrdup(row[ACCT_REQ_DESC]); + acct->organization = xstrdup(row[ACCT_REQ_ORG]); + acct->qos = atoi(row[ACCT_REQ_QOS]); + + acct->coordinators = list_create(slurm_destroy_char); + query = xstrdup_printf("select user from %s where acct='%s' " + "&& deleted=0;", + acct_coord_table, acct->name); + + if(!(coord_result = + mysql_db_query_ret(mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + continue; + } + xfree(query); + + while((coord_row = mysql_fetch_row(coord_result))) { + object = xstrdup(coord_row[0]); + list_append(acct->coordinators, object); + } + mysql_free_result(coord_result); + + if(acct_q && acct_q->with_assocs) { + acct_association_cond_t *assoc_q = NULL; + if(!acct_q->assoc_cond) { + acct_q->assoc_cond = xmalloc( + sizeof(acct_association_cond_t)); + } + assoc_q = acct_q->assoc_cond; + if(assoc_q->acct_list) + list_destroy(assoc_q->acct_list); + + assoc_q->acct_list = list_create(NULL); + list_append(assoc_q->acct_list, acct->name); + acct->assoc_list = acct_storage_p_get_associations( + mysql_conn, assoc_q); + list_destroy(assoc_q->acct_list); + assoc_q->acct_list = NULL; + } + + } + mysql_free_result(result); + + return acct_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn, + acct_cluster_cond_t *cluster_q) +{ +#ifdef HAVE_MYSQL + char *query = NULL; + char *extra = NULL; + char *tmp = NULL; + List cluster_list = NULL; + ListIterator itr = NULL; + char *object = NULL; + int set = 0; + int i=0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + /* if this changes you will need to edit the corresponding enum */ + char *cluster_req_inx[] = { + "name", + "control_host", + "control_port" + }; + enum { + CLUSTER_REQ_NAME, + CLUSTER_REQ_CH, + CLUSTER_REQ_CP, + CLUSTER_REQ_COUNT + }; + char *assoc_req_inx[] = { + "fairshare", + "max_jobs", + "max_nodes_per_job", + "max_wall_duration_per_job", + "max_cpu_secs_per_job", + }; + enum { + ASSOC_REQ_FS, + ASSOC_REQ_MJ, + ASSOC_REQ_MNPJ, + ASSOC_REQ_MWPJ, + ASSOC_REQ_MCPJ, + ASSOC_REQ_COUNT + }; + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + xstrcat(extra, "where deleted=0"); + + if(!cluster_q) + goto empty; + + if(cluster_q->cluster_list && list_count(cluster_q->cluster_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(cluster_q->cluster_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "name='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + +empty: + + xfree(tmp); + i=0; + xstrfmtcat(tmp, "%s", cluster_req_inx[i]); + for(i=1; i<CLUSTER_REQ_COUNT; i++) { + xstrfmtcat(tmp, ", %s", cluster_req_inx[i]); + } + + query = xstrdup_printf("select %s from %s %s", + tmp, cluster_table, extra); + xfree(tmp); + xfree(extra); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + i=0; + xstrfmtcat(tmp, "%s", assoc_req_inx[i]); + for(i=1; i<ASSOC_REQ_COUNT; i++) { + xstrfmtcat(tmp, ", %s", assoc_req_inx[i]); + } + + cluster_list = list_create(destroy_acct_cluster_rec); + + while((row = mysql_fetch_row(result))) { + acct_cluster_rec_t 
*cluster = + xmalloc(sizeof(acct_cluster_rec_t)); + MYSQL_RES *result2 = NULL; + MYSQL_ROW row2; + list_append(cluster_list, cluster); + + cluster->name = xstrdup(row[CLUSTER_REQ_NAME]); + cluster->control_host = xstrdup(row[CLUSTER_REQ_CH]); + cluster->control_port = atoi(row[CLUSTER_REQ_CP]); + query = xstrdup_printf("select %s from %s where cluster='%s' " + "&& acct='root'", + tmp, assoc_table, cluster->name); + if(!(result2 = mysql_db_query_ret(mysql_conn->acct_mysql_db, + query, 1))) { + xfree(query); + break; + } + xfree(query); + row2 = mysql_fetch_row(result2); + + if(row2[ASSOC_REQ_FS]) + cluster->default_fairshare = atoi(row2[ASSOC_REQ_FS]); + else + cluster->default_fairshare = 1; + + if(row2[ASSOC_REQ_MJ]) + cluster->default_max_jobs = atoi(row2[ASSOC_REQ_MJ]); + else + cluster->default_max_jobs = -1; + + if(row2[ASSOC_REQ_MNPJ]) + cluster->default_max_nodes_per_job = + atoi(row2[ASSOC_REQ_MNPJ]); + else + cluster->default_max_nodes_per_job = -1; + + if(row2[ASSOC_REQ_MWPJ]) + cluster->default_max_wall_duration_per_job = + atoi(row2[ASSOC_REQ_MWPJ]); + else + cluster->default_max_wall_duration_per_job = -1; + + if(row2[ASSOC_REQ_MCPJ]) + cluster->default_max_cpu_secs_per_job = + atoi(row2[ASSOC_REQ_MCPJ]); + else + cluster->default_max_cpu_secs_per_job = -1; + mysql_free_result(result2); + } + mysql_free_result(result); + xfree(tmp); + + return cluster_list; +#else + return NULL; +#endif +} + +extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn, + acct_association_cond_t *assoc_q) +{ +#ifdef HAVE_MYSQL + char *query = NULL; + char *extra = NULL; + char *tmp = NULL; + List assoc_list = NULL; + ListIterator itr = NULL; + char *object = NULL; + int set = 0; + int i=0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + int parent_mj = -1; + int parent_mnpj = -1; + int parent_mwpj = -1; + int parent_mcpj = -1; + char *last_acct = NULL; + char *last_acct_parent = NULL; + char *last_cluster = NULL; + uint32_t user_parent_id = 0; + uint32_t acct_parent_id = 0; + + /* if this changes you will need to edit the corresponding enum */ + char *assoc_req_inx[] = { + "id", + "user", + "acct", + "cluster", + "partition", + "parent_acct", + "fairshare", + "max_jobs", + "max_nodes_per_job", + "max_wall_duration_per_job", + "max_cpu_secs_per_job", + }; + enum { + ASSOC_REQ_ID, + ASSOC_REQ_USER, + ASSOC_REQ_ACCT, + ASSOC_REQ_CLUSTER, + ASSOC_REQ_PART, + ASSOC_REQ_PARENT, + ASSOC_REQ_FS, + ASSOC_REQ_MJ, + ASSOC_REQ_MNPJ, + ASSOC_REQ_MWPJ, + ASSOC_REQ_MCPJ, + ASSOC_REQ_COUNT + }; + enum { + ASSOC2_REQ_PARENT_ID, + ASSOC2_REQ_MJ, + ASSOC2_REQ_MNPJ, + ASSOC2_REQ_MWPJ, + ASSOC2_REQ_MCPJ + }; + + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + + xstrcat(extra, "where deleted=0"); + if(!assoc_q) + goto empty; + + if(assoc_q->acct_list && list_count(assoc_q->acct_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->acct_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "acct='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->cluster_list && list_count(assoc_q->cluster_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = 
list_iterator_create(assoc_q->cluster_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "cluster='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->user_list && list_count(assoc_q->user_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->user_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "user='%s'", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->id_list && list_count(assoc_q->id_list)) { + set = 0; + xstrcat(extra, " && ("); + itr = list_iterator_create(assoc_q->id_list); + while((object = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + xstrfmtcat(extra, "id=%s", object); + set = 1; + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(assoc_q->parent_acct) { + xstrfmtcat(extra, " && parent_acct='%s'", assoc_q->parent_acct); + } +empty: + xfree(tmp); + xstrfmtcat(tmp, "%s", assoc_req_inx[i]); + for(i=1; i<ASSOC_REQ_COUNT; i++) { + xstrfmtcat(tmp, ", %s", assoc_req_inx[i]); + } + + query = xstrdup_printf("select %s from %s %s order by lft;", + tmp, assoc_table, extra); + xfree(tmp); + xfree(extra); + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return NULL; + } + xfree(query); + + assoc_list = list_create(destroy_acct_association_rec); + + while((row = mysql_fetch_row(result))) { + acct_association_rec_t *assoc = + xmalloc(sizeof(acct_association_rec_t)); + MYSQL_RES *result2 = NULL; + MYSQL_ROW row2; + + list_append(assoc_list, assoc); + + assoc->id = atoi(row[ASSOC_REQ_ID]); + + if(row[ASSOC_REQ_USER][0]) + assoc->user = xstrdup(row[ASSOC_REQ_USER]); + assoc->acct = xstrdup(row[ASSOC_REQ_ACCT]); + assoc->cluster = xstrdup(row[ASSOC_REQ_CLUSTER]); + + if(row[ASSOC_REQ_PARENT][0]) { + if(!last_acct_parent || !last_cluster + || strcmp(row[ASSOC_REQ_PARENT], last_acct_parent) + || strcmp(row[ASSOC_REQ_CLUSTER], last_cluster)) { + + query = xstrdup_printf( + "select id from %s where user='' " + "and deleted = 0 and acct='%s' " + "and cluster='%s';", + assoc_table, row[ASSOC_REQ_PARENT], + row[ASSOC_REQ_CLUSTER]); + + if(!(result2 = mysql_db_query_ret( + mysql_conn->acct_mysql_db, + query, 1))) { + xfree(query); + break; + } + xfree(query); + row2 = mysql_fetch_row(result2); + last_acct_parent = row[ASSOC_REQ_PARENT]; + last_cluster = row[ASSOC_REQ_CLUSTER]; + acct_parent_id = atoi(row2[0]); + mysql_free_result(result2); + } + assoc->parent_acct = xstrdup(row[ASSOC_REQ_PARENT]); + assoc->parent_id = acct_parent_id; + } + + if(row[ASSOC_REQ_PART][0]) + assoc->partition = xstrdup(row[ASSOC_REQ_PART]); + if(row[ASSOC_REQ_FS]) + assoc->fairshare = atoi(row[ASSOC_REQ_FS]); + else + assoc->fairshare = 1; + + if(!last_acct || !last_cluster + || strcmp(row[ASSOC_REQ_ACCT], last_acct) + || strcmp(row[ASSOC_REQ_CLUSTER], last_cluster)) { + query = xstrdup_printf( + "call get_parent_limits('%s', '%s', '%s');" + "select @par_id, @mj, @mnpj, @mwpj, @mcpj;", + assoc_table, row[ASSOC_REQ_ACCT], + row[ASSOC_REQ_CLUSTER]); + + if(!(result2 = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 1))) { + xfree(query); + break; + } + xfree(query); + + row2 = mysql_fetch_row(result2); + user_parent_id = atoi(row2[ASSOC2_REQ_PARENT_ID]); + + if(row2[ASSOC2_REQ_MJ]) + parent_mj = atoi(row2[ASSOC2_REQ_MJ]); + else + parent_mj = -1; + + 
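The parent-limit lookup above batches a stored-procedure call and a SELECT of session variables into a single round trip ("call get_parent_limits(...); select @par_id, @mj, @mnpj, @mwpj, @mcpj;"), and only the final result set is consumed; the trailing argument of 1 passed to mysql_db_query_ret() presumably selects that last set. A standalone sketch of the same multi-statement pattern against the raw MySQL C API follows; the credentials are placeholders and the get_parent_limits procedure is assumed to exist only for illustration.

/* Sketch only: issue a multi-statement batch and keep the last result set.
 * Header may be <mysql/mysql.h> depending on the installation. */
#include <mysql.h>
#include <stdio.h>

static MYSQL_RES *query_last_result(MYSQL *db, const char *query)
{
	MYSQL_RES *res = NULL;

	if (mysql_query(db, query))
		return NULL;
	do {			/* walk every result set, keep the last */
		if (res)
			mysql_free_result(res);
		res = mysql_store_result(db);
	} while (mysql_next_result(db) == 0);
	return res;
}

int main(void)
{
	MYSQL *db = mysql_init(NULL);
	MYSQL_RES *res;
	MYSQL_ROW row;

	if (!db)
		return 1;
	/* CLIENT_MULTI_STATEMENTS is what allows the batched query string. */
	if (!mysql_real_connect(db, "localhost", "slurm", "password",
				"slurm_acct_db", 0, NULL,
				CLIENT_MULTI_STATEMENTS))
		return 1;

	res = query_last_result(db,
		"call get_parent_limits('assoc_table', 'acct1', 'cluster1');"
		"select @par_id, @mj, @mnpj, @mwpj, @mcpj;");
	if (res && (row = mysql_fetch_row(res)))
		printf("parent id %s, max jobs %s\n",
		       row[0], row[1] ? row[1] : "(null)");
	if (res)
		mysql_free_result(res);
	mysql_close(db);
	return 0;
}

Nothing above is SLURM API; it only illustrates why the plugin sends the procedure call and the SELECT as one query string instead of two round trips.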
if(row2[ASSOC2_REQ_MNPJ]) + parent_mnpj = atoi(row2[ASSOC2_REQ_MNPJ]); + else + parent_mwpj = -1; + + if(row2[ASSOC2_REQ_MWPJ]) + parent_mwpj = atoi(row2[ASSOC2_REQ_MWPJ]); + else + parent_mwpj = -1; + + if(row2[ASSOC2_REQ_MCPJ]) + parent_mcpj = atoi(row2[ASSOC2_REQ_MCPJ]); + else + parent_mcpj = -1; + + last_acct = row[ASSOC_REQ_ACCT]; + last_cluster = row[ASSOC_REQ_CLUSTER]; + mysql_free_result(result2); + } + if(row[ASSOC_REQ_MJ]) + assoc->max_jobs = atoi(row[ASSOC_REQ_MJ]); + else + assoc->max_jobs = parent_mj; + if(row[ASSOC_REQ_MNPJ]) + assoc->max_nodes_per_job = + atoi(row[ASSOC_REQ_MNPJ]); + else + assoc->max_nodes_per_job = parent_mnpj; + if(row[ASSOC_REQ_MWPJ]) + assoc->max_wall_duration_per_job = + atoi(row[ASSOC_REQ_MWPJ]); + else + assoc->max_wall_duration_per_job = parent_mwpj; + if(row[ASSOC_REQ_MCPJ]) + assoc->max_cpu_secs_per_job = + atoi(row[ASSOC_REQ_MCPJ]); + else + assoc->max_cpu_secs_per_job = parent_mcpj; + + if(assoc->parent_id != acct_parent_id) + assoc->parent_id = user_parent_id; + //info("parent id is %d", assoc->parent_id); + //log_assoc_rec(assoc); + } + mysql_free_result(result); + + return assoc_list; +#else + return NULL; +#endif +} + +extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn, + acct_association_rec_t *acct_assoc, + time_t start, time_t end) +{ +#ifdef HAVE_MYSQL + int rc = SLURM_SUCCESS; + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn, + time_t sent_start) +{ +#ifdef HAVE_MYSQL + int rc = SLURM_SUCCESS; + int i = 0; + time_t my_time = time(NULL); + struct tm start_tm; + struct tm end_tm; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + char *query = NULL; + char *tmp = NULL; + time_t last_hour = sent_start; + time_t last_day = sent_start; + time_t last_month = sent_start; + time_t start_time = 0; + time_t end_time = 0; + DEF_TIMERS; + + char *update_req_inx[] = { + "hourly_rollup", + "daily_rollup", + "monthly_rollup" + }; + + enum { + UPDATE_HOUR, + UPDATE_DAY, + UPDATE_MONTH, + UPDATE_COUNT + }; + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + if(!sent_start) { + i=0; + xstrfmtcat(tmp, "%s", update_req_inx[i]); + for(i=1; i<UPDATE_COUNT; i++) { + xstrfmtcat(tmp, ", %s", update_req_inx[i]); + } + query = xstrdup_printf("select %s from %s", + tmp, last_ran_table); + xfree(tmp); + + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + + xfree(query); + row = mysql_fetch_row(result); + if(row) { + last_hour = atoi(row[UPDATE_HOUR]); + last_day = atoi(row[UPDATE_DAY]); + last_month = atoi(row[UPDATE_MONTH]); + mysql_free_result(result); + } else { + query = xstrdup_printf( + "select @PS := period_start from %s limit 1;" + "insert into %s " + "(hourly_rollup, daily_rollup, monthly_rollup) " + "values (@PS, @PS, @PS);", + event_table, last_ran_table); + + mysql_free_result(result); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + row = mysql_fetch_row(result); + if(!row) { + debug("No clusters have been added " + "not doing rollup"); + mysql_free_result(result); + return SLURM_SUCCESS; + } + + 
last_hour = last_day = last_month = atoi(row[0]); + mysql_free_result(result); + } + } +/* last_hour = 1211475599; */ +/* last_day = 1211475599; */ +/* last_month = 1211475599; */ + +// last_hour = 1211403599; + // last_hour = 1206946800; +// last_day = 1207033199; +// last_day = 1197033199; +// last_month = 1204358399; + + if(!localtime_r(&last_hour, &start_tm)) { + error("Couldn't get localtime from hour start %d", last_hour); + return SLURM_ERROR; + } + + if(!localtime_r(&my_time, &end_tm)) { + error("Couldn't get localtime from hour end %d", my_time); + return SLURM_ERROR; + } + + /* below and anywhere in a rollup plugin when dealing with + * epoch times we need to set the tm_isdst = -1 so we don't + * have to worry about the time changes. Not setting it to -1 + * will cause problems in the month with the date change. + */ + + start_tm.tm_sec = 0; + start_tm.tm_min = 0; + start_tm.tm_hour++; + start_tm.tm_isdst = -1; + start_time = mktime(&start_tm); + end_tm.tm_sec = 59; + end_tm.tm_min = 59; + end_tm.tm_hour--; + end_tm.tm_isdst = -1; + end_time = mktime(&end_tm); + if(end_time-start_time > 0) { + START_TIMER; + if((rc = mysql_hourly_rollup(mysql_conn, start_time, end_time)) + != SLURM_SUCCESS) + return rc; + END_TIMER2("hourly_rollup"); + query = xstrdup_printf("update %s set hourly_rollup=%d", + last_ran_table, end_time); + } else { + debug2("no need to run this hour %d < %d", + end_time, start_time); + } + + if(!localtime_r(&last_day, &start_tm)) { + error("Couldn't get localtime from day %d", last_day); + return SLURM_ERROR; + } + start_tm.tm_sec = 0; + start_tm.tm_min = 0; + start_tm.tm_hour = 0; + start_tm.tm_mday++; + start_tm.tm_isdst = -1; + start_time = mktime(&start_tm); + end_tm.tm_hour = 23; + end_tm.tm_mday--; + end_tm.tm_isdst = -1; + end_time = mktime(&end_tm); + if(end_time-start_time > 0) { + START_TIMER; + if((rc = mysql_daily_rollup(mysql_conn, start_time, end_time)) + != SLURM_SUCCESS) + return rc; + END_TIMER2("daily_rollup"); + if(query) + xstrfmtcat(query, ", daily_rollup=%d", end_time); + else + query = xstrdup_printf("update %s set daily_rollup=%d", + last_ran_table, end_time); + } else { + debug2("no need to run this day %d < %d", end_time, start_time); + } + + if(!localtime_r(&last_month, &start_tm)) { + error("Couldn't get localtime from month %d", last_month); + return SLURM_ERROR; + } + + start_tm.tm_sec = 0; + start_tm.tm_min = 0; + start_tm.tm_hour = 0; + start_tm.tm_mday = 1; + start_tm.tm_mon++; + start_tm.tm_isdst = -1; + start_time = mktime(&start_tm); + end_tm.tm_sec = -1; + end_tm.tm_min = 0; + end_tm.tm_hour = 0; + end_tm.tm_mday = 1; + end_tm.tm_isdst = -1; + end_time = mktime(&end_tm); + if(end_time-start_time > 0) { + START_TIMER; + if((rc = mysql_monthly_rollup( + mysql_conn, start_time, end_time)) != SLURM_SUCCESS) + return rc; + END_TIMER2("monthly_rollup"); + + if(query) + xstrfmtcat(query, ", montly_rollup=%d", end_time); + else + query = xstrdup_printf( + "update %s set monthly_rollup=%d", + last_ran_table, end_time); + } else { + debug2("no need to run this month %d < %d", + end_time, start_time); + } + + if(query) { + debug3("%s", query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + } + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, char *reason) +{ +#ifdef HAVE_MYSQL + uint16_t cpus; + int rc = SLURM_SUCCESS; + char *query = NULL; + char *my_reason; + + 
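The rollup logic above clips both timestamps to whole hours with localtime_r()/mktime() and forces tm_isdst to -1 so that, as the comment in the code explains, daylight-saving transitions cannot skew the window. A minimal standalone sketch of that arithmetic (the function and variable names here are illustrative, not SLURM's):

/* Sketch only: compute the hour-aligned window [start, end] that the
 * hourly rollup above would process. */
#include <stdio.h>
#include <time.h>

static void hourly_window(time_t last_hour, time_t now,
			  time_t *start, time_t *end)
{
	struct tm start_tm, end_tm;

	localtime_r(&last_hour, &start_tm);
	localtime_r(&now, &end_tm);

	/* bump forward to the next hour boundary after the last rollup */
	start_tm.tm_sec = 0;
	start_tm.tm_min = 0;
	start_tm.tm_hour++;
	start_tm.tm_isdst = -1;		/* let mktime() resolve DST */
	*start = mktime(&start_tm);

	/* back up to the last second of the previous full hour */
	end_tm.tm_sec = 59;
	end_tm.tm_min = 59;
	end_tm.tm_hour--;
	end_tm.tm_isdst = -1;
	*end = mktime(&end_tm);
}

int main(void)
{
	time_t start, end, now = time(NULL);

	hourly_window(now - 7200 /* pretend the last rollup was 2h ago */,
		      now, &start, &end);
	if (end > start)
		printf("roll up %ld .. %ld\n", (long)start, (long)end);
	else
		printf("nothing to roll up yet\n");
	return 0;
}

Keeping the tm_isdst value that localtime_r() reported and then editing the other fields is exactly the trap the code's comment warns about; with -1, mktime() re-derives the DST flag for the adjusted time.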
if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + if (slurmctld_conf.fast_schedule && !slurmdbd_conf) + cpus = node_ptr->config_ptr->cpus; + else + cpus = node_ptr->cpus; + + if (reason) + my_reason = reason; + else + my_reason = node_ptr->reason; + + debug2("inserting %s(%s) with %u cpus", node_ptr->name, cluster, cpus); + + query = xstrdup_printf( + "update %s set period_end=%d where cluster='%s' " + "and period_end=0 and node_name='%s';", + event_table, (event_time-1), cluster, node_ptr->name); + xstrfmtcat(query, + "insert into %s " + "(node_name, cluster, cpu_count, period_start, reason) " + "values ('%s', '%s', %u, %d, '%s');", + event_table, node_ptr->name, cluster, + cpus, event_time, my_reason); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} +extern int clusteracct_storage_p_node_up(mysql_conn_t *mysql_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time) +{ +#ifdef HAVE_MYSQL + char* query; + int rc = SLURM_SUCCESS; + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + query = xstrdup_printf( + "update %s set period_end=%d where cluster='%s' " + "and period_end=0 and node_name='%s';", + event_table, (event_time-1), cluster, node_ptr->name); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int clusteracct_storage_p_register_ctld(char *cluster, + uint16_t port) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn, + char *cluster, + uint32_t procs, + time_t event_time) +{ +#ifdef HAVE_MYSQL + static uint32_t last_procs = -1; + char* query; + int rc = SLURM_SUCCESS; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + + if (procs == last_procs) { + debug3("we have the same procs as before no need to " + "update the database."); + return SLURM_SUCCESS; + } + last_procs = procs; + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + /* Record the processor count */ + query = xstrdup_printf( + "select cpu_count from %s where cluster='%s' " + "and period_end=0 and node_name=''", + event_table, cluster); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + /* we only are checking the first one here */ + if(!(row = mysql_fetch_row(result))) { + debug("We don't have an entry for this machine %s " + "most likely a first time running.", cluster); + goto add_it; + } + + if(atoi(row[0]) == procs) { + debug("%s hasn't 
changed since last entry", cluster); + goto end_it; + } + debug("%s has changed from %s cpus to %u", cluster, row[0], procs); + + query = xstrdup_printf( + "update %s set period_end=%d where cluster='%s' " + "and period_end=0 and node_name=''", + event_table, (event_time-1), cluster); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) + goto end_it; +add_it: + query = xstrdup_printf( + "insert into %s (cluster, cpu_count, period_start) " + "values ('%s', %u, %d)", + event_table, cluster, procs, event_time); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + +end_it: + mysql_free_result(result); + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int clusteracct_storage_p_get_usage( + mysql_conn_t *mysql_conn, + acct_cluster_rec_t *cluster_rec, time_t start, time_t end) +{ +#ifdef HAVE_MYSQL + + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage the start of a job + */ +extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn, + struct job_record *job_ptr) +{ +#ifdef HAVE_MYSQL + int rc=SLURM_SUCCESS; + char *jname, *nodes; + long priority; + int track_steps = 0; + char *block_id = NULL; + char *query = NULL; + int reinit = 0; + + if (!job_ptr->details || !job_ptr->details->submit_time) { + error("jobacct_storage_p_job_start: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + debug2("mysql_jobacct_job_start() called"); + priority = (job_ptr->priority == NO_VAL) ? 
+ -1L : (long) job_ptr->priority; + + if (job_ptr->name && job_ptr->name[0]) { + int i; + jname = xmalloc(strlen(job_ptr->name) + 1); + for (i=0; job_ptr->name[i]; i++) { + if (isalnum(job_ptr->name[i])) + jname[i] = job_ptr->name[i]; + else + jname[i] = '_'; + } + } else { + jname = xstrdup("allocation"); + track_steps = 1; + } + + if (job_ptr->nodes && job_ptr->nodes[0]) + nodes = job_ptr->nodes; + else + nodes = "(null)"; + + if(job_ptr->batch_flag) + track_steps = 1; + + if(slurmdbd_conf) { + block_id = xstrdup(job_ptr->comment); + } else { + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_BLOCK_ID, + &block_id); + } + + job_ptr->requid = -1; /* force to -1 for sacct to know this + * hasn't been set yet */ + if(!job_ptr->db_index) { + query = xstrdup_printf( + "insert into %s " + "(jobid, account, associd, uid, gid, partition, " + "blockid, eligible, submit, start, name, track_steps, " + "state, priority, req_cpus, alloc_cpus, nodelist) " + "values (%u, '%s', %u, %u, %u, '%s', '%s', " + "%d, %d, %d, '%s', %u, " + "%u, %u, %u, %u, '%s') " + "on duplicate key update id=LAST_INSERT_ID(id)", + job_table, job_ptr->job_id, job_ptr->account, + job_ptr->assoc_id, + job_ptr->user_id, job_ptr->group_id, + job_ptr->partition, block_id, + (int)job_ptr->details->begin_time, + (int)job_ptr->details->submit_time, + (int)job_ptr->start_time, + jname, track_steps, + job_ptr->job_state & (~JOB_COMPLETING), + priority, job_ptr->num_procs, + job_ptr->total_procs, nodes); + + try_again: + if(!(job_ptr->db_index = mysql_insert_ret_id( + mysql_conn->acct_mysql_db, query))) { + if(!reinit) { + error("It looks like the storage has gone " + "away trying to reconnect"); + mysql_close_db_connection( + &mysql_conn->acct_mysql_db); + mysql_get_db_connection( + &mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info); + reinit = 1; + goto try_again; + } else + rc = SLURM_ERROR; + } + } else { + query = xstrdup_printf( + "update %s set partition='%s', blockid='%s', start=%d, " + "name='%s', state=%u, alloc_cpus=%u, nodelist='%s', " + "account='%s' where id=%d", + job_table, job_ptr->partition, block_id, + (int)job_ptr->start_time, + jname, + job_ptr->job_state & (~JOB_COMPLETING), + job_ptr->total_procs, nodes, + job_ptr->account, job_ptr->db_index); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + } + + xfree(block_id); + xfree(jname); + + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage the end of a job + */ +extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn, + struct job_record *job_ptr) +{ +#ifdef HAVE_MYSQL + char *query = NULL, *nodes = NULL; + int rc=SLURM_SUCCESS; + + if (!job_ptr->db_index + && (!job_ptr->details || !job_ptr->details->submit_time)) { + error("jobacct_storage_p_job_complete: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + debug2("mysql_jobacct_job_complete() called"); + if (job_ptr->end_time == 0) { + debug("mysql_jobacct: job %u never started", job_ptr->job_id); + return SLURM_ERROR; + } + + if (job_ptr->nodes && job_ptr->nodes[0]) + nodes = job_ptr->nodes; + else + nodes = "(null)"; + + 
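jobacct_storage_p_job_start() above recovers the job's database index from the insert itself: "on duplicate key update id=LAST_INSERT_ID(id)" makes a duplicate insert re-export the existing row's id through mysql_insert_id() instead of creating a second record, so a re-sent job start is harmless. A minimal sketch of the idiom with the raw MySQL C API (the table and column names are placeholders, and the plugin's mysql_insert_ret_id() presumably wraps the same mysql_insert_id() call):

/* Sketch only: insert a job row, or fetch the id of the row that already
 * exists for this jobid, in a single statement. */
#include <mysql.h>
#include <stdio.h>

static unsigned long insert_or_lookup_job(MYSQL *db, unsigned int jobid)
{
	char query[256];

	snprintf(query, sizeof(query),
		 "insert into job_table (jobid, state) values (%u, 0) "
		 "on duplicate key update id=LAST_INSERT_ID(id)", jobid);
	if (mysql_query(db, query)) {
		fprintf(stderr, "insert failed: %s\n", mysql_error(db));
		return 0;
	}
	/* New row: the fresh auto_increment id.  Duplicate: the old id,
	 * re-exported by LAST_INSERT_ID(id). */
	return (unsigned long) mysql_insert_id(db);
}

The fallback _get_db_index() lookups in the completion and step paths cover the case where a job finished without its start ever reaching the database.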
if(!job_ptr->db_index) { + job_ptr->db_index = _get_db_index(mysql_conn->acct_mysql_db, + job_ptr->details->submit_time, + job_ptr->job_id, + job_ptr->assoc_id); + if(job_ptr->db_index == (uint32_t)-1) { + + } + } + query = xstrdup_printf("update %s set start=%u, end=%u, state=%d, " + "nodelist='%s', comp_code=%u, " + "kill_requid=%u where id=%u", + job_table, (int)job_ptr->start_time, + (int)job_ptr->end_time, + job_ptr->job_state & (~JOB_COMPLETING), + nodes, job_ptr->exit_code, + job_ptr->requid, job_ptr->db_index); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage the start of a job step + */ +extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn, + struct step_record *step_ptr) +{ +#ifdef HAVE_MYSQL + int cpus = 0; + int rc=SLURM_SUCCESS; + char node_list[BUFFER_SIZE]; +#ifdef HAVE_BG + char *ionodes = NULL; +#endif + char *query = NULL; + + if (!step_ptr->job_ptr->db_index + && (!step_ptr->job_ptr->details + || !step_ptr->job_ptr->details->submit_time)) { + error("jobacct_storage_p_step_start: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + if(slurmdbd_conf) { + cpus = step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + } else { +#ifdef HAVE_BG + cpus = step_ptr->job_ptr->num_procs; + select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + &ionodes); + if(ionodes) { + snprintf(node_list, BUFFER_SIZE, + "%s[%s]", step_ptr->job_ptr->nodes, ionodes); + xfree(ionodes); + } else + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + +#else + if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) { + cpus = step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + } else { + cpus = step_ptr->step_layout->task_cnt; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->step_layout->node_list); + } +#endif + } + + step_ptr->job_ptr->requid = -1; /* force to -1 for sacct to know this + * hasn't been set yet */ + + if(!step_ptr->job_ptr->db_index) { + step_ptr->job_ptr->db_index = + _get_db_index(mysql_conn->acct_mysql_db, + step_ptr->job_ptr->details->submit_time, + step_ptr->job_ptr->job_id, + step_ptr->job_ptr->assoc_id); + if(step_ptr->job_ptr->db_index == (uint32_t)-1) + return SLURM_ERROR; + } + /* we want to print a -1 for the requid so leave it a + %d */ + query = xstrdup_printf( + "insert into %s (id, stepid, start, name, state, " + "cpus, nodelist) " + "values (%d, %u, %d, '%s', %d, %u, '%s') " + "on duplicate key update cpus=%u", + step_table, step_ptr->job_ptr->db_index, + step_ptr->step_id, + (int)step_ptr->start_time, step_ptr->name, + JOB_RUNNING, cpus, node_list, cpus); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage the end of a job step + */ +extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn, + struct step_record *step_ptr) +{ +#ifdef 
HAVE_MYSQL + time_t now; + int elapsed; + int comp_status; + int cpus = 0; + struct jobacctinfo *jobacct = (struct jobacctinfo *)step_ptr->jobacct; + struct jobacctinfo dummy_jobacct; + float ave_vsize = 0, ave_rss = 0, ave_pages = 0; + float ave_cpu = 0, ave_cpu2 = 0; + char *query = NULL; + int rc =SLURM_SUCCESS; + + if (!step_ptr->job_ptr->db_index + && (!step_ptr->job_ptr->details + || !step_ptr->job_ptr->details->submit_time)) { + error("jobacct_storage_p_step_complete: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if (jobacct == NULL) { + /* JobAcctGather=jobacct_gather/none, no data to process */ + bzero(&dummy_jobacct, sizeof(dummy_jobacct)); + jobacct = &dummy_jobacct; + } + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + if(slurmdbd_conf) { + now = step_ptr->job_ptr->end_time; + cpus = step_ptr->job_ptr->total_procs; + + } else { + now = time(NULL); +#ifdef HAVE_BG + cpus = step_ptr->job_ptr->num_procs; + +#else + if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) + cpus = step_ptr->job_ptr->total_procs; + else + cpus = step_ptr->step_layout->task_cnt; +#endif + } + + if ((elapsed=now-step_ptr->start_time)<0) + elapsed=0; /* For *very* short jobs, if clock is wrong */ + if (step_ptr->exit_code) + comp_status = JOB_FAILED; + else + comp_status = JOB_COMPLETE; + + /* figure out the ave of the totals sent */ + if(cpus > 0) { + ave_vsize = jobacct->tot_vsize; + ave_vsize /= cpus; + ave_rss = jobacct->tot_rss; + ave_rss /= cpus; + ave_pages = jobacct->tot_pages; + ave_pages /= cpus; + ave_cpu = jobacct->tot_cpu; + ave_cpu /= cpus; + ave_cpu /= 100; + } + + if(jobacct->min_cpu != (uint32_t)NO_VAL) { + ave_cpu2 = jobacct->min_cpu; + ave_cpu2 /= 100; + } + + if(!step_ptr->job_ptr->db_index) { + step_ptr->job_ptr->db_index = + _get_db_index(mysql_conn->acct_mysql_db, + step_ptr->job_ptr->details->submit_time, + step_ptr->job_ptr->job_id, + step_ptr->job_ptr->assoc_id); + if(step_ptr->job_ptr->db_index == -1) + return SLURM_ERROR; + } + + query = xstrdup_printf( + "update %s set end=%d, state=%d, " + "kill_requid=%u, comp_code=%u, " + "user_sec=%ld, user_usec=%ld, " + "sys_sec=%ld, sys_usec=%ld, " + "max_vsize=%u, max_vsize_task=%u, " + "max_vsize_node=%u, ave_vsize=%.2f, " + "max_rss=%u, max_rss_task=%u, " + "max_rss_node=%u, ave_rss=%.2f, " + "max_pages=%u, max_pages_task=%u, " + "max_pages_node=%u, ave_pages=%.2f, " + "min_cpu=%.2f, min_cpu_task=%u, " + "min_cpu_node=%u, ave_cpu=%.2f " + "where id=%u and stepid=%u", + step_table, (int)now, + comp_status, + step_ptr->job_ptr->requid, + step_ptr->exit_code, + /* user seconds */ + jobacct->user_cpu_sec, + /* user microseconds */ + jobacct->user_cpu_usec, + /* system seconds */ + jobacct->sys_cpu_sec, + /* system microsecs */ + jobacct->sys_cpu_usec, + jobacct->max_vsize, /* max vsize */ + jobacct->max_vsize_id.taskid, /* max vsize task */ + jobacct->max_vsize_id.nodeid, /* max vsize node */ + ave_vsize, /* ave vsize */ + jobacct->max_rss, /* max vsize */ + jobacct->max_rss_id.taskid, /* max rss task */ + jobacct->max_rss_id.nodeid, /* max rss node */ + ave_rss, /* ave rss */ + jobacct->max_pages, /* max pages */ + jobacct->max_pages_id.taskid, /* max pages task */ + 
jobacct->max_pages_id.nodeid, /* max pages node */ + ave_pages, /* ave pages */ + ave_cpu2, /* min cpu */ + jobacct->min_cpu_id.taskid, /* min cpu task */ + jobacct->min_cpu_id.nodeid, /* min cpu node */ + ave_cpu, /* ave cpu */ + step_ptr->job_ptr->db_index, step_ptr->step_id); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage a suspention of a job + */ +extern int jobacct_storage_p_suspend(mysql_conn_t *mysql_conn, + struct job_record *job_ptr) +{ +#ifdef HAVE_MYSQL + char *query = NULL; + int rc = SLURM_SUCCESS; + bool suspended = false; + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + if(!job_ptr->db_index) { + job_ptr->db_index = _get_db_index(mysql_conn->acct_mysql_db, + job_ptr->details->submit_time, + job_ptr->job_id, + job_ptr->assoc_id); + if(job_ptr->db_index == -1) + return SLURM_ERROR; + } + + if (job_ptr->job_state == JOB_SUSPENDED) + suspended = true; + + xstrfmtcat(query, + "update %s set suspended=%d-suspended, state=%d " + "where id=%u;", + job_table, (int)job_ptr->suspend_time, + job_ptr->job_state & (~JOB_COMPLETING), + job_ptr->db_index); + if(suspended) + xstrfmtcat(query, + "insert into %s (id, associd, start, end) " + "values (%u, %u, %d, 0);", + suspend_table, job_ptr->db_index, job_ptr->assoc_id, + (int)job_ptr->suspend_time); + else + xstrfmtcat(query, + "update %s set end=%d where id=%u && end=0;", + suspend_table, (int)job_ptr->suspend_time, + job_ptr->db_index); + debug3("%d query\n%s", mysql_conn->conn, query); + + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + + xfree(query); + if(rc != SLURM_ERROR) { + xstrfmtcat(query, + "update %s set suspended=%u-suspended, " + "state=%d where id=%u and end=0", + step_table, (int)job_ptr->suspend_time, + job_ptr->job_state, job_ptr->db_index); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + } + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * get info from the storage + * returns List of job_rec_t * + * note List needs to be freed when called + */ +extern List jobacct_storage_p_get_jobs(mysql_conn_t *mysql_conn, + List selected_steps, + List selected_parts, + void *params) +{ + List job_list = NULL; +#ifdef HAVE_MYSQL + if(!mysql_conn) { + error("We need a connection to run this"); + return NULL; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return NULL; + } + } + job_list = mysql_jobacct_process_get_jobs(mysql_conn, + selected_steps, + selected_parts, + params); +#endif + return job_list; +} + +/* + * expire old info from the storage + */ +extern void jobacct_storage_p_archive(mysql_conn_t *mysql_conn, + List selected_parts, + void *params) +{ +#ifdef HAVE_MYSQL + if(!mysql_conn) { + error("We need a connection to run this"); + return; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(!mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info)) + return; + } + 
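Nearly every entry point above opens with the same "ping, then re-connect" guard built on the plugin's mysql_db_ping() and mysql_get_db_connection() wrappers. Expressed against the raw client library, that guard fits in one helper; the sketch below is an illustration with placeholder credentials, not a proposed change to the patch.

/* Sketch only: keep a single live connection, re-establishing it whenever
 * a ping fails, so callers check one return code. */
#include <mysql.h>
#include <stddef.h>

static int ensure_connection(MYSQL **db)
{
	if (*db && mysql_ping(*db) == 0)
		return 0;			/* connection still alive */

	if (*db) {
		mysql_close(*db);
		*db = NULL;
	}
	*db = mysql_init(NULL);
	if (!*db)
		return -1;
	if (!mysql_real_connect(*db, "localhost", "slurm", "password",
				"slurm_acct_db", 0, NULL, 0)) {
		mysql_close(*db);
		*db = NULL;
		return -1;			/* caller logs the error */
	}
	return 0;
}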
mysql_jobacct_process_archive(mysql_conn, + selected_parts, params); +#endif + return; +} + +extern int acct_storage_p_update_shares_used(mysql_conn_t *mysql_conn, + List shares_used) +{ + /* This definitely needs to be fleshed out. + * Go through the list of shares_used_object_t objects and store them */ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_flush_jobs_on_cluster( + mysql_conn_t *mysql_conn, char *cluster, time_t event_time) +{ + int rc = SLURM_SUCCESS; +#ifdef HAVE_MYSQL + /* put end times for a clean start */ + char *query = NULL; + + if(!mysql_conn) { + error("We need a connection to run this"); + return SLURM_ERROR; + } else if(!mysql_conn->acct_mysql_db + || mysql_db_ping(mysql_conn->acct_mysql_db) != 0) { + if(mysql_get_db_connection(&mysql_conn->acct_mysql_db, + mysql_db_name, mysql_db_info) + != SLURM_SUCCESS) { + error("unable to re-connect to mysql database"); + return SLURM_ERROR; + } + } + + query = xstrdup_printf("update %s as t1, %s as t2 set " + "t1.state=%u, t1.end=%u where " + "t2.id=t1.associd and t2.cluster='%s' " + "&& t1.end=0;", + job_table, assoc_table, JOB_CANCELLED, + event_time, cluster); + + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); +#endif + + return rc; +} diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c new file mode 100644 index 000000000..359c1926f --- /dev/null +++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c @@ -0,0 +1,463 @@ +/*****************************************************************************\ + * mysql_jobacct_process.c - functions the processing of + * information from the mysql jobacct + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ + +#include <stdlib.h> +#include "src/common/xstring.h" +#include "mysql_jobacct_process.h" + +#ifdef HAVE_MYSQL +static void _do_fdump(List job_list) +{ + info("fdump option not applicable from mysql plugin"); + return; +} + +extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, + List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ + + char *query = NULL; + char *extra = NULL; + char *tmp = NULL; + char *selected_part = NULL; + jobacct_selected_step_t *selected_step = NULL; + ListIterator itr = NULL; + int set = 0; + MYSQL_RES *result = NULL, *step_result = NULL; + MYSQL_ROW row, step_row; + int i; + jobacct_job_rec_t *job = NULL; + jobacct_step_rec_t *step = NULL; + time_t now = time(NULL); + List job_list = list_create(destroy_jobacct_job_rec); + + /* if this changes you will need to edit the corresponding + * enum below also t1 is job_table */ + char *job_req_inx[] = { + "t1.id", + "t1.jobid", + "t1.associd", + "t1.uid", + "t1.gid", + "t1.partition", + "t1.blockid", + "t1.account", + "t1.eligible", + "t1.submit", + "t1.start", + "t1.end", + "t1.suspended", + "t1.name", + "t1.track_steps", + "t1.state", + "t1.comp_code", + "t1.priority", + "t1.req_cpus", + "t1.alloc_cpus", + "t1.nodelist", + "t1.kill_requid", + "t1.qos" + }; + + /* if this changes you will need to edit the corresponding + * enum below also t1 is step_table */ + char *step_req_inx[] = { + "t1.stepid", + "t1.start", + "t1.end", + "t1.suspended", + "t1.name", + "t1.nodelist", + "t1.state", + "t1.kill_requid", + "t1.comp_code", + "t1.cpus", + "t1.user_sec", + "t1.user_usec", + "t1.sys_sec", + "t1.sys_usec", + "t1.max_vsize", + "t1.max_vsize_task", + "t1.max_vsize_node", + "t1.ave_vsize", + "t1.max_rss", + "t1.max_rss_task", + "t1.max_rss_node", + "t1.ave_rss", + "t1.max_pages", + "t1.max_pages_task", + "t1.max_pages_node", + "t1.ave_pages", + "t1.min_cpu", + "t1.min_cpu_task", + "t1.min_cpu_node", + "t1.ave_cpu" + }; + + enum { + JOB_REQ_ID, + JOB_REQ_JOBID, + JOB_REQ_ASSOCID, + JOB_REQ_UID, + JOB_REQ_GID, + JOB_REQ_PARTITION, + JOB_REQ_BLOCKID, + JOB_REQ_ACCOUNT, + JOB_REQ_ELIGIBLE, + JOB_REQ_SUBMIT, + JOB_REQ_START, + JOB_REQ_END, + JOB_REQ_SUSPENDED, + JOB_REQ_NAME, + JOB_REQ_TRACKSTEPS, + JOB_REQ_STATE, + JOB_REQ_COMP_CODE, + JOB_REQ_PRIORITY, + JOB_REQ_REQ_CPUS, + JOB_REQ_ALLOC_CPUS, + JOB_REQ_NODELIST, + JOB_REQ_KILL_REQUID, + JOB_REQ_QOS, + JOB_REQ_COUNT + }; + enum { + STEP_REQ_STEPID, + STEP_REQ_START, + STEP_REQ_END, + STEP_REQ_SUSPENDED, + STEP_REQ_NAME, + STEP_REQ_NODELIST, + STEP_REQ_STATE, + STEP_REQ_KILL_REQUID, + STEP_REQ_COMP_CODE, + STEP_REQ_CPUS, + STEP_REQ_USER_SEC, + STEP_REQ_USER_USEC, + STEP_REQ_SYS_SEC, + STEP_REQ_SYS_USEC, + STEP_REQ_MAX_VSIZE, + STEP_REQ_MAX_VSIZE_TASK, + STEP_REQ_MAX_VSIZE_NODE, + STEP_REQ_AVE_VSIZE, + STEP_REQ_MAX_RSS, + STEP_REQ_MAX_RSS_TASK, + STEP_REQ_MAX_RSS_NODE, + STEP_REQ_AVE_RSS, + STEP_REQ_MAX_PAGES, + STEP_REQ_MAX_PAGES_TASK, + STEP_REQ_MAX_PAGES_NODE, + STEP_REQ_AVE_PAGES, + STEP_REQ_MIN_CPU, + STEP_REQ_MIN_CPU_TASK, + STEP_REQ_MIN_CPU_NODE, + STEP_REQ_AVE_CPU, + STEP_REQ_COUNT + }; + + 
if(selected_steps && list_count(selected_steps)) { + set = 0; + if(extra) + xstrcat(extra, " && ("); + else + xstrcat(extra, " where ("); + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + tmp = xstrdup_printf("t1.jobid=%u", + selected_step->jobid); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(selected_parts && list_count(selected_parts)) { + set = 0; + if(extra) + xstrcat(extra, " && ("); + else + xstrcat(extra, " where ("); + itr = list_iterator_create(selected_parts); + while((selected_part = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + tmp = xstrdup_printf("t1.partition='%s'", + selected_part); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + for(i=0; i<JOB_REQ_COUNT; i++) { + if(i) + xstrcat(tmp, ", "); + xstrcat(tmp, job_req_inx[i]); + } + + query = xstrdup_printf("select %s from %s t1", + tmp, job_table); + xfree(tmp); + + if(extra) { + xstrcat(query, extra); + xfree(extra); + } + + //info("query = %s", query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + list_destroy(job_list); + return NULL; + } + xfree(query); + + while((row = mysql_fetch_row(result))) { + char *id = row[JOB_REQ_ID]; + acct_association_rec_t account_rec; + memset(&account_rec, 0, sizeof(acct_association_rec_t)); + job = create_jobacct_job_rec(); + + job->alloc_cpus = atoi(row[JOB_REQ_ALLOC_CPUS]); + account_rec.id = job->associd = atoi(row[JOB_REQ_ASSOCID]); + assoc_mgr_fill_in_assoc(mysql_conn, &account_rec, 0, NULL); + if(account_rec.cluster) { + if(params->opt_cluster && + strcmp(params->opt_cluster, account_rec.cluster)) { + destroy_jobacct_job_rec(job); + job = NULL; + continue; + } + job->cluster = xstrdup(account_rec.cluster); + } + + if(account_rec.user) + job->user = xstrdup(account_rec.user); + else + job->uid = atoi(row[JOB_REQ_UID]); + if(account_rec.acct) + job->account = xstrdup(account_rec.acct); + else + job->account = xstrdup(row[JOB_REQ_ACCOUNT]); + + job->blockid = xstrdup(row[JOB_REQ_BLOCKID]); + + job->eligible = atoi(row[JOB_REQ_ELIGIBLE]); + job->submit = atoi(row[JOB_REQ_SUBMIT]); + job->start = atoi(row[JOB_REQ_START]); + job->end = atoi(row[JOB_REQ_END]); + job->suspended = atoi(row[JOB_REQ_SUSPENDED]); + if(!job->end) { + job->elapsed = now - job->start; + } else { + job->elapsed = job->end - job->start; + } + job->elapsed -= job->suspended; + + job->jobid = atoi(row[JOB_REQ_JOBID]); + job->jobname = xstrdup(row[JOB_REQ_NAME]); + job->gid = atoi(row[JOB_REQ_GID]); + job->exitcode = atoi(row[JOB_REQ_COMP_CODE]); + job->partition = xstrdup(row[JOB_REQ_PARTITION]); + job->nodes = xstrdup(row[JOB_REQ_NODELIST]); + if (!strcmp(job->nodes, "(null)")) { + xfree(job->nodes); + job->nodes = xstrdup("(unknown)"); + } + + job->track_steps = atoi(row[JOB_REQ_TRACKSTEPS]); + job->state = atoi(row[JOB_REQ_STATE]); + job->priority = atoi(row[JOB_REQ_PRIORITY]); + job->req_cpus = atoi(row[JOB_REQ_REQ_CPUS]); + job->requid = atoi(row[JOB_REQ_KILL_REQUID]); + job->qos = atoi(row[JOB_REQ_QOS]); + job->show_full = 1; + + list_append(job_list, job); + + if(selected_steps && list_count(selected_steps)) { + set = 0; + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if(selected_step->jobid != job->jobid) { + continue; + } else if (selected_step->stepid + == (uint32_t)NO_VAL) { + job->show_full = 1; 
+ break; + } + + if(set) + xstrcat(extra, " || "); + else + xstrcat(extra, " && ("); + + tmp = xstrdup_printf("t1.stepid=%u", + selected_step->stepid); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + job->show_full = 0; + } + list_iterator_destroy(itr); + if(set) + xstrcat(extra, ")"); + } + for(i=0; i<STEP_REQ_COUNT; i++) { + if(i) + xstrcat(tmp, ", "); + xstrcat(tmp, step_req_inx[i]); + } + query = xstrdup_printf("select %s from %s t1 where t1.id=%s", + tmp, step_table, id); + xfree(tmp); + + if(extra) { + xstrcat(query, extra); + xfree(extra); + } + + //info("query = %s", query); + if(!(step_result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + list_destroy(job_list); + return NULL; + } + xfree(query); + while ((step_row = mysql_fetch_row(step_result))) { + step = create_jobacct_step_rec(); + step->jobid = job->jobid; + list_append(job->steps, step); + step->stepid = atoi(step_row[STEP_REQ_STEPID]); + /* info("got step %u.%u", */ +/* job->header.jobnum, step->stepnum); */ + step->state = atoi(step_row[STEP_REQ_STATE]); + step->exitcode = atoi(step_row[STEP_REQ_COMP_CODE]); + step->ncpus = atoi(step_row[STEP_REQ_CPUS]); + step->start = atoi(step_row[JOB_REQ_START]); + + step->end = atoi(step_row[STEP_REQ_END]); + /* figure this out by start stop */ + step->suspended = atoi(step_row[STEP_REQ_SUSPENDED]); + if(!step->end) { + step->elapsed = now - step->start; + } else { + step->elapsed = step->end - step->start; + } + step->elapsed -= step->suspended; + step->user_cpu_sec = atoi(step_row[STEP_REQ_USER_SEC]); + step->user_cpu_usec = + atoi(step_row[STEP_REQ_USER_USEC]); + step->sys_cpu_sec = atoi(step_row[STEP_REQ_SYS_SEC]); + step->sys_cpu_usec = atoi(step_row[STEP_REQ_SYS_USEC]); + job->tot_cpu_sec += + step->tot_cpu_sec += + step->user_cpu_sec + step->sys_cpu_sec; + job->tot_cpu_usec += + step->tot_cpu_usec += + step->user_cpu_usec + step->sys_cpu_usec; + step->sacct.max_vsize = + atoi(step_row[STEP_REQ_MAX_VSIZE]) * 1024; + step->sacct.max_vsize_id.taskid = + atoi(step_row[STEP_REQ_MAX_VSIZE_TASK]); + step->sacct.ave_vsize = + atof(step_row[STEP_REQ_AVE_VSIZE]) * 1024; + step->sacct.max_rss = + atoi(step_row[STEP_REQ_MAX_RSS]) * 1024; + step->sacct.max_rss_id.taskid = + atoi(step_row[STEP_REQ_MAX_RSS_TASK]); + step->sacct.ave_rss = + atof(step_row[STEP_REQ_AVE_RSS]) * 1024; + step->sacct.max_pages = + atoi(step_row[STEP_REQ_MAX_PAGES]); + step->sacct.max_pages_id.taskid = + atoi(step_row[STEP_REQ_MAX_PAGES_TASK]); + step->sacct.ave_pages = + atof(step_row[STEP_REQ_AVE_PAGES]); + step->sacct.min_cpu = + atof(step_row[STEP_REQ_MIN_CPU]); + step->sacct.min_cpu_id.taskid = + atoi(step_row[STEP_REQ_MIN_CPU_TASK]); + step->sacct.ave_cpu = atof(step_row[STEP_REQ_AVE_CPU]); + step->stepname = xstrdup(step_row[STEP_REQ_NAME]); + step->nodes = xstrdup(step_row[STEP_REQ_NODELIST]); + step->sacct.max_vsize_id.nodeid = + atoi(step_row[STEP_REQ_MAX_VSIZE_NODE]); + step->sacct.max_rss_id.nodeid = + atoi(step_row[STEP_REQ_MAX_RSS_NODE]); + step->sacct.max_pages_id.nodeid = + atoi(step_row[STEP_REQ_MAX_PAGES_NODE]); + step->sacct.min_cpu_id.nodeid = + atoi(step_row[STEP_REQ_MIN_CPU_NODE]); + + step->requid = atoi(step_row[STEP_REQ_KILL_REQUID]); + } + mysql_free_result(step_result); + + if(list_count(job->steps) > 1) + job->track_steps = 1; + } + mysql_free_result(result); + + if (params && params->opt_fdump) + _do_fdump(job_list); + + return job_list; +} + +extern void mysql_jobacct_process_archive(mysql_conn_t *mysql_conn, + List selected_parts, + 
sacct_parameters_t *params) +{ + return; +} + +#endif diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h new file mode 100644 index 000000000..e9def5417 --- /dev/null +++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h @@ -0,0 +1,82 @@ +/*****************************************************************************\ + * mysql_jobacct_process.h - functions the processing of + * information from the mysql jobacct + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#ifndef _HAVE_MYSQL_JOBACCT_PROCESS_H +#define _HAVE_MYSQL_JOBACCT_PROCESS_H + +#include <sys/types.h> +#include <pwd.h> +#include <stdlib.h> +#include "src/common/assoc_mgr.h" +#include "src/common/jobacct_common.h" +#include "src/slurmdbd/read_config.h" +#include "src/slurmctld/slurmctld.h" +#include "src/database/mysql_common.h" +#include "src/common/slurm_accounting_storage.h" + +#ifndef HAVE_MYSQL +typedef void mysql_conn_t; +#else + +typedef struct { + MYSQL *acct_mysql_db; + bool rollback; + List update_list; + int conn; +} mysql_conn_t; + +//extern int acct_db_init; + +extern char *job_table; +extern char *step_table; + +extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, + List selected_steps, + List selected_parts, + sacct_parameters_t *params); + +extern void mysql_jobacct_process_archive(mysql_conn_t *mysql_conn, + List selected_parts, + sacct_parameters_t *params); +#endif + +#endif diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.c b/src/plugins/accounting_storage/mysql/mysql_rollup.c new file mode 100644 index 000000000..4fe1d41c1 --- /dev/null +++ b/src/plugins/accounting_storage/mysql/mysql_rollup.c @@ -0,0 +1,762 @@ +/*****************************************************************************\ + * mysql_rollup.c - functions for rolling up data for associations + * and machines from the mysql storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#include "mysql_rollup.h" + +#ifdef HAVE_MYSQL + +typedef struct { + int assoc_id; + int a_cpu; +} local_assoc_usage_t; + +typedef struct { + char *name; + int total_time; + int a_cpu; + int cpu_count; + int d_cpu; + int i_cpu; + int o_cpu; + int r_cpu; + time_t start; + time_t end; +} local_cluster_usage_t; + + +extern void _destroy_local_assoc_usage(void *object) +{ + local_assoc_usage_t *a_usage = (local_assoc_usage_t *)object; + if(a_usage) { + xfree(a_usage); + } +} + +extern void _destroy_local_cluster_usage(void *object) +{ + local_cluster_usage_t *c_usage = (local_cluster_usage_t *)object; + if(c_usage) { + xfree(c_usage->name); + xfree(c_usage); + } +} + +extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn, + time_t start, time_t end) +{ + int rc = SLURM_SUCCESS; + int add_sec = 3600; + int i=0; + time_t now = time(NULL); + time_t curr_start = start; + time_t curr_end = curr_start + add_sec; + char *query = NULL; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + ListIterator a_itr = NULL; + ListIterator c_itr = NULL; + List assoc_usage_list = list_create(_destroy_local_assoc_usage); + List cluster_usage_list = list_create(_destroy_local_cluster_usage); + char *event_req_inx[] = { + "node_name", + "cluster", + "cpu_count", + "period_start", + "period_end" + }; + char *event_str = NULL; + enum { + EVENT_REQ_NAME, + EVENT_REQ_CLUSTER, + EVENT_REQ_CPU, + EVENT_REQ_START, + EVENT_REQ_END, + EVENT_REQ_COUNT + }; + char *job_req_inx[] = { + "t1.id", + "jobid", + "associd", + "cluster", + "eligible", + "start", + "end", + "suspended", + "alloc_cpus", + "req_cpus" + }; + char *job_str = NULL; + enum { + JOB_REQ_DB_INX, + JOB_REQ_JOBID, + JOB_REQ_ASSOCID, + JOB_REQ_CLUSTER, + JOB_REQ_ELG, + JOB_REQ_START, + JOB_REQ_END, + JOB_REQ_SUSPENDED, + JOB_REQ_ACPU, + JOB_REQ_RCPU, + JOB_REQ_COUNT + }; + char *suspend_req_inx[] = { + "start", + "end" + }; + char *suspend_str = NULL; + enum { + SUSPEND_REQ_START, + SUSPEND_REQ_END, + SUSPEND_REQ_COUNT + }; + + i=0; + xstrfmtcat(event_str, "%s", event_req_inx[i]); + for(i=1; i<EVENT_REQ_COUNT; i++) { + xstrfmtcat(event_str, ", %s", event_req_inx[i]); + } + + i=0; + xstrfmtcat(job_str, "%s", job_req_inx[i]); + for(i=1; i<JOB_REQ_COUNT; i++) { + xstrfmtcat(job_str, ", %s", job_req_inx[i]); + } + + i=0; + xstrfmtcat(suspend_str, "%s", suspend_req_inx[i]); + for(i=1; i<SUSPEND_REQ_COUNT; i++) { + xstrfmtcat(suspend_str, ", %s", suspend_req_inx[i]); + } + +/* info("begin start %s", ctime(&curr_start)); */ +/* info("begin end %s", ctime(&curr_end)); */ + a_itr = list_iterator_create(assoc_usage_list); + c_itr = list_iterator_create(cluster_usage_list); + while(curr_start < end) { + int last_id = 0; + int seconds = 0; + local_cluster_usage_t *c_usage = NULL; + local_assoc_usage_t *a_usage = NULL; + debug3("curr hour is now %d-%d", curr_start, curr_end); +/* info("start %s", ctime(&curr_start)); */ +/* info("end %s", ctime(&curr_end)); */ + + // first get the events during this time + query = xstrdup_printf("select %s from %s where " + "(period_start < %d " + "&& (period_end >= %d " + "|| period_end = 0)) " + "order by node_name, period_start", + event_str, event_table, + curr_end, curr_start); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + while((row = mysql_fetch_row(result))) { + int row_start = atoi(row[EVENT_REQ_START]); + int 
row_end = atoi(row[EVENT_REQ_END]); + int row_cpu = atoi(row[EVENT_REQ_CPU]); + + if(row_start < curr_start) + row_start = curr_start; + + if(!row_end || row_end > curr_end) + row_end = curr_end; + + /* Don't worry about it if the time is less + * than 1 second. + */ + if((row_end - row_start) < 1) + continue; + + if(!row[EVENT_REQ_NAME][0]) { + list_iterator_reset(c_itr); + while((c_usage = list_next(c_itr))) { + if(!strcmp(c_usage->name, + row[EVENT_REQ_CLUSTER])) { + break; + } + } + /* if the cpu count changes we will + * only care about the last cpu count but + * we will keep a total of the time for + * all cpus to get the correct cpu time + * for the entire period. + */ + if(!c_usage) { + c_usage = xmalloc( + sizeof(local_cluster_usage_t)); + c_usage->name = + xstrdup(row[EVENT_REQ_CLUSTER]); + c_usage->cpu_count = row_cpu; + c_usage->total_time = + (row_end - row_start) * row_cpu; + c_usage->start = row_start; + c_usage->end = row_end; + list_append(cluster_usage_list, + c_usage); + } else { + c_usage->cpu_count = row_cpu; + c_usage->total_time += + (row_end - row_start) * row_cpu; + c_usage->end = row_end; + } + continue; + } + + list_iterator_reset(c_itr); + while((c_usage = list_next(c_itr))) { + if(!strcmp(c_usage->name, + row[EVENT_REQ_CLUSTER])) { + int local_start = row_start; + int local_end = row_end; + if(c_usage->start > local_start) + local_start = c_usage->start; + if(c_usage->end < local_end) + local_end = c_usage->end; + + if((local_end - local_start) < 1) + continue; + + seconds = (local_end - local_start); + +/* info("node %s adds " */ +/* "(%d)(%d-%d) * %d = %d " */ +/* "to %d", */ +/* row[EVENT_REQ_NAME], */ +/* seconds, */ +/* local_end, local_start, */ +/* row_cpu, */ +/* seconds * row_cpu, */ +/* row_cpu); */ + c_usage->d_cpu += seconds * row_cpu; + + break; + } + } + } + mysql_free_result(result); + + query = xstrdup_printf("select %s from %s as t1, " + "%s as t2 where " + "(eligible < %d && (end >= %d " + "|| end = 0)) && associd=t2.id " + "order by associd, eligible", + job_str, job_table, assoc_table, + curr_end, curr_start, curr_start); + + debug3("%d query\n%s", mysql_conn->conn, query); + if(!(result = mysql_db_query_ret( + mysql_conn->acct_mysql_db, query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + while((row = mysql_fetch_row(result))) { + int job_id = atoi(row[JOB_REQ_JOBID]); + int assoc_id = atoi(row[JOB_REQ_ASSOCID]); + int row_eligible = atoi(row[JOB_REQ_ELG]); + int row_start = atoi(row[JOB_REQ_START]); + int row_end = atoi(row[JOB_REQ_END]); + int row_acpu = atoi(row[JOB_REQ_ACPU]); + int row_rcpu = atoi(row[JOB_REQ_RCPU]); + seconds = 0; + + if(row_start && (row_start < curr_start)) + row_start = curr_start; + + if(!row_start && row_end) + row_start = row_end; + + if(!row_end || row_end > curr_end) + row_end = curr_end; + + if(last_id != assoc_id) { + a_usage = + xmalloc(sizeof(local_cluster_usage_t)); + a_usage->assoc_id = assoc_id; + list_append(assoc_usage_list, a_usage); + last_id = assoc_id; + } + + + if(!row_start || ((row_end - row_start) < 1)) + goto calc_cluster; + + seconds = (row_end - row_start); + + if(row[JOB_REQ_SUSPENDED]) { + MYSQL_RES *result2 = NULL; + MYSQL_ROW row2; + /* get the suspended time for this job */ + query = xstrdup_printf( + "select %s from %s where " + "(start < %d && (end >= %d " + "|| end = 0)) && id=%s " + "order by start", + suspend_str, suspend_table, + curr_end, curr_start, + row[JOB_REQ_DB_INX]); + + debug4("%d query\n%s", mysql_conn->conn, query); + if(!(result2 = 
mysql_db_query_ret( + mysql_conn->acct_mysql_db, + query, 0))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + while((row2 = mysql_fetch_row(result2))) { + int local_start = + atoi(row2[SUSPEND_REQ_START]); + int local_end = + atoi(row2[SUSPEND_REQ_END]); + + if(!local_start) + continue; + + if(row_start > local_start) + local_start = row_start; + if(row_end < local_end) + local_end = row_end; + + if((local_end - local_start) < 1) + continue; + + seconds -= (local_end - local_start); + } + mysql_free_result(result2); + + } + if(seconds < 1) { + debug4("This job (%u) was suspended " + "the entire hour", job_id); + continue; + } + + + a_usage->a_cpu += seconds * row_acpu; + + calc_cluster: + if(!row[JOB_REQ_CLUSTER]) + continue; + + list_iterator_reset(c_itr); + while((c_usage = list_next(c_itr))) { + if(!strcmp(c_usage->name, + row[JOB_REQ_CLUSTER])) { + if(!row_start || seconds < 1) + goto calc_resv; + +/* info("%d assoc %d adds " */ +/* "(%d)(%d-%d) * %d = %d " */ +/* "to %d", */ +/* job_id, */ +/* a_usage->assoc_id, */ +/* seconds, */ +/* row_end, row_start, */ +/* row_acpu, */ +/* seconds * row_acpu, */ +/* row_acpu); */ + + c_usage->a_cpu += seconds * row_acpu; + + calc_resv: + /* now reserved time */ + if(row_start && + row_start < c_usage->start) + continue; + + row_end = row_start; + row_start = row_eligible; + if(c_usage->start > row_start) + row_start = c_usage->start; + if(c_usage->end < row_end) + row_end = c_usage->end; + + if((row_end - row_start) < 1) + continue; + + seconds = (row_end - row_start); + +/* info("%d assoc %d reserved " */ +/* "(%d)(%d-%d) * %d = %d " */ +/* "to %d", */ +/* job_id, */ +/* assoc_id, */ +/* seconds, */ +/* row_end, row_start, */ +/* row_rcpu, */ +/* seconds * row_rcpu, */ +/* row_rcpu); */ + c_usage->r_cpu += seconds * row_rcpu; + + break; + } + } + } + mysql_free_result(result); + + list_iterator_reset(c_itr); + while((c_usage = list_next(c_itr))) { + c_usage->i_cpu = c_usage->total_time - c_usage->a_cpu - + c_usage->d_cpu - c_usage->r_cpu; + /* sanity check just to make sure we have a + * legitimate time after we calulated + * idle/reserved time put extra in the over + * commit field + */ + + if(c_usage->i_cpu < 0) { + c_usage->r_cpu += c_usage->i_cpu; + c_usage->o_cpu -= c_usage->i_cpu; + c_usage->i_cpu = 0; + } + +/* info("cluster %s(%d) down %d alloc %d " */ +/* "resv %d idle %d over %d " */ +/* "total= %d = %d from %s", */ +/* c_usage->name, */ +/* c_usage->cpu_count, c_usage->d_cpu, */ +/* c_usage->a_cpu, */ +/* c_usage->r_cpu, c_usage->i_cpu, c_usage->o_cpu, */ +/* c_usage->d_cpu + c_usage->a_cpu + */ +/* c_usage->r_cpu + c_usage->i_cpu, */ +/* c_usage->total_time, */ +/* ctime(&c_usage->start)); */ +/* info("to %s", ctime(&c_usage->end)); */ + if(query) { + xstrfmtcat(query, + ", (%d, %d, '%s', %d, %d, " + "%d, %d, %d, %d, %d) " + "on duplicate key update " + "mod_time=%d, cpu_count=%d, " + "alloc_cpu_secs=%d, " + "down_cpu_secs=%d, " + "idle_cpu_secs=%d, " + "over_cpu_secs=%d, resv_cpu_secs=%d", + now, now, + c_usage->name, c_usage->start, + c_usage->cpu_count, c_usage->a_cpu, + c_usage->d_cpu, c_usage->i_cpu, + c_usage->o_cpu, c_usage->r_cpu, + now, + c_usage->cpu_count, c_usage->a_cpu, + c_usage->d_cpu, c_usage->i_cpu, + c_usage->o_cpu, c_usage->r_cpu); + } else { + xstrfmtcat(query, + "insert into %s (creation_time, " + "mod_time, cluster, period_start, " + "cpu_count, alloc_cpu_secs, " + "down_cpu_secs, idle_cpu_secs, " + "over_cpu_secs, resv_cpu_secs) " + "values (%d, %d, '%s', %d, %d, " + "%d, %d, %d, %d, %d) " + "on 
duplicate key update " + "mod_time=%d, cpu_count=%d, " + "alloc_cpu_secs=%d, " + "down_cpu_secs=%d, " + "idle_cpu_secs=%d, " + "over_cpu_secs=%d, resv_cpu_secs=%d", + cluster_hour_table, now, now, + c_usage->name, c_usage->start, + c_usage->cpu_count, c_usage->a_cpu, + c_usage->d_cpu, c_usage->i_cpu, + c_usage->o_cpu, c_usage->r_cpu, + now, + c_usage->cpu_count, c_usage->a_cpu, + c_usage->d_cpu, c_usage->i_cpu, + c_usage->o_cpu, c_usage->r_cpu); + } + } + if(query) { + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add cluster hour rollup"); + goto end_it; + } + } + + list_iterator_reset(a_itr); + while((a_usage = list_next(a_itr))) { +/* info("association (%d) %d alloc %d", */ +/* a_usage->assoc_id, last_id, */ +/* a_usage->a_cpu); */ + if(query) { + xstrfmtcat(query, + ", (%d, %d, %d, %d, %d, " + "%d, %d, %d, %d) " + "on duplicate key update " + "mod_time=%d, alloc_cpu_secs=%d", + now, now, + a_usage->assoc_id, curr_start, + a_usage->a_cpu, + now, a_usage->a_cpu); + } else { + xstrfmtcat(query, + "insert into %s (creation_time, " + "mod_time, id, period_start, " + "alloc_cpu_secs) values " + "(%d, %d, %d, %d, %d) " + "on duplicate key update " + "mod_time=%d, alloc_cpu_secs=%d", + assoc_hour_table, now, now, + a_usage->assoc_id, curr_start, + a_usage->a_cpu, + now, a_usage->a_cpu); + } + } + + if(query) { + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add assoc hour rollup"); + goto end_it; + } + } + list_flush(assoc_usage_list); + list_flush(cluster_usage_list); + curr_start = curr_end; + curr_end = curr_start + add_sec; + } +end_it: + xfree(suspend_str); + xfree(event_str); + xfree(job_str); + list_iterator_destroy(a_itr); + list_iterator_destroy(c_itr); + + list_destroy(assoc_usage_list); + list_destroy(cluster_usage_list); +/* info("stop start %s", ctime(&curr_start)); */ +/* info("stop end %s", ctime(&curr_end)); */ + return rc; +} +extern int mysql_daily_rollup(mysql_conn_t *mysql_conn, + time_t start, time_t end) +{ + /* can't just add 86400 since daylight savings starts and ends every + * once in a while + */ + int rc = SLURM_SUCCESS; + struct tm start_tm; + time_t curr_start = start; + time_t curr_end; + time_t now = time(NULL); + char *query = NULL; + + if(!localtime_r(&curr_start, &start_tm)) { + error("Couldn't get localtime from day start %d", curr_start); + return SLURM_ERROR; + } + start_tm.tm_sec = 0; + start_tm.tm_min = 0; + start_tm.tm_hour = 0; + start_tm.tm_mday++; + start_tm.tm_isdst = -1; + curr_end = mktime(&start_tm); + + while(curr_start < end) { + debug3("curr day is now %d-%d", curr_start, curr_end); +/* info("start %s", ctime(&curr_start)); */ +/* info("end %s", ctime(&curr_end)); */ + query = xstrdup_printf( + "insert into %s (creation_time, mod_time, id, " + "period_start, alloc_cpu_secs) select %d, %d, id, " + "%d, @ASUM:=SUM(alloc_cpu_secs) from %s where " + "(period_start < %d && period_start >= %d) " + "group by id on duplicate key update mod_time=%d, " + "alloc_cpu_secs=@ASUM;", + assoc_day_table, now, now, curr_start, + assoc_hour_table, + curr_end, curr_start, now); + xstrfmtcat(query, + "insert into %s (creation_time, " + "mod_time, cluster, period_start, cpu_count, " + "alloc_cpu_secs, down_cpu_secs, idle_cpu_secs, " + "over_cpu_secs, resv_cpu_secs) " + "select %d, %d, cluster, " + "%d, @CPU:=MAX(cpu_count), " + "@ASUM:=SUM(alloc_cpu_secs), " + 
"@DSUM:=SUM(down_cpu_secs), " + "@ISUM:=SUM(idle_cpu_secs), " + "@OSUM:=SUM(over_cpu_secs), " + "@RSUM:=SUM(resv_cpu_secs) from %s where " + "(period_start < %d && period_start >= %d) " + "group by cluster on duplicate key update " + "mod_time=%d, cpu_count=@CPU, " + "alloc_cpu_secs=@ASUM, down_cpu_secs=@DSUM, " + "idle_cpu_secs=@ISUM, over_cpu_secs=@OSUM, " + "resv_cpu_secs=@RSUM;", + cluster_day_table, now, now, curr_start, + cluster_hour_table, + curr_end, curr_start, now); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add day rollup"); + return SLURM_ERROR; + } + + curr_start = curr_end; + if(!localtime_r(&curr_start, &start_tm)) { + error("Couldn't get localtime from day start %d", + curr_start); + return SLURM_ERROR; + } + start_tm.tm_sec = 0; + start_tm.tm_min = 0; + start_tm.tm_hour = 0; + start_tm.tm_mday++; + start_tm.tm_isdst = -1; + curr_end = mktime(&start_tm); + } + /* remove all data from suspend table that was older than + * start. + */ + query = xstrdup_printf("delete from %s where end < %d && end != 0", + suspend_table, start); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't remove old suspend data"); + return SLURM_ERROR; + } + + +/* info("stop start %s", ctime(&curr_start)); */ +/* info("stop end %s", ctime(&curr_end)); */ + + return SLURM_SUCCESS; +} +extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn, + time_t start, time_t end) +{ + int rc = SLURM_SUCCESS; + struct tm start_tm; + time_t curr_start = start; + time_t curr_end; + time_t now = time(NULL); + char *query = NULL; + + if(!localtime_r(&curr_start, &start_tm)) { + error("Couldn't get localtime from month start %d", curr_start); + return SLURM_ERROR; + } + start_tm.tm_sec = 0; + start_tm.tm_min = 0; + start_tm.tm_hour = 0; + start_tm.tm_mday = 1; + start_tm.tm_mon++; + start_tm.tm_isdst = -1; + curr_end = mktime(&start_tm); + + while(curr_start < end) { + debug3("curr month is now %d-%d", curr_start, curr_end); +/* info("start %s", ctime(&curr_start)); */ +/* info("end %s", ctime(&curr_end)); */ + query = xstrdup_printf( + "insert into %s (creation_time, mod_time, id, " + "period_start, alloc_cpu_secs) select %d, %d, id, " + "%d, @ASUM:=SUM(alloc_cpu_secs) from %s where " + "(period_start < %d && period_start >= %d) " + "group by id on duplicate key update mod_time=%d, " + "alloc_cpu_secs=@ASUM;", + assoc_month_table, now, now, curr_start, + assoc_day_table, + curr_end, curr_start, now); + xstrfmtcat(query, + "insert into %s (creation_time, " + "mod_time, cluster, period_start, cpu_count, " + "alloc_cpu_secs, down_cpu_secs, idle_cpu_secs, " + "over_cpu_secs, resv_cpu_secs) " + "select %d, %d, cluster, " + "%d, @CPU:=MAX(cpu_count), " + "@ASUM:=SUM(alloc_cpu_secs), " + "@DSUM:=SUM(down_cpu_secs), " + "@ISUM:=SUM(idle_cpu_secs), " + "@OSUM:=SUM(over_cpu_secs), " + "@RSUM:=SUM(resv_cpu_secs) from %s where " + "(period_start < %d && period_start >= %d) " + "group by cluster on duplicate key update " + "mod_time=%d, cpu_count=@CPU, " + "alloc_cpu_secs=@ASUM, down_cpu_secs=@DSUM, " + "idle_cpu_secs=@ISUM, over_cpu_secs=@OSUM, " + "resv_cpu_secs=@RSUM;", + cluster_month_table, now, now, curr_start, + cluster_day_table, + curr_end, curr_start, now); + debug3("%d query\n%s", mysql_conn->conn, query); + rc = mysql_db_query(mysql_conn->acct_mysql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) { + error("Couldn't add 
day rollup"); + return SLURM_ERROR; + } + + curr_start = curr_end; + if(!localtime_r(&curr_start, &start_tm)) { + error("Couldn't get localtime from month start %d", + curr_start); + } + start_tm.tm_sec = 0; + start_tm.tm_min = 0; + start_tm.tm_hour = 0; + start_tm.tm_mday = 1; + start_tm.tm_mon++; + start_tm.tm_isdst = -1; + curr_end = mktime(&start_tm); + } + return SLURM_SUCCESS; +} + +#endif diff --git a/src/srun/launch.h b/src/plugins/accounting_storage/mysql/mysql_rollup.h similarity index 65% rename from src/srun/launch.h rename to src/plugins/accounting_storage/mysql/mysql_rollup.h index d0bd2bb1b..bc48a2814 100644 --- a/src/srun/launch.h +++ b/src/plugins/accounting_storage/mysql/mysql_rollup.h @@ -1,10 +1,13 @@ /*****************************************************************************\ - * src/srun/launch.h - header for srun launch thread + * mysql_rollup.h - functions for rolling up data for associations + * and machines from the mysql storage. ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -15,7 +18,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -35,33 +38,29 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
\*****************************************************************************/ -#ifndef _HAVE_LAUNCH_H -#define _HAVE_LAUNCH_H - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif +#ifndef _HAVE_MYSQL_ROLLUP_H +#define _HAVE_MYSQL_ROLLUP_H -#ifdef WITH_PTHREADS -# include <pthread.h> -#endif +#include "mysql_jobacct_process.h" -#include "src/common/macros.h" -#include "src/common/slurm_protocol_api.h" +#ifdef HAVE_MYSQL +extern char *assoc_table; +extern char *assoc_day_table; +extern char *assoc_hour_table; +extern char *assoc_month_table; +extern char *cluster_day_table; +extern char *cluster_hour_table; +extern char *cluster_month_table; +extern char *event_table; +extern char *suspend_table; -#include "src/srun/opt.h" -#include "src/srun/srun_job.h" +extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn, + time_t start, time_t end); +extern int mysql_daily_rollup(mysql_conn_t *mysql_conn, + time_t start, time_t end); +extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn, + time_t start, time_t end); -typedef struct launch_thr { - pthread_t thread; - pthread_attr_t attr; - char *host; /* name of host on which to run */ - int ntasks; /* number of tasks to initiate on host*/ - int *taskid; /* list of global task ids */ - int i; /* temporary index into array */ -} launch_thr_t; - -int launch_thr_create(srun_job_t *job); -void * launch(void *arg); +#endif -#endif /* !_HAVE_LAUNCH_H */ +#endif diff --git a/src/plugins/accounting_storage/none/Makefile.am b/src/plugins/accounting_storage/none/Makefile.am new file mode 100644 index 000000000..122e92c28 --- /dev/null +++ b/src/plugins/accounting_storage/none/Makefile.am @@ -0,0 +1,12 @@ +# Makefile for accounting_storage/none plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = accounting_storage_none.la + +accounting_storage_none_la_SOURCES = accounting_storage_none.c +accounting_storage_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) diff --git a/src/plugins/accounting_storage/none/Makefile.in b/src/plugins/accounting_storage/none/Makefile.in new file mode 100644 index 000000000..4d62249e9 --- /dev/null +++ b/src/plugins/accounting_storage/none/Makefile.in @@ -0,0 +1,555 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
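The daily and monthly rollup functions above deliberately advance their window boundary with localtime_r() and mktime() rather than by adding a fixed 86400 seconds, since a calendar day spans 23 or 25 hours across daylight-saving transitions. A minimal standalone sketch of that technique follows; the helper name next_day_start() is illustrative only and is not part of the plugin code.

#include <stdio.h>
#include <time.h>

/* Return the start of the calendar day following t.  Setting tm_isdst
 * to -1 lets mktime() decide whether DST applies, so the boundary is
 * correct even when the day is not exactly 86400 seconds long. */
static time_t next_day_start(time_t t)
{
	struct tm tm;

	if (!localtime_r(&t, &tm))
		return (time_t) -1;
	tm.tm_sec  = 0;
	tm.tm_min  = 0;
	tm.tm_hour = 0;
	tm.tm_mday++;		/* mktime() normalizes month/year overflow */
	tm.tm_isdst = -1;
	return mktime(&tm);
}

int main(void)
{
	time_t now = time(NULL);
	time_t next = next_day_start(now);

	printf("next day boundary in %ld seconds\n", (long)(next - now));
	return 0;
}

The monthly rollup follows the same pattern, but sets tm_mday to 1 and increments tm_mon instead of tm_mday.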
+ +@SET_MAKE@ + +# Makefile for accounting_storage/none plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/accounting_storage/none +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +accounting_storage_none_la_LIBADD = +am_accounting_storage_none_la_OBJECTS = accounting_storage_none.lo +accounting_storage_none_la_OBJECTS = \ + $(am_accounting_storage_none_la_OBJECTS) +accounting_storage_none_la_LINK = $(LIBTOOL) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(AM_CFLAGS) $(CFLAGS) $(accounting_storage_none_la_LDFLAGS) \ + $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(accounting_storage_none_la_SOURCES) +DIST_SOURCES = $(accounting_storage_none_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) 
+ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = 
@exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = accounting_storage_none.la +accounting_storage_none_la_SOURCES = accounting_storage_none.c +accounting_storage_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/accounting_storage/none/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/accounting_storage/none/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + 
test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +accounting_storage_none.la: $(accounting_storage_none_la_OBJECTS) $(accounting_storage_none_la_DEPENDENCIES) + $(accounting_storage_none_la_LINK) -rpath $(pkglibdir) $(accounting_storage_none_la_OBJECTS) $(accounting_storage_none_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_none.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + 
list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. 
+# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/accounting_storage/none/accounting_storage_none.c b/src/plugins/accounting_storage/none/accounting_storage_none.c new file mode 100644 index 000000000..0701440e2 --- /dev/null +++ b/src/plugins/accounting_storage/none/accounting_storage_none.c @@ -0,0 +1,349 @@ +/*****************************************************************************\ + * accounting_storage_none.c - account interface to none. + * + * $Id: accounting_storage_none.c 13061 2008-01-22 21:23:56Z da $ + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/common/slurm_accounting_storage.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. 
SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobacct/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. + */ +const char plugin_name[] = "Accounting storage NOT INVOKED plugin"; +const char plugin_type[] = "accounting_storage/none"; +const uint32_t plugin_version = 100; + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + verbose("%s loaded", plugin_name); + return SLURM_SUCCESS; +} + +extern int fini ( void ) +{ + return SLURM_SUCCESS; +} + +extern void * acct_storage_p_get_connection(bool make_agent, bool rollback) +{ + return NULL; +} + +extern int acct_storage_p_close_connection(void **db_conn) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_commit(void *db_conn, bool commit) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_users(void *db_conn, uint32_t uid, + List user_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_accts(void *db_conn, uint32_t uid, + List acct_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid, + List cluster_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid, + List association_list) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q, + acct_user_rec_t *user) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_accts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid, + acct_account_cond_t *cluster_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_get_users(void *db_conn, + acct_user_cond_t *user_q) +{ + return NULL; +} + +extern List acct_storage_p_get_accts(void *db_conn, + acct_account_cond_t *acct_q) +{ + return NULL; +} + +extern List acct_storage_p_get_clusters(void *db_conn, + acct_account_cond_t *cluster_q) +{ + return NULL; +} + +extern List 
acct_storage_p_get_associations(void *db_conn, + acct_association_cond_t *assoc_q) +{ + return NULL; +} + +extern int acct_storage_p_get_usage(void *db_conn, + acct_association_rec_t *acct_assoc, + time_t start, time_t end) +{ + int rc = SLURM_SUCCESS; + + return rc; +} + +extern int acct_storage_p_roll_usage(void *db_conn, + time_t sent_start) +{ + int rc = SLURM_SUCCESS; + + return rc; +} + +extern int clusteracct_storage_p_node_down(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, char *reason) +{ + return SLURM_SUCCESS; +} +extern int clusteracct_storage_p_node_up(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_register_ctld(char *cluster, + uint16_t port) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_cluster_procs(void *db_conn, + char *cluster, + uint32_t procs, + time_t event_time) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_get_usage( + void *db_conn, + acct_cluster_rec_t *cluster_rec, time_t start, time_t end) +{ + + return SLURM_SUCCESS; +} + +/* + * load into the storage the start of a job + */ +extern int jobacct_storage_p_job_start(void *db_conn, + struct job_record *job_ptr) +{ + return SLURM_SUCCESS; +} + +/* + * load into the storage the end of a job + */ +extern int jobacct_storage_p_job_complete(void *db_conn, + struct job_record *job_ptr) +{ + return SLURM_SUCCESS; +} + +/* + * load into the storage the start of a job step + */ +extern int jobacct_storage_p_step_start(void *db_conn, + struct step_record *step_ptr) +{ + return SLURM_SUCCESS; +} + +/* + * load into the storage the end of a job step + */ +extern int jobacct_storage_p_step_complete(void *db_conn, + struct step_record *step_ptr) +{ + return SLURM_SUCCESS; +} + +/* + * load into the storage a suspention of a job + */ +extern int jobacct_storage_p_suspend(void *db_conn, + struct job_record *job_ptr) +{ + return SLURM_SUCCESS; +} + +/* + * get info from the storage + * returns List of job_rec_t * + * note List needs to be freed when called + */ +extern List jobacct_storage_p_get_jobs(void *db_conn, + List selected_steps, + List selected_parts, + void *params) +{ + return NULL; +} + +/* + * expire old info from the storage + */ +extern void jobacct_storage_p_archive(void *db_conn, + List selected_parts, + void *params) +{ + return; +} + +extern int acct_storage_p_update_shares_used(void *db_conn, + List shares_used) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_flush_jobs_on_cluster( + void *db_conn, char *cluster, time_t event_time) +{ + return SLURM_SUCCESS; +} diff --git a/src/plugins/accounting_storage/pgsql/Makefile.am b/src/plugins/accounting_storage/pgsql/Makefile.am new file mode 100644 index 000000000..3c0a2833a --- /dev/null +++ b/src/plugins/accounting_storage/pgsql/Makefile.am @@ -0,0 +1,20 @@ +# Makefile for accounting_storage/pgsql plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = accounting_storage_pgsql.la + +# Pgsql storage plugin. 
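+# Built as a libtool module; linked against the in-tree
+# libslurm_pgsql.la library and the PostgreSQL client libraries
+# selected at configure time ($(PGSQL_LIBS)).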
+accounting_storage_pgsql_la_SOURCES = accounting_storage_pgsql.c \ + pgsql_jobacct_process.c pgsql_jobacct_process.h +accounting_storage_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +accounting_storage_pgsql_la_CFLAGS = $(PGSQL_CFLAGS) +accounting_storage_pgsql_la_LIBADD = \ + $(top_builddir)/src/database/libslurm_pgsql.la $(PGSQL_LIBS) +accounting_storage_pgsql_la_DEPENDENCIES = \ + $(top_builddir)/src/database/libslurm_pgsql.la + diff --git a/src/plugins/accounting_storage/pgsql/Makefile.in b/src/plugins/accounting_storage/pgsql/Makefile.in new file mode 100644 index 000000000..c7c14a438 --- /dev/null +++ b/src/plugins/accounting_storage/pgsql/Makefile.in @@ -0,0 +1,583 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for accounting_storage/pgsql plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/accounting_storage/pgsql +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 
's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am_accounting_storage_pgsql_la_OBJECTS = \ + accounting_storage_pgsql_la-accounting_storage_pgsql.lo \ + accounting_storage_pgsql_la-pgsql_jobacct_process.lo +accounting_storage_pgsql_la_OBJECTS = \ + $(am_accounting_storage_pgsql_la_OBJECTS) +accounting_storage_pgsql_la_LINK = $(LIBTOOL) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(accounting_storage_pgsql_la_CFLAGS) $(CFLAGS) \ + $(accounting_storage_pgsql_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(accounting_storage_pgsql_la_SOURCES) +DIST_SOURCES = $(accounting_storage_pgsql_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = 
@PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = accounting_storage_pgsql.la + +# Pgsql storage plugin. +accounting_storage_pgsql_la_SOURCES = accounting_storage_pgsql.c \ + pgsql_jobacct_process.c pgsql_jobacct_process.h + +accounting_storage_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +accounting_storage_pgsql_la_CFLAGS = $(PGSQL_CFLAGS) +accounting_storage_pgsql_la_LIBADD = \ + $(top_builddir)/src/database/libslurm_pgsql.la $(PGSQL_LIBS) + +accounting_storage_pgsql_la_DEPENDENCIES = \ + $(top_builddir)/src/database/libslurm_pgsql.la + +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/accounting_storage/pgsql/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/accounting_storage/pgsql/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +accounting_storage_pgsql.la: $(accounting_storage_pgsql_la_OBJECTS) $(accounting_storage_pgsql_la_DEPENDENCIES) + $(accounting_storage_pgsql_la_LINK) -rpath $(pkglibdir) $(accounting_storage_pgsql_la_OBJECTS) $(accounting_storage_pgsql_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_pgsql_la-accounting_storage_pgsql.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_pgsql_la-pgsql_jobacct_process.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ 
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +accounting_storage_pgsql_la-accounting_storage_pgsql.lo: accounting_storage_pgsql.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_pgsql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_pgsql_la-accounting_storage_pgsql.lo -MD -MP -MF $(DEPDIR)/accounting_storage_pgsql_la-accounting_storage_pgsql.Tpo -c -o accounting_storage_pgsql_la-accounting_storage_pgsql.lo `test -f 'accounting_storage_pgsql.c' || echo '$(srcdir)/'`accounting_storage_pgsql.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/accounting_storage_pgsql_la-accounting_storage_pgsql.Tpo $(DEPDIR)/accounting_storage_pgsql_la-accounting_storage_pgsql.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='accounting_storage_pgsql.c' object='accounting_storage_pgsql_la-accounting_storage_pgsql.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_pgsql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_pgsql_la-accounting_storage_pgsql.lo `test -f 'accounting_storage_pgsql.c' || echo '$(srcdir)/'`accounting_storage_pgsql.c + +accounting_storage_pgsql_la-pgsql_jobacct_process.lo: pgsql_jobacct_process.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_pgsql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_pgsql_la-pgsql_jobacct_process.lo -MD -MP -MF $(DEPDIR)/accounting_storage_pgsql_la-pgsql_jobacct_process.Tpo -c -o accounting_storage_pgsql_la-pgsql_jobacct_process.lo `test -f 'pgsql_jobacct_process.c' || echo '$(srcdir)/'`pgsql_jobacct_process.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/accounting_storage_pgsql_la-pgsql_jobacct_process.Tpo $(DEPDIR)/accounting_storage_pgsql_la-pgsql_jobacct_process.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pgsql_jobacct_process.c' object='accounting_storage_pgsql_la-pgsql_jobacct_process.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_pgsql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_pgsql_la-pgsql_jobacct_process.lo `test -f 'pgsql_jobacct_process.c' || echo '$(srcdir)/'`pgsql_jobacct_process.c + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ 
files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c new file mode 100644 index 000000000..0219e9517 --- /dev/null +++ b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c @@ -0,0 +1,1566 @@ +/*****************************************************************************\ + * accounting_storage_pgsql.c - accounting interface to pgsql. + * + * $Id: accounting_storage_pgsql.c 13061 2008-01-22 21:23:56Z da $ + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include <strings.h> +#include "pgsql_jobacct_process.h" +#include "src/common/slurmdbd_defs.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobacct/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. 
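+ *
+ * For this plugin the <application>/<method> pair is
+ * "accounting_storage/pgsql", as declared in plugin_type below.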
+ */ +const char plugin_name[] = "Accounting storage PGSQL plugin"; +const char plugin_type[] = "accounting_storage/pgsql"; +const uint32_t plugin_version = 100; +#ifndef HAVE_PGSQL +typedef void PGconn; +#else +#define DEFAULT_ACCT_DB "slurm_acct_db" + +static pgsql_db_info_t *pgsql_db_info = NULL; +static char *pgsql_db_name = NULL; + +char *acct_coord_table = "acct_coord_table"; +char *acct_table = "acct_table"; +char *assoc_day_table = "assoc_day_usage_table"; +char *assoc_hour_table = "assoc_hour_usage_table"; +char *assoc_month_table = "assoc_month_usage_table"; +char *assoc_table = "assoc_table"; +char *cluster_day_table = "cluster_day_usage_table"; +char *cluster_hour_table = "cluster_hour_usage_table"; +char *cluster_month_table = "cluster_month_usage_table"; +char *cluster_table = "cluster_table"; +char *event_table = "cluster_event_table"; +char *job_table = "job_table"; +char *step_table = "step_table"; +char *txn_table = "txn_table"; +char *user_table = "user_table"; +char *last_ran_table = "last_ran_table"; +char *suspend_table = "suspend_table"; + +static int _get_db_index(PGconn *acct_pgsql_db, + time_t submit, uint32_t jobid, uint32_t associd) +{ + PGresult *result = NULL; + int db_index = -1; + char *query = xstrdup_printf("select id from %s where " + "submit=%u and jobid=%u and associd=%u", + job_table, (int)submit, jobid, associd); + + if(!(result = pgsql_db_query_ret(acct_pgsql_db, query))) { + xfree(query); + return -1; + } + + xfree(query); + + if(!PQntuples(result)) { + PQclear(result); + error("We can't get a db_index for this combo, " + "submit=%u and jobid=%u and associd=%u.", + (int)submit, jobid, associd); + return -1; + } + db_index = atoi(PQgetvalue(result, 0, 0)); + PQclear(result); + + return db_index; +} + + +static pgsql_db_info_t *_pgsql_acct_create_db_info() +{ + pgsql_db_info_t *db_info = xmalloc(sizeof(pgsql_db_info_t)); + db_info->port = slurm_get_accounting_storage_port(); + /* it turns out it is better if using defaults to let postgres + handle them on it's own terms */ + if(!db_info->port) + db_info->port = 5432; + db_info->host = slurm_get_accounting_storage_host(); + if(!db_info->host) + db_info->host = xstrdup("localhost"); + db_info->user = slurm_get_accounting_storage_user(); + db_info->pass = slurm_get_accounting_storage_pass(); + return db_info; +} + +static int _pgsql_acct_check_tables(PGconn *acct_pgsql_db, + char *user) +{ + storage_field_t acct_coord_table_fields[] = { + { "deleted", "smallint default 0" }, + { "acct", "text not null" }, + { "user_name", "text not null" }, + { NULL, NULL} + }; + + storage_field_t acct_table_fields[] = { + { "creation_time", "bigint not null" }, + { "mod_time", "bigint default 0" }, + { "deleted", "smallint default 0" }, + { "name", "text not null" }, + { "description", "text not null" }, + { "organization", "text not null" }, + { "qos", "smallint default 1 not null" }, + { NULL, NULL} + }; + + storage_field_t assoc_table_fields[] = { + { "creation_time", "bigint not null" }, + { "mod_time", "bigint default 0" }, + { "deleted", "smallint default 0" }, + { "id", "serial" }, + { "user_name", "text not null default ''" }, + { "acct", "text not null" }, + { "cluster", "text not null" }, + { "partition", "text not null default ''" }, + { "parent", "int not null" }, + { "lft", "int not null" }, + { "rgt", "int not null" }, + { "fairshare", "int default 1 not null" }, + { "max_jobs", "int default NULL" }, + { "max_nodes_per_job", "int default NULL" }, + { "max_wall_duration_per_job", "int default NULL" }, + { 
"max_cpu_seconds_per_job", "int default NULL" }, + { NULL, NULL} + }; + + storage_field_t assoc_usage_table_fields[] = { + { "creation_time", "bigint not null" }, + { "mod_time", "bigint default 0" }, + { "deleted", "smallint default 0" }, + { "associd", "int not null" }, + { "period_start", "bigint not null" }, + { "alloc_cpu_secs", "bigint default 0" }, + { NULL, NULL} + }; + + storage_field_t cluster_table_fields[] = { + { "creation_time", "bigint not null" }, + { "mod_time", "bigint default 0" }, + { "deleted", "smallint default 0" }, + { "name", "text not null" }, + { "control_host", "text not null" }, + { "control_port", "int not null" }, + { NULL, NULL} + }; + + storage_field_t cluster_usage_table_fields[] = { + { "creation_time", "bigint not null" }, + { "mod_time", "bigint default 0" }, + { "deleted", "smallint default 0" }, + { "cluster", "text not null" }, + { "period_start", "bigint not null" }, + { "cpu_count", "bigint default 0" }, + { "alloc_cpu_secs", "bigint default 0" }, + { "down_cpu_secs", "bigint default 0" }, + { "idle_cpu_secs", "bigint default 0" }, + { "resv_cpu_secs", "bigint default 0" }, + { "over_cpu_secs", "bigint default 0" }, + { NULL, NULL} + }; + + storage_field_t event_table_fields[] = { + { "node_name", "text default '' not null" }, + { "cluster", "text not null" }, + { "cpu_count", "int not null" }, + { "period_start", "bigint not null" }, + { "period_end", "bigint default 0 not null" }, + { "reason", "text not null" }, + { NULL, NULL} + }; + + storage_field_t job_table_fields[] = { + { "id", "serial" }, + { "jobid", "integer not null" }, + { "associd", "bigint not null" }, + { "uid", "smallint not null" }, + { "gid", "smallint not null" }, + { "partition", "text not null" }, + { "blockid", "text" }, + { "account", "text" }, + { "submit", "bigint not null" }, + { "eligible", "bigint default 0 not null" }, + { "start", "bigint default 0 not null" }, + { "endtime", "bigint default 0 not null" }, + { "suspended", "bigint default 0 not null" }, + { "name", "text not null" }, + { "track_steps", "smallint not null" }, + { "state", "smallint not null" }, + { "comp_code", "int default 0 not null" }, + { "priority", "bigint not null" }, + { "req_cpus", "int not null" }, + { "alloc_cpus", "int not null" }, + { "nodelist", "text" }, + { "kill_requid", "smallint default -1 not null" }, + { "qos", "smallint default 0" }, + { NULL, NULL} + }; + + storage_field_t last_ran_table_fields[] = { + { "hourly_rollup", "bigint default 0 not null" }, + { "daily_rollup", "bigint default 0 not null" }, + { "monthly_rollup", "bigint default 0 not null" }, + { NULL, NULL} + }; + + storage_field_t step_table_fields[] = { + { "id", "int not null" }, + { "stepid", "smallint not null" }, + { "start", "bigint default 0 not null" }, + { "endtime", "bigint default 0 not null" }, + { "suspended", "bigint default 0 not null" }, + { "name", "text not null" }, + { "nodelist", "text not null" }, + { "state", "smallint not null" }, + { "kill_requid", "smallint default -1 not null" }, + { "comp_code", "int default 0 not null" }, + { "cpus", "int not null" }, + { "user_sec", "bigint default 0 not null" }, + { "user_usec", "bigint default 0 not null" }, + { "sys_sec", "bigint default 0 not null" }, + { "sys_usec", "bigint default 0 not null" }, + { "max_vsize", "integer default 0 not null" }, + { "max_vsize_task", "smallint default 0 not null" }, + { "max_vsize_node", "integer default 0 not null" }, + { "ave_vsize", "float default 0.0 not null" }, + { "max_rss", "integer default 0 not null" }, + 
{ "max_rss_task", "smallint default 0 not null" }, + { "max_rss_node", "integer default 0 not null" }, + { "ave_rss", "float default 0.0 not null" }, + { "max_pages", "integer default 0 not null" }, + { "max_pages_task", "smallint default 0 not null" }, + { "max_pages_node", "integer default 0 not null" }, + { "ave_pages", "float default 0.0 not null" }, + { "min_cpu", "integer default 0 not null" }, + { "min_cpu_task", "smallint default 0 not null" }, + { "min_cpu_node", "integer default 0 not null" }, + { "ave_cpu", "float default 0.0 not null" }, + { NULL, NULL} + }; + + storage_field_t suspend_table_fields[] = { + { "id", "int not null" }, + { "associd", "bigint not null" }, + { "start", "bigint default 0 not null" }, + { "endtime", "bigint default 0 not null" }, + { NULL, NULL} + }; + + storage_field_t txn_table_fields[] = { + { "id", "serial" }, + { "timestamp", "bigint default 0" }, + { "action", "smallint not null" }, + { "name", "text not null" }, + { "actor", "text not null" }, + { "info", "text not null" }, + { NULL, NULL} + }; + + storage_field_t user_table_fields[] = { + { "creation_time", "bigint not null" }, + { "mod_time", "bigint default 0" }, + { "deleted", "smallint default 0" }, + { "name", "text not null" }, + { "default_acct", "text not null" }, + { "qos", "smallint default 1 not null" }, + { "admin_level", "smallint default 1 not null" }, + { NULL, NULL} + }; + + int i = 0, job_found = 0; + int step_found = 0, txn_found = 0, event_found = 0; + int user_found = 0, acct_found = 0, acct_coord_found = 0; + int cluster_found = 0, cluster_hour_found = 0, + cluster_day_found = 0, cluster_month_found = 0; + int assoc_found = 0, assoc_hour_found = 0, + assoc_day_found = 0, assoc_month_found = 0; + int suspend_found = 0, last_ran_found = 0; + + PGresult *result = NULL; + char *query = xstrdup_printf("select tablename from pg_tables " + "where tableowner='%s' " + "and tablename !~ '^pg_+'", user); + + if(!(result = + pgsql_db_query_ret(acct_pgsql_db, query))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + for (i = 0; i < PQntuples(result); i++) { + if(!acct_coord_found && + !strcmp(acct_coord_table, PQgetvalue(result, i, 0))) + acct_coord_found = 1; + else if(!acct_found && + !strcmp(acct_table, PQgetvalue(result, i, 0))) + acct_found = 1; + else if(!assoc_found && + !strcmp(assoc_table, PQgetvalue(result, i, 0))) + assoc_found = 1; + else if(!assoc_day_found && + !strcmp(assoc_day_table, PQgetvalue(result, i, 0))) + assoc_day_found = 1; + else if(!assoc_hour_found && + !strcmp(assoc_hour_table, PQgetvalue(result, i, 0))) + assoc_hour_found = 1; + else if(!assoc_month_found && + !strcmp(assoc_month_table, PQgetvalue(result, i, 0))) + assoc_month_found = 1; + else if(!cluster_found && + !strcmp(cluster_table, PQgetvalue(result, i, 0))) + cluster_found = 1; + else if(!cluster_day_found && + !strcmp(cluster_day_table, PQgetvalue(result, i, 0))) + cluster_day_found = 1; + else if(!cluster_hour_found && + !strcmp(cluster_hour_table, PQgetvalue(result, i, 0))) + cluster_hour_found = 1; + else if(!cluster_month_found && + !strcmp(cluster_month_table, PQgetvalue(result, i, 0))) + cluster_month_found = 1; + else if(!event_found && + !strcmp(event_table, PQgetvalue(result, i, 0))) + event_found = 1; + else if(!job_found && + !strcmp(job_table, PQgetvalue(result, i, 0))) + job_found = 1; + else if(!last_ran_found && + !strcmp(last_ran_table, PQgetvalue(result, i, 0))) + last_ran_found = 1; + else if(!step_found && + !strcmp(step_table, PQgetvalue(result, i, 0))) + 
step_found = 1; + else if(!suspend_found && + !strcmp(suspend_table, PQgetvalue(result, i, 0))) + suspend_found = 1; + else if(!txn_found && + !strcmp(txn_table, PQgetvalue(result, i, 0))) + txn_found = 1; + else if(!user_found && + !strcmp(user_table, PQgetvalue(result, i, 0))) + user_found = 1; + } + PQclear(result); + + if(!acct_coord_found) { + if(pgsql_db_create_table(acct_pgsql_db, + acct_coord_table, + acct_coord_table_fields, + ", unique (acct, user_name))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + acct_coord_table, + acct_coord_table_fields)) + return SLURM_ERROR; + } + + if(!acct_found) { + if(pgsql_db_create_table(acct_pgsql_db, + acct_table, acct_table_fields, + ", unique (name))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + acct_table, + acct_table_fields)) + return SLURM_ERROR; + } + + if(!assoc_day_found) { + if(pgsql_db_create_table( + acct_pgsql_db, + assoc_day_table, + assoc_usage_table_fields, + ", unique (associd, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + assoc_day_table, + assoc_usage_table_fields)) + return SLURM_ERROR; + } + + if(!assoc_hour_found) { + if(pgsql_db_create_table( + acct_pgsql_db, + assoc_hour_table, + assoc_usage_table_fields, + ", unique (associd, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + assoc_hour_table, + assoc_usage_table_fields)) + return SLURM_ERROR; + } + + if(!assoc_month_found) { + if(pgsql_db_create_table( + acct_pgsql_db, + assoc_month_table, + assoc_usage_table_fields, + ", unique (associd, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + assoc_month_table, + assoc_usage_table_fields)) + return SLURM_ERROR; + } + + if(!assoc_found) { + if(pgsql_db_create_table( + acct_pgsql_db, + assoc_table, assoc_table_fields, + ", unique (user_name, acct, cluster, partition))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + assoc_table, + assoc_table_fields)) + return SLURM_ERROR; + } + + if(!cluster_day_found) { + if(pgsql_db_create_table( + acct_pgsql_db, + cluster_day_table, + cluster_usage_table_fields, + ", unique (cluster, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + cluster_day_table, + cluster_usage_table_fields)) + return SLURM_ERROR; + } + + if(!cluster_hour_found) { + if(pgsql_db_create_table( + acct_pgsql_db, + cluster_hour_table, + cluster_usage_table_fields, + ", unique (cluster, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + cluster_hour_table, + cluster_usage_table_fields)) + return SLURM_ERROR; + } + + if(!cluster_month_found) { + if(pgsql_db_create_table( + acct_pgsql_db, + cluster_month_table, + cluster_usage_table_fields, + ", unique (cluster, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + cluster_month_table, + cluster_usage_table_fields)) + return SLURM_ERROR; + } + + if(!cluster_found) { + if(pgsql_db_create_table(acct_pgsql_db, + cluster_table, cluster_table_fields, + ", unique (name))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + cluster_table, + cluster_table_fields)) + return 
SLURM_ERROR; + } + + if(!event_found) { + if(pgsql_db_create_table(acct_pgsql_db, + event_table, event_table_fields, + ", unique (node_name, " + "cluster, period_start))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + event_table, + event_table_fields)) + return SLURM_ERROR; + } + + if(!job_found) { + if(pgsql_db_create_table(acct_pgsql_db, + job_table, job_table_fields, + ", unique (jobid, associd, submit))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + job_table, + job_table_fields)) + return SLURM_ERROR; + } + + if(!last_ran_found) { + if(pgsql_db_create_table(acct_pgsql_db, + last_ran_table, last_ran_table_fields, + ")") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + last_ran_table, + last_ran_table_fields)) + return SLURM_ERROR; + } + + if(!step_found) { + if(pgsql_db_create_table(acct_pgsql_db, + step_table, step_table_fields, + ", unique (id, stepid))") + == SLURM_ERROR) + return SLURM_ERROR; + + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + step_table, + step_table_fields)) + return SLURM_ERROR; + } + + if(!suspend_found) { + if(pgsql_db_create_table(acct_pgsql_db, + suspend_table, suspend_table_fields, + ")") + == SLURM_ERROR) + return SLURM_ERROR; + + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + suspend_table, + suspend_table_fields)) + return SLURM_ERROR; + } + + if(!txn_found) { + if(pgsql_db_create_table(acct_pgsql_db, + txn_table, txn_table_fields, + ", unique (id))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + txn_table, + txn_table_fields)) + return SLURM_ERROR; + } + + if(!user_found) { + if(pgsql_db_create_table(acct_pgsql_db, + user_table, user_table_fields, + ", unique (name))") + == SLURM_ERROR) + return SLURM_ERROR; + } else { + if(pgsql_db_make_table_current(acct_pgsql_db, + user_table, + user_table_fields)) + return SLURM_ERROR; + } + + return SLURM_SUCCESS; +} +#endif + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + static int first = 1; + int rc = SLURM_SUCCESS; +#ifdef HAVE_PGSQL + PGconn *acct_pgsql_db = NULL; + char *location = NULL; +#else + fatal("No Postgres database was found on the machine. " + "Please check the configure log and run again."); +#endif + /* since this can be loaded from many different places + only tell us once. */ + if(!first) + return SLURM_SUCCESS; + + first = 0; + +#ifdef HAVE_PGSQL + pgsql_db_info = _pgsql_acct_create_db_info(); + + location = slurm_get_accounting_storage_loc(); + if(!location) + pgsql_db_name = xstrdup(DEFAULT_ACCT_DB); + else { + int i = 0; + while(location[i]) { + if(location[i] == '.' || location[i] == '/') { + debug("%s doesn't look like a database " + "name using %s", + location, DEFAULT_ACCT_DB); + break; + } + i++; + } + if(location[i]) { + pgsql_db_name = xstrdup(DEFAULT_ACCT_DB); + xfree(location); + } else + pgsql_db_name = location; + } + + debug2("pgsql_connect() called for db %s", pgsql_db_name); + + pgsql_get_db_connection(&acct_pgsql_db, pgsql_db_name, pgsql_db_info); + + rc = _pgsql_acct_check_tables(acct_pgsql_db, pgsql_db_info->user); + pgsql_close_db_connection(&acct_pgsql_db); +#endif + /* since this can be loaded from many different places + only tell us once. 
*/ + if(rc == SLURM_SUCCESS) + verbose("%s loaded", plugin_name); + else + verbose("%s failed", plugin_name); + + return rc; +} + +extern int fini ( void ) +{ +#ifdef HAVE_PGSQL + destroy_pgsql_db_info(pgsql_db_info); + xfree(pgsql_db_name); + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern void *acct_storage_p_get_connection(bool make_agent, bool rollback) +{ +#ifdef HAVE_PGSQL + PGconn *acct_pgsql_db = NULL; + + if(!pgsql_db_info) + init(); + + debug2("acct_storage_p_get_connection: request new connection"); + + pgsql_get_db_connection(&acct_pgsql_db, pgsql_db_name, pgsql_db_info); + + return (void *)acct_pgsql_db; +#else + return NULL; +#endif +} + +extern int acct_storage_p_close_connection(PGconn **acct_pgsql_db) +{ +#ifdef HAVE_PGSQL + if(acct_pgsql_db && *acct_pgsql_db) + pgsql_close_db_connection(acct_pgsql_db); + + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern int acct_storage_p_commit(void *db_conn, bool commit) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_users(PGconn *acct_pgsql_db, uint32_t uid, + List user_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_coord(PGconn *acct_pgsql_db, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_accts(PGconn *acct_pgsql_db, uint32_t uid, + List acct_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_clusters(PGconn *acct_pgsql_db, uint32_t uid, + List cluster_list) +{ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_add_associations(PGconn *acct_pgsql_db, uint32_t uid, + List association_list) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_users(PGconn *acct_pgsql_db, uint32_t uid, + acct_user_cond_t *user_q, + acct_user_rec_t *user) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_accts(PGconn *acct_pgsql_db, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_clusters(PGconn *acct_pgsql_db, uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_modify_associations(PGconn *acct_pgsql_db, + uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_users(PGconn *acct_pgsql_db, uint32_t uid, + acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_coord(PGconn *acct_pgsql_db, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_accts(PGconn *acct_pgsql_db, uint32_t uid, + acct_account_cond_t *acct_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_clusters(PGconn *acct_pgsql_db, uint32_t uid, + acct_account_cond_t *cluster_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_remove_associations(PGconn *acct_pgsql_db, + uint32_t uid, + acct_association_cond_t *assoc_q) +{ + return SLURM_SUCCESS; +} + +extern List acct_storage_p_get_users(PGconn *acct_pgsql_db, + acct_user_cond_t *user_q) +{ + return NULL; +} + +extern List acct_storage_p_get_accts(PGconn *acct_pgsql_db, + acct_account_cond_t *acct_q) +{ + return NULL; +} + +extern List acct_storage_p_get_clusters(PGconn *acct_pgsql_db, + acct_account_cond_t *cluster_q) +{ + return NULL; +} + +extern List acct_storage_p_get_associations(PGconn *acct_pgsql_db, + acct_association_cond_t 
*assoc_q) +{ + return NULL; +} + +extern int acct_storage_p_get_usage(PGconn *acct_pgsql_db, + acct_association_rec_t *acct_assoc, + time_t start, time_t end) +{ + int rc = SLURM_SUCCESS; + + return rc; +} + +extern int acct_storage_p_roll_usage(PGconn *acct_pgsql_db, + time_t sent_start) +{ + int rc = SLURM_SUCCESS; + + return rc; +} + +extern int clusteracct_storage_p_node_down(PGconn *acct_pgsql_db, + char *cluster, + struct node_record *node_ptr, + time_t event_time, char *reason) +{ +#ifdef HAVE_PGSQL + uint16_t cpus; + int rc = SLURM_ERROR; + char *query = NULL; + char *my_reason; + + if (slurmctld_conf.fast_schedule && !slurmdbd_conf) + cpus = node_ptr->config_ptr->cpus; + else + cpus = node_ptr->cpus; + + if (reason) + my_reason = reason; + else + my_reason = node_ptr->reason; + + query = xstrdup_printf( + "update %s set period_end=%d where cluster='%s' " + "and period_end=0 and node_name='%s'", + event_table, (event_time-1), cluster, node_ptr->name); + rc = pgsql_db_query(acct_pgsql_db, query); + xfree(query); + + debug2("inserting %s(%s) with %u cpus", node_ptr->name, cluster, cpus); + + query = xstrdup_printf( + "insert into %s " + "(node_name, cluster, cpu_count, period_start, reason) " + "values ('%s', '%s', %u, %d, '%s')", + event_table, node_ptr->name, cluster, + cpus, event_time, my_reason); + rc = pgsql_db_query(acct_pgsql_db, query); + xfree(query); + + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} +extern int clusteracct_storage_p_node_up(PGconn *acct_pgsql_db, + char *cluster, + struct node_record *node_ptr, + time_t event_time) +{ +#ifdef HAVE_PGSQL + char* query; + int rc = SLURM_ERROR; + + query = xstrdup_printf( + "update %s set period_end=%d where cluster='%s' " + "and period_end=0 and node_name='%s'", + event_table, (event_time-1), cluster, node_ptr->name); + rc = pgsql_db_query(acct_pgsql_db, query); + xfree(query); + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int clusteracct_storage_p_register_ctld(char *cluster, + uint16_t port) +{ + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_cluster_procs(PGconn *acct_pgsql_db, + char *cluster, + uint32_t procs, + time_t event_time) +{ +#ifdef HAVE_PGSQL + static uint32_t last_procs = -1; + char* query; + int rc = SLURM_SUCCESS; + PGresult *result = NULL; + int got_procs = 0; + + if (procs == last_procs) { + debug3("we have the same procs as before no need to " + "update the database."); + return SLURM_SUCCESS; + } + last_procs = procs; + + /* Record the processor count */ +#if _DEBUG + slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff)); + info("cluster_acct_procs: %s has %u total CPUs at %s", + cluster, procs, tmp_buff); +#endif + query = xstrdup_printf( + "select cpu_count from %s where cluster='%s' " + "and period_end=0 and node_name=''", + event_table, cluster); + if(!(result = pgsql_db_query_ret(acct_pgsql_db, query))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + /* we only are checking the first one here */ + if(!PQntuples(result)) { + debug("We don't have an entry for this machine %s " + "most likely a first time running.", cluster); + goto add_it; + } + got_procs = atoi(PQgetvalue(result, 0, 0)); + if(got_procs == procs) { + debug("%s hasn't changed since last entry", cluster); + goto end_it; + } + debug("%s has changed from %d cpus to %u", cluster, got_procs, procs); + + query = xstrdup_printf( + "update %s set period_end=%u where cluster='%s' " + "and period_end=0 and node_name=''", + event_table, (event_time-1), cluster); + rc = 
pgsql_db_query(acct_pgsql_db, query); + xfree(query); + if(rc != SLURM_SUCCESS) + goto end_it; +add_it: + query = xstrdup_printf( + "insert into %s (cluster, cpu_count, period_start) " + "values ('%s', %u, %d)", + event_table, cluster, procs, event_time); + rc = pgsql_db_query(acct_pgsql_db, query); + xfree(query); + +end_it: + PQclear(result); + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int clusteracct_storage_p_get_usage( + void *db_conn, + acct_cluster_rec_t *cluster_rec, time_t start, time_t end) +{ + + return SLURM_SUCCESS; +} + +/* + * load into the storage the start of a job + */ +extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db, + struct job_record *job_ptr) +{ +#ifdef HAVE_PGSQL + int rc=SLURM_SUCCESS; + char *jname, *nodes; + long priority; + int track_steps = 0; + char *block_id = NULL; + char *query = NULL; + int reinit = 0; + + if (!job_ptr->details || !job_ptr->details->submit_time) { + error("jobacct_storage_p_job_start: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) { + if(!pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, pgsql_db_info)) + return SLURM_ERROR; + } + + debug3("pgsql_jobacct_job_start() called"); + priority = (job_ptr->priority == NO_VAL) ? + -1L : (long) job_ptr->priority; + + if (job_ptr->name && job_ptr->name[0]) { + int i; + jname = xmalloc(strlen(job_ptr->name) + 1); + for (i=0; job_ptr->name[i]; i++) { + if (isalnum(job_ptr->name[i])) + jname[i] = job_ptr->name[i]; + else + jname[i] = '_'; + } + } else { + jname = xstrdup("allocation"); + track_steps = 1; + } + + + if (job_ptr->nodes && job_ptr->nodes[0]) + nodes = job_ptr->nodes; + else + nodes = "(null)"; + + if(job_ptr->batch_flag) + track_steps = 1; + + if(slurmdbd_conf) { + block_id = xstrdup(job_ptr->comment); + } else { + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_BLOCK_ID, + &block_id); + } + job_ptr->requid = -1; /* force to -1 for sacct to know this + * hasn't been set yet */ + + if(!job_ptr->db_index) { + query = xstrdup_printf( + "insert into %s " + "(jobid, account, associd, uid, gid, partition, " + "blockid, eligible, submit, start, name, track_steps, " + "state, priority, req_cpus, alloc_cpus, nodelist) " + "values (%u, '%s', %u, %u, %u, '%s', '%s', " + "%d, %d, %d, '%s', %u, " + "%u, %u, %u, %u, '%s')", + job_table, job_ptr->job_id, job_ptr->account, + job_ptr->assoc_id, + job_ptr->user_id, job_ptr->group_id, + job_ptr->partition, block_id, + (int)job_ptr->details->begin_time, + (int)job_ptr->details->submit_time, + (int)job_ptr->start_time, + jname, track_steps, + job_ptr->job_state & (~JOB_COMPLETING), + priority, job_ptr->num_procs, + job_ptr->total_procs, nodes); + try_again: + if(!(job_ptr->db_index = pgsql_insert_ret_id(acct_pgsql_db, + "job_table_id_seq", + query))) { + if(!reinit) { + error("It looks like the storage has gone " + "away trying to reconnect"); + pgsql_close_db_connection(&acct_pgsql_db); + pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, + pgsql_db_info); + reinit = 1; + goto try_again; + } else + rc = SLURM_ERROR; + } + } else { + query = xstrdup_printf( + "update %s set partition='%s', blockid='%s', start=%d, " + "name='%s', state=%u, alloc_cpus=%u, nodelist='%s', " + "account='%s' where id=%d", + job_table, job_ptr->partition, block_id, + (int)job_ptr->start_time, + jname, + job_ptr->job_state & (~JOB_COMPLETING), + job_ptr->total_procs, nodes, + job_ptr->account, job_ptr->db_index); + rc = 
pgsql_db_query(acct_pgsql_db, query); + } + xfree(block_id); + xfree(jname); + + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage the end of a job + */ +extern int jobacct_storage_p_job_complete(PGconn *acct_pgsql_db, + struct job_record *job_ptr) +{ +#ifdef HAVE_PGSQL + char *query = NULL, *nodes = NULL; + int rc=SLURM_SUCCESS; + + if (!job_ptr->db_index + && (!job_ptr->details || !job_ptr->details->submit_time)) { + error("jobacct_storage_p_job_complete: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) { + if(!pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, pgsql_db_info)) + return SLURM_ERROR; + } + + debug3("pgsql_jobacct_job_complete() called"); + if (job_ptr->end_time == 0) { + debug("pgsql_jobacct: job %u never started", job_ptr->job_id); + return SLURM_ERROR; + } + + if (job_ptr->nodes && job_ptr->nodes[0]) + nodes = job_ptr->nodes; + else + nodes = "(null)"; + + if(!job_ptr->db_index) { + job_ptr->db_index = _get_db_index(acct_pgsql_db, + job_ptr->details->submit_time, + job_ptr->job_id, + job_ptr->assoc_id); + if(job_ptr->db_index == -1) + return SLURM_ERROR; + } + query = xstrdup_printf("update %s set start=%u, endtime=%u, state=%d, " + "nodelist='%s', comp_code=%u, " + "kill_requid=%u where id=%u", + job_table, (int)job_ptr->start_time, + (int)job_ptr->end_time, + job_ptr->job_state & (~JOB_COMPLETING), + nodes, job_ptr->exit_code, + job_ptr->requid, job_ptr->db_index); + rc = pgsql_db_query(acct_pgsql_db, query); + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage the start of a job step + */ +extern int jobacct_storage_p_step_start(PGconn *acct_pgsql_db, + struct step_record *step_ptr) +{ +#ifdef HAVE_PGSQL + int cpus = 0; + int rc=SLURM_SUCCESS; + char node_list[BUFFER_SIZE]; +#ifdef HAVE_BG + char *ionodes = NULL; +#endif + char *query = NULL; + + if (!step_ptr->job_ptr->db_index + && (!step_ptr->job_ptr->details + || !step_ptr->job_ptr->details->submit_time)) { + error("jobacct_storage_p_step_start: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) { + if(!pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, pgsql_db_info)) + return SLURM_ERROR; + } + + if(slurmdbd_conf) { + cpus = step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + } else { +#ifdef HAVE_BG + cpus = step_ptr->job_ptr->num_procs; + select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + &ionodes); + if(ionodes) { + snprintf(node_list, BUFFER_SIZE, + "%s[%s]", step_ptr->job_ptr->nodes, ionodes); + xfree(ionodes); + } else + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + +#else + if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) { + cpus = step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + } else { + cpus = step_ptr->step_layout->task_cnt; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->step_layout->node_list); + } +#endif + } + + step_ptr->job_ptr->requid = -1; /* force to -1 for sacct to know this + * hasn't been set yet */ + + if(!step_ptr->job_ptr->db_index) { + step_ptr->job_ptr->db_index = + _get_db_index(acct_pgsql_db, + step_ptr->job_ptr->details->submit_time, + step_ptr->job_ptr->job_id, + step_ptr->job_ptr->assoc_id); + 
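+		/* A db_index of -1 means the parent job could not be found in the job table, so the step record cannot be written. (Editor's descriptive comment.) */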
if(step_ptr->job_ptr->db_index == -1) + return SLURM_ERROR; + } + /* we want to print a -1 for the requid so leave it a + %d */ + query = xstrdup_printf( + "insert into %s (id, stepid, start, name, state, " + "cpus, nodelist) " + "values (%d, %u, %u, '%s', %d, %u, '%s')", + step_table, step_ptr->job_ptr->db_index, + step_ptr->step_id, + (int)step_ptr->start_time, step_ptr->name, + JOB_RUNNING, cpus, node_list); + rc = pgsql_db_query(acct_pgsql_db, query); + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage the end of a job step + */ +extern int jobacct_storage_p_step_complete(PGconn *acct_pgsql_db, + struct step_record *step_ptr) +{ +#ifdef HAVE_PGSQL + time_t now; + int elapsed; + int comp_status; + int cpus = 0; + struct jobacctinfo *jobacct = (struct jobacctinfo *)step_ptr->jobacct; + struct jobacctinfo dummy_jobacct; + float ave_vsize = 0, ave_rss = 0, ave_pages = 0; + float ave_cpu = 0, ave_cpu2 = 0; + char *query = NULL; + int rc =SLURM_SUCCESS; + + if (!step_ptr->job_ptr->db_index + && (!step_ptr->job_ptr->details + || !step_ptr->job_ptr->details->submit_time)) { + error("jobacct_storage_p_step_complete: " + "Not inputing this job, it has no submit time."); + return SLURM_ERROR; + } + + if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) { + if(!pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, pgsql_db_info)) + return SLURM_ERROR; + } + + if (jobacct == NULL) { + /* JobAcctGather=jobacct_gather/none, no data to process */ + bzero(&dummy_jobacct, sizeof(dummy_jobacct)); + jobacct = &dummy_jobacct; + } + + if(slurmdbd_conf) { + now = step_ptr->job_ptr->end_time; + cpus = step_ptr->job_ptr->total_procs; + + } else { + now = time(NULL); +#ifdef HAVE_BG + cpus = step_ptr->job_ptr->num_procs; + +#else + if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) + cpus = step_ptr->job_ptr->total_procs; + else + cpus = step_ptr->step_layout->task_cnt; +#endif + } + + if ((elapsed=now-step_ptr->start_time)<0) + elapsed=0; /* For *very* short jobs, if clock is wrong */ + if (step_ptr->exit_code) + comp_status = JOB_FAILED; + else + comp_status = JOB_COMPLETE; + + /* figure out the ave of the totals sent */ + if(cpus > 0) { + ave_vsize = jobacct->tot_vsize; + ave_vsize /= cpus; + ave_rss = jobacct->tot_rss; + ave_rss /= cpus; + ave_pages = jobacct->tot_pages; + ave_pages /= cpus; + ave_cpu = jobacct->tot_cpu; + ave_cpu /= cpus; + ave_cpu /= 100; + } + + if(jobacct->min_cpu != (uint32_t)NO_VAL) { + ave_cpu2 = jobacct->min_cpu; + ave_cpu2 /= 100; + } + + if(!step_ptr->job_ptr->db_index) { + step_ptr->job_ptr->db_index = + _get_db_index(acct_pgsql_db, + step_ptr->job_ptr->details->submit_time, + step_ptr->job_ptr->job_id, + step_ptr->job_ptr->assoc_id); + if(step_ptr->job_ptr->db_index == -1) + return SLURM_ERROR; + } + + query = xstrdup_printf( + "update %s set endtime=%u, state=%d, " + "kill_requid=%u, comp_code=%u, " + "user_sec=%ld, user_usec=%ld, " + "sys_sec=%ld, sys_usec=%ld, " + "max_vsize=%u, max_vsize_task=%u, " + "max_vsize_node=%u, ave_vsize=%.2f, " + "max_rss=%u, max_rss_task=%u, " + "max_rss_node=%u, ave_rss=%.2f, " + "max_pages=%u, max_pages_task=%u, " + "max_pages_node=%u, ave_pages=%.2f, " + "min_cpu=%.2f, min_cpu_task=%u, " + "min_cpu_node=%u, ave_cpu=%.2f " + "where id=%u and stepid=%u", + step_table, (int)now, + comp_status, + step_ptr->job_ptr->requid, + step_ptr->exit_code, + /* user seconds */ + jobacct->user_cpu_sec, + /* user microseconds */ + jobacct->user_cpu_usec, + /* system seconds */ + 
jobacct->sys_cpu_sec, + /* system microsecs */ + jobacct->sys_cpu_usec, + jobacct->max_vsize, /* max vsize */ + jobacct->max_vsize_id.taskid, /* max vsize task */ + jobacct->max_vsize_id.nodeid, /* max vsize node */ + ave_vsize, /* ave vsize */ + jobacct->max_rss, /* max vsize */ + jobacct->max_rss_id.taskid, /* max rss task */ + jobacct->max_rss_id.nodeid, /* max rss node */ + ave_rss, /* ave rss */ + jobacct->max_pages, /* max pages */ + jobacct->max_pages_id.taskid, /* max pages task */ + jobacct->max_pages_id.nodeid, /* max pages node */ + ave_pages, /* ave pages */ + ave_cpu2, /* min cpu */ + jobacct->min_cpu_id.taskid, /* min cpu task */ + jobacct->min_cpu_id.nodeid, /* min cpu node */ + ave_cpu, /* ave cpu */ + step_ptr->job_ptr->db_index, step_ptr->step_id); + rc = pgsql_db_query(acct_pgsql_db, query); + xfree(query); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * load into the storage a suspention of a job + */ +extern int jobacct_storage_p_suspend(PGconn *acct_pgsql_db, + struct job_record *job_ptr) +{ +#ifdef HAVE_PGSQL + char query[1024]; + int rc = SLURM_SUCCESS; + + if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) { + if(!pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, pgsql_db_info)) + return SLURM_ERROR; + } + + if(!job_ptr->db_index) { + job_ptr->db_index = _get_db_index(acct_pgsql_db, + job_ptr->details->submit_time, + job_ptr->job_id, + job_ptr->assoc_id); + if(job_ptr->db_index == -1) + return SLURM_ERROR; + } + + snprintf(query, sizeof(query), + "update %s set suspended=%u-suspended, state=%d " + "where id=%u", + job_table, (int)job_ptr->suspend_time, + job_ptr->job_state & (~JOB_COMPLETING), + job_ptr->db_index); + rc = pgsql_db_query(acct_pgsql_db, query); + if(rc != SLURM_ERROR) { + snprintf(query, sizeof(query), + "update %s set suspended=%u-suspended, " + "state=%d where id=%u and endtime=0", + step_table, (int)job_ptr->suspend_time, + job_ptr->job_state, job_ptr->db_index); + rc = pgsql_db_query(acct_pgsql_db, query); + } + + return rc; +#else + return SLURM_ERROR; +#endif +} + +/* + * get info from the storage + * returns List of job_rec_t * + * note List needs to be freed when called + */ +extern List jobacct_storage_p_get_jobs(PGconn *acct_pgsql_db, + List selected_steps, + List selected_parts, + void *params) +{ + List job_list = NULL; +#ifdef HAVE_PGSQL + if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) { + if(!pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, pgsql_db_info)) + return job_list; + } + + job_list = pgsql_jobacct_process_get_jobs(acct_pgsql_db, + selected_steps, + selected_parts, + params); +#endif + return job_list; +} + +/* + * expire old info from the storage + */ +extern void jobacct_storage_p_archive(PGconn *acct_pgsql_db, + List selected_parts, + void *params) +{ +#ifdef HAVE_PGSQL + if(!acct_pgsql_db || PQstatus(acct_pgsql_db) != CONNECTION_OK) { + if(!pgsql_get_db_connection(&acct_pgsql_db, + pgsql_db_name, pgsql_db_info)) + return; + } + + pgsql_jobacct_process_archive(acct_pgsql_db, selected_parts, params); +#endif + return; +} + +extern int acct_storage_p_update_shares_used(void *db_conn, + List shares_used) +{ + /* This definitely needs to be fleshed out. 
+ * Go through the list of shares_used_object_t objects and store them */ + return SLURM_SUCCESS; +} + +extern int acct_storage_p_flush_jobs_on_cluster( + void *db_conn, char *cluster, time_t event_time) +{ + /* put end times for a clean start */ + + + + + return SLURM_SUCCESS; +} diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c new file mode 100644 index 000000000..c1ff95fb8 --- /dev/null +++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c @@ -0,0 +1,491 @@ +/*****************************************************************************\ + * pgsql_jobacct_process.c - functions the processing of + * information from the pgsql jobacct + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#include <stdlib.h> +#include "pgsql_jobacct_process.h" + +#ifdef HAVE_PGSQL +static void _do_fdump(List job_list) +{ + info("fdump option not applicable from pgsql plugin"); + return; +} + +extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db, + List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ + + char *query = NULL; + char *extra = NULL; + char *tmp = NULL; + char *selected_part = NULL; + jobacct_selected_step_t *selected_step = NULL; + ListIterator itr = NULL; + int set = 0; + PGresult *result = NULL, *step_result = NULL; + int i, j; + jobacct_job_rec_t *job = NULL; + jobacct_step_rec_t *step = NULL; + time_t now = time(NULL); + List job_list = list_create(destroy_jobacct_job_rec); + + /* if this changes you will need to edit the corresponding + * enum below also t1 is job_table */ + char *job_req_inx[] = { + "t1.id", + "t1.jobid", + "t1.associd", + "t1.uid", + "t1.gid", + "t1.partition", + "t1.blockid", + "t1.account", + "t1.eligible", + "t1.submit", + "t1.start", + "t1.endtime", + "t1.suspended", + "t1.name", + "t1.track_steps", + "t1.state", + "t1.comp_code", + "t1.priority", + "t1.req_cpus", + "t1.alloc_cpus", + "t1.nodelist", + "t1.kill_requid", + "t1.qos", + }; + + /* if this changes you will need to edit the corresponding + * enum below also t1 is step_table */ + char *step_req_inx[] = { + "t1.stepid", + "t1.start", + "t1.endtime", + "t1.suspended", + "t1.name", + "t1.nodelist", + "t1.state", + "t1.kill_requid", + "t1.comp_code", + "t1.cpus", + "t1.user_sec", + "t1.user_usec", + "t1.sys_sec", + "t1.sys_usec", + "t1.max_vsize", + "t1.max_vsize_task", + "t1.max_vsize_node", + "t1.ave_vsize", + "t1.max_rss", + "t1.max_rss_task", + "t1.max_rss_node", + "t1.ave_rss", + "t1.max_pages", + "t1.max_pages_task", + "t1.max_pages_node", + "t1.ave_pages", + "t1.min_cpu", + "t1.min_cpu_task", + "t1.min_cpu_node", + "t1.ave_cpu", + }; + + enum { + JOB_REQ_ID, + JOB_REQ_JOBID, + JOB_REQ_ASSOCID, + JOB_REQ_UID, + JOB_REQ_GID, + JOB_REQ_PARTITION, + JOB_REQ_BLOCKID, + JOB_REQ_ACCOUNT, + JOB_REQ_ELIGIBLE, + JOB_REQ_SUBMIT, + JOB_REQ_START, + JOB_REQ_ENDTIME, + JOB_REQ_SUSPENDED, + JOB_REQ_NAME, + JOB_REQ_TRACKSTEPS, + JOB_REQ_STATE, + JOB_REQ_COMP_CODE, + JOB_REQ_PRIORITY, + JOB_REQ_REQ_CPUS, + JOB_REQ_ALLOC_CPUS, + JOB_REQ_NODELIST, + JOB_REQ_KILL_REQUID, + JOB_REQ_QOS, + JOB_REQ_COUNT + }; + enum { + STEP_REQ_STEPID, + STEP_REQ_START, + STEP_REQ_ENDTIME, + STEP_REQ_SUSPENDED, + STEP_REQ_NAME, + STEP_REQ_NODELIST, + STEP_REQ_STATE, + STEP_REQ_KILL_REQUID, + STEP_REQ_COMP_CODE, + STEP_REQ_CPUS, + STEP_REQ_USER_SEC, + STEP_REQ_USER_USEC, + STEP_REQ_SYS_SEC, + STEP_REQ_SYS_USEC, + STEP_REQ_MAX_VSIZE, + STEP_REQ_MAX_VSIZE_TASK, + STEP_REQ_MAX_VSIZE_NODE, + STEP_REQ_AVE_VSIZE, + STEP_REQ_MAX_RSS, + STEP_REQ_MAX_RSS_TASK, + STEP_REQ_MAX_RSS_NODE, + STEP_REQ_AVE_RSS, + STEP_REQ_MAX_PAGES, + STEP_REQ_MAX_PAGES_TASK, + STEP_REQ_MAX_PAGES_NODE, + STEP_REQ_AVE_PAGES, + STEP_REQ_MIN_CPU, + STEP_REQ_MIN_CPU_TASK, + STEP_REQ_MIN_CPU_NODE, + STEP_REQ_AVE_CPU, + STEP_REQ_COUNT + }; + + if(selected_steps && list_count(selected_steps)) { + set = 0; + xstrcat(extra, " and ("); + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if(set) + xstrcat(extra, " or "); + tmp = xstrdup_printf("t1.jobid=%u", + selected_step->jobid); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(selected_parts && 
list_count(selected_parts)) { + set = 0; + xstrcat(extra, " and ("); + itr = list_iterator_create(selected_parts); + while((selected_part = list_next(itr))) { + if(set) + xstrcat(extra, " or "); + tmp = xstrdup_printf("t1.partition='%s'", + selected_part); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + for(i=0; i<JOB_REQ_COUNT; i++) { + if(i) + xstrcat(tmp, ", "); + xstrcat(tmp, job_req_inx[i]); + } + + query = xstrdup_printf("select %s from %s t1", + tmp, job_table); + xfree(tmp); + + if(extra) { + xstrcat(query, extra); + xfree(extra); + } + + //info("query = %s", query); + if(!(result = pgsql_db_query_ret(acct_pgsql_db, query))) { + xfree(query); + list_destroy(job_list); + return NULL; + } + xfree(query); + + for (i = 0; i < PQntuples(result); i++) { + char *id = PQgetvalue(result, i, JOB_REQ_ID); + acct_association_rec_t account_rec; + memset(&account_rec, 0, sizeof(acct_association_rec_t)); + job = create_jobacct_job_rec(); + + job->alloc_cpus = atoi(PQgetvalue(result, i, + JOB_REQ_ALLOC_CPUS)); + job->associd = atoi(PQgetvalue(result, i, JOB_REQ_ASSOCID)); + account_rec.id = job->associd; + assoc_mgr_fill_in_assoc(acct_pgsql_db, &account_rec, 0, NULL); + if(account_rec.cluster) { + if(params->opt_cluster && + strcmp(params->opt_cluster, account_rec.cluster)) { + destroy_jobacct_job_rec(job); + job = NULL; + continue; + } + job->cluster = xstrdup(account_rec.cluster); + } + if(account_rec.user) + job->user = xstrdup(account_rec.user); + else + job->uid = atoi(PQgetvalue(result, i, JOB_REQ_UID)); + if(account_rec.acct) + job->account = xstrdup(account_rec.acct); + else + job->account = xstrdup(PQgetvalue(result, i, + JOB_REQ_ACCOUNT)); + job->blockid = xstrdup(PQgetvalue(result, i, + JOB_REQ_BLOCKID)); + job->eligible = atoi(PQgetvalue(result, i, JOB_REQ_SUBMIT)); + job->submit = atoi(PQgetvalue(result, i, JOB_REQ_SUBMIT)); + job->start = atoi(PQgetvalue(result, i, JOB_REQ_START)); + job->end = atoi(PQgetvalue(result, i, JOB_REQ_ENDTIME)); + job->suspended = atoi(PQgetvalue(result, i, JOB_REQ_SUSPENDED)); + if(!job->end) { + job->elapsed = now - job->start; + } else { + job->elapsed = job->end - job->start; + } + job->elapsed -= job->suspended; + + job->jobid = atoi(PQgetvalue(result, i, JOB_REQ_JOBID)); + job->jobname = xstrdup(PQgetvalue(result, i, JOB_REQ_NAME)); + job->gid = atoi(PQgetvalue(result, i, JOB_REQ_GID)); + job->exitcode = atoi(PQgetvalue(result, i, JOB_REQ_COMP_CODE)); + job->partition = xstrdup(PQgetvalue(result, i, + JOB_REQ_PARTITION)); + job->nodes = xstrdup(PQgetvalue(result, i, JOB_REQ_NODELIST)); + if (!strcmp(job->nodes, "(null)")) { + xfree(job->nodes); + job->nodes = xstrdup("(unknown)"); + } + + job->track_steps = atoi(PQgetvalue(result, i, + JOB_REQ_TRACKSTEPS)); + job->state = atoi(PQgetvalue(result, i, JOB_REQ_STATE)); + job->priority = atoi(PQgetvalue(result, i, JOB_REQ_PRIORITY)); + job->req_cpus = atoi(PQgetvalue(result, i, JOB_REQ_REQ_CPUS)); + job->requid = atoi(PQgetvalue(result, i, JOB_REQ_KILL_REQUID)); + job->qos = atoi(PQgetvalue(result, i, JOB_REQ_QOS)); + job->show_full = 1; + + list_append(job_list, job); + + if(selected_steps && list_count(selected_steps)) { + set = 0; + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if(selected_step->jobid != job->jobid) { + continue; + } else if (selected_step->stepid + == (uint32_t)NO_VAL) { + job->show_full = 1; + break; + } + + if(set) + xstrcat(extra, " or "); + else + xstrcat(extra, " and ("); 
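+				/* OR together the requested step ids so only the selected steps of this job are fetched below. (Editor's descriptive comment.) */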
+ + tmp = xstrdup_printf("t1.stepid=%u", + selected_step->stepid); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + job->show_full = 0; + } + list_iterator_destroy(itr); + if(set) + xstrcat(extra, ")"); + } + for(j=0; j<STEP_REQ_COUNT; j++) { + if(j) + xstrcat(tmp, ", "); + xstrcat(tmp, step_req_inx[j]); + } + + query = xstrdup_printf("select %s from %s t1 where t1.id=%s", + tmp, step_table, id); + xfree(tmp); + + if(extra) { + xstrcat(query, extra); + xfree(extra); + } + + //info("query = %s", query); + if(!(step_result = pgsql_db_query_ret(acct_pgsql_db, query))) { + xfree(query); + list_destroy(job_list); + return NULL; + } + xfree(query); + for(j = 0; j < PQntuples(step_result); j++) { + step = create_jobacct_step_rec(); + step->jobid = job->jobid; + list_append(job->steps, step); + step->stepid = atoi( + PQgetvalue(step_result, j, STEP_REQ_STEPID)); + /* info("got step %u.%u", */ +/* job->header.jobnum, step->stepnum); */ + step->state = atoi( + PQgetvalue(step_result, j, STEP_REQ_STATE)); + step->exitcode = atoi( + PQgetvalue(step_result, j, + STEP_REQ_COMP_CODE)); + step->ncpus = atoi( + PQgetvalue(step_result, j, STEP_REQ_CPUS)); + step->start = atoi( + PQgetvalue(step_result, j, JOB_REQ_START)); + step->end = atoi( + PQgetvalue(step_result, j, STEP_REQ_ENDTIME)); + /* figure this out by start stop */ + step->suspended = atoi( + PQgetvalue(step_result, j, STEP_REQ_SUSPENDED)); + if(!step->end) { + step->elapsed = now - step->start; + } else { + step->elapsed = step->end - step->start; + } + step->elapsed -= step->suspended; + step->user_cpu_sec = atoi( + PQgetvalue(step_result, j, STEP_REQ_USER_SEC)); + step->user_cpu_usec = atoi( + PQgetvalue(step_result, j, STEP_REQ_USER_USEC)); + step->sys_cpu_sec = atoi( + PQgetvalue(step_result, j, STEP_REQ_SYS_SEC)); + step->sys_cpu_usec = atoi( + PQgetvalue(step_result, j, STEP_REQ_SYS_USEC)); + job->tot_cpu_sec += + step->tot_cpu_sec += + step->user_cpu_sec + step->sys_cpu_sec; + job->tot_cpu_usec += + step->tot_cpu_usec += + step->user_cpu_usec + step->sys_cpu_usec; + step->sacct.max_vsize = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_VSIZE)) * 1024; + step->sacct.max_vsize_id.taskid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_VSIZE_TASK)); + step->sacct.ave_vsize = atof( + PQgetvalue(step_result, j, + STEP_REQ_AVE_VSIZE)) * 1024; + step->sacct.max_rss = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_RSS)) * 1024; + step->sacct.max_rss_id.taskid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_RSS_TASK)); + step->sacct.max_rss = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_RSS)) * 1024; + step->sacct.max_rss_id.taskid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_RSS_TASK)); + step->sacct.ave_rss = atof( + PQgetvalue(step_result, j, + STEP_REQ_AVE_RSS)) * 1024; + step->sacct.max_pages = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_PAGES)); + step->sacct.max_pages_id.taskid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_PAGES_TASK)); + step->sacct.ave_pages = atof( + PQgetvalue(step_result, j, + STEP_REQ_AVE_PAGES)); + step->sacct.min_cpu = atof( + PQgetvalue(step_result, j, STEP_REQ_MIN_CPU)); + step->sacct.min_cpu_id.taskid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MIN_CPU_TASK)); + step->sacct.ave_cpu = atof( + PQgetvalue(step_result, j, STEP_REQ_AVE_CPU)); + step->stepname = xstrdup( + PQgetvalue(step_result, j, STEP_REQ_NAME)); + step->nodes = xstrdup( + PQgetvalue(step_result, j, STEP_REQ_NODELIST)); + step->sacct.max_vsize_id.nodeid = atoi( + PQgetvalue(step_result, j, + 
STEP_REQ_MAX_VSIZE_NODE)); + step->sacct.max_rss_id.nodeid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_RSS_NODE)); + step->sacct.max_pages_id.nodeid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MAX_PAGES_NODE)); + step->sacct.min_cpu_id.nodeid = atoi( + PQgetvalue(step_result, j, + STEP_REQ_MIN_CPU_NODE)); + + step->requid = atoi(PQgetvalue(step_result, j, + STEP_REQ_KILL_REQUID)); + } + PQclear(step_result); + + if(list_count(job->steps) > 1) + job->track_steps = 1; + } + PQclear(result); + + if (params && params->opt_fdump) + _do_fdump(job_list); + + return job_list; +} + +extern void pgsql_jobacct_process_archive(PGconn *acct_pgsql_db, + List selected_parts, + sacct_parameters_t *params) +{ + return; +} + +#endif diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h new file mode 100644 index 000000000..255f1a9d7 --- /dev/null +++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h @@ -0,0 +1,71 @@ +/*****************************************************************************\ + * pgsql_jobacct_process.h - functions the processing of + * information from the pgsql jobacct + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#ifndef _HAVE_PGSQL_JOBACCT_PROCESS_H +#define _HAVE_PGSQL_JOBACCT_PROCESS_H + +#include <sys/types.h> +#include <pwd.h> +#include <stdlib.h> +#include "src/common/assoc_mgr.h" +#include "src/common/jobacct_common.h" +#include "src/slurmdbd/read_config.h" +#include "src/slurmctld/slurmctld.h" +#include "src/database/pgsql_common.h" +#include "src/common/slurm_accounting_storage.h" + +#ifdef HAVE_PGSQL + +extern char *job_table; +extern char *step_table; + +extern List pgsql_jobacct_process_get_jobs(PGconn *acct_pgsql_db, + List selected_steps, + List selected_parts, + sacct_parameters_t *params); + +extern void pgsql_jobacct_process_archive(PGconn *acct_pgsql_db, + List selected_parts, + sacct_parameters_t *params); +#endif + +#endif diff --git a/src/plugins/accounting_storage/slurmdbd/Makefile.am b/src/plugins/accounting_storage/slurmdbd/Makefile.am new file mode 100644 index 000000000..ab90155fe --- /dev/null +++ b/src/plugins/accounting_storage/slurmdbd/Makefile.am @@ -0,0 +1,16 @@ +# Makefile for accounting_storage/slurmdbd plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = accounting_storage_slurmdbd.la + +# Null job completion logging plugin. +accounting_storage_slurmdbd_la_SOURCES = accounting_storage_slurmdbd.c +accounting_storage_slurmdbd_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) + + + diff --git a/src/plugins/accounting_storage/slurmdbd/Makefile.in b/src/plugins/accounting_storage/slurmdbd/Makefile.in new file mode 100644 index 000000000..66483726f --- /dev/null +++ b/src/plugins/accounting_storage/slurmdbd/Makefile.in @@ -0,0 +1,558 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +# Makefile for accounting_storage/slurmdbd plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/accounting_storage/slurmdbd +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +accounting_storage_slurmdbd_la_LIBADD = +am_accounting_storage_slurmdbd_la_OBJECTS = \ + accounting_storage_slurmdbd.lo +accounting_storage_slurmdbd_la_OBJECTS = \ + $(am_accounting_storage_slurmdbd_la_OBJECTS) +accounting_storage_slurmdbd_la_LINK = $(LIBTOOL) --tag=CC \ + $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ + $(AM_CFLAGS) $(CFLAGS) \ + $(accounting_storage_slurmdbd_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(accounting_storage_slurmdbd_la_SOURCES) +DIST_SOURCES = $(accounting_storage_slurmdbd_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) 
$(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ 
+dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = accounting_storage_slurmdbd.la + +# Null job completion logging plugin. +accounting_storage_slurmdbd_la_SOURCES = accounting_storage_slurmdbd.c +accounting_storage_slurmdbd_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/accounting_storage/slurmdbd/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/accounting_storage/slurmdbd/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +accounting_storage_slurmdbd.la: $(accounting_storage_slurmdbd_la_OBJECTS) $(accounting_storage_slurmdbd_la_DEPENDENCIES) + $(accounting_storage_slurmdbd_la_LINK) -rpath $(pkglibdir) $(accounting_storage_slurmdbd_la_OBJECTS) $(accounting_storage_slurmdbd_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_slurmdbd.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + 
+mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c new file mode 100644 index 000000000..3dd41ad22 --- /dev/null +++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c @@ -0,0 +1,1233 @@ +/*****************************************************************************\ + * accounting_storage_slurmdbd.c - accounting interface to slurmdbd. + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_STDINT_H +# include <stdint.h> +#endif +#if HAVE_INTTYPES_H +# include <inttypes.h> +#endif + +#include <stdio.h> +#include <sys/types.h> +#include <pwd.h> + +#include <slurm/slurm_errno.h> + +#include "src/common/jobacct_common.h" +#include "src/common/read_config.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurmdbd_defs.h" +#include "src/common/xstring.h" +#include "src/slurmctld/slurmctld.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobacct/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. + */ +const char plugin_name[] = "Accounting storage SLURMDBD plugin"; +const char plugin_type[] = "accounting_storage/slurmdbd"; +const uint32_t plugin_version = 100; + +static char *slurmdbd_auth_info = NULL; + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + static int first = 1; + char *cluster_name = NULL; + + if (first) { + /* since this can be loaded from many different places + only tell us once. 
*/ + if (!(cluster_name = slurm_get_cluster_name())) + fatal("%s requires ClusterName in slurm.conf", + plugin_name); + xfree(cluster_name); + slurmdbd_auth_info = slurm_get_accounting_storage_pass(); + if(!slurmdbd_auth_info) + verbose("%s loaded AuthInfo=%s", + plugin_name, slurmdbd_auth_info); + first = 0; + } else { + debug4("%s loaded", plugin_name); + } + + return SLURM_SUCCESS; +} + +extern int fini ( void ) +{ + xfree(slurmdbd_auth_info); + + return SLURM_SUCCESS; +} + +extern void *acct_storage_p_get_connection(bool make_agent, bool rollback) +{ + if(!slurmdbd_auth_info) + init(); + slurm_open_slurmdbd_conn(slurmdbd_auth_info, make_agent, rollback); + + return NULL; +} + +extern int acct_storage_p_close_connection(void **db_conn) +{ + return slurm_close_slurmdbd_conn(); +} + +extern int acct_storage_p_commit(void *db_conn, bool commit) +{ + slurmdbd_msg_t req; + dbd_fini_msg_t get_msg; + int rc, resp_code; + + memset(&get_msg, 0, sizeof(dbd_fini_msg_t)); + + get_msg.close_conn = 0; + get_msg.commit = (uint16_t)commit; + + req.msg_type = DBD_FINI; + req.data = &get_msg; + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) + rc = resp_code; + + return rc; +} + +extern int acct_storage_p_add_users(void *db_conn, uint32_t uid, List user_list) +{ + slurmdbd_msg_t req; + dbd_list_msg_t get_msg; + int rc, resp_code; + + get_msg.my_list = user_list; + + req.msg_type = DBD_ADD_USERS; + req.data = &get_msg; + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) + rc = resp_code; + + return rc; +} + +extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + slurmdbd_msg_t req; + dbd_acct_coord_msg_t get_msg; + int rc, resp_code; + + get_msg.acct = acct; + get_msg.cond = user_q; + + req.msg_type = DBD_ADD_ACCOUNT_COORDS; + req.data = &get_msg; + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) + rc = resp_code; + + return rc; +} + +extern int acct_storage_p_add_accts(void *db_conn, uint32_t uid, List acct_list) +{ + slurmdbd_msg_t req; + dbd_list_msg_t get_msg; + int rc, resp_code; + + get_msg.my_list = acct_list; + + req.msg_type = DBD_ADD_ACCOUNTS; + req.data = &get_msg; + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) + rc = resp_code; + + return rc; +} + +extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid, List cluster_list) +{ + slurmdbd_msg_t req; + dbd_list_msg_t get_msg; + int rc, resp_code; + + get_msg.my_list = cluster_list; + + req.msg_type = DBD_ADD_CLUSTERS; + req.data = &get_msg; + + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) { + rc = resp_code; + } + return rc; +} + +extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid, + List association_list) +{ + slurmdbd_msg_t req; + dbd_list_msg_t get_msg; + int rc, resp_code; + + get_msg.my_list = association_list; + + req.msg_type = DBD_ADD_ASSOCS; + req.data = &get_msg; + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) + rc = resp_code; + + return rc; +} + +extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q, + acct_user_rec_t *user) +{ + slurmdbd_msg_t req, resp; + dbd_modify_msg_t get_msg; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + int rc; + + get_msg.cond = user_q; + get_msg.rec = user; + + req.msg_type = DBD_MODIFY_USERS; + req.data = &get_msg; 
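+	/* Send the request to slurmdbd and wait for the reply; anything other than a DBD_GOT_LIST response is treated as an error below. (Editor's descriptive comment.) */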
+ rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_MODIFY_USERS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_modify_accts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q, + acct_account_rec_t *acct) +{ + slurmdbd_msg_t req, resp; + dbd_modify_msg_t get_msg; + dbd_list_msg_t *got_msg; + int rc; + List ret_list = NULL; + + get_msg.cond = acct_q; + get_msg.rec = acct; + + req.msg_type = DBD_MODIFY_ACCOUNTS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_MODIFY_ACCOUNTS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid, + acct_cluster_cond_t *cluster_q, + acct_cluster_rec_t *cluster) +{ + slurmdbd_msg_t req; + dbd_modify_msg_t get_msg; + int rc; + slurmdbd_msg_t resp; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + + get_msg.cond = cluster_q; + get_msg.rec = cluster; + + req.msg_type = DBD_MODIFY_CLUSTERS; + req.data = &get_msg; + + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_MODIFY_CLUSTERS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q, + acct_association_rec_t *assoc) +{ + slurmdbd_msg_t req; + dbd_modify_msg_t get_msg; + int rc; + slurmdbd_msg_t resp; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + + + get_msg.cond = assoc_q; + get_msg.rec = assoc; + + req.msg_type = DBD_MODIFY_ASSOCS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_MODIFY_ASSOCS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid, + acct_user_cond_t *user_q) +{ + slurmdbd_msg_t req; + dbd_cond_msg_t get_msg; + int rc; + slurmdbd_msg_t resp; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + + + get_msg.cond = user_q; + + req.msg_type = DBD_REMOVE_USERS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_REMOVE_USERS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + 
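+		/* Detach the list from the response message before freeing it so the caller keeps ownership of ret_list. (Editor's descriptive comment.) */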
slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid, + char *acct, acct_user_cond_t *user_q) +{ + slurmdbd_msg_t req; + dbd_acct_coord_msg_t get_msg; + int rc; + slurmdbd_msg_t resp; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + + + get_msg.acct = acct; + get_msg.cond = user_q; + + req.msg_type = DBD_REMOVE_ACCOUNT_COORDS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_REMOVE_ACCOUNT_COORDS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid, + acct_account_cond_t *acct_q) +{ + slurmdbd_msg_t req; + dbd_cond_msg_t get_msg; + int rc; + slurmdbd_msg_t resp; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + + + get_msg.cond = acct_q; + + req.msg_type = DBD_REMOVE_ACCOUNTS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_REMOVE_ACCTS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid, + acct_account_cond_t *cluster_q) +{ + slurmdbd_msg_t req; + dbd_cond_msg_t get_msg; + int rc; + slurmdbd_msg_t resp; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + + + get_msg.cond = cluster_q; + + req.msg_type = DBD_REMOVE_CLUSTERS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_REMOVE_CLUSTERS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_remove_associations(void *db_conn, uint32_t uid, + acct_association_cond_t *assoc_q) +{ + slurmdbd_msg_t req; + dbd_cond_msg_t get_msg; + int rc; + slurmdbd_msg_t resp; + dbd_list_msg_t *got_msg; + List ret_list = NULL; + + + get_msg.cond = assoc_q; + + req.msg_type = DBD_REMOVE_ASSOCS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_REMOVE_ASSOCS failure: %m"); + else if (resp.msg_type != DBD_GOT_LIST) { + error("slurmdbd: response type not DBD_GOT_LIST: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_get_users(void *db_conn, + acct_user_cond_t *user_q) +{ + slurmdbd_msg_t req, resp; + dbd_cond_msg_t get_msg; + dbd_list_msg_t *got_msg; + int rc; + List ret_list = NULL; + + get_msg.cond = user_q; + + req.msg_type = DBD_GET_USERS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_GET_USERS failure: 
%m"); + else if (resp.msg_type != DBD_GOT_USERS) { + error("slurmdbd: response type not DBD_GOT_USERS: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return ret_list; +} + +extern List acct_storage_p_get_accts(void *db_conn, + acct_account_cond_t *acct_q) +{ + slurmdbd_msg_t req, resp; + dbd_cond_msg_t get_msg; + dbd_list_msg_t *got_msg; + int rc; + List ret_list = NULL; + + get_msg.cond = acct_q; + + req.msg_type = DBD_GET_ACCOUNTS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_GET_ACCOUNTS failure: %m"); + else if (resp.msg_type != DBD_GOT_ACCOUNTS) { + error("slurmdbd: response type not DBD_GOT_ACCOUNTS: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + + return ret_list; +} + +extern List acct_storage_p_get_clusters(void *db_conn, + acct_account_cond_t *cluster_q) +{ + slurmdbd_msg_t req, resp; + dbd_cond_msg_t get_msg; + dbd_list_msg_t *got_msg; + int rc; + List ret_list = NULL; + + get_msg.cond = cluster_q; + + req.msg_type = DBD_GET_CLUSTERS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_GET_CLUSTERS failure: %m"); + else if (resp.msg_type != DBD_GOT_CLUSTERS) { + error("slurmdbd: response type not DBD_GOT_CLUSTERS: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + + return ret_list; +} + +extern List acct_storage_p_get_associations(void *db_conn, + acct_association_cond_t *assoc_q) +{ + + slurmdbd_msg_t req, resp; + dbd_cond_msg_t get_msg; + dbd_list_msg_t *got_msg; + int rc; + List ret_list = NULL; + + get_msg.cond = assoc_q; + + req.msg_type = DBD_GET_ASSOCS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_GET_ASSOCS failure: %m"); + else if (resp.msg_type != DBD_GOT_ASSOCS) { + error("slurmdbd: response type not DBD_GOT_ASSOCS: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + ret_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + + return ret_list; +} + +extern int acct_storage_p_get_usage(void *db_conn, + acct_association_rec_t *acct_assoc, + time_t start, time_t end) +{ + slurmdbd_msg_t req, resp; + dbd_usage_msg_t get_msg; + dbd_usage_msg_t *got_msg; + acct_association_rec_t *got_rec; + int rc; + + get_msg.rec = acct_assoc; + get_msg.start = start; + get_msg.end = end; + req.msg_type = DBD_GET_ASSOC_USAGE; + + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_GET_ASSOC_USAGE failure: %m"); + else if (resp.msg_type != DBD_GOT_ASSOC_USAGE) { + error("slurmdbd: response type not DBD_GOT_ASSOC_USAGE: %u", + resp.msg_type); + } else { + got_msg = (dbd_usage_msg_t *) resp.data; + got_rec = (acct_association_rec_t *)got_msg->rec; + acct_assoc->accounting_list = got_rec->accounting_list; + got_rec->accounting_list = NULL; + slurmdbd_free_usage_msg(resp.msg_type, got_msg); + } + + + return rc; +} + +extern int acct_storage_p_roll_usage(void *db_conn, + time_t sent_start) +{ + slurmdbd_msg_t req; + dbd_roll_usage_msg_t get_msg; + int 
rc, resp_code; + + get_msg.start = sent_start; + + req.msg_type = DBD_ROLL_USAGE; + + req.data = &get_msg; + + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) + rc = resp_code; + + return rc; +} + +extern int clusteracct_storage_p_node_down(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time, char *reason) +{ + slurmdbd_msg_t msg; + dbd_node_state_msg_t req; + uint16_t cpus; + char *my_reason; + + if (slurmctld_conf.fast_schedule) + cpus = node_ptr->config_ptr->cpus; + else + cpus = node_ptr->cpus; + + if (reason) + my_reason = reason; + else + my_reason = node_ptr->reason; + + req.cluster_name = cluster; + req.cpu_count = cpus; + req.hostlist = node_ptr->name; + req.new_state = DBD_NODE_STATE_DOWN; + req.event_time = event_time; + req.reason = my_reason; + msg.msg_type = DBD_NODE_STATE; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} +extern int clusteracct_storage_p_node_up(void *db_conn, + char *cluster, + struct node_record *node_ptr, + time_t event_time) +{ + slurmdbd_msg_t msg; + dbd_node_state_msg_t req; + + req.cluster_name = cluster; + req.hostlist = node_ptr->name; + req.new_state = DBD_NODE_STATE_UP; + req.event_time = event_time; + req.reason = NULL; + msg.msg_type = DBD_NODE_STATE; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_cluster_procs(void *db_conn, + char *cluster, + uint32_t procs, + time_t event_time) +{ + slurmdbd_msg_t msg; + dbd_cluster_procs_msg_t req; + info("sending info for cluster %s", cluster); + req.cluster_name = cluster; + req.proc_count = procs; + req.event_time = event_time; + msg.msg_type = DBD_CLUSTER_PROCS; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_register_ctld(char *cluster, + uint16_t port) +{ + slurmdbd_msg_t msg; + dbd_register_ctld_msg_t req; + info("registering slurmctld for cluster %s at port %u", cluster, port); + req.cluster_name = cluster; + req.port = port; + msg.msg_type = DBD_REGISTER_CTLD; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +extern int clusteracct_storage_p_get_usage( + void *db_conn, + acct_cluster_rec_t *cluster_rec, + time_t start, time_t end) +{ + slurmdbd_msg_t req, resp; + dbd_usage_msg_t get_msg; + dbd_usage_msg_t *got_msg; + acct_cluster_rec_t *got_rec; + int rc; + + get_msg.rec = cluster_rec; + get_msg.start = start; + get_msg.end = end; + + req.msg_type = DBD_GET_CLUSTER_USAGE; + + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_GET_CLUSTER_USAGE failure: %m"); + else if (resp.msg_type != DBD_GOT_CLUSTER_USAGE) { + error("slurmdbd: response type not DBD_GOT_CLUSTER_USAGE: %u", + resp.msg_type); + } else { + got_msg = (dbd_usage_msg_t *) resp.data; + got_rec = (acct_cluster_rec_t *)got_msg->rec; + cluster_rec->accounting_list = got_rec->accounting_list; + got_rec->accounting_list = NULL; + slurmdbd_free_usage_msg(resp.msg_type, got_msg); + } + + + return rc; +} + +/* + * load into the storage the start of a job + */ +extern int jobacct_storage_p_job_start(void *db_conn, + struct job_record *job_ptr) +{ + slurmdbd_msg_t msg, msg_rc; + dbd_job_start_msg_t req; + dbd_job_start_rc_msg_t *resp; + char *block_id = NULL; + int rc = 
SLURM_SUCCESS; + + if (!job_ptr->details || !job_ptr->details->submit_time) { + error("jobacct_storage_p_job_start: " + "Not inputting this job, it has no submit time."); + return SLURM_ERROR; + } + + req.alloc_cpus = job_ptr->total_procs; + req.account = job_ptr->account; + req.assoc_id = job_ptr->assoc_id; +#ifdef HAVE_BG + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_BLOCK_ID, + &block_id); +#endif + req.block_id = block_id; + req.db_index = job_ptr->db_index; + if (job_ptr->details) + req.eligible_time = job_ptr->details->begin_time; + req.gid = job_ptr->group_id; + req.job_id = job_ptr->job_id; + req.job_state = job_ptr->job_state & (~JOB_COMPLETING); + req.name = job_ptr->name; + req.nodes = job_ptr->nodes; + req.partition = job_ptr->partition; + req.req_cpus = job_ptr->num_procs; + req.priority = job_ptr->priority; + req.start_time = job_ptr->start_time; + if (job_ptr->details) + req.submit_time = job_ptr->details->submit_time; + req.uid = job_ptr->user_id; + + msg.msg_type = DBD_JOB_START; + msg.data = &req; + + /* if we already have the db_index don't wait around for it + * again just send the message + */ + if(req.db_index) { + if (slurm_send_slurmdbd_msg(&msg) < 0) { + xfree(block_id); + return SLURM_ERROR; + } + xfree(block_id); + return SLURM_SUCCESS; + } + + /* If we don't have the db_index we need to wait for it to be + * used in the other submissions for this job. + */ + rc = slurm_send_recv_slurmdbd_msg(&msg, &msg_rc); + if (rc != SLURM_SUCCESS) { + if (slurm_send_slurmdbd_msg(&msg) < 0) { + xfree(block_id); + return SLURM_ERROR; + } + } else if (msg_rc.msg_type != DBD_JOB_START_RC) { + error("slurmdbd: response type not DBD_JOB_START_RC: %u", + msg_rc.msg_type); + } else { + resp = (dbd_job_start_rc_msg_t *) msg_rc.data; + job_ptr->db_index = resp->db_index; + slurmdbd_free_job_start_rc_msg(resp); + } + xfree(block_id); + + return rc; +} + +/* + * load into the storage the end of a job + */ +extern int jobacct_storage_p_job_complete(void *db_conn, + struct job_record *job_ptr) +{ + slurmdbd_msg_t msg; + dbd_job_comp_msg_t req; + + if (!job_ptr->db_index + && (!job_ptr->details || !job_ptr->details->submit_time)) { + error("jobacct_storage_p_job_complete: " + "Not inputting this job, it has no submit time."); + return SLURM_ERROR; + } + + req.assoc_id = job_ptr->assoc_id; + req.db_index = job_ptr->db_index; + req.end_time = job_ptr->end_time; + req.exit_code = job_ptr->exit_code; + req.job_id = job_ptr->job_id; + req.job_state = job_ptr->job_state & (~JOB_COMPLETING); + req.nodes = job_ptr->nodes; + req.start_time = job_ptr->start_time; + if (job_ptr->details) + req.submit_time = job_ptr->details->submit_time; + + msg.msg_type = DBD_JOB_COMPLETE; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +/* + * load into the storage the start of a job step + */ +extern int jobacct_storage_p_step_start(void *db_conn, + struct step_record *step_ptr) +{ + uint32_t cpus = 0; + char node_list[BUFFER_SIZE]; + slurmdbd_msg_t msg; + dbd_step_start_msg_t req; + +#ifdef HAVE_BG + char *ionodes = NULL; + + cpus = step_ptr->job_ptr->num_procs; + select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + &ionodes); + if (ionodes) { + snprintf(node_list, BUFFER_SIZE, + "%s[%s]", step_ptr->job_ptr->nodes, ionodes); + xfree(ionodes); + } else { + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + } + +#else + if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) { + cpus = 
step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + } else { + cpus = step_ptr->step_layout->task_cnt; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->step_layout->node_list); + } +#endif + + if (!step_ptr->job_ptr->db_index + && (!step_ptr->job_ptr->details + || !step_ptr->job_ptr->details->submit_time)) { + error("jobacct_storage_p_step_start: " + "Not inputting this job, it has no submit time."); + return SLURM_ERROR; + } + + req.assoc_id = step_ptr->job_ptr->assoc_id; + req.db_index = step_ptr->job_ptr->db_index; + req.job_id = step_ptr->job_ptr->job_id; + req.name = step_ptr->name; + req.nodes = node_list; + req.start_time = step_ptr->start_time; + if (step_ptr->job_ptr->details) + req.job_submit_time = step_ptr->job_ptr->details->submit_time; + req.step_id = step_ptr->step_id; + req.total_procs = cpus; + + msg.msg_type = DBD_STEP_START; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +/* + * load into the storage the end of a job step + */ +extern int jobacct_storage_p_step_complete(void *db_conn, + struct step_record *step_ptr) +{ + uint32_t cpus = 0; + char node_list[BUFFER_SIZE]; + slurmdbd_msg_t msg; + dbd_step_comp_msg_t req; + +#ifdef HAVE_BG + char *ionodes = NULL; + + cpus = step_ptr->job_ptr->num_procs; + select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + &ionodes); + if (ionodes) { + snprintf(node_list, BUFFER_SIZE, + "%s[%s]", step_ptr->job_ptr->nodes, ionodes); + xfree(ionodes); + } else { + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->job_ptr->nodes); + } + +#else + if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) { + cpus = step_ptr->job_ptr->total_procs; + snprintf(node_list, BUFFER_SIZE, "%s", step_ptr->job_ptr->nodes); + } else { + cpus = step_ptr->step_layout->task_cnt; + snprintf(node_list, BUFFER_SIZE, "%s", + step_ptr->step_layout->node_list); + } +#endif + + if (!step_ptr->job_ptr->db_index + && (!step_ptr->job_ptr->details + || !step_ptr->job_ptr->details->submit_time)) { + error("jobacct_storage_p_step_complete: " + "Not inputting this job, it has no submit time."); + return SLURM_ERROR; + } + + req.assoc_id = step_ptr->job_ptr->assoc_id; + req.db_index = step_ptr->job_ptr->db_index; + req.end_time = time(NULL); /* called at step completion */ + req.jobacct = step_ptr->jobacct; + req.job_id = step_ptr->job_ptr->job_id; + req.req_uid = step_ptr->job_ptr->requid; + req.start_time = step_ptr->start_time; + if (step_ptr->job_ptr->details) + req.job_submit_time = step_ptr->job_ptr->details->submit_time; + req.step_id = step_ptr->step_id; + req.total_procs = cpus; + + msg.msg_type = DBD_STEP_COMPLETE; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +/* + * load into the storage a suspension of a job + */ +extern int jobacct_storage_p_suspend(void *db_conn, + struct job_record *job_ptr) +{ + slurmdbd_msg_t msg; + dbd_job_suspend_msg_t req; + + req.assoc_id = job_ptr->assoc_id; + req.job_id = job_ptr->job_id; + req.db_index = job_ptr->db_index; + req.job_state = job_ptr->job_state & (~JOB_COMPLETING); + if (job_ptr->details) + req.submit_time = job_ptr->details->submit_time; + req.suspend_time = job_ptr->suspend_time; + msg.msg_type = DBD_JOB_SUSPEND; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + +/* + * get info from the storage + * returns List of job_rec_t * + *
note List needs to be freed when called + */ +extern List jobacct_storage_p_get_jobs(void *db_conn, + List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ + slurmdbd_msg_t req, resp; + dbd_get_jobs_msg_t get_msg; + dbd_list_msg_t *got_msg; + int rc; + List job_list = NULL; + struct passwd *pw = NULL; + + get_msg.selected_steps = selected_steps; + get_msg.selected_parts = selected_parts; + get_msg.cluster_name = params->opt_cluster; + get_msg.gid = params->opt_gid; + + if (params->opt_uid >=0 && (pw=getpwuid(params->opt_uid))) + get_msg.user = pw->pw_name; + else + get_msg.user = NULL; + + req.msg_type = DBD_GET_JOBS; + req.data = &get_msg; + rc = slurm_send_recv_slurmdbd_msg(&req, &resp); + + if (rc != SLURM_SUCCESS) + error("slurmdbd: DBD_GET_JOBS failure: %m"); + else if (resp.msg_type != DBD_GOT_JOBS) { + error("slurmdbd: response type not DBD_GOT_JOBS: %u", + resp.msg_type); + } else { + got_msg = (dbd_list_msg_t *) resp.data; + job_list = got_msg->my_list; + got_msg->my_list = NULL; + slurmdbd_free_list_msg(got_msg); + } + + return job_list; +} + +/* + * Expire old info from the storage + * Not applicable for any database + */ +extern void jobacct_storage_p_archive(void *db_conn, + List selected_parts, + void *params) +{ + return; +} + +extern int acct_storage_p_update_shares_used(void *db_conn, + List shares_used) +{ + slurmdbd_msg_t req; + dbd_list_msg_t shares_used_msg; + int rc, resp_code; + + shares_used_msg.my_list = shares_used; + + req.msg_type = DBD_UPDATE_SHARES_USED; + req.data = &shares_used_msg; + rc = slurm_send_slurmdbd_recv_rc_msg(&req, &resp_code); + + if(resp_code != SLURM_SUCCESS) + rc = resp_code; + + return rc; +} + +extern int acct_storage_p_flush_jobs_on_cluster(void *db_conn, char *cluster, + time_t event_time) +{ + slurmdbd_msg_t msg; + dbd_cluster_procs_msg_t req; + + info("Ending any jobs in accounting that were running when controller " + "went down on cluster %s", cluster); + req.cluster_name = cluster; + req.proc_count = 0; + req.event_time = event_time; + msg.msg_type = DBD_FLUSH_JOBS; + msg.data = &req; + + if (slurm_send_slurmdbd_msg(&msg) < 0) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} diff --git a/src/plugins/auth/Makefile.in b/src/plugins/auth/Makefile.in index 97f40a1cc..c4610851e 100644 --- a/src/plugins/auth/Makefile.in +++ b/src/plugins/auth/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/src/plugins/auth/authd/Makefile.in b/src/plugins/auth/authd/Makefile.in index 388e2677c..8c37e3d26 100644 --- a/src/plugins/auth/authd/Makefile.in +++ b/src/plugins/auth/authd/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -80,7 +82,7 @@ auth_authd_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(auth_authd_la_LDFLAGS) $(LDFLAGS) -o $@ @WITH_AUTHD_TRUE@am_auth_authd_la_rpath = -rpath $(pkglibdir) -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -120,6 +122,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +136,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +162,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +176,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +193,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +209,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -304,8 +317,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) 
$(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -313,8 +326,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -368,8 +381,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -381,8 +394,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -392,13 +405,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/auth/authd/auth_authd.c b/src/plugins/auth/authd/auth_authd.c index afef1f776..06ef97759 100644 --- a/src/plugins/auth/authd/auth_authd.c +++ b/src/plugins/auth/authd/auth_authd.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -119,7 +119,7 @@ extern int fini ( void ) } slurm_auth_credential_t * -slurm_auth_create( void *argv[] ) +slurm_auth_create( void *argv[], char *auth_info ) { int ttl; int rc; @@ -178,7 +178,7 @@ slurm_auth_destroy( slurm_auth_credential_t *cred ) } int -slurm_auth_verify( slurm_auth_credential_t *cred, void *argv[] ) +slurm_auth_verify( slurm_auth_credential_t *cred, void *argv[], char *auth_info ) { int rc; time_t now; @@ -206,7 +206,7 @@ slurm_auth_verify( slurm_auth_credential_t *cred, void *argv[] ) uid_t -slurm_auth_get_uid( slurm_auth_credential_t *cred ) +slurm_auth_get_uid( slurm_auth_credential_t *cred, char *auth_info ) { if ( cred == NULL ) { plugin_errno = SLURM_AUTH_BADARG; @@ -217,7 +217,7 @@ slurm_auth_get_uid( slurm_auth_credential_t *cred ) gid_t -slurm_auth_get_gid( slurm_auth_credential_t *cred ) +slurm_auth_get_gid( slurm_auth_credential_t *cred, char *auth_info ) { if ( cred == NULL ) { plugin_errno = SLURM_AUTH_BADARG; @@ -258,7 +258,7 @@ slurm_auth_pack( slurm_auth_credential_t *cred, Buf buf ) slurm_auth_credential_t * slurm_auth_unpack( Buf buf ) { - slurm_auth_credential_t *cred; + slurm_auth_credential_t *cred = NULL; uint16_t sig_size; /* ignored */ uint32_t version, tmpint; char *data; @@ -270,19 +270,13 @@ slurm_auth_unpack( Buf buf ) /* Check the plugin type. */ - if ( unpackmem_ptr( &data, &sig_size, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - return NULL; - } + safe_unpackmem_ptr( &data, &sig_size, buf ); if ( strcmp( data, plugin_type ) != 0 ) { plugin_errno = SLURM_AUTH_MISMATCH; return NULL; } - if ( unpack32( &version, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - return NULL; - } + safe_unpack32( &version, buf ); if( version != plugin_version ) { plugin_errno = SLURM_AUTH_MISMATCH; return NULL; @@ -293,33 +287,19 @@ slurm_auth_unpack( Buf buf ) xmalloc( sizeof( slurm_auth_credential_t ) ); cred->cr_errno = SLURM_SUCCESS; - if ( unpack32( &tmpint, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - goto unpack_error; - } + safe_unpack32( &tmpint, buf ); cred->cred.uid = tmpint; - if ( unpack32( &tmpint, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - goto unpack_error; - } + safe_unpack32( &tmpint, buf ); cred->cred.gid = tmpint; - if ( unpack_time( &cred->cred.valid_from, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - goto unpack_error; - } - if ( unpack_time( &cred->cred.valid_to, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - goto unpack_error; - } - if ( unpackmem_ptr( &data, &sig_size, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - goto unpack_error; - } + safe_unpack_time( &cred->cred.valid_from, buf ); + safe_unpack_time( &cred->cred.valid_to, buf ); + safe_unpackmem_ptr( &data, &sig_size, buf ); memcpy( cred->sig.data, data, sizeof( signature ) ); return cred; unpack_error: + plugin_errno = SLURM_AUTH_UNPACK; xfree( cred ); return NULL; } diff --git a/src/plugins/auth/munge/Makefile.in b/src/plugins/auth/munge/Makefile.in index a891850ef..850d9f69e 100644 --- a/src/plugins/auth/munge/Makefile.in +++ b/src/plugins/auth/munge/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -81,7 +83,7 @@ auth_munge_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(auth_munge_la_LDFLAGS) $(LDFLAGS) -o $@ @WITH_MUNGE_TRUE@am_auth_munge_la_rpath = -rpath $(pkglibdir) -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -121,6 +123,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -134,10 +137,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -157,7 +163,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -168,6 +177,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -183,6 +194,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -198,6 +210,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -305,8 +318,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -314,8 +327,8 @@ uninstall-pkglibLTLIBRARIES: 
@$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -369,8 +382,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -382,8 +395,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -393,13 +406,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/auth/munge/auth_munge.c b/src/plugins/auth/munge/auth_munge.c index 7d9e41e40..1a5a7f517 100644 --- a/src/plugins/auth/munge/auth_munge.c +++ b/src/plugins/auth/munge/auth_munge.c @@ -1,11 +1,12 @@ /*****************************************************************************\ * auth_munge.c - SLURM auth implementation via Chris Dunlap's Munge - * $Id: auth_munge.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: auth_munge.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -122,7 +123,7 @@ static munge_info_t * cred_info_create(munge_ctx_t ctx); static void cred_info_destroy(munge_info_t *); static void _print_cred_info(munge_info_t *mi); static void _print_cred(munge_ctx_t ctx); -static int _decode_cred(slurm_auth_credential_t *c); +static int _decode_cred(slurm_auth_credential_t *c, char *socket); /* @@ -146,7 +147,7 @@ int init ( void ) * data at this time is implementation-dependent. 
*/ slurm_auth_credential_t * -slurm_auth_create( void *argv[] ) +slurm_auth_create( void *argv[], char *socket ) { int retry = 2; slurm_auth_credential_t *cred = NULL; @@ -159,6 +160,24 @@ slurm_auth_create( void *argv[] ) return NULL; } +#if 0 + /* This logic can be used to determine what socket is used by default. + * A typical name is "/var/run/munge/munge.socket.2" */ +{ + char *old_socket; + if (munge_ctx_get(ctx, MUNGE_OPT_SOCKET, &old_socket) != EMUNGE_SUCCESS) + error("munge_ctx_get failure"); + else + info("Default Munge socket is %s", old_socket); +} +#endif + if (socket && + (munge_ctx_set(ctx, MUNGE_OPT_SOCKET, socket) != EMUNGE_SUCCESS)) { + error("munge_ctx_set failure"); + munge_ctx_destroy(ctx); + return NULL; + } + cred = xmalloc(sizeof(*cred)); cred->verified = false; cred->m_str = NULL; @@ -226,7 +245,7 @@ slurm_auth_destroy( slurm_auth_credential_t *cred ) * Return SLURM_SUCCESS if the credential is in order and valid. */ int -slurm_auth_verify( slurm_auth_credential_t *c, void *argv ) +slurm_auth_verify( slurm_auth_credential_t *c, void *argv, char *socket ) { if (!c) { plugin_errno = SLURM_AUTH_BADARG; @@ -238,7 +257,7 @@ slurm_auth_verify( slurm_auth_credential_t *c, void *argv ) if (c->verified) return SLURM_SUCCESS; - if (_decode_cred(c) < 0) + if (_decode_cred(c, socket) < 0) return SLURM_ERROR; return SLURM_SUCCESS; @@ -249,13 +268,13 @@ slurm_auth_verify( slurm_auth_credential_t *c, void *argv ) * is not assured until slurm_auth_verify() has been called for it. */ uid_t -slurm_auth_get_uid( slurm_auth_credential_t *cred ) +slurm_auth_get_uid( slurm_auth_credential_t *cred, char *socket ) { if (cred == NULL) { plugin_errno = SLURM_AUTH_BADARG; return SLURM_AUTH_NOBODY; } - if ((!cred->verified) && (_decode_cred(cred) < 0)) { + if ((!cred->verified) && (_decode_cred(cred, socket) < 0)) { cred->cr_errno = SLURM_AUTH_INVALID; return SLURM_AUTH_NOBODY; } @@ -270,13 +289,13 @@ slurm_auth_get_uid( slurm_auth_credential_t *cred ) * above for details on correct behavior. */ gid_t -slurm_auth_get_gid( slurm_auth_credential_t *cred ) +slurm_auth_get_gid( slurm_auth_credential_t *cred, char *socket ) { if (cred == NULL) { plugin_errno = SLURM_AUTH_BADARG; return SLURM_AUTH_NOBODY; } - if ((!cred->verified) && (_decode_cred(cred) < 0)) { + if ((!cred->verified) && (_decode_cred(cred, socket) < 0)) { cred->cr_errno = SLURM_AUTH_INVALID; return SLURM_AUTH_NOBODY; } @@ -325,9 +344,9 @@ slurm_auth_pack( slurm_auth_credential_t *cred, Buf buf ) slurm_auth_credential_t * slurm_auth_unpack( Buf buf ) { - slurm_auth_credential_t *cred; + slurm_auth_credential_t *cred = NULL; char *type; - uint16_t size; + uint32_t size; uint32_t version; if ( buf == NULL ) { @@ -338,20 +357,14 @@ slurm_auth_unpack( Buf buf ) /* * Get the authentication type. 
*/ - if ( unpackmem_ptr( &type, &size, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - return NULL; - } + safe_unpackmem_ptr( &type, &size, buf ); if (( type == NULL ) || ( strcmp( type, plugin_type ) != 0 )) { plugin_errno = SLURM_AUTH_MISMATCH; return NULL; } - if ( unpack32( &version, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK; - return NULL; - } + safe_unpack32( &version, buf ); if ( version != plugin_version ) { plugin_errno = SLURM_AUTH_MISMATCH; return NULL; @@ -367,13 +380,11 @@ slurm_auth_unpack( Buf buf ) xassert(cred->magic = MUNGE_MAGIC); - if (unpackstr_malloc(&cred->m_str, &size, buf) < 0) { - plugin_errno = SLURM_AUTH_UNPACK; - goto unpack_error; - } + safe_unpackstr_malloc(&cred->m_str, &size, buf); return cred; unpack_error: + plugin_errno = SLURM_AUTH_UNPACK; xfree( cred ); return NULL; } @@ -439,7 +450,7 @@ slurm_auth_errstr( int slurm_errno ) * into slurm credential `c' */ static int -_decode_cred(slurm_auth_credential_t *c) +_decode_cred(slurm_auth_credential_t *c, char *socket) { int retry = 2; munge_err_t e; @@ -457,6 +468,12 @@ _decode_cred(slurm_auth_credential_t *c) error("munge_ctx_create failure"); return SLURM_ERROR; } + if (socket && + (munge_ctx_set(ctx, MUNGE_OPT_SOCKET, socket) != EMUNGE_SUCCESS)) { + error("munge_ctx_set failure"); + munge_ctx_destroy(ctx); + return SLURM_ERROR; + } again: if ((e = munge_decode(c->m_str, ctx, &c->buf, &c->len, &c->uid, &c->gid))) { diff --git a/src/plugins/auth/none/Makefile.in b/src/plugins/auth/none/Makefile.in index eeaefacd3..8ef4b2079 100644 --- a/src/plugins/auth/none/Makefile.in +++ b/src/plugins/auth/none/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ auth_none_la_OBJECTS = $(am_auth_none_la_OBJECTS) auth_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(auth_none_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/auth/none/auth_none.c b/src/plugins/auth/none/auth_none.c index 42a557975..307fcb2bb 100644 --- a/src/plugins/auth/none/auth_none.c +++ b/src/plugins/auth/none/auth_none.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -144,9 +144,7 @@ static int plugin_errno = SLURM_SUCCESS; * the general ones. */ enum { - SLURM_AUTH_UNPACK_TYPE = SLURM_AUTH_FIRST_LOCAL_ERROR, - SLURM_AUTH_UNPACK_VERSION, - SLURM_AUTH_UNPACK_CRED + SLURM_AUTH_UNPACK }; /* @@ -174,7 +172,7 @@ extern int fini ( void ) * NULL if it cannot allocate a credential. */ slurm_auth_credential_t * -slurm_auth_create( void *argv[] ) +slurm_auth_create( void *argv[], char *auth_info ) { slurm_auth_credential_t *cred; @@ -207,7 +205,7 @@ slurm_auth_destroy( slurm_auth_credential_t *cred ) * Return SLURM_SUCCESS if the credential is in order and valid. */ int -slurm_auth_verify( slurm_auth_credential_t *cred, void *argv[] ) +slurm_auth_verify( slurm_auth_credential_t *cred, void *argv[], char *auth_info ) { return SLURM_SUCCESS; } @@ -217,7 +215,7 @@ slurm_auth_verify( slurm_auth_credential_t *cred, void *argv[] ) * is not assured until slurm_auth_verify() has been called for it. */ uid_t -slurm_auth_get_uid( slurm_auth_credential_t *cred ) +slurm_auth_get_uid( slurm_auth_credential_t *cred, char *auth_info ) { if ( cred == NULL ) { plugin_errno = SLURM_AUTH_BADARG; @@ -232,7 +230,7 @@ slurm_auth_get_uid( slurm_auth_credential_t *cred ) * above for details on correct behavior. */ gid_t -slurm_auth_get_gid( slurm_auth_credential_t *cred ) +slurm_auth_get_gid( slurm_auth_credential_t *cred, char *auth_info ) { if ( cred == NULL ) { plugin_errno = SLURM_AUTH_BADARG; @@ -276,11 +274,11 @@ slurm_auth_pack( slurm_auth_credential_t *cred, Buf buf ) slurm_auth_credential_t * slurm_auth_unpack( Buf buf ) { - slurm_auth_credential_t *cred; + slurm_auth_credential_t *cred = NULL; char *tmpstr; uint32_t tmpint; uint32_t version; - uint16_t size; + uint32_t size; if ( buf == NULL ) { plugin_errno = SLURM_AUTH_BADARG; @@ -290,19 +288,13 @@ slurm_auth_unpack( Buf buf ) /* * Get the authentication type. 
*/ - if ( unpackmem_ptr( &tmpstr, &size, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK_TYPE; - return NULL; - } + safe_unpackmem_ptr( &tmpstr, &size, buf ); if (( tmpstr == NULL ) || ( strcmp( tmpstr, plugin_type ) != 0 )) { plugin_errno = SLURM_AUTH_MISMATCH; return NULL; } - if ( unpack32( &version, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK_VERSION; - return NULL; - } + safe_unpack32( &version, buf ); if ( version != plugin_version ) { plugin_errno = SLURM_AUTH_MISMATCH; return NULL; @@ -321,20 +313,17 @@ slurm_auth_unpack( Buf buf ) * clobbering if they really aren't. This technique ensures a * warning at compile time if the sizes are incompatible. */ - if ( unpack32( &tmpint, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK_CRED; - xfree( cred ); - return NULL; - } + safe_unpack32( &tmpint, buf ); cred->uid = tmpint; - if ( unpack32( &tmpint, buf ) != SLURM_SUCCESS ) { - plugin_errno = SLURM_AUTH_UNPACK_CRED; - xfree( cred ); - return NULL; - } + safe_unpack32( &tmpint, buf ); cred->gid = tmpint; return cred; + + unpack_error: + plugin_errno = SLURM_AUTH_UNPACK; + xfree( cred ); + return NULL; } /* @@ -386,9 +375,7 @@ slurm_auth_errstr( int slurm_errno ) int err; char *msg; } tbl[] = { - { SLURM_AUTH_UNPACK_TYPE, "cannot unpack authentication type" }, - { SLURM_AUTH_UNPACK_VERSION, "cannot unpack credential version" }, - { SLURM_AUTH_UNPACK_CRED, "cannot unpack credential" }, + { SLURM_AUTH_UNPACK, "cannot unpack credential" }, { 0, NULL } }; diff --git a/src/plugins/checkpoint/Makefile.am b/src/plugins/checkpoint/Makefile.am index 3ce36725a..0527fc065 100644 --- a/src/plugins/checkpoint/Makefile.am +++ b/src/plugins/checkpoint/Makefile.am @@ -1,3 +1,3 @@ # Makefile for checkpoint plugins -SUBDIRS = aix none ompi +SUBDIRS = aix none ompi xlch diff --git a/src/plugins/checkpoint/Makefile.in b/src/plugins/checkpoint/Makefile.in index c195f945d..6a27134d7 100644 --- a/src/plugins/checkpoint/Makefile.in +++ b/src/plugins/checkpoint/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -234,7 +247,7 @@ target_os = @target_os@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -SUBDIRS = aix none ompi +SUBDIRS = aix none ompi xlch all: all-recursive .SUFFIXES: @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 
1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/checkpoint/aix/Makefile.in b/src/plugins/checkpoint/aix/Makefile.in index e7c8bbf0d..73c445d25 100644 --- a/src/plugins/checkpoint/aix/Makefile.in +++ b/src/plugins/checkpoint/aix/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -82,7 +84,7 @@ checkpoint_aix_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(checkpoint_aix_la_LDFLAGS) $(LDFLAGS) -o $@ @HAVE_AIX_TRUE@am_checkpoint_aix_la_rpath = -rpath $(pkglibdir) -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -124,6 +126,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -137,10 +140,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -160,7 +166,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -171,6 +180,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -186,6 +197,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -201,6 +213,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -305,8 
+318,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -314,8 +327,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -369,8 +382,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -382,8 +395,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -393,13 +406,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/checkpoint/aix/checkpoint_aix.c b/src/plugins/checkpoint/aix/checkpoint_aix.c index 71229d03f..fef01c9f3 100644 --- a/src/plugins/checkpoint/aix/checkpoint_aix.c +++ b/src/plugins/checkpoint/aix/checkpoint_aix.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * checkpoint_aix.c - AIX slurm checkpoint plugin. - * $Id: checkpoint_aix.c 12088 2007-08-22 18:02:24Z jette $ + * $Id: checkpoint_aix.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -318,7 +318,7 @@ extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer) extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; struct check_job_info *check_ptr = (struct check_job_info *)jobinfo; @@ -328,7 +328,7 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer) safe_unpack16(&check_ptr->wait_time, buffer); safe_unpack32(&check_ptr->error_code, buffer); - safe_unpackstr_xmalloc(&check_ptr->error_msg, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&check_ptr->error_msg, &uint32_tmp, buffer); safe_unpack_time(&check_ptr->time_stamp, buffer); return SLURM_SUCCESS; @@ -433,7 +433,7 @@ static void *_ckpt_agent_thr(void *arg) info("checkpoint timeout for %u.%u", rec->job_id, rec->step_id); _ckpt_signal_step(rec); - list_delete(iter); + list_delete_item(iter); } slurm_mutex_unlock(&ckpt_agent_mutex); list_iterator_destroy(iter); @@ -499,7 +499,7 @@ static void _ckpt_dequeue_timeout(uint32_t job_id, uint32_t step_id, || (start_time && (rec->start_time != start_time))) continue; /* debug("dequeue %u.%u", job_id, step_id); */ - list_delete(iter); + list_delete_item(iter); break; } list_iterator_destroy(iter); @@ -507,3 +507,8 @@ static void _ckpt_dequeue_timeout(uint32_t job_id, uint32_t step_id, slurm_mutex_unlock(&ckpt_agent_mutex); } +extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id, + time_t event_time, uint32_t error_code, char *error_msg ) +{ + return SLURM_SUCCESS; +} diff --git a/src/plugins/checkpoint/none/Makefile.in b/src/plugins/checkpoint/none/Makefile.in index 3a4305892..af3c69428 100644 --- a/src/plugins/checkpoint/none/Makefile.in +++ b/src/plugins/checkpoint/none/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ checkpoint_none_la_OBJECTS = $(am_checkpoint_none_la_OBJECTS) checkpoint_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(checkpoint_none_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/checkpoint/none/checkpoint_none.c b/src/plugins/checkpoint/none/checkpoint_none.c index 17cec0468..78a441782 100644 --- a/src/plugins/checkpoint/none/checkpoint_none.c +++ b/src/plugins/checkpoint/none/checkpoint_none.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -134,3 +134,10 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer) { return SLURM_SUCCESS; } + +extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id, + time_t event_time, uint32_t error_code, char *error_msg ) +{ + return SLURM_SUCCESS; +} + diff --git a/src/plugins/checkpoint/ompi/Makefile.in b/src/plugins/checkpoint/ompi/Makefile.in index a62dcd895..d2a48ab72 100644 --- a/src/plugins/checkpoint/ompi/Makefile.in +++ b/src/plugins/checkpoint/ompi/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ checkpoint_ompi_la_OBJECTS = $(am_checkpoint_ompi_la_OBJECTS) checkpoint_ompi_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(checkpoint_ompi_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/checkpoint/ompi/checkpoint_ompi.c b/src/plugins/checkpoint/ompi/checkpoint_ompi.c index 7f344cff8..37a60f63a 100644 --- a/src/plugins/checkpoint/ompi/checkpoint_ompi.c +++ b/src/plugins/checkpoint/ompi/checkpoint_ompi.c @@ -4,7 +4,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -257,7 +257,7 @@ extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer) extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer) { - uint16_t uint16_tmp; + uint32_t uint32_tmp; struct check_job_info *check_ptr = (struct check_job_info *)jobinfo; @@ -266,7 +266,7 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer) safe_unpack16(&check_ptr->wait_time, buffer); safe_unpack32(&check_ptr->error_code, buffer); - safe_unpackstr_xmalloc(&check_ptr->error_msg, &uint16_tmp, buffer); + safe_unpackstr_xmalloc(&check_ptr->error_msg, &uint32_tmp, buffer); safe_unpack_time(&check_ptr->time_stamp, buffer); return SLURM_SUCCESS; @@ -307,3 +307,10 @@ static int _ckpt_step(struct step_record * step_ptr, uint16_t wait, int vacate) job_ptr->job_id, step_ptr->step_id); return SLURM_SUCCESS; } + +extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id, + time_t event_time, uint32_t error_code, char *error_msg ) +{ + return SLURM_SUCCESS; +} + diff --git a/src/plugins/checkpoint/xlch/Makefile.am b/src/plugins/checkpoint/xlch/Makefile.am new file mode 100644 index 000000000..5bcc9bae2 --- /dev/null +++ b/src/plugins/checkpoint/xlch/Makefile.am @@ -0,0 +1,24 @@ +# Makefile for checkpoint/xlch plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = checkpoint_xlch.la +checkpoint_xlch_la_SOURCES = checkpoint_xlch.c config.c +checkpoint_xlch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) + +convenience_libs = $(top_builddir)/src/api/libslurmhelper.la + +checkpoint_xlch_la_LIBADD = $(convenience_libs) + +config.c: Makefile + @( echo "char *scch_path = \"$(prefix)/sbin/scch\";"\ + ) > config.c + +force: + +$(checkpoint_xlch_LDADD) : force + @cd `dirname 
$@` && $(MAKE) `basename $@` diff --git a/src/plugins/checkpoint/xlch/Makefile.in b/src/plugins/checkpoint/xlch/Makefile.in new file mode 100644 index 000000000..bece591cd --- /dev/null +++ b/src/plugins/checkpoint/xlch/Makefile.in @@ -0,0 +1,565 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for checkpoint/xlch plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/checkpoint/xlch +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +checkpoint_xlch_la_DEPENDENCIES = $(convenience_libs) +am_checkpoint_xlch_la_OBJECTS = checkpoint_xlch.lo config.lo +checkpoint_xlch_la_OBJECTS = $(am_checkpoint_xlch_la_OBJECTS) +checkpoint_xlch_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + 
$(checkpoint_xlch_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(checkpoint_xlch_la_SOURCES) +DIST_SOURCES = $(checkpoint_xlch_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ 
+SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = checkpoint_xlch.la +checkpoint_xlch_la_SOURCES = checkpoint_xlch.c config.c +checkpoint_xlch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +convenience_libs = $(top_builddir)/src/api/libslurmhelper.la +checkpoint_xlch_la_LIBADD = $(convenience_libs) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/checkpoint/xlch/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/checkpoint/xlch/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +checkpoint_xlch.la: $(checkpoint_xlch_la_OBJECTS) $(checkpoint_xlch_la_DEPENDENCIES) + $(checkpoint_xlch_la_LINK) -rpath $(pkglibdir) $(checkpoint_xlch_la_OBJECTS) $(checkpoint_xlch_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint_xlch.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/config.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + 
+mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + + +config.c: Makefile + @( echo "char *scch_path = \"$(prefix)/sbin/scch\";"\ + ) > config.c + +force: + +$(checkpoint_xlch_LDADD) : force + @cd `dirname $@` && $(MAKE) `basename $@` +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/checkpoint/xlch/checkpoint_xlch.c b/src/plugins/checkpoint/xlch/checkpoint_xlch.c new file mode 100644 index 000000000..6089588cb --- /dev/null +++ b/src/plugins/checkpoint/xlch/checkpoint_xlch.c @@ -0,0 +1,696 @@ +/*****************************************************************************\ + * checkpoint_xlch.c - XLCH slurm checkpoint plugin. + * $Id: checkpoint_xlch.c 0001 2006-10-31 10:55:11Z hjcao $ + ***************************************************************************** + * Copied from checkpoint_aix.c + * + * Copyright (C) 2004 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. 
You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_STDINT_H +# include <stdint.h> +#endif +#if HAVE_INTTYPES_H +# include <inttypes.h> +#endif +#ifdef WITH_PTHREADS +# include <pthread.h> +#endif + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <time.h> +#include <unistd.h> +#include <slurm/slurm.h> +#include <slurm/slurm_errno.h> + +#include "src/common/list.h" +#include "src/common/log.h" +#include "src/common/pack.h" +#include "src/common/xassert.h" +#include "src/common/xstring.h" +#include "src/common/xmalloc.h" +#include "src/slurmctld/agent.h" +#include "src/slurmctld/slurmctld.h" + +#define SIGCKPT 20 + +struct check_job_info { + uint16_t disabled; /* counter, checkpointable only if zero */ + uint16_t task_cnt; + uint16_t reply_cnt; + uint16_t wait_time; + time_t time_stamp; /* begin or end checkpoint time */ + uint32_t error_code; + char *error_msg; + uint16_t sig_done; + bitstr_t *replied; /* which task has replied the checkpoint. + XXX: only valid if in operation */ + pthread_mutex_t mutex; +}; + +static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal, + char *nodelist); +static void _send_ckpt(uint32_t job_id, uint32_t step_id, uint16_t signal, + time_t timestamp, char *nodelist); +static int _step_ckpt(struct step_record * step_ptr, uint16_t wait, + uint16_t signal, uint16_t sig_timeout); + +/* checkpoint request timeout processing */ +static pthread_t ckpt_agent_tid = 0; +static pthread_mutex_t ckpt_agent_mutex = PTHREAD_MUTEX_INITIALIZER; +static List ckpt_timeout_list = NULL; +struct ckpt_timeout_info { + uint32_t job_id; + uint32_t step_id; + uint16_t signal; + time_t start_time; + time_t end_time; + char* nodelist; +}; +static void *_ckpt_agent_thr(void *arg); +static void _ckpt_enqueue_timeout(uint32_t job_id, uint32_t step_id, + time_t start_time, uint16_t signal, + uint16_t wait_time, char *nodelist); +static void _ckpt_dequeue_timeout(uint32_t job_id, uint32_t step_id, + time_t start_time); +static void _ckpt_timeout_free(void *rec); +static void _ckpt_signal_step(struct ckpt_timeout_info *rec); + +static int _on_ckpt_complete(struct step_record *step_ptr, uint32_t error_code); + +extern char *scch_path; + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. 
+ * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "checkpoint" for SLURM checkpoint) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load checkpoint plugins if the plugin_type string has a + * prefix of "checkpoint/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the checkpoint API matures. + */ +const char plugin_name[] = "XLCH checkpoint plugin"; +const char plugin_type[] = "checkpoint/xlch"; +const uint32_t plugin_version = 10; + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + pthread_attr_t attr; + + slurm_attr_init(&attr); + if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) + error("pthread_attr_setdetachstate: %m"); + if (pthread_create(&ckpt_agent_tid, &attr, _ckpt_agent_thr, NULL)) { + error("pthread_create: %m"); + return SLURM_ERROR; + } + slurm_attr_destroy(&attr); + + return SLURM_SUCCESS; +} + + +extern int fini ( void ) +{ + int i; + + if (!ckpt_agent_tid) + return SLURM_SUCCESS; + + for (i=0; i<4; i++) { + if (pthread_cancel(ckpt_agent_tid)) { + ckpt_agent_tid = 0; + return SLURM_SUCCESS; + } + usleep(1000); + } + error("Could not kill checkpoint pthread"); + return SLURM_ERROR; +} + +/* + * The remainder of this file implements the standard SLURM checkpoint API. 
+ */ + +extern int slurm_ckpt_op ( uint16_t op, uint16_t data, + struct step_record * step_ptr, time_t * event_time, + uint32_t *error_code, char **error_msg ) +{ + int rc = SLURM_SUCCESS; + struct check_job_info *check_ptr; + + xassert(step_ptr); + check_ptr = (struct check_job_info *) step_ptr->check_job; + check_ptr->task_cnt = step_ptr->step_layout->task_cnt; /* set it early */ + xassert(check_ptr); + + slurm_mutex_lock (&check_ptr->mutex); + + switch (op) { + case CHECK_ABLE: + if (check_ptr->disabled) + rc = ESLURM_DISABLED; + else { + if (check_ptr->reply_cnt < check_ptr->task_cnt) + *event_time = check_ptr->time_stamp; + rc = SLURM_SUCCESS; + } + break; + case CHECK_DISABLE: + check_ptr->disabled++; + break; + case CHECK_ENABLE: + check_ptr->disabled--; + break; + case CHECK_CREATE: + if (check_ptr->time_stamp != 0) { + rc = EALREADY; + break; + } + check_ptr->time_stamp = time(NULL); + check_ptr->reply_cnt = 0; + check_ptr->replied = bit_alloc(check_ptr->task_cnt); + check_ptr->error_code = 0; + check_ptr->sig_done = 0; + xfree(check_ptr->error_msg); + rc = _step_ckpt(step_ptr, data, SIGCKPT, SIGKILL); + break; + case CHECK_VACATE: + if (check_ptr->time_stamp != 0) { + rc = EALREADY; + break; + } + check_ptr->time_stamp = time(NULL); + check_ptr->reply_cnt = 0; + check_ptr->replied = bit_alloc(check_ptr->task_cnt); + check_ptr->error_code = 0; + check_ptr->sig_done = SIGTERM; /* exit elegantly */ + xfree(check_ptr->error_msg); + rc = _step_ckpt(step_ptr, data, SIGCKPT, SIGKILL); + break; + case CHECK_RESTART: + rc = ESLURM_NOT_SUPPORTED; + break; + case CHECK_ERROR: + xassert(error_code); + xassert(error_msg); + *error_code = check_ptr->error_code; + xfree(*error_msg); + *error_msg = xstrdup(check_ptr->error_msg); + break; + default: + error("Invalid checkpoint operation: %d", op); + rc = EINVAL; + } + + slurm_mutex_unlock (&check_ptr->mutex); + + return rc; +} + +/* this function will not be called by us */ +extern int slurm_ckpt_comp ( struct step_record * step_ptr, time_t event_time, + uint32_t error_code, char *error_msg ) +{ + error("checkpoint/xlch: slurm_ckpt_comp not implemented"); + return SLURM_FAILURE; +} + +extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id, + time_t event_time, uint32_t error_code, char *error_msg ) +{ + struct check_job_info *check_ptr; + int rc = SLURM_SUCCESS; + + xassert(step_ptr); + check_ptr = (struct check_job_info *) step_ptr->check_job; + xassert(check_ptr); + + /* XXX: we need a mutex here, since in proc_req only JOB_READ locked */ + debug3("slurm_ckpt_task_comp: job %u.%hu, task %u, error %d", + step_ptr->job_ptr->job_id, step_ptr->step_id, task_id, + error_code); + + slurm_mutex_lock (&check_ptr->mutex); + + /* + * for now we do not use event_time to identify operation and always + * set it 0 + * TODO: consider send event_time to the task via sigqueue(). 
+ */ + if (event_time && (event_time != check_ptr->time_stamp)) { + rc = ESLURM_ALREADY_DONE; + goto out; + } + + if (!check_ptr->replied || bit_test (check_ptr->replied, task_id)) { + rc = ESLURM_ALREADY_DONE; + goto out; + } + + if ((uint16_t)task_id >= check_ptr->task_cnt) { + error("invalid task_id %u, task_cnt: %hu", task_id, + check_ptr->task_cnt); + rc = EINVAL; + goto out; + } + bit_set (check_ptr->replied, task_id); + check_ptr->reply_cnt ++; + + /* TODO: check the error_code */ + if (error_code > check_ptr->error_code) { + info("slurm_ckpt_task_comp error %u: %s", error_code, error_msg); + check_ptr->error_code = error_code; + xfree(check_ptr->error_msg); + check_ptr->error_msg = xstrdup(error_msg); + } + + /* We need an error-free reply from each task to note completion */ + if (check_ptr->reply_cnt == check_ptr->task_cnt) { /* all tasks done */ + time_t now = time(NULL); + long delay = (long) difftime(now, check_ptr->time_stamp); + info("Checkpoint complete for job %u.%u in %ld seconds", + step_ptr->job_ptr->job_id, step_ptr->step_id, + delay); + /* remove the timeout */ + _ckpt_dequeue_timeout(step_ptr->job_ptr->job_id, + step_ptr->step_id, check_ptr->time_stamp); + /* free the replied bitstr */ + FREE_NULL_BITMAP (check_ptr->replied); + + if (check_ptr->sig_done) { + info ("checkpoint step %u.%hu done, sending signal %hu", + step_ptr->job_ptr->job_id, + step_ptr->step_id, check_ptr->sig_done); + _send_sig(step_ptr->job_ptr->job_id, step_ptr->step_id, + check_ptr->sig_done, + step_ptr->step_layout->node_list); + } + + _on_ckpt_complete(step_ptr, check_ptr->error_code); /* how about we execute a program? */ + + check_ptr->time_stamp = 0; /* this enables checkpoint again */ + } + + out: + slurm_mutex_unlock (&check_ptr->mutex); + return rc; +} + +extern int slurm_ckpt_alloc_job(check_jobinfo_t *jobinfo) +{ + struct check_job_info *check_ptr; + + check_ptr = xmalloc(sizeof(struct check_job_info)); + slurm_mutex_init (&check_ptr->mutex); + *jobinfo = (check_jobinfo_t) check_ptr; + return SLURM_SUCCESS; +} + +extern int slurm_ckpt_free_job(check_jobinfo_t jobinfo) +{ + struct check_job_info *check_ptr = (struct check_job_info *)jobinfo; + if (check_ptr) { + xfree (check_ptr->error_msg); + FREE_NULL_BITMAP (check_ptr->replied); + } + xfree(jobinfo); + return SLURM_SUCCESS; +} + +extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer) +{ + struct check_job_info *check_ptr = + (struct check_job_info *)jobinfo; + + pack16(check_ptr->disabled, buffer); + pack16(check_ptr->task_cnt, buffer); + pack16(check_ptr->reply_cnt, buffer); + pack16(check_ptr->wait_time, buffer); + pack_bit_fmt(check_ptr->replied, buffer); + + pack32(check_ptr->error_code, buffer); + packstr(check_ptr->error_msg, buffer); + pack_time(check_ptr->time_stamp, buffer); + + return SLURM_SUCCESS; +} + +extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer) +{ + uint32_t uint32_tmp; + char *task_inx_str; + struct check_job_info *check_ptr = + (struct check_job_info *)jobinfo; + + safe_unpack16(&check_ptr->disabled, buffer); + safe_unpack16(&check_ptr->task_cnt, buffer); + safe_unpack16(&check_ptr->reply_cnt, buffer); + safe_unpack16(&check_ptr->wait_time, buffer); + safe_unpackstr_xmalloc(&task_inx_str, &uint32_tmp, buffer); + if (task_inx_str == NULL) + check_ptr->replied = NULL; + else { + check_ptr->replied = bit_alloc(check_ptr->task_cnt); + bit_unfmt(check_ptr->replied, task_inx_str); + xfree(task_inx_str); + } + + safe_unpack32(&check_ptr->error_code, buffer); + 
safe_unpackstr_xmalloc(&check_ptr->error_msg, &uint32_tmp, buffer); + safe_unpack_time(&check_ptr->time_stamp, buffer); + + return SLURM_SUCCESS; + + unpack_error: + xfree(check_ptr->error_msg); + return SLURM_ERROR; +} + +/* Send a checkpoint RPC to a specific job step */ +static void _send_ckpt(uint32_t job_id, uint32_t step_id, uint16_t signal, + time_t timestamp, char *nodelist) +{ + agent_arg_t *agent_args; + checkpoint_tasks_msg_t *ckpt_tasks_msg; + + ckpt_tasks_msg = xmalloc(sizeof(checkpoint_tasks_msg_t)); + ckpt_tasks_msg->job_id = job_id; + ckpt_tasks_msg->job_step_id = step_id; + ckpt_tasks_msg->signal = signal; + ckpt_tasks_msg->timestamp = timestamp; + + agent_args = xmalloc(sizeof(agent_arg_t)); + agent_args->msg_type = REQUEST_CHECKPOINT_TASKS; + agent_args->retry = 1; /* keep retrying until all nodes receives the request */ + agent_args->msg_args = ckpt_tasks_msg; + agent_args->hostlist = hostlist_create(nodelist); + agent_args->node_count = hostlist_count(agent_args->hostlist); + + agent_queue_request(agent_args); +} + +/* Send a signal RPC to a list of nodes */ +static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal, + char *nodelist) +{ + agent_arg_t *agent_args; + kill_tasks_msg_t *kill_tasks_msg; + + kill_tasks_msg = xmalloc(sizeof(kill_tasks_msg_t)); + kill_tasks_msg->job_id = job_id; + kill_tasks_msg->job_step_id = step_id; + kill_tasks_msg->signal = signal; + + agent_args = xmalloc(sizeof(agent_arg_t)); + agent_args->msg_type = REQUEST_SIGNAL_TASKS; + agent_args->retry = 1; + agent_args->msg_args = kill_tasks_msg; + agent_args->hostlist = hostlist_create(nodelist); + agent_args->node_count = hostlist_count(agent_args->hostlist); + + agent_queue_request(agent_args); +} + +/* Send checkpoint request to the processes of a job step. + * If the request times out, send sig_timeout. */ +static int _step_ckpt(struct step_record * step_ptr, uint16_t wait, + uint16_t signal, uint16_t sig_timeout) +{ + struct check_job_info *check_ptr; + struct job_record *job_ptr; + + xassert(step_ptr); + check_ptr = (struct check_job_info *) step_ptr->check_job; + xassert(check_ptr); + job_ptr = step_ptr->job_ptr; + xassert(job_ptr); + + if (IS_JOB_FINISHED(job_ptr)) + return ESLURM_ALREADY_DONE; + + if (check_ptr->disabled) + return ESLURM_DISABLED; + + if (!check_ptr->task_cnt) { + error("_step_ckpt: job %u.%u has no tasks to checkpoint", + job_ptr->job_id, + step_ptr->step_id); + return ESLURM_INVALID_NODE_NAME; + } + char* nodelist = xstrdup (step_ptr->step_layout->node_list); + check_ptr->wait_time = wait; /* TODO: how about change wait_time according to task_cnt? 
*/ + + _send_ckpt(step_ptr->job_ptr->job_id, step_ptr->step_id, + signal, check_ptr->time_stamp, nodelist); + + _ckpt_enqueue_timeout(step_ptr->job_ptr->job_id, + step_ptr->step_id, check_ptr->time_stamp, + sig_timeout, check_ptr->wait_time, nodelist); + + info("checkpoint requested for job %u.%u", job_ptr->job_id, + step_ptr->step_id); + xfree (nodelist); + return SLURM_SUCCESS; +} + + +static void _ckpt_signal_step(struct ckpt_timeout_info *rec) +{ + /* debug("signal %u.%u %u", rec->job_id, rec->step_id, rec->signal); */ + _send_sig(rec->job_id, rec->step_id, rec->signal, rec->nodelist); +} + +/* Checkpoint processing pthread + * Never returns, but is cancelled on plugin termiantion */ +static void *_ckpt_agent_thr(void *arg) +{ + ListIterator iter; + struct ckpt_timeout_info *rec; + time_t now; + + while (1) { + sleep(1); + if (!ckpt_timeout_list) + continue; + + now = time(NULL); + iter = list_iterator_create(ckpt_timeout_list); + slurm_mutex_lock(&ckpt_agent_mutex); + /* look for and process any timeouts */ + while ((rec = list_next(iter))) { + if (rec->end_time > now) + continue; + info("checkpoint timeout for %u.%u", + rec->job_id, rec->step_id); + _ckpt_signal_step(rec); + list_delete_item(iter); + } + slurm_mutex_unlock(&ckpt_agent_mutex); + list_iterator_destroy(iter); + } +} + +/* Queue a checkpoint request timeout */ +static void _ckpt_enqueue_timeout(uint32_t job_id, uint32_t step_id, + time_t start_time, uint16_t signal, + uint16_t wait_time, char *nodelist) +{ + struct ckpt_timeout_info *rec; + + if ((wait_time == 0) || (signal == 0)) /* if signal == 0, don't enqueue it */ + return; + + slurm_mutex_lock(&ckpt_agent_mutex); + if (!ckpt_timeout_list) + ckpt_timeout_list = list_create(_ckpt_timeout_free); + rec = xmalloc(sizeof(struct ckpt_timeout_info)); + rec->job_id = job_id; + rec->step_id = step_id; + rec->signal = signal; + rec->start_time = start_time; + rec->end_time = start_time + wait_time; + rec->nodelist = xstrdup(nodelist); + /* debug("enqueue %u.%u %u", job_id, step_id, wait_time); */ + list_enqueue(ckpt_timeout_list, rec); + slurm_mutex_unlock(&ckpt_agent_mutex); +} + +static void _ckpt_timeout_free(void *rec) +{ + struct ckpt_timeout_info *ckpt_rec = (struct ckpt_timeout_info *)rec; + + if (ckpt_rec) { + xfree(ckpt_rec->nodelist); + xfree(ckpt_rec); + } +} + +/* De-queue a checkpoint timeout request. The operation completed */ +static void _ckpt_dequeue_timeout(uint32_t job_id, uint32_t step_id, + time_t start_time) +{ + ListIterator iter; + struct ckpt_timeout_info *rec; + + slurm_mutex_lock(&ckpt_agent_mutex); + if (!ckpt_timeout_list) + goto fini; + iter = list_iterator_create(ckpt_timeout_list); + while ((rec = list_next(iter))) { + if ((rec->job_id != job_id) || (rec->step_id != step_id) + || (start_time && (rec->start_time != start_time))) + continue; + /* debug("dequeue %u.%u", job_id, step_id); */ + list_delete_item(iter); + break; + } + list_iterator_destroy(iter); + fini: + slurm_mutex_unlock(&ckpt_agent_mutex); +} + + +/* a checkpoint completed, process the images files */ +static int _on_ckpt_complete(struct step_record *step_ptr, uint32_t error_code) +{ + int status; + pid_t cpid; + + if (access(scch_path, R_OK | X_OK) < 0) { + info("Access denied for %s: %m", scch_path); + return SLURM_ERROR; + } + + if ((cpid = fork()) < 0) { + error ("_on_ckpt_complete: fork: %m"); + return SLURM_ERROR; + } + + if (cpid == 0) { + /* + * We don't fork and wait the child process because the job + * read lock is held. 
It could take minutes to delete/move + * the checkpoint image files. So there is a race condition + * of the user requesting another checkpoint before SCCH + * finishes. + */ + /* fork twice to avoid zombies */ + if ((cpid = fork()) < 0) { + error ("_on_ckpt_complete: second fork: %m"); + exit(127); + } + /* grand child execs */ + if (cpid == 0) { + char *args[6]; + char str_job[11]; + char str_step[11]; + char str_err[11]; + + /* + * XXX: if slurmctld is running as root, we must setuid here. + * But what if slurmctld is running as SlurmUser? + * How about we make scch setuid and pass the user/group to it? + */ + if (geteuid() == 0) { /* root */ + if (setgid(step_ptr->job_ptr->group_id) < 0) { + error ("_on_ckpt_complete: failed to " + "setgid: %m"); + exit(127); + } + if (setuid(step_ptr->job_ptr->user_id) < 0) { + error ("_on_ckpt_complete: failed to " + "setuid: %m"); + exit(127); + } + } + snprintf(str_job, sizeof(str_job), "%u", + step_ptr->job_ptr->job_id); + snprintf(str_step, sizeof(str_step), "%hu", + step_ptr->step_id); + snprintf(str_err, sizeof(str_err), "%u", + error_code); + + args[0] = scch_path; + args[1] = str_job; + args[2] = str_step; + args[3] = str_err; + args[4] = step_ptr->ckpt_path; + args[5] = NULL; + + execv(scch_path, args); + error("help! %m"); + exit(127); + } + /* child just exits */ + exit(0); + } + + while(1) { + if (waitpid(cpid, &status, 0) < 0 && errno == EINTR) + continue; + break; + } + + return SLURM_SUCCESS; +} diff --git a/src/plugins/crypto/Makefile.am b/src/plugins/crypto/Makefile.am new file mode 100644 index 000000000..c86ce2759 --- /dev/null +++ b/src/plugins/crypto/Makefile.am @@ -0,0 +1,3 @@ +# Makefile for crypto plugins + +SUBDIRS = munge openssl diff --git a/src/plugins/crypto/Makefile.in b/src/plugins/crypto/Makefile.in new file mode 100644 index 000000000..87447b847 --- /dev/null +++ b/src/plugins/crypto/Makefile.in @@ -0,0 +1,565 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
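The _on_ckpt_complete() routine above relies on the classic fork-twice-and-exec pattern so that slurmctld, which holds the job read lock, never waits on the long-running scch program and never accumulates zombies. The following is a minimal stand-alone sketch of that pattern, not the plugin's code; run_detached() and its argv handling are illustrative assumptions.

```c
/* Sketch of the double-fork used by _on_ckpt_complete(): the first child
 * forks again and exits immediately, so the long-running grandchild is
 * reparented to init and never becomes a zombie, while the caller only
 * reaps the short-lived first child. */
#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int run_detached(char *const argv[], uid_t uid, gid_t gid)
{
	pid_t pid = fork();

	if (pid < 0)
		return -1;

	if (pid == 0) {			/* first child */
		pid_t pid2 = fork();
		if (pid2 < 0)
			_exit(127);
		if (pid2 == 0) {	/* grandchild: drop privileges, exec */
			if ((geteuid() == 0) &&
			    ((setgid(gid) < 0) || (setuid(uid) < 0)))
				_exit(127);
			execv(argv[0], argv);
			_exit(127);	/* reached only if execv() failed */
		}
		_exit(0);		/* first child exits at once */
	}

	/* parent: reap the first child; the grandchild runs on its own */
	while ((waitpid(pid, NULL, 0) < 0) && (errno == EINTR))
		;
	return 0;
}
```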
+ +@SET_MAKE@ + +# Makefile for crypto plugins +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/crypto +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \ + html-recursive info-recursive install-data-recursive \ + install-dvi-recursive install-exec-recursive \ + install-html-recursive install-info-recursive \ + install-pdf-recursive install-ps-recursive install-recursive \ + installcheck-recursive installdirs-recursive pdf-recursive \ + ps-recursive uninstall-recursive +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +ETAGS = etags +CTAGS = ctags +DIST_SUBDIRS = $(SUBDIRS) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ 
+HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +SUBDIRS = munge openssl +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am 
$(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/plugins/crypto/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnu src/plugins/crypto/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +# This directory's subdirectories are mostly independent; you can cd +# into them and run `make' without going through this Makefile. +# To change the values of `make' variables: instead of editing Makefiles, +# (1) if the variable is set in `config.status', edit `config.status' +# (which will cause the Makefiles to be regenerated when you run `make'); +# (2) otherwise, pass the desired values on the `make' command line. +$(RECURSIVE_TARGETS): + @failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +$(RECURSIVE_CLEAN_TARGETS): + @failcom='exit 1'; \ + for f in x $$MAKEFLAGS; do \ + case $$f in \ + *=* | --[!k]*);; \ + *k*) failcom='fail=yes';; \ + esac; \ + done; \ + dot_seen=no; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + rev=''; for subdir in $$list; do \ + if test "$$subdir" = "."; then :; else \ + rev="$$subdir $$rev"; \ + fi; \ + done; \ + rev="$$rev ."; \ + target=`echo $@ | sed s/-recursive//`; \ + for subdir in $$rev; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done && test -z "$$fail" +tags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \ + done +ctags-recursive: + list='$(SUBDIRS)'; for subdir in $$list; do \ + test "$$subdir" = . 
|| (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \ + done + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! -f $$subdir/TAGS || \ + tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done + list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + distdir=`$(am__cd) $(distdir) && pwd`; \ + top_distdir=`$(am__cd) $(top_distdir) && pwd`; \ + (cd $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$top_distdir" \ + distdir="$$distdir/$$subdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: 
Makefile +installdirs: installdirs-recursive +installdirs-am: +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +info: info-recursive + +info-am: + +install-data-am: + +install-dvi: install-dvi-recursive + +install-exec-am: + +install-html: install-html-recursive + +install-info: install-info-recursive + +install-man: + +install-pdf: install-pdf-recursive + +install-ps: install-ps-recursive + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: + +.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \ + install-strip + +.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \ + all all-am check check-am clean clean-generic clean-libtool \ + ctags ctags-recursive distclean distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs installdirs-am maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-generic \ + mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \ + uninstall uninstall-am + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/crypto/munge/Makefile.am b/src/plugins/crypto/munge/Makefile.am new file mode 100644 index 000000000..319f41728 --- /dev/null +++ b/src/plugins/crypto/munge/Makefile.am @@ -0,0 +1,19 @@ +# Makefile for crypto/munge plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(MUNGE_CPPFLAGS) + +# Add your plugin to this line, following the naming conventions. 
+if WITH_MUNGE +MUNGE = crypto_munge.la +endif + +pkglib_LTLIBRARIES = $(MUNGE) + +# Munge crypto plugin +crypto_munge_la_SOURCES = crypto_munge.c +crypto_munge_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(MUNGE_LDFLAGS) +crypto_munge_la_LIBADD = $(MUNGE_LIBS) diff --git a/src/plugins/crypto/munge/Makefile.in b/src/plugins/crypto/munge/Makefile.in new file mode 100644 index 000000000..16c4f449f --- /dev/null +++ b/src/plugins/crypto/munge/Makefile.in @@ -0,0 +1,561 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for crypto/munge plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/crypto/munge +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +am__DEPENDENCIES_1 = +crypto_munge_la_DEPENDENCIES = $(am__DEPENDENCIES_1) +am_crypto_munge_la_OBJECTS = 
crypto_munge.lo +crypto_munge_la_OBJECTS = $(am_crypto_munge_la_OBJECTS) +crypto_munge_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(crypto_munge_la_LDFLAGS) $(LDFLAGS) -o $@ +@WITH_MUNGE_TRUE@am_crypto_munge_la_rpath = -rpath $(pkglibdir) +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(crypto_munge_la_SOURCES) +DIST_SOURCES = $(crypto_munge_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ 
+SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common $(MUNGE_CPPFLAGS) + +# Add your plugin to this line, following the naming conventions. +@WITH_MUNGE_TRUE@MUNGE = crypto_munge.la +pkglib_LTLIBRARIES = $(MUNGE) + +# Munge crypto plugin +crypto_munge_la_SOURCES = crypto_munge.c +crypto_munge_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(MUNGE_LDFLAGS) +crypto_munge_la_LIBADD = $(MUNGE_LIBS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/crypto/munge/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/crypto/munge/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +crypto_munge.la: $(crypto_munge_la_OBJECTS) $(crypto_munge_la_DEPENDENCIES) + $(crypto_munge_la_LINK) $(am_crypto_munge_la_rpath) $(crypto_munge_la_OBJECTS) $(crypto_munge_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/crypto_munge.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) 
$(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/crypto/munge/crypto_munge.c b/src/plugins/crypto/munge/crypto_munge.c new file mode 100644 index 000000000..99d4c28d2 --- /dev/null +++ b/src/plugins/crypto/munge/crypto_munge.c @@ -0,0 +1,211 @@ +/*****************************************************************************\ + * crypto_munge.c - Munge based cryptographic signature plugin + ***************************************************************************** + * Copyright (C) 2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Mark A. Grondona <mgrondona@llnl.gov>. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. 
If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#if HAVE_CONFIG_H +# include "config.h" +# if HAVE_INTTYPES_H +# include <inttypes.h> +# else /* ! HAVE_INTTYPES_H */ +# if HAVE_STDINT_H +# include <stdint.h> +# endif +# endif /* HAVE_INTTYPES_H */ +#else /* ! HAVE_CONFIG_H */ +# include <stdint.h> +#endif /* HAVE_CONFIG_H */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#define GPL_LICENSED 1 +#include <munge.h> + +#include <slurm/slurm_errno.h> +#include "src/common/slurm_protocol_api.h" +#include "src/common/xassert.h" +#include "src/common/xmalloc.h" +#include "src/common/xstring.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "auth" for SLURM authentication) and <method> is a + * description of how this plugin satisfies that application. SLURM will + * only load authentication plugins if the plugin_type string has a prefix + * of "auth/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the authentication API matures. + */ +const char plugin_name[] = "Munge cryptographic signature plugin"; +const char plugin_type[] = "crypto/munge"; +const uint32_t plugin_version = 90; + +static munge_err_t munge_err; + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + verbose("%s loaded", plugin_name); + return SLURM_SUCCESS; +} + +/* + * fini() is called when the plugin is unloaded, + * free any global memory allocations here to avoid memory leaks. 
+ */ +extern int fini ( void ) +{ + verbose("%s unloaded", plugin_name); + return SLURM_SUCCESS; +} + +extern void +crypto_destroy_key(void *key) +{ + munge_ctx_destroy((munge_ctx_t) key); + return; +} + +extern void * +crypto_read_private_key(const char *path) +{ + return (void *) munge_ctx_create(); +} + + +extern void * +crypto_read_public_key(const char *path) +{ + return (void *) munge_ctx_create(); +} + +extern char * +crypto_str_error(void) +{ + return (char *) munge_strerror(munge_err); +} + +/* NOTE: Caller must xfree the signature returned by sig_pp */ +extern int +crypto_sign(void * key, char *buffer, int buf_size, char **sig_pp, + unsigned int *sig_size_p) +{ + char *cred; + + munge_err = munge_encode(&cred, (munge_ctx_t) key, + buffer, buf_size); + + if (munge_err != EMUNGE_SUCCESS) + return SLURM_ERROR; + + *sig_size_p = strlen(cred) + 1; + *sig_pp = xstrdup(cred); + free(cred); + return SLURM_SUCCESS; +} + +extern int +crypto_verify_sign(void * key, char *buffer, unsigned int buf_size, + char *signature, unsigned int sig_size) +{ + static uid_t slurm_user = 0; + static int got_slurm_user = 0; + uid_t uid; + gid_t gid; + void *buf_out; + int buf_out_size; + + munge_err = munge_decode(signature, (munge_ctx_t) key, + &buf_out, &buf_out_size, + &uid, &gid); + + if (munge_err != EMUNGE_SUCCESS) + return SLURM_ERROR; + + if (!got_slurm_user) { + slurm_user = slurm_get_slurm_user_id(); + got_slurm_user = 1; + } + + if ((uid != slurm_user) && (uid != 0)) { + error("crypto/munge: bad user id (%d != %d)", + (int) slurm_user, (int) uid); + munge_err = EMUNGE_CRED_UNAUTHORIZED; + free(buf_out); + return SLURM_ERROR; + } + + if (buf_size != buf_out_size) { + error("crypto/munge: buf_size bad"); + munge_err = EMUNGE_CRED_INVALID; + free(buf_out); + return SLURM_ERROR; + } + + if (memcmp(buffer, buf_out, buf_size)) { + error("crypto/munge: buffers different"); + munge_err = EMUNGE_CRED_INVALID; + free(buf_out); + return SLURM_ERROR; + } + + free(buf_out); + return SLURM_SUCCESS; +} diff --git a/src/plugins/crypto/openssl/Makefile.am b/src/plugins/crypto/openssl/Makefile.am new file mode 100644 index 000000000..b55b6a344 --- /dev/null +++ b/src/plugins/crypto/openssl/Makefile.am @@ -0,0 +1,25 @@ +# Makefile for crypto/openssl plugin + +AUTOMAKE_OPTIONS = foreign + +if HAVE_OPENSSL +openssl_lib = crypto_openssl.la +else +openssl_lib = +endif + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = $(openssl_lib) + +if HAVE_OPENSSL +crypto_openssl_la_SOURCES = crypto_openssl.c + +crypto_openssl_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(SSL_LDFLAGS) +crypto_openssl_la_LIBADD = $(SSL_LIBS) + +else +EXTRA_crypto_openssl_la_SOURCES = crypto_openssl.c +endif diff --git a/src/plugins/crypto/openssl/Makefile.in b/src/plugins/crypto/openssl/Makefile.in new file mode 100644 index 000000000..6c06edf1c --- /dev/null +++ b/src/plugins/crypto/openssl/Makefile.in @@ -0,0 +1,564 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
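The crypto_sign()/crypto_verify_sign() pair shown earlier in crypto_munge.c wraps munge_encode() and munge_decode(): both "keys" are just MUNGE contexts, signing returns the encoded credential string, and verification decodes it and checks the payload and the encoding uid. A minimal usage sketch follows, assuming the entry points are called directly in the same translation unit (in SLURM they are resolved through the plugin loader); sign_and_verify_example() is an illustrative name, and a clean round trip additionally requires the credential to have been encoded by SlurmUser or root.

```c
/* Illustrative round trip through the crypto/munge entry points. */
static int sign_and_verify_example(void)
{
	void *priv = crypto_read_private_key(NULL);  /* MUNGE ignores the path */
	void *pub  = crypto_read_public_key(NULL);
	char payload[] = "job credential payload";
	char *sig = NULL;
	unsigned int sig_size = 0;
	int rc;

	rc = crypto_sign(priv, payload, sizeof(payload), &sig, &sig_size);
	if (rc == SLURM_SUCCESS)
		rc = crypto_verify_sign(pub, payload, sizeof(payload),
					sig, sig_size);
	if (rc != SLURM_SUCCESS)
		error("munge sign/verify: %s", crypto_str_error());

	xfree(sig);		/* crypto_sign() returns xstrdup()'d memory */
	crypto_destroy_key(priv);
	crypto_destroy_key(pub);
	return rc;
}
```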
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for crypto/openssl plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/crypto/openssl +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +am__DEPENDENCIES_1 = +@HAVE_OPENSSL_TRUE@crypto_openssl_la_DEPENDENCIES = \ +@HAVE_OPENSSL_TRUE@ $(am__DEPENDENCIES_1) +am__crypto_openssl_la_SOURCES_DIST = crypto_openssl.c +@HAVE_OPENSSL_TRUE@am_crypto_openssl_la_OBJECTS = crypto_openssl.lo +am__EXTRA_crypto_openssl_la_SOURCES_DIST = crypto_openssl.c +crypto_openssl_la_OBJECTS = $(am_crypto_openssl_la_OBJECTS) +crypto_openssl_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ + $(crypto_openssl_la_LDFLAGS) $(LDFLAGS) -o $@ +@HAVE_OPENSSL_TRUE@am_crypto_openssl_la_rpath = -rpath $(pkglibdir) +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) 
$(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(crypto_openssl_la_SOURCES) \ + $(EXTRA_crypto_openssl_la_SOURCES) +DIST_SOURCES = $(am__crypto_openssl_la_SOURCES_DIST) \ + $(am__EXTRA_crypto_openssl_la_SOURCES_DIST) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = 
@abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +@HAVE_OPENSSL_FALSE@openssl_lib = +@HAVE_OPENSSL_TRUE@openssl_lib = crypto_openssl.la +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = $(openssl_lib) +@HAVE_OPENSSL_TRUE@crypto_openssl_la_SOURCES = crypto_openssl.c +@HAVE_OPENSSL_TRUE@crypto_openssl_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(SSL_LDFLAGS) +@HAVE_OPENSSL_TRUE@crypto_openssl_la_LIBADD = $(SSL_LIBS) +@HAVE_OPENSSL_FALSE@EXTRA_crypto_openssl_la_SOURCES = crypto_openssl.c +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/crypto/openssl/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/crypto/openssl/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +crypto_openssl.la: $(crypto_openssl_la_OBJECTS) $(crypto_openssl_la_DEPENDENCIES) + $(crypto_openssl_la_LINK) $(am_crypto_openssl_la_rpath) $(crypto_openssl_la_OBJECTS) $(crypto_openssl_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/crypto_openssl.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: 
$(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/crypto/openssl/crypto_openssl.c b/src/plugins/crypto/openssl/crypto_openssl.c new file mode 100644 index 000000000..2d37e14d4 --- /dev/null +++ b/src/plugins/crypto/openssl/crypto_openssl.c @@ -0,0 +1,230 @@ +/*****************************************************************************\ + * crypto_openssl.c - OpenSSL based cryptographic signature plugin + ***************************************************************************** + * Copyright (C) 2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Mark A. Grondona <mgrondona@llnl.gov>. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. 
If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#if HAVE_CONFIG_H +# include "config.h" +# if HAVE_INTTYPES_H +# include <inttypes.h> +# else /* ! HAVE_INTTYPES_H */ +# if HAVE_STDINT_H +# include <stdint.h> +# endif +# endif /* HAVE_INTTYPES_H */ +#else /* ! HAVE_CONFIG_H */ +# include <stdint.h> +#endif /* HAVE_CONFIG_H */ + +#include <stdio.h> + +/* + * OpenSSL includes + */ +#include <openssl/evp.h> +#include <openssl/pem.h> +#include <openssl/err.h> + +#include <slurm/slurm_errno.h> +#include "src/common/xassert.h" +#include "src/common/xmalloc.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "auth" for SLURM authentication) and <method> is a + * description of how this plugin satisfies that application. SLURM will + * only load authentication plugins if the plugin_type string has a prefix + * of "auth/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the authentication API matures. + */ +const char plugin_name[] = "OpenSSL cryptographic signature plugin"; +const char plugin_type[] = "crypto/openssl"; +const uint32_t plugin_version = 90; + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + verbose("%s loaded", plugin_name); + return SLURM_SUCCESS; +} + +/* + * fini() is called when the plugin is unloaded, + * free any global memory allocations here to avoid memory leaks. 
+ */ +extern int fini ( void ) +{ + verbose("%s unloaded", plugin_name); + return SLURM_SUCCESS; +} + +extern void +crypto_destroy_key(void *key) +{ + if (key) + EVP_PKEY_free((EVP_PKEY *) key); +} + +extern void * +crypto_read_private_key(const char *path) +{ + FILE *fp = NULL; + EVP_PKEY *pk = NULL; + + xassert(path != NULL); + + if (!(fp = fopen(path, "r"))) + return NULL; + + if (!PEM_read_PrivateKey(fp, &pk, NULL, NULL)) { + fclose(fp); + return NULL; + } + fclose(fp); + + return (void *) pk; +} + + +extern void * +crypto_read_public_key(const char *path) +{ + FILE *fp = NULL; + EVP_PKEY *pk = NULL; + + xassert(path != NULL); + + if ((fp = fopen(path, "r")) == NULL) + return NULL; + + if (!PEM_read_PUBKEY(fp, &pk, NULL, NULL)) { + fclose(fp); + return NULL; + } + fclose(fp); + + return (void *) pk; +} + +extern char * +crypto_str_error(void) +{ + static int loaded = 0; + + if (loaded == 0) { + ERR_load_crypto_strings(); + loaded = 1; + } + + return (char *) ERR_reason_error_string(ERR_get_error()); +} + +/* NOTE: Caller must xfree the signature returned by sig_pp */ +extern int +crypto_sign(void * key, char *buffer, int buf_size, char **sig_pp, + unsigned int *sig_size_p) +{ + EVP_MD_CTX ectx; + int rc = SLURM_SUCCESS; + int ksize = EVP_PKEY_size((EVP_PKEY *) key); + + /* + * Allocate memory for signature: at most EVP_PKEY_size() bytes + */ + *sig_pp = xmalloc(ksize * sizeof(unsigned char)); + + EVP_SignInit(&ectx, EVP_sha1()); + EVP_SignUpdate(&ectx, buffer, buf_size); + + if (!(EVP_SignFinal(&ectx, (unsigned char *)*sig_pp, sig_size_p, + (EVP_PKEY *) key))) { + rc = SLURM_ERROR; + } + +#ifdef HAVE_EVP_MD_CTX_CLEANUP + /* Note: Likely memory leak if this function is absent */ + EVP_MD_CTX_cleanup(&ectx); +#endif + + return rc; +} + +extern int +crypto_verify_sign(void * key, char *buffer, unsigned int buf_size, + char *signature, unsigned int sig_size) +{ + EVP_MD_CTX ectx; + int rc; + + EVP_VerifyInit(&ectx, EVP_sha1()); + EVP_VerifyUpdate(&ectx, buffer, buf_size); + + rc = EVP_VerifyFinal(&ectx, (unsigned char *) signature, + sig_size, (EVP_PKEY *) key); + if (!rc) + rc = SLURM_ERROR; + else + rc = SLURM_SUCCESS; + +#ifdef HAVE_EVP_MD_CTX_CLEANUP + /* Note: Likely memory leak if this function is absent */ + EVP_MD_CTX_cleanup(&ectx); +#endif + + return rc; +} diff --git a/src/plugins/jobacct/aix/Makefile.am b/src/plugins/jobacct/aix/Makefile.am deleted file mode 100644 index 0c59144dd..000000000 --- a/src/plugins/jobacct/aix/Makefile.am +++ /dev/null @@ -1,19 +0,0 @@ -# Makefile for jobacct/aix plugin - -AUTOMAKE_OPTIONS = foreign - -PLUGIN_FLAGS = -module -avoid-version --export-dynamic - -INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common - -pkglib_LTLIBRARIES = jobacct_aix.la - -# Null job completion logging plugin. 
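The crypto_openssl.c entry points added above (crypto_read_private_key, crypto_read_public_key, crypto_sign, crypto_verify_sign, crypto_destroy_key) make up the signature API this plugin exposes for job credentials. The following is an illustrative sketch only, not material from this patch: it assumes the plugin object is linked directly rather than loaded through the SLURM plugin loader, and the function name example_sign_and_verify() plus the key-file paths are placeholders (a real installation would take the paths from slurm.conf, e.g. JobCredentialPrivateKey / JobCredentialPublicCertificate).

/*
 * Illustrative sketch, assuming direct linkage against the plugin's
 * symbols declared above; names and paths are placeholders.
 */
#include <string.h>
#include <slurm/slurm_errno.h>
#include "src/common/xmalloc.h"

extern void *crypto_read_private_key(const char *path);
extern void *crypto_read_public_key(const char *path);
extern void  crypto_destroy_key(void *key);
extern int   crypto_sign(void *key, char *buffer, int buf_size,
			 char **sig_pp, unsigned int *sig_size_p);
extern int   crypto_verify_sign(void *key, char *buffer,
				unsigned int buf_size,
				char *signature, unsigned int sig_size);

int example_sign_and_verify(void)
{
	char payload[] = "job credential payload";	/* placeholder data */
	char *sig = NULL;
	unsigned int sig_size = 0;
	/* Placeholder key paths; see slurm.conf for the configured ones. */
	void *priv = crypto_read_private_key("/etc/slurm/slurm.key");
	void *pub  = crypto_read_public_key("/etc/slurm/slurm.cert");
	int rc = SLURM_ERROR;

	if (priv && pub &&
	    (crypto_sign(priv, payload, strlen(payload),
			 &sig, &sig_size) == SLURM_SUCCESS))
		rc = crypto_verify_sign(pub, payload, strlen(payload),
					sig, sig_size);

	xfree(sig);		/* caller must xfree the signature */
	crypto_destroy_key(priv);
	crypto_destroy_key(pub);
	return rc;	/* SLURM_SUCCESS only if the signature verifies */
}

As shown, the signing side allocates the signature buffer on the caller's behalf (at most EVP_PKEY_size() bytes, per the comment in crypto_sign), so the caller frees it with xfree once the verify side is done with it.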
-jobacct_aix_la_SOURCES = jobacct_aix.c \ - $(top_builddir)/src/plugins/jobacct/common/jobacct_common.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c \ - $(top_builddir)/src/slurmd/common/proctrack.c \ - $(top_builddir)/src/slurmd/common/proctrack.h - -jobacct_aix_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) diff --git a/src/plugins/jobacct/common/common_slurmctld.c b/src/plugins/jobacct/common/common_slurmctld.c deleted file mode 100644 index 24dec3a79..000000000 --- a/src/plugins/jobacct/common/common_slurmctld.c +++ /dev/null @@ -1,537 +0,0 @@ -/*****************************************************************************\ - * jobacct_common.c - common functions for almost all jobacct plugins. - ***************************************************************************** - * - * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. - * Written by Danny Auble, <da@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * This file is patterned after jobcomp_linux.c, written by Morris Jette and - * Copyright (C) 2002 The Regents of the University of California. 
-\*****************************************************************************/ -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include "jobacct_common.h" - -static FILE * LOGFILE; -static int LOGFILE_FD; -static pthread_mutex_t logfile_lock = PTHREAD_MUTEX_INITIALIZER; -static char * log_file = NULL; -static int init; -/* Format of the JOB_STEP record */ -const char *_jobstep_format = -"%d " -"%u " /* stepid */ -"%d " /* completion status */ -"%u " /* completion code */ -"%u " /* nprocs */ -"%u " /* number of cpus */ -"%u " /* elapsed seconds */ -"%u " /* total cputime seconds */ -"%u " /* total cputime microseconds */ -"%u " /* user seconds */ -"%u " /* user microseconds */ -"%u " /* system seconds */ -"%u " /* system microseconds */ -"%u " /* max rss */ -"%u " /* max ixrss */ -"%u " /* max idrss */ -"%u " /* max isrss */ -"%u " /* max minflt */ -"%u " /* max majflt */ -"%u " /* max nswap */ -"%u " /* total inblock */ -"%u " /* total outblock */ -"%u " /* total msgsnd */ -"%u " /* total msgrcv */ -"%u " /* total nsignals */ -"%u " /* total nvcsw */ -"%u " /* total nivcsw */ -"%u " /* max vsize */ -"%u " /* max vsize task */ -"%.2f " /* ave vsize */ -"%u " /* max rss */ -"%u " /* max rss task */ -"%.2f " /* ave rss */ -"%u " /* max pages */ -"%u " /* max pages task */ -"%.2f " /* ave pages */ -"%.2f " /* min cpu */ -"%u " /* min cpu task */ -"%.2f " /* ave cpu */ -"%s " /* step process name */ -"%s " /* step node names */ -"%u " /* max vsize node */ -"%u " /* max rss node */ -"%u " /* max pages node */ -"%u " /* min cpu node */ -"%s " /* account */ -"%u"; /* requester user id */ - -/* - * Print the record to the log file. - */ - -static int _print_record(struct job_record *job_ptr, - time_t time, char *data) -{ - static int rc=SLURM_SUCCESS; - char *block_id = NULL; - if(!job_ptr->details) { - error("job_acct: job=%u doesn't exist", job_ptr->job_id); - return SLURM_ERROR; - } - debug2("_print_record, job=%u, \"%s\"", - job_ptr->job_id, data); -#ifdef HAVE_BG - select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_BLOCK_ID, - &block_id); - -#endif - if(!block_id) - block_id = xstrdup("-"); - - slurm_mutex_lock( &logfile_lock ); - - if (fprintf(LOGFILE, - "%u %s %u %u %d %d %s - %s\n", - job_ptr->job_id, job_ptr->partition, - (int)job_ptr->details->submit_time, (int)time, - job_ptr->user_id, job_ptr->group_id, block_id, data) - < 0) - rc=SLURM_ERROR; -#ifdef HAVE_FDATASYNC - fdatasync(LOGFILE_FD); -#endif - slurm_mutex_unlock( &logfile_lock ); - xfree(block_id); - - return rc; -} - -extern int common_init_slurmctld(char *job_acct_log) -{ - int rc = SLURM_SUCCESS; - mode_t prot = 0600; - struct stat statbuf; - - debug2("jobacct_init() called"); - slurm_mutex_lock( &logfile_lock ); - if (LOGFILE) - fclose(LOGFILE); - log_file=job_acct_log; - if (*log_file != '/') - fatal("JobAcctLogfile must specify an absolute pathname"); - if (stat(log_file, &statbuf)==0) /* preserve current file mode */ - prot = statbuf.st_mode; - LOGFILE = fopen(log_file, "a"); - if (LOGFILE == NULL) { - error("open %s: %m", log_file); - init = 0; - slurm_mutex_unlock( &logfile_lock ); - return SLURM_ERROR; - } else - chmod(log_file, prot); - if (setvbuf(LOGFILE, NULL, _IOLBF, 0)) - error("setvbuf() failed"); - LOGFILE_FD = fileno(LOGFILE); - slurm_mutex_unlock( &logfile_lock ); - init = 1; - return rc; -} - -extern int common_fini_slurmctld() -{ - if (LOGFILE) - fclose(LOGFILE); - return SLURM_SUCCESS; -} - -extern int common_job_start_slurmctld(struct job_record *job_ptr) -{ - int i, - ncpus=0, - 
rc=SLURM_SUCCESS, - tmp; - char buf[BUFFER_SIZE], *jname, *account, *nodes; - long priority; - int track_steps = 0; - - if(!init) { - debug("jobacct init was not called or it failed"); - return SLURM_ERROR; - } - - debug2("jobacct_job_start() called"); - - if (job_ptr->start_time == 0) { - /* This function is called when a job becomes elligible to run - * in order to record reserved time (a measure of system - * over-subscription). We only use this in the Gold plugin. */ - return SLURM_SUCCESS; - } - - for (i=0; i < job_ptr->num_cpu_groups; i++) - ncpus += (job_ptr->cpus_per_node[i]) - * (job_ptr->cpu_count_reps[i]); - priority = (job_ptr->priority == NO_VAL) ? - -1L : (long) job_ptr->priority; - - if ((tmp = strlen(job_ptr->name))) { - jname = xmalloc(++tmp); - for (i=0; i<tmp; i++) { - if (isspace(job_ptr->name[i])) - jname[i]='_'; - else - jname[i]=job_ptr->name[i]; - } - } else { - jname = xstrdup("allocation"); - track_steps = 1; - } - - if (job_ptr->account && job_ptr->account[0]) - account = job_ptr->account; - else - account = "(null)"; - if (job_ptr->nodes && job_ptr->nodes[0]) - nodes = job_ptr->nodes; - else - nodes = "(null)"; - - if(job_ptr->batch_flag) - track_steps = 1; - - job_ptr->requid = -1; /* force to -1 for sacct to know this - * hasn't been set yet */ - - tmp = snprintf(buf, BUFFER_SIZE, - "%d %s %d %ld %u %s %s", - JOB_START, jname, - track_steps, priority, job_ptr->num_procs, - nodes, account); - - rc = _print_record(job_ptr, job_ptr->start_time, buf); - - xfree(jname); - return rc; -} - -extern int common_job_complete_slurmctld(struct job_record *job_ptr) -{ - char buf[BUFFER_SIZE]; - if(!init) { - debug("jobacct init was not called or it failed"); - return SLURM_ERROR; - } - - debug2("jobacct_job_complete() called"); - if (job_ptr->end_time == 0) { - debug("jobacct: job %u never started", job_ptr->job_id); - return SLURM_ERROR; - } - /* leave the requid as a %d since we want to see if it is -1 - in sacct */ - snprintf(buf, BUFFER_SIZE, "%d %u %d %u", - JOB_TERMINATED, - (int) (job_ptr->end_time - job_ptr->start_time), - job_ptr->job_state & (~JOB_COMPLETING), - job_ptr->requid); - - return _print_record(job_ptr, job_ptr->end_time, buf); -} - -extern int common_step_start_slurmctld(struct step_record *step) -{ - char buf[BUFFER_SIZE]; - int cpus = 0; - char node_list[BUFFER_SIZE]; -#ifdef HAVE_BG - char *ionodes = NULL; -#endif - float float_tmp = 0; - char *account; - - if(!init) { - debug("jobacct init was not called or it failed"); - return SLURM_ERROR; - } - -#ifdef HAVE_BG - cpus = step->job_ptr->num_procs; - select_g_get_jobinfo(step->job_ptr->select_jobinfo, - SELECT_DATA_IONODES, - &ionodes); - if(ionodes) { - snprintf(node_list, BUFFER_SIZE, - "%s[%s]", step->job_ptr->nodes, ionodes); - xfree(ionodes); - } else - snprintf(node_list, BUFFER_SIZE, "%s", - step->job_ptr->nodes); - -#else - if(!step->step_layout || !step->step_layout->task_cnt) { - cpus = step->job_ptr->num_procs; - snprintf(node_list, BUFFER_SIZE, "%s", step->job_ptr->nodes); - } else { - cpus = step->step_layout->task_cnt; - snprintf(node_list, BUFFER_SIZE, "%s", - step->step_layout->node_list); - } -#endif - if (step->job_ptr->account && step->job_ptr->account[0]) - account = step->job_ptr->account; - else - account = "(null)"; - - step->job_ptr->requid = -1; /* force to -1 for sacct to know this - * hasn't been set yet */ - - snprintf(buf, BUFFER_SIZE, _jobstep_format, - JOB_STEP, - step->step_id, /* stepid */ - JOB_RUNNING, /* completion status */ - 0, /* completion code */ - cpus, /* 
number of tasks */ - cpus, /* number of cpus */ - 0, /* elapsed seconds */ - 0, /* total cputime seconds */ - 0, /* total cputime seconds */ - 0, /* user seconds */ - 0,/* user microseconds */ - 0, /* system seconds */ - 0,/* system microsecs */ - 0, /* max rss */ - 0, /* max ixrss */ - 0, /* max idrss */ - 0, /* max isrss */ - 0, /* max minflt */ - 0, /* max majflt */ - 0, /* max nswap */ - 0, /* total inblock */ - 0, /* total outblock */ - 0, /* total msgsnd */ - 0, /* total msgrcv */ - 0, /* total nsignals */ - 0, /* total nvcsw */ - 0, /* total nivcsw */ - 0, /* max vsize */ - 0, /* max vsize task */ - float_tmp, /* ave vsize */ - 0, /* max rss */ - 0, /* max rss task */ - float_tmp, /* ave rss */ - 0, /* max pages */ - 0, /* max pages task */ - float_tmp, /* ave pages */ - float_tmp, /* min cpu */ - 0, /* min cpu task */ - float_tmp, /* ave cpu */ - step->name, /* step exe name */ - node_list, /* name of nodes step running on */ - 0, /* max vsize node */ - 0, /* max rss node */ - 0, /* max pages node */ - 0, /* min cpu node */ - account, - step->job_ptr->requid); /* requester user id */ - - return _print_record(step->job_ptr, step->start_time, buf); -} - -extern int common_step_complete_slurmctld(struct step_record *step) -{ - char buf[BUFFER_SIZE]; - time_t now; - int elapsed; - int comp_status; - int cpus = 0; - char node_list[BUFFER_SIZE]; - struct jobacctinfo *jobacct = (struct jobacctinfo *)step->jobacct; -#ifdef HAVE_BG - char *ionodes = NULL; -#endif - float ave_vsize = 0, ave_rss = 0, ave_pages = 0; - float ave_cpu = 0, ave_cpu2 = 0; - char *account; - - if(!init) { - debug("jobacct init was not called or it failed"); - return SLURM_ERROR; - } - - now = time(NULL); - - if ((elapsed=now-step->start_time)<0) - elapsed=0; /* For *very* short jobs, if clock is wrong */ - if (step->exit_code) - comp_status = JOB_FAILED; - else - comp_status = JOB_COMPLETE; - -#ifdef HAVE_BG - cpus = step->job_ptr->num_procs; - select_g_get_jobinfo(step->job_ptr->select_jobinfo, - SELECT_DATA_IONODES, - &ionodes); - if(ionodes) { - snprintf(node_list, BUFFER_SIZE, - "%s[%s]", step->job_ptr->nodes, ionodes); - xfree(ionodes); - } else - snprintf(node_list, BUFFER_SIZE, "%s", - step->job_ptr->nodes); - -#else - if(!step->step_layout || !step->step_layout->task_cnt) { - cpus = step->job_ptr->num_procs; - snprintf(node_list, BUFFER_SIZE, "%s", step->job_ptr->nodes); - - } else { - cpus = step->step_layout->task_cnt; - snprintf(node_list, BUFFER_SIZE, "%s", - step->step_layout->node_list); - } -#endif - /* figure out the ave of the totals sent */ - if(cpus > 0) { - ave_vsize = jobacct->tot_vsize; - ave_vsize /= cpus; - ave_rss = jobacct->tot_rss; - ave_rss /= cpus; - ave_pages = jobacct->tot_pages; - ave_pages /= cpus; - ave_cpu = jobacct->tot_cpu; - ave_cpu /= cpus; - ave_cpu /= 100; - } - - if(jobacct->min_cpu != (uint32_t)NO_VAL) { - ave_cpu2 = jobacct->min_cpu; - ave_cpu2 /= 100; - } - - if (step->job_ptr->account && step->job_ptr->account[0]) - account = step->job_ptr->account; - else - account = "(null)"; - - snprintf(buf, BUFFER_SIZE, _jobstep_format, - JOB_STEP, - step->step_id, /* stepid */ - comp_status, /* completion status */ - step->exit_code, /* completion code */ - cpus, /* number of tasks */ - cpus, /* number of cpus */ - elapsed, /* elapsed seconds */ - /* total cputime seconds */ - jobacct->rusage.ru_utime.tv_sec - + jobacct->rusage.ru_stime.tv_sec, - /* total cputime seconds */ - jobacct->rusage.ru_utime.tv_usec - + jobacct->rusage.ru_stime.tv_usec, - jobacct->rusage.ru_utime.tv_sec, 
/* user seconds */ - jobacct->rusage.ru_utime.tv_usec,/* user microseconds */ - jobacct->rusage.ru_stime.tv_sec, /* system seconds */ - jobacct->rusage.ru_stime.tv_usec,/* system microsecs */ - jobacct->rusage.ru_maxrss, /* max rss */ - jobacct->rusage.ru_ixrss, /* max ixrss */ - jobacct->rusage.ru_idrss, /* max idrss */ - jobacct->rusage.ru_isrss, /* max isrss */ - jobacct->rusage.ru_minflt, /* max minflt */ - jobacct->rusage.ru_majflt, /* max majflt */ - jobacct->rusage.ru_nswap, /* max nswap */ - jobacct->rusage.ru_inblock, /* total inblock */ - jobacct->rusage.ru_oublock, /* total outblock */ - jobacct->rusage.ru_msgsnd, /* total msgsnd */ - jobacct->rusage.ru_msgrcv, /* total msgrcv */ - jobacct->rusage.ru_nsignals, /* total nsignals */ - jobacct->rusage.ru_nvcsw, /* total nvcsw */ - jobacct->rusage.ru_nivcsw, /* total nivcsw */ - jobacct->max_vsize, /* max vsize */ - jobacct->max_vsize_id.taskid, /* max vsize node */ - ave_vsize, /* ave vsize */ - jobacct->max_rss, /* max vsize */ - jobacct->max_rss_id.taskid, /* max rss node */ - ave_rss, /* ave rss */ - jobacct->max_pages, /* max pages */ - jobacct->max_pages_id.taskid, /* max pages node */ - ave_pages, /* ave pages */ - ave_cpu2, /* min cpu */ - jobacct->min_cpu_id.taskid, /* min cpu node */ - ave_cpu, /* ave cpu */ - step->name, /* step exe name */ - node_list, /* name of nodes step running on */ - jobacct->max_vsize_id.nodeid, /* max vsize task */ - jobacct->max_rss_id.nodeid, /* max rss task */ - jobacct->max_pages_id.nodeid, /* max pages task */ - jobacct->min_cpu_id.nodeid, /* min cpu task */ - account, - step->job_ptr->requid); /* requester user id */ - - return _print_record(step->job_ptr, now, buf); -} - -extern int common_suspend_slurmctld(struct job_record *job_ptr) -{ - char buf[BUFFER_SIZE]; - static time_t now = 0; - static time_t temp = 0; - int elapsed; - if(!init) { - debug("jobacct init was not called or it failed"); - return SLURM_ERROR; - } - - /* tell what time has passed */ - if(!now) - now = job_ptr->start_time; - temp = now; - now = time(NULL); - - if ((elapsed=now-temp) < 0) - elapsed=0; /* For *very* short jobs, if clock is wrong */ - - /* here we are really just going for a marker in time to tell when - * the process was suspended or resumed (check job state), we don't - * really need to keep track of anything else */ - snprintf(buf, BUFFER_SIZE, "%d %u %d", - JOB_SUSPEND, - elapsed, - job_ptr->job_state & (~JOB_COMPLETING));/* job status */ - - return _print_record(job_ptr, now, buf); - -} diff --git a/src/plugins/jobacct/common/common_slurmstepd.c b/src/plugins/jobacct/common/common_slurmstepd.c deleted file mode 100644 index 22b1222cf..000000000 --- a/src/plugins/jobacct/common/common_slurmstepd.c +++ /dev/null @@ -1,170 +0,0 @@ -/*****************************************************************************\ - * jobacct_common.c - common functions for almost all jobacct plugins. - ***************************************************************************** - * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. - * Written by Danny Auble, <da@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * This file is patterned after jobcomp_linux.c, written by Morris Jette and - * Copyright (C) 2002 The Regents of the University of California. -\*****************************************************************************/ - -#include "jobacct_common.h" - -bool jobacct_shutdown = false; -bool suspended = false; -List task_list = NULL; -pthread_mutex_t jobacct_lock = PTHREAD_MUTEX_INITIALIZER; -uint32_t cont_id = (uint32_t)NO_VAL; -bool pgid_plugin = false; - -extern int common_endpoll() -{ - jobacct_shutdown = true; - - return SLURM_SUCCESS; -} - -extern int common_set_proctrack_container_id(uint32_t id) -{ - if(pgid_plugin) - return SLURM_SUCCESS; - - if(cont_id != (uint32_t)NO_VAL) - info("Warning: jobacct: set_proctrack_container_id: " - "cont_id is already set to %d you are setting it to %d", - cont_id, id); - if((int)id <= 0) { - error("jobacct: set_proctrack_container_id: " - "I was given most likely an unset cont_id %d", - id); - return SLURM_ERROR; - } - cont_id = id; - - return SLURM_SUCCESS; -} - -extern int common_add_task(pid_t pid, jobacct_id_t *jobacct_id) -{ - struct jobacctinfo *jobacct = common_alloc_jobacct(jobacct_id); - - slurm_mutex_lock(&jobacct_lock); - if(pid <= 0) { - error("invalid pid given (%d) for task acct", pid); - goto error; - } else if (!task_list) { - error("no task list created!"); - goto error; - } - - jobacct->pid = pid; - jobacct->min_cpu = 0; - debug2("adding task %u pid %d on node %u to jobacct", - jobacct_id->taskid, pid, jobacct_id->nodeid); - list_push(task_list, jobacct); - slurm_mutex_unlock(&jobacct_lock); - - return SLURM_SUCCESS; -error: - slurm_mutex_unlock(&jobacct_lock); - common_free_jobacct(jobacct); - return SLURM_ERROR; -} - -extern struct jobacctinfo *common_stat_task(pid_t pid) -{ - struct jobacctinfo *jobacct = NULL; - struct jobacctinfo *ret_jobacct = NULL; - ListIterator itr = NULL; - - slurm_mutex_lock(&jobacct_lock); - if (!task_list) { - error("no task list created!"); - goto error; - } - - itr = list_iterator_create(task_list); - while((jobacct = list_next(itr))) { - if(jobacct->pid == pid) - break; - } - list_iterator_destroy(itr); - ret_jobacct = xmalloc(sizeof(struct jobacctinfo)); - memcpy(ret_jobacct, jobacct, sizeof(struct jobacctinfo)); -error: - slurm_mutex_unlock(&jobacct_lock); - return ret_jobacct; -} - -extern struct 
jobacctinfo *common_remove_task(pid_t pid) -{ - struct jobacctinfo *jobacct = NULL; - struct jobacctinfo *ret_jobacct = NULL; - ListIterator itr = NULL; - - slurm_mutex_lock(&jobacct_lock); - if (!task_list) { - error("no task list created!"); - goto error; - } - - itr = list_iterator_create(task_list); - while((jobacct = list_next(itr))) { - if(jobacct->pid == pid) { - list_remove(itr); - break; - } - } - list_iterator_destroy(itr); - if(jobacct) { - debug2("removing task %u pid %d from jobacct", - jobacct->max_vsize_id.taskid, jobacct->pid); - ret_jobacct = xmalloc(sizeof(struct jobacctinfo)); - memcpy(ret_jobacct, jobacct, sizeof(struct jobacctinfo)); - common_free_jobacct(jobacct); - } else { - error("pid(%d) not being watched in jobacct!", pid); - } -error: - slurm_mutex_unlock(&jobacct_lock); - return ret_jobacct; -} - -extern void common_suspend_poll() -{ - suspended = true; -} - -extern void common_resume_poll() -{ - suspended = false; -} diff --git a/src/plugins/jobacct/common/jobacct_common.c b/src/plugins/jobacct/common/jobacct_common.c deleted file mode 100644 index afa63b949..000000000 --- a/src/plugins/jobacct/common/jobacct_common.c +++ /dev/null @@ -1,461 +0,0 @@ -/*****************************************************************************\ - * jobacct_common.c - common functions for almost all jobacct plugins. - ***************************************************************************** - * - * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. - * Written by Danny Auble, <da@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * This file is patterned after jobcomp_linux.c, written by Morris Jette and - * Copyright (C) 2002 The Regents of the University of California. 
-\*****************************************************************************/ - -#include "jobacct_common.h" - -static void _pack_jobacct_id(jobacct_id_t *jobacct_id, Buf buffer) -{ - pack32((uint32_t)jobacct_id->nodeid, buffer); - pack16((uint16_t)jobacct_id->taskid, buffer); -} - -static int _unpack_jobacct_id(jobacct_id_t *jobacct_id, Buf buffer) -{ - safe_unpack32(&jobacct_id->nodeid, buffer); - safe_unpack16(&jobacct_id->taskid, buffer); - return SLURM_SUCCESS; -unpack_error: - return SLURM_ERROR; -} - -extern int common_init_struct(struct jobacctinfo *jobacct, - jobacct_id_t *jobacct_id) -{ - if(!jobacct_id) { - jobacct_id_t temp_id; - temp_id.taskid = (uint16_t)NO_VAL; - temp_id.nodeid = (uint32_t)NO_VAL; - jobacct_id = &temp_id; - } - jobacct->rusage.ru_utime.tv_sec = 0; - jobacct->rusage.ru_utime.tv_usec = 0; - jobacct->rusage.ru_stime.tv_sec = 0; - jobacct->rusage.ru_stime.tv_usec = 0; - jobacct->rusage.ru_maxrss = 0; - jobacct->rusage.ru_ixrss = 0; - jobacct->rusage.ru_idrss = 0; - jobacct->rusage.ru_isrss = 0; - jobacct->rusage.ru_minflt = 0; - jobacct->rusage.ru_majflt = 0; - jobacct->rusage.ru_nswap = 0; - jobacct->rusage.ru_inblock = 0; - jobacct->rusage.ru_oublock = 0; - jobacct->rusage.ru_msgsnd = 0; - jobacct->rusage.ru_msgrcv = 0; - jobacct->rusage.ru_nsignals = 0; - jobacct->rusage.ru_nvcsw = 0; - jobacct->rusage.ru_nivcsw = 0; - - jobacct->max_vsize = 0; - memcpy(&jobacct->max_vsize_id, jobacct_id, sizeof(jobacct_id_t)); - jobacct->tot_vsize = 0; - jobacct->max_rss = 0; - memcpy(&jobacct->max_rss_id, jobacct_id, sizeof(jobacct_id_t)); - jobacct->tot_rss = 0; - jobacct->max_pages = 0; - memcpy(&jobacct->max_pages_id, jobacct_id, sizeof(jobacct_id_t)); - jobacct->tot_pages = 0; - jobacct->min_cpu = (uint32_t)NO_VAL; - memcpy(&jobacct->min_cpu_id, jobacct_id, sizeof(jobacct_id_t)); - jobacct->tot_cpu = 0; - - return SLURM_SUCCESS; -} - -extern struct jobacctinfo *common_alloc_jobacct(jobacct_id_t *jobacct_id) -{ - struct jobacctinfo *jobacct = xmalloc(sizeof(struct jobacctinfo)); - common_init_struct(jobacct, jobacct_id); - return jobacct; -} - -extern void common_free_jobacct(void *object) -{ - struct jobacctinfo *jobacct = (struct jobacctinfo *)object; - xfree(jobacct); - jobacct = NULL; -} - -extern int common_setinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - int rc = SLURM_SUCCESS; - int *fd = (int *)data; - uint32_t *uint32 = (uint32_t *) data; - jobacct_id_t *jobacct_id = (jobacct_id_t *) data; - struct rusage *rusage = (struct rusage *) data; - struct jobacctinfo *send = (struct jobacctinfo *) data; - - slurm_mutex_lock(&jobacct_lock); - switch (type) { - case JOBACCT_DATA_TOTAL: - memcpy(jobacct, send, sizeof(struct jobacctinfo)); - break; - case JOBACCT_DATA_PIPE: - safe_write(*fd, jobacct, sizeof(struct jobacctinfo)); - break; - case JOBACCT_DATA_RUSAGE: - memcpy(&jobacct->rusage, rusage, sizeof(struct rusage)); - break; - case JOBACCT_DATA_MAX_RSS: - jobacct->max_rss = *uint32; - break; - case JOBACCT_DATA_MAX_RSS_ID: - jobacct->max_rss_id = *jobacct_id; - break; - case JOBACCT_DATA_TOT_RSS: - jobacct->tot_rss = *uint32; - break; - case JOBACCT_DATA_MAX_VSIZE: - jobacct->max_vsize = *uint32; - break; - case JOBACCT_DATA_MAX_VSIZE_ID: - jobacct->max_vsize_id = *jobacct_id; - break; - case JOBACCT_DATA_TOT_VSIZE: - jobacct->tot_vsize = *uint32; - break; - case JOBACCT_DATA_MAX_PAGES: - jobacct->max_pages = *uint32; - break; - case JOBACCT_DATA_MAX_PAGES_ID: - jobacct->max_pages_id = *jobacct_id; - break; - case 
JOBACCT_DATA_TOT_PAGES: - jobacct->tot_pages = *uint32; - break; - case JOBACCT_DATA_MIN_CPU: - jobacct->min_cpu = *uint32; - break; - case JOBACCT_DATA_MIN_CPU_ID: - jobacct->min_cpu_id = *jobacct_id; - break; - case JOBACCT_DATA_TOT_CPU: - jobacct->tot_cpu = *uint32; - break; - default: - debug("jobacct_g_set_setinfo data_type %d invalid", - type); - } - slurm_mutex_unlock(&jobacct_lock); - return rc; -rwfail: - slurm_mutex_unlock(&jobacct_lock); - return SLURM_ERROR; - -} - -extern int common_getinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - int rc = SLURM_SUCCESS; - int *fd = (int *)data; - uint32_t *uint32 = (uint32_t *) data; - jobacct_id_t *jobacct_id = (jobacct_id_t *) data; - struct rusage *rusage = (struct rusage *) data; - struct jobacctinfo *send = (struct jobacctinfo *) data; - - slurm_mutex_lock(&jobacct_lock); - switch (type) { - case JOBACCT_DATA_TOTAL: - memcpy(send, jobacct, sizeof(struct jobacctinfo)); - break; - case JOBACCT_DATA_PIPE: - safe_read(*fd, jobacct, sizeof(struct jobacctinfo)); - break; - case JOBACCT_DATA_RUSAGE: - memcpy(rusage, &jobacct->rusage, sizeof(struct rusage)); - break; - case JOBACCT_DATA_MAX_RSS: - *uint32 = jobacct->max_rss; - break; - case JOBACCT_DATA_MAX_RSS_ID: - *jobacct_id = jobacct->max_rss_id; - break; - case JOBACCT_DATA_TOT_RSS: - *uint32 = jobacct->tot_rss; - break; - case JOBACCT_DATA_MAX_VSIZE: - *uint32 = jobacct->max_vsize; - break; - case JOBACCT_DATA_MAX_VSIZE_ID: - *jobacct_id = jobacct->max_vsize_id; - break; - case JOBACCT_DATA_TOT_VSIZE: - *uint32 = jobacct->tot_vsize; - break; - case JOBACCT_DATA_MAX_PAGES: - *uint32 = jobacct->max_pages; - break; - case JOBACCT_DATA_MAX_PAGES_ID: - *jobacct_id = jobacct->max_pages_id; - break; - case JOBACCT_DATA_TOT_PAGES: - *uint32 = jobacct->tot_pages; - break; - case JOBACCT_DATA_MIN_CPU: - *uint32 = jobacct->min_cpu; - break; - case JOBACCT_DATA_MIN_CPU_ID: - *jobacct_id = jobacct->min_cpu_id; - break; - case JOBACCT_DATA_TOT_CPU: - *uint32 = jobacct->tot_cpu; - break; - default: - debug("jobacct_g_set_setinfo data_type %d invalid", - type); - } - slurm_mutex_unlock(&jobacct_lock); - return rc; -rwfail: - slurm_mutex_unlock(&jobacct_lock); - return SLURM_ERROR; - -} - -extern void common_aggregate(struct jobacctinfo *dest, - struct jobacctinfo *from) -{ - xassert(dest); - xassert(from); - - slurm_mutex_lock(&jobacct_lock); - if(dest->max_vsize < from->max_vsize) { - dest->max_vsize = from->max_vsize; - dest->max_vsize_id = from->max_vsize_id; - } - dest->tot_vsize += from->tot_vsize; - - if(dest->max_rss < from->max_rss) { - dest->max_rss = from->max_rss; - dest->max_rss_id = from->max_rss_id; - } - dest->tot_rss += from->tot_rss; - - if(dest->max_pages < from->max_pages) { - dest->max_pages = from->max_pages; - dest->max_pages_id = from->max_pages_id; - } - dest->tot_pages += from->tot_pages; - if((dest->min_cpu > from->min_cpu) - || (dest->min_cpu == (uint32_t)NO_VAL)) { - if(from->min_cpu == (uint32_t)NO_VAL) - from->min_cpu = 0; - dest->min_cpu = from->min_cpu; - dest->min_cpu_id = from->min_cpu_id; - } - dest->tot_cpu += from->tot_cpu; - - if(dest->max_vsize_id.taskid == (uint16_t)NO_VAL) - dest->max_vsize_id = from->max_vsize_id; - - if(dest->max_rss_id.taskid == (uint16_t)NO_VAL) - dest->max_rss_id = from->max_rss_id; - - if(dest->max_pages_id.taskid == (uint16_t)NO_VAL) - dest->max_pages_id = from->max_pages_id; - - if(dest->min_cpu_id.taskid == (uint16_t)NO_VAL) - dest->min_cpu_id = from->min_cpu_id; - - /* sum up all rusage stuff */ - 
dest->rusage.ru_utime.tv_sec += from->rusage.ru_utime.tv_sec; - dest->rusage.ru_utime.tv_usec += from->rusage.ru_utime.tv_usec; - while (dest->rusage.ru_utime.tv_usec >= 1E6) { - dest->rusage.ru_utime.tv_sec++; - dest->rusage.ru_utime.tv_usec -= 1E6; - } - dest->rusage.ru_stime.tv_sec += from->rusage.ru_stime.tv_sec; - dest->rusage.ru_stime.tv_usec += from->rusage.ru_stime.tv_usec; - while (dest->rusage.ru_stime.tv_usec >= 1E6) { - dest->rusage.ru_stime.tv_sec++; - dest->rusage.ru_stime.tv_usec -= 1E6; - } - - dest->rusage.ru_maxrss += from->rusage.ru_maxrss; - dest->rusage.ru_ixrss += from->rusage.ru_ixrss; - dest->rusage.ru_idrss += from->rusage.ru_idrss; - dest->rusage.ru_isrss += from->rusage.ru_isrss; - dest->rusage.ru_minflt += from->rusage.ru_minflt; - dest->rusage.ru_majflt += from->rusage.ru_majflt; - dest->rusage.ru_nswap += from->rusage.ru_nswap; - dest->rusage.ru_inblock += from->rusage.ru_inblock; - dest->rusage.ru_oublock += from->rusage.ru_oublock; - dest->rusage.ru_msgsnd += from->rusage.ru_msgsnd; - dest->rusage.ru_msgrcv += from->rusage.ru_msgrcv; - dest->rusage.ru_nsignals += from->rusage.ru_nsignals; - dest->rusage.ru_nvcsw += from->rusage.ru_nvcsw; - dest->rusage.ru_nivcsw += from->rusage.ru_nivcsw; - slurm_mutex_unlock(&jobacct_lock); -} - -extern void common_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct) -{ - xassert(jobacct); - xassert(sacct); - slurm_mutex_lock(&jobacct_lock); - sacct->max_vsize = jobacct->max_vsize; - sacct->max_vsize_id = jobacct->max_vsize_id; - sacct->ave_vsize = jobacct->tot_vsize; - sacct->max_rss = jobacct->max_rss; - sacct->max_rss_id = jobacct->max_rss_id; - sacct->ave_rss = jobacct->tot_rss; - sacct->max_pages = jobacct->max_pages; - sacct->max_pages_id = jobacct->max_pages_id; - sacct->ave_pages = jobacct->tot_pages; - sacct->min_cpu = jobacct->min_cpu; - sacct->min_cpu_id = jobacct->min_cpu_id; - sacct->ave_cpu = jobacct->tot_cpu; - slurm_mutex_unlock(&jobacct_lock); -} - -extern void common_pack(struct jobacctinfo *jobacct, Buf buffer) -{ - int i=0; - - if(!jobacct) { - for(i=0; i<26; i++) - pack32((uint32_t) 0, buffer); - for(i=0; i<4; i++) - pack16((uint16_t) 0, buffer); - return; - } - slurm_mutex_lock(&jobacct_lock); - pack32((uint32_t)jobacct->rusage.ru_utime.tv_sec, buffer); - pack32((uint32_t)jobacct->rusage.ru_utime.tv_usec, buffer); - pack32((uint32_t)jobacct->rusage.ru_stime.tv_sec, buffer); - pack32((uint32_t)jobacct->rusage.ru_stime.tv_usec, buffer); - pack32((uint32_t)jobacct->rusage.ru_maxrss, buffer); - pack32((uint32_t)jobacct->rusage.ru_ixrss, buffer); - pack32((uint32_t)jobacct->rusage.ru_idrss, buffer); - pack32((uint32_t)jobacct->rusage.ru_isrss, buffer); - pack32((uint32_t)jobacct->rusage.ru_minflt, buffer); - pack32((uint32_t)jobacct->rusage.ru_majflt, buffer); - pack32((uint32_t)jobacct->rusage.ru_nswap, buffer); - pack32((uint32_t)jobacct->rusage.ru_inblock, buffer); - pack32((uint32_t)jobacct->rusage.ru_oublock, buffer); - pack32((uint32_t)jobacct->rusage.ru_msgsnd, buffer); - pack32((uint32_t)jobacct->rusage.ru_msgrcv, buffer); - pack32((uint32_t)jobacct->rusage.ru_nsignals, buffer); - pack32((uint32_t)jobacct->rusage.ru_nvcsw, buffer); - pack32((uint32_t)jobacct->rusage.ru_nivcsw, buffer); - pack32((uint32_t)jobacct->max_vsize, buffer); - pack32((uint32_t)jobacct->tot_vsize, buffer); - pack32((uint32_t)jobacct->max_rss, buffer); - pack32((uint32_t)jobacct->tot_rss, buffer); - pack32((uint32_t)jobacct->max_pages, buffer); - pack32((uint32_t)jobacct->tot_pages, buffer); - 
pack32((uint32_t)jobacct->min_cpu, buffer); - pack32((uint32_t)jobacct->tot_cpu, buffer); - _pack_jobacct_id(&jobacct->max_vsize_id, buffer); - _pack_jobacct_id(&jobacct->max_rss_id, buffer); - _pack_jobacct_id(&jobacct->max_pages_id, buffer); - _pack_jobacct_id(&jobacct->min_cpu_id, buffer); - slurm_mutex_unlock(&jobacct_lock); -} - -/* you need to xfree this */ -extern int common_unpack(struct jobacctinfo **jobacct, Buf buffer) -{ - uint32_t uint32_tmp; - *jobacct = xmalloc(sizeof(struct jobacctinfo)); - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_utime.tv_sec = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_utime.tv_usec = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_stime.tv_sec = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_stime.tv_usec = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_maxrss = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_ixrss = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_idrss = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_isrss = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_minflt = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_majflt = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_nswap = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_inblock = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_oublock = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_msgsnd = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_msgrcv = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_nsignals = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_nvcsw = uint32_tmp; - safe_unpack32(&uint32_tmp, buffer); - (*jobacct)->rusage.ru_nivcsw = uint32_tmp; - safe_unpack32(&(*jobacct)->max_vsize, buffer); - safe_unpack32(&(*jobacct)->tot_vsize, buffer); - safe_unpack32(&(*jobacct)->max_rss, buffer); - safe_unpack32(&(*jobacct)->tot_rss, buffer); - safe_unpack32(&(*jobacct)->max_pages, buffer); - safe_unpack32(&(*jobacct)->tot_pages, buffer); - safe_unpack32(&(*jobacct)->min_cpu, buffer); - safe_unpack32(&(*jobacct)->tot_cpu, buffer); - if(_unpack_jobacct_id(&(*jobacct)->max_vsize_id, buffer) - != SLURM_SUCCESS) - goto unpack_error; - if(_unpack_jobacct_id(&(*jobacct)->max_rss_id, buffer) - != SLURM_SUCCESS) - goto unpack_error; - if(_unpack_jobacct_id(&(*jobacct)->max_pages_id, buffer) - != SLURM_SUCCESS) - goto unpack_error; - if(_unpack_jobacct_id(&(*jobacct)->min_cpu_id, buffer) - != SLURM_SUCCESS) - goto unpack_error; - return SLURM_SUCCESS; - -unpack_error: - xfree(*jobacct); - return SLURM_ERROR; -} diff --git a/src/plugins/jobacct/common/jobacct_common.h b/src/plugins/jobacct/common/jobacct_common.h deleted file mode 100644 index 47fc72ee1..000000000 --- a/src/plugins/jobacct/common/jobacct_common.h +++ /dev/null @@ -1,139 +0,0 @@ -/*****************************************************************************\ - * jobacct_common.h - common functions for almost all jobacct plugins. - ***************************************************************************** - * - * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. - * Written by Danny Auble, <da@llnl.gov> - * UCRL-CODE-226842. 
- * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - * - * This file is patterned after jobcomp_linux.c, written by Morris Jette and - * Copyright (C) 2002 The Regents of the University of California. -\*****************************************************************************/ - -#ifndef _HAVE_JOBACCT_COMMON_H -#define _HAVE_JOBACCT_COMMON_H - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#if HAVE_STDINT_H -# include <stdint.h> -#endif -#if HAVE_INTTYPES_H -# include <inttypes.h> -#endif - -#include <dirent.h> -#include <sys/stat.h> - -#include "src/common/slurm_jobacct.h" -#include "src/common/xmalloc.h" -#include "src/common/list.h" -#include "src/common/xstring.h" -#include "src/common/node_select.h" - -#include "src/slurmd/common/proctrack.h" - -#include <ctype.h> - -#define BUFFER_SIZE 4096 - -struct jobacctinfo { - pid_t pid; - struct rusage rusage; /* returned by wait3 */ - uint32_t max_vsize; /* max size of virtual memory */ - jobacct_id_t max_vsize_id; /* contains which task number it was on */ - uint32_t tot_vsize; /* total virtual memory - (used to figure out ave later) */ - uint32_t max_rss; /* max Resident Set Size */ - jobacct_id_t max_rss_id; /* contains which task it was on */ - uint32_t tot_rss; /* total rss - (used to figure out ave later) */ - uint32_t max_pages; /* max pages */ - jobacct_id_t max_pages_id; /* contains which task it was on */ - uint32_t tot_pages; /* total pages - (used to figure out ave later) */ - uint32_t min_cpu; /* min cpu time */ - jobacct_id_t min_cpu_id; /* contains which task it was on */ - uint32_t tot_cpu; /* total cpu time - (used to figure out ave later) */ -}; - -/* Define jobacctinfo_t below to avoid including extraneous slurm headers */ -#ifndef __jobacctinfo_t_defined -# define __jobacctinfo_t_defined - typedef struct jobacctinfo *jobacctinfo_t; /* opaque data type */ -#endif - - -/* in jobacct_common.c */ -extern int common_init_struct(struct jobacctinfo *jobacct, - jobacct_id_t *jobacct_id); -extern struct jobacctinfo 
*common_alloc_jobacct(jobacct_id_t *jobacct_id); -extern void common_free_jobacct(void *object); -extern int common_setinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data); -extern int common_getinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data); -extern void common_aggregate(struct jobacctinfo *dest, - struct jobacctinfo *from); -extern void common_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct); -extern void common_pack(struct jobacctinfo *jobacct, Buf buffer); -extern int common_unpack(struct jobacctinfo **jobacct, Buf buffer); - -/* in common_slurmctld.c */ -extern int common_init_slurmctld(char *job_acct_log); -extern int common_fini_slurmctld(); -extern int common_job_start_slurmctld(struct job_record *job_ptr); -extern int common_job_complete_slurmctld(struct job_record *job_ptr); -extern int common_step_start_slurmctld(struct step_record *step); -extern int common_step_complete_slurmctld(struct step_record *step); -extern int common_suspend_slurmctld(struct job_record *job_ptr); - -/* in common_slurmstepd.c */ -extern int common_endpoll(); -extern int common_set_proctrack_container_id(uint32_t id); -extern int common_add_task(pid_t pid, jobacct_id_t *jobacct_id); -extern struct jobacctinfo *common_stat_task(pid_t pid); -extern struct jobacctinfo *common_remove_task(pid_t pid); -extern void common_suspend_poll(); -extern void common_resume_poll(); - -/* defined in common_slurmstepd.c */ -extern bool jobacct_shutdown; -extern bool suspended; -extern List task_list; -extern pthread_mutex_t jobacct_lock; -extern uint32_t cont_id; -extern bool pgid_plugin; - -#endif diff --git a/src/plugins/jobacct/gold/Makefile.am b/src/plugins/jobacct/gold/Makefile.am deleted file mode 100644 index 8352b6cfe..000000000 --- a/src/plugins/jobacct/gold/Makefile.am +++ /dev/null @@ -1,18 +0,0 @@ -# Makefile for jobacct/none plugin - -AUTOMAKE_OPTIONS = foreign - -PLUGIN_FLAGS = -module -avoid-version --export-dynamic - -# Gold job completion logging plugin. -INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common - -pkglib_LTLIBRARIES = jobacct_gold.la -jobacct_gold_la_SOURCES = \ - agent.c agent.h \ - base64.c base64.h \ - gold_interface.c gold_interface.h \ - jobacct_gold.c -jobacct_gold_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) - - diff --git a/src/plugins/jobacct/gold/agent.c b/src/plugins/jobacct/gold/agent.c deleted file mode 100644 index 5e4ce97a5..000000000 --- a/src/plugins/jobacct/gold/agent.c +++ /dev/null @@ -1,675 +0,0 @@ -/****************************************************************************\ - * agent.c - Agent to queue and process pending Gold requests - * Largely copied from src/common/slurmdbd_defs.c in Slurm v1.3 - ***************************************************************************** - * Copyright (C) 2008 Lawrence Livermore National Security. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
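struct jobacctinfo above mixes two kinds of counters: max_*/min_cpu fields that remember an extreme value together with the jobacct_id_t of the task that produced it, and tot_* fields that are simply summed so averages can be derived later. The sketch below is a simplified standalone model of how an aggregation step in the spirit of common_aggregate() combines two samples under those rules; sample_t and its fields are illustrative stand-ins, not the real structure.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct jobacctinfo: one "max" metric,
 * one "min" metric, and their running totals. */
typedef struct {
	uint32_t max_rss;	/* peak RSS seen so far */
	uint16_t max_rss_task;	/* task that produced the peak */
	uint32_t tot_rss;	/* summed RSS, used for averages later */
	uint32_t min_cpu;	/* smallest CPU time seen so far */
	uint16_t min_cpu_task;	/* task that produced the minimum */
	uint32_t tot_cpu;	/* summed CPU time */
} sample_t;

/* Fold "from" into "dest": extremes keep their task id, totals add up. */
static void aggregate(sample_t *dest, const sample_t *from)
{
	if (from->max_rss > dest->max_rss) {
		dest->max_rss = from->max_rss;
		dest->max_rss_task = from->max_rss_task;
	}
	dest->tot_rss += from->tot_rss;

	if (from->min_cpu < dest->min_cpu) {
		dest->min_cpu = from->min_cpu;
		dest->min_cpu_task = from->min_cpu_task;
	}
	dest->tot_cpu += from->tot_cpu;
}

int main(void)
{
	sample_t a = { 1000, 0, 1000, 50, 0, 50 };
	sample_t b = { 4000, 3, 4000, 20, 3, 20 };

	aggregate(&a, &b);
	printf("max_rss=%u (task %u) avg_rss=%u min_cpu=%u (task %u)\n",
	       a.max_rss, (unsigned) a.max_rss_task, a.tot_rss / 2,
	       a.min_cpu, (unsigned) a.min_cpu_task);
	return 0;
}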
- * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -# if HAVE_INTTYPES_H -# include <inttypes.h> -# else -# if HAVE_STDINT_H -# include <stdint.h> -# endif -# endif /* HAVE_INTTYPES_H */ -#else /* !HAVE_CONFIG_H */ -# include <inttypes.h> -#endif /* HAVE_CONFIG_H */ - -#include <arpa/inet.h> -#include <fcntl.h> -#include <pthread.h> -#include <stdio.h> -#include <syslog.h> -#include <sys/poll.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <time.h> -#include <unistd.h> - -#include "agent.h" -#include "slurm/slurm_errno.h" -#include "src/common/fd.h" -#include "src/common/pack.h" -#include "src/common/slurm_auth.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/xmalloc.h" -#include "src/common/xsignal.h" -#include "src/common/xstring.h" - -#define _DEBUG 0 -#define GOLD_MAGIC 0xDEAD3219 -#define MAX_AGENT_QUEUE 10000 -#define MAX_GOLD_MSG_LEN 16384 - -static pthread_mutex_t agent_lock = PTHREAD_MUTEX_INITIALIZER; -static pthread_cond_t agent_cond = PTHREAD_COND_INITIALIZER; -static List agent_list = (List) NULL; -static pthread_t agent_tid = 0; -static time_t agent_shutdown = 0; - -static void * _agent(void *x); -static void _agent_queue_del(void *x); -static void _create_agent(void); -static Buf _load_gold_rec(int fd); -static void _load_gold_state(void); -static int _process_msg(Buf buffer); -static int _save_gold_rec(int fd, Buf buffer); -static void _save_gold_state(void); -static void _sig_handler(int signal); -static void _shutdown_agent(void); - -/**************************************************************************** - * External APIs for use by jobacct_gold.c - ****************************************************************************/ - -/* Initiated a Gold message agent. Recover any saved RPCs. */ -extern int gold_agent_init(void) -{ - slurm_mutex_lock(&agent_lock); - if ((agent_tid == 0) || (agent_list == NULL)) - _create_agent(); - slurm_mutex_unlock(&agent_lock); - - return SLURM_SUCCESS; -} - -/* Terminate a Gold message agent. Save any pending RPCs. */ -extern int gold_agent_fini(void) -{ - /* NOTE: agent_lock not needed for _shutdown_agent() */ - _shutdown_agent(); - - return SLURM_SUCCESS; -} - -/* Send an RPC to the Gold. Do not wait for the reply. 
The RPC - * will be queued and processed later if Gold is not responding. - * Returns SLURM_SUCCESS or an error code */ -extern int gold_agent_xmit(gold_agent_msg_t *req) -{ - Buf buffer; - int cnt, rc = SLURM_SUCCESS; - static time_t syslog_time = 0; - - buffer = init_buf(MAX_GOLD_MSG_LEN); - pack16(req->msg_type, buffer); - switch (req->msg_type) { - case GOLD_MSG_CLUSTER_PROCS: - gold_agent_pack_cluster_procs_msg( - (gold_cluster_procs_msg_t *) req->data, buffer); - break; - case GOLD_MSG_JOB_COMPLETE: - gold_agent_pack_job_info_msg( - (gold_job_info_msg_t *) req->data, buffer); - break; - case GOLD_MSG_JOB_START: - gold_agent_pack_job_info_msg( - (gold_job_info_msg_t *) req->data, buffer); - break; - case GOLD_MSG_NODE_DOWN: - gold_agent_pack_node_down_msg( - (gold_node_down_msg_t *) req->data, buffer); - break; - case GOLD_MSG_NODE_UP: - gold_agent_pack_node_up_msg( - (gold_node_up_msg_t *) req->data, buffer); - break; - case GOLD_MSG_STEP_START: - gold_agent_pack_job_info_msg( - (gold_job_info_msg_t *) req->data, buffer); - break; - default: - error("gold: Invalid message send type %u", - req->msg_type); - free_buf(buffer); - return SLURM_ERROR; - } - - slurm_mutex_lock(&agent_lock); - if ((agent_tid == 0) || (agent_list == NULL)) { - _create_agent(); - if ((agent_tid == 0) || (agent_list == NULL)) { - slurm_mutex_unlock(&agent_lock); - free_buf(buffer); - return SLURM_ERROR; - } - } - cnt = list_count(agent_list); -#if _DEBUG - info("gold agent: queuing msg_type %u queue_len %d", - req->msg_type, cnt); -#endif - if ((cnt >= (MAX_AGENT_QUEUE / 2)) && - (difftime(time(NULL), syslog_time) > 120)) { - /* Log critical error every 120 seconds */ - syslog_time = time(NULL); - error("gold: agent queue filling, RESTART GOLD NOW"); - syslog(LOG_CRIT, "*** RESTART GOLD NOW ***"); - } - if (cnt < MAX_AGENT_QUEUE) { - if (list_enqueue(agent_list, buffer) == NULL) - fatal("list_enqueue: memory allocation failure"); - } else { - error("gold: agent queue is full, discarding request"); - rc = SLURM_ERROR; - } - slurm_mutex_unlock(&agent_lock); - pthread_cond_broadcast(&agent_cond); - return rc; -} - -/**************************************************************************** - * Functions for agent to manage queue of pending message for Gold - ****************************************************************************/ -static void _create_agent(void) -{ - if (agent_list == NULL) { - agent_list = list_create(_agent_queue_del); - if (agent_list == NULL) - fatal("list_create: malloc failure"); - _load_gold_state(); - } - - if (agent_tid == 0) { - pthread_attr_t agent_attr; - slurm_attr_init(&agent_attr); - pthread_attr_setdetachstate(&agent_attr, - PTHREAD_CREATE_DETACHED); - if (pthread_create(&agent_tid, &agent_attr, _agent, NULL) || - (agent_tid == 0)) - fatal("pthread_create: %m"); - } -} - -static void _agent_queue_del(void *x) -{ - Buf buffer = (Buf) x; - free_buf(buffer); -} - -static void _shutdown_agent(void) -{ - int i; - - if (agent_tid) { - agent_shutdown = time(NULL); - pthread_cond_broadcast(&agent_cond); - for (i=0; ((i<10) && agent_tid); i++) { - sleep(1); - pthread_cond_broadcast(&agent_cond); - if (pthread_kill(agent_tid, SIGUSR1)) - agent_tid = 0; - } - if (agent_tid) { - error("gold: agent failed to shutdown gracefully"); - } else - agent_shutdown = 0; - } -} - -static void *_agent(void *x) -{ - int cnt, rc; - Buf buffer; - struct timespec abs_time; - static time_t fail_time = 0; - int sigarray[] = {SIGUSR1, 0}; - - /* Prepare to catch SIGUSR1 to interrupt pending - * I/O and 
terminate in a timely fashion. */ - xsignal(SIGUSR1, _sig_handler); - xsignal_unblock(sigarray); - - while (agent_shutdown == 0) { - slurm_mutex_lock(&agent_lock); - if (agent_list) - cnt = list_count(agent_list); - else - cnt = 0; - if ((cnt == 0) || - (fail_time && (difftime(time(NULL), fail_time) < 10))) { - abs_time.tv_sec = time(NULL) + 10; - abs_time.tv_nsec = 0; - rc = pthread_cond_timedwait(&agent_cond, &agent_lock, - &abs_time); - slurm_mutex_unlock(&agent_lock); - continue; - } else if ((cnt > 0) && ((cnt % 50) == 0)) - info("gold: agent queue size %u", cnt); - /* Leave item on the queue until processing complete */ - if (agent_list) - buffer = (Buf) list_peek(agent_list); - else - buffer = NULL; - slurm_mutex_unlock(&agent_lock); - if (buffer == NULL) - continue; - - /* NOTE: agent_lock is clear here, so we can add more - * requests to the queue while waiting for this RPC to - * complete. */ - rc = _process_msg(buffer); - if (rc != SLURM_SUCCESS) { - if (agent_shutdown) - break; - error("gold: Failure sending message"); - } - - slurm_mutex_lock(&agent_lock); - if (agent_list && (rc != EAGAIN)) { - buffer = (Buf) list_dequeue(agent_list); - free_buf(buffer); - fail_time = 0; - } else { - fail_time = time(NULL); - } - slurm_mutex_unlock(&agent_lock); - } - - slurm_mutex_lock(&agent_lock); - _save_gold_state(); - if (agent_list) { - list_destroy(agent_list); - agent_list = NULL; - } - slurm_mutex_unlock(&agent_lock); - return NULL; -} - -static int _process_msg(Buf buffer) -{ - int rc; - uint16_t msg_type; - uint32_t msg_size; - - /* We save the full buffer size in case the RPC fails - * and we need to save state for later recovery. */ - msg_size = get_buf_offset(buffer); - set_buf_offset(buffer, 0); - safe_unpack16(&msg_type, buffer); -#if _DEBUG - info("gold agent: processing msg_type %u", msg_type); -#endif - switch (msg_type) { - case GOLD_MSG_CLUSTER_PROCS: - rc = agent_cluster_procs(buffer); - break; - case GOLD_MSG_JOB_COMPLETE: - rc = agent_job_complete(buffer); - break; - case GOLD_MSG_JOB_START: - rc = agent_job_start(buffer); - break; - case GOLD_MSG_NODE_DOWN: - rc = agent_node_down(buffer); - break; - case GOLD_MSG_NODE_UP: - rc = agent_node_up(buffer); - break; - case GOLD_MSG_STEP_START: - rc = agent_step_start(buffer); - break; - default: - error("gold: Invalid send message type %u", msg_type); - rc = SLURM_ERROR; /* discard entry and continue */ - } - set_buf_offset(buffer, msg_size); /* restore buffer size */ - return rc; - -unpack_error: - /* If the message format is bad return SLURM_SUCCESS to get - * it off of the queue since we can't work with it anyway */ - error("gold agent: message unpack error"); - return SLURM_ERROR; -} - -static void _save_gold_state(void) -{ - char *gold_fname; - Buf buffer; - int fd, rc, wrote = 0; - - gold_fname = slurm_get_state_save_location(); - xstrcat(gold_fname, "/gold.messages"); - fd = open(gold_fname, O_WRONLY | O_CREAT | O_TRUNC, 0600); - if (fd < 0) { - error("gold: Creating state save file %s", gold_fname); - } else if (agent_list) { - while ((buffer = list_dequeue(agent_list))) { - rc = _save_gold_rec(fd, buffer); - free_buf(buffer); - if (rc != SLURM_SUCCESS) - break; - wrote++; - } - } - if (fd >= 0) { - verbose("gold: saved %d pending RPCs", wrote); - (void) close(fd); - } - xfree(gold_fname); -} - -static void _load_gold_state(void) -{ - char *gold_fname; - Buf buffer; - int fd, recovered = 0; - - gold_fname = slurm_get_state_save_location(); - xstrcat(gold_fname, "/gold.messages"); - fd = open(gold_fname, O_RDONLY); 
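The _agent() thread above follows a peek/process/dequeue-on-success pattern: it sleeps on a condition variable with a ten second timeout, looks at the head of the queue without removing the entry, and only dequeues the buffer once _process_msg() returns something other than EAGAIN, so a transient Gold outage leaves the RPC queued for a later retry. The standalone pthread sketch below reproduces that shape with an ordinary ring buffer; the globals, process() and the constants are invented for the example, and the signal handling and state-save steps of the real agent are deliberately omitted.

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define QLEN 16

static int queue[QLEN];
static int q_head, q_count;
static int shutting_down;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Stand-in for _process_msg(): 0 means success, non-zero means
 * "recoverable failure, leave the entry queued". */
static int process(int item)
{
	printf("processing %d\n", item);
	return 0;
}

static void *agent(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&lock);
	while (!shutting_down) {
		if (q_count == 0) {
			/* Nothing to do: wait up to 10 seconds, like the
			 * pthread_cond_timedwait() call in _agent(). */
			struct timespec abs;
			abs.tv_sec = time(NULL) + 10;
			abs.tv_nsec = 0;
			pthread_cond_timedwait(&cond, &lock, &abs);
			continue;
		}
		/* Peek: leave the item on the queue while working on it. */
		int item = queue[q_head];
		pthread_mutex_unlock(&lock);

		int rc = process(item);

		pthread_mutex_lock(&lock);
		if (rc == 0) {			/* success: dequeue it now */
			q_head = (q_head + 1) % QLEN;
			q_count--;
		}				/* failure: keep it for a retry */
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void enqueue(int item)
{
	pthread_mutex_lock(&lock);
	if (q_count < QLEN) {
		queue[(q_head + q_count) % QLEN] = item;
		q_count++;
	}	/* a full queue drops the request, as in gold_agent_xmit() */
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&cond);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, agent, NULL);
	for (int i = 0; i < 5; i++)
		enqueue(i);
	sleep(1);
	pthread_mutex_lock(&lock);
	shutting_down = 1;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&cond);
	pthread_join(tid, NULL);
	return 0;
}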
- if (fd < 0) { - error("gold: Opening state save file %s", gold_fname); - } else { - while (1) { - buffer = _load_gold_rec(fd); - if (buffer == NULL) - break; - if (list_enqueue(agent_list, buffer) == NULL) - fatal("gold: list_enqueue, no memory"); - recovered++; - } - } - if (fd >= 0) { - verbose("gold: recovered %d pending RPCs", recovered); - (void) close(fd); - (void) unlink(gold_fname); /* clear save state */ - } - xfree(gold_fname); -} - -static int _save_gold_rec(int fd, Buf buffer) -{ - ssize_t size, wrote; - uint32_t msg_size = get_buf_offset(buffer); - uint32_t magic = GOLD_MAGIC; - char *msg = get_buf_data(buffer); - - size = sizeof(msg_size); - wrote = write(fd, &msg_size, size); - if (wrote != size) { - error("gold: state save error: %m"); - return SLURM_ERROR; - } - - wrote = 0; - while (wrote < msg_size) { - wrote = write(fd, msg, msg_size); - if (wrote > 0) { - msg += wrote; - msg_size -= wrote; - } else if ((wrote == -1) && (errno == EINTR)) - continue; - else { - error("gold: state save error: %m"); - return SLURM_ERROR; - } - } - - size = sizeof(magic); - wrote = write(fd, &magic, size); - if (wrote != size) { - error("gold: state save error: %m"); - return SLURM_ERROR; - } - - return SLURM_SUCCESS; -} - -static Buf _load_gold_rec(int fd) -{ - ssize_t size, rd_size; - uint32_t msg_size, magic; - char *msg; - Buf buffer; - - size = sizeof(msg_size); - rd_size = read(fd, &msg_size, size); - if (rd_size == 0) - return (Buf) NULL; - if (rd_size != size) { - error("gold: state recover error: %m"); - return (Buf) NULL; - } - if (msg_size > MAX_GOLD_MSG_LEN) { - error("gold: state recover error, msg_size=%u", msg_size); - return (Buf) NULL; - } - - buffer = init_buf((int) msg_size); - if (buffer == NULL) - fatal("gold: create_buf malloc failure"); - set_buf_offset(buffer, msg_size); - msg = get_buf_data(buffer); - size = msg_size; - while (size) { - rd_size = read(fd, msg, size); - if (rd_size > 0) { - msg += rd_size; - size -= rd_size; - } else if ((rd_size == -1) && (errno == EINTR)) - continue; - else { - error("gold: state recover error: %m"); - free_buf(buffer); - return (Buf) NULL; - } - } - - size = sizeof(magic); - rd_size = read(fd, &magic, size); - if ((rd_size != size) || (magic != GOLD_MAGIC)) { - error("gold: state recover error"); - free_buf(buffer); - return (Buf) NULL; - } - - return buffer; -} - -static void _sig_handler(int signal) -{ -} - -/****************************************************************************\ - * Free data structures -\****************************************************************************/ -void inline gold_agent_free_cluster_procs_msg(gold_cluster_procs_msg_t *msg) -{ - xfree(msg); -} - -void inline gold_agent_free_job_info_msg(gold_job_info_msg_t *msg) -{ - if (msg) { - xfree(msg->account); - xfree(msg->name); - xfree(msg->nodes); - xfree(msg->partition); - xfree(msg); - } -} - -void inline gold_agent_free_node_down_msg(gold_node_down_msg_t *msg) -{ - if (msg) { - xfree(msg->hostlist); - xfree(msg->reason); - xfree(msg); - } -} - -void inline gold_agent_free_node_up_msg(gold_node_up_msg_t *msg) -{ - if (msg) { - xfree(msg->hostlist); - xfree(msg); - } -} - -/****************************************************************************\ - * Pack and unpack data structures -\****************************************************************************/ -void inline -gold_agent_pack_cluster_procs_msg(gold_cluster_procs_msg_t *msg, Buf buffer) -{ - pack32(msg->proc_count, buffer); - pack_time(msg->event_time, buffer); -} -int 
inline -gold_agent_unpack_cluster_procs_msg(gold_cluster_procs_msg_t **msg, Buf buffer) -{ - gold_cluster_procs_msg_t *msg_ptr; - - msg_ptr = xmalloc(sizeof(gold_cluster_procs_msg_t)); - *msg = msg_ptr; - safe_unpack32(&msg_ptr->proc_count, buffer); - safe_unpack_time(&msg_ptr->event_time, buffer); - return SLURM_SUCCESS; - -unpack_error: - xfree(msg_ptr); - *msg = NULL; - return SLURM_ERROR; -} - -void inline -gold_agent_pack_job_info_msg(gold_job_info_msg_t *msg, Buf buffer) -{ - packstr(msg->account, buffer); - pack_time(msg->begin_time, buffer); - pack_time(msg->end_time, buffer); - pack32(msg->exit_code, buffer); - pack32(msg->job_id, buffer); - pack16(msg->job_state, buffer); - packstr(msg->name, buffer); - packstr(msg->nodes, buffer); - packstr(msg->partition, buffer); - pack_time(msg->start_time, buffer); - pack_time(msg->submit_time, buffer); - pack32(msg->total_procs, buffer); - pack32(msg->user_id, buffer); -} - -int inline -gold_agent_unpack_job_info_msg(gold_job_info_msg_t **msg, Buf buffer) -{ - uint16_t uint16_tmp; - gold_job_info_msg_t *msg_ptr = xmalloc(sizeof(gold_job_info_msg_t)); - *msg = msg_ptr; - safe_unpackstr_xmalloc(&msg_ptr->account, &uint16_tmp, buffer); - safe_unpack_time(&msg_ptr->begin_time, buffer); - safe_unpack_time(&msg_ptr->end_time, buffer); - safe_unpack32(&msg_ptr->exit_code, buffer); - safe_unpack32(&msg_ptr->job_id, buffer); - safe_unpack16(&msg_ptr->job_state, buffer); - safe_unpackstr_xmalloc(&msg_ptr->name, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg_ptr->partition, &uint16_tmp, buffer); - safe_unpack_time(&msg_ptr->start_time, buffer); - safe_unpack_time(&msg_ptr->submit_time, buffer); - safe_unpack32(&msg_ptr->total_procs, buffer); - safe_unpack32(&msg_ptr->user_id, buffer); - return SLURM_SUCCESS; - -unpack_error: - xfree(msg_ptr->account); - xfree(msg_ptr->name); - xfree(msg_ptr->nodes); - xfree(msg_ptr->partition); - xfree(msg_ptr); - *msg = NULL; - return SLURM_ERROR; -} - -void inline -gold_agent_pack_node_down_msg(gold_node_down_msg_t *msg, Buf buffer) -{ - pack16(msg->cpus, buffer); - pack_time(msg->event_time, buffer); - packstr(msg->hostlist, buffer); - packstr(msg->reason, buffer); -} - -int inline -gold_agent_unpack_node_down_msg(gold_node_down_msg_t **msg, Buf buffer) -{ - gold_node_down_msg_t *msg_ptr; - uint16_t uint16_tmp; - - msg_ptr = xmalloc(sizeof(gold_node_down_msg_t)); - *msg = msg_ptr; - safe_unpack16(&msg_ptr->cpus, buffer); - safe_unpack_time(&msg_ptr->event_time, buffer); - safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&msg_ptr->reason, &uint16_tmp, buffer); - return SLURM_SUCCESS; - -unpack_error: - xfree(msg_ptr->hostlist); - xfree(msg_ptr->reason); - xfree(msg_ptr); - *msg = NULL; - return SLURM_ERROR; -} - -void inline -gold_agent_pack_node_up_msg(gold_node_up_msg_t *msg, Buf buffer) -{ - pack_time(msg->event_time, buffer); - packstr(msg->hostlist, buffer); -} - -int inline -gold_agent_unpack_node_up_msg(gold_node_up_msg_t **msg, Buf buffer) -{ - gold_node_up_msg_t *msg_ptr; - uint16_t uint16_tmp; - - msg_ptr = xmalloc(sizeof(gold_node_up_msg_t)); - *msg = msg_ptr; - safe_unpack_time(&msg_ptr->event_time, buffer); - safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint16_tmp, buffer); - return SLURM_SUCCESS; - -unpack_error: - xfree(msg_ptr->hostlist); - xfree(msg_ptr); - *msg = NULL; - return SLURM_ERROR; -} diff --git a/src/plugins/jobacct/gold/agent.h b/src/plugins/jobacct/gold/agent.h deleted file mode 
100644 index 13df74826..000000000 --- a/src/plugins/jobacct/gold/agent.h +++ /dev/null @@ -1,170 +0,0 @@ -/****************************************************************************\ - * agent.h - Definitions used to queue and process pending Gold requests - * Largely copied from src/common/slurmdbd_defs.h in Slurm v1.3 - ***************************************************************************** - * Copyright (C) 2008 Lawrence Livermore National Security. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
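The _save_gold_rec()/_load_gold_rec() pair above persist each still-queued RPC to the gold.messages state file as a length-prefixed record: a uint32 payload size, the raw packed payload, and a uint32 GOLD_MAGIC trailer that lets recovery detect a truncated write. Below is a small standalone sketch of one record in that framing; the demo file name and the write_rec()/read_rec() helpers are made up, and the partial-write/EINTR retry loops of the real code are left out for brevity.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define MAGIC 0xDEAD3219	/* same trailer idea as GOLD_MAGIC */

/* Write one record: [uint32 size][payload][uint32 magic]. */
static int write_rec(int fd, const void *payload, uint32_t size)
{
	uint32_t magic = MAGIC;

	if (write(fd, &size, sizeof(size)) != (ssize_t) sizeof(size) ||
	    write(fd, payload, size) != (ssize_t) size ||
	    write(fd, &magic, sizeof(magic)) != (ssize_t) sizeof(magic))
		return -1;
	return 0;
}

/* Read one record back; the magic trailer guards against truncation. */
static int read_rec(int fd, void *payload, uint32_t max, uint32_t *size)
{
	uint32_t magic = 0;

	if (read(fd, size, sizeof(*size)) != (ssize_t) sizeof(*size) ||
	    *size > max ||
	    read(fd, payload, *size) != (ssize_t) *size ||
	    read(fd, &magic, sizeof(magic)) != (ssize_t) sizeof(magic) ||
	    magic != MAGIC)
		return -1;
	return 0;
}

int main(void)
{
	const char msg[] = "queued gold rpc";
	char buf[64];
	uint32_t size = 0;
	int fd = open("gold.messages.demo", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0)
		return 1;
	if (write_rec(fd, msg, sizeof(msg)) == 0 &&
	    lseek(fd, 0, SEEK_SET) == 0 &&
	    read_rec(fd, buf, sizeof(buf), &size) == 0)
		printf("recovered %u bytes: %s\n", size, buf);
	close(fd);
	unlink("gold.messages.demo");
	return 0;
}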
-\*****************************************************************************/ - -#ifndef _GOLD_AGENT_H -#define _GOLD_AGENT_H - -#if HAVE_CONFIG_H -# include "config.h" -# if HAVE_INTTYPES_H -# include <inttypes.h> -# else -# if HAVE_STDINT_H -# include <stdint.h> -# endif -# endif /* HAVE_INTTYPES_H */ -#else /* !HAVE_CONFIG_H */ -# include <inttypes.h> -#endif /* HAVE_CONFIG_H */ - -#include "src/common/pack.h" - -/* Increment SLURM_DBD_VERSION if any of the RPCs change */ -#define SLURM_DBD_VERSION 01 - -/* SLURM DBD message types */ -typedef enum { - GOLD_MSG_INIT = 1400, /* Connection initialization */ - GOLD_MSG_CLUSTER_PROCS, /* Record tota processors on cluster */ - GOLD_MSG_JOB_COMPLETE, /* Record job completion */ - GOLD_MSG_JOB_START, /* Record job starting */ - GOLD_MSG_NODE_DOWN, /* Record node state going DOWN */ - GOLD_MSG_NODE_UP, /* Record node state coming UP */ - GOLD_MSG_STEP_START /* Record step starting */ -} slurm_gold_msg_type_t; - -/*****************************************************************************\ - * Slurm DBD protocol data structures -\*****************************************************************************/ - -typedef struct gold_agent_msg { - uint16_t msg_type; /* see gold_agent_msg_type_t above */ - void * data; /* pointer to a message type below */ -} gold_agent_msg_t; - -typedef struct gold_cluster_procs_msg { - uint32_t proc_count; /* total processor count */ - time_t event_time; /* time of transition */ -} gold_cluster_procs_msg_t; - -typedef struct gold_job_info_msg { - char * account; /* bank account for job */ - time_t begin_time; /* time job becomes eligible to run */ - time_t end_time; /* job termintation time */ - uint32_t exit_code; /* job exit code or signal */ - uint32_t job_id; /* job ID */ - uint16_t job_state; /* job state */ - char * name; /* job name */ - char * nodes; /* hosts allocated to the job */ - char * partition; /* job's partition */ - time_t start_time; /* job start time */ - time_t submit_time; /* job submit time */ - uint32_t total_procs; /* count of allocated processors */ - uint32_t user_id; /* owner's UID */ -} gold_job_info_msg_t; - -typedef struct gold_node_down_msg { - uint16_t cpus; /* processors on the node */ - time_t event_time; /* time of transition */ - char *hostlist; /* name of hosts */ - char *reason; /* explanation for the node's state */ -} gold_node_down_msg_t; - - -typedef struct gold_node_up_msg { - time_t event_time; /* time of transition */ - char *hostlist; /* name of hosts */ -} gold_node_up_msg_t; - -/*****************************************************************************\ - * Slurm DBD message processing functions -\*****************************************************************************/ - -/* Initiated a Gold message agent. Recover any saved RPCs. */ -extern int gold_agent_init(void); - -/* Terminate a Gold message agent. Save any pending RPCs. */ -extern int gold_agent_fini(void); - -/* Send an RPC to the Gold. Do not wait for the reply. The RPC - * will be queued and processed later if Gold is not responding. - * Returns SLURM_SUCCESS or an error code */ -extern int gold_agent_xmit(gold_agent_msg_t *req); - -/*****************************************************************************\ - * Functions for processing the Gold requests, located in jobacct_gold.c -\*****************************************************************************/ -/* For all functions below - * RET SLURM_SUCCESS on success - * SLURM_ERROR on non-recoverable error (e.g. 
invalid account ID) - * EAGAIN on recoverable error (e.g. Gold not responding) */ -extern int agent_cluster_procs(Buf buffer); -extern int agent_job_start(Buf buffer); -extern int agent_job_complete(Buf buffer); -extern int agent_step_start(Buf buffer); -extern int agent_node_down(Buf buffer); -extern int agent_node_up(Buf buffer); - -/*****************************************************************************\ - * Free various Gold message structures -\*****************************************************************************/ -void inline gold_agent_free_cluster_procs_msg(gold_cluster_procs_msg_t *msg); -void inline gold_agent_free_job_info_msg(gold_job_info_msg_t *msg); -void inline gold_agent_free_node_down_msg(gold_node_down_msg_t *msg); -void inline gold_agent_free_node_up_msg(gold_node_up_msg_t *msg); - -/*****************************************************************************\ - * Pack various Gold message structures into a buffer -\*****************************************************************************/ -void inline gold_agent_pack_cluster_procs_msg(gold_cluster_procs_msg_t *msg, - Buf buffer); -void inline gold_agent_pack_job_info_msg(gold_job_info_msg_t *msg, Buf buffer); -void inline gold_agent_pack_node_down_msg(gold_node_down_msg_t *msg, Buf buffer); -void inline gold_agent_pack_node_up_msg(gold_node_up_msg_t *msg, Buf buffer); - -/*****************************************************************************\ - * Unpack various Gold message structures from a buffer -\*****************************************************************************/ -int inline gold_agent_unpack_cluster_procs_msg(gold_cluster_procs_msg_t **msg, - Buf buffer); -int inline gold_agent_unpack_job_info_msg(gold_job_info_msg_t **msg, Buf buffer); -int inline gold_agent_unpack_node_down_msg(gold_node_down_msg_t **msg, - Buf buffer); -int inline gold_agent_unpack_node_up_msg(gold_node_up_msg_t **msg, Buf buffer); - -#endif /* !_GOLD_AGENT_H */ diff --git a/src/plugins/jobacct/gold/jobacct_gold.c b/src/plugins/jobacct/gold/jobacct_gold.c deleted file mode 100644 index bfea65a6e..000000000 --- a/src/plugins/jobacct/gold/jobacct_gold.c +++ /dev/null @@ -1,1133 +0,0 @@ -/*****************************************************************************\ - * jobacct_gold.c - jobacct interface to gold. - ***************************************************************************** - * Copyright (C) 2004-2007 The Regents of the University of California. - * Copyright (C) 2008 Lawrence Livermore National Security. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Danny Auble <da@llnl.gov> - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. 
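agent.h above fixes the contract the queue runs on: every gold_agent_msg_t is just a uint16 type tag plus an untyped payload pointer, and each agent_*() handler must distinguish a permanent failure (SLURM_ERROR, the entry is discarded) from a transient one (EAGAIN, the entry stays queued and is retried). The sketch below models that tagged dispatch and tri-state result with invented tags, handlers and return codes; it shows the shape of the contract and is not SLURM code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

enum { DEMO_OK = 0, DEMO_FATAL = -1 };	/* stand-ins for SLURM_SUCCESS/ERROR */

/* Illustrative tags, in the spirit of GOLD_MSG_JOB_START et al. */
enum { MSG_JOB_START = 1400, MSG_NODE_DOWN };

typedef struct {
	uint16_t msg_type;	/* selects the handler */
	void *data;		/* type-specific message struct */
} demo_msg_t;

static int handle_job_start(void *data)
{
	printf("job start for job %u\n", *(uint32_t *) data);
	return DEMO_OK;
}

static int handle_node_down(void *data)
{
	(void) data;
	return EAGAIN;		/* pretend the backend is unreachable */
}

/* Dispatch on the tag; the caller drops the message unless the handler
 * reported a recoverable (EAGAIN) failure. */
static int dispatch(const demo_msg_t *msg)
{
	switch (msg->msg_type) {
	case MSG_JOB_START:
		return handle_job_start(msg->data);
	case MSG_NODE_DOWN:
		return handle_node_down(msg->data);
	default:
		return DEMO_FATAL;	/* unknown type: discard */
	}
}

int main(void)
{
	uint32_t job_id = 42;
	demo_msg_t msg = { MSG_JOB_START, &job_id };
	int rc = dispatch(&msg);

	if (rc == EAGAIN)
		printf("transient failure: keep the message queued\n");
	else
		printf("rc=%d: message can be dequeued\n", rc);
	return 0;
}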
If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ -#include "gold_interface.h" -#include "agent.h" - -#include <ctype.h> -#include <stdlib.h> -#include <sys/stat.h> - -#include "src/common/list.h" -#include "src/common/parse_time.h" -#include "src/common/slurm_jobacct.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/uid.h" -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/slurmctld/slurmctld.h" -#include "src/slurmd/slurmd/slurmd.h" - -typedef struct { - char *user; - char *project; - char *machine; - char *gold_id; -} gold_account_t; - -static int _add_edit_job(gold_job_info_msg_t *job_ptr, gold_object_t action); - -/* - * These variables are required by the generic plugin interface. If they - * are not found in the plugin, the plugin loader will ignore it. - * - * plugin_name - a string giving a human-readable description of the - * plugin. There is no maximum length, but the symbol must refer to - * a valid string. - * - * plugin_type - a string suggesting the type of the plugin or its - * applicability to a particular form of data or method of data handling. - * If the low-level plugin API is used, the contents of this string are - * unimportant and may be anything. SLURM uses the higher-level plugin - * interface which requires this string to be of the form - * - * <application>/<method> - * - * where <application> is a description of the intended application of - * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> - * is a description of how this plugin satisfies that application. SLURM will - * only load job completion logging plugins if the plugin_type string has a - * prefix of "jobacct/". - * - * plugin_version - an unsigned 32-bit integer giving the version number - * of the plugin. If major and minor revisions are desired, the major - * version number may be multiplied by a suitable magnitude constant such - * as 100 or 1000. Various SLURM versions will likely require a certain - * minimum versions for their plugins as the job accounting API - * matures. - */ -const char plugin_name[] = "Job accounting GOLD plugin"; -const char plugin_type[] = "jobacct/gold"; -const uint32_t plugin_version = 100; - -/* for this first draft we are only supporting one cluster per slurm - * 1.3 will probably do better than this. 
- */ - -static char *cluster_name = NULL; -static List gold_account_list = NULL; - -/* _check_for_job - * IN jobid - job id to check for - * IN submit - timestamp for submit time of job - * RET 0 for not found 1 for found - */ - -static void _destroy_gold_account(void *object) -{ - gold_account_t *gold_account = (gold_account_t *) object; - if(gold_account) { - xfree(gold_account->user); - xfree(gold_account->project); - xfree(gold_account->machine); - xfree(gold_account->gold_id); - xfree(gold_account); - } -} - -static int _check_for_job(uint32_t jobid, time_t submit) -{ - gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB, - GOLD_ACTION_QUERY); - gold_response_t *gold_response = NULL; - char tmp_buff[50]; - int rc = 0; - - if(!gold_request) - return rc; - - gold_request_add_selection(gold_request, "JobId"); - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", jobid); - gold_request_add_condition(gold_request, "JobId", tmp_buff, - GOLD_OPERATOR_NONE); - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", (uint32_t)submit); - gold_request_add_condition(gold_request, "SubmitTime", tmp_buff, - GOLD_OPERATOR_NONE); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if(!gold_response) { - error("_check_for_job: no response received"); - return 0; - } - - if(gold_response->entry_cnt > 0) - rc = 1; - destroy_gold_response(gold_response); - - return rc; -} - -/* - * Get an account ID for some user/project/machine - * RET the account ID OR - * NULL on Gold communcation failure OR - * "0" if there is no valid account - */ -static char *_get_account_id(char *user, char *project, char *machine) -{ - gold_request_t *gold_request = NULL; - gold_response_t *gold_response = NULL; - char *gold_account_id = NULL; - gold_response_entry_t *resp_entry = NULL; - gold_name_value_t *name_val = NULL; - gold_account_t *gold_account = NULL; - ListIterator itr = list_iterator_create(gold_account_list); - - while((gold_account = list_next(itr))) { - if(user && strcmp(gold_account->user, user)) - continue; - if(project && strcmp(gold_account->project, project)) - continue; - gold_account_id = xstrdup(gold_account->gold_id); - break; - } - list_iterator_destroy(itr); - - if(gold_account_id) - return gold_account_id; - - gold_request = create_gold_request(GOLD_OBJECT_ACCOUNT, - GOLD_ACTION_QUERY); - - gold_request_add_selection(gold_request, "Id"); - gold_request_add_condition(gold_request, "User", user, - GOLD_OPERATOR_NONE); - if(project) - gold_request_add_condition(gold_request, "Project", project, - GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "Machine", machine, - GOLD_OPERATOR_NONE); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if(!gold_response) { - error("_get_account_id: no response received"); - return NULL; - } - - if(gold_response->entry_cnt > 0) { - resp_entry = list_pop(gold_response->entries); - name_val = list_pop(resp_entry->name_val); - - gold_account_id = xstrdup(name_val->value); - - destroy_gold_name_value(name_val); - destroy_gold_response_entry(resp_entry); - /* no need to keep track of machine since this is - * always going to be on the same machine. 
- */ - gold_account = xmalloc(sizeof(gold_account_t)); - gold_account->user = xstrdup(user); - gold_account->gold_id = xstrdup(gold_account_id); - if(project) - gold_account->project = xstrdup(project); - list_push(gold_account_list, gold_account); - } else { - error("no account found returning 0"); - gold_account_id = xstrdup("0"); - } - - destroy_gold_response(gold_response); - - return gold_account_id; -} - -/* - * init() is called when the plugin is loaded, before any other functions - * are called. Put global initialization here. - */ -extern int init ( void ) -{ - verbose("%s loaded", plugin_name); - return SLURM_SUCCESS; -} - -extern int fini ( void ) -{ - return SLURM_SUCCESS; -} - -/* - * The following routines are called by slurmctld - */ - -/* - * The following routines are called by slurmd - */ -int jobacct_p_init_struct(struct jobacctinfo *jobacct, - jobacct_id_t *jobacct_id) -{ - return SLURM_SUCCESS; -} - -struct jobacctinfo *jobacct_p_alloc(jobacct_id_t *jobacct_id) -{ - return NULL; -} - -void jobacct_p_free(struct jobacctinfo *jobacct) -{ - return; -} - -int jobacct_p_setinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - return SLURM_SUCCESS; - -} - -int jobacct_p_getinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - return SLURM_SUCCESS; -} - -void jobacct_p_aggregate(struct jobacctinfo *dest, struct jobacctinfo *from) -{ - return; -} - -void jobacct_p_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct) -{ - return; -} - -void jobacct_p_pack(struct jobacctinfo *jobacct, Buf buffer) -{ - return; -} - -int jobacct_p_unpack(struct jobacctinfo **jobacct, Buf buffer) -{ - return SLURM_SUCCESS; -} - - -int jobacct_p_init_slurmctld(char *gold_info) -{ - char *total = "localhost:/etc/gold/auth_key:localhost:7112"; - int found = 0; - int i=0, j=0; - char *host = NULL; - char *keyfile = NULL; - uint16_t port = 0; - - debug2("jobacct_init() called"); - if(cluster_name) { - info("already called init"); - return SLURM_SUCCESS; - } - if(gold_info) - total = gold_info; - - if(!gold_account_list) - gold_account_list = list_create(_destroy_gold_account); - - - i = 0; - while(total[j]) { - if(total[j] == ':') { - switch(found) { - case 0: // cluster_name name - cluster_name = xstrndup(total+i, j-i); - break; - case 1: // keyfile name - keyfile = xstrndup(total+i, j-i); - break; - case 2: // host name - host = xstrndup(total+i, j-i); - break; - case 3: // port - port = atoi(total+i); - break; - } - found++; - i = j+1; - } - j++; - } - if(!port) - port = atoi(total+i); - - if(!cluster_name) - fatal("JobAcctLogfile should be in the format of " - "cluster_name:gold_auth_key_file_path:" - "goldd_host:goldd_port " - "bad cluster_name"); - if (!keyfile || *keyfile != '/') - fatal("JobAcctLogfile should be in the format of " - "cluster_name:gold_auth_key_file_path:" - "goldd_host:goldd_port " - "bad key file"); - if(!host) - fatal("JobAcctLogfile should be in the format of " - "cluster_name:gold_auth_key_file_path:" - "goldd_host:goldd_port " - "bad host"); - if(!port) - fatal("JobAcctLogfile should be in the format of " - "cluster_name:gold_auth_key_file_path:" - "goldd_host:goldd_port " - "bad port"); - - debug2("connecting from %s to gold with keyfile='%s' for %s(%d)", - cluster_name, keyfile, host, port); - - init_gold(cluster_name, keyfile, host, port); - gold_agent_init(); - xfree(keyfile); - xfree(host); - - return SLURM_SUCCESS; -} - -int jobacct_p_fini_slurmctld() -{ - gold_agent_fini(); - xfree(cluster_name); - 
if(gold_account_list) - list_destroy(gold_account_list); - fini_gold(); - return SLURM_SUCCESS; -} - -int jobacct_p_job_start_slurmctld(struct job_record *job_ptr) -{ - gold_agent_msg_t msg; - gold_job_info_msg_t req; - - req.account = job_ptr->account; - req.begin_time = job_ptr->details->begin_time; - req.end_time = job_ptr->end_time; - req.exit_code = job_ptr->exit_code; - req.job_id = job_ptr->job_id; - req.job_state = job_ptr->job_state; - req.name = job_ptr->name; - req.nodes = job_ptr->nodes; - req.partition = job_ptr->partition; - req.start_time = job_ptr->start_time; - req.submit_time = job_ptr->details->submit_time; - req.total_procs = job_ptr->details->total_procs; - req.user_id = job_ptr->user_id; - msg.msg_type = GOLD_MSG_JOB_START; - msg.data = &req; - - if (gold_agent_xmit(&msg) < 0) - return SLURM_ERROR; - - return SLURM_SUCCESS; -} - -int jobacct_p_job_complete_slurmctld(struct job_record *job_ptr) -{ - gold_agent_msg_t msg; - gold_job_info_msg_t req; - - req.account = job_ptr->account; - req.begin_time = job_ptr->details->begin_time; - req.end_time = job_ptr->end_time; - req.exit_code = job_ptr->exit_code; - req.job_id = job_ptr->job_id; - req.job_state = job_ptr->job_state; - req.name = job_ptr->name; - req.nodes = job_ptr->nodes; - req.partition = job_ptr->partition; - req.start_time = job_ptr->start_time; - req.submit_time = job_ptr->details->submit_time; - req.total_procs = job_ptr->details->total_procs; - req.user_id = job_ptr->user_id; - msg.msg_type = GOLD_MSG_JOB_COMPLETE; - msg.data = &req; - - if (gold_agent_xmit(&msg) < 0) - return SLURM_ERROR; - - return SLURM_SUCCESS; -} - -int jobacct_p_step_start_slurmctld(struct step_record *step) -{ - gold_agent_msg_t msg; - gold_job_info_msg_t req; - struct job_record *job_ptr = step->job_ptr; - - req.account = job_ptr->account; - req.begin_time = job_ptr->details->begin_time; - req.end_time = job_ptr->end_time; - req.exit_code = job_ptr->exit_code; - req.job_id = job_ptr->job_id; - req.job_state = job_ptr->job_state; - req.name = job_ptr->name; - req.nodes = job_ptr->nodes; - req.partition = job_ptr->partition; - req.start_time = job_ptr->start_time; - req.submit_time = job_ptr->details->submit_time; - req.total_procs = job_ptr->details->total_procs; - req.user_id = job_ptr->user_id; - msg.msg_type = GOLD_MSG_STEP_START; - msg.data = &req; - - if (gold_agent_xmit(&msg) < 0) - return SLURM_ERROR; - - return SLURM_SUCCESS; -} - -int jobacct_p_step_complete_slurmctld(struct step_record *step) -{ - return SLURM_SUCCESS; -} - -int jobacct_p_suspend_slurmctld(struct job_record *job_ptr) -{ - return SLURM_SUCCESS; -} - -int jobacct_p_startpoll(int frequency) -{ - info("jobacct GOLD plugin loaded"); - debug3("slurmd_jobacct_init() called"); - - return SLURM_SUCCESS; -} - -int jobacct_p_endpoll() -{ - return SLURM_SUCCESS; -} - -int jobacct_p_set_proctrack_container_id(uint32_t id) -{ - return SLURM_SUCCESS; -} - -int jobacct_p_add_task(pid_t pid, jobacct_id_t *jobacct_id) -{ - return SLURM_SUCCESS; -} - -struct jobacctinfo *jobacct_p_stat_task(pid_t pid) -{ - return NULL; -} - -struct jobacctinfo *jobacct_p_remove_task(pid_t pid) -{ - return NULL; -} - -void jobacct_p_suspend_poll() -{ - return; -} - -void jobacct_p_resume_poll() -{ - return; -} - -#define _DEBUG 0 - -extern int jobacct_p_node_down(struct node_record *node_ptr, time_t event_time, - char *reason) -{ - gold_agent_msg_t msg; - gold_node_down_msg_t req; - uint16_t cpus; - - if (slurmctld_conf.fast_schedule) - cpus = node_ptr->config_ptr->cpus; - else - cpus = 
node_ptr->cpus; - if (reason == NULL) - reason = node_ptr->reason; -#if _DEBUG -{ - char tmp_buff[50]; - slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff)); - info("jobacct_p_node_down: %s at %s with %u cpus due to %s", - node_ptr->name, tmp_buff, cpus, reason); -} -#endif - req.cpus = cpus; - req.event_time = event_time; - req.hostlist = node_ptr->name; - req.reason = reason; - msg.msg_type = GOLD_MSG_NODE_DOWN; - msg.data = &req; - - if (gold_agent_xmit(&msg) < 0) - return SLURM_ERROR; - - return SLURM_SUCCESS; -} - -extern int jobacct_p_node_up(struct node_record *node_ptr, time_t event_time) -{ - gold_agent_msg_t msg; - gold_node_up_msg_t req; - -#if _DEBUG -{ - char tmp_buff[50]; - slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff)); - info("jobacct_p_node_up: %s at %s", node_ptr->name, tmp_buff); -} -#endif - - req.hostlist = node_ptr->name; - req.event_time = event_time; - msg.msg_type = GOLD_MSG_NODE_UP; - msg.data = &req; - - if (gold_agent_xmit(&msg) < 0) - return SLURM_ERROR; - - return SLURM_SUCCESS; -} - -extern int jobacct_p_cluster_procs(uint32_t procs, time_t event_time) -{ - static uint32_t last_procs = 0; - gold_agent_msg_t msg; - gold_cluster_procs_msg_t req; - -#if _DEBUG -{ - char tmp_buff[50]; - slurm_make_time_str(&event_time, tmp_buff, sizeof(tmp_buff)); - info("jobacct_p_cluster_procs: %s has %u total CPUs at %s", - cluster_name, procs, tmp_buff); -} -#endif - if (procs == last_procs) { - debug3("jobacct_p_cluster_procs: no change in proc count"); - return SLURM_SUCCESS; - } - last_procs = procs; - - req.proc_count = procs; - req.event_time = event_time; - msg.msg_type = GOLD_MSG_CLUSTER_PROCS; - msg.data = &req; - - if (gold_agent_xmit(&msg) < 0) - return SLURM_ERROR; - - return SLURM_SUCCESS; -} - -/* - * Functions that process queued Gold requests - */ -extern int agent_job_start(Buf buffer) -{ - int rc; - gold_job_info_msg_t *job_info_msg; - gold_object_t action; - - if (gold_agent_unpack_job_info_msg(&job_info_msg, buffer) != - SLURM_SUCCESS) { - error("Failed to unpack GOLD_MSG_JOB_START message"); - return SLURM_ERROR; - } - - if (_check_for_job(job_info_msg->job_id, - job_info_msg->submit_time)) { - error("Job %u is already in GOLD, overwrite old info", - job_info_msg->job_id); - action = GOLD_ACTION_MODIFY; - } else { - action = GOLD_ACTION_CREATE; - } - - rc = _add_edit_job(job_info_msg, action); - gold_agent_free_job_info_msg(job_info_msg); - return rc; -} - -extern int agent_job_complete(Buf buffer) -{ - int rc; - gold_job_info_msg_t *job_info_msg; - gold_object_t action; - - if (gold_agent_unpack_job_info_msg(&job_info_msg, buffer) != - SLURM_SUCCESS) { - error("Failed to unpack GOLD_MSG_JOB_COMPLETE message"); - return SLURM_ERROR; - } - - if (_check_for_job(job_info_msg->job_id, - job_info_msg->submit_time)) { - action = GOLD_ACTION_MODIFY; - } else { - error("Job %u is missing from GOLD, creating new record", - job_info_msg->job_id); - action = GOLD_ACTION_CREATE; - } - - rc = _add_edit_job(job_info_msg, action); - gold_agent_free_job_info_msg(job_info_msg); - return rc; -} - -extern int agent_step_start(Buf buffer) -{ - int rc; - gold_job_info_msg_t *job_info_msg; - gold_object_t action; - - if (gold_agent_unpack_job_info_msg(&job_info_msg, buffer) != - SLURM_SUCCESS) { - error("Failed to unpack GOLD_MSG_STEP_START message"); - return SLURM_ERROR; - } - - if (_check_for_job(job_info_msg->job_id, - job_info_msg->submit_time)) { - action = GOLD_ACTION_MODIFY; - } else { - error("Job %u is missing from GOLD, creating new record", - 
job_info_msg->job_id); - action = GOLD_ACTION_CREATE; - } - - rc = _add_edit_job(job_info_msg, action); - gold_agent_free_job_info_msg(job_info_msg); - return rc; -} - -/* - * Update a job entry - * RET SLURM_SUCCESS on success - * SLURM_ERROR on non-recoverable error (e.g. invalid account ID) - * EAGAIN on recoverable error (e.g. Gold not responding) - */ -static int _add_edit_job(gold_job_info_msg_t *job_ptr, gold_object_t action) -{ - gold_request_t *gold_request = create_gold_request(GOLD_OBJECT_JOB, - action); - gold_response_t *gold_response = NULL; - char tmp_buff[50]; - int rc = SLURM_ERROR; - char *gold_account_id = NULL; - char *user = uid_to_string((uid_t)job_ptr->user_id); - char *jname = NULL; - int tmp = 0, i = 0; - char *account = NULL; - char *nodes = "(null)"; - - if (!gold_request) - return SLURM_ERROR; - - if (action == GOLD_ACTION_CREATE) { - snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->job_id); - gold_request_add_assignment(gold_request, "JobId", tmp_buff); - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - (uint32_t)job_ptr->submit_time); - gold_request_add_assignment(gold_request, "SubmitTime", - tmp_buff); - - gold_account_id = _get_account_id(user, account, - cluster_name); - if ((gold_account_id == NULL) || - ((gold_account_id[0] == '0') && (gold_account_id[1] == '\0'))) { - destroy_gold_request(gold_request); - if (gold_account_id) { - xfree(gold_account_id); - return SLURM_ERROR; /* Invalid account */ - } - return EAGAIN; /* Gold not responding */ - } - gold_request_add_assignment(gold_request, "GoldAccountId", - gold_account_id); - xfree(gold_account_id); - - } else if (action == GOLD_ACTION_MODIFY) { - snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->job_id); - gold_request_add_condition(gold_request, "JobId", tmp_buff, - GOLD_OPERATOR_NONE); - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - (uint32_t)job_ptr->submit_time); - gold_request_add_condition(gold_request, "SubmitTime", - tmp_buff, - GOLD_OPERATOR_NONE); - } else { - destroy_gold_request(gold_request); - error("_add_edit_job: bad action given %d", - action); - return SLURM_ERROR; - } - - if ((tmp = strlen(job_ptr->name))) { - jname = xmalloc(++tmp); - for (i=0; i<tmp; i++) { - if (isspace(job_ptr->name[i])) - jname[i]='_'; - else - jname[i]=job_ptr->name[i]; - } - } else - jname = xstrdup("allocation"); - gold_request_add_assignment(gold_request, "JobName", jname); - xfree(jname); - - if (job_ptr->account && job_ptr->account[0]) - account = job_ptr->account; - - if (job_ptr->nodes && job_ptr->nodes[0]) - nodes = job_ptr->nodes; - - gold_request_add_assignment(gold_request, "Partition", - job_ptr->partition); - snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->total_procs); - gold_request_add_assignment(gold_request, "RequestedCPUCount", - tmp_buff); - snprintf(tmp_buff, sizeof(tmp_buff), "%u", job_ptr->total_procs); - gold_request_add_assignment(gold_request, "AllocatedCPUCount", - tmp_buff); - gold_request_add_assignment(gold_request, "NodeList", nodes); - - - if (job_ptr->job_state >= JOB_COMPLETE) { - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - (uint32_t)job_ptr->end_time); - gold_request_add_assignment(gold_request, "EndTime", - tmp_buff); - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - job_ptr->exit_code); - gold_request_add_assignment(gold_request, "ExitCode", - tmp_buff); - } - - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - (uint32_t)job_ptr->begin_time); - gold_request_add_assignment(gold_request, "EligibleTime", tmp_buff); - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - 
(uint32_t)job_ptr->start_time); - gold_request_add_assignment(gold_request, "StartTime", tmp_buff); - - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - job_ptr->job_state & (~JOB_COMPLETING)); - gold_request_add_assignment(gold_request, "State", tmp_buff); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if (!gold_response) { - error("_add_edit_job: no response received"); - return EAGAIN; - } - - if (!gold_response->rc) - rc = SLURM_SUCCESS; - else { - error("gold_response has non-zero rc(%d): %s", - gold_response->rc, - gold_response->message); - rc = SLURM_ERROR; - } - destroy_gold_response(gold_response); - - return rc; -} - -extern int agent_node_up(Buf buffer) -{ - int rc = SLURM_ERROR; - gold_request_t *gold_request = NULL; - gold_response_t *gold_response = NULL; - char tmp_buff[50]; - gold_node_up_msg_t *node_up_msg; - time_t event_time; - - if (gold_agent_unpack_node_up_msg(&node_up_msg, buffer) != - SLURM_SUCCESS) { - error("Failed to unpack GOLD_MSG_NODE_UP message"); - return SLURM_ERROR; - } - - gold_request = create_gold_request(GOLD_OBJECT_EVENT, - GOLD_ACTION_MODIFY); - if (!gold_request) - goto fini; - - gold_request_add_condition(gold_request, "Machine", - cluster_name, GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "EndTime", "0", - GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "Name", - node_up_msg->hostlist, - GOLD_OPERATOR_NONE); - event_time = node_up_msg->event_time; - if (event_time) - event_time--; - snprintf(tmp_buff, sizeof(tmp_buff), "%u", (uint32_t)event_time); - gold_request_add_assignment(gold_request, "EndTime", - tmp_buff); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if (!gold_response) { - error("agent_node_up: no response received"); - rc = EAGAIN; - goto fini; - } - - if (gold_response->rc) { - error("gold_response has non-zero rc(%d): %s", - gold_response->rc, - gold_response->message); - destroy_gold_response(gold_response); - goto fini; - } - destroy_gold_response(gold_response); - rc = SLURM_SUCCESS; - - fini: gold_agent_free_node_up_msg(node_up_msg); - return rc; -} - -extern int agent_node_down(Buf buffer) -{ - int rc = SLURM_ERROR; - gold_request_t *gold_request = NULL; - gold_response_t *gold_response = NULL; - char tmp_buff[50]; - gold_node_down_msg_t *node_down_msg; - time_t event_time; - - if (gold_agent_unpack_node_down_msg(&node_down_msg, buffer) != - SLURM_SUCCESS) { - error("Failed to unpack GOLD_MSG_NODE_DOWN message"); - return SLURM_ERROR; - } - - /* - * If the node was already down end that record since the - * reason will most likely be different - */ - gold_request = create_gold_request(GOLD_OBJECT_EVENT, - GOLD_ACTION_MODIFY); - if (!gold_request) - goto fini; - - gold_request_add_condition(gold_request, "Machine", - cluster_name, GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "EndTime", "0", - GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "Name", - node_down_msg->hostlist, - GOLD_OPERATOR_NONE); - event_time = node_down_msg->event_time; - if (event_time) - event_time--; - snprintf(tmp_buff, sizeof(tmp_buff), "%u", (uint32_t)event_time); - gold_request_add_assignment(gold_request, "EndTime", - tmp_buff); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if (!gold_response) { - error("jobacct_p_node_down: no response received"); - rc = EAGAIN; - goto fini; - } - - if (gold_response->rc) { - error("gold_response has non-zero 
rc(%d): %s", - gold_response->rc, - gold_response->message); - destroy_gold_response(gold_response); - goto fini; - } - destroy_gold_response(gold_response); - - /* now add the new one */ - gold_request = create_gold_request(GOLD_OBJECT_EVENT, - GOLD_ACTION_CREATE); - if (!gold_request) - goto fini; - - gold_request_add_assignment(gold_request, "Machine", cluster_name); - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - (uint32_t)node_down_msg->event_time); - gold_request_add_assignment(gold_request, "StartTime", tmp_buff); - gold_request_add_assignment(gold_request, "Name", - node_down_msg->hostlist); - snprintf(tmp_buff, sizeof(tmp_buff), "%u", node_down_msg->cpus); - gold_request_add_assignment(gold_request, "CPUCount", tmp_buff); - gold_request_add_assignment(gold_request, "Reason", - node_down_msg->reason); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if (!gold_response) { - error("jobacct_p_node_down: no response received"); - rc = EAGAIN; - goto fini; - } - - if (!gold_response->rc) - rc = SLURM_SUCCESS; - else { - error("gold_response has non-zero rc(%d): %s", - gold_response->rc, - gold_response->message); - } - destroy_gold_response(gold_response); - - fini: gold_agent_free_node_down_msg(node_down_msg); - return rc; -} - -extern int agent_cluster_procs(Buf buffer) -{ - gold_cluster_procs_msg_t *cluster_procs_msg; - gold_request_t *gold_request = NULL; - gold_response_t *gold_response = NULL; - char tmp_buff[50]; - int rc = SLURM_ERROR; - bool no_modify = 0; - time_t event_time; - - if (gold_agent_unpack_cluster_procs_msg(&cluster_procs_msg, buffer) != - SLURM_SUCCESS) { - error("Failed to unpack GOLD_MSG_CLUSTER_PROCS message"); - return SLURM_ERROR; - } - - /* get the last known processor count */ - gold_request = create_gold_request(GOLD_OBJECT_EVENT, - GOLD_ACTION_QUERY); - if (!gold_request) - goto fini; - gold_request_add_condition(gold_request, "Machine", - cluster_name, GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "EndTime", "0", - GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "Name", "NULL", - GOLD_OPERATOR_NONE); - - gold_request_add_selection(gold_request, "CPUCount"); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if (!gold_response) { - error("jobacct_p_cluster_procs: no response received"); - rc = EAGAIN; - goto fini; - } - - if (gold_response->entry_cnt > 0) { - gold_response_entry_t *resp_entry = - list_pop(gold_response->entries); - gold_name_value_t *name_val = list_pop(resp_entry->name_val); - - if (cluster_procs_msg->proc_count == atoi(name_val->value)) { - debug("System hasn't changed since last entry"); - destroy_gold_name_value(name_val); - destroy_gold_response_entry(resp_entry); - destroy_gold_response(gold_response); - rc = SLURM_SUCCESS; - goto fini; - } else { - debug("System has changed from %s cpus to %u", - name_val->value, cluster_procs_msg->proc_count); - } - - destroy_gold_name_value(name_val); - destroy_gold_response_entry(resp_entry); - } else { - debug("We don't have an entry for this machine " - "most likely a first time running."); - no_modify = 1; - } - - destroy_gold_response(gold_response); - - if (no_modify) { - gold_request = create_gold_request(GOLD_OBJECT_EVENT, - GOLD_ACTION_MODIFY); - if (!gold_request) - goto fini; - - gold_request_add_condition(gold_request, "Machine", - cluster_name, GOLD_OPERATOR_NONE); - gold_request_add_condition(gold_request, "EndTime", "0", - GOLD_OPERATOR_NONE); - 
gold_request_add_condition(gold_request, "Name", "NULL", - GOLD_OPERATOR_NONE); - - event_time = cluster_procs_msg->event_time; - if (event_time) - event_time--; - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - (uint32_t)event_time); - gold_request_add_assignment(gold_request, "EndTime", - tmp_buff); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if (!gold_response) { - error("jobacct_p_cluster_procs: no response " - "received"); - rc = EAGAIN; - goto fini; - } - - if (gold_response->rc) { - error("gold_response has non-zero rc(%d): %s", - gold_response->rc, - gold_response->message); - destroy_gold_response(gold_response); - goto fini; - } - destroy_gold_response(gold_response); - } - - /* now add the new processor count */ - gold_request = create_gold_request(GOLD_OBJECT_EVENT, - GOLD_ACTION_CREATE); - if (!gold_request) - goto fini; - - gold_request_add_assignment(gold_request, "Machine", cluster_name); - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - (uint32_t)cluster_procs_msg->event_time); - gold_request_add_assignment(gold_request, "StartTime", tmp_buff); - snprintf(tmp_buff, sizeof(tmp_buff), "%u", - cluster_procs_msg->proc_count); - gold_request_add_assignment(gold_request, "CPUCount", tmp_buff); - - gold_response = get_gold_response(gold_request); - destroy_gold_request(gold_request); - - if (!gold_response) { - error("jobacct_p_cluster_procs: no response received"); - rc = EAGAIN; - goto fini; - } - - if (!gold_response->rc) - rc = SLURM_SUCCESS; - else { - error("gold_response has non-zero rc(%d): %s", - gold_response->rc, - gold_response->message); - } - destroy_gold_response(gold_response); - - fini: gold_agent_free_cluster_procs_msg(cluster_procs_msg); - return rc; -} diff --git a/src/plugins/jobacct/linux/Makefile.am b/src/plugins/jobacct/linux/Makefile.am deleted file mode 100644 index abf998966..000000000 --- a/src/plugins/jobacct/linux/Makefile.am +++ /dev/null @@ -1,19 +0,0 @@ -# Makefile for jobacct/linux plugin - -AUTOMAKE_OPTIONS = foreign - -PLUGIN_FLAGS = -module -avoid-version --export-dynamic - -INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common - -pkglib_LTLIBRARIES = jobacct_linux.la - -# Null job completion logging plugin. -jobacct_linux_la_SOURCES = jobacct_linux.c \ - $(top_builddir)/src/plugins/jobacct/common/jobacct_common.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c \ - $(top_builddir)/src/slurmd/common/proctrack.c \ - $(top_builddir)/src/slurmd/common/proctrack.h - -jobacct_linux_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) diff --git a/src/plugins/jobacct/none/Makefile.am b/src/plugins/jobacct/none/Makefile.am deleted file mode 100644 index 4d6584b31..000000000 --- a/src/plugins/jobacct/none/Makefile.am +++ /dev/null @@ -1,13 +0,0 @@ -# Makefile for jobacct/none plugin - -AUTOMAKE_OPTIONS = foreign - -PLUGIN_FLAGS = -module -avoid-version --export-dynamic - -INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common - -pkglib_LTLIBRARIES = jobacct_none.la - -# Null job completion logging plugin. 
-jobacct_none_la_SOURCES = jobacct_none.c -jobacct_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) diff --git a/src/plugins/jobacct/Makefile.am b/src/plugins/jobacct_gather/Makefile.am similarity index 51% rename from src/plugins/jobacct/Makefile.am rename to src/plugins/jobacct_gather/Makefile.am index 1412b0808..1039e1ad8 100644 --- a/src/plugins/jobacct/Makefile.am +++ b/src/plugins/jobacct_gather/Makefile.am @@ -1,3 +1,3 @@ # Makefile for jobacct plugins -SUBDIRS = linux aix none gold +SUBDIRS = linux aix none diff --git a/src/plugins/jobacct/Makefile.in b/src/plugins/jobacct_gather/Makefile.in similarity index 94% rename from src/plugins/jobacct/Makefile.in rename to src/plugins/jobacct_gather/Makefile.in index 4fa1bb9b9..81976b36f 100644 --- a/src/plugins/jobacct/Makefile.in +++ b/src/plugins/jobacct_gather/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -34,7 +34,7 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ target_triplet = @target@ -subdir = src/plugins/jobacct +subdir = src/plugins/jobacct_gather DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ 
SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -234,7 +247,7 @@ target_os = @target_os@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -SUBDIRS = linux aix none gold +SUBDIRS = linux aix none all: all-recursive .SUFFIXES: @@ -247,9 +260,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/plugins/jobacct/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/plugins/jobacct_gather/Makefile'; \ cd $(top_srcdir) && \ - $(AUTOMAKE) --gnu src/plugins/jobacct/Makefile + $(AUTOMAKE) --gnu src/plugins/jobacct_gather/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobacct_gather/aix/Makefile.am b/src/plugins/jobacct_gather/aix/Makefile.am new file mode 100644 index 000000000..4ceb3a62b --- /dev/null +++ b/src/plugins/jobacct_gather/aix/Makefile.am @@ -0,0 +1,17 @@ +# Makefile for jobacct_gather/aix plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = jobacct_gather_aix.la + +# Null job completion logging plugin. 
+jobacct_gather_aix_la_SOURCES = jobacct_gather_aix.c \ + $(top_builddir)/src/slurmd/common/proctrack.c \ + $(top_builddir)/src/slurmd/common/proctrack.h + +jobacct_gather_aix_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) + diff --git a/src/plugins/jobacct/aix/Makefile.in b/src/plugins/jobacct_gather/aix/Makefile.in similarity index 74% rename from src/plugins/jobacct/aix/Makefile.in rename to src/plugins/jobacct_gather/aix/Makefile.in index 9ddd082c1..2e74f286e 100644 --- a/src/plugins/jobacct/aix/Makefile.in +++ b/src/plugins/jobacct_gather/aix/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -14,7 +14,7 @@ @SET_MAKE@ -# Makefile for jobacct/aix plugin +# Makefile for jobacct_gather/aix plugin VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ @@ -35,7 +35,7 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ target_triplet = @target@ -subdir = src/plugins/jobacct/aix +subdir = src/plugins/jobacct_gather/aix DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,14 +75,13 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -jobacct_aix_la_LIBADD = -am_jobacct_aix_la_OBJECTS = jobacct_aix.lo jobacct_common.lo \ - common_slurmctld.lo common_slurmstepd.lo proctrack.lo -jobacct_aix_la_OBJECTS = $(am_jobacct_aix_la_OBJECTS) -jobacct_aix_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ +jobacct_gather_aix_la_LIBADD = +am_jobacct_gather_aix_la_OBJECTS = jobacct_gather_aix.lo proctrack.lo +jobacct_gather_aix_la_OBJECTS = $(am_jobacct_gather_aix_la_OBJECTS) +jobacct_gather_aix_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ - $(jobacct_aix_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ + $(jobacct_gather_aix_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -92,8 +93,8 @@ CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ -SOURCES = $(jobacct_aix_la_SOURCES) -DIST_SOURCES = $(jobacct_aix_la_SOURCES) +SOURCES = $(jobacct_gather_aix_la_SOURCES) +DIST_SOURCES = $(jobacct_gather_aix_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) @@ -120,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -256,17 +268,14 @@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign PLUGIN_FLAGS = -module -avoid-version --export-dynamic INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -pkglib_LTLIBRARIES = jobacct_aix.la +pkglib_LTLIBRARIES = jobacct_gather_aix.la # Null job completion logging plugin. 
-jobacct_aix_la_SOURCES = jobacct_aix.c \ - $(top_builddir)/src/plugins/jobacct/common/jobacct_common.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c \ +jobacct_gather_aix_la_SOURCES = jobacct_gather_aix.c \ $(top_builddir)/src/slurmd/common/proctrack.c \ $(top_builddir)/src/slurmd/common/proctrack.h -jobacct_aix_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +jobacct_gather_aix_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) all: all-am .SUFFIXES: @@ -280,9 +289,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobacct/aix/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobacct_gather/aix/Makefile'; \ cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign src/plugins/jobacct/aix/Makefile + $(AUTOMAKE) --foreign src/plugins/jobacct_gather/aix/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ @@ -306,8 +315,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -315,8 +324,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -327,8 +336,8 @@ clean-pkglibLTLIBRARIES: echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done -jobacct_aix.la: $(jobacct_aix_la_OBJECTS) $(jobacct_aix_la_DEPENDENCIES) - $(jobacct_aix_la_LINK) -rpath $(pkglibdir) $(jobacct_aix_la_OBJECTS) $(jobacct_aix_la_LIBADD) $(LIBS) +jobacct_gather_aix.la: $(jobacct_gather_aix_la_OBJECTS) $(jobacct_gather_aix_la_DEPENDENCIES) + $(jobacct_gather_aix_la_LINK) -rpath $(pkglibdir) $(jobacct_gather_aix_la_OBJECTS) $(jobacct_gather_aix_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) @@ -336,10 +345,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common_slurmctld.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common_slurmstepd.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_aix.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_common.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_gather_aix.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proctrack.Plo@am__quote@ .c.o: @@ -363,27 +369,6 @@ distclean-compile: @AMDEP_TRUE@@am__fastdepCC_FALSE@ 
DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< -jobacct_common.lo: $(top_builddir)/src/plugins/jobacct/common/jobacct_common.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT jobacct_common.lo -MD -MP -MF $(DEPDIR)/jobacct_common.Tpo -c -o jobacct_common.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/jobacct_common.Tpo $(DEPDIR)/jobacct_common.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c' object='jobacct_common.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o jobacct_common.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c - -common_slurmctld.lo: $(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT common_slurmctld.lo -MD -MP -MF $(DEPDIR)/common_slurmctld.Tpo -c -o common_slurmctld.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/common_slurmctld.Tpo $(DEPDIR)/common_slurmctld.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c' object='common_slurmctld.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o common_slurmctld.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c - -common_slurmstepd.lo: $(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT common_slurmstepd.lo -MD -MP -MF $(DEPDIR)/common_slurmstepd.Tpo -c -o common_slurmstepd.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/common_slurmstepd.Tpo $(DEPDIR)/common_slurmstepd.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c' object='common_slurmstepd.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) 
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o common_slurmstepd.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c - proctrack.lo: $(top_builddir)/src/slurmd/common/proctrack.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT proctrack.lo -MD -MP -MF $(DEPDIR)/proctrack.Tpo -c -o proctrack.lo `test -f '$(top_builddir)/src/slurmd/common/proctrack.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/proctrack.c @am__fastdepCC_TRUE@ mv -f $(DEPDIR)/proctrack.Tpo $(DEPDIR)/proctrack.Plo @@ -402,8 +387,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -415,8 +400,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -426,13 +411,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobacct/aix/jobacct_aix.c b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c similarity index 75% rename from src/plugins/jobacct/aix/jobacct_aix.c rename to src/plugins/jobacct_gather/aix/jobacct_gather_aix.c index 98b6545e7..dc0b0ca35 100644 --- a/src/plugins/jobacct/aix/jobacct_aix.c +++ b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c @@ -1,11 +1,11 @@ /*****************************************************************************\ - * jobacct_aix.c - slurm job accounting plugin for AIX. + * jobacct_gather_aix.c - slurm job accounting gather plugin for AIX. ***************************************************************************** * * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. * Written by Andy Riebs, <andy.riebs@hp.com>, who borrowed heavily * from other parts of SLURM, and Danny Auble, <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -39,7 +39,11 @@ * Copyright (C) 2002 The Regents of the University of California. 
\*****************************************************************************/ -#include "src/plugins/jobacct/common/jobacct_common.h" +#include <signal.h> +#include "src/common/jobacct_common.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_protocol_defs.h" +#include "src/slurmd/common/proctrack.h" #ifdef HAVE_AIX #include <procinfo.h> @@ -77,8 +81,8 @@ * minimum versions for their plugins as the job accounting API * matures. */ -const char plugin_name[] = "Job accounting AIX plugin"; -const char plugin_type[] = "jobacct/aix"; +const char plugin_name[] = "Job accounting gather AIX plugin"; +const char plugin_type[] = "jobacct_gather/aix"; const uint32_t plugin_version = 100; /* Other useful declarations */ @@ -97,6 +101,7 @@ static int freq = 0; static int pagesize = 0; /* Finally, pre-define all the routines. */ +static void _acct_kill_job(void); static void _get_offspring_data(List prec_list, prec_t *ancestor, pid_t pid); static void _get_process_data(); static void *_watch_tasks(void *arg); @@ -109,222 +114,6 @@ extern int getprocs(struct procsinfo *procinfo, int, struct fdsinfo *, /* nproc: number of user procinfo struct */ /* sizproc: size of expected procinfo structure */ -#endif - -/* - * init() is called when the plugin is loaded, before any other functions - * are called. Put global initialization here. - */ -extern int init ( void ) -{ - char *proctrack = slurm_get_proctrack_type(); - if(!strcasecmp(proctrack, "proctrack/pgid")) { - info("WARNING: We will use a much slower algorithm with " - "proctrack/pgid, use Proctracktype=proctrack/aix " - "with %s", plugin_name); - pgid_plugin = true; - } - xfree(proctrack); - - verbose("%s loaded", plugin_name); - return SLURM_SUCCESS; -} - -extern int fini ( void ) -{ - return SLURM_SUCCESS; -} - -/* - * The following routine is called by the slurmd mainline - */ - -int jobacct_p_init_struct(struct jobacctinfo *jobacct, - jobacct_id_t *jobacct_id) -{ - return common_init_struct(jobacct, jobacct_id); -} - -struct jobacctinfo *jobacct_p_alloc(jobacct_id_t *jobacct_id) -{ - return common_alloc_jobacct(jobacct_id); -} - -void jobacct_p_free(struct jobacctinfo *jobacct) -{ - common_free_jobacct(jobacct); -} - -int jobacct_p_setinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - return common_setinfo(jobacct, type, data); - -} - -int jobacct_p_getinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - return common_getinfo(jobacct, type, data); -} - -void jobacct_p_aggregate(struct jobacctinfo *dest, struct jobacctinfo *from) -{ - common_aggregate(dest, from); -} - -void jobacct_p_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct) -{ - common_2_sacct(sacct, jobacct); -} - -void jobacct_p_pack(struct jobacctinfo *jobacct, Buf buffer) -{ - common_pack(jobacct, buffer); -} - -int jobacct_p_unpack(struct jobacctinfo **jobacct, Buf buffer) -{ - return common_unpack(jobacct, buffer); -} - - -int jobacct_p_init_slurmctld(char *job_acct_log) -{ - return common_init_slurmctld(job_acct_log); -} - -int jobacct_p_fini_slurmctld() -{ - return common_fini_slurmctld(); -} - -int jobacct_p_job_start_slurmctld(struct job_record *job_ptr) -{ - return common_job_start_slurmctld(job_ptr); -} - -int jobacct_p_job_complete_slurmctld(struct job_record *job_ptr) -{ - return common_job_complete_slurmctld(job_ptr); -} - -int jobacct_p_step_start_slurmctld(struct step_record *step) -{ - return common_step_start_slurmctld(step); -} - -int jobacct_p_step_complete_slurmctld(struct 
step_record *step) -{ - return common_step_complete_slurmctld(step); -} - -int jobacct_p_suspend_slurmctld(struct job_record *job_ptr) -{ - return common_suspend_slurmctld(job_ptr); -} -/* - * jobacct_startpoll() is called when the plugin is loaded by - * slurmd, before any other functions are called. Put global - * initialization here. - */ - -int jobacct_p_startpoll(int frequency) -{ - int rc = SLURM_SUCCESS; - -#ifdef HAVE_AIX - pthread_attr_t attr; - pthread_t _watch_tasks_thread_id; - - debug("jobacct AIX plugin loaded"); - - debug("jobacct: frequency = %d", frequency); - - jobacct_shutdown = false; - - if (frequency == 0) { /* don't want dynamic monitoring? */ - debug2("jobacct AIX dynamic logging disabled"); - return rc; - } - - freq = frequency; - pagesize = getpagesize()/1024; - task_list = list_create(common_free_jobacct); - - /* create polling thread */ - slurm_attr_init(&attr); - if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) - error("pthread_attr_setdetachstate error %m"); - - if (pthread_create(&_watch_tasks_thread_id, &attr, - &_watch_tasks, NULL)) { - debug("jobacct failed to create _watch_tasks " - "thread: %m"); - frequency = 0; - } - else - debug3("jobacct AIX dynamic logging enabled"); - slurm_attr_destroy(&attr); -#else - error("jobacct AIX not loaded, not an aix system, check slurm.conf"); -#endif - return rc; -} - -int jobacct_p_endpoll() -{ - return common_endpoll(); -} - -int jobacct_p_set_proctrack_container_id(uint32_t id) -{ - return common_set_proctrack_container_id(id); -} - -int jobacct_p_add_task(pid_t pid, jobacct_id_t *jobacct_id) -{ - return common_add_task(pid, jobacct_id); -} - -struct jobacctinfo *jobacct_p_stat_task(pid_t pid) -{ -#ifdef HAVE_AIX - _get_process_data(); -#endif - return common_stat_task(pid); -} - -struct jobacctinfo *jobacct_p_remove_task(pid_t pid) -{ - return common_remove_task(pid); -} - -void jobacct_p_suspend_poll() -{ - common_suspend_poll(); -} - -void jobacct_p_resume_poll() -{ - common_resume_poll(); -} - -extern int jobacct_p_node_down(struct node_record *node_ptr, - time_t event_time, char *reason) -{ - return SLURM_SUCCESS; -} -extern int jobacct_p_node_up(struct node_record *node_ptr, time_t event_time) -{ - return SLURM_SUCCESS; -} -extern int jobacct_p_cluster_procs(uint32_t procs, time_t event_time) -{ - return SLURM_SUCCESS; -} - -#ifdef HAVE_AIX /* * _get_offspring_data() -- collect memory usage data for the offspring @@ -390,9 +179,8 @@ static void _get_process_data() struct procsinfo proc; pid_t *pids = NULL; int npids = 0; - int i; - + uint32_t total_job_mem = 0; int pid = 0; static int processing = 0; prec_t *prec = NULL; @@ -483,6 +271,7 @@ static void _get_process_data() /* tally their usage */ jobacct->max_rss = jobacct->tot_rss = MAX(jobacct->max_rss, (int)prec->rss); + total_job_mem += jobacct->max_rss; jobacct->max_vsize = jobacct->tot_vsize = MAX(jobacct->max_vsize, (int)prec->vsize); @@ -502,6 +291,18 @@ static void _get_process_data() } list_iterator_destroy(itr); slurm_mutex_unlock(&jobacct_lock); + + if (job_mem_limit) { + debug("Job %u memory used:%u limit:%u KB", + acct_job_id, total_job_mem, job_mem_limit); + } + if (acct_job_id && job_mem_limit && + (total_job_mem > job_mem_limit)) { + error("Job %u exceeded %u KB memory limit, being killed", + acct_job_id, job_mem_limit); + _acct_kill_job(); + } + finished: list_destroy(prec_list); processing = 0; @@ -509,6 +310,25 @@ finished: return; } +/* _acct_kill_job() issue RPC to kill a slurm job */ +static void _acct_kill_job(void) +{ + 
slurm_msg_t msg; + job_step_kill_msg_t req; + + slurm_msg_t_init(&msg); + /* + * Request message: + */ + req.job_id = acct_job_id; + req.job_step_id = NO_VAL; + req.signal = SIGKILL; + req.batch_flag = 0; + msg.msg_type = REQUEST_CANCEL_JOB_STEP; + msg.data = &req; + + slurm_send_only_controller_msg(&msg); +} /* _watch_tasks() -- monitor slurm jobs and track their memory usage * @@ -519,7 +339,7 @@ static void *_watch_tasks(void *arg) { while(!jobacct_shutdown) { /* Do this until shutdown is requested */ - if(!suspended) { + if(!jobacct_suspended) { _get_process_data(); /* Update the data */ } sleep(freq); @@ -536,3 +356,179 @@ static void _destroy_prec(void *object) } #endif + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + char *temp = slurm_get_proctrack_type(); + if(!strcasecmp(temp, "proctrack/pgid")) { + info("WARNING: We will use a much slower algorithm with " + "proctrack/pgid, use Proctracktype=proctrack/aix " + "with %s", plugin_name); + pgid_plugin = true; + } + xfree(temp); + temp = slurm_get_accounting_storage_type(); + if(!strcasecmp(temp, ACCOUNTING_STORAGE_TYPE_NONE)) { + error("WARNING: Even though we are collecting accounting " + "information you have asked for it not to be stored " + "(%s) if this is not what you have in mind you will " + "need to change it.", ACCOUNTING_STORAGE_TYPE_NONE); + } + xfree(temp); + + verbose("%s loaded", plugin_name); + return SLURM_SUCCESS; +} + +extern int fini ( void ) +{ + return SLURM_SUCCESS; +} + +extern struct jobacctinfo *jobacct_gather_p_create(jobacct_id_t *jobacct_id) +{ + return jobacct_common_alloc_jobacct(jobacct_id); +} + +extern void jobacct_gather_p_destroy(struct jobacctinfo *jobacct) +{ + jobacct_common_free_jobacct(jobacct); +} + +extern int jobacct_gather_p_setinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) +{ + return jobacct_common_setinfo(jobacct, type, data); + +} + +extern int jobacct_gather_p_getinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) +{ + return jobacct_common_getinfo(jobacct, type, data); +} + +extern void jobacct_gather_p_pack(struct jobacctinfo *jobacct, Buf buffer) +{ + jobacct_common_pack(jobacct, buffer); +} + +extern int jobacct_gather_p_unpack(struct jobacctinfo **jobacct, Buf buffer) +{ + return jobacct_common_unpack(jobacct, buffer); +} + +extern void jobacct_gather_p_aggregate(struct jobacctinfo *dest, + struct jobacctinfo *from) +{ + jobacct_common_aggregate(dest, from); +} + +/* + * jobacct_startpoll() is called when the plugin is loaded by + * slurmd, before any other functions are called. Put global + * initialization here. + */ + +extern int jobacct_gather_p_startpoll(uint16_t frequency) +{ + int rc = SLURM_SUCCESS; + +#ifdef HAVE_AIX + pthread_attr_t attr; + pthread_t _watch_tasks_thread_id; + + debug("%s loaded", plugin_name); + + debug("jobacct: frequency = %d", frequency); + + jobacct_shutdown = false; + + if (frequency == 0) { /* don't want dynamic monitoring? 
*/ + debug2("jobacct AIX dynamic logging disabled"); + return rc; + } + + freq = frequency; + pagesize = getpagesize()/1024; + task_list = list_create(jobacct_common_free_jobacct); + + /* create polling thread */ + slurm_attr_init(&attr); + if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) + error("pthread_attr_setdetachstate error %m"); + + if (pthread_create(&_watch_tasks_thread_id, &attr, + &_watch_tasks, NULL)) { + debug("jobacct failed to create _watch_tasks " + "thread: %m"); + frequency = 0; + } + else + debug3("jobacct AIX dynamic logging enabled"); + slurm_attr_destroy(&attr); +#else + error("jobacct AIX not loaded, not an aix system, check slurm.conf"); +#endif + return rc; +} + +extern int jobacct_gather_p_endpoll() +{ + jobacct_shutdown = true; + + return SLURM_SUCCESS; +} + +extern void jobacct_gather_p_change_poll(uint16_t frequency) +{ +#ifdef HAVE_AIX + freq = frequency; + if (freq == 0) + jobacct_shutdown = true; +#endif + return; +} + +extern void jobacct_gather_p_suspend_poll() +{ + jobacct_common_suspend_poll(); +} + +extern void jobacct_gather_p_resume_poll() +{ + jobacct_common_resume_poll(); +} + +extern int jobacct_gather_p_set_proctrack_container_id(uint32_t id) +{ + return jobacct_common_set_proctrack_container_id(id); +} + +extern int jobacct_gather_p_add_task(pid_t pid, jobacct_id_t *jobacct_id) +{ + return jobacct_common_add_task(pid, jobacct_id); +} + +extern struct jobacctinfo *jobacct_gather_p_stat_task(pid_t pid) +{ +#ifdef HAVE_AIX + _get_process_data(); +#endif + return jobacct_common_stat_task(pid); +} + +extern struct jobacctinfo *jobacct_gather_p_remove_task(pid_t pid) +{ + return jobacct_common_remove_task(pid); +} + +extern void jobacct_gather_p_2_sacct(sacct_t *sacct, + struct jobacctinfo *jobacct) +{ + jobacct_common_2_sacct(sacct, jobacct); +} diff --git a/src/plugins/jobacct_gather/linux/Makefile.am b/src/plugins/jobacct_gather/linux/Makefile.am new file mode 100644 index 000000000..8a9451752 --- /dev/null +++ b/src/plugins/jobacct_gather/linux/Makefile.am @@ -0,0 +1,17 @@ +# Makefile for jobacct_gather/linux plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = jobacct_gather_linux.la + +# Null job completion logging plugin. +jobacct_gather_linux_la_SOURCES = jobacct_gather_linux.c \ + $(top_builddir)/src/slurmd/common/proctrack.c \ + $(top_builddir)/src/slurmd/common/proctrack.h + +jobacct_gather_linux_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) + diff --git a/src/plugins/jobacct/linux/Makefile.in b/src/plugins/jobacct_gather/linux/Makefile.in similarity index 74% rename from src/plugins/jobacct/linux/Makefile.in rename to src/plugins/jobacct_gather/linux/Makefile.in index e14899c36..bb7a009b9 100644 --- a/src/plugins/jobacct/linux/Makefile.in +++ b/src/plugins/jobacct_gather/linux/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
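The functional change carried by the jobacct_gather plugins in this part of the patch is per-job memory enforcement: each polling pass sums the RSS of every tracked task's process tree into total_job_mem and, when a non-zero job_mem_limit is exceeded, the plugin asks the controller to cancel the whole job. The following is only a condensed, illustrative sketch of that flow; check_job_mem_limit() is a hypothetical wrapper name, while acct_job_id, job_mem_limit and the RPC fields mirror _acct_kill_job() as added by the patch.

/* Condensed sketch (assumed wrapper name check_job_mem_limit); the RPC
 * construction mirrors _acct_kill_job() introduced by this patch. */
#include <signal.h>
#include "src/common/slurm_protocol_api.h"
#include "src/common/slurm_protocol_defs.h"

extern uint32_t acct_job_id;	/* job id being accounted */
extern uint32_t job_mem_limit;	/* limit in KB, 0 means unlimited */

static void check_job_mem_limit(uint32_t total_job_mem)
{
	slurm_msg_t msg;
	job_step_kill_msg_t req;

	if (!acct_job_id || !job_mem_limit || (total_job_mem <= job_mem_limit))
		return;			/* nothing to enforce */

	/* Ask slurmctld to cancel the whole job with SIGKILL */
	slurm_msg_t_init(&msg);
	req.job_id      = acct_job_id;
	req.job_step_id = NO_VAL;	/* whole job, not a single step */
	req.signal      = SIGKILL;
	req.batch_flag  = 0;
	msg.msg_type    = REQUEST_CANCEL_JOB_STEP;
	msg.data        = &req;
	slurm_send_only_controller_msg(&msg);
}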
@@ -14,7 +14,7 @@ @SET_MAKE@ -# Makefile for jobacct/linux plugin +# Makefile for jobacct_gather/linux plugin VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ @@ -35,7 +35,7 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ target_triplet = @target@ -subdir = src/plugins/jobacct/linux +subdir = src/plugins/jobacct_gather/linux DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,14 +75,15 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -jobacct_linux_la_LIBADD = -am_jobacct_linux_la_OBJECTS = jobacct_linux.lo jobacct_common.lo \ - common_slurmctld.lo common_slurmstepd.lo proctrack.lo -jobacct_linux_la_OBJECTS = $(am_jobacct_linux_la_OBJECTS) -jobacct_linux_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ +jobacct_gather_linux_la_LIBADD = +am_jobacct_gather_linux_la_OBJECTS = jobacct_gather_linux.lo \ + proctrack.lo +jobacct_gather_linux_la_OBJECTS = \ + $(am_jobacct_gather_linux_la_OBJECTS) +jobacct_gather_linux_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ - $(jobacct_linux_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ + $(jobacct_gather_linux_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -92,8 +95,8 @@ CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ -SOURCES = $(jobacct_linux_la_SOURCES) -DIST_SOURCES = $(jobacct_linux_la_SOURCES) +SOURCES = $(jobacct_gather_linux_la_SOURCES) +DIST_SOURCES = $(jobacct_gather_linux_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) @@ -120,6 +123,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +137,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +163,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +177,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ 
+PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +194,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +210,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -256,17 +270,14 @@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign PLUGIN_FLAGS = -module -avoid-version --export-dynamic INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -pkglib_LTLIBRARIES = jobacct_linux.la +pkglib_LTLIBRARIES = jobacct_gather_linux.la # Null job completion logging plugin. -jobacct_linux_la_SOURCES = jobacct_linux.c \ - $(top_builddir)/src/plugins/jobacct/common/jobacct_common.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c \ - $(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c \ +jobacct_gather_linux_la_SOURCES = jobacct_gather_linux.c \ $(top_builddir)/src/slurmd/common/proctrack.c \ $(top_builddir)/src/slurmd/common/proctrack.h -jobacct_linux_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +jobacct_gather_linux_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) all: all-am .SUFFIXES: @@ -280,9 +291,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobacct/linux/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobacct_gather/linux/Makefile'; \ cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign src/plugins/jobacct/linux/Makefile + $(AUTOMAKE) --foreign src/plugins/jobacct_gather/linux/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ @@ -306,8 +317,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -315,8 +326,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -327,8 +338,8 @@ clean-pkglibLTLIBRARIES: echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done -jobacct_linux.la: $(jobacct_linux_la_OBJECTS) $(jobacct_linux_la_DEPENDENCIES) - $(jobacct_linux_la_LINK) -rpath $(pkglibdir) $(jobacct_linux_la_OBJECTS) $(jobacct_linux_la_LIBADD) $(LIBS) +jobacct_gather_linux.la: $(jobacct_gather_linux_la_OBJECTS) $(jobacct_gather_linux_la_DEPENDENCIES) + $(jobacct_gather_linux_la_LINK) -rpath $(pkglibdir) $(jobacct_gather_linux_la_OBJECTS) $(jobacct_gather_linux_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) @@ -336,10 +347,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common_slurmctld.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common_slurmstepd.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_common.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_linux.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_gather_linux.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proctrack.Plo@am__quote@ .c.o: @@ -363,27 +371,6 @@ distclean-compile: @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ @am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< -jobacct_common.lo: $(top_builddir)/src/plugins/jobacct/common/jobacct_common.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT jobacct_common.lo -MD -MP -MF $(DEPDIR)/jobacct_common.Tpo -c -o jobacct_common.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/jobacct_common.Tpo $(DEPDIR)/jobacct_common.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c' object='jobacct_common.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) 
$(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o jobacct_common.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/jobacct_common.c - -common_slurmctld.lo: $(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT common_slurmctld.lo -MD -MP -MF $(DEPDIR)/common_slurmctld.Tpo -c -o common_slurmctld.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/common_slurmctld.Tpo $(DEPDIR)/common_slurmctld.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c' object='common_slurmctld.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o common_slurmctld.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmctld.c - -common_slurmstepd.lo: $(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c -@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT common_slurmstepd.lo -MD -MP -MF $(DEPDIR)/common_slurmstepd.Tpo -c -o common_slurmstepd.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c -@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/common_slurmstepd.Tpo $(DEPDIR)/common_slurmstepd.Plo -@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c' object='common_slurmstepd.lo' libtool=yes @AMDEPBACKSLASH@ -@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ -@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o common_slurmstepd.lo `test -f '$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c' || echo '$(srcdir)/'`$(top_builddir)/src/plugins/jobacct/common/common_slurmstepd.c - proctrack.lo: $(top_builddir)/src/slurmd/common/proctrack.c @am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT proctrack.lo -MD -MP -MF $(DEPDIR)/proctrack.Tpo -c -o proctrack.lo `test -f '$(top_builddir)/src/slurmd/common/proctrack.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/proctrack.c @am__fastdepCC_TRUE@ mv -f $(DEPDIR)/proctrack.Tpo $(DEPDIR)/proctrack.Plo @@ -402,8 +389,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; 
nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -415,8 +402,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -426,13 +413,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobacct/linux/jobacct_linux.c b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c similarity index 66% rename from src/plugins/jobacct/linux/jobacct_linux.c rename to src/plugins/jobacct_gather/linux/jobacct_gather_linux.c index 626ae5d6c..d8f19f1ff 100644 --- a/src/plugins/jobacct/linux/jobacct_linux.c +++ b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c @@ -1,10 +1,10 @@ /*****************************************************************************\ - * jobacct_linux.c - slurm job accounting plugin. + * jobacct_gather_linux.c - slurm job accounting gather plugin for linux. ***************************************************************************** * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. * Written by Andy Riebs, <andy.riebs@hp.com>, who borrowed heavily * from other parts of SLURM, and Danny Auble, <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -39,7 +39,13 @@ \*****************************************************************************/ #include <fcntl.h> -#include "src/plugins/jobacct/common/jobacct_common.h" +#include <signal.h> +#include "src/common/jobacct_common.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_protocol_defs.h" +#include "src/slurmd/common/proctrack.h" + +#define _DEBUG 0 /* * These variables are required by the generic plugin interface. If they @@ -70,8 +76,8 @@ * minimum versions for their plugins as the job accounting API * matures. */ -const char plugin_name[] = "Job accounting LINUX plugin"; -const char plugin_type[] = "jobacct/linux"; +const char plugin_name[] = "Job accounting gather LINUX plugin"; +const char plugin_type[] = "jobacct_gather/linux"; const uint32_t plugin_version = 100; /* Other useful declarations */ @@ -92,235 +98,13 @@ static pthread_mutex_t reading_mutex = PTHREAD_MUTEX_INITIALIZER; /* Finally, pre-define all local routines. 
*/ +static void _acct_kill_job(void); static void _get_offspring_data(List prec_list, prec_t *ancestor, pid_t pid); static void _get_process_data(); -static int _get_process_data_line(FILE *in, prec_t *prec); +static int _get_process_data_line(int in, prec_t *prec); static void *_watch_tasks(void *arg); static void _destroy_prec(void *object); -/* - * init() is called when the plugin is loaded, before any other functions - * are called. Put global initialization here. - */ -extern int init ( void ) -{ - char *proctrack = slurm_get_proctrack_type(); - if(!strcasecmp(proctrack, "proctrack/pgid")) { - info("WARNING: We will use a much slower algorithm with " - "proctrack/pgid, use Proctracktype=proctrack/linuxproc " - "or Proctracktype=proctrack/rms with %s", - plugin_name); - pgid_plugin = true; - } - xfree(proctrack); - - verbose("%s loaded", plugin_name); - return SLURM_SUCCESS; -} - -extern int fini ( void ) -{ - return SLURM_SUCCESS; -} - -/* - * The following routine is called by the slurmd mainline - */ - -int jobacct_p_init_struct(struct jobacctinfo *jobacct, - jobacct_id_t *jobacct_id) -{ - return common_init_struct(jobacct, jobacct_id); -} - -struct jobacctinfo *jobacct_p_alloc(jobacct_id_t *jobacct_id) -{ - return common_alloc_jobacct(jobacct_id); -} - -void jobacct_p_free(struct jobacctinfo *jobacct) -{ - common_free_jobacct(jobacct); -} - -int jobacct_p_setinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - return common_setinfo(jobacct, type, data); - -} - -int jobacct_p_getinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) -{ - return common_getinfo(jobacct, type, data); -} - -void jobacct_p_aggregate(struct jobacctinfo *dest, struct jobacctinfo *from) -{ - common_aggregate(dest, from); -} - -void jobacct_p_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct) -{ - common_2_sacct(sacct, jobacct); -} - -void jobacct_p_pack(struct jobacctinfo *jobacct, Buf buffer) -{ - common_pack(jobacct, buffer); -} - -int jobacct_p_unpack(struct jobacctinfo **jobacct, Buf buffer) -{ - return common_unpack(jobacct, buffer); -} - - -int jobacct_p_init_slurmctld(char *job_acct_log) -{ - return common_init_slurmctld(job_acct_log); -} - -int jobacct_p_fini_slurmctld() -{ - return common_fini_slurmctld(); -} - -int jobacct_p_job_start_slurmctld(struct job_record *job_ptr) -{ - return common_job_start_slurmctld(job_ptr); -} - -int jobacct_p_job_complete_slurmctld(struct job_record *job_ptr) -{ - return common_job_complete_slurmctld(job_ptr); -} - -int jobacct_p_step_start_slurmctld(struct step_record *step) -{ - return common_step_start_slurmctld(step); -} - -int jobacct_p_step_complete_slurmctld(struct step_record *step) -{ - return common_step_complete_slurmctld(step); -} - -int jobacct_p_suspend_slurmctld(struct job_record *job_ptr) -{ - return common_suspend_slurmctld(job_ptr); -} - -/* - * jobacct_startpoll() is called when the plugin is loaded by - * slurmd, before any other functions are called. Put global - * initialization here. - */ - -int jobacct_p_startpoll(int frequency) -{ - int rc = SLURM_SUCCESS; - - pthread_attr_t attr; - pthread_t _watch_tasks_thread_id; - - debug("jobacct LINUX plugin loaded"); - - /* Parse the JobAcctParameters */ - - - debug("jobacct: frequency = %d", frequency); - - jobacct_shutdown = false; - - if (frequency == 0) { /* don't want dynamic monitoring? 
*/ - debug2("jobacct LINUX dynamic logging disabled"); - return rc; - } - - freq = frequency; - task_list = list_create(common_free_jobacct); - - /* create polling thread */ - slurm_attr_init(&attr); - if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) - error("pthread_attr_setdetachstate error %m"); - - if (pthread_create(&_watch_tasks_thread_id, &attr, - &_watch_tasks, NULL)) { - debug("jobacct failed to create _watch_tasks " - "thread: %m"); - frequency = 0; - } - else - debug3("jobacct LINUX dynamic logging enabled"); - slurm_attr_destroy(&attr); - - return rc; -} - -int jobacct_p_endpoll() -{ - slurm_mutex_lock(&jobacct_lock); - if(task_list) - list_destroy(task_list); - task_list = NULL; - slurm_mutex_unlock(&jobacct_lock); - - if (slash_proc) { - slurm_mutex_lock(&reading_mutex); - (void) closedir(slash_proc); - slurm_mutex_unlock(&reading_mutex); - } - return common_endpoll(); -} - -int jobacct_p_set_proctrack_container_id(uint32_t id) -{ - return common_set_proctrack_container_id(id); -} - -int jobacct_p_add_task(pid_t pid, jobacct_id_t *jobacct_id) -{ - return common_add_task(pid, jobacct_id); -} - -struct jobacctinfo *jobacct_p_stat_task(pid_t pid) -{ - _get_process_data(); - return common_stat_task(pid); -} - -struct jobacctinfo *jobacct_p_remove_task(pid_t pid) -{ - return common_remove_task(pid); -} - -void jobacct_p_suspend_poll() -{ - common_suspend_poll(); -} - -void jobacct_p_resume_poll() -{ - common_resume_poll(); -} - -extern int jobacct_p_node_down(struct node_record *node_ptr, - time_t event_time, char *reason) -{ - return SLURM_SUCCESS; -} -extern int jobacct_p_node_up(struct node_record *node_ptr, time_t event_time) -{ - return SLURM_SUCCESS; -} -extern int jobacct_p_cluster_procs(uint32_t procs, time_t event_time) -{ - return SLURM_SUCCESS; -} - /* * _get_offspring_data() -- collect memory usage data for the offspring * @@ -351,6 +135,10 @@ _get_offspring_data(List prec_list, prec_t *ancestor, pid_t pid) { itr = list_iterator_create(prec_list); while((prec = list_next(itr))) { if (prec->ppid == pid) { +#if _DEBUG + info("pid:%u ppid:%u rss:%d KB", + prec->pid, prec->ppid, prec->rss); +#endif _get_offspring_data(prec_list, ancestor, prec->pid); ancestor->usec += prec->usec; ancestor->ssec += prec->ssec; @@ -370,7 +158,7 @@ _get_offspring_data(List prec_list, prec_t *ancestor, pid_t pid) { * * OUT: none * - * THREADSAFE! Only one thread ever gets here. + * THREADSAFE! Only one thread ever gets here. 
* * Assumption: * Any file with a name of the form "/proc/[0-9]+/stat" @@ -387,7 +175,7 @@ static void _get_process_data() { List prec_list = NULL; pid_t *pids = NULL; int npids = 0; - + uint32_t total_job_mem = 0; int i, fd; ListIterator itr; ListIterator itr2; @@ -433,7 +221,7 @@ static void _get_process_data() { fcntl(fd, F_SETFD, FD_CLOEXEC); prec = xmalloc(sizeof(prec_t)); - if (_get_process_data_line(stat_fp, prec)) + if (_get_process_data_line(fd, prec)) list_append(prec_list, prec); else xfree(prec); @@ -499,7 +287,7 @@ static void _get_process_data() { fcntl(fd, F_SETFD, FD_CLOEXEC); prec = xmalloc(sizeof(prec_t)); - if (_get_process_data_line(stat_fp, prec)) + if (_get_process_data_line(fd, prec)) list_append(prec_list, prec); else xfree(prec); @@ -524,12 +312,17 @@ static void _get_process_data() { itr2 = list_iterator_create(prec_list); while((prec = list_next(itr2))) { if (prec->pid == jobacct->pid) { +#if _DEBUG + info("pid:%u ppid:%u rss:%d KB", + prec->pid, prec->ppid, prec->rss); +#endif /* find all my descendents */ _get_offspring_data(prec_list, prec, prec->pid); /* tally their usage */ jobacct->max_rss = jobacct->tot_rss = MAX(jobacct->max_rss, prec->rss); + total_job_mem += prec->rss; jobacct->max_vsize = jobacct->tot_vsize = MAX(jobacct->max_vsize, prec->vsize); jobacct->max_pages = jobacct->tot_pages = @@ -537,74 +330,108 @@ static void _get_process_data() { jobacct->min_cpu = jobacct->tot_cpu = MAX(jobacct->min_cpu, (prec->usec + prec->ssec)); - debug2("%d size now %d %d time %d", + debug2("%d mem size %u %u time %u", jobacct->pid, jobacct->max_rss, jobacct->max_vsize, jobacct->tot_cpu); - break; } } list_iterator_destroy(itr2); } - list_iterator_destroy(itr); + list_iterator_destroy(itr); slurm_mutex_unlock(&jobacct_lock); - + + if (job_mem_limit) { + debug("Job %u memory used:%u limit:%u KB", + acct_job_id, total_job_mem, job_mem_limit); + } + if (acct_job_id && job_mem_limit && + (total_job_mem > job_mem_limit)) { + error("Job %u exceeded %u KB memory limit, being killed", + acct_job_id, job_mem_limit); + _acct_kill_job(); + } + finished: list_destroy(prec_list); processing = 0; return; } +/* _acct_kill_job() issue RPC to kill a slurm job */ +static void _acct_kill_job(void) +{ + slurm_msg_t msg; + job_step_kill_msg_t req; + + slurm_msg_t_init(&msg); + /* + * Request message: + */ + req.job_id = acct_job_id; + req.job_step_id = NO_VAL; + req.signal = SIGKILL; + req.batch_flag = 0; + msg.msg_type = REQUEST_CANCEL_JOB_STEP; + msg.data = &req; + + slurm_send_only_controller_msg(&msg); +} + /* _get_process_data_line() - get line of data from /proc/<pid>/stat * - * IN: in - input file channel + * IN: in - input file descriptor * OUT: prec - the destination for the data * * RETVAL: ==0 - no valid data * !=0 - data are valid * - * Note: It seems a bit wasteful to do all those atoi() and - * atol() conversions that are implicit in the scanf(), - * but they help to ensure that we really are looking at the - * expected type of record. + * Based upon stat2proc() from the ps command. It can handle arbitrary executable + * file basenames for `cmd', i.e. those with embedded whitespace or embedded ')'s. + * Such names confuse %s (see scanf(3)), so the string is split and %39c is used + * instead. (except for embedded ')' "(%[^)]c)" would work. 
*/ -static int _get_process_data_line(FILE *in, prec_t *prec) { - /* discardable data */ - int d; - char c; - char *s; - uint32_t tmpu32; - int max_path_len = pathconf("/", _PC_NAME_MAX); - - /* useful datum */ - int nvals; - - s = xmalloc(max_path_len + 1); - nvals=fscanf(in, - "%d %s %c %d %d " - "%d %d %d %d %d " - "%d %d %d %d %d " - "%d %d %d %d %d " - "%d %d %d %d %d", - &prec->pid, s, &c, &prec->ppid, &d, - &d, &d, &d, &tmpu32, &tmpu32, - &tmpu32, &prec->pages, &tmpu32, &prec->usec, &prec->ssec, - &tmpu32, &tmpu32, &tmpu32, &tmpu32, &tmpu32, - &tmpu32, &tmpu32, &prec->vsize, &prec->rss, &tmpu32); - /* The fields in the record are - * pid, command, state, ppid, pgrp, - * session, tty_nr, tpgid, flags, minflt, - * cminflt, majflt, cmajflt, utime, stime, - * cutime, cstime, priority, nice, lit_0, - * itrealvalue, starttime, vsize, rss, rlim - */ - xfree(s); - if (nvals != 25) /* Is it what we expected? */ - return 0; /* No! */ - - prec->rss *= getpagesize(); /* convert rss from pages to bytes */ - prec->rss /= 1024; /* convert rss to kibibytes */ - prec->vsize /= 1024; /* and convert vsize to kibibytes */ +static int _get_process_data_line(int in, prec_t *prec) { + char sbuf[256], *tmp; + int num_read, nvals; + char cmd[40], state[1]; + int ppid, pgrp, session, tty_nr, tpgid; + long unsigned flags, minflt, cminflt, majflt, cmajflt; + long unsigned utime, stime, starttime, vsize; + long int cutime, cstime, priority, nice, timeout, itrealvalue, rss; + + num_read = read(in, sbuf, (sizeof(sbuf) - 1)); + if (num_read <= 0) + return 0; + sbuf[num_read] = '\0'; + + tmp = strrchr(sbuf, ')'); /* split into "PID (cmd" and "<rest>" */ + *tmp = '\0'; /* replace trailing ')' with NUL */ + /* parse these two strings separately, skipping the leading "(". */ + nvals = sscanf(sbuf, "%d (%39c", &prec->pid, cmd); + if (nvals < 2) + return 0; + + nvals = sscanf(tmp + 2, /* skip space after ')' too */ + "%c %d %d %d %d %d " + "%lu %lu %lu %lu %lu " + "%lu %lu %ld %ld %ld %ld " + "%ld %ld %lu %lu %ld", + state, &ppid, &pgrp, &session, &tty_nr, &tpgid, + &flags, &minflt, &cminflt, &majflt, &cmajflt, + &utime, &stime, &cutime, &cstime, &priority, &nice, + &timeout, &itrealvalue, &starttime, &vsize, &rss); + /* There are some additional fields, which we do not scan or use */ + if ((nvals < 22) || (rss < 0)) + return 0; + + /* Copy the values that slurm records into our data structure */ + prec->ppid = ppid; + prec->pages = majflt; + prec->usec = utime; + prec->ssec = stime; + prec->vsize = vsize / 1024; /* convert from bytes to KB */ + prec->rss = rss * getpagesize() / 1024; /* convert from pages to KB */ return 1; } @@ -629,7 +456,7 @@ static void *_watch_tasks(void *arg) _task_sleep(1); while(!jobacct_shutdown) { /* Do this until shutdown is requested */ - if(!suspended) { + if(!jobacct_suspended) { _get_process_data(); /* Update the data */ } _task_sleep(freq); @@ -644,3 +471,186 @@ static void _destroy_prec(void *object) xfree(prec); return; } + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. 
+ */ +extern int init ( void ) +{ + char *temp = slurm_get_proctrack_type(); + if(!strcasecmp(temp, "proctrack/pgid")) { + info("WARNING: We will use a much slower algorithm with " + "proctrack/pgid, use Proctracktype=proctrack/linuxproc " + "or Proctracktype=proctrack/rms with %s", + plugin_name); + pgid_plugin = true; + } + xfree(temp); + temp = slurm_get_accounting_storage_type(); + if(!strcasecmp(temp, ACCOUNTING_STORAGE_TYPE_NONE)) { + error("WARNING: Even though we are collecting accounting " + "information you have asked for it not to be stored " + "(%s) if this is not what you have in mind you will " + "need to change it.", ACCOUNTING_STORAGE_TYPE_NONE); + } + xfree(temp); + verbose("%s loaded", plugin_name); + return SLURM_SUCCESS; +} + +extern int fini ( void ) +{ + return SLURM_SUCCESS; +} + +extern struct jobacctinfo *jobacct_gather_p_create(jobacct_id_t *jobacct_id) +{ + return jobacct_common_alloc_jobacct(jobacct_id); +} + +extern void jobacct_gather_p_destroy(struct jobacctinfo *jobacct) +{ + jobacct_common_free_jobacct(jobacct); +} + +extern int jobacct_gather_p_setinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) +{ + return jobacct_common_setinfo(jobacct, type, data); + +} + +extern int jobacct_gather_p_getinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) +{ + return jobacct_common_getinfo(jobacct, type, data); +} + +extern void jobacct_gather_p_pack(struct jobacctinfo *jobacct, Buf buffer) +{ + jobacct_common_pack(jobacct, buffer); +} + +extern int jobacct_gather_p_unpack(struct jobacctinfo **jobacct, Buf buffer) +{ + return jobacct_common_unpack(jobacct, buffer); +} + +extern void jobacct_gather_p_aggregate(struct jobacctinfo *dest, + struct jobacctinfo *from) +{ + jobacct_common_aggregate(dest, from); +} + +/* + * jobacct_startpoll() is called when the plugin is loaded by + * slurmd, before any other functions are called. Put global + * initialization here. + */ + +extern int jobacct_gather_p_startpoll(uint16_t frequency) +{ + int rc = SLURM_SUCCESS; + + pthread_attr_t attr; + pthread_t _watch_tasks_thread_id; + + debug("%s loaded", plugin_name); + + debug("jobacct-gather: frequency = %d", frequency); + + jobacct_shutdown = false; + + if (frequency == 0) { /* don't want dynamic monitoring? 
*/ + debug2("jobacct-gather LINUX dynamic logging disabled"); + return rc; + } + + freq = frequency; + task_list = list_create(jobacct_common_free_jobacct); + + /* create polling thread */ + slurm_attr_init(&attr); + if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)) + error("pthread_attr_setdetachstate error %m"); + + if (pthread_create(&_watch_tasks_thread_id, &attr, + &_watch_tasks, NULL)) { + debug("jobacct-gather failed to create _watch_tasks " + "thread: %m"); + frequency = 0; + } + else + debug3("jobacct-gather LINUX dynamic logging enabled"); + slurm_attr_destroy(&attr); + + return rc; +} + +extern int jobacct_gather_p_endpoll() +{ + slurm_mutex_lock(&jobacct_lock); + if(task_list) + list_destroy(task_list); + task_list = NULL; + slurm_mutex_unlock(&jobacct_lock); + + if (slash_proc) { + slurm_mutex_lock(&reading_mutex); + (void) closedir(slash_proc); + slurm_mutex_unlock(&reading_mutex); + } + + jobacct_shutdown = true; + + return SLURM_SUCCESS; +} + +extern void jobacct_gather_p_change_poll(uint16_t frequency) +{ + freq = frequency; + if (freq == 0) + jobacct_shutdown = true; + return; +} + +extern void jobacct_gather_p_suspend_poll() +{ + jobacct_common_suspend_poll(); +} + +extern void jobacct_gather_p_resume_poll() +{ + jobacct_common_resume_poll(); +} + +extern int jobacct_gather_p_set_proctrack_container_id(uint32_t id) +{ + return jobacct_common_set_proctrack_container_id(id); +} + +extern int jobacct_gather_p_add_task(pid_t pid, jobacct_id_t *jobacct_id) +{ + return jobacct_common_add_task(pid, jobacct_id); +} + + +extern struct jobacctinfo *jobacct_gather_p_stat_task(pid_t pid) +{ + _get_process_data(); + return jobacct_common_stat_task(pid); +} + +extern struct jobacctinfo *jobacct_gather_p_remove_task(pid_t pid) +{ + return jobacct_common_remove_task(pid); +} + +extern void jobacct_gather_p_2_sacct(sacct_t *sacct, + struct jobacctinfo *jobacct) +{ + jobacct_common_2_sacct(sacct, jobacct); +} + + diff --git a/src/plugins/jobacct_gather/none/Makefile.am b/src/plugins/jobacct_gather/none/Makefile.am new file mode 100644 index 000000000..08c8420b0 --- /dev/null +++ b/src/plugins/jobacct_gather/none/Makefile.am @@ -0,0 +1,13 @@ +# Makefile for jobacct_gather/none plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = jobacct_gather_none.la + +# Null job completion logging plugin. +jobacct_gather_none_la_SOURCES = jobacct_gather_none.c +jobacct_gather_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) diff --git a/src/plugins/jobacct/gold/Makefile.in b/src/plugins/jobacct_gather/none/Makefile.in similarity index 86% rename from src/plugins/jobacct/gold/Makefile.in rename to src/plugins/jobacct_gather/none/Makefile.in index a7710977a..00d16a399 100644 --- a/src/plugins/jobacct/gold/Makefile.in +++ b/src/plugins/jobacct_gather/none/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
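The rewritten _get_process_data_line() in the jobacct_gather/linux hunks above switches from fscanf() on a FILE* to read() on a file descriptor and, following stat2proc() from the ps command, splits the /proc/<pid>/stat buffer at the last ')' so that command names containing blanks or ')' cannot derail the later sscanf(). The standalone C sketch below shows the same split-then-scan technique; it is not SLURM code, and the stat_rec_t type, the field subset, and the helper names are assumptions made only for this illustration.

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed record type holding only the fields this sketch extracts. */
typedef struct {
	int pid;
	int ppid;
	long rss_kb;                /* resident set size in KB */
	unsigned long vsize_kb;     /* virtual size in KB */
} stat_rec_t;

/* Parse one /proc/<pid>/stat line already copied into buf.
 * Returns 1 on success, 0 on failure.  The buffer is first split at the
 * last ')', so a command name with embedded blanks or ')' is skipped
 * safely instead of confusing a "%s" conversion. */
static int parse_stat_line(char *buf, stat_rec_t *rec)
{
	char state;
	unsigned long vsize;
	long rss;
	char *rest = strrchr(buf, ')');     /* last ')' ends the command name */

	if ((rest == NULL) || (rest[1] == '\0'))
		return 0;
	*rest = '\0';                       /* terminate the "pid (cmd" part */
	if (sscanf(buf, "%d", &rec->pid) != 1)
		return 0;
	/* Skip the fields between ppid and vsize/rss with suppressed
	 * conversions; the field order matches proc(5). */
	if (sscanf(rest + 2,
		   "%c %d %*d %*d %*d %*d "
		   "%*u %*u %*u %*u %*u %*u %*u "
		   "%*d %*d %*d %*d %*d %*d %*u %lu %ld",
		   &state, &rec->ppid, &vsize, &rss) < 4)
		return 0;
	rec->vsize_kb = vsize / 1024;                 /* bytes to KB */
	rec->rss_kb = rss * getpagesize() / 1024;     /* pages to KB */
	return 1;
}

int main(void)
{
	char buf[512];
	stat_rec_t rec;
	size_t n;
	FILE *fp = fopen("/proc/self/stat", "r");

	if (fp == NULL)
		return 1;
	n = fread(buf, 1, sizeof(buf) - 1, fp);
	fclose(fp);
	buf[n] = '\0';
	if (parse_stat_line(buf, &rec))
		printf("pid=%d ppid=%d rss=%ld KB vsize=%lu KB\n",
		       rec.pid, rec.ppid, rec.rss_kb, rec.vsize_kb);
	return 0;
}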
@@ -14,7 +14,7 @@ @SET_MAKE@ -# Makefile for jobacct/none plugin +# Makefile for jobacct_gather/none plugin VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ @@ -35,7 +35,7 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ target_triplet = @target@ -subdir = src/plugins/jobacct/gold +subdir = src/plugins/jobacct_gather/none DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,14 +75,13 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -jobacct_gold_la_LIBADD = -am_jobacct_gold_la_OBJECTS = agent.lo base64.lo gold_interface.lo \ - jobacct_gold.lo -jobacct_gold_la_OBJECTS = $(am_jobacct_gold_la_OBJECTS) -jobacct_gold_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ +jobacct_gather_none_la_LIBADD = +am_jobacct_gather_none_la_OBJECTS = jobacct_gather_none.lo +jobacct_gather_none_la_OBJECTS = $(am_jobacct_gather_none_la_OBJECTS) +jobacct_gather_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ - $(jobacct_gold_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ + $(jobacct_gather_none_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -92,8 +93,8 @@ CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ -SOURCES = $(jobacct_gold_la_SOURCES) -DIST_SOURCES = $(jobacct_gold_la_SOURCES) +SOURCES = $(jobacct_gather_none_la_SOURCES) +DIST_SOURCES = $(jobacct_gather_none_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) @@ -120,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = 
@PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -255,17 +267,12 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign PLUGIN_FLAGS = -module -avoid-version --export-dynamic - -# Gold job completion logging plugin. INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -pkglib_LTLIBRARIES = jobacct_gold.la -jobacct_gold_la_SOURCES = \ - agent.c agent.h \ - base64.c base64.h \ - gold_interface.c gold_interface.h \ - jobacct_gold.c - -jobacct_gold_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +pkglib_LTLIBRARIES = jobacct_gather_none.la + +# Null job completion logging plugin. +jobacct_gather_none_la_SOURCES = jobacct_gather_none.c +jobacct_gather_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) all: all-am .SUFFIXES: @@ -279,9 +286,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobacct/gold/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobacct_gather/none/Makefile'; \ cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign src/plugins/jobacct/gold/Makefile + $(AUTOMAKE) --foreign src/plugins/jobacct_gather/none/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' 
in \ @@ -305,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -314,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -326,8 +333,8 @@ clean-pkglibLTLIBRARIES: echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done -jobacct_gold.la: $(jobacct_gold_la_OBJECTS) $(jobacct_gold_la_DEPENDENCIES) - $(jobacct_gold_la_LINK) -rpath $(pkglibdir) $(jobacct_gold_la_OBJECTS) $(jobacct_gold_la_LIBADD) $(LIBS) +jobacct_gather_none.la: $(jobacct_gather_none_la_OBJECTS) $(jobacct_gather_none_la_DEPENDENCIES) + $(jobacct_gather_none_la_LINK) -rpath $(pkglibdir) $(jobacct_gather_none_la_OBJECTS) $(jobacct_gather_none_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) @@ -335,10 +342,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/base64.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gold_interface.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_gold.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_gather_none.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @@ -372,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -385,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -396,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo 
$(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobacct/none/jobacct_none.c b/src/plugins/jobacct_gather/none/jobacct_gather_none.c similarity index 61% rename from src/plugins/jobacct/none/jobacct_none.c rename to src/plugins/jobacct_gather/none/jobacct_gather_none.c index 25d995622..48df0bf2d 100644 --- a/src/plugins/jobacct/none/jobacct_none.c +++ b/src/plugins/jobacct_gather/none/jobacct_gather_none.c @@ -1,11 +1,10 @@ - /*****************************************************************************\ * jobacct_none.c - NO-OP slurm job completion logging plugin. ***************************************************************************** * * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. * Written by Andy Riebs, <andy.riebs@hp.com>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -39,23 +38,7 @@ * Copyright (C) 2002 The Regents of the University of California. \*****************************************************************************/ -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#if HAVE_STDINT_H -# include <stdint.h> -#endif -#if HAVE_INTTYPES_H -# include <inttypes.h> -#endif - -#include <stdio.h> -#include <slurm/slurm_errno.h> - -#include "src/slurmctld/slurmctld.h" -#include "src/slurmd/slurmd/slurmd.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_jobacct_gather.h" /* * These variables are required by the generic plugin interface. If they @@ -86,8 +69,8 @@ * minimum versions for their plugins as the job accounting API * matures. 
*/ -const char plugin_name[] = "Job accounting NOT_INVOKED plugin"; -const char plugin_type[] = "jobacct/none"; +const char plugin_name[] = "Job accounting gather NOT_INVOKED plugin"; +const char plugin_type[] = "jobacct_gather/none"; const uint32_t plugin_version = 100; /* @@ -105,153 +88,95 @@ extern int fini ( void ) return SLURM_SUCCESS; } -/* - * The following routines are called by slurmctld - */ - -/* - * The following routines are called by slurmd - */ -int jobacct_p_init_struct(struct jobacctinfo *jobacct, - jobacct_id_t *jobacct_id) -{ - return SLURM_SUCCESS; -} - -struct jobacctinfo *jobacct_p_alloc(jobacct_id_t *jobacct_id) +extern jobacctinfo_t *jobacct_gather_p_create(jobacct_id_t *jobacct_id) { return NULL; } -void jobacct_p_free(struct jobacctinfo *jobacct) +extern void jobacct_gather_p_destroy(struct jobacctinfo *jobacct) { return; } -int jobacct_p_setinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) +extern int jobacct_gather_p_setinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) { return SLURM_SUCCESS; } -int jobacct_p_getinfo(struct jobacctinfo *jobacct, - enum jobacct_data_type type, void *data) +extern int jobacct_gather_p_getinfo(struct jobacctinfo *jobacct, + enum jobacct_data_type type, void *data) { return SLURM_SUCCESS; } -void jobacct_p_aggregate(struct jobacctinfo *dest, struct jobacctinfo *from) +extern void jobacct_gather_p_pack(struct jobacctinfo *jobacct, Buf buffer) { return; } -void jobacct_p_2_sacct(sacct_t *sacct, struct jobacctinfo *jobacct) -{ - return; -} - -void jobacct_p_pack(struct jobacctinfo *jobacct, Buf buffer) -{ - return; -} - -int jobacct_p_unpack(struct jobacctinfo **jobacct, Buf buffer) +extern int jobacct_gather_p_unpack(struct jobacctinfo **jobacct, Buf buffer) { *jobacct = NULL; return SLURM_SUCCESS; } - -int jobacct_p_init_slurmctld(char *job_acct_log) +extern void jobacct_gather_p_aggregate(struct jobacctinfo *dest, + struct jobacctinfo *from) { - return SLURM_SUCCESS; + return; } -int jobacct_p_fini_slurmctld() +extern int jobacct_gather_p_startpoll(uint16_t frequency) { return SLURM_SUCCESS; } -int jobacct_p_job_start_slurmctld(struct job_record *job_ptr) +extern int jobacct_gather_p_endpoll() { return SLURM_SUCCESS; } -int jobacct_p_job_complete_slurmctld(struct job_record *job_ptr) -{ - return SLURM_SUCCESS; -} - -int jobacct_p_step_start_slurmctld(struct step_record *step) +extern void jobacct_gather_p_change_poll(uint16_t frequency) { - return SLURM_SUCCESS; -} - -int jobacct_p_step_complete_slurmctld(struct step_record *step) -{ - return SLURM_SUCCESS; + return; } -int jobacct_p_suspend_slurmctld(struct job_record *job_ptr) +extern void jobacct_gather_p_suspend_poll() { - return SLURM_SUCCESS; + return; } -int jobacct_p_startpoll(int frequency) +extern void jobacct_gather_p_resume_poll() { - info("jobacct NONE plugin loaded"); - debug3("slurmd_jobacct_init() called"); - - return SLURM_SUCCESS; + return; } -int jobacct_p_endpoll() +extern int jobacct_gather_p_set_proctrack_container_id(uint32_t id) { return SLURM_SUCCESS; } -int jobacct_p_set_proctrack_container_id(uint32_t id) +extern int jobacct_gather_p_add_task(pid_t pid, jobacct_id_t *jobacct_id) { return SLURM_SUCCESS; } -int jobacct_p_add_task(pid_t pid, jobacct_id_t *jobacct_id) -{ - return SLURM_SUCCESS; -} -struct jobacctinfo *jobacct_p_stat_task(pid_t pid) +extern jobacctinfo_t *jobacct_gather_p_stat_task(pid_t pid) { return NULL; } -struct jobacctinfo *jobacct_p_remove_task(pid_t pid) +extern jobacctinfo_t 
*jobacct_gather_p_remove_task(pid_t pid) { return NULL; } -void jobacct_p_suspend_poll() -{ - return; -} - -void jobacct_p_resume_poll() +extern void jobacct_gather_p_2_sacct(sacct_t *sacct, + struct jobacctinfo *jobacct) { return; } -extern int jobacct_p_node_down(struct node_record *node_ptr, - time_t event_time, char *reason) -{ - return SLURM_SUCCESS; -} -extern int jobacct_p_node_up(struct node_record *node_ptr, time_t event_time) -{ - return SLURM_SUCCESS; -} -extern int jobacct_p_cluster_procs(uint32_t procs, time_t event_time) -{ - return SLURM_SUCCESS; -} - diff --git a/src/plugins/jobcomp/Makefile.am b/src/plugins/jobcomp/Makefile.am index 49d7c632b..0f2d169d4 100644 --- a/src/plugins/jobcomp/Makefile.am +++ b/src/plugins/jobcomp/Makefile.am @@ -1,3 +1,3 @@ # Makefile for jobcomp plugins -SUBDIRS = filetxt none script +SUBDIRS = filetxt none script mysql pgsql slurmdbd diff --git a/src/plugins/jobcomp/Makefile.in b/src/plugins/jobcomp/Makefile.in index 756a9dcb2..c83e5dd7c 100644 --- a/src/plugins/jobcomp/Makefile.in +++ b/src/plugins/jobcomp/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ 
+UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -234,7 +247,7 @@ target_os = @target_os@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ -SUBDIRS = filetxt none script +SUBDIRS = filetxt none script mysql pgsql slurmdbd all: all-recursive .SUFFIXES: @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobcomp/filetxt/Makefile.am b/src/plugins/jobcomp/filetxt/Makefile.am index 0c48f13ec..096768183 100644 --- a/src/plugins/jobcomp/filetxt/Makefile.am +++ b/src/plugins/jobcomp/filetxt/Makefile.am @@ -9,5 +9,7 @@ INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common pkglib_LTLIBRARIES = jobcomp_filetxt.la # Text file job completion logging plugin. -jobcomp_filetxt_la_SOURCES = jobcomp_filetxt.c +jobcomp_filetxt_la_SOURCES = jobcomp_filetxt.c \ + filetxt_jobcomp_process.c filetxt_jobcomp_process.h + jobcomp_filetxt_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) diff --git a/src/plugins/jobcomp/filetxt/Makefile.in b/src/plugins/jobcomp/filetxt/Makefile.in index ce8f66b1c..860015aaa 100644 --- a/src/plugins/jobcomp/filetxt/Makefile.in +++ b/src/plugins/jobcomp/filetxt/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
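Earlier in this patch the jobacct_gather/linux plugin gains per-job memory enforcement: on each polling pass it sums the RSS of every task and its descendants into total_job_mem and, when acct_job_id and job_mem_limit are set and the limit is exceeded, it logs an error and cancels the job with a REQUEST_CANCEL_JOB_STEP RPC carrying SIGKILL. The short sketch below only illustrates the descendant walk and the limit check in isolation; the proc_rec_t table, the sample numbers, and the function names are invented for the example, while the real plugin works from its prec list under jobacct_lock.

#include <stdint.h>
#include <stdio.h>

/* Assumed flat process table entry: pid, parent pid, resident set in KB. */
typedef struct {
	int pid;
	int ppid;
	uint32_t rss_kb;
} proc_rec_t;

/* Add the RSS of every descendant of 'pid' to *total, the same idea as
 * the recursive _get_offspring_data() walk over the prec list. */
static void sum_offspring_rss(const proc_rec_t *tab, int n, int pid,
			      uint32_t *total)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].ppid == pid) {
			*total += tab[i].rss_kb;
			sum_offspring_rss(tab, n, tab[i].pid, total);
		}
	}
}

int main(void)
{
	/* Fabricated sample: task 100 spawned 101, which spawned 102. */
	proc_rec_t tab[] = { {100, 1, 4096}, {101, 100, 2048}, {102, 101, 1024} };
	uint32_t job_mem_limit = 6000;	/* KB, assumed for the example */
	uint32_t total = tab[0].rss_kb;

	sum_offspring_rss(tab, 3, tab[0].pid, &total);
	printf("job memory used:%u limit:%u KB\n", total, job_mem_limit);
	if (job_mem_limit && (total > job_mem_limit))
		printf("limit exceeded; the plugin would send "
		       "REQUEST_CANCEL_JOB_STEP with signal SIGKILL\n");
	return 0;
}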
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -74,12 +76,13 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) jobcomp_filetxt_la_LIBADD = -am_jobcomp_filetxt_la_OBJECTS = jobcomp_filetxt.lo +am_jobcomp_filetxt_la_OBJECTS = jobcomp_filetxt.lo \ + filetxt_jobcomp_process.lo jobcomp_filetxt_la_OBJECTS = $(am_jobcomp_filetxt_la_OBJECTS) jobcomp_filetxt_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(jobcomp_filetxt_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +122,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +136,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +162,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +176,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +193,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +209,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -258,7 +272,9 @@ INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common pkglib_LTLIBRARIES = jobcomp_filetxt.la # Text file job completion logging plugin. 
-jobcomp_filetxt_la_SOURCES = jobcomp_filetxt.c +jobcomp_filetxt_la_SOURCES = jobcomp_filetxt.c \ + filetxt_jobcomp_process.c filetxt_jobcomp_process.h + jobcomp_filetxt_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) all: all-am @@ -299,8 +315,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +324,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -329,6 +345,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/filetxt_jobcomp_process.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobcomp_filetxt.Plo@am__quote@ .c.o: @@ -363,8 +380,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +393,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +404,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c new file mode 100644 index 000000000..dd882778d --- /dev/null +++ b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c @@ -0,0 +1,300 @@ 
+/*****************************************************************************\ + * filetxt_jobcomp_process.c - functions the processing of + * information from the filetxt jobcomp + * database. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ +#include <stdlib.h> +#include <ctype.h> +#include <sys/stat.h> + +#include "src/common/xstring.h" +#include "src/common/xmalloc.h" +#include "src/common/slurm_jobcomp.h" +#include "filetxt_jobcomp_process.h" + +typedef struct { + char *name; + char *val; +} filetxt_jobcomp_info_t; + + +static void _destroy_filetxt_jobcomp_info(void *object) +{ + filetxt_jobcomp_info_t *jobcomp_info = + (filetxt_jobcomp_info_t *)object; + if(jobcomp_info) { + xfree(jobcomp_info); + } +} + + +/* _open_log_file() -- find the current or specified log file, and open it + * + * IN: Nothing + * RETURNS: Nothing + * + * Side effects: + * - Sets opt_filein to the current system accounting log unless + * the user specified another file. 
+ */ + +static FILE *_open_log_file(char *logfile) +{ + FILE *fd = fopen(logfile, "r"); + if (fd == NULL) { + perror(logfile); + exit(1); + } + return fd; +} + +static void _do_fdump(List job_info_list, int lc) +{ + filetxt_jobcomp_info_t *jobcomp_info = NULL; + ListIterator itr = list_iterator_create(job_info_list); + + printf("\n------- Line %d -------\n", lc); + while((jobcomp_info = list_next(itr))) { + printf("%12s: %s\n", jobcomp_info->name, jobcomp_info->val); + } +} + +static jobcomp_job_rec_t *_parse_line(List job_info_list) +{ + ListIterator itr = NULL; + filetxt_jobcomp_info_t *jobcomp_info = NULL; + jobcomp_job_rec_t *job = xmalloc(sizeof(jobcomp_job_rec_t)); + char *temp = NULL; + char *temp2 = NULL; + + itr = list_iterator_create(job_info_list); + while((jobcomp_info = list_next(itr))) { + if(!strcasecmp("JobID", jobcomp_info->name)) { + job->jobid = atoi(jobcomp_info->val); + } else if(!strcasecmp("Partition", jobcomp_info->name)) { + job->partition = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("StartTime", jobcomp_info->name)) { + job->start_time = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("EndTime", jobcomp_info->name)) { + job->end_time = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("Userid", jobcomp_info->name)) { + temp = strstr(jobcomp_info->val, "("); + if(!temp) + job->uid = atoi(jobcomp_info->val); + *temp++ = 0; + temp2 = temp; + temp = strstr(temp, ")"); + if(!temp) { + error("problem getting correct uid from %s", + jobcomp_info->val); + } else { + *temp = 0; + job->uid = atoi(temp2); + job->uid_name = xstrdup(jobcomp_info->val); + } + } else if(!strcasecmp("GroupId", jobcomp_info->name)) { + temp = strstr(jobcomp_info->val, "("); + if(!temp) + job->gid = atoi(jobcomp_info->val); + *temp++ = 0; + temp2 = temp; + temp = strstr(temp, ")"); + if(!temp) { + error("problem getting correct gid from %s", + jobcomp_info->val); + } else { + *temp = 0; + job->gid = atoi(temp2); + job->gid_name = xstrdup(jobcomp_info->val); + } + } else if(!strcasecmp("Name", jobcomp_info->name)) { + job->jobname = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("NodeList", jobcomp_info->name)) { + job->nodelist = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("NodeCnt", jobcomp_info->name)) { + job->node_cnt = atoi(jobcomp_info->val); + } else if(!strcasecmp("JobState", jobcomp_info->name)) { + job->state = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("Timelimit", jobcomp_info->name)) { + job->timelimit = xstrdup(jobcomp_info->val); + } +#ifdef HAVE_BG + else if(!strcasecmp("MaxProcs", jobcomp_info->name)) { + job->max_procs = atoi(jobcomp_info->val); + } else if(!strcasecmp("Block_Id", jobcomp_info->name)) { + job->blockid = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("Connection", jobcomp_info->name)) { + job->connection = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("reboot", jobcomp_info->name)) { + job->reboot = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("rotate", jobcomp_info->name)) { + job->rotate = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("geometry", jobcomp_info->name)) { + job->geo = xstrdup(jobcomp_info->val); + } else if(!strcasecmp("start", jobcomp_info->name)) { + job->bg_start_point = xstrdup(jobcomp_info->val); + } +#endif + else { + error("Unknown type %s: %s", jobcomp_info->name, + jobcomp_info->val); + } + } + list_iterator_destroy(itr); + + return job; +} + +extern void filetxt_jobcomp_process_get_jobs(List job_list, + List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ 
+ char line[BUFFER_SIZE]; + char *fptr = NULL; + char *jobid = NULL; + char *partition = NULL; + FILE *fd = NULL; + int lc = 0; + jobcomp_job_rec_t *job = NULL; + jobacct_selected_step_t *selected_step = NULL; + char *selected_part = NULL; + ListIterator itr = NULL; + List job_info_list = NULL; + filetxt_jobcomp_info_t *jobcomp_info = NULL; + + fd = _open_log_file(params->opt_filein); + + while (fgets(line, BUFFER_SIZE, fd)) { + lc++; + fptr = line; /* break the record into NULL- + terminated strings */ + if(job_info_list) + list_destroy(job_info_list); + jobid = NULL; + partition = NULL; + job_info_list = list_create(_destroy_filetxt_jobcomp_info); + while(fptr) { + jobcomp_info = + xmalloc(sizeof(filetxt_jobcomp_info_t)); + list_append(job_info_list, jobcomp_info); + jobcomp_info->name = fptr; + fptr = strstr(fptr, "="); + *fptr++ = 0; + jobcomp_info->val = fptr; + fptr = strstr(fptr, " "); + if(!strcasecmp("JobId", jobcomp_info->name)) + jobid = jobcomp_info->val; + else if(!strcasecmp("Partition", + jobcomp_info->name)) + partition = jobcomp_info->val; + + + if(!fptr) { + fptr = strstr(jobcomp_info->val, "\n"); + if (fptr) + *fptr = 0; + break; + } else { + *fptr++ = 0; + if(*fptr == '\n') { + *fptr = 0; + break; + } + } + } + + if (list_count(selected_steps)) { + if(!jobid) + continue; + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if (strcmp(selected_step->job, jobid)) + continue; + /* job matches */ + list_iterator_destroy(itr); + goto foundjob; + } + list_iterator_destroy(itr); + continue; /* no match */ + } + foundjob: + + if (list_count(selected_parts)) { + if(!partition) + continue; + itr = list_iterator_create(selected_parts); + while((selected_part = list_next(itr))) + if (!strcasecmp(selected_part, partition)) { + list_iterator_destroy(itr); + goto foundp; + } + list_iterator_destroy(itr); + continue; /* no match */ + } + foundp: + + if (params->opt_fdump) { + _do_fdump(job_info_list, lc); + continue; + } + + + job = _parse_line(job_info_list); + + if(job) + list_append(job_list, job); + } + if(job_info_list) + list_destroy(job_info_list); + + if (ferror(fd)) { + perror(params->opt_filein); + exit(1); + } + fclose(fd); + + return; +} + +extern void filetxt_jobcomp_process_archive(List selected_parts, + sacct_parameters_t *params) +{ + info("No code to archive jobcomp."); +} diff --git a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h new file mode 100644 index 000000000..2b2bb8776 --- /dev/null +++ b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h @@ -0,0 +1,55 @@ +/*****************************************************************************\ + * filetxt_jobcomp_process.h - functions the processing of + * information from the filetxt jobcomp + * database. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. +\*****************************************************************************/ + +#ifndef _HAVE_FILETXT_JOBCOMP_PROCESS_H +#define _HAVE_FILETXT_JOBCOMP_PROCESS_H + +#include "src/common/jobacct_common.h" + +extern void filetxt_jobcomp_process_get_jobs(List job_list, + List selected_steps, + List selected_parts, + sacct_parameters_t *params); +extern void filetxt_jobcomp_process_archive(List selected_parts, + sacct_parameters_t *params); + +#endif diff --git a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c index ebff6cafd..a4e67243b 100644 --- a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c +++ b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c @@ -4,7 +4,7 @@ * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -15,7 +15,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -47,37 +47,13 @@ #endif #include <fcntl.h> -#include <pthread.h> #include <pwd.h> -#include <slurm/slurm.h> -#include <slurm/slurm_errno.h> -#include <string.h> -#include <sys/types.h> -#include <sys/stat.h> +#include <grp.h> #include <unistd.h> - -#include "src/common/macros.h" -#include "src/common/node_select.h" #include "src/common/slurm_protocol_defs.h" #include "src/common/slurm_jobcomp.h" -#include "src/common/uid.h" -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/slurmctld/slurmctld.h" - -#define JOB_FORMAT "JobId=%lu UserId=%s(%lu) Name=%s JobState=%s Partition=%s "\ - "TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s NodeCnt=%u %s\n" - -/* Type for error string table entries */ -typedef struct { - int xe_number; - char *xe_message; -} slurm_errtab_t; - -static slurm_errtab_t slurm_errtab[] = { - {0, "No error"}, - {-1, "Unspecified error"} -}; +#include "src/common/parse_time.h" +#include "filetxt_jobcomp_process.h" /* * These variables are required by the generic plugin interface. If they @@ -110,7 +86,21 @@ static slurm_errtab_t slurm_errtab[] = { */ const char plugin_name[] = "Job completion text file logging plugin"; const char plugin_type[] = "jobcomp/filetxt"; -const uint32_t plugin_version = 90; +const uint32_t plugin_version = 100; + +#define JOB_FORMAT "JobId=%lu UserId=%s(%lu) GroupId=%s(%lu) Name=%s JobState=%s Partition=%s "\ + "TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s NodeCnt=%u ProcCnt=%u %s\n" + +/* Type for error string table entries */ +typedef struct { + int xe_number; + char *xe_message; +} slurm_errtab_t; + +static slurm_errtab_t slurm_errtab[] = { + {0, "No error"}, + {-1, "Unspecified error"} +}; /* A plugin-global errno. */ static int plugin_errno = SLURM_SUCCESS; @@ -119,6 +109,69 @@ static int plugin_errno = SLURM_SUCCESS; static pthread_mutex_t file_lock = PTHREAD_MUTEX_INITIALIZER; static char * log_name = NULL; static int job_comp_fd = -1; + +/* get the user name for the give user_id */ +static void +_get_user_name(uint32_t user_id, char *user_name, int buf_size) +{ + static uint32_t cache_uid = 0; + static char cache_name[32] = "root"; + struct passwd * user_info = NULL; + + if (user_id == cache_uid) + snprintf(user_name, buf_size, "%s", cache_name); + else { + user_info = getpwuid((uid_t) user_id); + if (user_info && user_info->pw_name[0]) + snprintf(cache_name, sizeof(cache_name), "%s", + user_info->pw_name); + else + snprintf(cache_name, sizeof(cache_name), "Unknown"); + cache_uid = user_id; + snprintf(user_name, buf_size, "%s", cache_name); + } +} + +/* get the group name for the give group_id */ +static void +_get_group_name(uint32_t group_id, char *group_name, int buf_size) +{ + static uint32_t cache_gid = 0; + static char cache_name[32] = "root"; + struct group *group_info = NULL; + + if (group_id == cache_gid) + snprintf(group_name, buf_size, "%s", cache_name); + else { + group_info = getgrgid((gid_t) group_id); + if (group_info && group_info->gr_name[0]) + snprintf(cache_name, sizeof(cache_name), "%s", + group_info->gr_name); + else + snprintf(cache_name, sizeof(cache_name), "Unknown"); + cache_gid = group_id; + snprintf(group_name, buf_size, "%s", cache_name); + } +} + +/* + * Linear search through table of errno values and strings, + * returns NULL on error, string on success. 
+ */ +static char *_lookup_slurm_api_errtab(int errnum) +{ + char *res = NULL; + int i; + + for (i = 0; i < sizeof(slurm_errtab) / sizeof(slurm_errtab_t); i++) { + if (slurm_errtab[i].xe_number == errnum) { + res = slurm_errtab[i].xe_message; + break; + } + } + return res; +} + /* * init() is called when the plugin is loaded, before any other functions * are called. Put global initialization here. @@ -128,12 +181,20 @@ int init ( void ) return SLURM_SUCCESS; } +int fini ( void ) +{ + if (job_comp_fd >= 0) + close(job_comp_fd); + xfree(log_name); + return SLURM_SUCCESS; +} + /* * The remainder of this file implements the standard SLURM job completion * logging API. */ -int slurm_jobcomp_set_location ( char * location ) +extern int slurm_jobcomp_set_location ( char * location ) { int rc = SLURM_SUCCESS; @@ -158,51 +219,11 @@ int slurm_jobcomp_set_location ( char * location ) return rc; } -/* get the user name for the give user_id */ -static void -_get_user_name(uint32_t user_id, char *user_name, int buf_size) -{ - static uint32_t cache_uid = 0; - static char cache_name[32] = "root"; - struct passwd * user_info = NULL; - - if (user_id == cache_uid) - snprintf(user_name, buf_size, "%s", cache_name); - else { - user_info = getpwuid((uid_t) user_id); - if (user_info && user_info->pw_name[0]) - snprintf(cache_name, sizeof(cache_name), "%s", - user_info->pw_name); - else - snprintf(cache_name, sizeof(cache_name), "Unknown"); - cache_uid = user_id; - snprintf(user_name, buf_size, "%s", cache_name); - } -} - -/* - * make_time_str - convert time_t to string with "YYYY-MM-DDTHH:MM:SS" - * Note this is the ISO8601 standard format - * IN time - a time stamp - * IN str_size - size of string buffer - * OUT string - pointer user defined buffer - */ -static void -_make_time_str (time_t *time, char *string, int str_size) -{ - struct tm time_tm; - - localtime_r (time, &time_tm); - snprintf ( string, str_size, "%4.4u-%2.2u-%2.2uT%2.2u:%2.2u:%2.2u", - (time_tm.tm_year + 1900), (time_tm.tm_mon+1), time_tm.tm_mday, - time_tm.tm_hour, time_tm.tm_min, time_tm.tm_sec); -} - -int slurm_jobcomp_log_record ( struct job_record *job_ptr ) +extern int slurm_jobcomp_log_record ( struct job_record *job_ptr ) { int rc = SLURM_SUCCESS; - char job_rec[512+MAX_JOBNAME_LEN]; - char usr_str[32], start_str[32], end_str[32], lim_str[32]; + char job_rec[1024]; + char usr_str[32], grp_str[32], start_str[32], end_str[32], lim_str[32]; char select_buf[128]; size_t offset = 0, tot_size, wrote; enum job_states job_state; @@ -214,6 +235,7 @@ int slurm_jobcomp_log_record ( struct job_record *job_ptr ) slurm_mutex_lock( &file_lock ); _get_user_name(job_ptr->user_id, usr_str, sizeof(usr_str)); + _get_group_name(job_ptr->group_id, grp_str, sizeof(grp_str)); if (job_ptr->time_limit == INFINITE) strcpy(lim_str, "UNLIMITED"); else @@ -225,19 +247,22 @@ int slurm_jobcomp_log_record ( struct job_record *job_ptr ) * JOB_FAILED, JOB_TIMEOUT, etc. 
*/ job_state = job_ptr->job_state & (~JOB_COMPLETING); - _make_time_str(&(job_ptr->start_time), start_str, sizeof(start_str)); - _make_time_str(&(job_ptr->end_time), end_str, sizeof(end_str)); + slurm_make_time_str(&(job_ptr->start_time), + start_str, sizeof(start_str)); + slurm_make_time_str(&(job_ptr->end_time), end_str, sizeof(end_str)); select_g_sprint_jobinfo(job_ptr->select_jobinfo, select_buf, sizeof(select_buf), SELECT_PRINT_MIXED); snprintf(job_rec, sizeof(job_rec), JOB_FORMAT, - (unsigned long) job_ptr->job_id, usr_str, - (unsigned long) job_ptr->user_id, job_ptr->name, - job_state_string(job_state), - job_ptr->partition, lim_str, start_str, - end_str, job_ptr->nodes, job_ptr->node_cnt, - select_buf); + (unsigned long) job_ptr->job_id, usr_str, + (unsigned long) job_ptr->user_id, grp_str, + (unsigned long) job_ptr->group_id, job_ptr->name, + job_state_string(job_state), + job_ptr->partition, lim_str, start_str, + end_str, job_ptr->nodes, job_ptr->node_cnt, + job_ptr->total_procs, + select_buf); tot_size = strlen(job_rec); while ( offset < tot_size ) { @@ -263,34 +288,33 @@ extern int slurm_jobcomp_get_errno( void ) return plugin_errno; } -/* - * Linear search through table of errno values and strings, - * returns NULL on error, string on success. - */ -static char *_lookup_slurm_api_errtab(int errnum) -{ - char *res = NULL; - int i; - - for (i = 0; i < sizeof(slurm_errtab) / sizeof(slurm_errtab_t); i++) { - if (slurm_errtab[i].xe_number == errnum) { - res = slurm_errtab[i].xe_message; - break; - } - } - return res; -} - extern char *slurm_jobcomp_strerror( int errnum ) { char *res = _lookup_slurm_api_errtab(errnum); return (res ? res : strerror(errnum)); } -int fini ( void ) +/* + * get info from the database + * in/out job_list List of job_rec_t * + * note List needs to be freed when called + */ +extern void slurm_jobcomp_get_jobs(List job_list, + List selected_steps, List selected_parts, + void *params) { - if (job_comp_fd >= 0) - close(job_comp_fd); - xfree(log_name); - return SLURM_SUCCESS; + filetxt_jobcomp_process_get_jobs(job_list, + selected_steps, selected_parts, + params); + return; +} + +/* + * expire old info from the database + */ +extern void slurm_jobcomp_archive(List selected_parts, + void *params) +{ + filetxt_jobcomp_process_archive(selected_parts, params); + return; } diff --git a/src/plugins/jobcomp/mysql/Makefile.am b/src/plugins/jobcomp/mysql/Makefile.am new file mode 100644 index 000000000..44da93ece --- /dev/null +++ b/src/plugins/jobcomp/mysql/Makefile.am @@ -0,0 +1,19 @@ +# Makefile for jobcomp/mysql plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = jobcomp_mysql.la + +# Mysql storage plugin. +jobcomp_mysql_la_SOURCES = jobcomp_mysql.c \ + mysql_jobcomp_process.c mysql_jobcomp_process.h +jobcomp_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +jobcomp_mysql_la_CFLAGS = $(MYSQL_CFLAGS) +jobcomp_mysql_la_LIBADD = $(top_builddir)/src/database/libslurm_mysql.la \ + $(MYSQL_LIBS) +jobcomp_mysql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_mysql.la + diff --git a/src/plugins/jobcomp/mysql/Makefile.in b/src/plugins/jobcomp/mysql/Makefile.in new file mode 100644 index 000000000..bddbb69dd --- /dev/null +++ b/src/plugins/jobcomp/mysql/Makefile.in @@ -0,0 +1,578 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. 
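
To make the jobcomp/filetxt changes above concrete: each completed job is written as a single key=value line, and the reworked JOB_FORMAT now records GroupId and ProcCnt alongside the existing fields. The standalone sketch below is illustrative only; it is not part of the patch, and every value in it is invented. It prints one record with the same format string so that scripts parsing the completion file can see the new field order.

#include <stdio.h>

/* Same format string as the plugin above; all values below are placeholders. */
#define JOB_FORMAT "JobId=%lu UserId=%s(%lu) GroupId=%s(%lu) Name=%s " \
	"JobState=%s Partition=%s TimeLimit=%s StartTime=%s EndTime=%s " \
	"NodeList=%s NodeCnt=%u ProcCnt=%u %s\n"

int main(void)
{
	printf(JOB_FORMAT,
	       1234UL, "alice", 1001UL, "users", 100UL,   /* JobId, UserId, GroupId */
	       "bench", "COMPLETED", "debug",             /* Name, JobState, Partition */
	       "30", "2008-01-01T10:00:00",               /* TimeLimit, StartTime */
	       "2008-01-01T10:05:00", "tux[0-3]",         /* EndTime, NodeList */
	       4u, 8u, "");                               /* NodeCnt, ProcCnt, select info */
	return 0;
}
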
+# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for jobcomp/mysql plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/jobcomp/mysql +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am_jobcomp_mysql_la_OBJECTS = jobcomp_mysql_la-jobcomp_mysql.lo \ + jobcomp_mysql_la-mysql_jobcomp_process.lo +jobcomp_mysql_la_OBJECTS = $(am_jobcomp_mysql_la_OBJECTS) +jobcomp_mysql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(jobcomp_mysql_la_CFLAGS) \ + $(CFLAGS) $(jobcomp_mysql_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(jobcomp_mysql_la_SOURCES) +DIST_SOURCES = $(jobcomp_mysql_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = 
@abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = jobcomp_mysql.la + +# Mysql storage plugin. +jobcomp_mysql_la_SOURCES = jobcomp_mysql.c \ + mysql_jobcomp_process.c mysql_jobcomp_process.h + +jobcomp_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +jobcomp_mysql_la_CFLAGS = $(MYSQL_CFLAGS) +jobcomp_mysql_la_LIBADD = $(top_builddir)/src/database/libslurm_mysql.la \ + $(MYSQL_LIBS) + +jobcomp_mysql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_mysql.la +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobcomp/mysql/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/jobcomp/mysql/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +jobcomp_mysql.la: $(jobcomp_mysql_la_OBJECTS) $(jobcomp_mysql_la_DEPENDENCIES) + $(jobcomp_mysql_la_LINK) -rpath $(pkglibdir) $(jobcomp_mysql_la_OBJECTS) $(jobcomp_mysql_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobcomp_mysql_la-jobcomp_mysql.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobcomp_mysql_la-mysql_jobcomp_process.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ 
$(LTCOMPILE) -c -o $@ $< + +jobcomp_mysql_la-jobcomp_mysql.lo: jobcomp_mysql.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_mysql_la_CFLAGS) $(CFLAGS) -MT jobcomp_mysql_la-jobcomp_mysql.lo -MD -MP -MF $(DEPDIR)/jobcomp_mysql_la-jobcomp_mysql.Tpo -c -o jobcomp_mysql_la-jobcomp_mysql.lo `test -f 'jobcomp_mysql.c' || echo '$(srcdir)/'`jobcomp_mysql.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/jobcomp_mysql_la-jobcomp_mysql.Tpo $(DEPDIR)/jobcomp_mysql_la-jobcomp_mysql.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='jobcomp_mysql.c' object='jobcomp_mysql_la-jobcomp_mysql.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_mysql_la_CFLAGS) $(CFLAGS) -c -o jobcomp_mysql_la-jobcomp_mysql.lo `test -f 'jobcomp_mysql.c' || echo '$(srcdir)/'`jobcomp_mysql.c + +jobcomp_mysql_la-mysql_jobcomp_process.lo: mysql_jobcomp_process.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_mysql_la_CFLAGS) $(CFLAGS) -MT jobcomp_mysql_la-mysql_jobcomp_process.lo -MD -MP -MF $(DEPDIR)/jobcomp_mysql_la-mysql_jobcomp_process.Tpo -c -o jobcomp_mysql_la-mysql_jobcomp_process.lo `test -f 'mysql_jobcomp_process.c' || echo '$(srcdir)/'`mysql_jobcomp_process.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/jobcomp_mysql_la-mysql_jobcomp_process.Tpo $(DEPDIR)/jobcomp_mysql_la-mysql_jobcomp_process.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='mysql_jobcomp_process.c' object='jobcomp_mysql_la-mysql_jobcomp_process.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_mysql_la_CFLAGS) $(CFLAGS) -c -o jobcomp_mysql_la-mysql_jobcomp_process.lo `test -f 'mysql_jobcomp_process.c' || echo '$(srcdir)/'`mysql_jobcomp_process.c + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; 
then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
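
For completeness on how the new database-backed job completion plugins are selected at run time: the plugin type is chosen in slurm.conf, and jobcomp/mysql reads its connection settings through the slurm_get_jobcomp_* accessors used further down, with the database name falling back to slurm_jobcomp_db when JobCompLoc looks like a file path. The excerpt below is a hedged illustration: JobCompType and JobCompLoc are standard keywords, while the host/port/user/password keyword names are given from memory for this release and should be verified against the slurm.conf(5) man page; all values are placeholders.

# slurm.conf excerpt (illustrative placeholders only)
JobCompType=jobcomp/mysql
JobCompLoc=slurm_jobcomp_db
JobCompHost=dbserver
JobCompPort=3306
JobCompUser=slurm
JobCompPass=change_me
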
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/jobcomp/mysql/jobcomp_mysql.c b/src/plugins/jobcomp/mysql/jobcomp_mysql.c new file mode 100644 index 000000000..46a139d9b --- /dev/null +++ b/src/plugins/jobcomp/mysql/jobcomp_mysql.c @@ -0,0 +1,459 @@ +/*****************************************************************************\ + * jobcomp_mysql.c - Store/Get all information in a mysql storage. + * + * $Id: storage_mysql.c 10893 2007-01-29 21:53:48Z da $ + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. 
If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "mysql_jobcomp_process.h" +#include <pwd.h> +#include <grp.h> +#include <sys/types.h> +#include "src/common/parse_time.h" +#include "src/common/node_select.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobacct/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. 
+ */ +const char plugin_name[] = "Job completion MYSQL plugin"; +const char plugin_type[] = "jobcomp/mysql"; +const uint32_t plugin_version = 100; + +#ifdef HAVE_MYSQL + +#define DEFAULT_JOBCOMP_DB "slurm_jobcomp_db" + +MYSQL *jobcomp_mysql_db = NULL; + +char *jobcomp_table = "jobcomp_table"; +storage_field_t jobcomp_table_fields[] = { + { "jobid", "int not null" }, + { "uid", "smallint unsigned not null" }, + { "user_name", "tinytext not null" }, + { "gid", "smallint unsigned not null" }, + { "group_name", "tinytext not null" }, + { "name", "tinytext not null" }, + { "state", "smallint not null" }, + { "partition", "tinytext not null" }, + { "timelimit", "tinytext not null" }, + { "starttime", "int unsigned default 0 not null" }, + { "endtime", "int unsigned default 0 not null" }, + { "nodelist", "text" }, + { "nodecnt", "mediumint unsigned not null" }, + { "proc_cnt", "mediumint unsigned not null" }, +#ifdef HAVE_BG + { "connect_type", "tinytext" }, + { "reboot", "tinytext" }, + { "rotate", "tinytext" }, + { "maxprocs", "mediumint unsigned default 0 not null" }, + { "geometry", "tinytext" }, + { "start", "tinytext" }, + { "blockid", "tinytext" }, +#endif + { NULL, NULL} +}; + + +/* Type for error string table entries */ +typedef struct { + int xe_number; + char *xe_message; +} slurm_errtab_t; + +static slurm_errtab_t slurm_errtab[] = { + {0, "No error"}, + {-1, "Unspecified error"} +}; + +/* A plugin-global errno. */ +static int plugin_errno = SLURM_SUCCESS; + +/* File descriptor used for logging */ +static pthread_mutex_t jobcomp_lock = PTHREAD_MUTEX_INITIALIZER; + + +static mysql_db_info_t *_mysql_jobcomp_create_db_info() +{ + mysql_db_info_t *db_info = xmalloc(sizeof(mysql_db_info_t)); + db_info->port = slurm_get_jobcomp_port(); + if(!db_info->port) + db_info->port = 3306; + db_info->host = slurm_get_jobcomp_host(); + db_info->user = slurm_get_jobcomp_user(); + db_info->pass = slurm_get_jobcomp_pass(); + return db_info; +} + +static int _mysql_jobcomp_check_tables() +{ + if(mysql_db_create_table(jobcomp_mysql_db, jobcomp_table, + jobcomp_table_fields, ")") == SLURM_ERROR) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + + +/* get the user name for the give user_id */ +static char *_get_user_name(uint32_t user_id) +{ + static uint32_t cache_uid = 0; + static char cache_name[32] = "root"; + struct passwd * user_info = NULL; + char *ret_name = NULL; + + slurm_mutex_lock(&jobcomp_lock); + if (user_id != cache_uid) { + user_info = getpwuid((uid_t) user_id); + if (user_info && user_info->pw_name[0]) + snprintf(cache_name, sizeof(cache_name), "%s", + user_info->pw_name); + else + snprintf(cache_name, sizeof(cache_name), "Unknown"); + cache_uid = user_id; + } + ret_name = xstrdup(cache_name); + slurm_mutex_unlock(&jobcomp_lock); + + return ret_name; +} + +/* get the group name for the give group_id */ +static char *_get_group_name(uint32_t group_id) +{ + static uint32_t cache_gid = 0; + static char cache_name[32] = "root"; + struct group *group_info = NULL; + char *ret_name = NULL; + + slurm_mutex_lock(&jobcomp_lock); + if (group_id != cache_gid) { + group_info = getgrgid((gid_t) group_id); + if (group_info && group_info->gr_name[0]) + snprintf(cache_name, sizeof(cache_name), "%s", + group_info->gr_name); + else + snprintf(cache_name, sizeof(cache_name), "Unknown"); + cache_gid = group_id; + } + ret_name = xstrdup(cache_name); + slurm_mutex_unlock(&jobcomp_lock); + + return ret_name; +} + +/* + * Linear search through table of errno values and strings, + * returns NULL on error, string on 
success. + */ +static char *_lookup_slurm_api_errtab(int errnum) +{ + char *res = NULL; + int i; + + for (i = 0; i < sizeof(slurm_errtab) / sizeof(slurm_errtab_t); i++) { + if (slurm_errtab[i].xe_number == errnum) { + res = slurm_errtab[i].xe_message; + break; + } + } + return res; +} +#endif + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + static int first = 1; +#ifndef HAVE_MYSQL + fatal("No MySQL storage was found on the machine. " + "Please check the configure ran and run again."); +#endif + if(first) { + /* since this can be loaded from many different places + only tell us once. */ + verbose("%s loaded", plugin_name); + first = 0; + } else { + debug4("%s loaded", plugin_name); + } + + return SLURM_SUCCESS; +} + +extern int fini ( void ) +{ +#ifdef HAVE_MYSQL + if (jobcomp_mysql_db) { + mysql_close(jobcomp_mysql_db); + jobcomp_mysql_db = NULL; + } + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern int slurm_jobcomp_set_location(char *location) +{ +#ifdef HAVE_MYSQL + mysql_db_info_t *db_info = _mysql_jobcomp_create_db_info(); + int rc = SLURM_SUCCESS; + char *db_name = NULL; + int i = 0; + + if(jobcomp_mysql_db && mysql_ping(jobcomp_mysql_db) == 0) + return SLURM_SUCCESS; + + if(!location) + db_name = DEFAULT_JOBCOMP_DB; + else { + while(location[i]) { + if(location[i] == '.' || location[i] == '/') { + debug("%s doesn't look like a database " + "name using %s", + location, DEFAULT_JOBCOMP_DB); + break; + } + i++; + } + if(location[i]) + db_name = DEFAULT_JOBCOMP_DB; + else + db_name = location; + } + + debug2("mysql_connect() called for db %s", db_name); + + mysql_get_db_connection(&jobcomp_mysql_db, db_name, db_info); + + rc = _mysql_jobcomp_check_tables(); + + destroy_mysql_db_info(db_info); + + if(rc == SLURM_SUCCESS) + debug("Jobcomp database init finished"); + else + debug("Jobcomp database init failed"); + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int slurm_jobcomp_log_record(struct job_record *job_ptr) +{ +#ifdef HAVE_MYSQL + int rc = SLURM_SUCCESS; + char *usr_str = NULL, *grp_str = NULL, lim_str[32]; +#ifdef HAVE_BG + char connect_type[128]; + char reboot[4]; + char rotate[4]; + char maxprocs[20]; + char geometry[20]; + char start[20]; + char blockid[128]; +#endif + enum job_states job_state; + char query[1024]; + + if(!jobcomp_mysql_db || mysql_ping(jobcomp_mysql_db) != 0) { + char *loc = slurm_get_jobcomp_loc(); + if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) { + xfree(loc); + return SLURM_ERROR; + } + xfree(loc); + } + + usr_str = _get_user_name(job_ptr->user_id); + grp_str = _get_group_name(job_ptr->group_id); + if (job_ptr->time_limit == INFINITE) + strcpy(lim_str, "UNLIMITED"); + else + snprintf(lim_str, sizeof(lim_str), "%lu", + (unsigned long) job_ptr->time_limit); + + /* Job will typically be COMPLETING when this is called. + * We remove this flag to get the eventual completion state: + * JOB_FAILED, JOB_TIMEOUT, etc. 
*/ + job_state = job_ptr->job_state & (~JOB_COMPLETING); + +#ifdef HAVE_BG + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + connect_type, sizeof(connect_type), SELECT_PRINT_CONNECTION); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + reboot, sizeof(reboot), SELECT_PRINT_REBOOT); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + rotate, sizeof(rotate), SELECT_PRINT_ROTATE); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + maxprocs, sizeof(maxprocs), SELECT_PRINT_MAX_PROCS); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + geometry, sizeof(geometry), SELECT_PRINT_GEOMETRY); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + start, sizeof(start), SELECT_PRINT_START); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + blockid, sizeof(blockid), SELECT_PRINT_BG_ID); +#endif + snprintf(query, sizeof(query), + "insert into %s (jobid, uid, user_name, gid, group_name, " + "name, state, proc_cnt, " + "partition, timelimit, starttime, endtime, nodelist, nodecnt" +#ifdef HAVE_BG + ", connect_type, reboot, rotate, maxprocs, geometry, " + "start, blockid" +#endif + ") values (%u, %u, '%s', %u, '%s', '%s', %d, %u, " + "'%s', '%s', %u, %u, '%s', %u" +#ifdef HAVE_BG + ", '%s', '%s', '%s', %s, '%s', '%s', '%s'" +#endif + ")", + jobcomp_table, job_ptr->job_id, job_ptr->user_id, usr_str, + job_ptr->group_id, grp_str, job_ptr->name, + job_state, job_ptr->total_procs, job_ptr->partition, lim_str, + (int)job_ptr->start_time, (int)job_ptr->end_time, + job_ptr->nodes, job_ptr->node_cnt +#ifdef HAVE_BG + , connect_type, reboot, rotate, maxprocs, geometry, + start, blockid +#endif + ); +// info("query = %s", query); + rc = mysql_db_query(jobcomp_mysql_db, query); + xfree(usr_str); + xfree(grp_str); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int slurm_jobcomp_get_errno(void) +{ +#ifdef HAVE_MYSQL + return plugin_errno; +#else + return SLURM_ERROR; +#endif +} + +extern char *slurm_jobcomp_strerror(int errnum) +{ +#ifdef HAVE_MYSQL + char *res = _lookup_slurm_api_errtab(errnum); + return (res ? 
res : strerror(errnum)); +#else + return NULL; +#endif +} + +/* + * get info from the storage + * in/out job_list List of job_rec_t * + * note List needs to be freed when called + */ +extern List slurm_jobcomp_get_jobs(List selected_steps, + List selected_parts, + void *params) +{ + List job_list = NULL; + +#ifdef HAVE_MYSQL + if(!jobcomp_mysql_db || mysql_ping(jobcomp_mysql_db) != 0) { + char *loc = slurm_get_jobcomp_loc(); + if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) { + xfree(loc); + return job_list; + } + xfree(loc); + } + + job_list = mysql_jobcomp_process_get_jobs(selected_steps, + selected_parts, + params); +#endif + return job_list; +} + +/* + * expire old info from the storage + */ +extern void slurm_jobcomp_archive(List selected_parts, void *params) +{ +#ifdef HAVE_MYSQL + if(!jobcomp_mysql_db || mysql_ping(jobcomp_mysql_db) != 0) { + char *loc = slurm_get_jobcomp_loc(); + if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) { + xfree(loc); + return; + } + xfree(loc); + } + + mysql_jobcomp_process_archive(selected_parts, params); +#endif + return; +} diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c new file mode 100644 index 000000000..e6c51d811 --- /dev/null +++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c @@ -0,0 +1,207 @@ +/*****************************************************************************\ + * mysql_jobcomp_process.c - functions the processing of + * information from the mysql jobcomp + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#include <stdlib.h> +#include "src/common/parse_time.h" +#include "src/common/xstring.h" +#include "mysql_jobcomp_process.h" + +#ifdef HAVE_MYSQL +static void _do_fdump(MYSQL_ROW row, int lc) +{ + int i = 0; + printf("\n------- Line %d -------\n", lc); + while(jobcomp_table_fields[i].name) { + printf("%12s: %s\n", jobcomp_table_fields[i].name, row[i]); + i++; + } + + return; +} + +extern List mysql_jobcomp_process_get_jobs(List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ + + char *query = NULL; + char *extra = NULL; + char *tmp = NULL; + char *selected_part = NULL; + jobacct_selected_step_t *selected_step = NULL; + ListIterator itr = NULL; + int set = 0; + MYSQL_RES *result = NULL; + MYSQL_ROW row; + int i; + int lc = 0; + jobcomp_job_rec_t *job = NULL; + char time_str[32]; + time_t temp_time; + List job_list = list_create(jobcomp_destroy_job); + + if(selected_steps && list_count(selected_steps)) { + set = 0; + xstrcat(extra, " where ("); + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + tmp = xstrdup_printf("jobid=%d", + selected_step->jobid); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(selected_parts && list_count(selected_parts)) { + set = 0; + if(extra) + xstrcat(extra, " && ("); + else + xstrcat(extra, " where ("); + + itr = list_iterator_create(selected_parts); + while((selected_part = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + tmp = xstrdup_printf("partition='%s'", + selected_part); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + i = 0; + while(jobcomp_table_fields[i].name) { + if(i) + xstrcat(tmp, ", "); + xstrcat(tmp, jobcomp_table_fields[i].name); + i++; + } + + query = xstrdup_printf("select %s from %s", tmp, jobcomp_table); + xfree(tmp); + + if(extra) { + xstrcat(query, extra); + xfree(extra); + } + + //info("query = %s", query); + if(!(result = + mysql_db_query_ret(jobcomp_mysql_db, query, 0))) { + xfree(query); + list_destroy(job_list); + return NULL; + } + xfree(query); + + while((row = mysql_fetch_row(result))) { + lc++; + + if (params->opt_fdump) { + _do_fdump(row, lc); + continue; + } + job = xmalloc(sizeof(jobcomp_job_rec_t)); + if(row[JOBCOMP_REQ_JOBID]) + job->jobid = atoi(row[JOBCOMP_REQ_JOBID]); + job->partition = xstrdup(row[JOBCOMP_REQ_PARTITION]); + temp_time = atoi(row[JOBCOMP_REQ_STARTTIME]); + slurm_make_time_str(&temp_time, + time_str, + sizeof(time_str)); + + job->start_time = xstrdup(time_str); + temp_time = atoi(row[JOBCOMP_REQ_ENDTIME]); + slurm_make_time_str(&temp_time, + time_str, + sizeof(time_str)); + + job->end_time = xstrdup(time_str); + if(row[JOBCOMP_REQ_UID]) + job->uid = atoi(row[JOBCOMP_REQ_UID]); + job->uid_name = xstrdup(row[JOBCOMP_REQ_USER_NAME]); + if(row[JOBCOMP_REQ_GID]) + job->gid = atoi(row[JOBCOMP_REQ_GID]); + job->gid_name = xstrdup(row[JOBCOMP_REQ_GROUP_NAME]); + job->jobname = xstrdup(row[JOBCOMP_REQ_NAME]); + job->nodelist = xstrdup(row[JOBCOMP_REQ_NODELIST]); + if(row[JOBCOMP_REQ_NODECNT]) + job->node_cnt = atoi(row[JOBCOMP_REQ_NODECNT]); + if(row[JOBCOMP_REQ_STATE]) { + i = atoi(row[JOBCOMP_REQ_STATE]); + job->state = xstrdup(job_state_string(i)); + } + job->timelimit = xstrdup(row[JOBCOMP_REQ_TIMELIMIT]); +#ifdef HAVE_BG + if(row[JOBCOMP_REQ_MAXPROCS]) + job->max_procs = 
atoi(row[JOBCOMP_REQ_MAXPROCS]); + job->connection = xstrdup(row[JOBCOMP_REQ_CONNECTION]); + job->reboot = xstrdup(row[JOBCOMP_REQ_REBOOT]); + job->rotate = xstrdup(row[JOBCOMP_REQ_ROTATE]); + job->geo = xstrdup(row[JOBCOMP_REQ_GEOMETRY]); + job->bg_start_point = xstrdup(row[JOBCOMP_REQ_START]); + job->blockid = xstrdup(row[JOBCOMP_REQ_BLOCKID]); +#endif + list_append(job_list, job); + } + + mysql_free_result(result); + + return job_list; +} + +extern void mysql_jobcomp_process_archive(List selected_parts, + sacct_parameters_t *params) +{ + return; +} + +#endif diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h new file mode 100644 index 000000000..787b098c4 --- /dev/null +++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h @@ -0,0 +1,94 @@ +/*****************************************************************************\ + * mysql_jobcomp_process.h - functions the processing of + * information from the mysql jobcomp + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#ifndef _HAVE_MYSQL_JOBCOMP_PROCESS_H +#define _HAVE_MYSQL_JOBCOMP_PROCESS_H + +#include "src/database/mysql_common.h" +#include "src/common/slurm_jobacct_gather.h" +#include "src/common/slurm_jobcomp.h" + +#ifdef HAVE_MYSQL +extern MYSQL *jobcomp_mysql_db; +extern int jobcomp_db_init; + +extern char *jobcomp_table; +/* This variable and the following enum are related so if you change + the jobcomp_table_fields defined in mysql_jobcomp.c you must update + this enum accordingly. +*/ +extern storage_field_t jobcomp_table_fields[]; +enum { + JOBCOMP_REQ_JOBID, + JOBCOMP_REQ_UID, + JOBCOMP_REQ_USER_NAME, + JOBCOMP_REQ_GID, + JOBCOMP_REQ_GROUP_NAME, + JOBCOMP_REQ_NAME, + JOBCOMP_REQ_STATE, + JOBCOMP_REQ_PARTITION, + JOBCOMP_REQ_TIMELIMIT, + JOBCOMP_REQ_STARTTIME, + JOBCOMP_REQ_ENDTIME, + JOBCOMP_REQ_NODELIST, + JOBCOMP_REQ_NODECNT, +#ifdef HAVE_BG + JOBCOMP_REQ_CONNECTION, + JOBCOMP_REQ_REBOOT, + JOBCOMP_REQ_ROTATE, + JOBCOMP_REQ_MAXPROCS, + JOBCOMP_REQ_GEOMETRY, + JOBCOMP_REQ_START, + JOBCOMP_REQ_BLOCKID, +#endif + JOBCOMP_REQ_COUNT +}; + +extern List mysql_jobcomp_process_get_jobs(List selected_steps, + List selected_parts, + sacct_parameters_t *params); + +extern void mysql_jobcomp_process_archive(List selected_parts, + sacct_parameters_t *params); +#endif + +#endif diff --git a/src/plugins/jobcomp/none/Makefile.in b/src/plugins/jobcomp/none/Makefile.in index ef118a378..5d4d646c0 100644 --- a/src/plugins/jobcomp/none/Makefile.in +++ b/src/plugins/jobcomp/none/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ jobcomp_none_la_OBJECTS = $(am_jobcomp_none_la_OBJECTS) jobcomp_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(jobcomp_none_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
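
One practical note on the warning in mysql_jobcomp_process.h above, that jobcomp_table_fields and the JOBCOMP_REQ_* enum must be kept in step: because result rows are indexed by those enum values, a silent mismatch shifts every column. The self-contained sketch below shows the kind of sanity check a plugin could run at startup; the struct, the two-column table, and the enum here are stand-ins for illustration, not the patch's real definitions.

#include <stdio.h>

typedef struct {
	char *name;
	char *options;
} storage_field_t;			/* stand-in for the real type */

static storage_field_t jobcomp_table_fields[] = {
	{ "jobid", "int not null" },	/* stand-in table with two columns */
	{ "uid",   "smallint unsigned not null" },
	{ NULL, NULL }
};

enum {					/* must mirror the array above */
	JOBCOMP_REQ_JOBID,
	JOBCOMP_REQ_UID,
	JOBCOMP_REQ_COUNT
};

int main(void)
{
	int n = 0;

	while (jobcomp_table_fields[n].name)	/* count named entries */
		n++;
	if (n != JOBCOMP_REQ_COUNT) {
		fprintf(stderr, "field table (%d) and enum (%d) disagree\n",
			n, JOBCOMP_REQ_COUNT);
		return 1;
	}
	printf("field table and enum agree: %d columns\n", n);
	return 0;
}
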
tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobcomp/none/jobcomp_none.c b/src/plugins/jobcomp/none/jobcomp_none.c index 986a8a705..767fac2a4 100644 --- a/src/plugins/jobcomp/none/jobcomp_none.c +++ b/src/plugins/jobcomp/none/jobcomp_none.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -83,7 +83,7 @@ */ const char plugin_name[] = "Job completion logging NONE plugin"; const char plugin_type[] = "jobcomp/none"; -const uint32_t plugin_version = 90; +const uint32_t plugin_version = 100; /* * init() is called when the plugin is loaded, before any other functions @@ -119,7 +119,21 @@ char *slurm_jobcomp_strerror( int errnum ) return NULL; } +List slurm_jobcomp_get_jobs(List selected_steps, List selected_parts, + void *params) +{ + return NULL; +} + +void slurm_jobcomp_archive(List selected_parts, + void *params) +{ + return; +} + int fini ( void ) { return SLURM_SUCCESS; } + + diff --git a/src/plugins/jobcomp/pgsql/Makefile.am b/src/plugins/jobcomp/pgsql/Makefile.am new file mode 100644 index 000000000..ae90b1625 --- /dev/null +++ b/src/plugins/jobcomp/pgsql/Makefile.am @@ -0,0 +1,19 @@ +# Makefile for jobcomp/pgsql plugin + +CPPFLAGS = $(PGSQL_CFLAGS) +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = jobcomp_pgsql.la + +# Pgsql storage plugin. +jobcomp_pgsql_la_SOURCES = jobcomp_pgsql.c \ + pgsql_jobcomp_process.c pgsql_jobcomp_process.h +jobcomp_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +jobcomp_pgsql_la_CFLAGS = $(PGSQL_CFLAGS) +jobcomp_pgsql_la_LIBADD = $(top_builddir)/src/database/libslurm_pgsql.la \ + $(PGSQL_LIBS) +jobcomp_pgsql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_pgsql.la diff --git a/src/plugins/jobcomp/pgsql/Makefile.in b/src/plugins/jobcomp/pgsql/Makefile.in new file mode 100644 index 000000000..71612054e --- /dev/null +++ b/src/plugins/jobcomp/pgsql/Makefile.in @@ -0,0 +1,578 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. 
+# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for jobcomp/pgsql plugin + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +subdir = src/plugins/jobcomp/pgsql +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +am__installdirs = "$(DESTDIR)$(pkglibdir)" +pkglibLTLIBRARIES_INSTALL = $(INSTALL) +LTLIBRARIES = $(pkglib_LTLIBRARIES) +am__DEPENDENCIES_1 = +am_jobcomp_pgsql_la_OBJECTS = jobcomp_pgsql_la-jobcomp_pgsql.lo \ + jobcomp_pgsql_la-pgsql_jobcomp_process.lo +jobcomp_pgsql_la_OBJECTS = $(am_jobcomp_pgsql_la_OBJECTS) +jobcomp_pgsql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CCLD) $(jobcomp_pgsql_la_CFLAGS) \ + $(CFLAGS) $(jobcomp_pgsql_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) 
$(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(jobcomp_pgsql_la_SOURCES) +DIST_SOURCES = $(jobcomp_pgsql_la_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = $(PGSQL_CFLAGS) +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = 
@abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +PLUGIN_FLAGS = -module -avoid-version --export-dynamic +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common +pkglib_LTLIBRARIES = jobcomp_pgsql.la + +# Pgsql storage plugin. +jobcomp_pgsql_la_SOURCES = jobcomp_pgsql.c \ + pgsql_jobcomp_process.c pgsql_jobcomp_process.h + +jobcomp_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +jobcomp_pgsql_la_CFLAGS = $(PGSQL_CFLAGS) +jobcomp_pgsql_la_LIBADD = $(top_builddir)/src/database/libslurm_pgsql.la \ + $(PGSQL_LIBS) + +jobcomp_pgsql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_pgsql.la +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobcomp/pgsql/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/plugins/jobcomp/pgsql/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + if test -f $$p; then \ + f=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + else :; fi; \ + done + +uninstall-pkglibLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + p=$(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + done + +clean-pkglibLTLIBRARIES: + -test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES) + @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +jobcomp_pgsql.la: $(jobcomp_pgsql_la_OBJECTS) $(jobcomp_pgsql_la_DEPENDENCIES) + $(jobcomp_pgsql_la_LINK) -rpath $(pkglibdir) $(jobcomp_pgsql_la_OBJECTS) $(jobcomp_pgsql_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobcomp_pgsql_la-jobcomp_pgsql.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobcomp_pgsql_la-pgsql_jobcomp_process.Plo@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ 
$(LTCOMPILE) -c -o $@ $< + +jobcomp_pgsql_la-jobcomp_pgsql.lo: jobcomp_pgsql.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_pgsql_la_CFLAGS) $(CFLAGS) -MT jobcomp_pgsql_la-jobcomp_pgsql.lo -MD -MP -MF $(DEPDIR)/jobcomp_pgsql_la-jobcomp_pgsql.Tpo -c -o jobcomp_pgsql_la-jobcomp_pgsql.lo `test -f 'jobcomp_pgsql.c' || echo '$(srcdir)/'`jobcomp_pgsql.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/jobcomp_pgsql_la-jobcomp_pgsql.Tpo $(DEPDIR)/jobcomp_pgsql_la-jobcomp_pgsql.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='jobcomp_pgsql.c' object='jobcomp_pgsql_la-jobcomp_pgsql.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_pgsql_la_CFLAGS) $(CFLAGS) -c -o jobcomp_pgsql_la-jobcomp_pgsql.lo `test -f 'jobcomp_pgsql.c' || echo '$(srcdir)/'`jobcomp_pgsql.c + +jobcomp_pgsql_la-pgsql_jobcomp_process.lo: pgsql_jobcomp_process.c +@am__fastdepCC_TRUE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_pgsql_la_CFLAGS) $(CFLAGS) -MT jobcomp_pgsql_la-pgsql_jobcomp_process.lo -MD -MP -MF $(DEPDIR)/jobcomp_pgsql_la-pgsql_jobcomp_process.Tpo -c -o jobcomp_pgsql_la-pgsql_jobcomp_process.lo `test -f 'pgsql_jobcomp_process.c' || echo '$(srcdir)/'`pgsql_jobcomp_process.c +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/jobcomp_pgsql_la-pgsql_jobcomp_process.Tpo $(DEPDIR)/jobcomp_pgsql_la-pgsql_jobcomp_process.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='pgsql_jobcomp_process.c' object='jobcomp_pgsql_la-pgsql_jobcomp_process.lo' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(jobcomp_pgsql_la_CFLAGS) $(CFLAGS) -c -o jobcomp_pgsql_la-pgsql_jobcomp_process.lo `test -f 'pgsql_jobcomp_process.c' || echo '$(srcdir)/'`pgsql_jobcomp_process.c + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; 
then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(pkglibdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-pkglibLTLIBRARIES + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-pkglibLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-pkglibLTLIBRARIES ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-pkglibLTLIBRARIES \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c new file mode 100644 index 000000000..a56157c07 --- /dev/null +++ b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c @@ -0,0 +1,482 @@ +/*****************************************************************************\ + * jobcomp_pgsql.c - Store/Get all information in a postgresql storage. + * + * $Id: storage_pgsql.c 10893 2007-01-29 21:53:48Z da $ + ***************************************************************************** + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. 
If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "pgsql_jobcomp_process.h" +#include <pwd.h> +#include <grp.h> +#include <sys/types.h> +#include "src/common/parse_time.h" +#include "src/common/node_select.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobacct/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job accounting API + * matures. 
+ */ +const char plugin_name[] = "Job completion POSTGRESQL plugin"; +const char plugin_type[] = "jobcomp/pgsql"; +const uint32_t plugin_version = 100; + +#ifdef HAVE_PGSQL + +#define DEFAULT_JOBCOMP_DB "slurm_jobcomp_db" + +PGconn *jobcomp_pgsql_db = NULL; + +char *jobcomp_table = "jobcomp_table"; +storage_field_t jobcomp_table_fields[] = { + { "jobid", "integer not null" }, + { "uid", "smallint not null" }, + { "user_name", "text not null" }, + { "gid", "smallint not null" }, + { "group_name", "text not null" }, + { "name", "text not null" }, + { "state", "smallint not null" }, + { "partition", "text not null" }, + { "timelimit", "text not null" }, + { "starttime", "bigint default 0 not null" }, + { "endtime", "bigint default 0 not null" }, + { "nodelist", "text" }, + { "nodecnt", "integer not null" }, + { "proc_cnt", "integer not null" }, +#ifdef HAVE_BG + { "connect_type", "text" }, + { "reboot", "text" }, + { "rotate", "text" }, + { "maxprocs", "integer default 0 not null" }, + { "geometry", "text" }, + { "start", "text" }, + { "blockid", "text" }, +#endif + { NULL, NULL} +}; + +/* Type for error string table entries */ +typedef struct { + int xe_number; + char *xe_message; +} slurm_errtab_t; + +static slurm_errtab_t slurm_errtab[] = { + {0, "No error"}, + {-1, "Unspecified error"} +}; + +/* A plugin-global errno. */ +static int plugin_errno = SLURM_SUCCESS; + +/* File descriptor used for logging */ +static pthread_mutex_t jobcomp_lock = PTHREAD_MUTEX_INITIALIZER; + +static pgsql_db_info_t *_pgsql_jobcomp_create_db_info() +{ + pgsql_db_info_t *db_info = xmalloc(sizeof(pgsql_db_info_t)); + db_info->port = slurm_get_jobcomp_port(); + /* it turns out it is better if using defaults to let postgres + handle them on it's own terms */ + if(!db_info->port) + db_info->port = 5432; + db_info->host = slurm_get_jobcomp_host(); + db_info->user = slurm_get_jobcomp_user(); + db_info->pass = slurm_get_jobcomp_pass(); + return db_info; +} + +static int _pgsql_jobcomp_check_tables(char *user) +{ + + int i = 0, job_found = 0; + PGresult *result = NULL; + char *query = xstrdup_printf("select tablename from pg_tables " + "where tableowner='%s' " + "and tablename !~ '^pg_+'", user); + + if(!(result = + pgsql_db_query_ret(jobcomp_pgsql_db, query))) { + xfree(query); + return SLURM_ERROR; + } + xfree(query); + + for (i = 0; i < PQntuples(result); i++) { + if(!job_found + && !strcmp(jobcomp_table, PQgetvalue(result, i, 0))) + job_found = 1; + } + PQclear(result); + + if(!job_found) + if(pgsql_db_create_table(jobcomp_pgsql_db, jobcomp_table, + jobcomp_table_fields, + ")") == SLURM_ERROR) + return SLURM_ERROR; + + return SLURM_SUCCESS; +} + + +/* get the user name for the give user_id */ +static char *_get_user_name(uint32_t user_id) +{ + static uint32_t cache_uid = 0; + static char cache_name[32] = "root"; + struct passwd * user_info = NULL; + char *ret_name = NULL; + + slurm_mutex_lock(&jobcomp_lock); + if (user_id != cache_uid) { + user_info = getpwuid((uid_t) user_id); + if (user_info && user_info->pw_name[0]) + snprintf(cache_name, sizeof(cache_name), "%s", + user_info->pw_name); + else + snprintf(cache_name, sizeof(cache_name), "Unknown"); + cache_uid = user_id; + } + ret_name = xstrdup(cache_name); + slurm_mutex_unlock(&jobcomp_lock); + + return ret_name; +} + +/* get the group name for the give group_id */ +static char *_get_group_name(uint32_t group_id) +{ + static uint32_t cache_gid = 0; + static char cache_name[32] = "root"; + struct group *group_info = NULL; + char *ret_name = NULL; + + 
slurm_mutex_lock(&jobcomp_lock); + if (group_id != cache_gid) { + group_info = getgrgid((gid_t) group_id); + if (group_info && group_info->gr_name[0]) + snprintf(cache_name, sizeof(cache_name), "%s", + group_info->gr_name); + else + snprintf(cache_name, sizeof(cache_name), "Unknown"); + cache_gid = group_id; + } + ret_name = xstrdup(cache_name); + slurm_mutex_unlock(&jobcomp_lock); + + return ret_name; +} + +/* + * Linear search through table of errno values and strings, + * returns NULL on error, string on success. + */ +static char *_lookup_slurm_api_errtab(int errnum) +{ + char *res = NULL; + int i; + + for (i = 0; i < sizeof(slurm_errtab) / sizeof(slurm_errtab_t); i++) { + if (slurm_errtab[i].xe_number == errnum) { + res = slurm_errtab[i].xe_message; + break; + } + } + return res; +} +#endif + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +extern int init ( void ) +{ + static int first = 1; +#ifndef HAVE_PGSQL + fatal("No Postgresql storage was found on the machine. " + "Please check the configure ran and run again."); +#endif + if(first) { + /* since this can be loaded from many different places + only tell us once. */ + verbose("%s loaded", plugin_name); + first = 0; + } else { + debug4("%s loaded", plugin_name); + } + + return SLURM_SUCCESS; +} + +extern int fini ( void ) +{ +#ifdef HAVE_PGSQL + if (jobcomp_pgsql_db) { + PQfinish(jobcomp_pgsql_db); + jobcomp_pgsql_db = NULL; + } + return SLURM_SUCCESS; +#else + return SLURM_ERROR; +#endif +} + +extern int slurm_jobcomp_set_location(char *location) +{ +#ifdef HAVE_PGSQL + pgsql_db_info_t *db_info = _pgsql_jobcomp_create_db_info(); + int rc = SLURM_SUCCESS; + char *db_name = NULL; + int i = 0; + + if(jobcomp_pgsql_db && PQstatus(jobcomp_pgsql_db) == CONNECTION_OK) + return SLURM_SUCCESS; + + if(!location) + db_name = DEFAULT_JOBCOMP_DB; + else { + while(location[i]) { + if(location[i] == '.' || location[i] == '/') { + debug("%s doesn't look like a database " + "name using %s", + location, DEFAULT_JOBCOMP_DB); + break; + } + i++; + } + if(location[i]) + db_name = DEFAULT_JOBCOMP_DB; + else + db_name = location; + } + + debug2("pgsql_connect() called for db %s", db_name); + + pgsql_get_db_connection(&jobcomp_pgsql_db, db_name, db_info); + + rc = _pgsql_jobcomp_check_tables(db_info->user); + + destroy_pgsql_db_info(db_info); + + if(rc == SLURM_SUCCESS) + debug("Jobcomp database init finished"); + else + debug("Jobcomp database init failed"); + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int slurm_jobcomp_log_record(struct job_record *job_ptr) +{ +#ifdef HAVE_PGSQL + int rc = SLURM_SUCCESS; + char *usr_str = NULL, *grp_str = NULL, lim_str[32]; +#ifdef HAVE_BG + char connect_type[128]; + char reboot[4]; + char rotate[4]; + char maxprocs[20]; + char geometry[20]; + char start[20]; + char blockid[128]; +#endif + enum job_states job_state; + char query[1024]; + + if(!jobcomp_pgsql_db || PQstatus(jobcomp_pgsql_db) != CONNECTION_OK) { + char *loc = slurm_get_jobcomp_loc(); + if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) { + xfree(loc); + return SLURM_ERROR; + } + xfree(loc); + } + + usr_str = _get_user_name(job_ptr->user_id); + grp_str = _get_group_name(job_ptr->group_id); + if (job_ptr->time_limit == INFINITE) + strcpy(lim_str, "UNLIMITED"); + else + snprintf(lim_str, sizeof(lim_str), "%lu", + (unsigned long) job_ptr->time_limit); + + /* Job will typically be COMPLETING when this is called. 
+ * We remove this flag to get the eventual completion state: + * JOB_FAILED, JOB_TIMEOUT, etc. */ + job_state = job_ptr->job_state & (~JOB_COMPLETING); + +#ifdef HAVE_BG + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + connect_type, sizeof(connect_type), SELECT_PRINT_CONNECTION); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + reboot, sizeof(reboot), SELECT_PRINT_REBOOT); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + rotate, sizeof(rotate), SELECT_PRINT_ROTATE); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + maxprocs, sizeof(maxprocs), SELECT_PRINT_MAX_PROCS); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + geometry, sizeof(geometry), SELECT_PRINT_GEOMETRY); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + start, sizeof(start), SELECT_PRINT_START); + select_g_sprint_jobinfo(job_ptr->select_jobinfo, + blockid, sizeof(blockid), SELECT_PRINT_BG_ID); +#endif + snprintf(query, sizeof(query), + "insert into %s (jobid, uid, user_name, gid, group_name, " + "name, state, proc_cnt, " + "partition, timelimit, starttime, endtime, nodelist, nodecnt" +#ifdef HAVE_BG + ", connect_type, reboot, rotate, maxprocs, geometry, " + "start, blockid" +#endif + ") values (%u, %u, '%s', %u, '%s', '%s', %d, %u, " + "'%s', '%s', %u, %u, '%s', %u" +#ifdef HAVE_BG + ", '%s', '%s', '%s', %s, '%s', '%s', '%s'" +#endif + ")", + jobcomp_table, job_ptr->job_id, job_ptr->user_id, usr_str, + job_ptr->group_id, grp_str, job_ptr->name, job_state, + job_ptr->total_procs, job_ptr->partition, lim_str, + (int)job_ptr->start_time, (int)job_ptr->end_time, + job_ptr->nodes, job_ptr->node_cnt +#ifdef HAVE_BG + , connect_type, reboot, rotate, maxprocs, geometry, + start, blockid +#endif + ); + //info("here is the query %s", query); + + rc = pgsql_db_query(jobcomp_pgsql_db, query); + xfree(usr_str); + + return rc; +#else + return SLURM_ERROR; +#endif +} + +extern int slurm_jobcomp_get_errno() +{ +#ifdef HAVE_PGSQL + return plugin_errno; +#else + return SLURM_ERROR; +#endif +} + +extern char *slurm_jobcomp_strerror(int errnum) +{ +#ifdef HAVE_PGSQL + char *res = _lookup_slurm_api_errtab(errnum); + return (res ? 
res : strerror(errnum)); +#else + return NULL; +#endif +} + +/* + * get info from the storage + * in/out job_list List of job_rec_t * + * note List needs to be freed when called + */ +extern List slurm_jobcomp_get_jobs(List selected_steps, + List selected_parts, + void *params) +{ + List job_list = NULL; + +#ifdef HAVE_PGSQL + if(!jobcomp_pgsql_db || PQstatus(jobcomp_pgsql_db) != CONNECTION_OK) { + char *loc = slurm_get_jobcomp_loc(); + if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) { + xfree(loc); + return NULL; + } + xfree(loc); + } + + job_list = pgsql_jobcomp_process_get_jobs(selected_steps, + selected_parts, + params); +#endif + return job_list; +} + +/* + * expire old info from the storage + */ +extern void slurm_jobcomp_archive(List selected_parts, void *params) +{ +#ifdef HAVE_PGSQL + if(!jobcomp_pgsql_db || PQstatus(jobcomp_pgsql_db) != CONNECTION_OK) { + char *loc = slurm_get_jobcomp_loc(); + if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) { + xfree(loc); + return; + } + xfree(loc); + } + + pgsql_jobcomp_process_archive(selected_parts, params); +#endif + return; +} diff --git a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c new file mode 100644 index 000000000..38e477527 --- /dev/null +++ b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c @@ -0,0 +1,223 @@ +/*****************************************************************************\ + * pgsql_jobcomp_process.c - functions the processing of + * information from the pgsql jobcomp + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#include <stdlib.h> +#include "src/common/parse_time.h" +#include "src/common/xstring.h" +#include "pgsql_jobcomp_process.h" + +#ifdef HAVE_PGSQL +static void _do_fdump(PGresult *result, int lc) +{ + int i = 0; + printf("\n------- Line %d -------\n", lc); + while(jobcomp_table_fields[i].name) { + printf("%12s: %s\n", jobcomp_table_fields[i].name, + PQgetvalue(result, lc, i)); + i++; + } + + return; +} + +extern List pgsql_jobcomp_process_get_jobs(List selected_steps, + List selected_parts, + sacct_parameters_t *params) +{ + + char *query = NULL; + char *extra = NULL; + char *tmp = NULL; + char *selected_part = NULL; + jobacct_selected_step_t *selected_step = NULL; + ListIterator itr = NULL; + int set = 0; + PGresult *result = NULL; + int i; + jobcomp_job_rec_t *job = NULL; + char time_str[32]; + time_t temp_time; + List job_list = NULL; + + if(selected_steps && list_count(selected_steps)) { + set = 0; + xstrcat(extra, " where ("); + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + tmp = xstrdup_printf("jobid=%d", + selected_step->jobid); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + if(selected_parts && list_count(selected_parts)) { + set = 0; + if(extra) + xstrcat(extra, " && ("); + else + xstrcat(extra, " where ("); + + itr = list_iterator_create(selected_parts); + while((selected_part = list_next(itr))) { + if(set) + xstrcat(extra, " || "); + tmp = xstrdup_printf("partition='%s'", + selected_part); + xstrcat(extra, tmp); + set = 1; + xfree(tmp); + } + list_iterator_destroy(itr); + xstrcat(extra, ")"); + } + + i = 0; + while(jobcomp_table_fields[i].name) { + if(i) + xstrcat(tmp, ", "); + xstrcat(tmp, jobcomp_table_fields[i].name); + i++; + } + + query = xstrdup_printf("select %s from %s", tmp, jobcomp_table); + xfree(tmp); + + if(extra) { + xstrcat(query, extra); + xfree(extra); + } + + //info("query = %s", query); + if(!(result = + pgsql_db_query_ret(jobcomp_pgsql_db, query))) { + xfree(query); + return NULL; + } + xfree(query); + + job_list = list_create(jobcomp_destroy_job); + for (i = 0; i < PQntuples(result); i++) { + + if (params->opt_fdump) { + _do_fdump(result, i); + continue; + } + job = xmalloc(sizeof(jobcomp_job_rec_t)); + if(PQgetvalue(result, i, JOBCOMP_REQ_JOBID)) + job->jobid = + atoi(PQgetvalue(result, i, JOBCOMP_REQ_JOBID)); + job->partition = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_PARTITION)); + temp_time = atoi(PQgetvalue(result, i, JOBCOMP_REQ_STARTTIME)); + slurm_make_time_str(&temp_time, + time_str, + sizeof(time_str)); + job->start_time = xstrdup(time_str); + + temp_time = atoi(PQgetvalue(result, i, JOBCOMP_REQ_ENDTIME)); + slurm_make_time_str(&temp_time, + time_str, + sizeof(time_str)); + job->end_time = xstrdup(time_str); + + if(PQgetvalue(result, i, JOBCOMP_REQ_UID)) + job->uid = + atoi(PQgetvalue(result, i, JOBCOMP_REQ_UID)); + job->uid_name = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_USER_NAME)); + if(PQgetvalue(result, i, JOBCOMP_REQ_GID)) + job->gid = + atoi(PQgetvalue(result, i, JOBCOMP_REQ_GID)); + job->gid_name = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_GROUP_NAME)); + job->jobname = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_NAME)); + job->nodelist = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_NODELIST)); + if(PQgetvalue(result, i, JOBCOMP_REQ_NODECNT)) + job->node_cnt = + atoi(PQgetvalue(result, i, 
JOBCOMP_REQ_NODECNT)); + if(PQgetvalue(result, i, JOBCOMP_REQ_STATE)) { + int j = atoi(PQgetvalue(result, i, JOBCOMP_REQ_STATE)); + job->state = xstrdup(job_state_string(j)); + } + job->timelimit = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_TIMELIMIT)); +#ifdef HAVE_BG + if(PQgetvalue(result, i, JOBCOMP_REQ_MAXPROCS)) + job->max_procs = + atoi(PQgetvalue(result, i, + JOBCOMP_REQ_MAXPROCS)); + job->blockid = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_BLOCKID)); + job->connection = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_CONNECTION)); + job->reboot = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_REBOOT)); + job->rotate = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_ROTATE)); + job->geo = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_GEOMETRY)); + job->bg_start_point = + xstrdup(PQgetvalue(result, i, JOBCOMP_REQ_START)); +#endif + list_append(job_list, job); + + } + + PQclear(result); + return job_list; +} + +extern void pgsql_jobcomp_process_archive(List selected_parts, + sacct_parameters_t *params) +{ + return; +} + +#endif diff --git a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h new file mode 100644 index 000000000..a3f0c8c35 --- /dev/null +++ b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h @@ -0,0 +1,92 @@ +/*****************************************************************************\ + * pgsql_jobcomp_process.h - functions the processing of + * information from the pgsql jobcomp + * storage. + ***************************************************************************** + * + * Copyright (C) 2004-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * This file is patterned after jobcomp_linux.c, written by Morris Jette and + * Copyright (C) 2002 The Regents of the University of California. 
+\*****************************************************************************/ + +#ifndef _HAVE_PGSQL_JOBCOMP_PROCESS_H +#define _HAVE_PGSQL_JOBCOMP_PROCESS_H + +#include "src/database/pgsql_common.h" +#include "src/common/slurm_jobacct_gather.h" +#include "src/common/slurm_jobcomp.h" + +#ifdef HAVE_PGSQL +extern PGconn *jobcomp_pgsql_db; +extern int jobcomp_db_init; + +extern char *jobcomp_table; +/* This variable and the following enum are related so if you change + the jobcomp_table_fields defined in mysql_jobcomp.c you must update + this enum accordingly. +*/ +extern storage_field_t jobcomp_table_fields[]; +enum { + JOBCOMP_REQ_JOBID, + JOBCOMP_REQ_UID, + JOBCOMP_REQ_USER_NAME, + JOBCOMP_REQ_GID, + JOBCOMP_REQ_GROUP_NAME, + JOBCOMP_REQ_NAME, + JOBCOMP_REQ_STATE, + JOBCOMP_REQ_PARTITION, + JOBCOMP_REQ_TIMELIMIT, + JOBCOMP_REQ_STARTTIME, + JOBCOMP_REQ_ENDTIME, + JOBCOMP_REQ_NODELIST, + JOBCOMP_REQ_NODECNT, + JOBCOMP_REQ_CONNECTION, + JOBCOMP_REQ_REBOOT, + JOBCOMP_REQ_ROTATE, + JOBCOMP_REQ_MAXPROCS, + JOBCOMP_REQ_GEOMETRY, + JOBCOMP_REQ_START, + JOBCOMP_REQ_BLOCKID, + JOBCOMP_REQ_COUNT +}; + +extern List pgsql_jobcomp_process_get_jobs(List selected_steps, + List selected_parts, + sacct_parameters_t *params); + +extern void pgsql_jobcomp_process_archive(List selected_parts, + sacct_parameters_t *params); +#endif + +#endif diff --git a/src/plugins/jobcomp/script/Makefile.in b/src/plugins/jobcomp/script/Makefile.in index 5c262974d..2463aab8b 100644 --- a/src/plugins/jobcomp/script/Makefile.in +++ b/src/plugins/jobcomp/script/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ jobcomp_script_la_OBJECTS = $(am_jobcomp_script_la_OBJECTS) jobcomp_script_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(jobcomp_script_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobcomp/script/jobcomp_script.c b/src/plugins/jobcomp/script/jobcomp_script.c index 6e64a3100..660074bea 100644 --- a/src/plugins/jobcomp/script/jobcomp_script.c +++ b/src/plugins/jobcomp/script/jobcomp_script.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * jobcomp_script.c - Script running slurm job completion logging plugin. - * $Id: jobcomp_script.c 11930 2007-08-03 05:40:18Z grondo $ + * $Id: jobcomp_script.c 14054 2008-05-14 17:06:31Z da $ ***************************************************************************** * Produced at Center for High Performance Computing, North Dakota State * University * Written by Nathan Huff <nhuff@acm.org> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -98,7 +98,7 @@ */ const char plugin_name[] = "Job completion logging script plugin"; const char plugin_type[] = "jobcomp/script"; -const uint32_t plugin_version = 90; +const uint32_t plugin_version = 100; static char * script = NULL; static List comp_list = NULL; @@ -145,8 +145,10 @@ static const char * _jobcomp_script_strerror (int errnum) struct jobcomp_info { uint32_t jobid; uint32_t uid; + uint32_t gid; uint32_t limit; uint32_t nprocs; + uint32_t nnodes; uint16_t batch_flag; time_t submit; time_t start; @@ -165,6 +167,7 @@ static struct jobcomp_info * _jobcomp_info_create (struct job_record *job) j->jobid = job->job_id; j->uid = job->user_id; + j->gid = job->group_id; j->name = xstrdup (job->name); /* @@ -179,10 +182,11 @@ static struct jobcomp_info * _jobcomp_info_create (struct job_record *job) j->limit = job->time_limit; j->start = job->start_time; j->end = job->end_time; - j->submit = job->details ? job->details->submit_time:job->start_time;; + j->submit = job->details ? job->details->submit_time:job->start_time; j->batch_flag = job->batch_flag; j->nodes = xstrdup (job->nodes); - j->nprocs = job->num_procs; + j->nprocs = job->total_procs; + j->nnodes = job->node_cnt; j->account = job->account ? 
xstrdup (job->account) : NULL; return (j); @@ -287,10 +291,12 @@ static char ** _create_environment (struct jobcomp_info *job) _env_append_fmt (&env, "JOBID", "%u", job->jobid); _env_append_fmt (&env, "UID", "%u", job->uid); + _env_append_fmt (&env, "GID", "%u", job->gid); _env_append_fmt (&env, "START", "%lu", job->start); _env_append_fmt (&env, "END", "%lu", job->end); _env_append_fmt (&env, "SUBMIT","%lu", job->submit); _env_append_fmt (&env, "PROCS", "%u", job->nprocs); + _env_append_fmt (&env, "NODECNT", "%u", job->nnodes); _env_append (&env, "BATCH", (job->batch_flag ? "yes" : "no")); _env_append (&env, "NODES", job->nodes); @@ -557,3 +563,26 @@ extern int fini ( void ) return rc; } + +/* + * get info from the storage + * in/out job_list List of job_rec_t * + * note List needs to be freed when called + */ +extern List slurm_jobcomp_get_jobs(List selected_steps, + List selected_parts, + void *params) +{ + + info("This function is not implemented."); + return NULL; +} + +/* + * expire old info from the storage + */ +extern void slurm_jobcomp_archive(List selected_parts, void *params) +{ + info("This function is not implemented."); + return; +} diff --git a/src/plugins/jobcomp/slurmdbd/Makefile.am b/src/plugins/jobcomp/slurmdbd/Makefile.am new file mode 100644 index 000000000..5ce733dbf --- /dev/null +++ b/src/plugins/jobcomp/slurmdbd/Makefile.am @@ -0,0 +1,13 @@ +# Makefile for jobcomp/slurmdbd plugin + +AUTOMAKE_OPTIONS = foreign + +PLUGIN_FLAGS = -module -avoid-version --export-dynamic + +INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common + +pkglib_LTLIBRARIES = jobcomp_slurmdbd.la + +# Null job completion logging plugin. +jobcomp_slurmdbd_la_SOURCES = jobcomp_slurmdbd.c +jobcomp_slurmdbd_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) diff --git a/src/plugins/jobacct/none/Makefile.in b/src/plugins/jobcomp/slurmdbd/Makefile.in similarity index 86% rename from src/plugins/jobacct/none/Makefile.in rename to src/plugins/jobcomp/slurmdbd/Makefile.in index ad57db32d..42f439efd 100644 --- a/src/plugins/jobacct/none/Makefile.in +++ b/src/plugins/jobcomp/slurmdbd/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -14,7 +14,7 @@ @SET_MAKE@ -# Makefile for jobacct/none plugin +# Makefile for jobcomp/slurmdbd plugin VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ @@ -35,7 +35,7 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ target_triplet = @target@ -subdir = src/plugins/jobacct/none +subdir = src/plugins/jobcomp/slurmdbd DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,13 +75,13 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -jobacct_none_la_LIBADD = -am_jobacct_none_la_OBJECTS = jobacct_none.lo -jobacct_none_la_OBJECTS = $(am_jobacct_none_la_OBJECTS) -jobacct_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ +jobcomp_slurmdbd_la_LIBADD = +am_jobcomp_slurmdbd_la_OBJECTS = jobcomp_slurmdbd.lo +jobcomp_slurmdbd_la_OBJECTS = $(am_jobcomp_slurmdbd_la_OBJECTS) +jobcomp_slurmdbd_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ - $(jobacct_none_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ + $(jobcomp_slurmdbd_la_LDFLAGS) $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -91,8 +93,8 @@ CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ -SOURCES = $(jobacct_none_la_SOURCES) -DIST_SOURCES = $(jobacct_none_la_SOURCES) +SOURCES = $(jobcomp_slurmdbd_la_SOURCES) +DIST_SOURCES = $(jobcomp_slurmdbd_la_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ 
SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -255,11 +268,11 @@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign PLUGIN_FLAGS = -module -avoid-version --export-dynamic INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common -pkglib_LTLIBRARIES = jobacct_none.la +pkglib_LTLIBRARIES = jobcomp_slurmdbd.la # Null job completion logging plugin. -jobacct_none_la_SOURCES = jobacct_none.c -jobacct_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) +jobcomp_slurmdbd_la_SOURCES = jobcomp_slurmdbd.c +jobcomp_slurmdbd_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) all: all-am .SUFFIXES: @@ -273,9 +286,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobacct/none/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobcomp/slurmdbd/Makefile'; \ cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign src/plugins/jobacct/none/Makefile + $(AUTOMAKE) --foreign src/plugins/jobcomp/slurmdbd/Makefile .PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -320,8 +333,8 @@ clean-pkglibLTLIBRARIES: echo "rm -f \"$${dir}/so_locations\""; \ rm -f "$${dir}/so_locations"; \ done -jobacct_none.la: $(jobacct_none_la_OBJECTS) $(jobacct_none_la_DEPENDENCIES) - $(jobacct_none_la_LINK) -rpath $(pkglibdir) $(jobacct_none_la_OBJECTS) $(jobacct_none_la_LIBADD) $(LIBS) +jobcomp_slurmdbd.la: $(jobcomp_slurmdbd_la_OBJECTS) $(jobcomp_slurmdbd_la_DEPENDENCIES) + $(jobcomp_slurmdbd_la_LINK) -rpath $(pkglibdir) $(jobcomp_slurmdbd_la_OBJECTS) $(jobcomp_slurmdbd_la_LIBADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) @@ -329,7 +342,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobacct_none.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/jobcomp_slurmdbd.Plo@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/jobcomp/slurmdbd/jobcomp_slurmdbd.c b/src/plugins/jobcomp/slurmdbd/jobcomp_slurmdbd.c new file mode 100644 index 000000000..aceee6cc8 --- /dev/null +++ b/src/plugins/jobcomp/slurmdbd/jobcomp_slurmdbd.c @@ -0,0 +1,139 @@ +/*****************************************************************************\ + * jobcomp_slurmdbd.c - SlurmDBD slurm job completion plugin. + ***************************************************************************** + * Copyright (C) 2002-2006 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Aubke <da@llnl.gov>. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. 
+ * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_STDINT_H +# include <stdint.h> +#endif +#if HAVE_INTTYPES_H +# include <inttypes.h> +#endif + +#include <stdio.h> +#include <slurm/slurm_errno.h> + +#include "src/common/slurm_jobcomp.h" +#include "src/slurmctld/slurmctld.h" + +/* + * These variables are required by the generic plugin interface. If they + * are not found in the plugin, the plugin loader will ignore it. + * + * plugin_name - a string giving a human-readable description of the + * plugin. There is no maximum length, but the symbol must refer to + * a valid string. + * + * plugin_type - a string suggesting the type of the plugin or its + * applicability to a particular form of data or method of data handling. + * If the low-level plugin API is used, the contents of this string are + * unimportant and may be anything. SLURM uses the higher-level plugin + * interface which requires this string to be of the form + * + * <application>/<method> + * + * where <application> is a description of the intended application of + * the plugin (e.g., "jobcomp" for SLURM job completion logging) and <method> + * is a description of how this plugin satisfies that application. SLURM will + * only load job completion logging plugins if the plugin_type string has a + * prefix of "jobcomp/". + * + * plugin_version - an unsigned 32-bit integer giving the version number + * of the plugin. If major and minor revisions are desired, the major + * version number may be multiplied by a suitable magnitude constant such + * as 100 or 1000. Various SLURM versions will likely require a certain + * minimum versions for their plugins as the job completion logging API + * matures. + */ +const char plugin_name[] = "Job completion logging SLURMDBD plugin"; +const char plugin_type[] = "jobcomp/slurmdbd"; +const uint32_t plugin_version = 100; + +/* + * init() is called when the plugin is loaded, before any other functions + * are called. Put global initialization here. + */ +int init ( void ) +{ + return SLURM_SUCCESS; +} + +/* + * The remainder of this file implements the standard SLURM job completion + * logging API. + */ + +int slurm_jobcomp_set_location ( char * location ) +{ + return SLURM_SUCCESS; +} + +int slurm_jobcomp_log_record ( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + +int slurm_jobcomp_get_errno( void ) +{ + return SLURM_SUCCESS; +} + +char *slurm_jobcomp_strerror( int errnum ) +{ + return NULL; +} + +List slurm_jobcomp_get_jobs(List selected_steps, List selected_parts, + void *params) +{ + return NULL; +} + +void slurm_jobcomp_archive(List selected_parts, + void *params) +{ + return; +} + +int fini ( void ) +{ + return SLURM_SUCCESS; +} + + diff --git a/src/plugins/mpi/Makefile.in b/src/plugins/mpi/Makefile.in index 48bf6e365..5bed871f5 100644 --- a/src/plugins/mpi/Makefile.in +++ b/src/plugins/mpi/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. 
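The header comment in jobcomp_slurmdbd.c above spells out the contract every SLURM plugin must satisfy: export plugin_name, plugin_type in the <application>/<method> form, and plugin_version, with job completion plugins required to use a "jobcomp/" prefix. A hedged sketch of that contract follows; only the symbol names, the prefix rule, and SLURM_SUCCESS come from the text above, while the "mylogger" method name and the helper function are made up for illustration.

/* Hypothetical skeleton of the symbols the plugin loader looks for.
 * Only the symbol names, the "jobcomp/" prefix rule, and SLURM_SUCCESS
 * come from the interface description above; "mylogger" is invented. */
#include <stdint.h>
#include <string.h>
#include <slurm/slurm_errno.h>

const char     plugin_name[]  = "Job completion logging example plugin";
const char     plugin_type[]  = "jobcomp/mylogger";  /* <application>/<method> */
const uint32_t plugin_version = 100;                 /* e.g. major*100 + minor */

int init(void) { return SLURM_SUCCESS; }
int fini(void) { return SLURM_SUCCESS; }

/* Sketch of the prefix test described above: a loader would only accept
 * this plugin for job completion logging if the type string starts with
 * "jobcomp/". */
static int is_jobcomp_plugin(const char *type)
{
        return strncmp(type, "jobcomp/", 8) == 0;
}

The jobcomp_slurmdbd.c stub introduced by this patch follows exactly this shape, with every entry point reduced to a no-op that returns SLURM_SUCCESS or NULL.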
+# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ 
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/lam/Makefile.in b/src/plugins/mpi/lam/Makefile.in index 9fc4e0917..554b35b32 100644 --- a/src/plugins/mpi/lam/Makefile.in +++ b/src/plugins/mpi/lam/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_lam_la_OBJECTS = $(am_mpi_lam_la_OBJECTS) mpi_lam_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_lam_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -297,8 +310,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -306,8 +319,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -361,8 +374,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -374,8 +387,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -385,13 +398,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/lam/lam.h b/src/plugins/mpi/lam/lam.h index ab493a71e..b09c0cbe8 100644 --- a/src/plugins/mpi/lam/lam.h +++ b/src/plugins/mpi/lam/lam.h @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/lam/mpi_lam.c b/src/plugins/mpi/lam/mpi_lam.c index c366fb6aa..c1d90bbcf 100644 --- a/src/plugins/mpi/lam/mpi_lam.c +++ b/src/plugins/mpi/lam/mpi_lam.c @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mpich1_p4/Makefile.in b/src/plugins/mpi/mpich1_p4/Makefile.in index f30e9f8d1..3d9ef1ecc 100644 --- a/src/plugins/mpi/mpich1_p4/Makefile.in +++ b/src/plugins/mpi/mpich1_p4/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_mpich1_p4_la_OBJECTS = $(am_mpi_mpich1_p4_la_OBJECTS) mpi_mpich1_p4_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_mpich1_p4_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) 
--mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/mpich1_p4/mpich1_p4.c b/src/plugins/mpi/mpich1_p4/mpich1_p4.c index e58f39a6a..66303a9a2 100644 --- a/src/plugins/mpi/mpich1_p4/mpich1_p4.c +++ b/src/plugins/mpi/mpich1_p4/mpich1_p4.c @@ -4,7 +4,7 @@ * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mpich1_shmem/Makefile.in b/src/plugins/mpi/mpich1_shmem/Makefile.in index cbdc0c145..8f832878a 100644 --- a/src/plugins/mpi/mpich1_shmem/Makefile.in +++ b/src/plugins/mpi/mpich1_shmem/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_mpich1_shmem_la_OBJECTS = $(am_mpi_mpich1_shmem_la_OBJECTS) mpi_mpich1_shmem_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_mpich1_shmem_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -297,8 +310,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -306,8 +319,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - 
$(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -361,8 +374,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -374,8 +387,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -385,13 +398,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c index 19bb60d01..2a848feac 100644 --- a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c +++ b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c @@ -5,7 +5,7 @@ * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mpichgm/Makefile.in b/src/plugins/mpi/mpichgm/Makefile.in index 4d723c0aa..23a04991e 100644 --- a/src/plugins/mpi/mpichgm/Makefile.in +++ b/src/plugins/mpi/mpichgm/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_mpichgm_la_OBJECTS = $(am_mpi_mpichgm_la_OBJECTS) mpi_mpichgm_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_mpichgm_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -301,8 +314,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -310,8 +323,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall 
rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -366,8 +379,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -379,8 +392,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -390,13 +403,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/mpichgm/mpi_mpichgm.c b/src/plugins/mpi/mpichgm/mpi_mpichgm.c index 2d20c249e..3524f351b 100644 --- a/src/plugins/mpi/mpichgm/mpi_mpichgm.c +++ b/src/plugins/mpi/mpichgm/mpi_mpichgm.c @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mpichgm/mpichgm.c b/src/plugins/mpi/mpichgm/mpichgm.c index e5a1d6f8c..c42295e7c 100644 --- a/src/plugins/mpi/mpichgm/mpichgm.c +++ b/src/plugins/mpi/mpichgm/mpichgm.c @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Takao Hatazaki <takao.hatazaki@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mpichgm/mpichgm.h b/src/plugins/mpi/mpichgm/mpichgm.h index 8f092e832..f7180de8d 100644 --- a/src/plugins/mpi/mpichgm/mpichgm.h +++ b/src/plugins/mpi/mpichgm/mpichgm.h @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/src/plugins/mpi/mpichmx/Makefile.in b/src/plugins/mpi/mpichmx/Makefile.in index a59463b8b..05febabcd 100644 --- a/src/plugins/mpi/mpichmx/Makefile.in +++ b/src/plugins/mpi/mpichmx/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_mpichmx_la_OBJECTS = $(am_mpi_mpichmx_la_OBJECTS) mpi_mpichmx_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_mpichmx_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -301,8 +314,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) 
$(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -310,8 +323,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -366,8 +379,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -379,8 +392,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -390,13 +403,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/mpichmx/mpi_mpichmx.c b/src/plugins/mpi/mpichmx/mpi_mpichmx.c index 3cdc84235..77b67a303 100644 --- a/src/plugins/mpi/mpichmx/mpi_mpichmx.c +++ b/src/plugins/mpi/mpichmx/mpi_mpichmx.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mpichmx/mpichmx.c b/src/plugins/mpi/mpichmx/mpichmx.c index b1d48fffd..19998ab74 100644 --- a/src/plugins/mpi/mpichmx/mpichmx.c +++ b/src/plugins/mpi/mpichmx/mpichmx.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Takao Hatazaki <takao.hatazaki@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mpichmx/mpichmx.h b/src/plugins/mpi/mpichmx/mpichmx.h index cdcade47c..6f71bb29b 100644 --- a/src/plugins/mpi/mpichmx/mpichmx.h +++ b/src/plugins/mpi/mpichmx/mpichmx.h @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mvapich/Makefile.in b/src/plugins/mpi/mvapich/Makefile.in index 7cf94d7fd..0bcd876e3 100644 --- a/src/plugins/mpi/mvapich/Makefile.in +++ b/src/plugins/mpi/mvapich/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_mvapich_la_OBJECTS = $(am_mpi_mvapich_la_OBJECTS) mpi_mvapich_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_mvapich_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -364,8 +377,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -377,8 +390,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -388,13 +401,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/mvapich/mpi_mvapich.c b/src/plugins/mpi/mvapich/mpi_mvapich.c index c6e247291..01e516354 100644 --- a/src/plugins/mpi/mvapich/mpi_mvapich.c +++ b/src/plugins/mpi/mvapich/mpi_mvapich.c @@ -5,7 +5,7 @@ * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mvapich/mvapich.c b/src/plugins/mpi/mvapich/mvapich.c index 2a2b5541a..3182cb4a6 100644 --- a/src/plugins/mpi/mvapich/mvapich.c +++ b/src/plugins/mpi/mvapich/mvapich.c @@ -4,7 +4,7 @@ * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/mvapich/mvapich.h b/src/plugins/mpi/mvapich/mvapich.h index 257e4b0a6..c4708a176 100644 --- a/src/plugins/mpi/mvapich/mvapich.h +++ b/src/plugins/mpi/mvapich/mvapich.h @@ -5,7 +5,7 @@ * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/none/Makefile.in b/src/plugins/mpi/none/Makefile.in index 97d80b08f..b4ef574dd 100644 --- a/src/plugins/mpi/none/Makefile.in +++ b/src/plugins/mpi/none/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_none_la_OBJECTS = $(am_mpi_none_la_OBJECTS) mpi_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_none_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f 
"$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/none/mpi_none.c b/src/plugins/mpi/none/mpi_none.c index f9288a252..b331e5f77 100644 --- a/src/plugins/mpi/none/mpi_none.c +++ b/src/plugins/mpi/none/mpi_none.c @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/mpi/openmpi/Makefile.in b/src/plugins/mpi/openmpi/Makefile.in index 19e48800d..e156ebc2f 100644 --- a/src/plugins/mpi/openmpi/Makefile.in +++ b/src/plugins/mpi/openmpi/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ mpi_openmpi_la_OBJECTS = $(am_mpi_openmpi_la_OBJECTS) mpi_openmpi_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(mpi_openmpi_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall 
rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/mpi/openmpi/mpi_openmpi.c b/src/plugins/mpi/openmpi/mpi_openmpi.c index 64fb70edd..543b39a1e 100644 --- a/src/plugins/mpi/openmpi/mpi_openmpi.c +++ b/src/plugins/mpi/openmpi/mpi_openmpi.c @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/proctrack/Makefile.in b/src/plugins/proctrack/Makefile.in index 936995f07..02e355b88 100644 --- a/src/plugins/proctrack/Makefile.in +++ b/src/plugins/proctrack/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -351,8 +364,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -377,8 +390,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -388,13 +401,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/src/plugins/proctrack/aix/Makefile.in b/src/plugins/proctrack/aix/Makefile.in index 28d167813..599c8f658 100644 --- a/src/plugins/proctrack/aix/Makefile.in +++ b/src/plugins/proctrack/aix/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ proctrack_aix_la_OBJECTS = $(am_proctrack_aix_la_OBJECTS) proctrack_aix_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(proctrack_aix_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -301,8 +314,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) 
$(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -310,8 +323,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -365,8 +378,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -378,8 +391,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -389,13 +402,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/proctrack/aix/proctrack_aix.c b/src/plugins/proctrack/aix/proctrack_aix.c index df726aba6..d3b7fb63e 100644 --- a/src/plugins/proctrack/aix/proctrack_aix.c +++ b/src/plugins/proctrack/aix/proctrack_aix.c @@ -4,7 +4,7 @@ * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/proctrack/linuxproc/Makefile.in b/src/plugins/proctrack/linuxproc/Makefile.in index 1dadf4deb..2034e44b7 100644 --- a/src/plugins/proctrack/linuxproc/Makefile.in +++ b/src/plugins/proctrack/linuxproc/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -80,7 +82,7 @@ proctrack_linuxproc_la_OBJECTS = $(am_proctrack_linuxproc_la_OBJECTS) proctrack_linuxproc_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(proctrack_linuxproc_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -120,6 +122,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +136,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +162,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +176,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +193,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +209,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -304,8 +317,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' 
'$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -313,8 +326,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -369,8 +382,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -382,8 +395,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -393,13 +406,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/proctrack/linuxproc/kill_tree.c b/src/plugins/proctrack/linuxproc/kill_tree.c index 871fcda55..b792cea0f 100644 --- a/src/plugins/proctrack/linuxproc/kill_tree.c +++ b/src/plugins/proctrack/linuxproc/kill_tree.c @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Takao Hatazaki <takao.hatazaki@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/proctrack/linuxproc/kill_tree.h b/src/plugins/proctrack/linuxproc/kill_tree.h index e27c333f7..8ae0d2c56 100644 --- a/src/plugins/proctrack/linuxproc/kill_tree.h +++ b/src/plugins/proctrack/linuxproc/kill_tree.h @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Takao Hatazaki <takao.hatazaki@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c index 5b4b19712..95331c8d9 100644 --- a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c +++ b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/proctrack/pgid/Makefile.in b/src/plugins/proctrack/pgid/Makefile.in index fe1a1ba08..f8a41c911 100644 --- a/src/plugins/proctrack/pgid/Makefile.in +++ b/src/plugins/proctrack/pgid/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ proctrack_pgid_la_OBJECTS = $(am_proctrack_pgid_la_OBJECTS) proctrack_pgid_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(proctrack_pgid_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -308,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/proctrack/pgid/proctrack_pgid.c b/src/plugins/proctrack/pgid/proctrack_pgid.c index 04880e102..94f6f2b59 100644 --- a/src/plugins/proctrack/pgid/proctrack_pgid.c +++ b/src/plugins/proctrack/pgid/proctrack_pgid.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -46,7 +46,9 @@ # include <inttypes.h> #endif -#define __USE_XOPEN_EXTENDED /* getpgid */ +#ifndef __USE_XOPEN_EXTENDED +# define __USE_XOPEN_EXTENDED /* getpgid */ +#endif #include <unistd.h> #include <sys/types.h> diff --git a/src/plugins/proctrack/rms/Makefile.in b/src/plugins/proctrack/rms/Makefile.in index 3ac284288..4e2607fa9 100644 --- a/src/plugins/proctrack/rms/Makefile.in +++ b/src/plugins/proctrack/rms/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -80,7 +82,7 @@ proctrack_rms_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(proctrack_rms_la_LDFLAGS) $(LDFLAGS) -o $@ @HAVE_ELAN_TRUE@am_proctrack_rms_la_rpath = -rpath $(pkglibdir) -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -120,6 +122,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +136,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +162,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +176,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +193,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +209,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -300,8 +313,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -309,8 +322,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -364,8 +377,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -377,8 +390,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -388,13 +401,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/proctrack/rms/proctrack_rms.c b/src/plugins/proctrack/rms/proctrack_rms.c index e85ce8a2b..d6f6bd747 100644 --- a/src/plugins/proctrack/rms/proctrack_rms.c +++ b/src/plugins/proctrack/rms/proctrack_rms.c @@ -3,7 +3,7 @@ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/proctrack/sgi_job/Makefile.in b/src/plugins/proctrack/sgi_job/Makefile.in index d74e95537..6ad660d31 100644 --- a/src/plugins/proctrack/sgi_job/Makefile.in +++ b/src/plugins/proctrack/sgi_job/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ proctrack_sgi_job_la_OBJECTS = $(am_proctrack_sgi_job_la_OBJECTS) proctrack_sgi_job_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(proctrack_sgi_job_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -297,8 +310,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -306,8 +319,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -361,8 +374,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -374,8 +387,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -385,13 +398,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c index 4a0575080..53fa0d62c 100644 --- a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c +++ b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/Makefile.in b/src/plugins/sched/Makefile.in index f06d022d6..f91deddde 100644 --- a/src/plugins/sched/Makefile.in +++ b/src/plugins/sched/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/src/plugins/sched/backfill/Makefile.in b/src/plugins/sched/backfill/Makefile.in index d6855810e..cd1bb54d8 100644 --- a/src/plugins/sched/backfill/Makefile.in +++ b/src/plugins/sched/backfill/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ sched_backfill_la_OBJECTS = $(am_sched_backfill_la_OBJECTS) sched_backfill_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(sched_backfill_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = -fexceptions CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -300,8 +313,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install 
$(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -309,8 +322,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -365,8 +378,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -378,8 +391,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -389,13 +402,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c index 804d289cd..008607474 100644 --- a/src/plugins/sched/backfill/backfill.c +++ b/src/plugins/sched/backfill/backfill.c @@ -14,10 +14,11 @@ * "lx[06-08]", we can't start it without possibly delaying the higher * priority job. ***************************************************************************** - * Copyright (C) 2003-2006 The Regents of the University of California. + * Copyright (C) 2003-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
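/*
 * A minimal stand-alone sketch of the node_space_map reservation table that
 * the rewritten backfill.c below builds in _attempt_backfill().  This is an
 * editorial illustration, not part of the patch: SLURM's bitstr_t is replaced
 * by a plain 64-bit mask, the list lives in a fixed-size array, and the names
 * window_t and usable_nodes() are assumptions, not SLURM APIs.  It mirrors the
 * walk that intersects the availability of every time window ending no later
 * than the candidate job's expected completion.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_WINDOWS 8

typedef struct window {
    time_t   begin_time;   /* start of this availability window          */
    time_t   end_time;     /* end of this availability window            */
    uint64_t avail_mask;   /* one bit per node still unreserved          */
    int      next;         /* index of next window; 0 terminates the list */
} window_t;

/* Intersect the masks of all windows whose end_time falls at or before the
 * job's expected completion, starting from the partition's node set; this
 * mirrors the avail_bitmap loop in _attempt_backfill(). */
static uint64_t usable_nodes(const window_t *ns, uint64_t part_mask,
                             time_t job_end)
{
    uint64_t mask = part_mask;
    int j = 0;
    for (;;) {
        if (ns[j].end_time <= job_end)
            mask &= ns[j].avail_mask;
        if ((j = ns[j].next) == 0)
            break;
    }
    return mask;
}

int main(void)
{
    time_t now = time(NULL);
    window_t ns[MAX_WINDOWS] = {
        /* window 0: until a reservation begins in one hour, nodes 0-5 idle */
        { now,        now + 3600,  0x3F, 1 },
        /* window 1: from then on, nodes 4-5 are reserved for a higher
         * priority pending job; next = 0 terminates the list */
        { now + 3600, now + 86400, 0x0F, 0 },
    };
    /* A candidate expected to run past the end of both windows is
     * intersected with each of them, so it may only use nodes 0-3 (0x0f). */
    uint64_t mask = usable_nodes(ns, 0x3F, now + 2 * 86400);
    printf("usable node mask: 0x%02llx\n", (unsigned long long) mask);
    return 0;
}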
@@ -59,93 +60,82 @@ #include "slurm/slurm_errno.h" #include "src/common/list.h" #include "src/common/macros.h" +#include "src/common/node_select.h" +#include "src/common/parse_time.h" #include "src/common/slurm_protocol_api.h" #include "src/common/xmalloc.h" #include "src/common/xstring.h" +#include "src/slurmctld/job_scheduler.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/locks.h" +#include "src/slurmctld/node_scheduler.h" #include "src/slurmctld/slurmctld.h" - -typedef struct part_specs { - uint32_t idle_node_cnt; - uint32_t max_cpus; - uint32_t min_cpus; - uint32_t min_mem; - uint32_t min_disk; -} part_specs_t; +#include "src/slurmctld/srun_comm.h" typedef struct node_space_map { - uint32_t idle_node_cnt; - time_t time; + time_t begin_time; + time_t end_time; + bitstr_t *avail_bitmap; + int next; /* next record, by time, zero termination */ } node_space_map_t; +int backfilled_jobs = 0; /*********************** local variables *********************/ -static bool altered_job = false; static bool new_work = false; static bool stop_backfill = false; static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER; -static List pend_job_list = NULL; -static List run_job_list = NULL; - -#define MAX_JOB_CNT 100 -static int node_space_recs; -static node_space_map_t node_space[MAX_JOB_CNT + 1]; +/* Backfill scheduling has considerable overhead, + * so only attempt it every BACKFILL_INTERVAL seconds */ +#ifndef BACKFILL_INTERVAL +# define BACKFILL_INTERVAL 10 +#endif /* Set __DEBUG to get detailed logging for this thread without * detailed logging for the entire slurmctld daemon */ -#define __DEBUG 0 -#define SLEEP_TIME 1 +#define __DEBUG 0 + +/* Do not attempt to build job/resource/time record for + * more than MAX_BACKFILL_JOB_CNT records */ +#define MAX_BACKFILL_JOB_CNT 100 + +/* Do not build job/resource/time record for more than this + * far in the future, in seconds, currently one day */ +#define BACKFILL_WINDOW (24 * 60 * 60) /*********************** local functions *********************/ -static int _add_pending_job(struct job_record *job_ptr, - struct part_record *part_ptr, part_specs_t *part_specs); -static int _add_running_job(struct job_record *job_ptr); -static void _attempt_backfill(struct part_record *part_ptr); -static void _backfill_part(part_specs_t *part_specs); -static void _build_node_space_map(part_specs_t *part_specs); -static void _change_prio(struct job_record *job_ptr, uint32_t prio); +static void _add_reservation(uint32_t start_time, uint32_t end_reserve, + bitstr_t *res_bitmap, + node_space_map_t *node_space, + int *node_space_recs); +static void _attempt_backfill(void); static void _diff_tv_str(struct timeval *tv1,struct timeval *tv2, char *tv_str, int len_tv_str); -static void _dump_node_space_map(uint32_t job_id, uint32_t node_cnt); -static int _get_avail_node_cnt(struct job_record *job_ptr); -static void _get_part_specs(struct part_record *part_ptr, - part_specs_t *part_specs); -static bool _has_state_changed(void); -static bool _loc_restrict(struct job_record *job_ptr, part_specs_t *part_specs); static bool _more_work(void); -static int _sort_by_prio(void *x, void *y); -static int _sort_by_end(void *x, void *y); -static int _update_node_space_map(struct job_record *job_ptr); +static int _start_job(struct job_record *job_ptr, bitstr_t *avail_bitmap); -/* list processing function, sort jobs by _decreasing_ priority */ -static int _sort_by_prio(void *x, void *y) -{ - struct job_record *job_ptr1 = (struct job_record *) x; - struct job_record 
*job_ptr2 = (struct job_record *) y; - double diff = job_ptr2->priority - job_ptr1->priority; - - if (diff > 0) - return 1; - else if (diff < 0) - return -1; - else - return 0; -} - -/* list processing function, sort jobs by _increasing_ end time */ -static int _sort_by_end(void *x, void *y) +#if __DEBUG +/* Log resource allocate table */ +static void _dump_node_space_table(node_space_map_t *node_space_ptr) { - struct job_record *job_ptr1 = (struct job_record *) x; - struct job_record *job_ptr2 = (struct job_record *) y; - double diff = difftime(job_ptr1->end_time, job_ptr2->end_time); - - if (diff > 0) - return 1; - else if (diff < 0) - return -1; - else - return 0; + int i = 0; + char begin_buf[32], end_buf[32], *node_list; + + info("========================================="); + while (1) { + slurm_make_time_str(&node_space_ptr[i].begin_time, + begin_buf, sizeof(begin_buf)); + slurm_make_time_str(&node_space_ptr[i].end_time, + end_buf, sizeof(end_buf)); + node_list = bitmap2node_name(node_space_ptr[i].avail_bitmap); + info("Begin:%s End:%s Nodes:%s", begin_buf, end_buf, node_list); + xfree(node_list); + if ((i = node_space_ptr[i].next) == 0) + break; + } + info("========================================="); } +#endif /* * _diff_tv_str - build a string showing the time difference between two times @@ -171,512 +161,346 @@ extern void stop_backfill_agent(void) /* backfill_agent - detached thread periodically attempts to backfill jobs */ -extern void * -backfill_agent(void *args) +extern void *backfill_agent(void *args) { struct timeval tv1, tv2; char tv_str[20]; - bool filter_root = false; - /* Read config, node, and partitions; Write jobs */ + time_t now; + static time_t last_backfill_time = 0; + /* Read config, and partitions; Write jobs and nodes */ slurmctld_lock_t all_locks = { - READ_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK }; + READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK }; - if (slurm_get_root_filter()) - filter_root = true; while (!stop_backfill) { - sleep(SLEEP_TIME); /* don't run continuously */ - if ((!_more_work()) || stop_backfill) + sleep(2); /* don't run continuously */ + + now = time(NULL); + /* Avoid resource fragmentation if important */ + if (switch_no_frag() && job_is_completing()) + continue; + if ((difftime(now, last_backfill_time) < BACKFILL_INTERVAL) || + stop_backfill || (!_more_work())) continue; + last_backfill_time = now; gettimeofday(&tv1, NULL); lock_slurmctld(all_locks); - if ( _has_state_changed() ) { - ListIterator part_iterator; - struct part_record *part_ptr; - - /* identify partitions eligible for backfill */ - part_iterator = list_iterator_create(part_list); - while ((part_ptr = (struct part_record *) - list_next(part_iterator))) { - if ( ((part_ptr->shared) || - (part_ptr->state_up == 0)) ) - continue; /* not under our control */ - if ((part_ptr->root_only) && filter_root) - continue; - _attempt_backfill(part_ptr); - } - list_iterator_destroy(part_iterator); - } + _attempt_backfill(); unlock_slurmctld(all_locks); gettimeofday(&tv2, NULL); _diff_tv_str(&tv1, &tv2, tv_str, 20); #if __DEBUG info("backfill: completed, %s", tv_str); #endif - if (altered_job) { - altered_job = false; - schedule(); /* has own locks */ - } } return NULL; } -/* trigger the attempt of a backfill */ -extern void -run_backfill (void) -{ - pthread_mutex_lock( &thread_flag_mutex ); - new_work = true; - pthread_mutex_unlock( &thread_flag_mutex ); -} - -static bool -_more_work (void) -{ - static bool rc; - pthread_mutex_lock( &thread_flag_mutex ); - rc = new_work; - new_work = false; - 
pthread_mutex_unlock( &thread_flag_mutex ); - return rc; -} - -/* Report if any changes occurred to job, node or partition information */ -static bool -_has_state_changed(void) +static void _attempt_backfill(void) { - static time_t backfill_job_time = (time_t) 0; - static time_t backfill_node_time = (time_t) 0; - static time_t backfill_part_time = (time_t) 0; + bool filter_root = false; + struct job_queue *job_queue = NULL; + int i, j,job_queue_size, node_space_recs = 0; + struct job_record *job_ptr; + struct part_record *part_ptr; + uint32_t end_time, end_reserve, time_limit; + uint32_t min_nodes, max_nodes, req_nodes; + uint16_t orig_shared; + bitstr_t *avail_bitmap = NULL; + time_t now = time(NULL); + node_space_map_t node_space[MAX_BACKFILL_JOB_CNT + 2]; - if ( (backfill_job_time == last_job_update ) && - (backfill_node_time == last_node_update) && - (backfill_part_time == last_part_update) ) - return false; + if (slurm_get_root_filter()) + filter_root = true; - backfill_job_time = last_job_update; - backfill_node_time = last_node_update; - backfill_part_time = last_part_update; - return true; -} + job_queue_size = build_job_queue(&job_queue); + if (job_queue_size == 0) + return; -/* Attempt to perform backfill scheduling on the specified partition */ -static void -_attempt_backfill(struct part_record *part_ptr) -{ - int i, cg_hung = 0, error_code = 0; - uint32_t max_pending_prio = 0; - uint32_t min_pend_job_size = INFINITE; - struct job_record *job_ptr; - ListIterator job_iterator; - part_specs_t part_specs; - time_t now = time(NULL); + sort_job_queue(job_queue, job_queue_size); + node_space[0].begin_time = now; + node_space[0].end_time = now + BACKFILL_WINDOW; + node_space[0].avail_bitmap = bit_alloc(node_record_count); + bit_or(node_space[0].avail_bitmap, avail_node_bitmap); + node_space[0].next = 0; #if __DEBUG - info("backfill: attempt on partition %s", part_ptr->name); + _dump_node_space_table(node_space); #endif - _get_part_specs(part_ptr, &part_specs); - if (part_specs.idle_node_cnt == 0) - return; /* no idle nodes */ - - pend_job_list = list_create(NULL); - run_job_list = list_create(NULL); - - /* build lists of pending and running jobs in this partition */ - job_iterator = list_iterator_create(job_list); - while ((job_ptr = (struct job_record *) list_next(job_iterator))) { - if (job_ptr->part_ptr != part_ptr) - continue; /* job in different partition */ - - if (job_ptr->job_state & JOB_COMPLETING) { - long wait_time = (long) difftime(now, job_ptr->end_time); - if (wait_time > 600) { - /* Job has been in completing state for - * >10 minutes, try to schedule around it */ - cg_hung++; - continue; - } -#if __DEBUG - info("backfill: Job %u completing, skip partition", - job_ptr->job_id); -#endif - error_code = 1; - break; - } else if (job_ptr->job_state == JOB_RUNNING) { - if (_add_running_job(job_ptr)) { - error_code = 2; - break; - } - } else if (job_ptr->job_state == JOB_PENDING) { - max_pending_prio = MAX(max_pending_prio, - job_ptr->priority); - if (_add_pending_job(job_ptr, part_ptr, &part_specs)) { - error_code = 3; - break; - } - min_pend_job_size = MIN(min_pend_job_size, - job_ptr->node_cnt); + for (i = 0; i < job_queue_size; i++) { + job_ptr = job_queue[i].job_ptr; + part_ptr = job_ptr->part_ptr; + if (part_ptr == NULL) { + part_ptr = find_part_record(job_ptr->partition); + xassert(part_ptr); + job_ptr->part_ptr = part_ptr; + error("partition pointer reset for job %u, part %s", + job_ptr->job_id, job_ptr->partition); } - } - list_iterator_destroy(job_iterator); - if 
(error_code) - goto cleanup; - - i = list_count(run_job_list) + cg_hung; - /* Do not try to backfill if - * we already have many running jobs, - * there are no pending jobs, OR - * there are insufficient idle nodes to start any pending jobs */ - if ((i > MAX_JOB_CNT) - || list_is_empty(pend_job_list) - || (min_pend_job_size > part_specs.idle_node_cnt)) - goto cleanup; - - list_sort(pend_job_list, _sort_by_prio); - list_sort(run_job_list, _sort_by_end); - _build_node_space_map(&part_specs); - _backfill_part(&part_specs); - - cleanup: - list_destroy(pend_job_list); - list_destroy(run_job_list); -} - -/* get the specs on nodes within a partition */ -static void -_get_part_specs(struct part_record *part_ptr, part_specs_t *part_specs) -{ - int i, j; + if ((part_ptr->state_up == 0) || + (part_ptr->node_bitmap == NULL)) + continue; + if ((part_ptr->root_only) && filter_root) + continue; - part_specs->idle_node_cnt = 0; - part_specs->max_cpus = 0; - part_specs->min_cpus = INFINITE; - part_specs->min_mem = INFINITE; - part_specs->min_disk = INFINITE; + if (license_job_test(job_ptr) != SLURM_SUCCESS) + continue; + if (job_independent(job_ptr) != true) + continue; - for (i=0; i<node_record_count; i++) { - struct node_record *node_ptr = &node_record_table_ptr[i]; - bool found_part = false; + /* Determine minimum and maximum node counts */ + min_nodes = MAX(job_ptr->details->min_nodes, + part_ptr->min_nodes); + if (job_ptr->details->max_nodes == 0) + max_nodes = part_ptr->max_nodes; + else + max_nodes = MIN(job_ptr->details->max_nodes, + part_ptr->max_nodes); + max_nodes = MIN(max_nodes, 500000); /* prevent overflows */ + if (job_ptr->details->max_nodes) + req_nodes = max_nodes; + else + req_nodes = min_nodes; + if (min_nodes > max_nodes) { + /* job's min_nodes exceeds partition's max_nodes */ + continue; + } - for (j=0; j<node_ptr->part_cnt; j++) { - if (node_ptr->part_pptr[j] != part_ptr) - continue; - found_part = true; + /* Determine job's expected completion time */ + if (job_ptr->time_limit == NO_VAL) + time_limit = part_ptr->max_time; + else + time_limit = job_ptr->time_limit; + end_time = (time_limit * 60) + now; + + /* Identify usable nodes for this job */ + FREE_NULL_BITMAP(avail_bitmap); + avail_bitmap = bit_copy(part_ptr->node_bitmap); + for (j=0; ; ) { + if (node_space[j].end_time <= end_time) { + bit_and(avail_bitmap, + node_space[j].avail_bitmap); + } + if ((j = node_space[j].next) == 0) + break; + } + if (job_req_node_filter(job_ptr, avail_bitmap)) + continue; /* problem with features */ + if (job_ptr->details->exc_node_bitmap) { + bit_not(job_ptr->details->exc_node_bitmap); + bit_and(avail_bitmap, + job_ptr->details->exc_node_bitmap); + bit_not(job_ptr->details->exc_node_bitmap); + } + if ((job_ptr->details->req_node_bitmap) && + (!bit_super_set(job_ptr->details->req_node_bitmap, + avail_bitmap))) + continue; /* required nodes missing */ + if (bit_set_count(avail_bitmap) < min_nodes) + continue; /* no nodes remain */ + + /* Try to schedule the job. First on dedicated nodes + * then on shared nodes (if so configured). 
*/ + orig_shared = job_ptr->details->shared; + job_ptr->details->shared = 0; + j = select_g_job_test(job_ptr, avail_bitmap, + min_nodes, max_nodes, req_nodes, + SELECT_MODE_WILL_RUN); + job_ptr->details->shared = orig_shared; + if ((j != SLURM_SUCCESS) && (orig_shared != 0)) { + j = select_g_job_test(job_ptr, avail_bitmap, + min_nodes, max_nodes, req_nodes, + SELECT_MODE_WILL_RUN); + } + if (j != SLURM_SUCCESS) + continue; /* not runable */ + if (job_ptr->start_time <= now) { + /* Start the job now */ + _start_job(job_ptr, avail_bitmap); break; } - if (found_part == false) - continue; /* different partition */ - if (node_ptr->node_state == NODE_STATE_IDLE) - part_specs->idle_node_cnt++; - if (slurmctld_conf.fast_schedule) { - part_specs->max_cpus = MAX(part_specs->max_cpus, - node_ptr->config_ptr->cpus); - part_specs->min_cpus = MIN(part_specs->min_cpus, - node_ptr->config_ptr->cpus); - part_specs->min_mem = MIN(part_specs->min_mem, - node_ptr->config_ptr->real_memory); - part_specs->min_disk = MIN(part_specs->min_disk, - node_ptr->config_ptr->tmp_disk); - } else { - part_specs->max_cpus = MAX(part_specs->max_cpus, - node_ptr->cpus); - part_specs->min_cpus = MIN(part_specs->min_cpus, - node_ptr->cpus); - part_specs->min_mem = MIN(part_specs->min_mem, - node_ptr->real_memory); - part_specs->min_disk = MIN(part_specs->min_disk, - node_ptr->tmp_disk); + if (job_ptr->start_time > (now + BACKFILL_WINDOW)) { + /* Starts too far in the future to worry about */ + continue; } - } - -#if __DEBUG - info("backfill: partition %s cpus=%u:%u mem=%u+ disk=%u+", - part_ptr->name, part_specs->min_cpus, part_specs->max_cpus, - part_specs->min_mem, part_specs->min_disk); -#endif -} -/* Add specified pending job to our records */ -static int -_add_pending_job(struct job_record *job_ptr, struct part_record *part_ptr, - part_specs_t *part_specs) -{ - int min_node_cnt; - struct job_details *detail_ptr = job_ptr->details; - - if (job_ptr->priority == 0) { -#if __DEBUG - info("backfill: pending job %u is held", job_ptr->job_id); -#endif - return 0; /* Skip this job */ - } + if (node_space_recs == MAX_BACKFILL_JOB_CNT) { + /* Already have too many jobs to deal with */ + break; + } - if ((job_ptr->time_limit != NO_VAL) && - (job_ptr->time_limit > part_ptr->max_time)) { + /* + * Add reservation to scheduling table + */ + end_reserve = job_ptr->start_time + (time_limit * 60); + bit_not(avail_bitmap); + _add_reservation(job_ptr->start_time, end_reserve, + avail_bitmap, node_space, &node_space_recs); #if __DEBUG - info("backfill: pending job %u exceeds partition time limit", - job_ptr->job_id); + _dump_node_space_table(node_space); #endif - return 0; /* Skip this job */ - } - - if (detail_ptr == NULL) { - error("backfill: pending job %u lacks details", - job_ptr->job_id); - return 1; } + FREE_NULL_BITMAP(avail_bitmap); - /* figure out how many nodes this job needs */ - min_node_cnt = (job_ptr->num_procs + part_specs->max_cpus - 1) / - part_specs->max_cpus; /* round up */ - detail_ptr->min_nodes = MAX(min_node_cnt, detail_ptr->min_nodes); - if (detail_ptr->min_nodes > part_ptr->max_nodes) { -#if __DEBUG - info("backfill: pending job %u exceeds partition node limit", - job_ptr->job_id); -#endif - return 0; /* Skip this job */ + for (i=0; ; ) { + bit_free(node_space[i].avail_bitmap); + if ((i = node_space[i].next) == 0) + break; } - -#if __DEBUG - info("backfill: job %u pending on %d nodes", job_ptr->job_id, - detail_ptr->min_nodes); -#endif - - list_append(pend_job_list, (void *) job_ptr); - return 0; + xfree(job_queue); } 
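/*
 * A small stand-alone sketch of the node-forcing idea used by _start_job()
 * in the hunk that follows: to pin a job onto the node set chosen by the
 * backfill pass, the plugin temporarily sets the job's excluded-node bitmap
 * to the complement of that set, runs the regular allocator, then restores
 * the original exclusions.  This is an editorial illustration only; plain
 * 64-bit masks stand in for bitstr_t, and fake_job, try_start() and
 * start_on() are hypothetical stand-ins, not SLURM functions such as
 * select_nodes().
 */
#include <stdint.h>
#include <stdio.h>

struct fake_job {
    uint64_t exc_mask;   /* nodes the allocator must not use */
};

/* Hypothetical allocator: succeeds if at least one node remains usable. */
static int try_start(const struct fake_job *job, uint64_t all_nodes)
{
    return (all_nodes & ~job->exc_mask) != 0 ? 0 : -1;
}

static int start_on(struct fake_job *job, uint64_t chosen, uint64_t all_nodes)
{
    uint64_t saved = job->exc_mask;        /* remember original exclusions  */
    job->exc_mask = all_nodes & ~chosen;   /* exclude everything not chosen */
    int rc = try_start(job, all_nodes);
    job->exc_mask = saved;                 /* restore, as _start_job() does */
    return rc;
}

int main(void)
{
    struct fake_job job = { .exc_mask = 0 };
    uint64_t all    = 0xFF;   /* eight nodes                    */
    uint64_t chosen = 0x0C;   /* backfill selected nodes 2 and 3 */
    printf("start on chosen nodes: %s\n",
           start_on(&job, chosen, all) == 0 ? "ok" : "failed");
    return 0;
}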
-/* Add specified running job to our records */ -static int -_add_running_job(struct job_record *job_ptr) +static int _start_job(struct job_record *job_ptr, bitstr_t *avail_bitmap) { + int rc; + bitstr_t *orig_exc_nodes = NULL; + static uint32_t fail_jobid = 0; + + if (job_ptr->details->exc_node_bitmap) + orig_exc_nodes = job_ptr->details->exc_node_bitmap; + job_ptr->details->exc_node_bitmap = bit_copy(avail_bitmap); + bit_not(job_ptr->details->exc_node_bitmap); + + rc = select_nodes(job_ptr, false, NULL); + bit_free(job_ptr->details->exc_node_bitmap); + job_ptr->details->exc_node_bitmap = orig_exc_nodes; + if (rc == SLURM_SUCCESS) { + /* job initiated */ + last_job_update = time(NULL); + info("backfill: Started JobId=%u on %s", + job_ptr->job_id, job_ptr->nodes); + if (job_ptr->batch_flag) + launch_job(job_ptr); + else + srun_allocate(job_ptr->job_id); + backfilled_jobs++; #if __DEBUG - info("backfill: job %u running on %d nodes: %s", job_ptr->job_id, - job_ptr->node_cnt, job_ptr->nodes); + info("backfill: Jobs backfilled: %d", backfilled_jobs); #endif + } else if (job_ptr->job_id != fail_jobid) { + char *node_list = bitmap2node_name(avail_bitmap); + /* This happens when a job has sharing disabled and + * a selected node is still completing some job, + * which should be a temporary situation. */ + verbose("backfill: Failed to start JobId=%u on %s: %s", + job_ptr->job_id, node_list, slurm_strerror(rc)); + xfree(node_list); + fail_jobid = job_ptr->job_id; + } else { + debug3("backfill: Failed to start JobId=%u", job_ptr->job_id); + } - list_append(run_job_list, (void *) job_ptr); - return 0; + return rc; } -/* build a map of how many nodes are free at any point in time - * based upon currently running jobs. pending jobs are added to - * the map as we execute the backfill algorithm */ -static void -_build_node_space_map(part_specs_t *part_specs) +/* trigger the attempt of a backfill */ +extern void run_backfill (void) { - ListIterator run_job_iterate; - struct job_record *run_job_ptr; - int base_size = 0; - - node_space_recs = 0; - - if (part_specs->idle_node_cnt) { - base_size = part_specs->idle_node_cnt; - node_space[node_space_recs].idle_node_cnt = base_size; - node_space[node_space_recs++].time = time(NULL); - } - - run_job_iterate = list_iterator_create(run_job_list); - while ( (run_job_ptr = list_next(run_job_iterate)) ) { - uint32_t nodes2free = _get_avail_node_cnt(run_job_ptr); - if (nodes2free == 0) - continue; /* no nodes returning to service */ - base_size += nodes2free; - node_space[node_space_recs].idle_node_cnt = base_size; - node_space[node_space_recs++].time = run_job_ptr->end_time; - } - list_iterator_destroy(run_job_iterate); - - _dump_node_space_map(0, 0); + pthread_mutex_lock( &thread_flag_mutex ); + new_work = true; + pthread_mutex_unlock( &thread_flag_mutex ); } -static void -_dump_node_space_map(uint32_t job_id, uint32_t node_cnt) +/* Report if any changes occurred to job, node or partition information */ +static bool _more_work (void) { -#if __DEBUG - int i; - time_t now; + bool rc; + static time_t backfill_job_time = (time_t) 0; + static time_t backfill_node_time = (time_t) 0; + static time_t backfill_part_time = (time_t) 0; - if (job_id == 0) - info("backfill: initial node_space_map"); - else - info("backfill: node_space_map after job %u allocated %u nodes", - job_id, node_cnt); - - now = time(NULL); - for (i=0; i<node_space_recs; i++) { - info("backfill: %3d nodes at time %4d (seconds in future)", - node_space[i].idle_node_cnt, - (int) difftime(node_space[i].time, 
now)); + pthread_mutex_lock( &thread_flag_mutex ); + if ( (backfill_job_time == last_job_update ) && + (backfill_node_time == last_node_update) && + (backfill_part_time == last_part_update) && + (new_work == false) ) { + rc = false; + } else { + backfill_job_time = last_job_update; + backfill_node_time = last_node_update; + backfill_part_time = last_part_update; + new_work = false; + rc = true; } -#endif + pthread_mutex_unlock( &thread_flag_mutex ); + return rc; } -/* return 1 if the job could be started now, 0 otherwise and add job into - * node_space_map - */ -static int -_update_node_space_map(struct job_record *job_ptr) +/* Create a reservation for a job in the future */ +static void _add_reservation(uint32_t start_time, uint32_t end_reserve, + bitstr_t *res_bitmap, + node_space_map_t *node_space, + int *node_space_recs) { - int i, j, min_nodes, nodes_needed; - time_t fini_time; - - if (node_space_recs == 0) /* no nodes now or in future */ - return 0; - if (job_ptr->details == NULL) /* pending job lacks details */ - return 0; - - if (job_ptr->time_limit == NO_VAL) - fini_time = time(NULL) + job_ptr->part_ptr->max_time; - else - fini_time = time(NULL) + job_ptr->time_limit; - min_nodes = node_space[0].idle_node_cnt; - for (i=1; i<node_space_recs; i++) { - if (node_space[i].time > fini_time) + int i, j; + + for (j=0; ; ) { + if (node_space[j].end_time > start_time) { + /* insert start entry record */ + i = *node_space_recs; + node_space[i].begin_time = start_time; + node_space[i].end_time = node_space[j].end_time; + node_space[j].end_time = start_time; + node_space[i].avail_bitmap = + bit_copy(node_space[j].avail_bitmap); + node_space[i].next = node_space[j].next; + node_space[j].next = i; + (*node_space_recs)++; break; - if (min_nodes > node_space[i].idle_node_cnt) - min_nodes = node_space[i].idle_node_cnt; - } - - nodes_needed = job_ptr->details->min_nodes; - if (nodes_needed <= min_nodes) - return 1; - - for (i=0; i<node_space_recs; i++) { - int fits = 0; - if (node_space[i].idle_node_cnt < nodes_needed) - continue; /* can't start yet... */ - fits = 1; - for (j=i; j<node_space_recs; j++) { - if (node_space[j].idle_node_cnt < nodes_needed) { - fits = 0; - break; - } } - if (fits == 0) - continue; - for (j=i; j<node_space_recs; j++) { - node_space[j].idle_node_cnt -= nodes_needed; + if (node_space[j].end_time == start_time) { + /* no need to insert start entry record */ + break; } - break; - } - - _dump_node_space_map(job_ptr->job_id, nodes_needed); - return 0; -} - -/* return the number of nodes to be returned to this partition when - * the specified job terminates. 
Don't count DRAIN or DOWN nodes */ -static int -_get_avail_node_cnt(struct job_record *job_ptr) -{ - int cnt = 0, i; - struct node_record *node_ptr; - uint16_t base_state; - - for (i=0; i<node_record_count; i++) { - if (bit_test(job_ptr->node_bitmap, i) == 0) - continue; - node_ptr = node_record_table_ptr + i; - if (node_ptr->node_state & NODE_STATE_DRAIN) - continue; - base_state = node_ptr->node_state & NODE_STATE_BASE; - if (base_state == NODE_STATE_DOWN) - continue; - cnt++; + if ((j = node_space[j].next) == 0) + break; } - return cnt; -} - - -/* scan pending job queue and change the priority of any that - * can run now without delaying the expected initiation time - * of any higher priority job */ -static void -_backfill_part(part_specs_t *part_specs) -{ - struct job_record *pend_job_ptr; - ListIterator pend_job_iterate; - struct job_record *first_job = NULL; /* just used as flag */ - - /* find job to possibly backfill */ - pend_job_iterate = list_iterator_create(pend_job_list); - while ( (pend_job_ptr = list_next(pend_job_iterate)) ) { - if (first_job == NULL) - first_job = pend_job_ptr; - - if (_loc_restrict(pend_job_ptr, part_specs)) { -#if __DEBUG - info("Job %u has locality restrictions", - pend_job_ptr->job_id); -#endif - continue; - } - - if (first_job == pend_job_ptr) { - if (pend_job_ptr->details == NULL) - break; - if (pend_job_ptr->details->min_nodes <= - part_specs->idle_node_cnt) { -#if __DEBUG - info("Job %u should start via FIFO", - pend_job_ptr->job_id); -#endif - break; - } +#if 0 + /* This records end of reservation so we maintain a full map + * of when jobs start and end. Since we only care about starting + * jobs right now, the end of reservation time is not very useful + * unless we want to track expected job initiation time, which + * would necessitate additional logic. 
*/ + for (j=0; ; ) { + if ((node_space[j].begin_time < end_reserve) && + (node_space[j].end_time > end_reserve)) { + /* insert end entry record */ + i = *node_space_recs; + node_space[i].begin_time = node_space[j].begin_time; + node_space[j].begin_time = end_reserve; + node_space[i].end_time = end_reserve; + node_space[i].avail_bitmap = + bit_copy(node_space[j].avail_bitmap); + node_space[i].next = j; + node_space[previous].next = i; + (*node_space_recs)++; + break; } - - if (_update_node_space_map(pend_job_ptr)) { - _change_prio(pend_job_ptr, - (first_job->priority + 1)); + if (node_space[j].end_time == end_reserve) { + /* no need to insert end entry record */ break; } + previous = j; + if ((j = node_space[j].next) == 0) + break; } - list_iterator_destroy(pend_job_iterate); -} -/* Return true if job has locality restrictions, false otherwise */ -static bool -_loc_restrict(struct job_record *job_ptr, part_specs_t *part_specs) -{ - struct job_details *detail_ptr = job_ptr->details; - - if (detail_ptr == NULL) - return false; - - if ( (detail_ptr->contiguous) || (detail_ptr->features) || - (detail_ptr->req_nodes && detail_ptr->req_nodes[0]) || - (detail_ptr->exc_nodes && detail_ptr->exc_nodes[0]) ) - return true; - - if ( (detail_ptr->job_min_procs > part_specs->min_cpus) || - (detail_ptr->job_min_memory > part_specs->min_mem) || - (detail_ptr->job_min_tmp_disk > part_specs->min_disk) ) - return true; - - if (part_specs->max_cpus != part_specs->min_cpus) { - int max_node_cnt; - max_node_cnt = (job_ptr->num_procs + part_specs->min_cpus - - 1) / part_specs->min_cpus; - if (max_node_cnt > detail_ptr->min_nodes) - return true; + for (j=0; ; ) { + if ((node_space[j].begin_time >= start_time) && + (node_space[j].end_time <= end_reserve)) { + bit_and(node_space[j].avail_bitmap, res_bitmap); + } + if ((j = node_space[j].next) == 0) + break; } - - return false; -} - -/* Change the priority of a pending job to get it running now */ -static void -_change_prio(struct job_record *job_ptr, uint32_t prio) -{ - info("backfill: set job %u to priority %u", job_ptr->job_id, prio); - job_ptr->priority = prio; - altered_job = true; - run_backfill(); - last_job_update = time(NULL); +#else + for (j=0; ; ) { + if (node_space[j].begin_time >= start_time) + bit_and(node_space[j].avail_bitmap, res_bitmap); + if ((j = node_space[j].next) == 0) + break; + } +#endif } - diff --git a/src/plugins/sched/backfill/backfill.h b/src/plugins/sched/backfill/backfill.h index de1ab0a09..76ff6d136 100644 --- a/src/plugins/sched/backfill/backfill.h +++ b/src/plugins/sched/backfill/backfill.h @@ -4,7 +4,7 @@ * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/backfill/backfill_wrapper.c b/src/plugins/sched/backfill/backfill_wrapper.c index 0a14d0799..d2940cffe 100644 --- a/src/plugins/sched/backfill/backfill_wrapper.c +++ b/src/plugins/sched/backfill/backfill_wrapper.c @@ -6,7 +6,7 @@ * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windley <jwindley@lnxi.com>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. 
* For details, see <http://www.llnl.gov/linux/slurm/>. @@ -137,6 +137,24 @@ slurm_sched_plugin_schedule( void ) return SLURM_SUCCESS; } +/***************************************************************************/ +/* TAG( slurm_sched_plugin_newalloc ) */ +/***************************************************************************/ +int +slurm_sched_plugin_newalloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + +/***************************************************************************/ +/* TAG( slurm_sched_plugin_freealloc ) */ +/***************************************************************************/ +int +slurm_sched_plugin_freealloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + /**************************************************************************/ /* TAG( slurm_sched_plugin_initial_priority ) */ @@ -183,3 +201,18 @@ char *slurm_sched_strerror( int errnum ) return NULL; } +/**************************************************************************/ +/* TAG( slurm_sched_plugin_requeue ) */ +/**************************************************************************/ +void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason ) +{ + /* Empty. */ +} + +/**************************************************************************/ +/* TAG( slurm_sched_get_conf ) */ +/**************************************************************************/ +char *slurm_sched_get_conf( void ) +{ + return NULL; +} diff --git a/src/plugins/sched/builtin/Makefile.in b/src/plugins/sched/builtin/Makefile.in index e5153c593..f157306dc 100644 --- a/src/plugins/sched/builtin/Makefile.in +++ b/src/plugins/sched/builtin/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ sched_builtin_la_OBJECTS = $(am_sched_builtin_la_OBJECTS) sched_builtin_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(sched_builtin_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = -fexceptions CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -297,8 +310,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -306,8 +319,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -361,8 +374,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
tags: TAGS @@ -374,8 +387,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -385,13 +398,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/sched/builtin/builtin_wrapper.c b/src/plugins/sched/builtin/builtin_wrapper.c index 72df0a5c7..9abed1f5e 100644 --- a/src/plugins/sched/builtin/builtin_wrapper.c +++ b/src/plugins/sched/builtin/builtin_wrapper.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -83,6 +83,24 @@ slurm_sched_plugin_schedule( void ) return SLURM_SUCCESS; } +/***************************************************************************/ +/* TAG( slurm_sched_plugin_newalloc ) */ +/***************************************************************************/ +int +slurm_sched_plugin_newalloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + +/***************************************************************************/ +/* TAG( slurm_sched_plugin_freealloc ) */ +/***************************************************************************/ +int +slurm_sched_plugin_freealloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + /**************************************************************************/ /* TAG( slurm_sched_plugin_initial_priority ) */ @@ -129,3 +147,18 @@ char *slurm_sched_strerror( int errnum ) return NULL; } +/**************************************************************************/ +/* TAG( slurm_sched_plugin_requeue ) */ +/**************************************************************************/ +void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason ) +{ + /* Empty. */ +} + +/**************************************************************************/ +/* TAG( slurm_sched_get_conf ) */ +/**************************************************************************/ +char *slurm_sched_get_conf( void ) +{ + return NULL; +} diff --git a/src/plugins/sched/gang/Makefile.in b/src/plugins/sched/gang/Makefile.in index c494168ea..a71c67159 100644 --- a/src/plugins/sched/gang/Makefile.in +++ b/src/plugins/sched/gang/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ sched_gang_la_OBJECTS = $(am_sched_gang_la_OBJECTS) sched_gang_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(sched_gang_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = -fexceptions CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -301,8 +314,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -310,8 +323,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -366,8 +379,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -379,8 +392,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -390,13 +403,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/sched/gang/gang.c b/src/plugins/sched/gang/gang.c index f7fb884d2..86ab638b1 100644 --- a/src/plugins/sched/gang/gang.c +++ b/src/plugins/sched/gang/gang.c @@ -1,10 +1,9 @@ /***************************************************************************** * gang.c - Gang scheduler functions. ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Written by Chris Holmes + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -13,18 +12,18 @@ * the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. 
You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in + * version. If you delete this exception statement from all source files in * the program, then also delete it here. - * + * * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more @@ -35,168 +34,1415 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ +/* + * gang scheduler plugin for SLURM + */ + +#include <pthread.h> +#include <unistd.h> + #include "./gang.h" +#include "slurm/slurm.h" +#include "src/common/bitstring.h" +#include "src/common/list.h" +#include "src/common/node_select.h" #include "src/common/slurm_protocol_defs.h" -#include "src/slurmctld/slurmctld.h" +#include "src/common/xstring.h" #include "src/slurmctld/locks.h" +#include "src/slurmctld/slurmctld.h" +/* global timeslicer thread variables */ static bool thread_running = false; static bool thread_shutdown = false; static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER; -static pthread_t gang_thread_id; +static pthread_t timeslicer_thread_id; + +/* timeslicer flags and structures */ +enum entity_type { + GS_NODE, + GS_SOCKET, + GS_CORE, + GS_CPU +}; + +enum gs_flags { + GS_SUSPEND, + GS_RESUME, + GS_NO_PART, + GS_SUCCESS, + GS_ACTIVE, + GS_NO_ACTIVE, + GS_FILLER +}; + +struct gs_job { + uint32_t job_id; + uint16_t sig_state; + uint16_t row_state; + bitstr_t *resmap; + uint16_t *alloc_cpus; +}; + +struct gs_part { + char *part_name; + uint16_t priority; + uint32_t num_jobs; + struct gs_job **job_list; + uint32_t job_list_size; + uint32_t num_shadows; + struct gs_job **shadow; /* see '"Shadow" Design' below */ + uint32_t shadow_size; + uint32_t jobs_active; + bitstr_t *active_resmap; + uint16_t *active_cpus; + uint16_t array_size; + struct gs_part *next; +}; + +/****************************************** + * + * SUMMARY OF DATA MANAGEMENT + * + * For GS_NODE and GS_CPU: bits in resmaps represent nodes + * For GS_SOCKET and GS_CORE: bits in resmaps represent sockets + * GS_NODE and GS_SOCKET ignore the CPU array + * GS_CPU and GS_CORE use the CPU array to help resolve conflict + * + * EVALUATION ALGORITHM + * + * For GS_NODE and GS_SOCKET: bits CANNOT conflict + * For GS_CPUS and GS_CORE: if bits conflict, make sure sum of CPUs per + * resource don't exceed physical resource count + * + * + * The j_ptr->alloc_cpus array is a collection of allocated values ONLY. 
+ * For every bit set in j_ptr->resmap, there is a corresponding element + * (with an equal-to or less-than index value) in j_ptr->alloc_cpus. + * + ****************************************** + * + * "Shadow" Design to support Preemption + * + * Jobs in higher priority partitions "cast shadows" on the active + * rows of lower priority partitions. The effect is that jobs that + * are "caught" in these shadows are preempted (suspended) + * indefinitely until the "shadow" disappears. When constructing + * the active row of a partition, any jobs in the 'shadow' array + * are applied first. + * + ****************************************** + */ + + +/* global variables */ +static uint32_t timeslicer_seconds = 0; +static uint16_t gr_type = GS_NODE; +static uint16_t gs_fast_schedule = 0; +static struct gs_part *gs_part_list = NULL; +static uint32_t default_job_list_size = 64; +static uint32_t gs_resmap_size = 0; +static pthread_mutex_t data_mutex = PTHREAD_MUTEX_INITIALIZER; + +static uint32_t gs_num_groups = 0; +static uint16_t *gs_cpus_per_res = NULL; +static uint32_t *gs_cpu_count_reps = NULL; + +static struct gs_part **gs_part_sorted = NULL; +static uint32_t num_sorted_part = 0; + +#define GS_CPU_ARRAY_INCREMENT 8 + +/* function declarations */ +static void *_timeslicer_thread(); + + +char *_print_flag(int flag) { + switch (flag) { + case GS_SUSPEND: return "GS_SUSPEND"; + case GS_RESUME: return "GS_RESUME"; + case GS_NO_PART: return "GS_NO_PART"; + case GS_SUCCESS: return "GS_SUCCESS"; + case GS_ACTIVE: return "GS_ACTIVE"; + case GS_NO_ACTIVE: return "GS_NO_ACTIVE"; + case GS_FILLER: return "GS_FILLER"; + default: return "unknown"; + } + return "unknown"; +} + + +void _print_jobs(struct gs_part *p_ptr) +{ + int i; + debug3("sched/gang: part %s has %u jobs, %u shadows:", + p_ptr->part_name, p_ptr->num_jobs, p_ptr->num_shadows); + for (i = 0; i < p_ptr->num_shadows; i++) { + debug3("sched/gang: shadow job %u row_s %s, sig_s %s", + p_ptr->shadow[i]->job_id, + _print_flag(p_ptr->shadow[i]->row_state), + _print_flag(p_ptr->shadow[i]->sig_state)); + } + for (i = 0; i < p_ptr->num_jobs; i++) { + debug3("sched/gang: job %u row_s %s, sig_s %s", + p_ptr->job_list[i]->job_id, + _print_flag(p_ptr->job_list[i]->row_state), + _print_flag(p_ptr->job_list[i]->sig_state)); + } + if (p_ptr->active_resmap) { + int s = bit_size(p_ptr->active_resmap); + i = bit_set_count(p_ptr->active_resmap); + debug3("sched/gang: active resmap has %d of %d bits set", i, s); + } +} -/* Global configuration parameters */ -uint16_t multi_prog_level = 2; /* maximum multi-programming level */ -uint16_t slice_time = 10; /* seconds */ +static uint16_t +_get_gr_type() { -static bool _context_switch(void); -static void * _gang_thread(void *no_data); -static int _gang_resume_job(uint32_t jobid); -static int _gang_suspend_job(uint32_t jobid); -static void _parse_gang_config(void); + switch (slurmctld_conf.select_type_param) { + case CR_CORE: + case CR_CORE_MEMORY: + return GS_CORE; + case CR_CPU: + case CR_CPU_MEMORY: + return GS_CPU; + case CR_SOCKET: + case CR_SOCKET_MEMORY: + return GS_SOCKET; + } + /* note that CR_MEMORY is node-level scheduling with + * memory management */ + return GS_NODE; +} +/* Return resource data for the given node */ +static uint16_t +_compute_resources(int i, char socket_count) +{ + if (gr_type == GS_NODE) + return 1; -/* _parse_gang_config - load gang scheduler configuration parameters. 
- * To read gang.conf configuration file, see _parse_wiki_config - * code in src/wiki/msg.c, or add parameters to main config file */ + if (gr_type == GS_CPU) { + if (socket_count) + return 1; + if (gs_fast_schedule) + return node_record_table_ptr[i].config_ptr->cpus; + return node_record_table_ptr[i].cpus; + } + + if (socket_count || gr_type == GS_SOCKET) { + if (gs_fast_schedule) + return node_record_table_ptr[i].config_ptr->sockets; + return node_record_table_ptr[i].sockets; + } + + /* gr_type == GS_CORE */ + if (gs_fast_schedule) + return node_record_table_ptr[i].config_ptr->cores; + return node_record_table_ptr[i].cores; +} + +/* For GS_CPU the gs_phys_res_cnt is the total number of CPUs per node. + * For GS_CORE the gs_phys_res_cnt is the total number of cores per socket per + * node (currently no nodes are made with different core counts per socket) */ static void -_parse_gang_config(void) +_load_phys_res_cnt() { - /* Reset multi_prog_level and slice_time as needed */ + int i, array_size = GS_CPU_ARRAY_INCREMENT; + uint32_t adder; + + xfree(gs_cpus_per_res); + xfree(gs_cpu_count_reps); + gs_num_groups = 0; + if (gr_type == GS_NODE || gr_type == GS_SOCKET) + return; + + gs_cpus_per_res = xmalloc(array_size * sizeof(uint16_t)); + gs_cpu_count_reps = xmalloc(array_size * sizeof(uint32_t)); + for (i = 0; i < node_record_count; i++) { + uint16_t res = _compute_resources(i, 0); + if (gs_cpus_per_res[gs_num_groups] == res) { + adder = 1; + if (gr_type == GS_CORE) + adder = _compute_resources(i, 1); + gs_cpu_count_reps[gs_num_groups] += adder; + continue; + } + if (gs_cpus_per_res[gs_num_groups] != 0) { + gs_num_groups++; + if (gs_num_groups >= array_size) { + array_size += GS_CPU_ARRAY_INCREMENT; + xrealloc(gs_cpus_per_res, + array_size * sizeof(uint16_t)); + xrealloc(gs_cpu_count_reps, + array_size * sizeof(uint32_t)); + } + } + gs_cpus_per_res[gs_num_groups] = res; + adder = 1; + if (gr_type == GS_CORE) + adder = _compute_resources(i, 1); + gs_cpu_count_reps[gs_num_groups] = adder; + } + gs_num_groups++; + for (i = 0; i < gs_num_groups; i++) { + debug3("sched/gang: _load_phys_res_cnt: grp %d cpus %u reps %u", + i, gs_cpus_per_res[i], gs_cpu_count_reps[i]); + } + return; +} + +static uint16_t +_get_phys_res_cnt(int res_index) +{ + int i = 0; + int pos = gs_cpu_count_reps[i++]; + while (res_index >= pos) { + pos += gs_cpu_count_reps[i++]; + } + return gs_cpus_per_res[i-1]; } + +/* The gs_part_list is a single large array of gs_part entities. + * To destroy it, step down the array and destroy the pieces of + * each gs_part entity, and then delete the whole array. + * To destroy a gs_part entity, you need to delete the name, the + * list of jobs, the shadow list, and the active_resmap. Each + * job has a resmap that must be deleted also. + */ +static void +_destroy_parts() { + int i; + struct gs_part *tmp, *ptr = gs_part_list; + struct gs_job *j_ptr; + + while (ptr) { + tmp = ptr; + ptr = ptr->next; + + xfree(tmp->part_name); + for (i = 0; i < tmp->num_jobs; i++) { + j_ptr = tmp->job_list[i]; + if (j_ptr->resmap) + bit_free(j_ptr->resmap); + xfree(j_ptr->alloc_cpus); + xfree(j_ptr); + } + xfree(tmp->shadow); + if (tmp->active_resmap) + bit_free(tmp->active_resmap); + xfree(tmp->active_cpus); + xfree(tmp->job_list); + } + xfree(gs_part_list); +} + +/* Build the gs_part_list. The job_list will be created later, + * once a job is added. 
*/ +static void +_build_parts() { + ListIterator part_iterator; + struct part_record *p_ptr; + int i, num_parts; + + if (gs_part_list) + _destroy_parts(); + + /* reset the sorted list, since it's currently + * pointing to partitions we just destroyed */ + num_sorted_part = 0; + + num_parts = list_count(part_list); + if (!num_parts) + return; + + part_iterator = list_iterator_create(part_list); + if (part_iterator == NULL) + fatal ("memory allocation failure"); + + gs_part_list = xmalloc(num_parts * sizeof(struct gs_part)); + i = 0; + while ((p_ptr = (struct part_record *) list_next(part_iterator))) { + gs_part_list[i].part_name = xstrdup(p_ptr->name); + gs_part_list[i].priority = p_ptr->priority; + /* everything else is already set to zero/NULL */ + gs_part_list[i].next = &(gs_part_list[i+1]); + i++; + } + gs_part_list[--i].next = NULL; + list_iterator_destroy(part_iterator); +} + +/* Find the gs_part entity with the given name */ +static struct gs_part * +_find_gs_part(char *name) +{ + struct gs_part *p_ptr = gs_part_list; + for (; p_ptr; p_ptr = p_ptr->next) { + if (strcmp(name, p_ptr->part_name) == 0) + return p_ptr; + } + return NULL; +} + +/* Find the job_list index of the given job_id in the given partition */ static int -_gang_resume_job(uint32_t jobid) +_find_job_index(struct gs_part *p_ptr, uint32_t job_id) { + int i; + for (i = 0; i < p_ptr->num_jobs; i++) { + if (p_ptr->job_list[i]->job_id == job_id) + return i; + } + return -1; +} + +/* Return 1 if job fits in this row, else return 0 */ +static int +_can_cpus_fit(bitstr_t *setmap, struct gs_job *j_ptr, struct gs_part *p_ptr) { - int slurm_rc; - suspend_msg_t msg; + int i, size, a = 0; + uint16_t *p_cpus, *j_cpus; + + size = bit_size(setmap); + p_cpus = p_ptr->active_cpus; + j_cpus = j_ptr->alloc_cpus; - msg.job_id = jobid; - msg.op = RESUME_JOB; - slurm_rc = job_suspend(&msg, 0, -1); - if (slurm_rc != SLURM_SUCCESS) - error("gang: Failed to resume job %u (%m)", jobid); - else - info("gang: Resumed job %u", jobid); - return slurm_rc; + if (!p_cpus || !j_cpus) + return 0; + + for (i = 0; i < size; i++) { + if (bit_test(setmap, i)) { + if (p_cpus[i]+j_cpus[a] > _get_phys_res_cnt(i)) + return 0; + } + if (bit_test(j_ptr->resmap, i)) + a++; + } + return 1; } + +/* Return 1 if job fits in this row, else return 0 */ static int -_gang_suspend_job(uint32_t jobid) +_job_fits_in_active_row(struct gs_job *j_ptr, struct gs_part *p_ptr) { - int slurm_rc; + int count; + bitstr_t *tmpmap; + + if (p_ptr->active_resmap == NULL || p_ptr->jobs_active == 0) + return 1; + + tmpmap = bit_copy(j_ptr->resmap); + if (!tmpmap) + fatal("sched/gang: memory allocation error"); + + bit_and(tmpmap, p_ptr->active_resmap); + /* any set bits indicate contention for the same resource */ + count = bit_set_count(tmpmap); + debug3("sched/gang: _job_fits_in_active_row: %d bits conflict", count); + + if (count == 0) { + bit_free(tmpmap); + return 1; + } + if (gr_type == GS_NODE || gr_type == GS_SOCKET) { + bit_free(tmpmap); + return 0; + } + + /* for GS_CPU and GS_CORE, we need to compare CPU arrays and + * see if the sum of CPUs on any one resource exceed the total + * of physical resources available */ + count = _can_cpus_fit(tmpmap, j_ptr, p_ptr); + bit_free(tmpmap); + return count; +} + +/* Add the given job to the "active" structures of + * the given partition and increment the run count */ +static void +_add_job_to_active(struct gs_job *j_ptr, struct gs_part *p_ptr) +{ + int i, a, sz; + + /* add job to active_resmap */ + if (!p_ptr->active_resmap) { + /* 
allocate the active resmap */ + debug3("sched/gang: _add_job_to_active: using job %u as active base", + j_ptr->job_id); + p_ptr->active_resmap = bit_copy(j_ptr->resmap); + } else if (p_ptr->jobs_active == 0) { + /* if the active_resmap exists but jobs_active is '0', + * this means to overwrite the bitmap memory */ + debug3("sched/gang: _add_job_to_active: copying job %u into active base", + j_ptr->job_id); + bit_copybits(p_ptr->active_resmap, j_ptr->resmap); + } else { + /* add job to existing jobs in the active resmap */ + debug3("sched/gang: _add_job_to_active: merging job %u into active resmap", + j_ptr->job_id); + bit_or(p_ptr->active_resmap, j_ptr->resmap); + } + + /* add job to the active_cpus array */ + if (gr_type == GS_CPU || gr_type == GS_CORE) { + sz = bit_size(p_ptr->active_resmap); + if (!p_ptr->active_cpus) { + /* create active_cpus array */ + p_ptr->active_cpus = xmalloc(sz * sizeof(uint16_t)); + } + if (p_ptr->jobs_active == 0) { + /* overwrite the existing values in active_cpus */ + a = 0; + for (i = 0; i < sz; i++) { + if (bit_test(j_ptr->resmap, i)) { + p_ptr->active_cpus[i] = + j_ptr->alloc_cpus[a++]; + } else { + p_ptr->active_cpus[i] = 0; + } + } + } else { + /* add job to existing jobs in the active cpus */ + a = 0; + for (i = 0; i < sz; i++) { + if (bit_test(j_ptr->resmap, i)) { + uint16_t limit = _get_phys_res_cnt(i); + p_ptr->active_cpus[i] += + j_ptr->alloc_cpus[a++]; + /* when adding shadows, the resources + * may get overcommitted */ + if (p_ptr->active_cpus[i] > limit) + p_ptr->active_cpus[i] = limit; + } + } + } + } + p_ptr->jobs_active += 1; +} + +static void +_signal_job(uint32_t job_id, int sig) +{ + int rc; suspend_msg_t msg; + + msg.job_id = job_id; + if (sig == GS_SUSPEND) { + debug3("sched/gang: suspending %u", job_id); + msg.op = SUSPEND_JOB; + } else { + debug3("sched/gang: resuming %u", job_id); + msg.op = RESUME_JOB; + } + rc = job_suspend(&msg, 0, -1); + if (rc) + error("sched/gang: error (%d) signaling(%d) job %u", rc, sig, + job_id); +} - msg.job_id = jobid; - msg.op = SUSPEND_JOB; - slurm_rc = job_suspend(&msg, 0, -1); - if (slurm_rc != SLURM_SUCCESS) - error("gang: Failed to suspend job %u (%m)", jobid); - else - info("gang: Suspended job %u", jobid); - return slurm_rc; -} - -/* _context_switch - This is just a very simple proof of concept sample. - * The production version needs to maintain an Ousterhout matrix and - * make intelligent scheduling decisions. This version supports a - * multi-programming level of 2 only. In practice we'll want a - * time slice much larger than 10 seconds too, but that's fine for - * testing. 
- Moe */ -static bool -_context_switch(void) -{ - bool run_scheduler = false; - struct job_record *job_ptr; - ListIterator job_iterator; +static uint32_t +_get_resmap_size() +{ + int i; + uint32_t count = 0; + /* if GS_NODE or GS_CPU, then size is the number of nodes */ + if (gr_type == GS_NODE || gr_type == GS_CPU) + return node_record_count; + /* else the size is the total number of sockets on all nodes */ + for (i = 0; i < node_record_count; i++) { + count += _compute_resources(i, 1); + } + return count; +} - if (!job_list) /* Not yet initialized */ - return false; +/* Load the gs_job struct with the correct + * resmap and CPU array information + */ +static void +_load_alloc_cpus(struct gs_job *j_ptr, bitstr_t *nodemap) +{ + int i, a, alloc_index, sz; - job_iterator = list_iterator_create(job_list); - while ((job_ptr = (struct job_record *) list_next(job_iterator))) { - if (job_ptr->job_state == JOB_RUNNING) - _gang_suspend_job(job_ptr->job_id); - else if (job_ptr->job_state == JOB_SUSPENDED) { - _gang_resume_job(job_ptr->job_id); - run_scheduler = true; + xfree(j_ptr->alloc_cpus); + sz = bit_set_count(j_ptr->resmap); + j_ptr->alloc_cpus = xmalloc(sz * sizeof(uint16_t)); + + a = 0; + alloc_index = 0; + for (i = 0; i < node_record_count; i++) { + uint16_t j, cores, sockets = _compute_resources(i, 1); + + if (bit_test(nodemap, i)) { + for (j = 0; j < sockets; j++) { + cores = select_g_get_job_cores(j_ptr->job_id, + alloc_index, + j); + if (cores > 0) + j_ptr->alloc_cpus[a++] = cores; + } + alloc_index++; } } - list_iterator_destroy(job_iterator); - return run_scheduler; } -/* _gang_thread - A pthread to periodically perform gang scheduler context - * switches. */ -static void * -_gang_thread(void *no_data) +/* return an appropriate resmap given the granularity (GS_NODE/GS_CORE/etc.) */ +/* This code fails if the bitmap size has changed. 
*/ +static bitstr_t * +_get_resmap(bitstr_t *origmap, uint32_t job_id) +{ + int i, alloc_index = 0, map_index = 0; + bitstr_t *newmap; + + if (bit_size(origmap) != node_record_count) { + error("sched/gang: bitmap size has changed from %d for %u", + node_record_count, job_id); + fatal("sched/gang: inconsistent bitmap size error"); + } + if (gr_type == GS_NODE || gr_type == GS_CPU) { + newmap = bit_copy(origmap); + return newmap; + } + + /* for GS_SOCKET and GS_CORE the resmap represents sockets */ + newmap = bit_alloc(gs_resmap_size); + if (!newmap) { + fatal("sched/gang: memory error creating newmap"); + } + for (i = 0; i < node_record_count; i++) { + uint16_t j, cores, sockets = _compute_resources(i, 1); + + if (bit_test(origmap, i)) { + for (j = 0; j < sockets; j++) { + cores = select_g_get_job_cores(job_id, + alloc_index, + j); + if (cores > 0) + bit_set(newmap, map_index); + map_index++; + } + alloc_index++; + } else { + /* no cores allocated on this node */ + map_index += sockets; + } + } + return newmap; +} + +/* construct gs_part_sorted as a sorted list of the current partitions */ +static void +_sort_partitions() { - bool run_scheduler; + struct gs_part *p_ptr; + int i, j, size = 0; - /* Locks: write job and node info */ - slurmctld_lock_t job_write_lock = { - NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; + /* sort all partitions by priority */ + for (p_ptr = gs_part_list; p_ptr; p_ptr = p_ptr->next, size++); - while (!thread_shutdown) { - lock_slurmctld(job_write_lock); - run_scheduler = _context_switch(); - unlock_slurmctld(job_write_lock); - if (run_scheduler) - schedule(); /* has own locking */ - sleep(slice_time); + /* sorted array is new, or number of partitions has changed */ + if (size != num_sorted_part) { + xfree(gs_part_sorted); + gs_part_sorted = xmalloc(size * sizeof(struct gs_part *)); + num_sorted_part = size; + /* load the array */ + i = 0; + for (p_ptr = gs_part_list; p_ptr; p_ptr = p_ptr->next) + gs_part_sorted[i++] = p_ptr; } - pthread_exit((void *) 0); - return NULL; + if (size <= 1) { + gs_part_sorted[0] = gs_part_list; + return; + } + + /* sort array (new array or priorities may have changed) */ + for (j = 0; j < size; j++) { + for (i = j+1; i < size; i++) { + if (gs_part_sorted[i]->priority > + gs_part_sorted[j]->priority) { + struct gs_part *tmp_ptr; + tmp_ptr = gs_part_sorted[j]; + gs_part_sorted[j] = gs_part_sorted[i]; + gs_part_sorted[i] = tmp_ptr; + } + } + } } -/* - * spawn_gang_thread - Create a pthread to perform gang scheduler actions - * - * NOTE: Create only one pthread in any plugin. Some systems leak memory on - * each pthread_create from within a plugin +/* Scan the partition list. Add the given job as a "shadow" to every + * partition with a lower priority than the given partition */ +static void +_cast_shadow(struct gs_job *j_ptr, uint16_t priority) +{ + struct gs_part *p_ptr; + int i; + + for (p_ptr = gs_part_list; p_ptr; p_ptr = p_ptr->next) { + if (p_ptr->priority >= priority) + continue; + + /* This partition has a lower priority, so add + * the job as a "Shadow" */ + if (!p_ptr->shadow) { + p_ptr->shadow_size = default_job_list_size; + p_ptr->shadow = xmalloc(p_ptr->shadow_size * + sizeof(struct gs_job *)); + /* 'shadow' is initialized to be NULL filled */ + } else { + /* does this shadow already exist? 
*/ + for (i = 0; i < p_ptr->num_shadows; i++) { + if (p_ptr->shadow[i] == j_ptr) + break; + } + if (i < p_ptr->num_shadows) + continue; + } + + if (p_ptr->num_shadows+1 >= p_ptr->shadow_size) { + p_ptr->shadow_size *= 2; + xrealloc(p_ptr->shadow, p_ptr->shadow_size * + sizeof(struct gs_job *)); + } + p_ptr->shadow[p_ptr->num_shadows++] = j_ptr; + } +} + +/* Remove the given job as a "shadow" from all partitions */ +static void +_clear_shadow(struct gs_job *j_ptr) +{ + struct gs_part *p_ptr; + int i; + + for (p_ptr = gs_part_list; p_ptr; p_ptr = p_ptr->next) { + + if (!p_ptr->shadow) + continue; + + for (i = 0; i < p_ptr->num_shadows; i++) { + if (p_ptr->shadow[i] == j_ptr) + break; + } + if (i >= p_ptr->num_shadows) + /* job not found */ + continue; + + p_ptr->num_shadows--; + + /* shift all other jobs down */ + for (; i < p_ptr->num_shadows; i++) + p_ptr->shadow[i] = p_ptr->shadow[i+1]; + p_ptr->shadow[p_ptr->num_shadows] = NULL; + } +} + +/* Rebuild the active row BUT preserve the order of existing jobs. + * This is called after one or more jobs have been removed from + * the partition or if a higher priority "shadow" has been added + * which could preempt running jobs. */ -extern int -spawn_gang_thread(void) +static void +_update_active_row(struct gs_part *p_ptr, int add_new_jobs) +{ + int i; + struct gs_job *j_ptr; + + /* rebuild the active row, starting with any shadows */ + p_ptr->jobs_active = 0; + for (i = 0; p_ptr->shadow && p_ptr->shadow[i]; i++) { + _add_job_to_active(p_ptr->shadow[i], p_ptr); + } + + /* attempt to add the existing 'active' jobs */ + for (i = 0; i < p_ptr->num_jobs; i++) { + j_ptr = p_ptr->job_list[i]; + if (j_ptr->row_state != GS_ACTIVE) + continue; + if (_job_fits_in_active_row(j_ptr, p_ptr)) { + _add_job_to_active(j_ptr, p_ptr); + _cast_shadow(j_ptr, p_ptr->priority); + + } else { + /* this job has been preempted by a shadow job. + * suspend it and preserve it's job_list order */ + if (j_ptr->sig_state != GS_SUSPEND) { + _signal_job(j_ptr->job_id, GS_SUSPEND); + j_ptr->sig_state = GS_SUSPEND; + _clear_shadow(j_ptr); + } + j_ptr->row_state = GS_NO_ACTIVE; + } + } + /* attempt to add the existing 'filler' jobs */ + for (i = 0; i < p_ptr->num_jobs; i++) { + j_ptr = p_ptr->job_list[i]; + if (j_ptr->row_state != GS_FILLER) + continue; + if (_job_fits_in_active_row(j_ptr, p_ptr)) { + _add_job_to_active(j_ptr, p_ptr); + _cast_shadow(j_ptr, p_ptr->priority); + } else { + /* this job has been preempted by a shadow job. 
+ * suspend it and preserve it's job_list order */ + if (j_ptr->sig_state != GS_SUSPEND) { + _signal_job(j_ptr->job_id, GS_SUSPEND); + j_ptr->sig_state = GS_SUSPEND; + _clear_shadow(j_ptr); + } + j_ptr->row_state = GS_NO_ACTIVE; + } + } + + if (!add_new_jobs) + return; + + /* attempt to add any new jobs */ + for (i = 0; i < p_ptr->num_jobs; i++) { + j_ptr = p_ptr->job_list[i]; + if (j_ptr->row_state != GS_NO_ACTIVE) + continue; + if (_job_fits_in_active_row(j_ptr, p_ptr)) { + _add_job_to_active(j_ptr, p_ptr); + _cast_shadow(j_ptr, p_ptr->priority); + /* note that this job is a "filler" for this row */ + j_ptr->row_state = GS_FILLER; + /* resume the job */ + if (j_ptr->sig_state == GS_SUSPEND) { + _signal_job(j_ptr->job_id, GS_RESUME); + j_ptr->sig_state = GS_RESUME; + } + } + } +} + +/* rebuild all active rows without reordering jobs: + * - attempt to preserve running jobs + * - suspend any jobs that have been "shadowed" (preempted) + * - resume any "filler" jobs that can be found + */ +static void +_update_all_active_rows() +{ + int i; + + /* Sort the partitions. This way the shadows of any high-priority + * jobs are appropriately adjusted before the lower priority + * partitions are updated */ + _sort_partitions(); + + for (i = 0; i < num_sorted_part; i++) { + _update_active_row(gs_part_sorted[i], 1); + } +} + +/* remove the given job from the given partition */ +static void +_remove_job_from_part(uint32_t job_id, struct gs_part *p_ptr) +{ + int i; + struct gs_job *j_ptr; + + if (!job_id || !p_ptr) + return; + + debug3("sched/gang: _remove_job_from_part: removing job %u", job_id); + /* find the job in the job_list */ + i = _find_job_index(p_ptr, job_id); + if (i < 0) + /* job not found */ + return; + + j_ptr = p_ptr->job_list[i]; + + /* remove any shadow first */ + _clear_shadow(j_ptr); + + /* remove the job from the job_list by shifting everyone else down */ + p_ptr->num_jobs -= 1; + for (; i < p_ptr->num_jobs; i++) { + p_ptr->job_list[i] = p_ptr->job_list[i+1]; + } + p_ptr->job_list[i] = NULL; + + /* make sure the job is not suspended, and then delete it */ + if (j_ptr->sig_state == GS_SUSPEND) { + debug3("sched/gang: _remove_job_from_part: resuming suspended job %u", + j_ptr->job_id); + _signal_job(j_ptr->job_id, GS_RESUME); + } + bit_free(j_ptr->resmap); + j_ptr->resmap = NULL; + if (j_ptr->alloc_cpus) + xfree(j_ptr->alloc_cpus); + j_ptr->alloc_cpus = NULL; + xfree(j_ptr); + + return; +} + +/* Add the given job to the given partition, and if it remains running + * then "cast it's shadow" over the active row of any partition with a + * lower priority than the given partition. Return the sig state of the + * job (GS_SUSPEND or GS_RESUME) */ +static uint16_t +_add_job_to_part(struct gs_part *p_ptr, uint32_t job_id, bitstr_t *job_bitmap) +{ + int i; + struct gs_job *j_ptr; + + xassert(p_ptr); + xassert(job_id > 0); + xassert(job_bitmap); + + debug3("sched/gang: _add_job_to_part: adding job %u", job_id); + _print_jobs(p_ptr); + + /* take care of any memory needs */ + if (!p_ptr->job_list) { + p_ptr->job_list_size = default_job_list_size; + p_ptr->job_list = xmalloc(p_ptr->job_list_size * + sizeof(struct gs_job *)); + /* job_list is initialized to be NULL filled */ + } + + /* protect against duplicates */ + i = _find_job_index(p_ptr, job_id); + if (i >= 0) { + /* This job already exists, but the resource allocation + * may have changed. In any case, remove the existing + * job before adding this new one. 
+ */ + debug3("sched/gang: _add_job_to_part: duplicate job %u detected", + job_id); + _remove_job_from_part(job_id, p_ptr); + _update_active_row(p_ptr, 0); + } + + /* more memory management */ + if (p_ptr->num_jobs+1 == p_ptr->job_list_size) { + p_ptr->job_list_size *= 2; + xrealloc(p_ptr->job_list, p_ptr->job_list_size * + sizeof(struct gs_job *)); + for (i = p_ptr->num_jobs+1; i < p_ptr->job_list_size; i++) + p_ptr->job_list[i] = NULL; + } + j_ptr = xmalloc(sizeof(struct gs_job)); + + /* gather job info */ + j_ptr->job_id = job_id; + j_ptr->sig_state = GS_RESUME; /* all jobs are running initially */ + j_ptr->row_state = GS_NO_ACTIVE; /* job is not in the active row */ + j_ptr->resmap = _get_resmap(job_bitmap, job_id); + j_ptr->alloc_cpus = NULL; + if (gr_type == GS_CORE || gr_type == GS_CPU) { + _load_alloc_cpus(j_ptr, job_bitmap); + } + + /* append this job to the job_list */ + p_ptr->job_list[p_ptr->num_jobs++] = j_ptr; + + /* determine the immediate fate of this job (run or suspend) */ + if (_job_fits_in_active_row(j_ptr, p_ptr)) { + debug3("sched/gang: _add_job_to_part: adding job %u to active row", + job_id); + _add_job_to_active(j_ptr, p_ptr); + /* note that this job is a "filler" for this row */ + j_ptr->row_state = GS_FILLER; + /* all jobs begin in the run state, so + * there's no need to signal this job */ + + /* since this job is running we need to "cast it's shadow" + * over lower priority partitions */ + _cast_shadow(j_ptr, p_ptr->priority); + + } else { + debug3("sched/gang: _add_job_to_part: suspending job %u", + job_id); + _signal_job(j_ptr->job_id, GS_SUSPEND); + j_ptr->sig_state = GS_SUSPEND; + } + + _print_jobs(p_ptr); + + return j_ptr->sig_state; +} + +/* ensure that all jobs running in SLURM are accounted for. + * this procedure assumes that the gs data has already been + * locked by the caller! + */ +static void +_scan_slurm_job_list() +{ + struct job_record *job_ptr; + struct gs_part *p_ptr; + int i; + ListIterator job_iterator; + + if (!job_list) { /* no jobs */ + return; + } + debug3("sched/gang: _scan_slurm_job_list: job_list exists..."); + job_iterator = list_iterator_create(job_list); + while ((job_ptr = (struct job_record *) list_next(job_iterator))) { + debug3("sched/gang: _scan_slurm_job_list: checking job %u", + job_ptr->job_id); + if (job_ptr->job_state == JOB_PENDING) + continue; + if (job_ptr->job_state == JOB_SUSPENDED || + job_ptr->job_state == JOB_RUNNING) { + /* are we tracking this job already? */ + p_ptr = _find_gs_part(job_ptr->partition); + if (!p_ptr) /* no partition */ + continue; + i = _find_job_index(p_ptr, job_ptr->job_id); + if (i >= 0) + /* we're tracking it, so continue */ + continue; + + /* We're not tracking this job. Resume it if it's + * suspended, and then add it to the job list. */ + + if (job_ptr->job_state == JOB_SUSPENDED) + /* The likely scenario here is that the slurmctld has + * failed over, and this is a job that the sched/gang + * plugin had previously suspended. + * It's not possible to determine the previous order + * of jobs without preserving sched/gang state, which + * is not worth the extra infrastructure. Just resume + * the job and then add it to the job list. + */ + _signal_job(job_ptr->job_id, GS_RESUME); + + _add_job_to_part(p_ptr, job_ptr->job_id, + job_ptr->node_bitmap); + continue; + } + + /* if the job is not pending, suspended, or running, then + it's completing or completed. 
Make sure we've released
+	   this job */
+		p_ptr = _find_gs_part(job_ptr->partition);
+		if (!p_ptr) /* no partition */
+			continue;
+		_remove_job_from_part(job_ptr->job_id, p_ptr);
+	}
+	list_iterator_destroy(job_iterator);
+
+	/* now that all of the old jobs have been flushed out,
+	 * update the active row of all partitions */
+	_update_all_active_rows();
+
+	return;
+}
+
+
+/****************************
+ * SLURM Timeslicer Hooks
+ *
+ * Here is a summary of the primary activities that occur
+ * within this plugin:
+ *
+ * gs_init: initialize plugin
+ *
+ * gs_job_start: a new allocation has been created
+ * gs_job_scan: synchronize with master job list
+ * gs_job_fini: an existing allocation has been cleared
+ * gs_reconfig: refresh partition and job data
+ * _cycle_job_list: timeslicer thread is rotating jobs
+ *
+ * gs_fini: terminate plugin
+ *
+ ***************************/
+
+static void
+_spawn_timeslicer_thread()
 {
 	pthread_attr_t thread_attr_msg;
 	pthread_mutex_lock( &thread_flag_mutex );
 	if (thread_running) {
-		error("gang thread already running, not starting another");
+		error("timeslicer thread already running, not starting another");
 		pthread_mutex_unlock(&thread_flag_mutex);
-		return SLURM_ERROR;
+		return;
 	}
-	_parse_gang_config();
 	slurm_attr_init(&thread_attr_msg);
-	if (pthread_create(&gang_thread_id, &thread_attr_msg,
-			_gang_thread, NULL))
+	if (pthread_create(&timeslicer_thread_id, &thread_attr_msg,
+			_timeslicer_thread, NULL))
 		fatal("pthread_create %m");
 	slurm_attr_destroy(&thread_attr_msg);
 	thread_running = true;
 	pthread_mutex_unlock(&thread_flag_mutex);
+}
+
+extern int
+gs_init()
+{
+	/* initialize global variables */
+	debug3("sched/gang: entering gs_init");
+	timeslicer_seconds = slurmctld_conf.sched_time_slice;
+	gs_fast_schedule = slurm_get_fast_schedule();
+	gr_type = _get_gr_type();
+	gs_resmap_size = _get_resmap_size();
+
+	/* load the physical resource count data */
+	_load_phys_res_cnt();
+
+	pthread_mutex_lock(&data_mutex);
+	_build_parts();
+	/* load any currently running jobs */
+	_scan_slurm_job_list();
+	pthread_mutex_unlock(&data_mutex);
+
+	/* spawn the timeslicer thread */
+	_spawn_timeslicer_thread();
+	debug3("sched/gang: leaving gs_init");
 	return SLURM_SUCCESS;
 }
-extern void
-term_gang_thread(void)
+extern int
+gs_fini()
 {
+	/* terminate the timeslicer thread */
+	debug3("sched/gang: entering gs_fini");
 	pthread_mutex_lock(&thread_flag_mutex);
 	if (thread_running) {
 		int i;
 		thread_shutdown = true;
 		for (i=0; i<4; i++) {
-			if (pthread_cancel(gang_thread_id)) {
-				gang_thread_id = 0;
+			if (pthread_cancel(timeslicer_thread_id)) {
+				timeslicer_thread_id = 0;
 				break;
 			}
 			usleep(1000);
 		}
-		if (gang_thread_id)
-			error("Cound not kill gang pthread");
+		if (timeslicer_thread_id)
+			error("sched/gang: Cound not kill timeslicer pthread");
 	}
 	pthread_mutex_unlock(&thread_flag_mutex);
+
+	pthread_mutex_lock(&data_mutex);
+	_destroy_parts();
+	xfree(gs_part_sorted);
+	gs_part_sorted = NULL;
+	xfree(gs_cpus_per_res);
+	xfree(gs_cpu_count_reps);
+	gs_num_groups = 0;
+	pthread_mutex_unlock(&data_mutex);
+	debug3("sched/gang: leaving gs_fini");
+
+	return SLURM_SUCCESS;
 }
+extern int
+gs_job_start(struct job_record *job_ptr)
+{
+	struct gs_part *p_ptr;
+	uint16_t job_state;
+
+	debug3("sched/gang: entering gs_job_start");
+	/* add job to partition */
+	pthread_mutex_lock(&data_mutex);
+	p_ptr = _find_gs_part(job_ptr->partition);
+	if (p_ptr) {
+		job_state = _add_job_to_part(p_ptr, job_ptr->job_id,
+					     job_ptr->node_bitmap);
+		/* if this job is running then check for preemption */
+		if
(job_state == GS_RESUME) + _update_all_active_rows(); + } + pthread_mutex_unlock(&data_mutex); + + if (!p_ptr) { + /* No partition was found for this job, so let it run + * uninterupted (what else can we do?) + */ + error("sched_gang: could not find partition %s for job %u", + job_ptr->partition, job_ptr->job_id); + } + debug3("sched/gang: leaving gs_job_start"); + return SLURM_SUCCESS; +} + +extern int +gs_job_scan(void) +{ + /* scan the master SLURM job list for any new + * jobs to add, or for any old jobs to remove + */ + debug3("sched/gang: entering gs_job_scan"); + pthread_mutex_lock(&data_mutex); + _scan_slurm_job_list(); + pthread_mutex_unlock(&data_mutex); + debug3("sched/gang: leaving gs_job_scan"); + + return SLURM_SUCCESS; +} + +extern int +gs_job_fini(struct job_record *job_ptr) +{ + struct gs_part *p_ptr; + + debug3("sched/gang: entering gs_job_fini"); + pthread_mutex_lock(&data_mutex); + p_ptr = _find_gs_part(job_ptr->partition); + if (!p_ptr) { + pthread_mutex_unlock(&data_mutex); + debug3("sched/gang: leaving gs_job_fini"); + return SLURM_SUCCESS; + } + + /* remove job from the partition */ + _remove_job_from_part(job_ptr->job_id, p_ptr); + /* this job may have preempted other jobs, so + * check by updating all active rows */ + _update_all_active_rows(); + pthread_mutex_unlock(&data_mutex); + debug3("sched/gang: leaving gs_job_fini"); + + return SLURM_SUCCESS; +} + +/* rebuild from scratch */ +/* A reconfigure can affect this plugin in these ways: + * - partitions can be added or removed + * - this affects the gs_part_list + * - nodes can be removed from a partition, or added to a partition + * - this affects the size of the active resmap + * + * If nodes have been added or removed, then the node_record_count + * will be different from gs_resmap_size. In this case, we need + * to resize the existing resmaps to prevent errors when comparing + * them. + * + * Here's the plan: + * 1. save a copy of the global structures, and then construct + * new ones. + * 2. load the new partition structures with existing jobs, + * confirming the job exists and resizing their resmaps + * (if necessary). + * 3. make sure all partitions are accounted for. If a partition + * was removed, make sure any jobs that were in the queue and + * that were suspended are resumed. Conversely, if a partition + * was added, check for existing jobs that may be contending + * for resources that we could begin timeslicing. + * 4. delete the old global structures and return. + */ +extern int +gs_reconfig() +{ + int i; + struct gs_part *p_ptr, *old_part_list, *newp_ptr; + struct job_record *job_ptr; + + debug3("sched/gang: entering gs_reconfig"); + pthread_mutex_lock(&data_mutex); + + old_part_list = gs_part_list; + gs_part_list = NULL; + _build_parts(); + + /* scan the old part list and add existing jobs to the new list */ + for (p_ptr = old_part_list; p_ptr; p_ptr = p_ptr->next) { + newp_ptr = _find_gs_part(p_ptr->part_name); + if (!newp_ptr) { + /* this partition was removed, so resume + * any suspended jobs and continue */ + for (i = 0; i < p_ptr->num_jobs; i++) { + if (p_ptr->job_list[i]->sig_state == GS_SUSPEND) { + _signal_job(p_ptr->job_list[i]->job_id, + GS_RESUME); + p_ptr->job_list[i]->sig_state = GS_RESUME; + } + } + continue; + } + if (p_ptr->num_jobs == 0) + /* no jobs to transfer */ + continue; + /* we need to transfer the jobs from p_ptr to new_ptr and + * adjust their resmaps (if necessary). then we need to create + * the active resmap and adjust the state of each job (if + * necessary). 
NOTE: there could be jobs that only overlap + * on nodes that are no longer in the partition, but we're + * not going to worry about those cases. + */ + /* add the jobs from p_ptr into new_ptr in their current order + * to preserve the state of timeslicing. + */ + for (i = 0; i < p_ptr->num_jobs; i++) { + job_ptr = find_job_record(p_ptr->job_list[i]->job_id); + if (job_ptr == NULL) { + /* job no longer exists in SLURM, so drop it */ + continue; + } + /* resume any job that is suspended */ + if (job_ptr->job_state == JOB_SUSPENDED) + _signal_job(job_ptr->job_id, GS_RESUME); + + /* transfer the job as long as it is still active */ + if (job_ptr->job_state == JOB_SUSPENDED || + job_ptr->job_state == JOB_RUNNING) { + _add_job_to_part(newp_ptr, job_ptr->job_id, + job_ptr->node_bitmap); + } + } + } + + /* confirm all jobs. Scan the master job_list and confirm that we + * are tracking all jobs */ + _scan_slurm_job_list(); + + /* Finally, destroy the old data */ + p_ptr = gs_part_list; + gs_part_list = old_part_list; + _destroy_parts(); + gs_part_list = p_ptr; + + pthread_mutex_unlock(&data_mutex); + debug3("sched/gang: leaving gs_reconfig"); + return SLURM_SUCCESS; +} + +/************************************ + * Timeslicer Functions + ***********************************/ + +/* Build the active row from the job_list. + * The job_list is assumed to be sorted */ +static void +_build_active_row(struct gs_part *p_ptr) +{ + int i; + + debug3("sched/gang: entering _build_active_row"); + p_ptr->jobs_active = 0; + if (p_ptr->num_jobs == 0) + return; + + /* apply all shadow jobs first */ + for (i = 0; i < p_ptr->num_shadows; i++) { + _add_job_to_active(p_ptr->shadow[i], p_ptr); + } + + /* attempt to add jobs from the job_list in the current order */ + for (i = 0; i < p_ptr->num_jobs; i++) { + if (_job_fits_in_active_row(p_ptr->job_list[i], p_ptr)) { + _add_job_to_active(p_ptr->job_list[i], p_ptr); + p_ptr->job_list[i]->row_state = GS_ACTIVE; + } + } + debug3("sched/gang: leaving _build_active_row"); +} + +/* _cycle_job_list + * + * This is the heart of the timeslicer. The algorithm works as follows: + * + * 1. Each new job is added to the end of the job list, so the earliest job + * is at the front of the list. + * 2. Any "shadow" jobs are first applied to the active_resmap. Then the + * active_resmap is filled out by starting with the first job in the list, + * and adding to it any job that doesn't conflict with the resources. + * 3. When the timeslice has passed, all jobs that were added to the active + * resmap are moved to the back of the list (preserving their order among + * each other). + * 4. Loop back to step 2, starting with the new "first job in the list". + */ +static void +_cycle_job_list(struct gs_part *p_ptr) +{ + int i, j; + struct gs_job *j_ptr; + + debug3("sched/gang: entering _cycle_job_list"); + _print_jobs(p_ptr); + /* re-prioritize the job_list and set all row_states to GS_NO_ACTIVE */ + for (i = 0; i < p_ptr->num_jobs; i++) { + while (p_ptr->job_list[i]->row_state == GS_ACTIVE) { + /* move this job to the back row and "de-activate" it */ + j_ptr = p_ptr->job_list[i]; + j_ptr->row_state = GS_NO_ACTIVE; + for (j = i; j+1 < p_ptr->num_jobs; j++) { + p_ptr->job_list[j] = p_ptr->job_list[j+1]; + } + p_ptr->job_list[j] = j_ptr; + } + if (p_ptr->job_list[i]->row_state == GS_FILLER) + p_ptr->job_list[i]->row_state = GS_NO_ACTIVE; + + } + debug3("sched/gang: _cycle_job_list reordered job list:"); + _print_jobs(p_ptr); + /* Rebuild the active row. 
*/ + _build_active_row(p_ptr); + debug3("sched/gang: _cycle_job_list new active job list:"); + _print_jobs(p_ptr); + + /* Suspend running jobs that are GS_NO_ACTIVE */ + for (i = 0; i < p_ptr->num_jobs; i++) { + j_ptr = p_ptr->job_list[i]; + if (j_ptr->row_state == GS_NO_ACTIVE && + j_ptr->sig_state == GS_RESUME) { + debug3("sched/gang: _cycle_job_list: suspending job %u", + j_ptr->job_id); + _signal_job(j_ptr->job_id, GS_SUSPEND); + j_ptr->sig_state = GS_SUSPEND; + _clear_shadow(j_ptr); + } + } + + /* Resume suspended jobs that are GS_ACTIVE */ + for (i = 0; i < p_ptr->num_jobs; i++) { + j_ptr = p_ptr->job_list[i]; + if (j_ptr->row_state == GS_ACTIVE && + j_ptr->sig_state == GS_SUSPEND) { + debug3("sched/gang: _cycle_job_list: resuming job %u", + j_ptr->job_id); + _signal_job(j_ptr->job_id, GS_RESUME); + j_ptr->sig_state = GS_RESUME; + _cast_shadow(j_ptr, p_ptr->priority); + } + } + debug3("sched/gang: leaving _cycle_job_list"); +} + +/* The timeslicer thread */ +static void * +_timeslicer_thread() { + struct gs_part *p_ptr; + int i; + + debug3("sched/gang: starting timeslicer loop"); + while (!thread_shutdown) { + pthread_mutex_lock(&data_mutex); + + _sort_partitions(); + + /* scan each partition... */ + debug3("sched/gang: _timeslicer_thread: scanning partitions"); + for (i = 0; i < num_sorted_part; i++) { + p_ptr = gs_part_sorted[i]; + debug3("sched/gang: _timeslicer_thread: part %s: run %u total %u", + p_ptr->part_name, p_ptr->jobs_active, + p_ptr->num_jobs); + if (p_ptr->jobs_active < + p_ptr->num_jobs + p_ptr->num_shadows) + _cycle_job_list(p_ptr); + } + pthread_mutex_unlock(&data_mutex); + + /* sleep AND check for thread termination requests */ + pthread_testcancel(); + debug3("sched/gang: _timeslicer_thread: preparing to sleep"); + sleep(timeslicer_seconds); + debug3("sched/gang: _timeslicer_thread: waking up"); + pthread_testcancel(); + } + pthread_exit((void *) 0); + return NULL; +} diff --git a/src/plugins/sched/gang/gang.h b/src/plugins/sched/gang/gang.h index a08d7b234..520ea513a 100644 --- a/src/plugins/sched/gang/gang.h +++ b/src/plugins/sched/gang/gang.h @@ -1,10 +1,9 @@ /*****************************************************************************\ * gang.h - Gang scheduler definitions ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Written by Chris Holmes + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -13,18 +12,18 @@ * the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. 
If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in + * version. If you delete this exception statement from all source files in * the program, then also delete it here. - * + * * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more @@ -43,9 +42,14 @@ #include "src/common/plugin.h" #include "src/common/log.h" +#include "src/slurmctld/job_scheduler.h" #include "src/slurmctld/slurmctld.h" -extern int spawn_gang_thread(void); -extern void term_gang_thread(void); +extern int gs_init(void); +extern int gs_fini(void); +extern int gs_job_start(struct job_record *job_ptr); +extern int gs_job_scan(void); +extern int gs_job_fini(struct job_record *job_ptr); +extern int gs_reconfig(void); #endif diff --git a/src/plugins/sched/gang/sched_gang.c b/src/plugins/sched/gang/sched_gang.c index 5f29755f3..52095c6a8 100644 --- a/src/plugins/sched/gang/sched_gang.c +++ b/src/plugins/sched/gang/sched_gang.c @@ -1,10 +1,9 @@ /*****************************************************************************\ * sched_gang.c - Gang scheduler plugin functions. ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. + * Written by Chris Holmes + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -13,18 +12,18 @@ * the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. 
You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in + * version. If you delete this exception statement from all source files in * the program, then also delete it here. - * + * * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more @@ -39,7 +38,7 @@ const char plugin_name[] = "Gang Scheduler plugin"; const char plugin_type[] = "sched/gang"; -const uint32_t plugin_version = 100; +const uint32_t plugin_version = 101; /* A plugin-global errno. */ static int plugin_errno = SLURM_SUCCESS; @@ -50,7 +49,7 @@ static int plugin_errno = SLURM_SUCCESS; extern int init( void ) { verbose( "gang scheduler plugin loaded" ); - return spawn_gang_thread(); + return gs_init(); } /**************************************************************************/ @@ -58,7 +57,7 @@ extern int init( void ) /**************************************************************************/ extern void fini( void ) { - term_gang_thread(); + gs_fini(); } /**************************************************************************/ @@ -66,7 +65,7 @@ extern void fini( void ) /**************************************************************************/ int slurm_sched_plugin_reconfig( void ) { - return SLURM_SUCCESS; + return gs_reconfig(); } /***************************************************************************/ @@ -74,9 +73,34 @@ int slurm_sched_plugin_reconfig( void ) /***************************************************************************/ extern int slurm_sched_plugin_schedule( void ) { + /* synchronize job listings */ + debug3("sched/gang: slurm_sched_schedule called"); + /* return gs_job_scan();*/ return SLURM_SUCCESS; } +/***************************************************************************/ +/* TAG( slurm_sched_plugin_newalloc ) */ +/***************************************************************************/ +extern int slurm_sched_plugin_newalloc( struct job_record *job_ptr ) +{ + if (!job_ptr) + return SLURM_ERROR; + debug3("sched/gang: slurm_sched_newalloc called"); + return gs_job_start(job_ptr); +} + +/***************************************************************************/ +/* TAG( slurm_sched_plugin_freealloc ) */ +/***************************************************************************/ +extern int slurm_sched_plugin_freealloc( struct job_record *job_ptr ) +{ + if (!job_ptr) + return SLURM_ERROR; + debug3("sched/gang: slurm_sched_freealloc called"); + return gs_job_fini(job_ptr); +} + /**************************************************************************/ /* TAG( slurm_sched_plugin_initial_priority ) */ @@ -85,6 +109,8 @@ extern uint32_t slurm_sched_plugin_initial_priority( uint32_t last_prio, struct job_record *job_ptr ) { + /* ignored for timeslicing, but will be used to support priority */ + if (last_prio >= 2) return (last_prio - 1); else @@ -96,7 +122,8 @@ slurm_sched_plugin_initial_priority( uint32_t last_prio, /**************************************************************************/ void slurm_sched_plugin_job_is_pending( void ) { - /* No action required */ + /* 
synchronize job listings? Here? */ + /*return gs_job_scan();*/ } /**************************************************************************/ @@ -104,7 +131,7 @@ void slurm_sched_plugin_job_is_pending( void ) /**************************************************************************/ void slurm_sched_plugin_partition_change( void ) { - /* No action required */ + gs_reconfig(); } /**************************************************************************/ @@ -123,3 +150,18 @@ char *slurm_sched_strerror( int errnum ) return NULL; } +/**************************************************************************/ +/* TAG( slurm_sched_plugin_requeue ) */ +/**************************************************************************/ +void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason ) +{ + /* Empty. */ +} + +/**************************************************************************/ +/* TAG( slurm_sched_get_conf ) */ +/**************************************************************************/ +char *slurm_sched_get_conf( void ) +{ + return NULL; +} diff --git a/src/plugins/sched/hold/Makefile.in b/src/plugins/sched/hold/Makefile.in index 3100ebc06..4f1130aa2 100644 --- a/src/plugins/sched/hold/Makefile.in +++ b/src/plugins/sched/hold/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -79,7 +81,7 @@ sched_hold_la_OBJECTS = $(am_sched_hold_la_OBJECTS) sched_hold_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(sched_hold_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = -fexceptions CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -297,8 +310,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -306,8 +319,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -361,8 +374,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
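The rotation that _cycle_job_list() applies to each partition's job list in the gang.c hunks above can be reduced to a short standalone sketch. The toy_job type, the rotate_active_to_back() helper and the job IDs below are illustrative only and are not part of SLURM or of this patch; they assume only what the _cycle_job_list comment states: jobs whose row_state is GS_ACTIVE are moved to the back of the list while the relative order of all jobs is preserved.

/*
 * Standalone sketch (not part of the patch) of the rotation step performed
 * by _cycle_job_list() in gang.c above.  The struct, helper name and job
 * IDs are invented for illustration; the real code operates on the
 * gs_job pointer array of a gs_part.
 */
#include <stdio.h>

#define GS_NO_ACTIVE 0
#define GS_ACTIVE    1

struct toy_job {
	unsigned int job_id;
	int row_state;
};

/* Move active jobs to the back of the list, preserving relative order */
static void rotate_active_to_back(struct toy_job *list, int num_jobs)
{
	int i, j;
	struct toy_job tmp;

	for (i = 0; i < num_jobs; i++) {
		while (list[i].row_state == GS_ACTIVE) {
			tmp = list[i];
			tmp.row_state = GS_NO_ACTIVE;	/* "de-activate" it */
			for (j = i; j + 1 < num_jobs; j++)
				list[j] = list[j + 1];
			list[j] = tmp;
		}
	}
}

int main(void)
{
	/* jobs 101 and 103 were active during the last time slice */
	struct toy_job jobs[] = {
		{ 101, GS_ACTIVE }, { 102, GS_NO_ACTIVE },
		{ 103, GS_ACTIVE }, { 104, GS_NO_ACTIVE },
	};
	int i, n = sizeof(jobs) / sizeof(jobs[0]);

	rotate_active_to_back(jobs, n);
	for (i = 0; i < n; i++)
		printf("%u ", jobs[i].job_id);
	printf("\n");	/* prints: 102 104 101 103 */
	return 0;
}

Because only the jobs that ran during the last time slice are moved to the back, the jobs left at the front of the list are the first candidates when the next active row is built.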
tags: TAGS @@ -374,8 +387,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -385,13 +398,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/sched/hold/hold_wrapper.c b/src/plugins/sched/hold/hold_wrapper.c index c488374c8..7e6dc6405 100644 --- a/src/plugins/sched/hold/hold_wrapper.c +++ b/src/plugins/sched/hold/hold_wrapper.c @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -88,6 +88,24 @@ slurm_sched_plugin_schedule( void ) return SLURM_SUCCESS; } +/***************************************************************************/ +/* TAG( slurm_sched_plugin_newalloc ) */ +/***************************************************************************/ +int +slurm_sched_plugin_newalloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + +/***************************************************************************/ +/* TAG( slurm_sched_plugin_freealloc ) */ +/***************************************************************************/ +int +slurm_sched_plugin_freealloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + /**************************************************************************/ /* TAG( slurm_sched_plugin_initial_priority ) */ @@ -139,3 +157,18 @@ char *slurm_sched_strerror( int errnum ) return NULL; } +/**************************************************************************/ +/* TAG( slurm_sched_plugin_requeue ) */ +/**************************************************************************/ +void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason ) +{ + /* Empty. */ +} + +/**************************************************************************/ +/* TAG( slurm_sched_get_conf ) */ +/**************************************************************************/ +char *slurm_sched_get_conf( void ) +{ + return NULL; +} diff --git a/src/plugins/sched/wiki/Makefile.in b/src/plugins/sched/wiki/Makefile.in index 17ec4ae84..7efbdab57 100644 --- a/src/plugins/sched/wiki/Makefile.in +++ b/src/plugins/sched/wiki/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. 
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -81,7 +83,7 @@ sched_wiki_la_OBJECTS = $(am_sched_wiki_la_OBJECTS) sched_wiki_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(sched_wiki_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -121,6 +123,7 @@ CXXFLAGS = -fexceptions CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -134,10 +137,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -157,7 +163,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -168,6 +177,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -183,6 +194,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -198,6 +210,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -313,8 +326,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -322,8 +335,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -387,8 +400,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -400,8 +413,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -411,13 +424,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/sched/wiki/cancel_job.c b/src/plugins/sched/wiki/cancel_job.c index ec392bbdd..a74921f06 100644 --- a/src/plugins/sched/wiki/cancel_job.c +++ b/src/plugins/sched/wiki/cancel_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki/get_jobs.c b/src/plugins/sched/wiki/get_jobs.c index 687bd65cd..e638098a1 100644 --- a/src/plugins/sched/wiki/get_jobs.c +++ b/src/plugins/sched/wiki/get_jobs.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -42,6 +42,7 @@ #include "src/common/hostlist.h" #include "src/common/list.h" #include "src/common/uid.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" @@ -125,8 +126,8 @@ extern int get_jobs(char *cmd_ptr, int *err_code, char **err_msg) /* report all jobs */ buf = _dump_all_jobs(&job_rec_cnt, update_time); } else { - struct job_record *job_ptr; - char *job_name, *tmp2_char; + struct job_record *job_ptr = NULL; + char *job_name = NULL, *tmp2_char = NULL; uint32_t job_id; job_name = strtok_r(tmp_char, ":", &tmp2_char); @@ -148,7 +149,10 @@ extern int get_jobs(char *cmd_ptr, int *err_code, char **err_msg) if (buf) buf_size = strlen(buf); tmp_buf = xmalloc(buf_size + 32); - sprintf(tmp_buf, "SC=0 ARG=%d#%s", job_rec_cnt, buf); + if (job_rec_cnt) + sprintf(tmp_buf, "SC=0 ARG=%d#%s", job_rec_cnt, buf); + else + sprintf(tmp_buf, "SC=0 ARG=0#"); xfree(buf); *err_code = 0; *err_msg = tmp_buf; @@ -241,18 +245,18 @@ static char * _dump_job(struct job_record *job_ptr, time_t update_time) } snprintf(tmp, sizeof(tmp), - "UPDATETIME=%u;WCLIMIT=%u;", + "UPDATETIME=%u;WCLIMIT=%u;TASKS=%u;", (uint32_t) job_ptr->time_last_active, - (uint32_t) _get_job_time_limit(job_ptr)); + (uint32_t) _get_job_time_limit(job_ptr), + _get_job_tasks(job_ptr)); xstrcat(buf, tmp); - /* Don't report actual tasks or nodes allocated since - * this can impact requeue on heterogenous clusters */ - snprintf(tmp, sizeof(tmp), - "TASKS=%u;NODES=%u;", - _get_job_tasks(job_ptr), - _get_job_min_nodes(job_ptr)); - xstrcat(buf, tmp); + if (!IS_JOB_FINISHED(job_ptr)) { + snprintf(tmp, sizeof(tmp), + "NODES=%u;", + _get_job_min_nodes(job_ptr)); + xstrcat(buf, tmp); + } snprintf(tmp, sizeof(tmp), "DPROCS=%u;", @@ -344,7 +348,8 @@ static uint32_t _get_job_min_disk(struct job_record *job_ptr) static uint32_t _get_job_min_nodes(struct job_record *job_ptr) { if (job_ptr->job_state > JOB_PENDING) { - /* return actual count of allocated nodes */ + /* return actual count of currently allocated nodes. + * NOTE: gets decremented to zero while job is completing */ return job_ptr->node_cnt; } @@ -372,15 +377,21 @@ static uint32_t _get_job_submit_time(struct job_record *job_ptr) static uint32_t _get_job_tasks(struct job_record *job_ptr) { - uint32_t task_cnt = 1; - - if (job_ptr->num_procs) - task_cnt = job_ptr->num_procs; + uint32_t task_cnt; - if (job_ptr->details) { - task_cnt = MAX(task_cnt, - (_get_job_min_nodes(job_ptr) * - job_ptr->details->ntasks_per_node)); + if (job_ptr->job_state > JOB_PENDING) { + task_cnt = job_ptr->total_procs; + } else { + if (job_ptr->num_procs) + task_cnt = job_ptr->num_procs; + else + task_cnt = 1; + if (job_ptr->details) { + task_cnt = MAX(task_cnt, + (_get_job_min_nodes(job_ptr) * + job_ptr->details-> + ntasks_per_node)); + } } return task_cnt / _get_job_cpus_per_task(job_ptr); @@ -414,12 +425,12 @@ static char * _get_job_state(struct job_record *job_ptr) return "Running"; } - if (base_state == JOB_PENDING) - return "Idle"; if (base_state == JOB_RUNNING) return "Running"; if (base_state == JOB_SUSPENDED) return "Suspended"; + if (base_state == JOB_PENDING) + return "Idle"; if (base_state == JOB_COMPLETE) return "Completed"; diff --git a/src/plugins/sched/wiki/get_nodes.c b/src/plugins/sched/wiki/get_nodes.c index 8569e9580..7c51c106d 100644 --- a/src/plugins/sched/wiki/get_nodes.c +++ b/src/plugins/sched/wiki/get_nodes.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -50,10 +50,16 @@ static char * _get_node_state(struct node_record *node_ptr); * RET 0 on success, -1 on failure * * Response format - * ARG=<cnt>#<NODEID>:STATE=<state>; - * FEATURE=<feature:feature>; - * CMEMORY=<mb>;CDISK=<mb>;CPROC=<cpus>; - * [#<NODEID>:...]; + * Response format + * ARG=<cnt>#<NODEID>: + * STATE=<state>; Moab equivalent node state + * [ARCH=<architecture>;] Computer architecture + * [OS=<operating_system>;] Operating system + * CMEMORY=<MB>; MB of memory on node + * CDISK=<MB>; MB of disk space on node + * CPROCS=<cpus>; CPU count on node + * [FEATURE=<feature>;] Features associated with node, if any + * [#<NODEID>:...]; */ extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg) { @@ -84,12 +90,17 @@ extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg) /* report all nodes */ buf = _dump_all_nodes(&node_rec_cnt, update_time); } else { - struct node_record *node_ptr; - char *node_name, *tmp2_char; + struct node_record *node_ptr = NULL; + char *node_name = NULL, *tmp2_char = NULL; node_name = strtok_r(tmp_char, ":", &tmp2_char); while (node_name) { node_ptr = find_node_record(node_name); + if (node_ptr == NULL) { + error("sched/wiki2: bad hostname %s", + node_name); + continue; + } tmp_buf = _dump_node(node_ptr, update_time); if (node_rec_cnt > 0) xstrcat(buf, "#"); @@ -105,7 +116,10 @@ extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg) if (buf) buf_size = strlen(buf); tmp_buf = xmalloc(buf_size + 32); - sprintf(tmp_buf, "SC=0 ARG=%d#%s", node_rec_cnt, buf); + if (node_rec_cnt) + sprintf(tmp_buf, "SC=0 ARG=%d#%s", node_rec_cnt, buf); + else + sprintf(tmp_buf, "SC=0 ARG=0#"); xfree(buf); *err_code = 0; *err_msg = tmp_buf; @@ -144,7 +158,30 @@ static char * _dump_node(struct node_record *node_ptr, time_t update_time) node_ptr->name, _get_node_state(node_ptr)); xstrcat(buf, tmp); - + + if (node_ptr->arch) { + snprintf(tmp, sizeof(tmp), "ARCH=%s;", node_ptr->arch); + xstrcat(buf, tmp); + } + + if (node_ptr->os) { + snprintf(tmp, sizeof(tmp), "OS=%s;", node_ptr->os); + xstrcat(buf, tmp); + } + + if (node_ptr->config_ptr + && node_ptr->config_ptr->feature) { + snprintf(tmp, sizeof(tmp), "FEATURES=%s;", + node_ptr->config_ptr->feature); + /* comma separated to colon */ + for (i=0; (tmp[i] != '\0'); i++) { + if ((tmp[i] == ',') + || (tmp[i] == '|')) + tmp[i] = ':'; + } + xstrcat(buf, tmp); + } + if (update_time > 0) return buf; @@ -165,19 +202,6 @@ static char * _dump_node(struct node_record *node_ptr, time_t update_time) } xstrcat(buf, tmp); - if (node_ptr->config_ptr - && node_ptr->config_ptr->feature) { - snprintf(tmp, sizeof(tmp), "FEATURES=%s;", - node_ptr->config_ptr->feature); - /* comma separated to colon */ - for (i=0; (tmp[i] != '\0'); i++) { - if ((tmp[i] == ',') - || (tmp[i] == '|')) - tmp[i] = ':'; - } - xstrcat(buf, tmp); - } - return buf; } @@ -186,7 +210,8 @@ static char * _get_node_state(struct node_record *node_ptr) uint16_t state = node_ptr->node_state; uint16_t base_state = state & NODE_STATE_BASE; - if (state & NODE_STATE_DRAIN) + if ((state & NODE_STATE_DRAIN) + || (state & NODE_STATE_FAIL)) return "Draining"; if (state & NODE_STATE_COMPLETING) return "Busy"; diff --git a/src/plugins/sched/wiki/hostlist.c b/src/plugins/sched/wiki/hostlist.c 
index c31dc6bf8..1c3214891 100644 --- a/src/plugins/sched/wiki/hostlist.c +++ b/src/plugins/sched/wiki/hostlist.c @@ -4,7 +4,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -74,7 +74,8 @@ static char * _task_list_exp(struct job_record *job_ptr); */ extern char * moab2slurm_task_list(char *moab_tasklist, int *task_cnt) { - char *slurm_tasklist, *host, *tmp1, *tmp2, *tok, *tok_p; + char *slurm_tasklist = NULL, *host = NULL, *tmp1 = NULL, + *tmp2 = NULL, *tok = NULL, *tok_p = NULL; int i, reps; hostlist_t hl; static uint32_t cr_test = 0, cr_enabled = 0; diff --git a/src/plugins/sched/wiki/job_modify.c b/src/plugins/sched/wiki/job_modify.c index e3a626549..4ac123c9c 100644 --- a/src/plugins/sched/wiki/job_modify.c +++ b/src/plugins/sched/wiki/job_modify.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -37,10 +37,11 @@ #include "./msg.h" #include <strings.h> +#include "src/slurmctld/job_scheduler.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" -static void _null_term(char *str) +extern void null_term(char *str) { char *tmp_ptr; for (tmp_ptr=str; ; tmp_ptr++) { @@ -53,24 +54,8 @@ static void _null_term(char *str) } } -/* return -1 on error */ -static int32_t _get_depend_id(char *str) -{ - /* stand-alone job_id */ - if (isdigit(str[0])) - return (int32_t) atol(str); - - if (strncasecmp(str, "afterany:", 9) != 0) /* invalid spec */ - return (int32_t) -1; - - str += 9; - if (!isdigit(str[0])) - return (int32_t) -1; - return (int32_t) atol(str); -} - static int _job_modify(uint32_t jobid, char *bank_ptr, - int32_t depend_id, char *new_hostlist, + char *depend_ptr, char *new_hostlist, uint32_t new_node_cnt, char *part_name_ptr, uint32_t new_time_limit) { @@ -86,9 +71,16 @@ static int _job_modify(uint32_t jobid, char *bank_ptr, return ESLURM_DISABLED; } - if (depend_id != -1) { - info("wiki: changing job dependency to %d", depend_id); - job_ptr->dependency = depend_id; + if (depend_ptr) { + int rc = update_job_dependency(job_ptr, depend_ptr); + if (rc == SLURM_SUCCESS) { + info("wiki: changed job %u dependency to %s", + jobid, depend_ptr); + } else { + error("wiki: changing job %u dependency to %s", + jobid, depend_ptr); + return EINVAL; + } } if (new_time_limit) { @@ -174,11 +166,11 @@ host_fini: if (rc) { } info("wiki: change job %u partition %s", jobid, part_name_ptr); - strncpy(job_ptr->partition, part_name_ptr, MAX_SLURM_NAME); + xfree(job_ptr->partition); + job_ptr->partition = xstrdup(part_name_ptr); job_ptr->part_ptr = part_ptr; last_job_update = time(NULL); } - if (new_node_cnt) { if (IS_JOB_PENDING(job_ptr) && job_ptr->details) { job_ptr->details->min_nodes = new_node_cnt; @@ -207,7 +199,6 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) char *arg_ptr, *bank_ptr, *depend_ptr, *nodes_ptr; char *host_ptr, *part_ptr, *time_ptr, *tmp_char; int slurm_rc; - int depend_id = -1; uint32_t jobid, new_node_cnt = 0, new_time_limit = 0; 
static char reply_msg[128]; /* Locks: write job, read node and partition info */ @@ -241,24 +232,17 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) if (bank_ptr) { bank_ptr[4] = ':'; bank_ptr += 5; - _null_term(bank_ptr); + null_term(bank_ptr); } if (depend_ptr) { depend_ptr[6] = ':'; depend_ptr += 7; - depend_id = _get_depend_id(depend_ptr); - if (depend_id == -1) { - *err_code = -300; - *err_msg = "MODIFYJOB has invalid DEPEND specificiation"; - error("wiki: MODIFYJOB has invalid DEPEND spec: %s", - depend_ptr); - return -1; - } + null_term(depend_ptr); } if (host_ptr) { host_ptr[8] = ':'; host_ptr += 9; - _null_term(bank_ptr); + null_term(bank_ptr); } if (nodes_ptr) { nodes_ptr[5] = ':'; @@ -268,7 +252,7 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) if (part_ptr) { part_ptr[9] = ':'; part_ptr += 10; - _null_term(part_ptr); + null_term(part_ptr); } if (time_ptr) { time_ptr[9] = ':'; @@ -286,7 +270,7 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) } lock_slurmctld(job_write_lock); - slurm_rc = _job_modify(jobid, bank_ptr, depend_id, host_ptr, + slurm_rc = _job_modify(jobid, bank_ptr, depend_ptr, host_ptr, new_node_cnt, part_ptr, new_time_limit); unlock_slurmctld(job_write_lock); if (slurm_rc != SLURM_SUCCESS) { diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c index 60df0858f..8f3ec8252 100644 --- a/src/plugins/sched/wiki/msg.c +++ b/src/plugins/sched/wiki/msg.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -38,6 +38,7 @@ #include "./crypto.h" #include "./msg.h" #include "src/common/uid.h" +#include "src/slurmctld/locks.h" #define _DEBUG 0 @@ -139,12 +140,20 @@ static void *_msg_thread(void *no_data) slurm_fd sock_fd = -1, new_fd; slurm_addr cli_addr; char *msg; - slurm_ctl_conf_t *conf = slurm_conf_lock(); + slurm_ctl_conf_t *conf; int i; + /* Locks: Write configuration, job, node, and partition */ + slurmctld_lock_t config_write_lock = { + WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK }; + conf = slurm_conf_lock(); sched_port = conf->schedport; slurm_conf_unlock(); + /* Wait until configuration is completely loaded */ + lock_slurmctld(config_write_lock); + unlock_slurmctld(config_write_lock); + /* If SchedulerPort is already taken, keep trying to open it * once per minute. Slurmctld will continue to function * during this interval even if nothing can be scheduled. 
*/ @@ -178,8 +187,10 @@ static void *_msg_thread(void *no_data) err_code = 0; err_msg = ""; msg = _recv_msg(new_fd); - _proc_msg(new_fd, msg); - xfree(msg); + if (msg) { + _proc_msg(new_fd, msg); + xfree(msg); + } slurm_close_accepted_conn(new_fd); } if (sock_fd > 0) @@ -202,7 +213,7 @@ static char * _get_wiki_conf_path(void) val = default_slurm_config_file; /* Replace file name on end of path */ - i = strlen(val) + 1; + i = strlen(val) + 10; path = xmalloc(i); strcpy(path, val); val = strrchr(path, (int)'/'); @@ -287,7 +298,7 @@ extern int parse_wiki_config(void) s_p_get_uint16(&job_aggregation_time, "JobAggregationTime", tbl); if (s_p_get_string(&exclude_partitions, "ExcludePartitions", tbl)) { - char *tok, *tok_p; + char *tok = NULL, *tok_p = NULL; tok = strtok_r(exclude_partitions, ",", &tok_p); i = 0; while (tok) { @@ -344,10 +355,55 @@ extern int parse_wiki_config(void) info("JobAggregationTime = %u sec", job_aggregation_time); info("JobPriority = %s", init_prio_mode ? "run" : "hold"); info("KillWait = %u sec", kill_wait); + for (i=0; i<EXC_PART_CNT; i++) { + if (!exclude_part_ptr[i]) + continue; + info("ExcludePartitions = %s", exclude_part_ptr[i]->name); + } + for (i=0; i<HIDE_PART_CNT; i++) { + if (!hide_part_ptr[i]) + continue; + info("HidePartitionJobs = %s", hide_ptr_ptr[i]->name); + } #endif return SLURM_SUCCESS; } +extern char * get_wiki_conf(void) +{ + int i, first; + char buf[20], *conf = NULL; + + snprintf(buf, sizeof(buf), "HostFormat=%u", use_host_exp); + xstrcat(conf, buf); + + first = 1; + for (i=0; i<EXC_PART_CNT; i++) { + if (!exclude_part_ptr[i]) + continue; + if (first) { + xstrcat(conf, ";ExcludePartitions="); + first = 0; + } else + xstrcat(conf, ","); + xstrcat(conf, exclude_part_ptr[i]->name); + } + + first = 1; + for (i=0; i<HIDE_PART_CNT; i++) { + if (!hide_part_ptr[i]) + continue; + if (first) { + xstrcat(conf, ";HidePartitionJobs="); + first = 0; + } else + xstrcat(conf, ","); + xstrcat(conf, hide_part_ptr[i]->name); + } + + return conf; +} + static size_t _read_bytes(int fd, char *buf, const size_t size) { size_t bytes_remaining, bytes_read; diff --git a/src/plugins/sched/wiki/msg.h b/src/plugins/sched/wiki/msg.h index 5015e26cb..7964a09a7 100644 --- a/src/plugins/sched/wiki/msg.h +++ b/src/plugins/sched/wiki/msg.h @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -99,10 +99,16 @@ extern uint16_t job_aggregation_time; extern uint16_t kill_wait; extern uint16_t use_host_exp; +extern char * bitmap2wiki_node_name(bitstr_t *bitmap); extern int event_notify(char *msg); extern int spawn_msg_thread(void); extern void term_msg_thread(void); -extern char * bitmap2wiki_node_name(bitstr_t *bitmap); +extern char * get_wiki_conf(void); + +/* + * Given a string, replace the first space found with '\0' + */ +extern void null_term(char *str); /* Functions called from within msg.c (rather than creating a bunch * more header files with one function definition each */ diff --git a/src/plugins/sched/wiki/resume_job.c b/src/plugins/sched/wiki/resume_job.c index 319219244..b172e04d2 100644 --- a/src/plugins/sched/wiki/resume_job.c +++ b/src/plugins/sched/wiki/resume_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki/sched_wiki.c b/src/plugins/sched/wiki/sched_wiki.c index 64606a366..4fdb4dd7d 100644 --- a/src/plugins/sched/wiki/sched_wiki.c +++ b/src/plugins/sched/wiki/sched_wiki.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -76,6 +76,22 @@ extern int slurm_sched_plugin_schedule( void ) return SLURM_SUCCESS; } +/***************************************************************************/ +/* TAG( slurm_sched_plugin_newalloc ) */ +/***************************************************************************/ +extern int slurm_sched_plugin_newalloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + +/***************************************************************************/ +/* TAG( slurm_sched_plugin_freealloc ) */ +/***************************************************************************/ +extern int slurm_sched_plugin_freealloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + /**************************************************************************/ /* TAG( slurm_sched_plugin_initial_priority ) */ @@ -150,3 +166,18 @@ char *slurm_sched_strerror( int errnum ) return NULL; } +/**************************************************************************/ +/* TAG( slurm_sched_plugin_requeue ) */ +/**************************************************************************/ +void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason ) +{ + /* Empty. */ +} + +/**************************************************************************/ +/* TAG( slurm_sched_get_conf ) */ +/**************************************************************************/ +char *slurm_sched_get_conf( void ) +{ + return get_wiki_conf(); +} diff --git a/src/plugins/sched/wiki/start_job.c b/src/plugins/sched/wiki/start_job.c index 180e79742..22007c628 100644 --- a/src/plugins/sched/wiki/start_job.c +++ b/src/plugins/sched/wiki/start_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
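The new get_wiki_conf() added to msg.c above, and exported to slurmctld through slurm_sched_get_conf() in sched_wiki.c, reports the plugin's effective settings as one semicolon-delimited string such as HostFormat=1;ExcludePartitions=pdebug,pbatch;HidePartitionJobs=ptest. The real routine walks the exclude_part_ptr[] and hide_part_ptr[] tables filled in by parse_wiki_config() and builds the string with xstrcat(). Below is a minimal sketch of that assembly pattern in plain C; the partition names are invented for illustration and a fixed buffer with snprintf() stands in for SLURM's xmalloc()/xstrcat():

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins for the plugin's tables; the real code walks
     * exclude_part_ptr[] and hide_part_ptr[] built by parse_wiki_config(). */
    static const char *exclude_parts[] = { "pdebug", "pbatch", NULL };
    static const char *hide_parts[]    = { "ptest", NULL };

    static void append_list(char *buf, size_t len, const char *key,
                            const char **names)
    {
        int i, first = 1;
        for (i = 0; names[i]; i++) {
            /* ";Key=" before the first name, "," before the rest */
            snprintf(buf + strlen(buf), len - strlen(buf), "%s%s",
                     first ? key : ",", names[i]);
            first = 0;
        }
    }

    int main(void)
    {
        char conf[256] = "";
        int use_host_exp = 1;   /* HostFormat, as set by INITIALIZE */

        snprintf(conf, sizeof(conf), "HostFormat=%d", use_host_exp);
        append_list(conf, sizeof(conf), ";ExcludePartitions=", exclude_parts);
        append_list(conf, sizeof(conf), ";HidePartitionJobs=", hide_parts);
        printf("%s\n", conf);
        /* HostFormat=1;ExcludePartitions=pdebug,pbatch;HidePartitionJobs=ptest */
        return 0;
    }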
@@ -39,6 +39,7 @@ #include "src/common/node_select.h" #include "src/common/slurm_protocol_defs.h" #include "src/common/xstring.h" +#include "src/slurmctld/job_scheduler.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" #include "src/slurmctld/state_save.h" @@ -46,7 +47,9 @@ static int _start_job(uint32_t jobid, int task_cnt, char *hostlist, char *tasklist, int *err_code, char **err_msg); -/* RET 0 on success, -1 on failure */ +/* Start a job: + * CMD=STARTJOB ARG=<jobid> TASKLIST=<node_list> + * RET 0 on success, -1 on failure */ extern int start_job(char *cmd_ptr, int *err_code, char **err_msg) { char *arg_ptr, *task_ptr, *tasklist, *tmp_char; @@ -79,6 +82,7 @@ extern int start_job(char *cmd_ptr, int *err_code, char **err_msg) return -1; } task_ptr += 9; /* skip over "TASKLIST=" */ + null_term(task_ptr); tasklist = moab2slurm_task_list(task_ptr, &task_cnt); if (tasklist) hl = hostlist_create(tasklist); diff --git a/src/plugins/sched/wiki/suspend_job.c b/src/plugins/sched/wiki/suspend_job.c index 058d2dce3..383b9539b 100644 --- a/src/plugins/sched/wiki/suspend_job.c +++ b/src/plugins/sched/wiki/suspend_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/Makefile.in b/src/plugins/sched/wiki2/Makefile.in index 62fccc350..fdd07feb3 100644 --- a/src/plugins/sched/wiki2/Makefile.in +++ b/src/plugins/sched/wiki2/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -84,7 +86,7 @@ sched_wiki2_la_OBJECTS = $(am_sched_wiki2_la_OBJECTS) sched_wiki2_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(sched_wiki2_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -124,6 +126,7 @@ CXXFLAGS = -fexceptions CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -137,10 +140,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -160,7 +166,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -171,6 +180,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -186,6 +197,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -201,6 +213,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -324,8 +337,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -333,8 +346,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -406,8 +419,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique 
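Stepping back to the wiki job_modify.c and start_job.c changes above: the static _null_term() becomes an exported null_term(), declared in msg.h as replacing the first space found with '\0', and every MODIFYJOB keyword is handled with the same idiom, locate it with strstr(), replace its trailing '=' with ':', advance past the keyword, and cut the value short at the next space (the real handler locates every keyword before it starts editing the buffer). A small self-contained illustration of that idiom, using a fabricated command string and a simplified null_term() keyed on the documented contract:

    #include <stdio.h>
    #include <string.h>

    /* Same documented contract as the plugin's null_term(): cut the
     * string at the first space, if any. */
    static void null_term(char *str)
    {
        char *sp = strchr(str, ' ');
        if (sp)
            *sp = '\0';
    }

    int main(void)
    {
        /* Illustrative MODIFYJOB payload, not captured from Moab */
        char cmd[] = "CMD=MODIFYJOB ARG=123 BANK=chemistry TIMELIMIT=3600";
        char *bank_ptr = strstr(cmd, "BANK=");

        if (bank_ptr) {
            bank_ptr[4] = ':';      /* keyword is now "BANK:"     */
            bank_ptr += 5;          /* value starts after it      */
            null_term(bank_ptr);    /* value ends at the space    */
            printf("new bank = %s\n", bank_ptr);   /* "chemistry" */
        }
        return 0;
    }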
tags: TAGS @@ -419,8 +432,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -430,13 +443,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/sched/wiki2/cancel_job.c b/src/plugins/sched/wiki2/cancel_job.c index 294a67c12..8a91bf0a0 100644 --- a/src/plugins/sched/wiki2/cancel_job.c +++ b/src/plugins/sched/wiki2/cancel_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/event.c b/src/plugins/sched/wiki2/event.c index f3795d2d5..5c8405071 100644 --- a/src/plugins/sched/wiki2/event.c +++ b/src/plugins/sched/wiki2/event.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/get_jobs.c b/src/plugins/sched/wiki2/get_jobs.c index 59b3720bd..b95a9fcc9 100644 --- a/src/plugins/sched/wiki2/get_jobs.c +++ b/src/plugins/sched/wiki2/get_jobs.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
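A fix repeated across the GETJOBS and GETNODES handlers above, and again in the wiki2 files that follow, is the reply assembly: instead of always formatting "SC=0 ARG=%d#%s", the code now branches so that an empty result yields the literal "SC=0 ARG=0#" rather than passing a NULL record buffer to sprintf(). A minimal sketch of the corrected pattern, with malloc()/free() standing in for SLURM's xmalloc()/xfree() and a fabricated record string:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Build the wiki reply for 'cnt' records held in 'records' (records
     * may be NULL when cnt is 0).  Caller frees the result. */
    static char *build_reply(int cnt, const char *records)
    {
        size_t len = (records ? strlen(records) : 0) + 32;
        char *reply = malloc(len);

        if (!reply)
            return NULL;
        if (cnt)
            snprintf(reply, len, "SC=0 ARG=%d#%s", cnt, records);
        else
            snprintf(reply, len, "SC=0 ARG=0#");
        return reply;
    }

    int main(void)
    {
        char *r1 = build_reply(2, "1001:STATE=Idle;#1002:STATE=Running;");
        char *r2 = build_reply(0, NULL);

        printf("%s\n%s\n", r1, r2);
        free(r1);
        free(r2);
        return 0;
    }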
@@ -43,6 +43,7 @@ #include "src/common/list.h" #include "src/common/node_select.h" #include "src/common/uid.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/locks.h" static char * _dump_all_jobs(int *job_cnt, time_t update_time); @@ -65,10 +66,23 @@ static int _hidden_job(struct job_record *job_ptr); static uint32_t cr_enabled = 0, cr_test = 0; +/* We only keep a few reject message to limit the overhead */ +#define REJECT_MSG_MAX 16 +#define REJECT_MSG_LEN 128 +static int reject_msg_cnt = 0; +typedef struct reject_msg { + uint32_t job_id; + char reason[REJECT_MSG_LEN]; +} reject_msg_t; +reject_msg_t reject_msgs[REJECT_MSG_MAX]; + /* * get_jobs - get information on specific job(s) changed since some time - * cmd_ptr IN - CMD=GETJOBS ARG=[<UPDATETIME>:<JOBID>[:<JOBID>]...] + * cmd_ptr IN - CMD=GETJOBS ARG=[<UPDATETIME>:<JOBID>[:<JOBID>]...] * [<UPDATETIME>:ALL] + * err_code OUT - 0 or an error code + * err_msg OUT - response message + * NOTE: xfree() err_msg if err_code is zero * RET 0 on success, -1 on failure * * Response format @@ -100,7 +114,6 @@ static uint32_t cr_enabled = 0, cr_test = 0; * [#<JOBID>;...]; additional jobs, if any * */ -/* RET 0 on success, -1 on failure */ extern int get_jobs(char *cmd_ptr, int *err_code, char **err_msg) { char *arg_ptr = NULL, *tmp_char = NULL, *tmp_buf = NULL, *buf = NULL; @@ -165,7 +178,10 @@ extern int get_jobs(char *cmd_ptr, int *err_code, char **err_msg) if (buf) buf_size = strlen(buf); tmp_buf = xmalloc(buf_size + 32); - sprintf(tmp_buf, "SC=0 ARG=%d#%s", job_rec_cnt, buf); + if (job_rec_cnt) + sprintf(tmp_buf, "SC=0 ARG=%d#%s", job_rec_cnt, buf); + else + sprintf(tmp_buf, "SC=0 ARG=0#"); xfree(buf); *err_code = 0; *err_msg = tmp_buf; @@ -216,6 +232,7 @@ static char * _dump_job(struct job_record *job_ptr, time_t update_time) { char tmp[16384], *buf = NULL; uint32_t end_time, suspend_time; + int i, rej_sent = 0; if (!job_ptr) return NULL; @@ -261,7 +278,22 @@ static char * _dump_job(struct job_record *job_ptr, time_t update_time) xfree(hosts); } - if (job_ptr->job_state == JOB_FAILED) { + if (reject_msg_cnt) { + /* Possible job requeue/reject message */ + for (i=0; i<REJECT_MSG_MAX; i++) { + if (reject_msgs[i].job_id != job_ptr->job_id) + continue; + snprintf(tmp, sizeof(tmp), + "REJMESSAGE=\"%s\";", + reject_msgs[i].reason); + xstrcat(buf, tmp); + reject_msgs[i].job_id = 0; + reject_msg_cnt--; + rej_sent = 1; + break; + } + } + if ((rej_sent == 0) && (job_ptr->job_state == JOB_FAILED)) { snprintf(tmp, sizeof(tmp), "REJMESSAGE=\"%s\";", job_reason_string(job_ptr->state_reason)); @@ -272,16 +304,18 @@ static char * _dump_job(struct job_record *job_ptr, time_t update_time) xstrcat(buf, "FLAGS=INTERACTIVE;"); snprintf(tmp, sizeof(tmp), - "UPDATETIME=%u;WCLIMIT=%u;", + "UPDATETIME=%u;WCLIMIT=%u;TASKS=%u;", (uint32_t) job_ptr->time_last_active, - (uint32_t) _get_job_time_limit(job_ptr)); + (uint32_t) _get_job_time_limit(job_ptr), + _get_job_tasks(job_ptr)); xstrcat(buf, tmp); - snprintf(tmp, sizeof(tmp), - "TASKS=%u;NODES=%u;", - _get_job_tasks(job_ptr), - _get_job_min_nodes(job_ptr)); - xstrcat(buf, tmp); + if (!IS_JOB_FINISHED(job_ptr)) { + snprintf(tmp, sizeof(tmp), + "NODES=%u;", + _get_job_min_nodes(job_ptr)); + xstrcat(buf, tmp); + } snprintf(tmp, sizeof(tmp), "DPROCS=%u;", @@ -351,26 +385,25 @@ static void _get_job_comment(struct job_record *job_ptr, size = snprintf(buffer, buf_size, "COMMENT=\""); /* JOB DEPENDENCY */ - if (job_ptr->dependency) { + if (job_ptr->details && job_ptr->details->dependency) { /* Kludge for job 
dependency set via srun */ size += snprintf((buffer + size), (buf_size - size), - "DEPEND=afterany:%u", job_ptr->dependency); + "DEPEND=%s", job_ptr->details->dependency); field_sep = "?"; } /* SHARED NODES */ if (cr_enabled) { /* consumable resources */ if (job_ptr->part_ptr && - (job_ptr->part_ptr->shared == SHARED_EXCLUSIVE)) + (job_ptr->part_ptr->max_share == 0)) /* Exclusive use */ sharing = 0; - else if (job_ptr->details && (job_ptr->details->shared != 0)) - sharing = 1; - } else if (job_ptr->part_ptr) { /* partition with */ - if (job_ptr->part_ptr->shared == SHARED_FORCE) + else if (job_ptr->details && job_ptr->details->shared) sharing = 1; - else if ((job_ptr->part_ptr->shared == SHARED_YES) - && (job_ptr->details) /* optional for partition */ - && (job_ptr->details->shared)) /* with job to share */ + } else if (job_ptr->part_ptr) { /* partition level control */ + if (job_ptr->part_ptr->max_share & SHARED_FORCE) + sharing = 1; /* Sharing forced */ + else if ((job_ptr->part_ptr->max_share > 1) && + (job_ptr->details) && (job_ptr->details->shared)) sharing = 1; } if (sharing) { @@ -425,7 +458,8 @@ static uint32_t _get_job_min_disk(struct job_record *job_ptr) static uint32_t _get_job_min_nodes(struct job_record *job_ptr) { if (job_ptr->job_state > JOB_PENDING) { - /* return actual count of allocated nodes */ + /* return actual count of currently allocated nodes. + * NOTE: gets decremented to zero while job is completing */ return job_ptr->node_cnt; } @@ -453,15 +487,21 @@ static uint32_t _get_job_submit_time(struct job_record *job_ptr) static uint32_t _get_job_tasks(struct job_record *job_ptr) { - uint32_t task_cnt = 1; - - if (job_ptr->num_procs) - task_cnt = job_ptr->num_procs; + uint32_t task_cnt; - if (job_ptr->details) { - task_cnt = MAX(task_cnt, - (_get_job_min_nodes(job_ptr) * - job_ptr->details->ntasks_per_node)); + if (job_ptr->job_state > JOB_PENDING) { + task_cnt = job_ptr->total_procs; + } else { + if (job_ptr->num_procs) + task_cnt = job_ptr->num_procs; + else + task_cnt = 1; + if (job_ptr->details) { + task_cnt = MAX(task_cnt, + (_get_job_min_nodes(job_ptr) * + job_ptr->details-> + ntasks_per_node)); + } } return task_cnt / _get_job_cpus_per_task(job_ptr); @@ -504,12 +544,12 @@ static char * _get_job_state(struct job_record *job_ptr) return "Running"; } - if (base_state == JOB_PENDING) - return "Idle"; if (base_state == JOB_RUNNING) return "Running"; if (base_state == JOB_SUSPENDED) return "Suspended"; + if (base_state == JOB_PENDING) + return "Idle"; if ((base_state == JOB_COMPLETE) || (base_state == JOB_FAILED)) state_str = "Completed"; @@ -567,4 +607,30 @@ static uint32_t _get_job_suspend_time(struct job_record *job_ptr) return (uint32_t) 0; } +extern void wiki_job_requeue(struct job_record *job_ptr, char *reason) +{ + int empty = -1, i; + + for (i=0; i<REJECT_MSG_MAX; i++) { + if ((reject_msgs[i].job_id == 0) && (empty == -1)) { + empty = i; + if (reject_msg_cnt == 0) + break; + } else if (reject_msgs[i].job_id != job_ptr->job_id) + continue; + + /* over-write previous message for this job */ + strncpy(reject_msgs[i].reason, reason, REJECT_MSG_LEN); + reject_msgs[i].reason[REJECT_MSG_LEN - 1] = '\0'; + return; + } + + if (empty == -1) /* no free space */ + return; + + reject_msgs[empty].job_id = job_ptr->job_id; + strncpy(reject_msgs[i].reason, reason, REJECT_MSG_LEN); + reject_msgs[i].reason[REJECT_MSG_LEN - 1] = '\0'; + reject_msg_cnt++; +} diff --git a/src/plugins/sched/wiki2/get_nodes.c b/src/plugins/sched/wiki2/get_nodes.c index ddf66db69..34450f674 100644 --- 
a/src/plugins/sched/wiki2/get_nodes.c +++ b/src/plugins/sched/wiki2/get_nodes.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -36,30 +36,40 @@ \*****************************************************************************/ #include "./msg.h" +#include "src/common/hostlist.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" static char * _dump_all_nodes(int *node_cnt, time_t update_time); -static char * _dump_node(struct node_record *node_ptr, time_t update_time); +static char * _dump_node(struct node_record *node_ptr, hostlist_t hl, + time_t update_time); static char * _get_node_state(struct node_record *node_ptr); +static int _same_info(struct node_record *node1_ptr, + struct node_record *node2_ptr, time_t update_time); +static int _str_cmp(char *s1, char *s2); /* * get_nodes - get information on specific node(s) changed since some time - * cmd_ptr IN - CMD=GETNODES ARG=[<UPDATETIME>:<NODEID>[:<NODEID>]...] + * cmd_ptr IN - CMD=GETNODES ARG=[<UPDATETIME>:<NODEID>[:<NODEID>]...] * [<UPDATETIME>:ALL] + * err_code OUT - 0 or an error code + * err_msg OUT - response message + * NOTE: xfree() err_msg if err_code is zero * RET 0 on success, -1 on failure * * Response format * ARG=<cnt>#<NODEID>: - * STATE=<state>; Moab equivalent node state - * CCLASS=<[part:cpus]>; SLURM partition with CPU count of node, - * make have more than one partition - * CMEMORY=<MB>; MB of memory on node - * CDISK=<MB>; MB of disk space on node - * CPROCS=<cpus>; CPU count on node - * [FEATURE=<feature>;] features associated with node, if any, - * [CAT=<reason>]; Reason for a node being down or drained - * colon separator + * STATE=<state>; Moab equivalent node state + * [CAT=<reason>]; Reason for a node being down or drained + * colon separator + * CCLASS=<[part:cpus]>; SLURM partition with CPU count of node, + * make have more than one partition + * [ARCH=<architecture>;] Computer architecture + * [OS=<operating_system>;] Operating system + * CMEMORY=<MB>; MB of memory on node + * CDISK=<MB>; MB of disk space on node + * CPROCS=<cpus>; CPU count on node + * [FEATURE=<feature>;] Features associated with node, if any * [#<NODEID>:...]; */ extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg) @@ -92,19 +102,31 @@ extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg) buf = _dump_all_nodes(&node_rec_cnt, update_time); } else { struct node_record *node_ptr = NULL; - char *node_name = NULL, *tmp2_char = NULL; - - node_name = strtok_r(tmp_char, ":", &tmp2_char); - while (node_name) { - node_ptr = find_node_record(node_name); - tmp_buf = _dump_node(node_ptr, update_time); - if (node_rec_cnt > 0) - xstrcat(buf, "#"); - xstrcat(buf, tmp_buf); - xfree(tmp_buf); - node_rec_cnt++; - node_name = strtok_r(NULL, ":", &tmp2_char); + char *node_name, *slurm_hosts; + int node_cnt; + hostset_t slurm_hostset; + + slurm_hosts = moab2slurm_task_list(tmp_char, &node_cnt); + if ((slurm_hostset = hostset_create(slurm_hosts))) { + while ((node_name = hostset_shift(slurm_hostset))) { + node_ptr = find_node_record(node_name); + if (node_ptr == NULL) { + error("sched/wiki2: bad hostname %s", + node_name); + continue; + } + tmp_buf = _dump_node(node_ptr, NULL, update_time); + if 
(node_rec_cnt > 0) + xstrcat(buf, "#"); + xstrcat(buf, tmp_buf); + xfree(tmp_buf); + node_rec_cnt++; + } + hostset_destroy(slurm_hostset); + } else { + error("hostset_create(%s): %m", slurm_hosts); } + xfree(slurm_hosts); } unlock_slurmctld(node_read_lock); @@ -112,7 +134,10 @@ extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg) if (buf) buf_size = strlen(buf); tmp_buf = xmalloc(buf_size + 32); - sprintf(tmp_buf, "SC=0 ARG=%d#%s", node_rec_cnt, buf); + if (node_rec_cnt) + sprintf(tmp_buf, "SC=0 ARG=%d#%s", node_rec_cnt, buf); + else + sprintf(tmp_buf, "SC=0 ARG=0#"); xfree(buf); *err_code = 0; *err_msg = tmp_buf; @@ -121,36 +146,143 @@ extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg) static char * _dump_all_nodes(int *node_cnt, time_t update_time) { - int i, cnt = 0; + int i, cnt = 0, rc; struct node_record *node_ptr = node_record_table_ptr; char *tmp_buf = NULL, *buf = NULL; - + struct node_record *uniq_node_ptr = NULL; + hostlist_t hl = NULL; + for (i=0; i<node_record_count; i++, node_ptr++) { if (node_ptr->name == NULL) continue; - tmp_buf = _dump_node(node_ptr, update_time); + if (use_host_exp == 2) { + rc = _same_info(uniq_node_ptr, node_ptr, update_time); + if (rc == 0) { + uniq_node_ptr = node_ptr; + if (hl) { + hostlist_push(hl, node_ptr->name); + } else { + hl = hostlist_create(node_ptr->name); + if (hl == NULL) + fatal("malloc failure"); + } + continue; + } else { + tmp_buf = _dump_node(uniq_node_ptr, hl, + update_time); + hostlist_destroy(hl); + hl = hostlist_create(node_ptr->name); + if (hl == NULL) + fatal("malloc failure"); + uniq_node_ptr = node_ptr; + } + } else { + tmp_buf = _dump_node(node_ptr, hl, update_time); + } if (cnt > 0) xstrcat(buf, "#"); xstrcat(buf, tmp_buf); xfree(tmp_buf); cnt++; } + + if (hl) { + tmp_buf = _dump_node(uniq_node_ptr, hl, update_time); + hostlist_destroy(hl); + if (cnt > 0) + xstrcat(buf, "#"); + xstrcat(buf, tmp_buf); + xfree(tmp_buf); + cnt++; + } + *node_cnt = cnt; return buf; } -static char * _dump_node(struct node_record *node_ptr, time_t update_time) +/* Determine if node1 and node2 have the same parameters that we report to Moab + * RET 0 of node1 is NULL or their parameters are the same + * >0 otherwise + */ +static int _same_info(struct node_record *node1_ptr, + struct node_record *node2_ptr, time_t update_time) { - char tmp[512], *buf = NULL; + int i; + + if (node1_ptr == NULL) /* first record, treat as a match */ + return 0; + + if (node1_ptr->node_state != node2_ptr->node_state) + return 1; + if (_str_cmp(node1_ptr->reason, node2_ptr->reason)) + return 2; + if (update_time > last_node_update) + return 0; + + if (slurmctld_conf.fast_schedule) { + /* config from slurm.conf */ + if (node1_ptr->config_ptr->cpus != node2_ptr->config_ptr->cpus) + return 3; + } else { + /* config as reported by slurmd */ + if (node1_ptr->cpus != node2_ptr->cpus) + return 4; + } + if (node1_ptr->part_cnt != node2_ptr->part_cnt) + return 5; + for (i=0; i<node1_ptr->part_cnt; i++) { + if (node1_ptr->part_pptr[i] != node2_ptr->part_pptr[i]) + return 6; + } + if (_str_cmp(node1_ptr->arch, node2_ptr->arch)) + return 7; + if (_str_cmp(node1_ptr->os, node2_ptr->os)) + return 8; + if (update_time > 0) + return 0; + + if (slurmctld_conf.fast_schedule) { + /* config from slurm.conf */ + if ((node1_ptr->config_ptr->real_memory != + node2_ptr->config_ptr->real_memory) || + (node1_ptr->config_ptr->tmp_disk != + node2_ptr->config_ptr->tmp_disk) || + (node1_ptr->config_ptr->cpus != + node2_ptr->config_ptr->cpus)) + return 9; + } else { + 
if ((node1_ptr->real_memory != node2_ptr->real_memory) || + (node1_ptr->tmp_disk != node2_ptr->tmp_disk) || + (node1_ptr->cpus != node2_ptr->cpus)) + return 10; + } + if (_str_cmp(node1_ptr->config_ptr->feature, + node2_ptr->config_ptr->feature)) + return 11; + return 0; +} + +static char * _dump_node(struct node_record *node_ptr, hostlist_t hl, + time_t update_time) +{ + char tmp[16*1024], *buf = NULL; int i; uint32_t cpu_cnt; if (!node_ptr) return NULL; - snprintf(tmp, sizeof(tmp), "%s:STATE=%s;", - node_ptr->name, - _get_node_state(node_ptr)); + if (hl) { + hostlist_sort(hl); + hostlist_uniq(hl); + hostlist_ranged_string(hl, sizeof(tmp), tmp); + xstrcat(buf, tmp); + } else { + snprintf(tmp, sizeof(tmp), "%s", node_ptr->name); + xstrcat(buf, tmp); + } + + snprintf(tmp, sizeof(tmp), ":STATE=%s;", _get_node_state(node_ptr)); xstrcat(buf, tmp); if (node_ptr->reason) { snprintf(tmp, sizeof(tmp), "CAT=\"%s\";", node_ptr->reason); @@ -178,6 +310,28 @@ static char * _dump_node(struct node_record *node_ptr, time_t update_time) if (i > 0) xstrcat(buf, ";"); + if (node_ptr->arch) { + snprintf(tmp, sizeof(tmp), "ARCH=%s;", node_ptr->arch); + xstrcat(buf, tmp); + } + + if (node_ptr->os) { + snprintf(tmp, sizeof(tmp), "OS=%s;", node_ptr->os); + xstrcat(buf, tmp); + } + + if (node_ptr->config_ptr + && node_ptr->config_ptr->feature) { + snprintf(tmp, sizeof(tmp), "FEATURE=%s;", + node_ptr->config_ptr->feature); + /* comma separator to colon */ + for (i=0; (tmp[i] != '\0'); i++) { + if (tmp[i] == ',') + tmp[i] = ':'; + } + xstrcat(buf, tmp); + } + if (update_time > 0) return buf; @@ -198,18 +352,6 @@ static char * _dump_node(struct node_record *node_ptr, time_t update_time) } xstrcat(buf, tmp); - if (node_ptr->config_ptr - && node_ptr->config_ptr->feature) { - snprintf(tmp, sizeof(tmp), "FEATURE=%s;", - node_ptr->config_ptr->feature); - /* comma separator to colon */ - for (i=0; (tmp[i] != '\0'); i++) { - if (tmp[i] == ',') - tmp[i] = ':'; - } - xstrcat(buf, tmp); - } - return buf; } @@ -218,7 +360,8 @@ static char * _get_node_state(struct node_record *node_ptr) uint16_t state = node_ptr->node_state; uint16_t base_state = state & NODE_STATE_BASE; - if (state & NODE_STATE_DRAIN) + if ((state & NODE_STATE_DRAIN) + || (state & NODE_STATE_FAIL)) return "Draining"; if (state & NODE_STATE_COMPLETING) return "Busy"; @@ -232,3 +375,16 @@ static char * _get_node_state(struct node_record *node_ptr) return "Unknown"; } + +/* Like strcmp(), but can handle NULL pointers */ +static int _str_cmp(char *s1, char *s2) +{ + if (s1 && s2) + return strcmp(s1, s2); + + if ((s1 == NULL) && (s2 == NULL)) + return 0; + + /* One pointer is valid and the other is NULL */ + return 1; +} diff --git a/src/plugins/sched/wiki2/hostlist.c b/src/plugins/sched/wiki2/hostlist.c index 7a96bd30c..c9f83f4a2 100644 --- a/src/plugins/sched/wiki2/hostlist.c +++ b/src/plugins/sched/wiki2/hostlist.c @@ -4,7 +4,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
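The wiki2 get_nodes.c rewrite just above supports Moab's USEHOSTEXP=N mode by collapsing runs of identical nodes into a single record: _same_info() compares only the fields actually reported (state, reason, CPU count, partitions, arch, OS, memory, disk and features), using the NULL-tolerant _str_cmp(), and _dump_node() then emits one entry keyed by a ranged hostlist instead of one entry per node. The toy program below sketches that compare-and-group idea; the struct and node data are invented for illustration, and the real code operates on struct node_record with SLURM's hostlist API:

    #include <stdio.h>
    #include <string.h>

    struct toy_node {              /* trimmed stand-in for struct node_record */
        const char *name;
        const char *state;
        const char *features;      /* may be NULL */
    };

    /* Like strcmp(), but two NULLs compare equal and NULL vs. non-NULL
     * compare different, matching the plugin's _str_cmp(). */
    static int str_cmp(const char *s1, const char *s2)
    {
        if (s1 && s2)
            return strcmp(s1, s2);
        return ((s1 == NULL) && (s2 == NULL)) ? 0 : 1;
    }

    /* Return 0 if the two nodes would look the same in a GETNODES report;
     * like _same_info(), a NULL first node is treated as a match. */
    static int same_info(const struct toy_node *a, const struct toy_node *b)
    {
        if (a == NULL)
            return 0;
        if (strcmp(a->state, b->state))
            return 1;
        if (str_cmp(a->features, b->features))
            return 2;
        return 0;
    }

    int main(void)
    {
        struct toy_node nodes[] = {
            { "tux1", "Idle", "bigmem" },
            { "tux2", "Idle", "bigmem" },
            { "tux3", "Busy", NULL },
        };
        const struct toy_node *leader = NULL;
        int i, n = sizeof(nodes) / sizeof(nodes[0]);

        for (i = 0; i < n; i++) {
            if (same_info(leader, &nodes[i]) == 0) {
                if (leader == NULL)
                    leader = &nodes[i];
                printf("%s reported with group led by %s\n",
                       nodes[i].name, leader->name);
            } else {
                leader = &nodes[i];
                printf("%s starts a new group\n", nodes[i].name);
            }
        }
        return 0;
    }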
@@ -74,7 +74,8 @@ static char * _task_list_exp(struct job_record *job_ptr); */ extern char * moab2slurm_task_list(char *moab_tasklist, int *task_cnt) { - char *slurm_tasklist, *host, *tmp1, *tmp2, *tok, *tok_p; + char *slurm_tasklist = NULL, *host = NULL, *tmp1 = NULL, + *tmp2 = NULL, *tok = NULL, *tok_p = NULL; int i, reps; hostlist_t hl; static uint32_t cr_test = 0, cr_enabled = 0; diff --git a/src/plugins/sched/wiki2/initialize.c b/src/plugins/sched/wiki2/initialize.c index 9d143c1c9..f94576ab1 100644 --- a/src/plugins/sched/wiki2/initialize.c +++ b/src/plugins/sched/wiki2/initialize.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -40,7 +40,16 @@ #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" -/* RET 0 on success, -1 on failure */ +/* + * initialize_wiki - used by Moab to communication desired format information + * cmd_ptr IN - CMD=INITIALIZE EPORT=<port> USEHOSTEXP=[N|T|F] + * USEHOSTEXP=N : use hostlist expression for GETNODES messages + * USEHOSTEXP=T : use hostlist expression for GETJOBS messages + * USEHOSTEXP=F : use no hostlist expressions + * err_code OUT - 0 or an error code + * err_msg OUT - response message + * RET 0 on success, -1 on failure + */ extern int initialize_wiki(char *cmd_ptr, int *err_code, char **err_msg) { char *arg_ptr, *eport_ptr, *exp_ptr, *use_ptr; @@ -65,6 +74,8 @@ extern int initialize_wiki(char *cmd_ptr, int *err_code, char **err_msg) use_host_exp = 1; else if (exp_ptr[0] == 'F') use_host_exp = 0; + else if (exp_ptr[0] == 'N') + use_host_exp = 2; else { *err_code = -300; *err_msg = "INITIALIZE has invalid USEHOSTEXP"; @@ -73,7 +84,9 @@ extern int initialize_wiki(char *cmd_ptr, int *err_code, char **err_msg) } } - if (use_host_exp) + if (use_host_exp == 2) + use_ptr = "N"; + else if (use_host_exp == 1) use_ptr = "T"; else use_ptr = "F"; diff --git a/src/plugins/sched/wiki2/job_add_task.c b/src/plugins/sched/wiki2/job_add_task.c index c639f1ed4..97d6604ac 100644 --- a/src/plugins/sched/wiki2/job_add_task.c +++ b/src/plugins/sched/wiki2/job_add_task.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/job_modify.c b/src/plugins/sched/wiki2/job_modify.c index f726304fa..9e5f7aec1 100644 --- a/src/plugins/sched/wiki2/job_modify.c +++ b/src/plugins/sched/wiki2/job_modify.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
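The initialize.c change above documents the INITIALIZE handshake and adds USEHOSTEXP=N, so use_host_exp now takes three values: 1 (T, hostlist expressions for GETJOBS), 0 (F, no hostlist expressions) and 2 (N, hostlist expressions for GETNODES). A minimal sketch of that mapping follows; the command string is fabricated, and the real handler also parses EPORT and reports the value it accepted back through use_ptr in its reply:

    #include <stdio.h>
    #include <string.h>

    /* Map the USEHOSTEXP=<c> value from an INITIALIZE command to the
     * plugin's use_host_exp setting: T=1, F=0, N=2.  Returns -1 for a
     * value the plugin would reject. */
    static int parse_usehostexp(const char *cmd)
    {
        const char *p = strstr(cmd, "USEHOSTEXP=");

        if (p == NULL)
            return -1;
        switch (p[11]) {           /* character after the '=' */
        case 'T': return 1;
        case 'F': return 0;
        case 'N': return 2;
        default:  return -1;
        }
    }

    int main(void)
    {
        /* Illustrative command line, not captured from Moab */
        const char *cmd = "CMD=INITIALIZE EPORT=15004 USEHOSTEXP=N";
        printf("use_host_exp = %d\n", parse_usehostexp(cmd));
        return 0;
    }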
@@ -37,6 +37,7 @@ #include "./msg.h" #include <strings.h> +#include "src/slurmctld/job_scheduler.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" @@ -54,27 +55,11 @@ extern void null_term(char *str) } } -/* return -1 on error */ -static int32_t _get_depend_id(char *str) -{ - /* stand-alone job_id */ - if (isdigit(str[0])) - return (int32_t) atol(str); - - if (strncasecmp(str, "afterany:", 9) != 0) /* invalid spec */ - return (int32_t) -1; - - str += 9; - if (!isdigit(str[0])) - return (int32_t) -1; - return (int32_t) atol(str); -} - static int _job_modify(uint32_t jobid, char *bank_ptr, - int32_t depend_id, char *new_hostlist, + char *depend_ptr, char *new_hostlist, uint32_t new_node_cnt, char *part_name_ptr, uint32_t new_time_limit, char *name_ptr, - char *start_ptr) + char *start_ptr, char *feature_ptr) { struct job_record *job_ptr; time_t now = time(NULL); @@ -89,9 +74,16 @@ static int _job_modify(uint32_t jobid, char *bank_ptr, return ESLURM_DISABLED; } - if (depend_id != -1) { - info("wiki: changing job dependency to %d", depend_id); - job_ptr->dependency = depend_id; + if (depend_ptr) { + int rc = update_job_dependency(job_ptr, depend_ptr); + if (rc == SLURM_SUCCESS) { + info("wiki: changed job %u dependency to %s", + jobid, depend_ptr); + } else { + error("wiki: changing job %u dependency to %s", + jobid, depend_ptr); + return EINVAL; + } } if (new_time_limit) { @@ -114,6 +106,20 @@ static int _job_modify(uint32_t jobid, char *bank_ptr, last_job_update = now; } + if (feature_ptr) { + if ((job_ptr->job_state == JOB_PENDING) && + (job_ptr->details)) { + info("wiki: change job %u features to %s", + jobid, feature_ptr); + job_ptr->details->features = xstrdup(feature_ptr); + last_job_update = now; + } else { + error("wiki: MODIFYJOB features of non-pending " + "job %u", jobid); + return ESLURM_DISABLED; + } + } + if (start_ptr) { char *end_ptr; uint32_t begin_time = strtol(start_ptr, &end_ptr, 10); @@ -122,6 +128,7 @@ static int _job_modify(uint32_t jobid, char *bank_ptr, info("wiki: change job %u begin time to %u", jobid, begin_time); job_ptr->details->begin_time = begin_time; + last_job_update = now; } else { error("wiki: MODIFYJOB begin_time of non-pending " "job %u", jobid); @@ -131,7 +138,8 @@ static int _job_modify(uint32_t jobid, char *bank_ptr, if (name_ptr) { info("wiki: change job %u name %s", jobid, name_ptr); - strncpy(job_ptr->name, name_ptr, sizeof(job_ptr->name)); + xfree(job_ptr->name); + job_ptr->name = xstrdup(name_ptr); last_job_update = now; } @@ -201,7 +209,8 @@ host_fini: if (rc) { } info("wiki: change job %u partition %s", jobid, part_name_ptr); - strncpy(job_ptr->partition, part_name_ptr, MAX_SLURM_NAME); + xfree(job_ptr->partition); + job_ptr->partition = xstrdup(part_name_ptr); job_ptr->part_ptr = part_ptr; last_job_update = now; } @@ -228,14 +237,14 @@ host_fini: if (rc) { /* Modify a job: * CMD=MODIFYJOB ARG=<jobid> PARTITION=<name> NODES=<number> * DEPEND=afterany:<jobid> TIMELIMT=<seconds> BANK=<name> - * MINSTARTTIME=<uts> + * MINSTARTTIME=<uts> RFEATURES=<features> * RET 0 on success, -1 on failure */ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) { char *arg_ptr, *bank_ptr, *depend_ptr, *nodes_ptr, *start_ptr; char *host_ptr, *name_ptr, *part_ptr, *time_ptr, *tmp_char; + char *feature_ptr; int i, slurm_rc; - int depend_id = -1; uint32_t jobid, new_node_cnt = 0, new_time_limit = 0; static char reply_msg[128]; /* Locks: write job, read node and partition info */ @@ -260,14 +269,15 @@ extern int 
job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) error("wiki: MODIFYJOB has invalid jobid"); return -1; } - bank_ptr = strstr(cmd_ptr, "BANK="); - depend_ptr = strstr(cmd_ptr, "DEPEND="); - name_ptr = strstr(cmd_ptr, "JOBNAME="); - host_ptr = strstr(cmd_ptr, "HOSTLIST="); - start_ptr = strstr(cmd_ptr, "MINSTARTTIME="); - nodes_ptr = strstr(cmd_ptr, "NODES="); - part_ptr = strstr(cmd_ptr, "PARTITION="); - time_ptr = strstr(cmd_ptr, "TIMELIMIT="); + bank_ptr = strstr(cmd_ptr, "BANK="); + depend_ptr = strstr(cmd_ptr, "DEPEND="); + host_ptr = strstr(cmd_ptr, "HOSTLIST="); + name_ptr = strstr(cmd_ptr, "JOBNAME="); + start_ptr = strstr(cmd_ptr, "MINSTARTTIME="); + nodes_ptr = strstr(cmd_ptr, "NODES="); + part_ptr = strstr(cmd_ptr, "PARTITION="); + feature_ptr = strstr(cmd_ptr, "RFEATURES="); + time_ptr = strstr(cmd_ptr, "TIMELIMIT="); if (bank_ptr) { bank_ptr[4] = ':'; bank_ptr += 5; @@ -276,14 +286,12 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) if (depend_ptr) { depend_ptr[6] = ':'; depend_ptr += 7; - depend_id = _get_depend_id(depend_ptr); - if (depend_id == -1) { - *err_code = -300; - *err_msg = "MODIFYJOB has invalid DEPEND specificiation"; - error("wiki: MODIFYJOB has invalid DEPEND spec: %s", - depend_ptr); - return -1; - } + null_term(depend_ptr); + } + if (feature_ptr) { + feature_ptr[9] = ':'; + feature_ptr += 10; + null_term(feature_ptr); } if (host_ptr) { host_ptr[8] = ':'; @@ -295,7 +303,7 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) name_ptr += 8; if (name_ptr[0] == '\"') { name_ptr++; - for (i=0; i<MAX_JOBNAME_LEN; i++) { + for (i=0; ; i++) { if (name_ptr[i] == '\0') break; if (name_ptr[i] == '\"') { @@ -303,11 +311,9 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) break; } } - if (i == MAX_JOBNAME_LEN) - name_ptr[i-1] = '\0'; } else if (name_ptr[0] == '\'') { name_ptr++; - for (i=0; i<MAX_JOBNAME_LEN; i++) { + for (i=0; ; i++) { if (name_ptr[i] == '\0') break; if (name_ptr[i] == '\'') { @@ -315,8 +321,6 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) break; } } - if (i == MAX_JOBNAME_LEN) - name_ptr[i-1] = '\0'; } else null_term(name_ptr); } @@ -351,9 +355,9 @@ extern int job_modify_wiki(char *cmd_ptr, int *err_code, char **err_msg) } lock_slurmctld(job_write_lock); - slurm_rc = _job_modify(jobid, bank_ptr, depend_id, host_ptr, + slurm_rc = _job_modify(jobid, bank_ptr, depend_ptr, host_ptr, new_node_cnt, part_ptr, new_time_limit, name_ptr, - start_ptr); + start_ptr, feature_ptr); unlock_slurmctld(job_write_lock); if (slurm_rc != SLURM_SUCCESS) { *err_code = -700; diff --git a/src/plugins/sched/wiki2/job_notify.c b/src/plugins/sched/wiki2/job_notify.c index ac7ed5217..ddb3a65dd 100644 --- a/src/plugins/sched/wiki2/job_notify.c +++ b/src/plugins/sched/wiki2/job_notify.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/job_release_task.c b/src/plugins/sched/wiki2/job_release_task.c index 896a7504d..1a02f40e1 100644 --- a/src/plugins/sched/wiki2/job_release_task.c +++ b/src/plugins/sched/wiki2/job_release_task.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/job_requeue.c b/src/plugins/sched/wiki2/job_requeue.c index ce0bd142a..444b810ee 100644 --- a/src/plugins/sched/wiki2/job_requeue.c +++ b/src/plugins/sched/wiki2/job_requeue.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/job_signal.c b/src/plugins/sched/wiki2/job_signal.c index 02f1e99c6..d1a24fa06 100644 --- a/src/plugins/sched/wiki2/job_signal.c +++ b/src/plugins/sched/wiki2/job_signal.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/job_will_run.c b/src/plugins/sched/wiki2/job_will_run.c index 637a112b0..3ecca4dfd 100644 --- a/src/plugins/sched/wiki2/job_will_run.c +++ b/src/plugins/sched/wiki2/job_will_run.c @@ -1,10 +1,11 @@ /*****************************************************************************\ * job_will_run.c - Process Wiki job will_run test ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. + * Copyright (C) 2006-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -36,22 +37,45 @@ \*****************************************************************************/ #include "./msg.h" +#include "src/common/node_select.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" #include "src/slurmctld/node_scheduler.h" #include "src/slurmctld/state_save.h" -static char * _copy_nodelist_no_dup(char *node_list); -static int _will_run_test(uint32_t jobid, char *hostlist, - int *err_code, char **err_msg); +#define MAX_JOB_QUEUE 20 -/* RET 0 on success, -1 on failure */ +static void _select_list_del(void *x); +static char * _will_run_test(uint32_t *jobid, time_t *start_time, + char **node_list, int job_cnt, + int *err_code, char **err_msg); + +/* + * job_will_run - Determine if, when and where a priority ordered list of jobs + * can be initiated with the currently running jobs as a + * backgorund + * cmd_ptr IN - CMD=JOBWILLRUN ARG=JOBID=<JOBID>[@<TIME>],<AVAIL_NODES> + * [JOBID=<JOBID>[@<TIME>],<AVAIL_NODES>]... + * err_code OUT - 0 on success or some error code + * err_msg OUT - error message if any of the specified jobs can not be started + * at the specified time (if given) on the available nodes. 
+ * Otherwise information on when and where the pending jobs + * will be initiated + * ARG=<JOBID>:<PROCS>@<TIME>,<USED_NODES> + * [<JOBID>:<PROCS>@<TIME>,<USED_NODES>] + * NOTE: xfree() err_msg if err_code is zero + * RET 0 on success, -1 on failure + */ extern int job_will_run(char *cmd_ptr, int *err_code, char **err_msg) { - char *arg_ptr, *task_ptr, *node_ptr, *tmp_char; - int i; - uint32_t jobid; - char host_string[MAXHOSTRANGELEN]; + char *arg_ptr, *buf, *tmp_buf, *tmp_char; + int job_cnt; + uint32_t jobid[MAX_JOB_QUEUE]; + time_t start_time[MAX_JOB_QUEUE]; + char *avail_nodes[MAX_JOB_QUEUE]; + /* Locks: write job, read node and partition info */ + slurmctld_lock_t job_write_lock = { + NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK }; arg_ptr = strstr(cmd_ptr, "ARG="); if (arg_ptr == NULL) { @@ -60,176 +84,279 @@ extern int job_will_run(char *cmd_ptr, int *err_code, char **err_msg) error("wiki: JOBWILLRUN lacks ARG"); return -1; } - jobid = strtoul(arg_ptr+4, &tmp_char, 10); - if ((tmp_char[0] != '\0') && (!isspace(tmp_char[0]))) { - *err_code = -300; - *err_msg = "Invalid ARG value"; - error("wiki: JOBWILLRUN has invalid jobid"); - return -1; - } + arg_ptr += 4; - task_ptr = strstr(cmd_ptr, "TASKLIST="); - if (task_ptr) { - hostlist_t hl; - node_ptr = task_ptr + 9; - for (i=0; node_ptr[i]!='\0'; i++) { - if (node_ptr[i] == ':') - node_ptr[i] = ','; + for (job_cnt=0; job_cnt<MAX_JOB_QUEUE; ) { + if (strncmp(arg_ptr, "JOBID=", 6)) { + *err_code = -300; + *err_msg = "Invalid ARG value"; + error("wiki: JOBWILLRUN has invalid ARG value"); + return -1; } - hl = hostlist_create(node_ptr); - i = hostlist_ranged_string(hl, sizeof(host_string), host_string); - hostlist_destroy(hl); - if (i < 0) { + arg_ptr += 6; + jobid[job_cnt] = strtoul(arg_ptr, &tmp_char, 10); + if (tmp_char[0] == '@') + start_time[job_cnt] = strtoul(tmp_char+1, &tmp_char, 10); + else + start_time[job_cnt] = 0; + if (tmp_char[0] != ',') { *err_code = -300; - *err_msg = "JOBWILLRUN has invalid TASKLIST"; - error("wiki: JOBWILLRUN has invalid TASKLIST"); + *err_msg = "Invalid ARG value"; + error("wiki: JOBWILLRUN has invalid ARG value"); return -1; } - } else { - /* no restrictions on nodes available for use */ - strcpy(host_string, ""); + avail_nodes[job_cnt] = tmp_char + 1; + job_cnt++; + + while (tmp_char[0] && (!isspace(tmp_char[0]))) + tmp_char++; + if (tmp_char[0] == '\0') + break; + tmp_char[0] = '\0'; /* was space */ + tmp_char++; + while (isspace(tmp_char[0])) + tmp_char++; + if (tmp_char[0] == '\0') + break; + arg_ptr = tmp_char; } - if (_will_run_test(jobid, host_string, err_code, err_msg) != 0) + lock_slurmctld(job_write_lock); + buf = _will_run_test(jobid, start_time, avail_nodes, job_cnt, + err_code, err_msg); + unlock_slurmctld(job_write_lock); + + if (!buf) return -1; + tmp_buf = xmalloc(strlen(buf) + 32); + sprintf(tmp_buf, "SC=0 ARG=%s", buf); + xfree(buf); + *err_code = 0; + *err_msg = tmp_buf; return 0; } -static int _will_run_test(uint32_t jobid, char *hostlist, - int *err_code, char **err_msg) +static void _select_list_del (void *x) +{ + select_will_run_t *select_will_run = (select_will_run_t *) x; + FREE_NULL_BITMAP(select_will_run->avail_nodes); + xfree(select_will_run); +} + +static char * _will_run_test(uint32_t *jobid, time_t *start_time, + char **node_list, int job_cnt, + int *err_code, char **err_msg) { - int rc = 0, i; struct job_record *job_ptr; - /* Write lock on job info, read lock on node info */ - slurmctld_lock_t job_write_lock = { - NO_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK }; - char 
*new_node_list, *picked_node_list = NULL; - bitstr_t *new_bitmap, *save_exc_bitmap, *save_req_bitmap; - uint32_t save_prio; - bitstr_t *picked_node_bitmap = NULL; - /* Just create a big static message buffer to avoid dealing with - * xmalloc/xfree. We'll switch to compressed node naming soon - * and this buffer can be set smaller then. */ - static char reply_msg[16384]; + struct part_record *part_ptr; + bitstr_t *avail_bitmap = NULL; + char *hostlist, *reply_msg = NULL; + uint32_t min_nodes, max_nodes, req_nodes; + int i, rc; + select_will_run_t *select_will_run = NULL; + List select_list; + ListIterator iter; - lock_slurmctld(job_write_lock); - job_ptr = find_job_record(jobid); - if (job_ptr == NULL) { - *err_code = -700; - *err_msg = "No such job"; - error("wiki: Failed to find job %u", jobid); - rc = -1; - unlock_slurmctld(job_write_lock); - return rc; - } + select_list = list_create(_select_list_del); + if (select_list == NULL) + fatal("list_create: malloc failure"); - if ((job_ptr->details == NULL) - || (job_ptr->job_state != JOB_PENDING)) { - *err_code = -700; - *err_msg = "Job not pending, can't test will_run"; - error("wiki: Attempt to test will_run of non-pending job %u", - jobid); - rc = -1; - unlock_slurmctld(job_write_lock); - return rc; - } + for (i=0; i<job_cnt; i++) { + debug2("wiki2: will_run job_id=%u start_time=%u node_list=%s", + jobid[i], start_time[i], node_list[i]); + job_ptr = find_job_record(jobid[i]); + if (job_ptr == NULL) { + *err_code = -700; + *err_msg = "No such job"; + error("wiki: Failed to find job %u", jobid[i]); + break; + } + if (job_ptr->job_state != JOB_PENDING) { + *err_code = -700; + *err_msg = "WillRun not applicable to non-pending job"; + error("wiki: WillRun on non-pending job %u", jobid[i]); + break; + } - new_node_list = _copy_nodelist_no_dup(hostlist); - if (hostlist && (new_node_list == NULL)) { - *err_code = -700; - *err_msg = "Invalid TASKLIST"; - error("wiki: Attempt to set invalid node list for job %u, %s", - jobid, hostlist); - rc = -1; - unlock_slurmctld(job_write_lock); - return rc; - } + part_ptr = job_ptr->part_ptr; + if (part_ptr == NULL) { + *err_code = -700; + *err_msg = "Job lacks a partition"; + error("wiki: Job %u lacks a partition", jobid[i]); + break; + } - if (node_name2bitmap(new_node_list, false, &new_bitmap) != 0) { - *err_code = -700; - *err_msg = "Invalid TASKLIST"; - error("wiki: Attempt to set invalid node list for job %u, %s", - jobid, hostlist); - rc = -1; - xfree(new_node_list); - unlock_slurmctld(job_write_lock); - return rc; - } + if ((job_ptr->details == NULL) || + (job_ptr->job_state != JOB_PENDING)) { + *err_code = -700; + *err_msg = "Job not pending, can't test will_run"; + error("wiki: Attempt to test will_run of non-pending " + "job %u", jobid[i]); + break; + } - /* Put the inverse of this on the excluded node list, - * Remove any required nodes, and test */ - save_exc_bitmap = job_ptr->details->exc_node_bitmap; - if (hostlist[0]) { /* empty hostlist, all nodes usable */ - bit_not(new_bitmap); - job_ptr->details->exc_node_bitmap = new_bitmap; - } - save_req_bitmap = job_ptr->details->req_node_bitmap; - job_ptr->details->req_node_bitmap = bit_alloc(node_record_count); - save_prio = job_ptr->priority; - job_ptr->priority = 1; + if ((node_list[i] == NULL) || (node_list[i][0] == '\0')) { + /* assume all nodes available to job for testing */ + avail_bitmap = bit_copy(avail_node_bitmap); + } else if (node_name2bitmap(node_list[i], false, + &avail_bitmap) != 0) { + *err_code = -700; + *err_msg = "Invalid available 
nodes value"; + error("wiki: Attempt to set invalid available node " + "list for job %u, %s", jobid[i], node_list[i]); + break; + } + + /* Only consider nodes that are not DOWN or DRAINED */ + bit_and(avail_bitmap, avail_node_bitmap); + + /* Consider only nodes in this job's partition */ + if (part_ptr->node_bitmap) + bit_and(avail_bitmap, part_ptr->node_bitmap); + else { + *err_code = -730; + *err_msg = "Job's partition has no nodes"; + error("wiki: no nodes in partition %s for job %u", + part_ptr->name, jobid[i]); + break; + } - rc = select_nodes(job_ptr, true, &picked_node_bitmap); - if (picked_node_bitmap) { - picked_node_list = bitmap2wiki_node_name(picked_node_bitmap); - i = strlen(picked_node_list); - if ((i + 64) > sizeof(reply_msg)) - error("wiki: will_run buffer overflow"); + if (job_req_node_filter(job_ptr, avail_bitmap) != + SLURM_SUCCESS) { + /* Job probably has invalid feature list */ + *err_code = -730; + *err_msg = "Job's required features not available " + "on selected nodes"; + error("wiki: job %u not runnable on hosts=%s", + jobid[i], node_list[i]); + break; + } + if (job_ptr->details->exc_node_bitmap) { + bit_not(job_ptr->details->exc_node_bitmap); + bit_and(avail_bitmap, job_ptr->details->exc_node_bitmap); + bit_not(job_ptr->details->exc_node_bitmap); + } + if ((job_ptr->details->req_node_bitmap) && + (!bit_super_set(job_ptr->details->req_node_bitmap, + avail_bitmap))) { + *err_code = -730; + *err_msg = "Job's required nodes not available"; + error("wiki: job %u not runnable on hosts=%s", + jobid[i], node_list[i]); + break; + } + + min_nodes = MAX(job_ptr->details->min_nodes, + part_ptr->min_nodes); + if (job_ptr->details->max_nodes == 0) + max_nodes = part_ptr->max_nodes; + else + max_nodes = MIN(job_ptr->details->max_nodes, + part_ptr->max_nodes); + max_nodes = MIN(max_nodes, 500000); /* prevent overflows */ + if (job_ptr->details->max_nodes) + req_nodes = max_nodes; + else + req_nodes = min_nodes; + if (min_nodes > max_nodes) { + /* job's min_nodes exceeds partitions max_nodes */ + *err_code = -730; + *err_msg = "Job's min_nodes > max_nodes"; + error("wiki: job %u not runnable on hosts=%s", + jobid[i], node_list[i]); + break; + } + select_will_run = xmalloc(sizeof(select_will_run_t)); + select_will_run->avail_nodes = avail_bitmap; + avail_bitmap = NULL; + select_will_run->job_ptr = job_ptr; + job_ptr->start_time = start_time[i]; + select_will_run->max_nodes = max_nodes; + select_will_run->min_nodes = min_nodes; + select_will_run->req_nodes = req_nodes; + list_push(select_list, select_will_run); + } + FREE_NULL_BITMAP(avail_bitmap); + if (i < job_cnt) { /* error logged above */ + /* Restore pending job start time */ + iter = list_iterator_create(select_list); + if (iter == NULL) + fatal("list_iterator_create: malloc failure"); + while ((select_will_run = list_next(iter))) + select_will_run->job_ptr->start_time = 0; + list_iterator_destroy(iter); + list_destroy(select_list); + return NULL; } - if (rc == SLURM_SUCCESS) { - *err_code = 0; - snprintf(reply_msg, sizeof(reply_msg), - "SC=0 Job %d runnable now TASKLIST:%s", - jobid, picked_node_list); - *err_msg = reply_msg; - } else if (rc == ESLURM_NODES_BUSY) { - *err_code = 1; - snprintf(reply_msg, sizeof(reply_msg), - "SC=1 Job %d runnable later TASKLIST:%s", - jobid, picked_node_list); - *err_msg = reply_msg; + if (job_cnt == 1) { + rc = select_g_job_test( + select_will_run->job_ptr, + select_will_run->avail_nodes, + select_will_run->min_nodes, + select_will_run->max_nodes, + select_will_run->req_nodes, + 
SELECT_MODE_WILL_RUN); } else { - char *err_str = slurm_strerror(rc); - error("wiki: job %d never runnable on hosts=%s %s", - jobid, new_node_list, err_str); - *err_code = -740; - snprintf(reply_msg, sizeof(reply_msg), - "SC=-740 Job %d not runable: %s", - jobid, err_str); - *err_msg = reply_msg; + rc = select_g_job_list_test(select_list); } - /* Restore job's state, release memory */ - xfree(picked_node_list); - FREE_NULL_BITMAP(picked_node_bitmap); - xfree(new_node_list); - bit_free(new_bitmap); - FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap); - job_ptr->details->exc_node_bitmap = save_exc_bitmap; - job_ptr->details->req_node_bitmap = save_req_bitmap; - job_ptr->priority = save_prio; - unlock_slurmctld(job_write_lock); - return rc; -} - -static char * _copy_nodelist_no_dup(char *node_list) -{ - int new_size = 128; - char *new_str; - hostlist_t hl = hostlist_create(node_list); + if (rc == SLURM_SUCCESS) { + char tmp_str[128]; + *err_code = 0; + uint32_t proc_cnt = 0; - if (hl == NULL) - return NULL; + iter = list_iterator_create(select_list); + if (iter == NULL) + fatal("list_iterator_create: malloc failure"); + for (i=0; i<job_cnt; i++) { + select_will_run = list_next(iter); + if (select_will_run == NULL) { + error("wiki2: select_list size is bad"); + break; + } + if (i) + xstrcat(reply_msg, " "); + else + xstrcat(reply_msg, "STARTINFO="); +#ifdef HAVE_BG + select_g_get_jobinfo(select_will_run->job_ptr-> + select_jobinfo, + SELECT_DATA_NODE_CNT, + &proc_cnt); - hostlist_uniq(hl); - new_str = xmalloc(new_size); - while (hostlist_ranged_string(hl, new_size, new_str) == -1) { - new_size *= 2; - xrealloc(new_str, new_size); +#else + proc_cnt = select_will_run->job_ptr->total_procs; +#endif + snprintf(tmp_str, sizeof(tmp_str), "%u:%u@%u,", + select_will_run->job_ptr->job_id, + proc_cnt, + (uint32_t) select_will_run-> + job_ptr->start_time); + /* Restore pending job start time */ + select_will_run->job_ptr->start_time = 0; + xstrcat(reply_msg, tmp_str); + hostlist = bitmap2node_name(select_will_run-> + avail_nodes); + xstrcat(reply_msg, hostlist); + xfree(hostlist); + } + list_iterator_destroy(iter); + } else { + /* Restore pending job start times */ + iter = list_iterator_create(select_list); + if (iter == NULL) + fatal("list_iterator_create: malloc failure"); + while ((select_will_run = list_next(iter))) + select_will_run->job_ptr->start_time = 0; + list_iterator_destroy(iter); + xstrcat(reply_msg, "Jobs not runable on selected nodes"); + error("wiki: jobs not runnable on nodes"); } - hostlist_destroy(hl); - return new_str; + + list_destroy(select_list); + return reply_msg; } /* @@ -240,7 +367,7 @@ static char * _copy_nodelist_no_dup(char *node_list) * IN bitmap - bitmap pointer * RET pointer to node list or NULL on error * globals: node_record_table_ptr - pointer to node table - * NOTE: the caller must xfree the memory at node_list when no longer required + * NOTE: the caller must xfree the returned pointer when no longer required */ extern char * bitmap2wiki_node_name(bitstr_t *bitmap) { diff --git a/src/plugins/sched/wiki2/msg.c b/src/plugins/sched/wiki2/msg.c index ab710c976..50790b3c9 100644 --- a/src/plugins/sched/wiki2/msg.c +++ b/src/plugins/sched/wiki2/msg.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. 
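On success the loop above emits one "STARTINFO=" entry per job of the form <jobid>:<procs>@<start_time>,<node_list>. A tiny illustration of that reply formatting with invented values, using a single snprintf() instead of the plugin's xstrcat() accumulation:

#include <stdio.h>

int main(void)
{
	char reply[256];
	unsigned int job_id = 1234, proc_cnt = 16;
	unsigned int start_time = 1717000000;	/* time_t narrowed to 32 bits */
	const char *node_list = "tux[0-3]";

	snprintf(reply, sizeof(reply), "STARTINFO=%u:%u@%u,%s",
		 job_id, proc_cnt, start_time, node_list);
	printf("%s\n", reply);
	return 0;
}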
* For details, see <http://www.llnl.gov/linux/slurm/>. @@ -38,6 +38,7 @@ #include "./crypto.h" #include "./msg.h" #include "src/common/uid.h" +#include "src/slurmctld/locks.h" #include <sys/poll.h> #define _DEBUG 0 @@ -142,12 +143,20 @@ static void *_msg_thread(void *no_data) slurm_fd sock_fd = -1, new_fd; slurm_addr cli_addr; char *msg; - slurm_ctl_conf_t *conf = slurm_conf_lock(); + slurm_ctl_conf_t *conf; int i; + /* Locks: Write configuration, job, node, and partition */ + slurmctld_lock_t config_write_lock = { + WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK }; + conf = slurm_conf_lock(); sched_port = conf->schedport; slurm_conf_unlock(); + /* Wait until configuration is completely loaded */ + lock_slurmctld(config_write_lock); + unlock_slurmctld(config_write_lock); + /* If SchedulerPort is already taken, keep trying to open it * once per minute. Slurmctld will continue to function * during this interval even if nothing can be scheduled. */ @@ -181,8 +190,10 @@ static void *_msg_thread(void *no_data) err_code = 0; err_msg = ""; msg = _recv_msg(new_fd); - _proc_msg(new_fd, msg); - xfree(msg); + if (msg) { + _proc_msg(new_fd, msg); + xfree(msg); + } slurm_close_accepted_conn(new_fd); } verbose("wiki: message engine shutdown"); @@ -206,7 +217,7 @@ static char * _get_wiki_conf_path(void) val = default_slurm_config_file; /* Replace file name on end of path */ - i = strlen(val) + 1; + i = strlen(val) + 10; path = xmalloc(i); strcpy(path, val); val = strrchr(path, (int)'/'); @@ -351,11 +362,61 @@ extern int parse_wiki_config(void) info("HostFormat = %u", use_host_exp); info("JobAggregationTime = %u sec", job_aggregation_time); info("JobPriority = %s", init_prio_mode ? "run" : "hold"); - info("KillWait = %u sec", kill_wait); + info("KillWait = %u sec", kill_wait); + for (i=0; i<EXC_PART_CNT; i++) { + if (!exclude_part_ptr[i]) + continue; + info("ExcludePartitions = %s", exclude_part_ptr[i]->name); + } + for (i=0; i<HIDE_PART_CNT; i++) { + if (!hide_part_ptr[i]) + continue; + info("HidePartitionJobs = %s", hide_ptr_ptr[i]->name); + } #endif return SLURM_SUCCESS; } +/* + * Return a string containing any scheduling plugin configuration information + * that we want to expose via "scontrol show configuration". 
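The _msg_thread() change above takes and immediately releases the slurmctld configuration write lock purely as a barrier, so the wiki message engine cannot start answering requests before the controller has finished loading its configuration. Here is a generic illustration of that lock-then-unlock barrier using plain pthreads rather than slurmctld's lock tables (an assumption made only for a runnable example; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t config_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for the wiki message thread: take and drop the write lock as a
 * barrier, so it cannot run ahead of configuration loading. */
static void *msg_thread(void *arg)
{
	(void) arg;
	pthread_rwlock_wrlock(&config_lock);
	pthread_rwlock_unlock(&config_lock);
	printf("msg thread: configuration loaded, proceeding\n");
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_rwlock_wrlock(&config_lock);	/* held while "loading" */
	pthread_create(&tid, NULL, msg_thread, NULL);
	sleep(1);				/* pretend to load the config */
	printf("main: configuration load complete\n");
	pthread_rwlock_unlock(&config_lock);

	pthread_join(tid, NULL);
	return 0;
}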
+ * NOTE: the caller must xfree the returned pointer + */ +extern char * get_wiki_conf(void) +{ + int i, first; + char buf[20], *conf = NULL; + + snprintf(buf, sizeof(buf), "HostFormat=%u", use_host_exp); + xstrcat(conf, buf); + + first = 1; + for (i=0; i<EXC_PART_CNT; i++) { + if (!exclude_part_ptr[i]) + continue; + if (first) { + xstrcat(conf, ";ExcludePartitions="); + first = 0; + } else + xstrcat(conf, ","); + xstrcat(conf, exclude_part_ptr[i]->name); + } + + first = 1; + for (i=0; i<HIDE_PART_CNT; i++) { + if (!hide_part_ptr[i]) + continue; + if (first) { + xstrcat(conf, ";HidePartitionJobs="); + first = 0; + } else + xstrcat(conf, ","); + xstrcat(conf, hide_part_ptr[i]->name); + } + + return conf; +} + static size_t _read_bytes(int fd, char *buf, size_t size) { size_t bytes_remaining, bytes_read; @@ -620,7 +681,8 @@ static void _proc_msg(slurm_fd new_fd, char *msg) job_release_task(cmd_ptr, &err_code, &err_msg); } else if (strncmp(cmd_ptr, "JOBWILLRUN", 10) == 0) { msg_type = "wiki:JOBWILLRUN"; - job_will_run(cmd_ptr, &err_code, &err_msg); + if (!job_will_run(cmd_ptr, &err_code, &err_msg)) + goto free_resp_msg; } else if (strncmp(cmd_ptr, "MODIFYJOB", 9) == 0) { msg_type = "wiki:MODIFYJOB"; job_modify_wiki(cmd_ptr, &err_code, &err_msg); diff --git a/src/plugins/sched/wiki2/msg.h b/src/plugins/sched/wiki2/msg.h index 53cbb8c9f..6b58dcf23 100644 --- a/src/plugins/sched/wiki2/msg.h +++ b/src/plugins/sched/wiki2/msg.h @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -101,18 +101,55 @@ extern uint16_t job_aggregation_time; extern uint16_t kill_wait; extern uint16_t use_host_exp; +/* + * bitmap2wiki_node_name - given a bitmap, build a list of colon separated + * node names (if we can't use node range expressions), or the + * normal slurm node name expression + * + * IN bitmap - bitmap pointer + * RET pointer to node list or NULL on error + * globals: node_record_table_ptr - pointer to node table + * NOTE: the caller must xfree the returned pointer when no longer required + */ +extern char * bitmap2wiki_node_name(bitstr_t *bitmap); + +/* + * event_notify - Notify Moab of some event + * event_code IN - message code to send Moab + * 1234 - job state change + * 1235 - partition state change + * desc IN - event description + * RET 0 on success, -1 on failure + */ extern int event_notify(int event_code, char *desc); -extern int spawn_msg_thread(void); + +/* + * Spawn message hander thread + */ +extern int spawn_msg_thread(void); + +/* + * Terminate message hander thread + */ extern void term_msg_thread(void); -extern char * bitmap2wiki_node_name(bitstr_t *bitmap); + +/* + * Return a string containing any scheduling plugin configuration information + * that we want to expose via "scontrol show configuration". 
+ * NOTE: the caller must xfree the returned pointer + */ +extern char * get_wiki_conf(void); /* * Given a string, replace the first space found with '\0' */ extern void null_term(char *str); -/* Functions called from within msg.c (rather than creating a bunch - * more header files with one function definition each */ + +/* + * Functions called from within msg.c (rather than creating a bunch + * more header files with one function definition each) + */ extern int cancel_job(char *cmd_ptr, int *err_code, char **err_msg); extern int get_jobs(char *cmd_ptr, int *err_code, char **err_msg); extern int get_nodes(char *cmd_ptr, int *err_code, char **err_msg); @@ -130,3 +167,4 @@ extern char * slurm_job2moab_task_list(struct job_record *job_ptr); extern int start_job(char *cmd_ptr, int *err_code, char **err_msg); extern int suspend_job(char *cmd_ptr, int *err_code, char **err_msg); extern int resume_job(char *cmd_ptr, int *err_code, char **err_msg); +extern void wiki_job_requeue(struct job_record *job_ptr, char *reason); diff --git a/src/plugins/sched/wiki2/resume_job.c b/src/plugins/sched/wiki2/resume_job.c index 319219244..b172e04d2 100644 --- a/src/plugins/sched/wiki2/resume_job.c +++ b/src/plugins/sched/wiki2/resume_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/sched/wiki2/sched_wiki.c b/src/plugins/sched/wiki2/sched_wiki.c index fd7710640..db4168acf 100644 --- a/src/plugins/sched/wiki2/sched_wiki.c +++ b/src/plugins/sched/wiki2/sched_wiki.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
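get_wiki_conf(), declared above, assembles a single "HostFormat=...;ExcludePartitions=a,b;HidePartitionJobs=..." string for "scontrol show configuration". A stand-alone sketch of that accumulate-and-join pattern, using malloc/realloc in place of SLURM's xstrcat(); the partition names are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow-and-append helper standing in for xstrcat(). */
static void append(char **buf, const char *text)
{
	size_t old_len = *buf ? strlen(*buf) : 0;

	*buf = realloc(*buf, old_len + strlen(text) + 1);
	if (*buf == NULL)
		exit(1);
	strcpy(*buf + old_len, text);
}

int main(void)
{
	const char *exclude[] = { "debug", "maint", NULL };
	char *conf = NULL;
	int i, first = 1;

	append(&conf, "HostFormat=0");
	for (i = 0; exclude[i]; i++) {
		append(&conf, first ? ";ExcludePartitions=" : ",");
		first = 0;
		append(&conf, exclude[i]);
	}
	printf("%s\n", conf);	/* HostFormat=0;ExcludePartitions=debug,maint */
	free(conf);
	return 0;
}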
@@ -75,6 +75,22 @@ extern int slurm_sched_plugin_schedule( void ) return SLURM_SUCCESS; } +/***************************************************************************/ +/* TAG( slurm_sched_plugin_newalloc ) */ +/***************************************************************************/ +extern int slurm_sched_plugin_newalloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + +/***************************************************************************/ +/* TAG( slurm_sched_plugin_freealloc ) */ +/***************************************************************************/ +extern int slurm_sched_plugin_freealloc( struct job_record *job_ptr ) +{ + return SLURM_SUCCESS; +} + /**************************************************************************/ /* TAG( slurm_sched_plugin_initial_priority ) */ @@ -151,3 +167,18 @@ char *slurm_sched_strerror( int errnum ) return NULL; } +/**************************************************************************/ +/* TAG( slurm_sched_plugin_requeue ) */ +/**************************************************************************/ +void slurm_sched_plugin_requeue( struct job_record *job_ptr, char *reason ) +{ + wiki_job_requeue(job_ptr, reason); +} + +/**************************************************************************/ +/* TAG( slurm_sched_get_conf ) */ +/**************************************************************************/ +char *slurm_sched_get_conf( void ) +{ + return get_wiki_conf(); +} diff --git a/src/plugins/sched/wiki2/start_job.c b/src/plugins/sched/wiki2/start_job.c index 564919f93..bc9f296c6 100644 --- a/src/plugins/sched/wiki2/start_job.c +++ b/src/plugins/sched/wiki2/start_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -39,6 +39,7 @@ #include "src/common/node_select.h" #include "src/common/slurm_protocol_defs.h" #include "src/common/xstring.h" +#include "src/slurmctld/job_scheduler.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/slurmctld.h" #include "src/slurmctld/state_save.h" @@ -115,28 +116,36 @@ extern int start_job(char *cmd_ptr, int *err_code, char **err_msg) return -1; } task_ptr += 9; /* skip over "TASKLIST=" */ - tasklist = moab2slurm_task_list(task_ptr, &task_cnt); - if (tasklist) - hl = hostlist_create(tasklist); - if ((tasklist == NULL) || (hl == NULL)) { - *err_code = -300; - *err_msg = "STARTJOB TASKLIST is invalid"; - error("wiki: STARTJOB TASKLIST is invalid: %s", - task_ptr); - xfree(tasklist); - return -1; - } - hostlist_uniq(hl); - hostlist_sort(hl); - i = hostlist_ranged_string(hl, sizeof(host_string), host_string); - hostlist_destroy(hl); - if (i < 0) { - *err_code = -300; - *err_msg = "STARTJOB has invalid TASKLIST"; - error("wiki: STARTJOB has invalid TASKLIST: %s", - host_string); - xfree(tasklist); - return -1; + if ((task_ptr[0] == '\0') || isspace(task_ptr[0])) { + /* No TASKLIST specification, useful for testing */ + host_string[0] = '0'; + task_cnt = 0; + tasklist = NULL; + } else { + null_term(task_ptr); + tasklist = moab2slurm_task_list(task_ptr, &task_cnt); + if (tasklist) + hl = hostlist_create(tasklist); + if ((tasklist == NULL) || (hl == NULL)) { + *err_code = -300; + *err_msg = "STARTJOB TASKLIST is invalid"; + error("wiki: STARTJOB TASKLIST is invalid: %s", + task_ptr); + xfree(tasklist); + return -1; + } + hostlist_uniq(hl); + hostlist_sort(hl); + i = hostlist_ranged_string(hl, sizeof(host_string), host_string); + hostlist_destroy(hl); + if (i < 0) { + *err_code = -300; + *err_msg = "STARTJOB has invalid TASKLIST"; + error("wiki: STARTJOB has invalid TASKLIST: %s", + host_string); + xfree(tasklist); + return -1; + } } rc = _start_job(jobid, task_cnt, host_string, tasklist, comment_ptr, @@ -172,7 +181,8 @@ static int _start_job(uint32_t jobid, int task_cnt, char *hostlist, NO_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK }; char *new_node_list = NULL; static char tmp_msg[128]; - bitstr_t *new_bitmap, *save_req_bitmap = (bitstr_t *) NULL; + bitstr_t *new_bitmap = (bitstr_t *) NULL; + bitstr_t *save_req_bitmap = (bitstr_t *) NULL; bitoff_t i, bsize; int ll; /* layout info index */ char *node_name, *node_idx, *node_cur, *save_req_nodes = NULL; @@ -216,29 +226,21 @@ static int _start_job(uint32_t jobid, int task_cnt, char *hostlist, job_ptr->comment = xstrdup(comment_ptr); } - new_node_list = xstrdup(hostlist); - if (hostlist && (new_node_list == NULL)) { - *err_code = -700; - *err_msg = "Invalid TASKLIST"; - error("wiki: Attempt to set invalid node list for job %u, %s", - jobid, hostlist); - rc = -1; - goto fini; - } - - if (node_name2bitmap(new_node_list, false, &new_bitmap) != 0) { - *err_code = -700; - *err_msg = "Invalid TASKLIST"; - error("wiki: Attempt to set invalid node list for job %u, %s", - jobid, hostlist); - xfree(new_node_list); - rc = -1; - goto fini; - } - - /* User excluded node list incompatable with Wiki - * Exclude all nodes not explicitly requested */ if (task_cnt) { + new_node_list = xstrdup(hostlist); + if (node_name2bitmap(new_node_list, false, &new_bitmap) != 0) { + *err_code = -700; + *err_msg = "Invalid TASKLIST"; + error("wiki: Attempt to set invalid node list for " + "job %u, %s", + jobid, hostlist); + xfree(new_node_list); + rc = -1; + goto fini; + } + + /* User excluded node list incompatable with Wiki + * Exclude all nodes not 
explicitly requested */ FREE_NULL_BITMAP(job_ptr->details->exc_node_bitmap); job_ptr->details->exc_node_bitmap = bit_copy(new_bitmap); bit_not(job_ptr->details->exc_node_bitmap); @@ -281,7 +283,7 @@ static int _start_job(uint32_t jobid, int task_cnt, char *hostlist, } } - /* get job ready to start now */ + /* save and update job state to start now */ save_req_nodes = job_ptr->details->req_nodes; job_ptr->details->req_nodes = new_node_list; save_req_bitmap = job_ptr->details->req_node_bitmap; diff --git a/src/plugins/sched/wiki2/suspend_job.c b/src/plugins/sched/wiki2/suspend_job.c index 058d2dce3..383b9539b 100644 --- a/src/plugins/sched/wiki2/suspend_job.c +++ b/src/plugins/sched/wiki2/suspend_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/select/Makefile.in b/src/plugins/select/Makefile.in index 0d92f90a1..0a7883657 100644 --- a/src/plugins/select/Makefile.in +++ b/src/plugins/select/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
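In the _start_job() hunk above, the Moab-supplied TASKLIST is turned into a required-node bitmap and its complement becomes the job's exclude bitmap, so no node outside the explicit list can be selected. A plain-C illustration of that copy-then-invert step, with eight nodes and a uint8_t standing in for SLURM's bitstr_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t requested = 0x0f;		/* nodes 0-3 named in the TASKLIST */
	uint8_t exclude = (uint8_t) ~requested;	/* every other node is excluded */

	printf("requested=0x%02x exclude=0x%02x\n", requested, exclude);
	return 0;
}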
@@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/src/plugins/select/bluegene/Makefile.in b/src/plugins/select/bluegene/Makefile.in index 2c595da00..03cd7fcc5 100644 --- a/src/plugins/select/bluegene/Makefile.in +++ b/src/plugins/select/bluegene/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z 
"$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/select/bluegene/block_allocator/Makefile.am b/src/plugins/select/bluegene/block_allocator/Makefile.am index 1d003724a..5061e0daa 100644 --- a/src/plugins/select/bluegene/block_allocator/Makefile.am +++ b/src/plugins/select/bluegene/block_allocator/Makefile.am @@ -12,7 +12,6 @@ INCLUDES = -I$(top_srcdir) $(BG_INCLUDES) # block_allocator.h bridge_linker.h # block_allocator_LDADD = \ -# $(top_builddir)/src/common/libcommon.la \ # $(top_builddir)/src/api/libslurm.la @@ -26,12 +25,6 @@ noinst_LTLIBRARIES = libbluegene_block_allocator.la libbluegene_block_allocator_la_SOURCES = \ block_allocator.c bridge_linker.c block_allocator.h bridge_linker.h -libbluegene_block_allocator_la_LIBADD = \ - $(top_builddir)/src/common/libcommon.la -lpthread \ - $(top_builddir)/src/api/libslurm.la - libbluegene_block_allocator_la_LDFLAGS = \ $(LIB_LDFLAGS) -lm -libbluegene_block_allocator_la_DEPENDENCIES = \ - $(top_builddir)/src/common/libcommon.la diff --git a/src/plugins/select/bluegene/block_allocator/Makefile.in b/src/plugins/select/bluegene/block_allocator/Makefile.in index e606f1621..65d67a3ef 100644 --- a/src/plugins/select/bluegene/block_allocator/Makefile.in +++ b/src/plugins/select/bluegene/block_allocator/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -65,6 +67,7 @@ mkinstalldirs = $(install_sh) -d CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h CONFIG_CLEAN_FILES = LTLIBRARIES = $(noinst_LTLIBRARIES) +libbluegene_block_allocator_la_LIBADD = am_libbluegene_block_allocator_la_OBJECTS = block_allocator.lo \ bridge_linker.lo libbluegene_block_allocator_la_OBJECTS = \ @@ -73,7 +76,7 @@ libbluegene_block_allocator_la_LINK = $(LIBTOOL) --tag=CC \ $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \ $(AM_CFLAGS) $(CFLAGS) \ $(libbluegene_block_allocator_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -113,6 +116,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -126,10 +130,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -149,7 +156,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -160,6 +170,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -175,6 +187,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -190,6 +203,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -257,7 +271,6 @@ INCLUDES = -I$(top_srcdir) $(BG_INCLUDES) # block_allocator.h bridge_linker.h # block_allocator_LDADD = \ -# $(top_builddir)/src/common/libcommon.la \ # $(top_builddir)/src/api/libslurm.la # block_allocator_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS) @@ -269,16 +282,9 @@ noinst_LTLIBRARIES = libbluegene_block_allocator.la libbluegene_block_allocator_la_SOURCES = \ block_allocator.c bridge_linker.c block_allocator.h bridge_linker.h -libbluegene_block_allocator_la_LIBADD = \ - $(top_builddir)/src/common/libcommon.la -lpthread \ - $(top_builddir)/src/api/libslurm.la - libbluegene_block_allocator_la_LDFLAGS = \ $(LIB_LDFLAGS) -lm -libbluegene_block_allocator_la_DEPENDENCIES = \ - $(top_builddir)/src/common/libcommon.la - all: all-am .SUFFIXES: @@ -365,8 +371,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -378,8 +384,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) 
$(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -389,13 +395,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c index 9c0467374..b9bf065ec 100644 --- a/src/plugins/select/bluegene/block_allocator/block_allocator.c +++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c @@ -1,7 +1,7 @@ /*****************************************************************************\ * block_allocator.c - Assorted functions for layout of bglblocks, * wiring, mapping for smap, etc. - * $Id: block_allocator.c 13150 2008-01-31 22:59:13Z da $ + * $Id: block_allocator.c 13934 2008-04-23 23:00:29Z da $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -404,8 +404,9 @@ extern int new_ba_request(ba_request_t* ba_request) if ((geo[i] < 1) || (geo[i] > DIM_SIZE[i])){ error("new_ba_request Error, " - "request geometry is invalid %d " - "DIMS are %c%c%c", + "request geometry is invalid %d can't be " + "%d, DIMS are %c%c%c", + i, geo[i], alpha_num[DIM_SIZE[X]], alpha_num[DIM_SIZE[Y]], @@ -980,7 +981,7 @@ extern void init_wires() #else source = &ba_system_ptr->grid[x]; #endif - for(i=0; i<6; i++) { + for(i=0; i<NUM_PORTS_PER_NODE; i++) { _switch_config(source, source, X, i, i); _switch_config(source, source, @@ -1051,11 +1052,11 @@ extern void ba_update_node_state(ba_node_t *ba_node, uint16_t state) } #ifdef HAVE_BG - debug2("ba_update_node_state: new state of node[%c%c%c] is %s", + debug2("ba_update_node_state: new state of [%c%c%c] is %s", alpha_num[ba_node->coord[X]], alpha_num[ba_node->coord[Y]], alpha_num[ba_node->coord[Z]], node_state_string(state)); #else - debug2("ba_update_node_state: new state of node[%d] is %s", + debug2("ba_update_node_state: new state of [%d] is %s", ba_node->coord[X], node_state_string(state)); #endif @@ -1093,7 +1094,6 @@ extern ba_node_t *ba_copy_node(ba_node_t *ba_node) */ extern int allocate_block(ba_request_t* ba_request, List results) { - if (!_initialized){ error("Error, configuration not initialized, " "calling ba_init(NULL)"); @@ -1252,9 +1252,12 @@ extern int copy_node_path(List nodes, List dest_nodes) curr_switch = &ba_node->axis_switch[dim]; new_switch = &new_ba_node->axis_switch[dim]; if(curr_switch->int_wire[0].used) { - _copy_the_path(dest_nodes, - curr_switch, new_switch, - 0, dim); + if(!_copy_the_path(dest_nodes, + curr_switch, new_switch, + 0, dim)) { + rc = SLURM_ERROR; + break; + } } } @@ -1306,23 +1309,35 @@ extern int check_and_set_node_list(List nodes) curr_ba_switch = &curr_ba_node->axis_switch[i]; //info("checking dim %d", i); - for(j=0; j<BA_SYSTEM_DIMENSIONS; j++) { + for(j=0; j<NUM_PORTS_PER_NODE; j++) { //info("checking port %d", j); if(ba_switch->int_wire[j].used - && curr_ba_switch->int_wire[j].used) { + && curr_ba_switch->int_wire[j].used + && j != 
curr_ba_switch-> + int_wire[j].port_tar) { debug3("%c%c%c dim %d port %d " - "is already in use", + "is already in use to %d", alpha_num[ba_node->coord[X]], alpha_num[ba_node->coord[Y]], alpha_num[ba_node->coord[Z]], i, - j); + j, + curr_ba_switch-> + int_wire[j].port_tar); rc = SLURM_ERROR; goto end_it; } if(!ba_switch->int_wire[j].used) continue; + + /* info("setting %c%c%c dim %d port %d -> %d", */ +/* alpha_num[ba_node->coord[X]], */ +/* alpha_num[ba_node->coord[Y]], */ +/* alpha_num[ba_node->coord[Z]], */ +/* i, */ +/* j, */ +/* ba_switch->int_wire[j].port_tar); */ curr_ba_switch->int_wire[j].used = 1; curr_ba_switch->int_wire[j].port_tar = ba_switch->int_wire[j].port_tar; @@ -2479,9 +2494,8 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch, next_mark_switch = &ba_node->axis_switch[dim]; } - _copy_the_path(nodes, next_switch, next_mark_switch, + return _copy_the_path(nodes, next_switch, next_mark_switch, port_tar, dim); - return 1; } static int _find_yz_path(ba_node_t *ba_node, int *first, @@ -2777,8 +2791,8 @@ static int _reset_the_path(ba_switch_t *curr_switch, int source, #endif .axis_switch[dim]; - _reset_the_path(next_switch, port_tar, target, dim); - return 1; + return _reset_the_path(next_switch, port_tar, target, dim); +// return 1; } /* @@ -3161,7 +3175,7 @@ start_again: #endif } requested_end: - debug("can't allocate"); + debug2("1 can't allocate"); return 0; } @@ -3554,7 +3568,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source, break; } } else { - fatal("Do don't have a config to do a BG system with %d " + fatal("We don't have a config to do a BG system with %d " "in the X-dim.", DIM_SIZE[X]); } #else @@ -4897,56 +4911,56 @@ int main(int argc, char** argv) ba_init(new_node_ptr); init_wires(NULL); - /* results = list_create(NULL); */ -/* request->geometry[0] = 1; */ -/* request->geometry[1] = 4; */ -/* request->geometry[2] = 4; */ -/* request->start[0] = 5; */ -/* request->start[1] = 0; */ -/* request->start[2] = 0; */ -/* request->start_req = 1; */ -/* request->size = 32; */ -/* request->rotate = 0; */ -/* request->elongate = 0; */ -/* request->conn_type = SELECT_TORUS; */ -/* new_ba_request(request); */ -/* print_ba_request(request); */ -/* if(!allocate_block(request, results)) { */ -/* debug("couldn't allocate %c%c%c", */ -/* request->geometry[0], */ -/* request->geometry[1], */ -/* request->geometry[2]); */ -/* } */ -/* list_destroy(results); */ - -/* results = list_create(NULL); */ -/* request->geometry[0] = 1; */ -/* request->geometry[1] = 1; */ -/* request->geometry[2] = 1; */ -/* request->start[0] = 0; */ -/* request->start[1] = 0; */ -/* request->start[2] = 0; */ -/* request->start_req = 1; */ -/* request->size = 1; */ -/* request->rotate = 0; */ -/* request->elongate = 0; */ -/* request->conn_type = SELECT_TORUS; */ -/* new_ba_request(request); */ -/* print_ba_request(request); */ -/* if(!allocate_block(request, results)) { */ -/* debug("couldn't allocate %c%c%c", */ -/* alpha_num[request->geometry[0]], */ -/* alpha_num[request->geometry[1]], */ -/* alpha_num[request->geometry[2]]); */ -/* } */ -/* list_destroy(results); */ - results = list_create(NULL); - request->geometry[0] = 12; + request->geometry[0] = 1; request->geometry[1] = 1; request->geometry[2] = 1; - request->start[0] = 0; + request->start[0] = 6; + request->start[1] = 3; + request->start[2] = 2; + request->start_req = 1; +// request->size = 1; + request->rotate = 0; + request->elongate = 0; + request->conn_type = SELECT_TORUS; + new_ba_request(request); + 
print_ba_request(request); + if(!allocate_block(request, results)) { + debug("couldn't allocate %c%c%c", + request->geometry[0], + request->geometry[1], + request->geometry[2]); + } + list_destroy(results); + + results = list_create(NULL); + request->geometry[0] = 2; + request->geometry[1] = 4; + request->geometry[2] = 1; + request->start[0] = 3; request->start[1] = 0; + request->start[2] = 2; + request->start_req = 1; +// request->size = 16; + request->rotate = 0; + request->elongate = 0; + request->conn_type = SELECT_TORUS; + new_ba_request(request); + print_ba_request(request); + if(!allocate_block(request, results)) { + debug("couldn't allocate %c%c%c", + alpha_num[request->geometry[0]], + alpha_num[request->geometry[1]], + alpha_num[request->geometry[2]]); + } + list_destroy(results); + + results = list_create(NULL); + request->geometry[0] = 2; + request->geometry[1] = 1; + request->geometry[2] = 4; + request->start[0] = 5; + request->start[1] = 2; request->start[2] = 0; request->start_req = 1; request->rotate = 0; @@ -5004,16 +5018,17 @@ int main(int argc, char** argv) for(x=startx;x<endx;x++) { for(y=starty;y<endy;y++) { for(z=startz;z<endz;z++) { + ba_node_t *curr_node = + &(ba_system_ptr->grid[x][y][z]); info("Node %c%c%c Used = %d Letter = %c", alpha_num[x],alpha_num[y],alpha_num[z], - ba_system_ptr->grid[x][y][z].used, - ba_system_ptr->grid[x][y][z].letter); + curr_node->used, + curr_node->letter); for(dim=0;dim<1;dim++) { info("Dim %d",dim); ba_switch_t *wire = - &ba_system_ptr-> - grid[x][y][z].axis_switch[dim]; - for(j=0;j<6;j++) + &curr_node->axis_switch[dim]; + for(j=0;j<NUM_PORTS_PER_NODE;j++) info("\t%d -> %d -> %c%c%c %d " "Used = %d", j, wire->int_wire[j]. diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.h b/src/plugins/select/bluegene/block_allocator/block_allocator.h index 91fe58747..b9b1c5a15 100644 --- a/src/plugins/select/bluegene/block_allocator/block_allocator.h +++ b/src/plugins/select/bluegene/block_allocator/block_allocator.h @@ -227,7 +227,7 @@ extern void destroy_image_group_list(void *ptr); extern void destroy_image(void *ptr); extern void destroy_ba_node(void *ptr); -/** +/* * create a block request. Note that if the geometry is given, * then size is ignored. If elongate is true, the algorithm will try * to fit that a block of cubic shape and then it will try other @@ -247,23 +247,23 @@ extern void destroy_ba_node(void *ptr); */ extern int new_ba_request(ba_request_t* ba_request); -/** +/* * delete a block request */ extern void delete_ba_request(void *arg); -/** +/* * empty a list that we don't want to destroy the memory of the * elements always returns 1 */ extern int empty_null_destroy_list(void *arg, void *key); -/** +/* * print a block request */ extern void print_ba_request(ba_request_t* ba_request); -/** +/* * Initialize internal structures by either reading previous block * configurations from a file or by running the graph solver. * @@ -272,7 +272,7 @@ extern void print_ba_request(ba_request_t* ba_request); * return: success or error of the intialization. 
*/ extern void ba_init(); -/** +/* */ extern void init_wires(); /** diff --git a/src/plugins/select/bluegene/plugin/Makefile.am b/src/plugins/select/bluegene/plugin/Makefile.am index 5ba952ded..817ccd600 100644 --- a/src/plugins/select/bluegene/plugin/Makefile.am +++ b/src/plugins/select/bluegene/plugin/Makefile.am @@ -14,10 +14,13 @@ select_bluegene_la_SOURCES = select_bluegene.c \ bg_job_place.c bg_job_place.h \ bg_job_run.c bg_job_run.h \ bg_block_info.c bg_block_info.h \ + bg_record_functions.c bg_record_functions.h \ bluegene.c bluegene.h \ state_test.c state_test.h \ bg_switch_connections.c \ block_sys.c \ + dynamic_block.c dynamic_block.h \ + defined_block.c defined_block.h \ ../wrap_rm_api.h select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) @@ -32,12 +35,9 @@ libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) sbin_PROGRAMS = slurm_prolog slurm_epilog sfree -sfree_LDADD = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la -slurm_prolog_LDADD = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la -slurm_epilog_LDADD = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la +sfree_LDADD = $(top_builddir)/src/api/libslurmhelper.la +slurm_prolog_LDADD = $(top_builddir)/src/api/libslurmhelper.la +slurm_epilog_LDADD = $(top_builddir)/src/api/libslurmhelper.la sfree_SOURCES = sfree.c sfree.h opts.c \ ../block_allocator/bridge_linker.c \ ../block_allocator/bridge_linker.h diff --git a/src/plugins/select/bluegene/plugin/Makefile.in b/src/plugins/select/bluegene/plugin/Makefile.in index 2f204dce4..6f705c3df 100644 --- a/src/plugins/select/bluegene/plugin/Makefile.in +++ b/src/plugins/select/bluegene/plugin/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
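The _copy_the_path() and _reset_the_path() hunks a little earlier replace "make the recursive call, then return 1" with "return the recursive call", so a failure anywhere along the wire path reaches the original caller instead of being discarded. A toy example of why propagating the recursive result matters; the path encoding here is invented:

#include <stdio.h>

/* Walk a path of link values; a negative entry means a broken link.
 * Returning the recursive result propagates the error to the caller. */
static int walk(const int *path, int pos, int len)
{
	if (pos == len)
		return 0;	/* success: end of path reached */
	if (path[pos] < 0)
		return -1;	/* error: broken link */
	return walk(path, pos + 1, len);	/* propagate, don't discard */
}

int main(void)
{
	int good[] = { 1, 2, 3 };
	int bad[]  = { 1, -1, 3 };

	printf("good path: %d\n", walk(good, 0, 3));	/* prints 0 */
	printf("bad path:  %d\n", walk(bad, 0, 3));	/* prints -1 */
	return 0;
}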
@@ -47,6 +47,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -85,8 +87,9 @@ libsched_if64_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ select_bluegene_la_DEPENDENCIES = \ ../block_allocator/libbluegene_block_allocator.la am_select_bluegene_la_OBJECTS = select_bluegene.lo bg_job_place.lo \ - bg_job_run.lo bg_block_info.lo bluegene.lo state_test.lo \ - bg_switch_connections.lo block_sys.lo + bg_job_run.lo bg_block_info.lo bg_record_functions.lo \ + bluegene.lo state_test.lo bg_switch_connections.lo \ + block_sys.lo dynamic_block.lo defined_block.lo select_bluegene_la_OBJECTS = $(am_select_bluegene_la_OBJECTS) select_bluegene_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ @@ -96,26 +99,23 @@ PROGRAMS = $(sbin_PROGRAMS) am_sfree_OBJECTS = sfree.$(OBJEXT) opts.$(OBJEXT) \ bridge_linker.$(OBJEXT) sfree_OBJECTS = $(am_sfree_OBJECTS) -sfree_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la +sfree_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la sfree_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sfree_LDFLAGS) \ $(LDFLAGS) -o $@ am_slurm_epilog_OBJECTS = slurm_epilog.$(OBJEXT) slurm_epilog_OBJECTS = $(am_slurm_epilog_OBJECTS) -slurm_epilog_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la +slurm_epilog_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la slurm_epilog_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(slurm_epilog_LDFLAGS) $(LDFLAGS) -o $@ am_slurm_prolog_OBJECTS = slurm_prolog.$(OBJEXT) slurm_prolog_OBJECTS = $(am_slurm_prolog_OBJECTS) -slurm_prolog_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la +slurm_prolog_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la slurm_prolog_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(slurm_prolog_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -159,6 +159,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -172,10 +173,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -195,7 +199,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -206,6 +213,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -221,6 +230,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -236,6 +246,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -302,10 +313,13 @@ select_bluegene_la_SOURCES = select_bluegene.c \ bg_job_place.c bg_job_place.h \ bg_job_run.c bg_job_run.h \ bg_block_info.c bg_block_info.h \ + bg_record_functions.c bg_record_functions.h \ bluegene.c bluegene.h \ state_test.c state_test.h \ bg_switch_connections.c \ block_sys.c \ + dynamic_block.c dynamic_block.h \ + defined_block.c defined_block.h \ ../wrap_rm_api.h select_bluegene_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) @@ -314,15 +328,9 @@ select_bluegene_la_LIBADD = ../block_allocator/libbluegene_block_allocator.la # MPIRUN dynamic lib. 
libsched_if64_la_SOURCES = libsched_if64.c libsched_if64_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -sfree_LDADD = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la - -slurm_prolog_LDADD = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la - -slurm_epilog_LDADD = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/api/libslurm.la - +sfree_LDADD = $(top_builddir)/src/api/libslurmhelper.la +slurm_prolog_LDADD = $(top_builddir)/src/api/libslurmhelper.la +slurm_epilog_LDADD = $(top_builddir)/src/api/libslurmhelper.la sfree_SOURCES = sfree.c sfree.h opts.c \ ../block_allocator/bridge_linker.c \ ../block_allocator/bridge_linker.h @@ -371,8 +379,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -380,8 +388,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -405,8 +413,8 @@ install-sbinPROGRAMS: $(sbin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ else :; fi; \ done @@ -443,10 +451,13 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_block_info.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_job_place.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_job_run.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_record_functions.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bg_switch_connections.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/block_sys.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bluegene.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_linker.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/defined_block.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dynamic_block.Plo@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/libsched_if64.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opts.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_bluegene.Plo@am__quote@ @@ -501,8 +512,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -514,8 +525,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -525,13 +536,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c index 30d60327b..3cb46be9e 100644 --- a/src/plugins/select/bluegene/plugin/bg_job_place.c +++ b/src/plugins/select/bluegene/plugin/bg_job_place.c @@ -2,7 +2,7 @@ * bg_job_place.c - blue gene job placement (e.g. base block selection) * functions. * - * $Id: bg_job_place.c 13271 2008-02-14 20:02:00Z da $ + * $Id: bg_job_place.c 13999 2008-05-07 22:08:58Z da $ ***************************************************************************** * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -43,6 +43,9 @@ #include "src/common/node_select.h" #include "src/slurmctld/trigger_mgr.h" #include "bluegene.h" +#include "dynamic_block.h" + +#ifdef HAVE_BG #define _DEBUG 0 #define MAX_GROUPS 128 @@ -54,16 +57,51 @@ _STMT_START { \ (b) = (t); \ } _STMT_END -static int _find_best_block_match(struct job_record* job_ptr, - bitstr_t* slurm_block_bitmap, uint32_t min_nodes, - uint32_t max_nodes, uint32_t req_nodes, - int spec, bg_record_t** found_bg_record, - bool test_only); -static int _get_user_groups(uint32_t user_id, uint32_t group_id, - gid_t *groups, int max_groups, int *ngroups); + +pthread_mutex_t create_dynamic_mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_mutex_t job_list_test_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* This list is for the test_job_list function because we will be + * adding and removing blocks off the bg_job_block_list and don't want + * to ruin that list in submit_job it should = bg_job_block_list + * otherwise it should be a copy of that list. 
+ */ +List job_block_test_list = NULL; + static void _rotate_geo(uint16_t *req_geometry, int rot_cnt); -static int _test_image_perms(char *image_name, List image_list, +static int _bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b); +static int _get_user_groups(uint32_t user_id, uint32_t group_id, + gid_t *groups, int max_groups, int *ngroups); +static int _test_image_perms(char *image_name, List image_list, struct job_record* job_ptr); +static int _check_images(struct job_record* job_ptr, + char **blrtsimage, char **linuximage, + char **mloaderimage, char **ramdiskimage); +static bg_record_t *_find_matching_block(List block_list, + struct job_record* job_ptr, + bitstr_t* slurm_block_bitmap, + ba_request_t *request, + uint32_t max_procs, + int allow, int check_image, + int overlap_check, + List overlapped_list, + bool test_only); +static int _check_for_booted_overlapping_blocks( + List block_list, ListIterator bg_record_itr, + bg_record_t *bg_record, int overlap_check, List overlapped_list, + bool test_only); +static int _dynamically_request(List block_list, int *blocks_added, + ba_request_t *request, + bitstr_t* slurm_block_bitmap, + char *user_req_nodes); +static int _find_best_block_match(List block_list, int *blocks_added, + struct job_record* job_ptr, + bitstr_t* slurm_block_bitmap, + uint32_t min_nodes, + uint32_t max_nodes, uint32_t req_nodes, + bg_record_t** found_bg_record, + bool test_only); +static int _sync_block_lists(List full_list, List incomp_list); /* Rotate a 3-D geometry array through its six permutations */ static void _rotate_geo(uint16_t *req_geometry, int rot_cnt) @@ -84,7 +122,97 @@ static void _rotate_geo(uint16_t *req_geometry, int rot_cnt) } } -pthread_mutex_t create_dynamic_mutex = PTHREAD_MUTEX_INITIALIZER; +/* + * Comparator used for sorting blocks smallest to largest + * + * returns: -1: rec_a < rec_b 0: rec_a == rec_b 1: rec_a > rec_b + * + */ +static int _bg_record_sort_aval_inc(bg_record_t* rec_a, bg_record_t* rec_b) +{ + int size_a = rec_a->node_cnt; + int size_b = rec_b->node_cnt; + + if(rec_a->job_ptr && !rec_b->job_ptr) + return -1; + else if(!rec_a->job_ptr && rec_b->job_ptr) + return 1; + else if(rec_a->job_ptr && rec_b->job_ptr) { + if(rec_a->job_ptr->start_time > rec_b->job_ptr->start_time) + return 1; + else if(rec_a->job_ptr->start_time < rec_b->job_ptr->start_time) + return -1; + } + + if (size_a < size_b) + return -1; + else if (size_a > size_b) + return 1; + if(rec_a->nodes && rec_b->nodes) { + size_a = strcmp(rec_a->nodes, rec_b->nodes); + if (size_a < 0) + return -1; + else if (size_a > 0) + return 1; + } + if (rec_a->quarter < rec_b->quarter) + return -1; + else if (rec_a->quarter > rec_b->quarter) + return 1; + + if(rec_a->nodecard < rec_b->nodecard) + return -1; + else if(rec_a->nodecard > rec_b->nodecard) + return 1; + + return 0; +} + +/* + * Comparator used for sorting blocks smallest to largest + * + * returns: -1: rec_a >rec_b 0: rec_a == rec_b 1: rec_a < rec_b + * + */ +static int _bg_record_sort_aval_dec(bg_record_t* rec_a, bg_record_t* rec_b) +{ + int size_a = rec_a->node_cnt; + int size_b = rec_b->node_cnt; + + if(rec_a->job_ptr && !rec_b->job_ptr) + return 1; + else if(!rec_a->job_ptr && rec_b->job_ptr) + return -1; + else if(rec_a->job_ptr && rec_b->job_ptr) { + if(rec_a->job_ptr->start_time > rec_b->job_ptr->start_time) + return -1; + else if(rec_a->job_ptr->start_time < rec_b->job_ptr->start_time) + return 1; + } + + if (size_a < size_b) + return -1; + else if (size_a > size_b) + return 1; + if(rec_a->nodes && 
rec_b->nodes) { + size_a = strcmp(rec_a->nodes, rec_b->nodes); + if (size_a < 0) + return -1; + else if (size_a > 0) + return 1; + } + if (rec_a->quarter < rec_b->quarter) + return -1; + else if (rec_a->quarter > rec_b->quarter) + return 1; + + if(rec_a->nodecard < rec_b->nodecard) + return -1; + else if(rec_a->nodecard > rec_b->nodecard) + return 1; + + return 0; +} /* * Get a list of groups associated with a specific user_id @@ -110,7 +238,7 @@ static int _get_user_groups(uint32_t user_id, uint32_t group_id, return -1; } *ngroups = max_groups; - rc = getgrouplist(pwd.pw_name, (gid_t) group_id, groups, ngroups); + rc = getgrouplist(pwd.pw_name, (gid_t) group_id, groups, ngroups); xfree(buffer); if (rc < 0) { error("getgrouplist(%s): %m", pwd.pw_name); @@ -140,14 +268,13 @@ static int _test_image_perms(char *image_name, List image_list, itr = list_iterator_create(image_list); while ((image = list_next(itr))) { - if (!strcasecmp(image->name, image_name) || - !strcasecmp(image->name, "*")) { + if (!strcasecmp(image->name, image_name) + || !strcasecmp(image->name, "*")) { if (image->def) { allow = 1; break; } - if (!image->groups || - !list_count(image->groups)) { + if (!image->groups || !list_count(image->groups)) { allow = 1; break; } @@ -161,8 +288,7 @@ static int _test_image_perms(char *image_name, List image_list, cache_user = job_ptr->user_id; } itr2 = list_iterator_create(image->groups); - while ((allow == 0) && - (image_group = list_next(itr2))) { + while (!allow && (image_group = list_next(itr2))) { for (i=0; i<ngroups; i++) { if (image_group->gid == groups[i]) { @@ -181,251 +307,117 @@ static int _test_image_perms(char *image_name, List image_list, return allow; } -/* - * finds the best match for a given job request - * - * IN - int spec right now holds the place for some type of - * specification as to the importance of certain job params, for - * instance, geometry, type, size, etc. 
- * - * OUT - block_id of matched block, NULL otherwise - * returns 1 for error (no match) - * - */ -static int _find_best_block_match(struct job_record* job_ptr, - bitstr_t* slurm_block_bitmap, - uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes, int spec, - bg_record_t** found_bg_record, - bool test_only) +static int _check_images(struct job_record* job_ptr, + char **blrtsimage, char **linuximage, + char **mloaderimage, char **ramdiskimage) { - ListIterator itr; - ListIterator itr2; - bg_record_t *record = NULL; - bg_record_t *found_record = NULL; - uint16_t req_geometry[BA_SYSTEM_DIMENSIONS]; - uint16_t start[BA_SYSTEM_DIMENSIONS]; - uint16_t conn_type, rotate, target_size = 0; - uint32_t req_procs = job_ptr->num_procs; - uint32_t proc_cnt; - ba_request_t request; - int i; - int rot_cnt = 0; - int created = 0; int allow = 0; - int check_image = 1; - uint32_t max_procs = NO_VAL; - List lists_of_lists = NULL; - List temp_list = NULL; - char tmp_char[256]; - bitstr_t* tmp_bitmap = NULL; - int start_req = 0; - static int total_cpus = 0; - char *blrtsimage = NULL; /* BlrtsImage for this request */ - char *linuximage = NULL; /* LinuxImage for this request */ - char *mloaderimage = NULL; /* mloaderImage for this request */ - char *ramdiskimage = NULL; /* RamDiskImage for this request */ - int rc = SLURM_SUCCESS; - - if(!total_cpus) - total_cpus = DIM_SIZE[X] * DIM_SIZE[Y] * DIM_SIZE[Z] - * procs_per_node; - - if(req_nodes > max_nodes) { - error("can't run this job max bps is %u asking for %u", - max_nodes, req_nodes); - return SLURM_ERROR; - } - - slurm_mutex_lock(&block_state_mutex); - if(!test_only && req_procs > num_unused_cpus) { - debug2("asking for %u I only got %d", - req_procs, num_unused_cpus); - slurm_mutex_unlock(&block_state_mutex); - return SLURM_ERROR; - } - slurm_mutex_unlock(&block_state_mutex); - if(!bg_list) { - error("_find_best_block_match: There is no bg_list"); - return SLURM_ERROR; - } - - select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_START, &start); - - if(start[X] != (uint16_t)NO_VAL) - start_req = 1; - - select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_CONN_TYPE, &conn_type); - select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_GEOMETRY, &req_geometry); - select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_ROTATE, &rotate); - select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_MAX_PROCS, &max_procs); select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_BLRTS_IMAGE, &blrtsimage); - if (blrtsimage) { - allow = _test_image_perms(blrtsimage, bg_blrtsimage_list, + SELECT_DATA_BLRTS_IMAGE, blrtsimage); + + if (*blrtsimage) { + allow = _test_image_perms(*blrtsimage, bg_blrtsimage_list, job_ptr); if (!allow) { error("User %u:%u is not allowed to use BlrtsImage %s", - job_ptr->user_id, job_ptr->group_id, blrtsimage); - rc = SLURM_ERROR; - goto end_it; + job_ptr->user_id, job_ptr->group_id, *blrtsimage); + return SLURM_ERROR; + } } select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_LINUX_IMAGE, &linuximage); - if (linuximage) { - allow = _test_image_perms(linuximage, bg_linuximage_list, + SELECT_DATA_LINUX_IMAGE, linuximage); + if (*linuximage) { + allow = _test_image_perms(*linuximage, bg_linuximage_list, job_ptr); if (!allow) { error("User %u:%u is not allowed to use LinuxImage %s", - job_ptr->user_id, job_ptr->group_id, linuximage); - rc = SLURM_ERROR; - goto end_it; + job_ptr->user_id, job_ptr->group_id, *linuximage); + return SLURM_ERROR; } } select_g_get_jobinfo(job_ptr->select_jobinfo, - 
SELECT_DATA_MLOADER_IMAGE, &mloaderimage); - if (mloaderimage) { - allow = _test_image_perms(mloaderimage, bg_mloaderimage_list, + SELECT_DATA_MLOADER_IMAGE, mloaderimage); + if (*mloaderimage) { + allow = _test_image_perms(*mloaderimage, bg_mloaderimage_list, job_ptr); if(!allow) { error("User %u:%u is not allowed " "to use MloaderImage %s", job_ptr->user_id, job_ptr->group_id, - mloaderimage); - rc = SLURM_ERROR; - goto end_it; + *mloaderimage); + return SLURM_ERROR; } } select_g_get_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_RAMDISK_IMAGE, &ramdiskimage); - if (ramdiskimage) { - allow = _test_image_perms(ramdiskimage, bg_ramdiskimage_list, + SELECT_DATA_RAMDISK_IMAGE, ramdiskimage); + if (*ramdiskimage) { + allow = _test_image_perms(*ramdiskimage, bg_ramdiskimage_list, job_ptr); if(!allow) { error("User %u:%u is not allowed " "to use RamDiskImage %s", job_ptr->user_id, job_ptr->group_id, - ramdiskimage); - rc = SLURM_ERROR; - goto end_it; - } - } - - if(req_geometry[X] != 0 && req_geometry[X] != (uint16_t)NO_VAL) { - target_size = 1; - for (i=0; i<BA_SYSTEM_DIMENSIONS; i++) - target_size *= (uint16_t)req_geometry[i]; - if(target_size != min_nodes) { - debug2("min_nodes not set correctly %u should be %u " - "from %u%u%u", - min_nodes, target_size, - req_geometry[X], - req_geometry[Y], - req_geometry[Z]); - min_nodes = target_size; - } - if(!req_nodes) - req_nodes = req_nodes; - } - if (target_size == 0) { /* no geometry specified */ - if(job_ptr->details->req_nodes - && !start_req) { - bg_record_t *tmp_record = NULL; - char *tmp_nodes= job_ptr->details->req_nodes; - int len = strlen(tmp_nodes); - - i = 0; - while(i<len - && tmp_nodes[i] != '[' - && (tmp_nodes[i] < '0' || tmp_nodes[i] > 'Z' - || (tmp_nodes[i] > '9' - && tmp_nodes[i] < 'A'))) - i++; - - if(i<len) { - len -= i; - tmp_record = xmalloc(sizeof(bg_record_t)); - tmp_record->bg_block_list = - list_create(destroy_ba_node); - slurm_conf_lock(); - len += strlen(slurmctld_conf.node_prefix)+1; - tmp_record->nodes = xmalloc(len); - - snprintf(tmp_record->nodes, - len, - "%s%s", - slurmctld_conf.node_prefix, - tmp_nodes+i); - slurm_conf_unlock(); - - process_nodes(tmp_record); - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) { - req_geometry[i] = tmp_record->geo[i]; - start[i] = tmp_record->start[i]; - } - destroy_bg_record(tmp_record); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_GEOMETRY, - &req_geometry); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_START, - &start); - start_req = 1; - } else - error("BPs=%s is in a weird format", - tmp_nodes); - } else { - req_geometry[X] = (uint16_t)NO_VAL; + *ramdiskimage); + return SLURM_ERROR; } - target_size = min_nodes; } + + return SLURM_SUCCESS; +} + +static bg_record_t *_find_matching_block(List block_list, + struct job_record* job_ptr, + bitstr_t* slurm_block_bitmap, + ba_request_t *request, + uint32_t max_procs, + int allow, int check_image, + int overlap_check, + List overlapped_list, + bool test_only) +{ + bg_record_t *bg_record = NULL; + ListIterator itr = NULL; + uint32_t proc_cnt = 0; + char tmp_char[256]; - /* this is where we should have the control flow depending on - * the spec arguement */ - - *found_bg_record = NULL; - allow = 0; -try_again: - slurm_mutex_lock(&block_state_mutex); debug("number of blocks to check: %d state %d", - list_count(bg_list), + list_count(block_list), test_only); - itr = list_iterator_create(bg_list); - while ((record = (bg_record_t*) list_next(itr))) { + + itr = list_iterator_create(block_list); + while ((bg_record = 
(bg_record_t*) list_next(itr))) { /* If test_only we want to fall through to tell the scheduler that it is runnable just not right now. */ debug3("%s job_running = %d", - record->bg_block_id, record->job_running); + bg_record->bg_block_id, bg_record->job_running); /*block is messed up some how (BLOCK_ERROR_STATE) ignore it*/ - if(record->job_running == BLOCK_ERROR_STATE) { + if(bg_record->job_running == BLOCK_ERROR_STATE) { debug("block %s is in an error state (can't use)", - record->bg_block_id); + bg_record->bg_block_id); continue; - } else if((record->job_running != NO_JOB_RUNNING) - && !test_only) { + } else if((bg_record->job_running != NO_JOB_RUNNING) + && (bg_record->job_running != job_ptr->job_id) + && (bluegene_layout_mode == LAYOUT_DYNAMIC + || (!test_only + && bluegene_layout_mode != LAYOUT_DYNAMIC))) { debug("block %s in use by %s job %d", - record->bg_block_id, - record->user_name, - record->job_running); + bg_record->bg_block_id, + bg_record->user_name, + bg_record->job_running); continue; } - + /* Check processor count */ - proc_cnt = record->bp_count * record->cpus_per_bp; + proc_cnt = bg_record->bp_count * bg_record->cpus_per_bp; debug3("asking for %u-%u looking at %d", - req_procs, max_procs, proc_cnt); - if ((proc_cnt < req_procs) + request->procs, max_procs, proc_cnt); + if ((proc_cnt < request->procs) || ((max_procs != NO_VAL) && (proc_cnt > max_procs))) { /* We use the proccessor count per block here mostly to see if we can run on a smaller block. @@ -433,27 +425,11 @@ try_again: convert_num_unit((float)proc_cnt, tmp_char, sizeof(tmp_char), UNIT_NONE); debug("block %s CPU count (%s) not suitable", - record->bg_block_id, + bg_record->bg_block_id, tmp_char); continue; } - /* - * check that the number of nodes is suitable - */ - debug3("asking for %u-%u bps looking at %d", - min_nodes, req_nodes, record->bp_count); - if ((record->bp_count < min_nodes) - || (req_nodes != 0 && record->bp_count > req_nodes) - || (record->bp_count < target_size)) { - convert_num_unit((float)record->node_cnt, tmp_char, - sizeof(tmp_char), UNIT_NONE); - debug("block %s node count (%s) not suitable", - record->bg_block_id, - tmp_char); - continue; - } - /* * Next we check that this block's bitmap is within * the set of nodes which the job can use. @@ -461,9 +437,9 @@ try_again: * drained, allocated to some other job, or in some * SLURM block not available to this job. 
*/ - if (!bit_super_set(record->bitmap, slurm_block_bitmap)) { + if (!bit_super_set(bg_record->bitmap, slurm_block_bitmap)) { debug("bg block %s has nodes not usable by this job", - record->bg_block_id); + bg_record->bg_block_id); continue; } @@ -472,92 +448,37 @@ try_again: */ if (job_ptr->details->req_node_bitmap && (!bit_super_set(job_ptr->details->req_node_bitmap, - record->bitmap))) { + bg_record->bitmap))) { debug("bg block %s lacks required nodes", - record->bg_block_id); + bg_record->bg_block_id); continue; } - - /* Make sure no other blocks are under this block - are booted and running jobs - */ - itr2 = list_iterator_create(bg_list); - while ((found_record = (bg_record_t*) - list_next(itr2)) != NULL) { - if ((!found_record->bg_block_id) - || (!strcmp(record->bg_block_id, - found_record->bg_block_id))) - continue; - if(blocks_overlap(record, found_record)) { - if(!test_only - && bluegene_layout_mode == LAYOUT_OVERLAP) { - if(!created && record->state - != RM_PARTITION_READY) - break; - else if(created == 1 - && found_record->state - != RM_PARTITION_FREE) { - break; - } - } - if(!test_only && found_record->job_running - != NO_JOB_RUNNING) { - if(found_record->job_running - == BLOCK_ERROR_STATE) - error("can't use %s, " - "overlapping block %s " - "is in an error state.", - record->bg_block_id, - found_record-> - bg_block_id); - else - debug("can't use %s, there is " - "a job (%d) running on " - "an overlapping " - "block %s", - record->bg_block_id, - found_record-> - job_running, - found_record-> - bg_block_id); - - if(bluegene_layout_mode == - LAYOUT_DYNAMIC) { - list_remove(itr); - temp_list = list_create(NULL); - list_push(temp_list, record); - num_block_to_free++; - free_block_list(temp_list); - list_destroy(temp_list); - } - break; - } - } - } - list_iterator_destroy(itr2); - - if(found_record) { + + + if(_check_for_booted_overlapping_blocks( + block_list, itr, bg_record, + overlap_check, overlapped_list, test_only)) continue; - } - + if(check_image) { - if(blrtsimage && - strcasecmp(blrtsimage, record->blrtsimage)) { + if(request->blrtsimage && + strcasecmp(request->blrtsimage, + bg_record->blrtsimage)) { allow = 1; continue; - } - if(linuximage && - strcasecmp(linuximage, record->linuximage)) { + } else if(request->linuximage && + strcasecmp(request->linuximage, + bg_record->linuximage)) { allow = 1; continue; - } - if(mloaderimage && - strcasecmp(mloaderimage, record->mloaderimage)) { + } else if(request->mloaderimage && + strcasecmp(request->mloaderimage, + bg_record->mloaderimage)) { allow = 1; continue; - } - if(ramdiskimage && - strcasecmp(ramdiskimage, record->ramdiskimage)) { + } else if(request->ramdiskimage && + strcasecmp(request->ramdiskimage, + bg_record->ramdiskimage)) { allow = 1; continue; } @@ -566,221 +487,667 @@ try_again: /***********************************************/ /* check the connection type specified matches */ /***********************************************/ - if ((conn_type != record->conn_type) - && (conn_type != SELECT_NAV)) { + if ((request->conn_type != bg_record->conn_type) + && (request->conn_type != SELECT_NAV)) { debug("bg block %s conn-type not usable asking for %s " - "record is %s", - record->bg_block_id, - convert_conn_type(conn_type), - convert_conn_type(record->conn_type)); + "bg_record is %s", + bg_record->bg_block_id, + convert_conn_type(request->conn_type), + convert_conn_type(bg_record->conn_type)); continue; } /*****************************************/ /* match up geometry as "best" possible */ 
/*****************************************/ - if (req_geometry[X] == (uint16_t)NO_VAL) + if (request->geometry[X] == (uint16_t)NO_VAL) ; /* Geometry not specified */ else { /* match requested geometry */ bool match = false; - rot_cnt = 0; /* attempt six rotations */ + int rot_cnt = 0; /* attempt six rotations */ for (rot_cnt=0; rot_cnt<6; rot_cnt++) { - if ((record->geo[X] >= req_geometry[X]) - && (record->geo[Y] >= req_geometry[Y]) - && (record->geo[Z] >= req_geometry[Z])) { + if ((bg_record->geo[X] >= request->geometry[X]) + && (bg_record->geo[Y] + >= request->geometry[Y]) + && (bg_record->geo[Z] + >= request->geometry[Z])) { match = true; break; } - if (!rotate) { + if (!request->rotate) break; - } - _rotate_geo(req_geometry, rot_cnt); + + _rotate_geo((uint16_t *)request->geometry, + rot_cnt); } - + if (!match) continue; /* Not usable */ } - *found_bg_record = record; - debug2("we found one! %s", (*found_bg_record)->bg_block_id); + debug2("we found one! %s", bg_record->bg_block_id); break; } list_iterator_destroy(itr); - - /* set the bitmap and do other allocation activities */ - if (*found_bg_record) { - if(!test_only) { - if(check_block_bp_states( - (*found_bg_record)->bg_block_id) - == SLURM_ERROR) { - error("_find_best_block_match: Marking " - "block %s in an error state " - "because of bad bps.", - (*found_bg_record)->bg_block_id); - (*found_bg_record)->job_running = - BLOCK_ERROR_STATE; - (*found_bg_record)->state = RM_PARTITION_ERROR; - slurm_mutex_unlock(&block_state_mutex); - trigger_block_error(); - goto try_again; - } - } - format_node_name(*found_bg_record, tmp_char, sizeof(tmp_char)); - debug("_find_best_block_match %s <%s>", - (*found_bg_record)->bg_block_id, - tmp_char); - bit_and(slurm_block_bitmap, (*found_bg_record)->bitmap); - slurm_mutex_unlock(&block_state_mutex); - rc = SLURM_SUCCESS; - goto end_it; - } + return bg_record; +} - /* see if we can just reset the image and reboot the block */ - if(allow) { - check_image = 0; - allow = 0; - slurm_mutex_unlock(&block_state_mutex); - goto try_again; - } +static int _check_for_booted_overlapping_blocks( + List block_list, ListIterator bg_record_itr, + bg_record_t *bg_record, int overlap_check, List overlapped_list, + bool test_only) +{ + bg_record_t *found_record = NULL; + ListIterator itr = NULL; + int rc = 0; - check_image = 1; - /* all these assume that the *found_bg_record is NULL */ - if(bluegene_layout_mode == LAYOUT_OVERLAP && !test_only && created<2) { - created++; - slurm_mutex_unlock(&block_state_mutex); - goto try_again; - } + /* this test only is for actually picking a block not testing */ + if(test_only && bluegene_layout_mode == LAYOUT_DYNAMIC) + return rc; + + /* Make sure no other blocks are under this block + are booted and running jobs + */ + itr = list_iterator_create(block_list); + while ((found_record = (bg_record_t*)list_next(itr)) != NULL) { + if ((!found_record->bg_block_id) + || (bg_record == found_record)) { + debug4("Don't need to look at myself %s %s", + bg_record->bg_block_id, + found_record->bg_block_id); + continue; + } - slurm_mutex_unlock(&block_state_mutex); - if(bluegene_layout_mode != LAYOUT_DYNAMIC) - goto not_dynamic; - - if(test_only) { - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - request.start[i] = start[i]; - - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - request.geometry[i] = req_geometry[i]; - - request.save_name = NULL; - request.elongate_geos = NULL; - request.size = target_size; - request.procs = req_procs; - request.conn_type = conn_type; - request.rotate = rotate; - request.elongate 
= true; - request.start_req = start_req; - request.blrtsimage = blrtsimage; - request.linuximage = linuximage; - request.mloaderimage = mloaderimage; - request.ramdiskimage = ramdiskimage; - if(job_ptr->details->req_node_bitmap) - request.avail_node_bitmap = - job_ptr->details->req_node_bitmap; - else - request.avail_node_bitmap = slurm_block_bitmap; - - debug("trying with all free blocks"); - if(create_dynamic_block(&request, NULL) == SLURM_ERROR) { - error("this job will never run on " - "this system"); - xfree(request.save_name); - rc = SLURM_ERROR; - goto end_it; - } else { - if(!request.save_name) { - error("no name returned from " - "create_dynamic_block"); - rc = SLURM_ERROR; - goto end_it; + if(blocks_overlap(bg_record, found_record)) { + /* make the available time on this block + * (bg_record) the max of this found_record's job + * or the one already set if in overlapped_block_list + * since we aren't setting job_running we + * don't have to remove them since the + * block_list should always be destroyed afterwards. + */ + if(test_only && overlapped_list + && found_record->job_ptr + && bg_record->job_running == NO_JOB_RUNNING) { + debug2("found over lapping block %s " + "overlapped %s with job %u", + found_record->bg_block_id, + bg_record->bg_block_id, + found_record->job_ptr->job_id); + ListIterator itr = list_iterator_create( + overlapped_list); + bg_record_t *tmp_rec = NULL; + while((tmp_rec = list_next(itr))) { + if(tmp_rec == bg_record) + break; + } + list_iterator_destroy(itr); + if(tmp_rec && tmp_rec->job_ptr->end_time + < found_record->job_ptr->end_time) + tmp_rec->job_ptr = + found_record->job_ptr; + else if(!tmp_rec) { + bg_record->job_ptr = + found_record->job_ptr; + list_append(overlapped_list, + bg_record); + } + } + /* We already know this block doesn't work + * right now so we will if there is another + * overlapping block that ends later + */ + if(rc) + continue; + /* This test is here to check if the block we + * chose is not booted or if there is a block + * overlapping that we could avoid freeing if + * we choose something else + */ + if(bluegene_layout_mode == LAYOUT_OVERLAP + && ((overlap_check == 0 && bg_record->state + != RM_PARTITION_READY) + || (overlap_check == 1 && found_record->state + != RM_PARTITION_FREE))) { + + if(!test_only) { + rc = 1; + break; + } + } + + if(found_record->job_running != NO_JOB_RUNNING) { + if(found_record->job_running + == BLOCK_ERROR_STATE) + error("can't use %s, " + "overlapping block %s " + "is in an error state.", + bg_record->bg_block_id, + found_record->bg_block_id); + else + debug("can't use %s, there is " + "a job (%d) running on " + "an overlapping " + "block %s", + bg_record->bg_block_id, + found_record->job_running, + found_record->bg_block_id); + + if(bluegene_layout_mode == LAYOUT_DYNAMIC) { + /* this will remove and + * destroy the memory for + * bg_record + */ + list_remove(bg_record_itr); + if(bg_record->original) { + debug3("This was a copy"); + found_record = + bg_record->original; + remove_from_bg_list( + bg_list, found_record); + } else { + debug("looking for original"); + found_record = + find_and_remove_org_from_bg_list( + bg_list, + bg_record); + } + destroy_bg_record(bg_record); + if(!found_record) { + error("1 this record wasn't " + "found in the list!"); + //rc = SLURM_ERROR; + } else { + List temp_list = + list_create(NULL); + list_push(temp_list, + found_record); + num_block_to_free++; + free_block_list(temp_list); + list_destroy(temp_list); + } + } + rc = 1; + + if(!test_only) + break; } + } + } + 
list_iterator_destroy(itr); + + return rc; +} + +/* + * + * Return SLURM_SUCCESS on successful create, SLURM_ERROR for no create + */ - slurm_conf_lock(); - snprintf(tmp_char, sizeof(tmp_char), "%s%s", - slurmctld_conf.node_prefix, - request.save_name); - slurm_conf_unlock(); - if (node_name2bitmap(tmp_char, - false, - &tmp_bitmap)) { - fatal("Unable to convert nodes %s to bitmap", - tmp_char); +static int _dynamically_request(List block_list, int *blocks_added, + ba_request_t *request, + bitstr_t* slurm_block_bitmap, + char *user_req_nodes) +{ + List list_of_lists = NULL; + List temp_list = NULL; + List new_blocks = NULL; + ListIterator itr = NULL; + int rc = SLURM_ERROR; + int create_try = 0; + int start_geo[BA_SYSTEM_DIMENSIONS]; + + memcpy(start_geo, request->geometry, sizeof(int)*BA_SYSTEM_DIMENSIONS); + debug2("going to create %d", request->size); + list_of_lists = list_create(NULL); + + if(user_req_nodes) + list_append(list_of_lists, job_block_test_list); + else { + list_append(list_of_lists, block_list); + if(job_block_test_list == bg_job_block_list && + list_count(block_list) != list_count(bg_booted_block_list)) { + list_append(list_of_lists, bg_booted_block_list); + if(list_count(bg_booted_block_list) + != list_count(job_block_test_list)) + list_append(list_of_lists, job_block_test_list); + } else if(list_count(block_list) + != list_count(job_block_test_list)) { + list_append(list_of_lists, job_block_test_list); + } + } + itr = list_iterator_create(list_of_lists); + while ((temp_list = (List)list_next(itr))) { + create_try++; + + /* 1- try empty space + 2- we see if we can create one in the + unused bps + 3- see if we can create one in the non + job running bps + */ + debug("trying with %d", create_try); + if((new_blocks = create_dynamic_block(block_list, + request, temp_list))) { + bg_record_t *bg_record = NULL; + while((bg_record = list_pop(new_blocks))) { + if(block_exist_in_list(block_list, bg_record)) + destroy_bg_record(bg_record); + else { + if(job_block_test_list + == bg_job_block_list) { + if(configure_block(bg_record) + == SLURM_ERROR) { + destroy_bg_record( + bg_record); + error("_dynamically_" + "request: " + "unable to " + "configure " + "block"); + rc = SLURM_ERROR; + break; + } + } + list_append(block_list, bg_record); + print_bg_record(bg_record); + (*blocks_added) = 1; + } } - - bit_and(slurm_block_bitmap, tmp_bitmap); - FREE_NULL_BITMAP(tmp_bitmap); - xfree(request.save_name); + list_destroy(new_blocks); + if(!*blocks_added) { + memcpy(request->geometry, start_geo, + sizeof(int)*BA_SYSTEM_DIMENSIONS); + rc = SLURM_ERROR; + continue; + } + list_sort(block_list, + (ListCmpF)_bg_record_sort_aval_dec); + rc = SLURM_SUCCESS; - goto end_it; + break; + } else if (errno == ESLURM_INTERCONNECT_FAILURE) { + rc = SLURM_ERROR; + break; + } + + memcpy(request->geometry, start_geo, + sizeof(int)*BA_SYSTEM_DIMENSIONS); + + } + list_iterator_destroy(itr); + + if(list_of_lists) + list_destroy(list_of_lists); + + return rc; +} +/* + * finds the best match for a given job request + * + * + * OUT - block_id of matched block, NULL otherwise + * returns 1 for error (no match) + * + */ +static int _find_best_block_match(List block_list, + int *blocks_added, + struct job_record* job_ptr, + bitstr_t* slurm_block_bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, + bg_record_t** found_bg_record, + bool test_only) +{ + bg_record_t *bg_record = NULL; + uint16_t req_geometry[BA_SYSTEM_DIMENSIONS]; + uint16_t start[BA_SYSTEM_DIMENSIONS]; + uint16_t conn_type, rotate, 
target_size = 0; + uint32_t req_procs = job_ptr->num_procs; + ba_request_t request; + int i; + int overlap_check = 0; + int allow = 0; + int check_image = 1; + uint32_t max_procs = (uint32_t)NO_VAL; + char tmp_char[256]; + int start_req = 0; + static int total_cpus = 0; + char *blrtsimage = NULL; /* BlrtsImage for this request */ + char *linuximage = NULL; /* LinuxImage for this request */ + char *mloaderimage = NULL; /* mloaderImage for this request */ + char *ramdiskimage = NULL; /* RamDiskImage for this request */ + int rc = SLURM_SUCCESS; + int create_try = 0; + List overlapped_list = NULL; + + if(!total_cpus) + total_cpus = DIM_SIZE[X] * DIM_SIZE[Y] * DIM_SIZE[Z] + * procs_per_node; + + if(req_nodes > max_nodes) { + error("can't run this job max bps is %u asking for %u", + max_nodes, req_nodes); + return SLURM_ERROR; + } + + if(!test_only && req_procs > num_unused_cpus) { + debug2("asking for %u I only got %d", + req_procs, num_unused_cpus); + return SLURM_ERROR; + } + + if(!block_list) { + error("_find_best_block_match: There is no block_list"); + return SLURM_ERROR; + } + + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_START, &start); + + if(start[X] != (uint16_t)NO_VAL) + start_req = 1; + + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_CONN_TYPE, &conn_type); + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_GEOMETRY, &req_geometry); + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_ROTATE, &rotate); + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_MAX_PROCS, &max_procs); + + + if((rc = _check_images(job_ptr, &blrtsimage, &linuximage, + &mloaderimage, &ramdiskimage)) == SLURM_ERROR) + goto end_it; + + if(req_geometry[X] != 0 && req_geometry[X] != (uint16_t)NO_VAL) { + target_size = 1; + for (i=0; i<BA_SYSTEM_DIMENSIONS; i++) + target_size *= (uint16_t)req_geometry[i]; + if(target_size != min_nodes) { + debug2("min_nodes not set correctly %u should be %u " + "from %u%u%u", + min_nodes, target_size, + req_geometry[X], + req_geometry[Y], + req_geometry[Z]); + min_nodes = target_size; } - } else if(!created) { - debug2("going to create %d", target_size); - lists_of_lists = list_create(NULL); - if(job_ptr->details->req_nodes) { - list_append(lists_of_lists, bg_job_block_list); + if(!req_nodes) + req_nodes = min_nodes; + } + if (target_size == 0) { /* no geometry specified */ + if(job_ptr->details->req_nodes + && !start_req) { + bg_record_t *tmp_record = NULL; + char *tmp_nodes= job_ptr->details->req_nodes; + int len = strlen(tmp_nodes); + + i = 0; + while(i<len + && tmp_nodes[i] != '[' + && (tmp_nodes[i] < '0' || tmp_nodes[i] > 'Z' + || (tmp_nodes[i] > '9' + && tmp_nodes[i] < 'A'))) + i++; + + if(i<len) { + len -= i; + tmp_record = xmalloc(sizeof(bg_record_t)); + tmp_record->bg_block_list = + list_create(destroy_ba_node); + slurm_conf_lock(); + len += strlen(slurmctld_conf.node_prefix)+1; + tmp_record->nodes = xmalloc(len); + + snprintf(tmp_record->nodes, + len, + "%s%s", + slurmctld_conf.node_prefix, + tmp_nodes+i); + slurm_conf_unlock(); + + process_nodes(tmp_record, false); + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) { + req_geometry[i] = tmp_record->geo[i]; + start[i] = tmp_record->start[i]; + } + destroy_bg_record(tmp_record); + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_GEOMETRY, + &req_geometry); + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_START, + &start); + start_req = 1; + } else + error("BPs=%s is in a weird format", + tmp_nodes); } else { - list_append(lists_of_lists, bg_list); - 
if(list_count(bg_list) - != list_count(bg_booted_block_list)) { - list_append(lists_of_lists, - bg_booted_block_list); - if(list_count(bg_booted_block_list) - != list_count(bg_job_block_list)) - list_append(lists_of_lists, - bg_job_block_list); - } else if(list_count(bg_list) - != list_count(bg_job_block_list)) - list_append(lists_of_lists, bg_job_block_list); + req_geometry[X] = (uint16_t)NO_VAL; } - itr = list_iterator_create(lists_of_lists); - while ((temp_list = (List)list_next(itr))) { - created++; + target_size = min_nodes; + } + + *found_bg_record = NULL; + allow = 0; - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - request.start[i] = start[i]; + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + request.start[i] = start[i]; + + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + request.geometry[i] = req_geometry[i]; + + request.save_name = NULL; + request.elongate_geos = NULL; + request.size = target_size; + request.procs = req_procs; + request.conn_type = conn_type; + request.rotate = rotate; + request.elongate = true; + request.start_req = start_req; + request.blrtsimage = blrtsimage; + request.linuximage = linuximage; + request.mloaderimage = mloaderimage; + request.ramdiskimage = ramdiskimage; + if(job_ptr->details->req_node_bitmap) + request.avail_node_bitmap = + job_ptr->details->req_node_bitmap; + else + request.avail_node_bitmap = slurm_block_bitmap; + + select_g_get_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_MAX_PROCS, &max_procs); + /* since we only look at procs after this and not nodes we + * need to set a max_procs if given + */ + if(max_procs == (uint32_t)NO_VAL) + max_procs = max_nodes * procs_per_node; + + while(1) { + /* Here we are creating a list of all the blocks that + * have overlapped jobs so if we don't find one that + * works we will have can look and see the earliest + * the job can start. This doesn't apply to Dynamic mode. 
+ */ + if(test_only && bluegene_layout_mode != LAYOUT_DYNAMIC) + overlapped_list = list_create(NULL); + + bg_record = _find_matching_block(block_list, + job_ptr, + slurm_block_bitmap, + &request, + max_procs, + allow, check_image, + overlap_check, + overlapped_list, + test_only); + if(!bg_record && test_only + && bluegene_layout_mode != LAYOUT_DYNAMIC + && list_count(overlapped_list)) { + ListIterator itr = + list_iterator_create(overlapped_list); + bg_record_t *tmp_rec = NULL; + while((tmp_rec = list_next(itr))) { + if(!bg_record || + (tmp_rec->job_ptr->end_time < + bg_record->job_ptr->end_time)) + bg_record = tmp_rec; + } + list_iterator_destroy(itr); + } + + if(test_only && bluegene_layout_mode != LAYOUT_DYNAMIC) + list_destroy(overlapped_list); + + /* set the bitmap and do other allocation activities */ + if (bg_record) { + if(!test_only) { + if(check_block_bp_states( + bg_record->bg_block_id) + == SLURM_ERROR) { + error("_find_best_block_match: Marking " + "block %s in an error state " + "because of bad bps.", + bg_record->bg_block_id); + bg_record->job_running = + BLOCK_ERROR_STATE; + bg_record->state = RM_PARTITION_ERROR; + trigger_block_error(); + continue; + } + } + format_node_name(bg_record, tmp_char, sizeof(tmp_char)); + debug("_find_best_block_match %s <%s>", + bg_record->bg_block_id, + tmp_char); + bit_and(slurm_block_bitmap, bg_record->bitmap); + rc = SLURM_SUCCESS; + *found_bg_record = bg_record; + goto end_it; + } else { + /* this gets altered in _find_matching_block so we + reset it */ for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) request.geometry[i] = req_geometry[i]; + } + + /* see if we can just reset the image and reboot the block */ + if(allow) { + check_image = 0; + allow = 0; + continue; + } + + check_image = 1; + + /* all these assume that the *bg_record is NULL */ + + if(bluegene_layout_mode == LAYOUT_OVERLAP + && !test_only && overlap_check < 2) { + overlap_check++; + continue; + } + + if(create_try || bluegene_layout_mode != LAYOUT_DYNAMIC) + goto no_match; + + if((rc = _dynamically_request(block_list, blocks_added, + &request, + slurm_block_bitmap, + job_ptr->details->req_nodes)) + == SLURM_SUCCESS) { + create_try = 1; + continue; + } - request.save_name = NULL; - request.elongate_geos = NULL; - request.size = target_size; - request.procs = req_procs; - request.conn_type = conn_type; - request.rotate = rotate; - request.elongate = true; - request.start_req = start_req; - request.blrtsimage = blrtsimage; - request.linuximage = linuximage; - request.mloaderimage = mloaderimage; - request.ramdiskimage = ramdiskimage; - if(job_ptr->details->req_node_bitmap) - request.avail_node_bitmap = - job_ptr->details->req_node_bitmap; + + if(test_only) { + List new_blocks = NULL; + List job_list = NULL; + debug("trying with empty machine"); + slurm_mutex_lock(&block_state_mutex); + if(job_block_test_list == bg_job_block_list) + job_list = copy_bg_list(job_block_test_list); else - request.avail_node_bitmap = slurm_block_bitmap; - - /* 1- try empty space - 2- we see if we can create one in the - unused bps - 3- see if we can create one in the non - job running bps - */ - debug("trying with %d", created); - if(create_dynamic_block(&request, temp_list) - == SLURM_SUCCESS) { - list_iterator_destroy(itr); - list_destroy(lists_of_lists); - lists_of_lists = NULL; - goto try_again; + job_list = job_block_test_list; + slurm_mutex_unlock(&block_state_mutex); + list_sort(job_list, (ListCmpF)_bg_record_sort_aval_inc); + while(1) { + /* this gets altered in + * create_dynamic_block so we reset it 
*/ + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + request.geometry[i] = req_geometry[i]; + + bg_record = list_pop(job_list); + if(bg_record) + debug2("taking off %d(%s) started at %d ends at %d", + bg_record->job_running, + bg_record->bg_block_id, + bg_record->job_ptr->start_time, + bg_record->job_ptr->end_time); + if(!(new_blocks = create_dynamic_block( + block_list, &request, job_list))) { + destroy_bg_record(bg_record); + if(errno == ESLURM_INTERCONNECT_FAILURE + || !list_count(job_list)) { + error("this job will never " + "run on this system"); + break; + } + continue; + } + rc = SLURM_SUCCESS; + /* outside of the job_test_list this + * gets destroyed later, so don't worry + * about it now + */ + (*found_bg_record) = list_pop(new_blocks); + bit_and(slurm_block_bitmap, + (*found_bg_record)->bitmap); + + if(bg_record) { + (*found_bg_record)->job_ptr + = bg_record->job_ptr; + destroy_bg_record(bg_record); + } + + if(job_block_test_list != bg_job_block_list) { + list_append(block_list, + (*found_bg_record)); + while((bg_record = + list_pop(new_blocks))) { + if(block_exist_in_list( + block_list, + bg_record)) + destroy_bg_record( + bg_record); + else { + list_append(block_list, + bg_record); +// print_bg_record(bg_record); + } + } + } + + list_destroy(new_blocks); + break; } + + if(job_block_test_list == bg_job_block_list) + list_destroy(job_list); + + goto end_it; + } else { + break; } - list_iterator_destroy(itr); - if(lists_of_lists) - list_destroy(lists_of_lists); } -not_dynamic: + +no_match: debug("_find_best_block_match none found"); rc = SLURM_ERROR; @@ -793,6 +1160,43 @@ end_it: return rc; } + +static int _sync_block_lists(List full_list, List incomp_list) +{ + ListIterator itr; + ListIterator itr2; + bg_record_t *bg_record = NULL; + bg_record_t *new_record = NULL; + int count = 0; + + itr = list_iterator_create(full_list); + itr2 = list_iterator_create(incomp_list); + while((new_record = list_next(itr))) { + while((bg_record = list_next(itr2))) { + if(bit_equal(bg_record->bitmap, new_record->bitmap) + && bit_equal(bg_record->ionode_bitmap, + new_record->ionode_bitmap)) + break; + } + + if(!bg_record) { + bg_record = xmalloc(sizeof(bg_record_t)); + copy_bg_record(new_record, bg_record); + debug4("adding %s", bg_record->bg_block_id); + list_append(incomp_list, bg_record); + count++; + } + list_iterator_reset(itr2); + } + list_iterator_destroy(itr); + list_iterator_destroy(itr2); + sort_bg_record_inc_size(incomp_list); + + return count; +} + +#endif // HAVE_BG + /* * Try to find resources for a given job request * IN job_ptr - pointer to job record in slurmctld @@ -800,20 +1204,37 @@ end_it: * be used * IN min_nodes, max_nodes - minimum and maximum number of nodes to allocate * to this job (considers slurm block limits) - * IN test_only - if true, only test if ever could run, not necessarily now + * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now + * SELECT_MODE_TEST_ONLY: test if job can ever run + * SELECT_MODE_WILL_RUN: determine when and where job can run * RET - SLURM_SUCCESS if job runnable now, error code otherwise */ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap, - uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes, - bool test_only) + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int mode) { - int spec = 1; /* this will be like, keep TYPE a priority, etc, */ - bg_record_t* record = NULL; + int rc = SLURM_SUCCESS; +#ifdef HAVE_BG + int i=0; + bg_record_t* bg_record = NULL; char buf[100]; - int i, rc = SLURM_SUCCESS; - 
uint16_t geo[BA_SYSTEM_DIMENSIONS]; uint16_t tmp16 = (uint16_t)NO_VAL; - + List block_list = NULL; + int blocks_added = 0; + int starttime = time(NULL); + bool test_only; + + if (mode == SELECT_MODE_TEST_ONLY || mode == SELECT_MODE_WILL_RUN) + test_only = true; + else if (mode == SELECT_MODE_RUN_NOW) + test_only = false; + else + return EINVAL; /* something not yet supported */ + + if(bluegene_layout_mode == LAYOUT_DYNAMIC) + slurm_mutex_lock(&create_dynamic_mutex); + + job_block_test_list = bg_job_block_list; select_g_sprint_jobinfo(job_ptr->select_jobinfo, buf, sizeof(buf), SELECT_PRINT_MIXED); @@ -832,71 +1253,323 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap, SELECT_PRINT_RAMDISK_IMAGE); debug2("RamDiskImage=%s", buf); - if(bluegene_layout_mode == LAYOUT_DYNAMIC) - slurm_mutex_lock(&create_dynamic_mutex); + slurm_mutex_lock(&block_state_mutex); + block_list = copy_bg_list(bg_list); + slurm_mutex_unlock(&block_state_mutex); - rc = _find_best_block_match(job_ptr, slurm_block_bitmap, min_nodes, - max_nodes, req_nodes, spec, - &record, test_only); + list_sort(block_list, (ListCmpF)_bg_record_sort_aval_dec); + + rc = _find_best_block_match(block_list, &blocks_added, + job_ptr, slurm_block_bitmap, min_nodes, + max_nodes, req_nodes, + &bg_record, test_only); if(rc == SLURM_SUCCESS) { - if(!record) { - debug2("can run, but block not made"); + if(bg_record) { + /* Here we see if there is a job running since + * some jobs take awhile to finish we need to + * make sure the time of the end is in the + * future. If it isn't (meaning it is in the + * past or current time) we add 5 seconds to + * it so we don't use the block immediately. + */ + if(bg_record->job_ptr + && bg_record->job_ptr->end_time) { + if(bg_record->job_ptr->end_time <= starttime) + starttime += 5; + else + starttime = + bg_record->job_ptr->end_time; + } + + job_ptr->start_time = starttime; + + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_NODES, + bg_record->nodes); select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + bg_record->ionodes); + + if(!bg_record->bg_block_id) { + uint16_t geo[BA_SYSTEM_DIMENSIONS]; + + debug2("%d can start job at " + "%u on %s on unmade block", + test_only, starttime, + bg_record->nodes); + select_g_set_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_BLOCK_ID, "unassigned"); - if(job_ptr->num_procs < bluegene_bp_node_cnt - && job_ptr->num_procs > 0) { - i = procs_per_node/job_ptr->num_procs; - debug2("divide by %d", i); - } else - i = 1; - min_nodes *= bluegene_bp_node_cnt/i; - select_g_set_jobinfo(job_ptr->select_jobinfo, + if(job_ptr->num_procs < bluegene_bp_node_cnt + && job_ptr->num_procs > 0) { + i = procs_per_node/job_ptr->num_procs; + debug2("divide by %d", i); + } else + i = 1; + min_nodes *= bluegene_bp_node_cnt/i; + select_g_set_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_NODE_CNT, &min_nodes); - - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - geo[i] = 0; - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_GEOMETRY, - &geo); - - } else { - slurm_mutex_lock(&block_state_mutex); + memset(geo, 0, + sizeof(uint16_t) * BA_SYSTEM_DIMENSIONS); + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_GEOMETRY, + &geo); + /* This is a fake record so we need to + * destroy it after we get the info from + * it */ + destroy_bg_record(bg_record); + } else { + if((bg_record->ionodes) + && (job_ptr->part_ptr->max_share <= 1)) + error("Small block used in " + "non-shared partition"); + + debug2("%d can start job at %u on %s", + 
test_only, starttime, + bg_record->nodes); - if((record->ionodes) - && (job_ptr->part_ptr->shared == 0)) - error("Small block used in " - "non-shared partition"); + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_BLOCK_ID, + bg_record->bg_block_id); + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_NODE_CNT, + &bg_record->node_cnt); + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_GEOMETRY, + &bg_record->geo); - /* set the block id and info about block */ - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_BLOCK_ID, - record->bg_block_id); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_IONODES, - record->ionodes); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_NODE_CNT, - &record->node_cnt); - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_GEOMETRY, - &record->geo); - tmp16 = record->conn_type; - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_CONN_TYPE, - &tmp16); - slurm_mutex_unlock(&block_state_mutex); + tmp16 = bg_record->conn_type; + select_g_set_jobinfo(job_ptr->select_jobinfo, + SELECT_DATA_CONN_TYPE, + &tmp16); + } + } else { + error("we got a success, but no block back"); } - if(test_only) { - select_g_set_jobinfo(job_ptr->select_jobinfo, - SELECT_DATA_BLOCK_ID, - "unassigned"); - } } + + if(bluegene_layout_mode == LAYOUT_DYNAMIC) { + slurm_mutex_lock(&block_state_mutex); + if(blocks_added) + _sync_block_lists(block_list, bg_list); + slurm_mutex_unlock(&block_state_mutex); + slurm_mutex_unlock(&create_dynamic_mutex); + } + + list_destroy(block_list); +#endif + return rc; +} + +extern int test_job_list(List req_list) +{ + int rc = SLURM_SUCCESS; +#ifdef HAVE_BG + bg_record_t* bg_record = NULL; + bg_record_t* new_record = NULL; + char buf[100]; +// uint16_t tmp16 = (uint16_t)NO_VAL; + List block_list = NULL; + int blocks_added = 0; + int starttime = time(NULL); + ListIterator itr = NULL; + select_will_run_t *will_run = NULL; + + slurm_mutex_lock(&job_list_test_mutex); + if(bluegene_layout_mode == LAYOUT_DYNAMIC) + slurm_mutex_lock(&create_dynamic_mutex); + + job_block_test_list = copy_bg_list(bg_job_block_list); + + slurm_mutex_lock(&block_state_mutex); + block_list = copy_bg_list(bg_list); + slurm_mutex_unlock(&block_state_mutex); + + itr = list_iterator_create(req_list); + while((will_run = list_next(itr))) { + if(!will_run->job_ptr) { + error("test_job_list: you need to give me a job_ptr"); + rc = SLURM_ERROR; + break; + } + + select_g_sprint_jobinfo(will_run->job_ptr->select_jobinfo, + buf, sizeof(buf), + SELECT_PRINT_MIXED); + debug("bluegene:submit_job_list: %s nodes=%u-%u-%u", + buf, will_run->min_nodes, + will_run->req_nodes, will_run->max_nodes); + list_sort(block_list, (ListCmpF)_bg_record_sort_aval_dec); + rc = _find_best_block_match(block_list, &blocks_added, + will_run->job_ptr, + will_run->avail_nodes, + will_run->min_nodes, + will_run->max_nodes, + will_run->req_nodes, + &bg_record, true); + + if(rc == SLURM_SUCCESS) { + if(bg_record) { + if(bg_record->job_ptr + && bg_record->job_ptr->end_time) { + starttime = + bg_record->job_ptr->end_time; + } + bg_record->job_running = + will_run->job_ptr->job_id; + bg_record->job_ptr = will_run->job_ptr; + debug2("test_job_list: " + "can run job %u on found block at %d" + "nodes = %s", + bg_record->job_ptr->job_id, + starttime, + bg_record->nodes); + + if(!block_exist_in_list(job_block_test_list, + bg_record)) { + new_record = + xmalloc(sizeof(bg_record_t)); + copy_bg_record(bg_record, new_record); + 
list_append(job_block_test_list, + new_record); + } + + if(will_run->job_ptr->start_time) { + if(will_run->job_ptr->start_time + < starttime) { + debug2("test_job_list: " + "Time is later " + "than one supplied."); + rc = SLURM_ERROR; + break; + } + + //continue; + } else + will_run->job_ptr->start_time + = starttime; + + if(will_run->job_ptr->time_limit != INFINITE + && will_run->job_ptr->time_limit != NO_VAL) + will_run->job_ptr->end_time = + will_run->job_ptr->start_time + + will_run->job_ptr->time_limit * + 60; + else if(will_run->job_ptr->part_ptr->max_time + != INFINITE + && will_run->job_ptr-> + part_ptr->max_time != NO_VAL) + will_run->job_ptr->end_time = + will_run->job_ptr->start_time + + will_run->job_ptr-> + part_ptr->max_time * 60; + else + will_run->job_ptr->end_time = + will_run->job_ptr->start_time + + 31536000; // + year + + select_g_set_jobinfo( + will_run->job_ptr->select_jobinfo, + SELECT_DATA_NODES, + bg_record->nodes); + select_g_set_jobinfo( + will_run->job_ptr->select_jobinfo, + SELECT_DATA_IONODES, + bg_record->ionodes); + +/* if(!bg_record->bg_block_id) { */ +/* uint16_t geo[BA_SYSTEM_DIMENSIONS]; */ + +/* debug2("test_job_list: " */ +/* "can start job at " */ +/* "%u on %s on unmade block", */ +/* starttime, */ +/* bg_record->nodes); */ +/* select_g_set_jobinfo( */ +/* will_run->job_ptr-> */ +/* select_jobinfo, */ +/* SELECT_DATA_BLOCK_ID, */ +/* "unassigned"); */ +/* if(will_run->job_ptr->num_procs */ +/* < bluegene_bp_node_cnt */ +/* && will_run->job_ptr->num_procs */ +/* > 0) { */ +/* i = procs_per_node/ */ +/* will_run->job_ptr-> */ +/* num_procs; */ +/* debug2("divide by %d", i); */ +/* } else */ +/* i = 1; */ +/* will_run->min_nodes *= */ +/* bluegene_bp_node_cnt/i; */ +/* select_g_set_jobinfo( */ +/* will_run->job_ptr-> */ +/* select_jobinfo, */ +/* SELECT_DATA_NODE_CNT, */ +/* &will_run->min_nodes); */ +/* memset(geo, 0, */ +/* sizeof(uint16_t) */ +/* * BA_SYSTEM_DIMENSIONS); */ +/* select_g_set_jobinfo( */ +/* will_run->job_ptr-> */ +/* select_jobinfo, */ +/* SELECT_DATA_GEOMETRY, */ +/* &geo); */ +/* } else { */ +/* if((bg_record->ionodes) */ +/* && (will_run->job_ptr->part_ptr-> */ +/* max_share */ +/* <= 1)) */ +/* error("Small block used in " */ +/* "non-shared partition"); */ + +/* debug2("test_job_list: " */ +/* "can start job at %u on %s", */ +/* starttime, */ +/* bg_record->nodes); */ + +/* select_g_set_jobinfo( */ +/* will_run->job_ptr-> */ +/* select_jobinfo, */ +/* SELECT_DATA_BLOCK_ID, */ +/* bg_record->bg_block_id); */ +/* select_g_set_jobinfo( */ +/* will_run->job_ptr-> */ +/* select_jobinfo, */ +/* SELECT_DATA_NODE_CNT, */ +/* &bg_record->node_cnt); */ +/* select_g_set_jobinfo( */ +/* will_run->job_ptr-> */ +/* select_jobinfo, */ +/* SELECT_DATA_GEOMETRY, */ +/* &bg_record->geo); */ + +/* tmp16 = bg_record->conn_type; */ +/* select_g_set_jobinfo( */ +/* will_run->job_ptr-> */ +/* select_jobinfo, */ +/* SELECT_DATA_CONN_TYPE, */ +/* &tmp16); */ +/* } */ + } else { + error("we got a success, but no block back"); + rc = SLURM_ERROR; + } + } + } + list_iterator_destroy(itr); + + if(bluegene_layout_mode == LAYOUT_DYNAMIC) slurm_mutex_unlock(&create_dynamic_mutex); + + list_destroy(block_list); + list_destroy(job_block_test_list); + + slurm_mutex_unlock(&job_list_test_mutex); +#endif return rc; } diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.h b/src/plugins/select/bluegene/plugin/bg_job_place.h index f523b52b1..448698e58 100644 --- a/src/plugins/select/bluegene/plugin/bg_job_place.h +++ 
b/src/plugins/select/bluegene/plugin/bg_job_place.h @@ -49,12 +49,15 @@ * to this job (considers slurm partition limits) * IN test_only - test to see if job is ever runnable, * or (false) runable right now - * IN test_only - if true, only test if ever could run, not necessarily now - * IN test_only - if true, only test if ever could run, not necessarily now + * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now + * SELECT_MODE_TEST_ONLY: test if job can ever run + * SELECT_MODE_WILL_RUN: determine when and where job can run * RET - SLURM_SUCCESS if job runnable now, error code otherwise */ extern int submit_job(struct job_record *job_ptr, bitstr_t *bitmap, - uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes, - bool test_only); + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int mode); + +extern int test_job_list(List req_list); #endif /* _BG_JOB_PLACE_H_ */ diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c index 5153de568..4f0e7383f 100644 --- a/src/plugins/select/bluegene/plugin/bg_job_run.c +++ b/src/plugins/select/bluegene/plugin/bg_job_run.c @@ -2,7 +2,7 @@ * bg_job_run.c - blue gene job execution (e.g. initiation and termination) * functions. * - * $Id: bg_job_run.c 13271 2008-02-14 20:02:00Z da $ + * $Id: bg_job_run.c 13947 2008-04-29 19:35:34Z jette $ ***************************************************************************** * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -70,8 +70,8 @@ enum update_op {START_OP, TERM_OP, SYNC_OP}; typedef struct bg_update { enum update_op op; /* start | terminate | sync */ - uid_t uid; /* new user */ - uint32_t job_id; /* SLURM job id */ + struct job_record *job_ptr; /* pointer to job running on + * block or NULL if no job */ uint16_t reboot; /* reboot block before starting job */ pm_partition_id_t bg_block_id; char *blrtsimage; /* BlrtsImage for this block */ @@ -205,32 +205,36 @@ static void _sync_agent(bg_update_t *bg_update_ptr) { bg_record_t * bg_record = NULL; - bg_record = - find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id); + bg_record = find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id); if(!bg_record) { error("No block %s", bg_update_ptr->bg_block_id); return; } slurm_mutex_lock(&block_state_mutex); - bg_record->job_running = bg_update_ptr->job_id; + + bg_record->job_running = bg_update_ptr->job_ptr->job_id; + bg_record->job_ptr = bg_update_ptr->job_ptr; + if(!block_exist_in_list(bg_job_block_list, bg_record)) { list_push(bg_job_block_list, bg_record); num_unused_cpus -= bg_record->bp_count*bg_record->cpus_per_bp; } + if(!block_exist_in_list(bg_booted_block_list, bg_record)) + list_push(bg_booted_block_list, bg_record); slurm_mutex_unlock(&block_state_mutex); if(bg_record->state == RM_PARTITION_READY) { - if(bg_record->user_uid != bg_update_ptr->uid) { + if(bg_record->user_uid != bg_update_ptr->job_ptr->user_id) { int set_user_rc = SLURM_SUCCESS; slurm_mutex_lock(&block_state_mutex); debug("User isn't correct for job %d on %s, " "fixing...", - bg_update_ptr->job_id, + bg_update_ptr->job_ptr->job_id, bg_update_ptr->bg_block_id); xfree(bg_record->target_name); - bg_record->target_name = - xstrdup(uid_to_string(bg_update_ptr->uid)); + bg_record->target_name = xstrdup( + uid_to_string(bg_update_ptr->job_ptr->user_id)); set_user_rc = set_block_user(bg_record); slurm_mutex_unlock(&block_state_mutex); @@ -267,7 +271,7 @@ static void 
_start_agent(bg_update_t *bg_update_ptr) if(!bg_record) { error("block %s not found in bg_list", bg_update_ptr->bg_block_id); - (void) slurm_fail_job(bg_update_ptr->job_id); + (void) slurm_fail_job(bg_update_ptr->job_ptr->job_id); slurm_mutex_unlock(&job_start_mutex); return; } @@ -275,9 +279,9 @@ static void _start_agent(bg_update_t *bg_update_ptr) if(bg_record->job_running <= NO_JOB_RUNNING) { slurm_mutex_unlock(&block_state_mutex); slurm_mutex_unlock(&job_start_mutex); - debug("job %d finished during the queueing job " + debug("job %u finished during the queueing job " "(everything is ok)", - bg_update_ptr->job_id); + bg_update_ptr->job_ptr->job_id); return; } if(bg_record->state == RM_PARTITION_DEALLOCATING) { @@ -333,8 +337,8 @@ static void _start_agent(bg_update_t *bg_update_ptr) if(bg_record->job_running <= NO_JOB_RUNNING) { slurm_mutex_unlock(&block_state_mutex); slurm_mutex_unlock(&job_start_mutex); - debug("job %d already finished before boot", - bg_update_ptr->job_id); + debug("job %u already finished before boot", + bg_update_ptr->job_ptr->job_id); return; } @@ -424,7 +428,7 @@ static void _start_agent(bg_update_t *bg_update_ptr) is a no-op if issued prior to the script initiation do clean up just incase the fail job isn't ran */ - (void) slurm_fail_job(bg_update_ptr->job_id); + (void) slurm_fail_job(bg_update_ptr->job_ptr->job_id); slurm_mutex_lock(&block_state_mutex); if (remove_from_bg_list(bg_job_block_list, bg_record) == SLURM_SUCCESS) { @@ -441,16 +445,17 @@ static void _start_agent(bg_update_t *bg_update_ptr) if(bg_record->job_running <= NO_JOB_RUNNING) { slurm_mutex_unlock(&job_start_mutex); - debug("job %d finished during the start of the boot " + debug("job %u finished during the start of the boot " "(everything is ok)", - bg_update_ptr->job_id); + bg_update_ptr->job_ptr->job_id); return; } slurm_mutex_lock(&block_state_mutex); bg_record->boot_count = 0; xfree(bg_record->target_name); - bg_record->target_name = xstrdup(uid_to_string(bg_update_ptr->uid)); + bg_record->target_name = xstrdup( + uid_to_string(bg_update_ptr->job_ptr->user_id)); debug("setting the target_name for Block %s to %s", bg_record->bg_block_id, bg_record->target_name); @@ -604,9 +609,10 @@ static void _term_agent(bg_update_t *bg_update_ptr) } slurm_mutex_lock(&block_state_mutex); - if(bg_record->job_running > NO_JOB_RUNNING) + if(bg_record->job_running > NO_JOB_RUNNING) { bg_record->job_running = NO_JOB_RUNNING; - + bg_record->job_ptr = NULL; + } /* remove user from list */ slurm_conf_lock(); @@ -647,7 +653,7 @@ static void _term_agent(bg_update_t *bg_update_ptr) #endif } - + /* Process requests off the bg_update_list queue and exit when done */ static void *_block_agent(void *args) { @@ -662,6 +668,8 @@ static void *_block_agent(void *args) while (!agent_fini) { slurm_mutex_lock(&agent_cnt_mutex); bg_update_ptr = list_dequeue(bg_update_list); +/* info("running %d %d %d", TERM_OP, bg_update_ptr->op, */ +/* list_count(bg_update_list)); */ slurm_mutex_unlock(&agent_cnt_mutex); if (!bg_update_ptr) { usleep(100000); @@ -698,9 +706,16 @@ static void _block_op(bg_update_t *bg_update_ptr) && ((bg_update_list = list_create(_bg_list_del)) == NULL)) fatal("malloc failure in start_job/list_create"); - /* push job onto queue in a FIFO */ - if (list_push(bg_update_list, bg_update_ptr) == NULL) - fatal("malloc failure in _block_op/list_push"); + /* push TERM_OP on the head of the queue + * append START_OP and SYNC_OP to the tail of the queue */ + if (bg_update_ptr->op == TERM_OP) { + if 
(list_push(bg_update_list, bg_update_ptr) == NULL) + fatal("malloc failure in _block_op/list_push"); + } else { + if (list_enqueue(bg_update_list, bg_update_ptr) == NULL) + fatal("malloc failure in _block_op/list_enqueue"); + } + /* already running MAX_AGENTS we don't really need more since they never end */ if (agent_cnt > MAX_AGENT_COUNT) { @@ -785,7 +800,7 @@ static int _excise_block(List block_list, pm_partition_id_t bg_block_id, /* exact match of name and node list */ debug("synced Block %s", bg_block_id); - list_delete(iter); + list_delete_item(iter); rc = SLURM_SUCCESS; break; } @@ -840,8 +855,8 @@ extern int start_job(struct job_record *job_ptr) bg_update_ptr = xmalloc(sizeof(bg_update_t)); bg_update_ptr->op = START_OP; - bg_update_ptr->uid = job_ptr->user_id; - bg_update_ptr->job_id = job_ptr->job_id; + bg_update_ptr->job_ptr = job_ptr; + select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_BLOCK_ID, &(bg_update_ptr->bg_block_id)); @@ -890,7 +905,8 @@ extern int start_job(struct job_record *job_ptr) slurm_mutex_lock(&block_state_mutex); job_ptr->num_procs = (bg_record->cpus_per_bp * bg_record->bp_count); - bg_record->job_running = bg_update_ptr->job_id; + bg_record->job_running = bg_update_ptr->job_ptr->job_id; + bg_record->job_ptr = bg_update_ptr->job_ptr; if(!block_exist_in_list(bg_job_block_list, bg_record)) { list_push(bg_job_block_list, bg_record); num_unused_cpus -= @@ -900,7 +916,7 @@ extern int start_job(struct job_record *job_ptr) list_push(bg_booted_block_list, bg_record); slurm_mutex_unlock(&block_state_mutex); } else { - error("bg_record %s does exist, requested for job (%d)", + error("bg_record %s doesn't exist, requested for job (%d)", bg_update_ptr->bg_block_id, job_ptr->job_id); _bg_list_del(bg_update_ptr); return SLURM_ERROR; @@ -929,8 +945,7 @@ int term_job(struct job_record *job_ptr) bg_update_ptr = xmalloc(sizeof(bg_update_t)); bg_update_ptr->op = TERM_OP; - bg_update_ptr->uid = job_ptr->user_id; - bg_update_ptr->job_id = job_ptr->job_id; + bg_update_ptr->job_ptr = job_ptr; select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_BLOCK_ID, &(bg_update_ptr->bg_block_id)); @@ -1014,12 +1029,13 @@ extern int sync_jobs(List job_list) continue; } - debug3("Queue sync of job %u in BG block %s", + debug3("Queue sync of job %u in BG block %s " + "ending at %d", job_ptr->job_id, - bg_update_ptr->bg_block_id); + bg_update_ptr->bg_block_id, + job_ptr->end_time); bg_update_ptr->op = SYNC_OP; - bg_update_ptr->uid = job_ptr->user_id; - bg_update_ptr->job_id = job_ptr->job_id; + bg_update_ptr->job_ptr = job_ptr; _block_op(bg_update_ptr); } list_iterator_destroy(job_iterator); diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.h b/src/plugins/select/bluegene/plugin/bg_job_run.h index 92db1d14d..9ea7ea949 100644 --- a/src/plugins/select/bluegene/plugin/bg_job_run.h +++ b/src/plugins/select/bluegene/plugin/bg_job_run.h @@ -40,6 +40,13 @@ #include "src/slurmctld/slurmctld.h" +/* + * Boot a block. Partition state expected to be FREE upon entry. + * NOTE: This function does not wait for the boot to complete. + * the slurm prolog script needs to perform the waiting. + */ +extern int boot_block(bg_record_t *bg_record); + /* * Perform any setup required to initiate a job * job_ptr IN - pointer to the job being initiated @@ -51,6 +58,13 @@ */ extern int start_job(struct job_record *job_ptr); +/* + * Synchronize BG block state to that of currently active jobs. 
+ * This can recover from slurmctld crashes when block ownership + * changes were queued + */ +extern int sync_jobs(List job_list); + /* * Perform any work required to terminate a job * job_ptr IN - pointer to the job being terminated @@ -72,17 +86,4 @@ extern int term_job(struct job_record *job_ptr); */ extern int term_jobs_on_block(pm_partition_id_t bg_block_id); -/* - * Synchronize BG block state to that of currently active jobs. - * This can recover from slurmctld crashes when block ownership - * changes were queued - */ -extern int sync_jobs(List job_list); - -/* - * Boot a block. Partition state expected to be FREE upon entry. - * NOTE: This function does not wait for the boot to complete. - * the slurm prolog script needs to perform the waiting. - */ -extern int boot_block(bg_record_t *bg_record); #endif /* _BG_JOB_RUN_H_ */ diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.c b/src/plugins/select/bluegene/plugin/bg_record_functions.c new file mode 100644 index 000000000..f0f78ecf5 --- /dev/null +++ b/src/plugins/select/bluegene/plugin/bg_record_functions.c @@ -0,0 +1,867 @@ +/*****************************************************************************\ + * bg_record_functions.c - header for creating blocks in a static environment. + * + * $Id: bg_record_functions.c 12954 2008-01-04 20:37:49Z da $ + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#include "bluegene.h" +#include "dynamic_block.h" + +#include "src/slurmctld/trigger_mgr.h" + +/* some local functions */ +#ifdef HAVE_BG +static int _addto_node_list(bg_record_t *bg_record, int *start, int *end); +static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b); +#endif + +extern void print_bg_record(bg_record_t* bg_record) +{ + if (!bg_record) { + error("print_bg_record, record given is null"); + return; + } +#if _DEBUG + info(" bg_record: "); + if (bg_record->bg_block_id) + info("\tbg_block_id: %s", bg_record->bg_block_id); + info("\tnodes: %s", bg_record->nodes); + info("\tsize: %d BPs %u Nodes %d cpus", + bg_record->bp_count, + bg_record->node_cnt, + bg_record->cpus_per_bp * bg_record->bp_count); + info("\tgeo: %ux%ux%u", bg_record->geo[X], bg_record->geo[Y], + bg_record->geo[Z]); + info("\tconn_type: %s", convert_conn_type(bg_record->conn_type)); + info("\tnode_use: %s", convert_node_use(bg_record->node_use)); + if (bg_record->bitmap) { + char bitstring[BITSIZE]; + bit_fmt(bitstring, BITSIZE, bg_record->bitmap); + info("\tbitmap: %s", bitstring); + } +#else +{ + char tmp_char[256]; + format_node_name(bg_record, tmp_char, sizeof(tmp_char)); + info("Record: BlockID:%s Nodes:%s Conn:%s", + bg_record->bg_block_id, tmp_char, + convert_conn_type(bg_record->conn_type)); +} +#endif +} + +extern void destroy_bg_record(void *object) +{ + bg_record_t* bg_record = (bg_record_t*) object; + + if (bg_record) { + xfree(bg_record->bg_block_id); + xfree(bg_record->nodes); + xfree(bg_record->ionodes); + xfree(bg_record->user_name); + xfree(bg_record->target_name); + if(bg_record->bg_block_list) { + list_destroy(bg_record->bg_block_list); + bg_record->bg_block_list = NULL; + } + FREE_NULL_BITMAP(bg_record->bitmap); + FREE_NULL_BITMAP(bg_record->ionode_bitmap); + + xfree(bg_record->blrtsimage); + xfree(bg_record->linuximage); + xfree(bg_record->mloaderimage); + xfree(bg_record->ramdiskimage); + + xfree(bg_record); + } +} + +extern int block_exist_in_list(List my_list, bg_record_t *bg_record) +{ + ListIterator itr = list_iterator_create(my_list); + bg_record_t *found_record = NULL; + int rc = 0; + + while ((found_record = (bg_record_t *) list_next(itr)) != NULL) { + /* check for full node bitmap compare */ + if(bit_equal(bg_record->bitmap, found_record->bitmap) + && bit_equal(bg_record->ionode_bitmap, + found_record->ionode_bitmap)) { + if(bg_record->ionodes) + debug3("This block %s[%s] " + "is already in the list %s", + bg_record->nodes, + bg_record->ionodes, + found_record->bg_block_id); + else + debug3("This block %s " + "is already in the list %s", + bg_record->nodes, + found_record->bg_block_id); + + rc = 1; + break; + } + } + list_iterator_destroy(itr); + return rc; +} + +extern void process_nodes(bg_record_t *bg_record, bool startup) +{ +#ifdef HAVE_BG + int j=0, number; + int start[BA_SYSTEM_DIMENSIONS]; + int end[BA_SYSTEM_DIMENSIONS]; + ListIterator itr; + ba_node_t* ba_node = NULL; + + if(!bg_record->bg_block_list + || !list_count(bg_record->bg_block_list)) { + if(!bg_record->bg_block_list) { + bg_record->bg_block_list = + list_create(destroy_ba_node); + } + bg_record->bp_count = 0; + if((bg_record->conn_type == SELECT_SMALL) && (!startup)) + error("We shouldn't be here there could be some " + "badness if we use this logic %s", + bg_record->nodes); + while (bg_record->nodes[j] != '\0') { + if ((bg_record->nodes[j] == '[' + || bg_record->nodes[j] == ',') + && (bg_record->nodes[j+8] == ']' + || 
bg_record->nodes[j+8] == ',') + && (bg_record->nodes[j+4] == 'x' + || bg_record->nodes[j+4] == '-')) { + j++; + number = xstrntol(bg_record->nodes + j, + NULL, BA_SYSTEM_DIMENSIONS, + HOSTLIST_BASE); + start[X] = number / + (HOSTLIST_BASE * HOSTLIST_BASE); + start[Y] = (number % + (HOSTLIST_BASE * HOSTLIST_BASE)) + / HOSTLIST_BASE; + start[Z] = (number % HOSTLIST_BASE); + j += 4; + number = xstrntol(bg_record->nodes + j, + NULL, 3, HOSTLIST_BASE); + end[X] = number / + (HOSTLIST_BASE * HOSTLIST_BASE); + end[Y] = (number + % (HOSTLIST_BASE * HOSTLIST_BASE)) + / HOSTLIST_BASE; + end[Z] = (number % HOSTLIST_BASE); + j += 3; + if(!bg_record->bp_count) { + bg_record->start[X] = start[X]; + bg_record->start[Y] = start[Y]; + bg_record->start[Z] = start[Z]; + debug2("start is %dx%dx%d", + bg_record->start[X], + bg_record->start[Y], + bg_record->start[Z]); + } + bg_record->bp_count += _addto_node_list( + bg_record, + start, + end); + if(bg_record->nodes[j] != ',') + break; + j--; + } else if((bg_record->nodes[j] >= '0' + && bg_record->nodes[j] <= '9') + || (bg_record->nodes[j] >= 'A' + && bg_record->nodes[j] <= 'Z')) { + + number = xstrntol(bg_record->nodes + j, + NULL, BA_SYSTEM_DIMENSIONS, + HOSTLIST_BASE); + start[X] = number / + (HOSTLIST_BASE * HOSTLIST_BASE); + start[Y] = (number % + (HOSTLIST_BASE * HOSTLIST_BASE)) + / HOSTLIST_BASE; + start[Z] = (number % HOSTLIST_BASE); + j+=3; + if(!bg_record->bp_count) { + bg_record->start[X] = start[X]; + bg_record->start[Y] = start[Y]; + bg_record->start[Z] = start[Z]; + debug2("start is %dx%dx%d", + bg_record->start[X], + bg_record->start[Y], + bg_record->start[Z]); + } + bg_record->bp_count += _addto_node_list( + bg_record, + start, + start); + if(bg_record->nodes[j] != ',') + break; + j--; + } + j++; + } + } + + bg_record->geo[X] = 0; + bg_record->geo[Y] = 0; + bg_record->geo[Z] = 0; + end[X] = -1; + end[Y] = -1; + end[Z] = -1; + + list_sort(bg_record->bg_block_list, (ListCmpF) _ba_node_cmpf_inc); + + itr = list_iterator_create(bg_record->bg_block_list); + while ((ba_node = list_next(itr)) != NULL) { + if(!ba_node->used) + continue; + debug4("%c%c%c is included in this block", + alpha_num[ba_node->coord[X]], + alpha_num[ba_node->coord[Y]], + alpha_num[ba_node->coord[Z]]); + + if(ba_node->coord[X]>end[X]) { + bg_record->geo[X]++; + end[X] = ba_node->coord[X]; + } + if(ba_node->coord[Y]>end[Y]) { + bg_record->geo[Y]++; + end[Y] = ba_node->coord[Y]; + } + if(ba_node->coord[Z]>end[Z]) { + bg_record->geo[Z]++; + end[Z] = ba_node->coord[Z]; + } + } + list_iterator_destroy(itr); + debug3("geo = %c%c%c bp count is %d\n", + alpha_num[bg_record->geo[X]], + alpha_num[bg_record->geo[Y]], + alpha_num[bg_record->geo[Z]], + bg_record->bp_count); + + if ((bg_record->geo[X] == DIM_SIZE[X]) + && (bg_record->geo[Y] == DIM_SIZE[Y]) + && (bg_record->geo[Z] == DIM_SIZE[Z])) { + bg_record->full_block = 1; + } + +/* #ifndef HAVE_BG_FILES */ +/* max_dim[X] = MAX(max_dim[X], end[X]); */ +/* max_dim[Y] = MAX(max_dim[Y], end[Y]); */ +/* max_dim[Z] = MAX(max_dim[Z], end[Z]); */ +/* #endif */ + + if (node_name2bitmap(bg_record->nodes, + false, + &bg_record->bitmap)) { + fatal("1 Unable to convert nodes %s to bitmap", + bg_record->nodes); + } +#endif + return; +} + +/* + * NOTE: This function does not do a mutex lock so if you are copying the + * main bg_list you need to lock 'block_state_mutex' before calling + */ +extern List copy_bg_list(List in_list) +{ + bg_record_t *bg_record = NULL; + bg_record_t *new_record = NULL; + List out_list = list_create(destroy_bg_record); + 
ListIterator itr = list_iterator_create(in_list); + + while ((bg_record = (bg_record_t *) list_next(itr))) { + new_record = xmalloc(sizeof(bg_record_t)); + new_record->original = bg_record; + copy_bg_record(bg_record, new_record); + list_append(out_list, new_record); + } + + list_iterator_destroy(itr); + + return out_list; +} + +extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record) +{ + int i; + ListIterator itr = NULL; + ba_node_t *ba_node = NULL, *new_ba_node = NULL; + + if(!fir_record || !sec_record) { + error("copy_bg_record: " + "given a null for either first record or second record"); + return; + } + + xfree(sec_record->bg_block_id); + sec_record->bg_block_id = xstrdup(fir_record->bg_block_id); + xfree(sec_record->nodes); + sec_record->nodes = xstrdup(fir_record->nodes); + xfree(sec_record->ionodes); + sec_record->ionodes = xstrdup(fir_record->ionodes); + xfree(sec_record->user_name); + sec_record->user_name = xstrdup(fir_record->user_name); + xfree(sec_record->target_name); + sec_record->target_name = xstrdup(fir_record->target_name); + + xfree(sec_record->blrtsimage); + sec_record->blrtsimage = xstrdup(fir_record->blrtsimage); + xfree(sec_record->linuximage); + sec_record->linuximage = xstrdup(fir_record->linuximage); + xfree(sec_record->mloaderimage); + sec_record->mloaderimage = xstrdup(fir_record->mloaderimage); + xfree(sec_record->ramdiskimage); + sec_record->ramdiskimage = xstrdup(fir_record->ramdiskimage); + + sec_record->user_uid = fir_record->user_uid; + sec_record->state = fir_record->state; + sec_record->conn_type = fir_record->conn_type; + sec_record->node_use = fir_record->node_use; + sec_record->bp_count = fir_record->bp_count; + sec_record->switch_count = fir_record->switch_count; + sec_record->boot_state = fir_record->boot_state; + sec_record->boot_count = fir_record->boot_count; + sec_record->full_block = fir_record->full_block; + + for(i=0;i<BA_SYSTEM_DIMENSIONS;i++) { + sec_record->geo[i] = fir_record->geo[i]; + sec_record->start[i] = fir_record->start[i]; + } + + FREE_NULL_BITMAP(sec_record->bitmap); + if(fir_record->bitmap + && (sec_record->bitmap = bit_copy(fir_record->bitmap)) == NULL) { + error("Unable to copy bitmap for %s", fir_record->nodes); + sec_record->bitmap = NULL; + } + FREE_NULL_BITMAP(sec_record->ionode_bitmap); + if(fir_record->ionode_bitmap + && (sec_record->ionode_bitmap + = bit_copy(fir_record->ionode_bitmap)) == NULL) { + error("Unable to copy ionode_bitmap for %s", + fir_record->nodes); + sec_record->ionode_bitmap = NULL; + } + if(sec_record->bg_block_list) + list_destroy(sec_record->bg_block_list); + sec_record->bg_block_list = list_create(destroy_ba_node); + if(fir_record->bg_block_list) { + itr = list_iterator_create(fir_record->bg_block_list); + while((ba_node = list_next(itr))) { + new_ba_node = ba_copy_node(ba_node); + list_push(sec_record->bg_block_list, new_ba_node); + } + list_iterator_destroy(itr); + } + sec_record->job_running = fir_record->job_running; + sec_record->job_ptr = fir_record->job_ptr; + sec_record->cpus_per_bp = fir_record->cpus_per_bp; + sec_record->node_cnt = fir_record->node_cnt; + sec_record->quarter = fir_record->quarter; + sec_record->nodecard = fir_record->nodecard; +} + +extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id) +{ + ListIterator itr; + bg_record_t *bg_record = NULL; + + if(!bg_block_id) + return NULL; + + if(my_list) { + slurm_mutex_lock(&block_state_mutex); + itr = list_iterator_create(my_list); + while ((bg_record = (bg_record_t *) list_next(itr)) != 
NULL) { + if(bg_record->bg_block_id) + if (!strcmp(bg_record->bg_block_id, + bg_block_id)) + break; + } + list_iterator_destroy(itr); + slurm_mutex_unlock(&block_state_mutex); + if(bg_record) + return bg_record; + else + return NULL; + } else { + error("find_bg_record_in_list: no list"); + return NULL; + } + +} + +/* All changes to the bg_list target_name must + be done before this function is called. + also slurm_conf_lock() must be called before calling this + function along with slurm_conf_unlock() afterwards. +*/ +extern int update_block_user(bg_record_t *bg_record, int set) +{ + struct passwd *pw_ent = NULL; + + if(!bg_record->target_name) { + error("Must set target_name to run update_block_user."); + return -1; + } + if(!bg_record->user_name) { + error("No user_name"); + bg_record->user_name = xstrdup(slurmctld_conf.slurm_user_name); + } +#ifdef HAVE_BG_FILES + int rc=0; + if(set) { + if((rc = remove_all_users(bg_record->bg_block_id, + bg_record->target_name)) + == REMOVE_USER_ERR) { + error("1 Something happened removing " + "users from block %s", + bg_record->bg_block_id); + return -1; + } else if (rc == REMOVE_USER_NONE) { + if (strcmp(bg_record->target_name, + slurmctld_conf.slurm_user_name)) { + info("Adding user %s to Block %s", + bg_record->target_name, + bg_record->bg_block_id); + + if ((rc = bridge_add_block_user( + bg_record->bg_block_id, + bg_record->target_name)) + != STATUS_OK) { + error("bridge_add_block_user" + "(%s,%s): %s", + bg_record->bg_block_id, + bg_record->target_name, + bg_err_str(rc)); + return -1; + } + } + } + } +#endif + + if(strcmp(bg_record->target_name, bg_record->user_name)) { + xfree(bg_record->user_name); + bg_record->user_name = xstrdup(bg_record->target_name); + if((pw_ent = getpwnam(bg_record->user_name)) == NULL) { + error("getpwnam(%s): %m", bg_record->user_name); + return -1; + } else { + bg_record->user_uid = pw_ent->pw_uid; + } + return 1; + } + + return 0; +} + +/* If any nodes in node_list are drained, draining, or down, + * then just return + * else drain all of the nodes + * This function lets us drain an entire bgblock only if + * we have not already identified a specific node as bad. 
*/ +extern void drain_as_needed(bg_record_t *bg_record, char *reason) +{ + bool needed = true; + hostlist_t hl; + char *host = NULL; + char bg_down_node[128]; + + if(bg_record->job_running > NO_JOB_RUNNING) + slurm_fail_job(bg_record->job_running); + + /* small blocks */ + if(bg_record->cpus_per_bp != procs_per_node) { + debug2("small block"); + goto end_it; + } + + /* at least one base partition */ + hl = hostlist_create(bg_record->nodes); + if (!hl) { + slurm_drain_nodes(bg_record->nodes, reason); + return; + } + while ((host = hostlist_shift(hl))) { + if (node_already_down(bg_down_node)) { + needed = false; + free(host); + break; + } + free(host); + } + hostlist_destroy(hl); + + if (needed) { + slurm_drain_nodes(bg_record->nodes, reason); + } +end_it: + while(bg_record->job_running > NO_JOB_RUNNING) { + debug2("block %s is still running job %d", + bg_record->bg_block_id, bg_record->job_running); + sleep(1); + } + + slurm_mutex_lock(&block_state_mutex); + error("Setting Block %s to ERROR state.", bg_record->bg_block_id); + bg_record->job_running = BLOCK_ERROR_STATE; + bg_record->state = RM_PARTITION_ERROR; + slurm_mutex_unlock(&block_state_mutex); + trigger_block_error(); + return; +} + +extern int set_ionodes(bg_record_t *bg_record) +{ + int i = 0; + int start_bit = 0; + int size = 0; + char bitstring[BITSIZE]; + + if(!bg_record) + return SLURM_ERROR; + /* set the bitmap blank here if it is a full node we don't + want anything set we also don't want the bg_record->ionodes set. + */ + bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets); + if(bg_record->quarter == (uint16_t)NO_VAL) { + return SLURM_SUCCESS; + } + + start_bit = bluegene_quarter_ionode_cnt*bg_record->quarter; + + if(bg_record->nodecard != (uint16_t)NO_VAL + && bluegene_nodecard_ionode_cnt) { + start_bit += bluegene_nodecard_ionode_cnt*bg_record->nodecard; + size = bluegene_nodecard_ionode_cnt; + } else + size = bluegene_quarter_ionode_cnt; + size += start_bit; + + if(size == start_bit) { + error("start bit is the same as the end bit %d", size); + return SLURM_ERROR; + } + for(i=start_bit; i<size; i++) + bit_set(bg_record->ionode_bitmap, i); + + bit_fmt(bitstring, BITSIZE, bg_record->ionode_bitmap); + bg_record->ionodes = xstrdup(bitstring); + + return SLURM_SUCCESS; +} + +extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq) +{ + bg_record_t *bg_record = NULL; + bg_record_t *found_record = NULL; + ba_node_t *ba_node = NULL; + ListIterator itr; + struct passwd *pw_ent = NULL; + int i, len; + int small_size = 0; + int small_count = 0; + uint16_t quarter = 0; + uint16_t nodecard = 0; + int node_cnt = 0; + + if(!records) { + fatal("add_bg_record: no records list given"); + } + bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t)); + + slurm_conf_lock(); + bg_record->user_name = + xstrdup(slurmctld_conf.slurm_user_name); + bg_record->target_name = + xstrdup(slurmctld_conf.slurm_user_name); + slurm_conf_unlock(); + if((pw_ent = getpwnam(bg_record->user_name)) == NULL) { + error("getpwnam(%s): %m", bg_record->user_name); + } else { + bg_record->user_uid = pw_ent->pw_uid; + } + + bg_record->bg_block_list = list_create(destroy_ba_node); + if(used_nodes) { + if(copy_node_path(used_nodes, bg_record->bg_block_list) + == SLURM_ERROR) + error("couldn't copy the path for the allocation"); + bg_record->bp_count = list_count(used_nodes); + } + bg_record->quarter = (uint16_t)NO_VAL; + bg_record->nodecard = (uint16_t)NO_VAL; + if(set_ionodes(bg_record) == SLURM_ERROR) { + fatal("add_bg_record: problem creating 
ionodes"); + } + /* bg_record->boot_state = 0; Implicit */ + /* bg_record->state = 0; Implicit */ + debug2("asking for %s %d %d %s", + blockreq->block, blockreq->quarters, blockreq->nodecards, + convert_conn_type(blockreq->conn_type)); + len = strlen(blockreq->block); + i=0; + while(i<len + && blockreq->block[i] != '[' + && (blockreq->block[i] < '0' || blockreq->block[i] > 'Z' + || (blockreq->block[i] > '9' && blockreq->block[i] < 'A'))) + i++; + + if(i<len) { + len -= i; + slurm_conf_lock(); + len += strlen(slurmctld_conf.node_prefix)+1; + bg_record->nodes = xmalloc(len); + snprintf(bg_record->nodes, len, "%s%s", + slurmctld_conf.node_prefix, blockreq->block+i); + slurm_conf_unlock(); + + } else + fatal("BPs=%s is in a weird format", blockreq->block); + + process_nodes(bg_record, false); + + bg_record->node_use = SELECT_COPROCESSOR_MODE; + bg_record->conn_type = blockreq->conn_type; + bg_record->cpus_per_bp = procs_per_node; + bg_record->node_cnt = bluegene_bp_node_cnt * bg_record->bp_count; + bg_record->job_running = NO_JOB_RUNNING; + + if(blockreq->blrtsimage) + bg_record->blrtsimage = xstrdup(blockreq->blrtsimage); + else + bg_record->blrtsimage = xstrdup(default_blrtsimage); + + if(blockreq->linuximage) + bg_record->linuximage = xstrdup(blockreq->linuximage); + else + bg_record->linuximage = xstrdup(default_linuximage); + + if(blockreq->mloaderimage) + bg_record->mloaderimage = xstrdup(blockreq->mloaderimage); + else + bg_record->mloaderimage = xstrdup(default_mloaderimage); + + if(blockreq->ramdiskimage) + bg_record->ramdiskimage = xstrdup(blockreq->ramdiskimage); + else + bg_record->ramdiskimage = xstrdup(default_ramdiskimage); + + if(bg_record->conn_type != SELECT_SMALL) { + /* this needs to be an append so we keep things in the + order we got them, they will be sorted later */ + list_append(records, bg_record); + /* this isn't a correct list so we need to set it later for + now we just used it to be the bp number */ + if(!used_nodes) { + debug4("we didn't get a request list so we are " + "destroying this bp list"); + list_destroy(bg_record->bg_block_list); + bg_record->bg_block_list = NULL; + } + } else { + debug("adding a small block"); + /* if the ionode cnt for nodecards is 0 then don't + allow a nodecard allocation + */ + if(!bluegene_nodecard_ionode_cnt) { + if(blockreq->nodecards) + fatal("There is an error in your " + "bluegene.conf file.\n" + "Can't create a 32 node block with " + "Numpsets=%u. (Try setting it to 64)", + bluegene_numpsets); + } + + if(blockreq->nodecards==0 && blockreq->quarters==0) { + info("No specs given for this small block, " + "I am spliting this block into 4 quarters"); + blockreq->quarters=4; + } + + i = (blockreq->nodecards*bluegene_nodecard_node_cnt) + + (blockreq->quarters*bluegene_quarter_node_cnt); + if(i != bluegene_bp_node_cnt) + fatal("There is an error in your bluegene.conf file.\n" + "I am unable to request %d nodes consisting of " + "%u nodecards and\n%u quarters in one " + "base partition with %u nodes.", + i, bluegene_bp_node_cnt, + blockreq->nodecards, blockreq->quarters); + small_count = blockreq->nodecards+blockreq->quarters; + + /* Automatically create 4-way split if + * conn_type == SELECT_SMALL in bluegene.conf + * Here we go through each node listed and do the same thing + * for each node. 
+ */ + itr = list_iterator_create(bg_record->bg_block_list); + while ((ba_node = list_next(itr)) != NULL) { + /* break base partition up into 16 parts */ + small_size = 16; + node_cnt = 0; + quarter = 0; + nodecard = 0; + for(i=0; i<small_count; i++) { + if(i == blockreq->nodecards) { + /* break base partition + up into 4 parts */ + small_size = 4; + } + + if(small_size == 4) + nodecard = (uint16_t)NO_VAL; + else + nodecard = i%4; + found_record = create_small_record(bg_record, + quarter, + nodecard); + + /* this needs to be an append so we + keep things in the order we got + them, they will be sorted later */ + list_append(records, found_record); + node_cnt += bluegene_bp_node_cnt/small_size; + if(node_cnt == 128) { + node_cnt = 0; + quarter++; + } + } + } + list_iterator_destroy(itr); + destroy_bg_record(bg_record); + } + return SLURM_SUCCESS; +} + +extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size) +{ + if(bg_record->ionodes) { + snprintf(buf, buf_size, "%s[%s]", + bg_record->nodes, + bg_record->ionodes); + } else { + snprintf(buf, buf_size, "%s", bg_record->nodes); + } + return SLURM_SUCCESS; +} + +/************************* local functions ***************************/ + +#ifdef HAVE_BG +static int _addto_node_list(bg_record_t *bg_record, int *start, int *end) +{ + int node_count=0; + int x,y,z; + char node_name_tmp[255]; + ba_node_t *ba_node = NULL; + + if ((start[X] < 0) || (start[Y] < 0) || (start[Z] < 0)) { + fatal("bluegene.conf starting coordinate is invalid: %d%d%d", + start[X], start[Y], start[Z]); + } + if ((end[X] >= DIM_SIZE[X]) || (end[Y] >= DIM_SIZE[Y]) + || (end[Z] >= DIM_SIZE[Z])) { + fatal("bluegene.conf matrix size exceeds space defined in " + "slurm.conf %c%c%cx%d%d%d => %c%c%c", + alpha_num[start[X]], alpha_num[start[Y]], + alpha_num[start[Z]], + end[X], end[Y], end[Z], + alpha_num[DIM_SIZE[X]], alpha_num[DIM_SIZE[Y]], + alpha_num[DIM_SIZE[Z]]); + } + debug3("adding bps: %c%c%cx%c%c%c", + alpha_num[start[X]], alpha_num[start[Y]], alpha_num[start[Z]], + alpha_num[end[X]], alpha_num[end[Y]], alpha_num[end[Z]]); + debug3("slurm.conf: %c%c%c", + alpha_num[DIM_SIZE[X]], alpha_num[DIM_SIZE[Y]], + alpha_num[DIM_SIZE[Z]]); + + for (x = start[X]; x <= end[X]; x++) { + for (y = start[Y]; y <= end[Y]; y++) { + for (z = start[Z]; z <= end[Z]; z++) { + slurm_conf_lock(); + snprintf(node_name_tmp, sizeof(node_name_tmp), + "%s%c%c%c", + slurmctld_conf.node_prefix, + alpha_num[x], alpha_num[y], + alpha_num[z]); + slurm_conf_unlock(); + ba_node = ba_copy_node( + &ba_system_ptr->grid[x][y][z]); + ba_node->used = 1; + list_append(bg_record->bg_block_list, ba_node); + node_count++; + } + } + } + return node_count; +} + +static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b) +{ + if (node_a->coord[X] < node_b->coord[X]) + return -1; + else if (node_a->coord[X] > node_b->coord[X]) + return 1; + + if (node_a->coord[Y] < node_b->coord[Y]) + return -1; + else if (node_a->coord[Y] > node_b->coord[Y]) + return 1; + + if (node_a->coord[Z] < node_b->coord[Z]) + return -1; + else if (node_a->coord[Z] > node_b->coord[Z]) + return 1; + + error("You have the node %c%c%c in the list twice", + alpha_num[node_a->coord[X]], + alpha_num[node_a->coord[Y]], + alpha_num[node_a->coord[Z]]); + return 0; +} +#endif //HAVE_BG + + diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.h b/src/plugins/select/bluegene/plugin/bg_record_functions.h new file mode 100644 index 000000000..0c5486931 --- /dev/null +++ 
b/src/plugins/select/bluegene/plugin/bg_record_functions.h @@ -0,0 +1,136 @@ +/*****************************************************************************\ + * bg_record_functions.h - header for creating blocks in a static environment. + * + * $Id: bg_record_functions.h 12954 2008-01-04 20:37:49Z da $ + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#ifndef _BLUEGENE_BG_RECORD_FUNCTIONS_H_ +#define _BLUEGENE_BG_RECORD_FUNCTIONS_H_ + +#if HAVE_CONFIG_H +# include "config.h" +#endif + +#include <stdlib.h> +#include <sys/stat.h> +#include <pwd.h> + +#include "src/common/bitstring.h" +#include "src/common/hostlist.h" +#include "src/common/list.h" +#include "src/common/macros.h" +#include "src/common/node_select.h" +#include "src/common/parse_time.h" +#include "src/slurmctld/slurmctld.h" + +#include "../block_allocator/block_allocator.h" + +typedef struct bg_record { + pm_partition_id_t bg_block_id; /* ID returned from MMCS */ + char *nodes; /* String of nodes in block */ + char *ionodes; /* String of ionodes in block + * NULL if not a small block*/ + char *user_name; /* user using the block */ + char *target_name; /* when a block is freed this + is the name of the user we + want on the block */ + int full_block; /* whether or not block is the full + block */ + int modifying; /* flag to say the block is + being modified or not at + job launch usually */ + uid_t user_uid; /* Owner of block uid */ + rm_partition_state_t state; /* the allocated block */ + int start[BA_SYSTEM_DIMENSIONS];/* start node */ + uint16_t geo[BA_SYSTEM_DIMENSIONS]; /* geometry */ + rm_connection_type_t conn_type; /* Mesh or Torus or NAV */ + rm_partition_mode_t node_use; /* either COPROCESSOR or VIRTUAL */ + rm_partition_t *bg_block; /* structure to hold info from db2 */ + List bg_block_list; /* node list of blocks in block */ + int bp_count; /* size */ + int switch_count; /* number of switches used. */ + int boot_state; /* check to see if boot failed. + -1 = fail, + 0 = not booting, + 1 = booting */ + int boot_count; /* number of attemts boot attempts */ + bitstr_t *bitmap; /* bitmap to check the name + of block */ + bitstr_t *ionode_bitmap; /* for small blocks bitmap to + keep track which ionodes we + are on. NULL if not a small block*/ + struct job_record *job_ptr; /* pointer to job running on + * block or NULL if no job */ + int job_running; /* job id of job running of if + * block is in an error state + * BLOCK_ERROR_STATE */ + int cpus_per_bp; /* count of cpus per base part */ + uint32_t node_cnt; /* count of nodes per block */ + uint16_t quarter; /* used for small blocks + determine quarter of BP */ + uint16_t nodecard; /* used for small blocks + determine nodecard of quarter */ + char *blrtsimage; /* BlrtsImage for this block */ + char *linuximage; /* LinuxImage for this block */ + char *mloaderimage; /* mloaderImage for this block */ + char *ramdiskimage; /* RamDiskImage for this block */ + struct bg_record *original; /* if this is a copy this is a + pointer to the original */ +} bg_record_t; + +/* Log a bg_record's contents */ +extern void print_bg_record(bg_record_t *record); +extern void destroy_bg_record(void *object); +extern int block_exist_in_list(List my_list, bg_record_t *bg_record); +extern void process_nodes(bg_record_t *bg_reord, bool startup); +extern List copy_bg_list(List in_list); +extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record); + +/* return bg_record from a bg_list */ +extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id); + +/* change username of a block bg_record_t target_name needs to be + updated before call of function. 
+*/ +extern int update_block_user(bg_record_t *bg_block_id, int set); +extern void drain_as_needed(bg_record_t *bg_record, char *reason); + +extern int set_ionodes(bg_record_t *bg_record); + +extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq); + +extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size); + +#endif /* _BLUEGENE_BG_RECORD_FUNCTIONS_H_ */ diff --git a/src/plugins/select/bluegene/plugin/block_sys.c b/src/plugins/select/bluegene/plugin/block_sys.c index 20f7288aa..eb67ba679 100755 --- a/src/plugins/select/bluegene/plugin/block_sys.c +++ b/src/plugins/select/bluegene/plugin/block_sys.c @@ -1,7 +1,7 @@ /*****************************************************************************\ * block_sys.c - component used for wiring up the blocks * - * $Id: block_sys.c 11400 2007-04-24 18:50:38Z da $ + * $Id: block_sys.c 13960 2008-04-30 21:45:26Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -654,7 +654,7 @@ int read_bg_blocks() bg_record->bg_block_id, bg_record->state); - process_nodes(bg_record); + process_nodes(bg_record, false); if(bluegene_layout_mode == LAYOUT_DYNAMIC) { bg_record_t *tmp_record = xmalloc(sizeof(bg_record_t)); diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c index 30684bf23..e6cf2017f 100644 --- a/src/plugins/select/bluegene/plugin/bluegene.c +++ b/src/plugins/select/bluegene/plugin/bluegene.c @@ -1,7 +1,7 @@ /*****************************************************************************\ * bluegene.c - blue gene node configuration processing module. * - * $Id: bluegene.c 13271 2008-02-14 20:02:00Z da $ + * $Id: bluegene.c 13924 2008-04-23 06:24:55Z da $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
@@ -37,11 +37,9 @@ \*****************************************************************************/ #include "bluegene.h" -#include "src/slurmctld/trigger_mgr.h" +#include "defined_block.h" #include <stdio.h> -#define BUFSIZE 4096 -#define BITSIZE 128 #define MMCS_POLL_TIME 120 /* poll MMCS for down switches and nodes * every 120 secs */ #define BG_POLL_TIME 0 /* poll bg blocks every 3 secs */ @@ -55,7 +53,6 @@ rm_BGL_t *bg = NULL; List bg_list = NULL; /* total list of bg_record entries */ List bg_curr_block_list = NULL; /* current bg blocks in bluegene.conf*/ -List bg_found_block_list = NULL; /* found bg blocks already on system */ List bg_job_block_list = NULL; /* jobs running in these blocks */ List bg_booted_block_list = NULL; /* blocks that are booted */ List bg_freeing_list = NULL; /* blocks that being freed */ @@ -97,21 +94,12 @@ int max_dim[BA_SYSTEM_DIMENSIONS] = { 0 }; # endif #endif -/* some local functions */ -#ifdef HAVE_BG -static int _addto_node_list(bg_record_t *bg_record, int *start, int *end); -static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b); -#endif static void _set_bg_lists(); -static int _validate_config_nodes(void); +static int _validate_config_nodes(List *bg_found_block_list); static int _bg_record_cmpf_inc(bg_record_t *rec_a, bg_record_t *rec_b); -static int _delete_old_blocks(void); +static int _delete_old_blocks(List bg_found_block_list); static char *_get_bg_conf(void); -static int _split_block(bg_record_t *bg_record, int procs); -static int _breakup_blocks(ba_request_t *request, List my_block_list); -static bg_record_t *_create_small_record(bg_record_t *bg_record, - uint16_t quarter, uint16_t nodecard); static int _reopen_bridge_log(void); /* Initialize all plugin variables */ @@ -159,10 +147,6 @@ extern void fini_bg(void) list_destroy(bg_curr_block_list); bg_curr_block_list = NULL; } - if (bg_found_block_list) { - list_destroy(bg_found_block_list); - bg_found_block_list = NULL; - } if (bg_job_block_list) { list_destroy(bg_job_block_list); bg_job_block_list = NULL; @@ -217,487 +201,6 @@ extern void fini_bg(void) ba_fini(); } -extern void print_bg_record(bg_record_t* bg_record) -{ - if (!bg_record) { - error("print_bg_record, record given is null"); - return; - } -#if _DEBUG - info(" bg_record: "); - if (bg_record->bg_block_id) - info("\tbg_block_id: %s", bg_record->bg_block_id); - info("\tnodes: %s", bg_record->nodes); - info("\tsize: %d BPs %u Nodes %d cpus", - bg_record->bp_count, - bg_record->node_cnt, - bg_record->cpus_per_bp * bg_record->bp_count); - info("\tgeo: %ux%ux%u", bg_record->geo[X], bg_record->geo[Y], - bg_record->geo[Z]); - info("\tconn_type: %s", convert_conn_type(bg_record->conn_type)); - info("\tnode_use: %s", convert_node_use(bg_record->node_use)); - if (bg_record->bitmap) { - char bitstring[BITSIZE]; - bit_fmt(bitstring, BITSIZE, bg_record->bitmap); - info("\tbitmap: %s", bitstring); - } -#else -{ - char tmp_char[256]; - format_node_name(bg_record, tmp_char, sizeof(tmp_char)); - info("Record: BlockID:%s Nodes:%s Conn:%s", - bg_record->bg_block_id, tmp_char, - convert_conn_type(bg_record->conn_type)); -} -#endif -} - -extern void destroy_bg_record(void *object) -{ - bg_record_t* bg_record = (bg_record_t*) object; - - if (bg_record) { - xfree(bg_record->bg_block_id); - xfree(bg_record->nodes); - xfree(bg_record->ionodes); - xfree(bg_record->user_name); - xfree(bg_record->target_name); - if(bg_record->bg_block_list) - list_destroy(bg_record->bg_block_list); - FREE_NULL_BITMAP(bg_record->bitmap); - 
FREE_NULL_BITMAP(bg_record->ionode_bitmap); - - xfree(bg_record->blrtsimage); - xfree(bg_record->linuximage); - xfree(bg_record->mloaderimage); - xfree(bg_record->ramdiskimage); - - xfree(bg_record); - } -} - -extern int block_exist_in_list(List my_list, bg_record_t *bg_record) -{ - ListIterator itr = list_iterator_create(my_list); - bg_record_t *found_record = NULL; - int rc = 0; - - while ((found_record = (bg_record_t *) list_next(itr)) != NULL) { - /* check for full node bitmap compare */ - if(bit_equal(bg_record->bitmap, found_record->bitmap) - && bit_equal(bg_record->ionode_bitmap, - found_record->ionode_bitmap)) { - if(bg_record->ionodes) - debug3("This block %s[%s] " - "is already in the list %s", - bg_record->nodes, - bg_record->ionodes, - found_record->bg_block_id); - else - debug3("This block %s " - "is already in the list %s", - bg_record->nodes, - found_record->bg_block_id); - - rc = 1; - break; - } - } - list_iterator_destroy(itr); - return rc; -} - -extern void process_nodes(bg_record_t *bg_record) -{ -#ifdef HAVE_BG - int j=0, number; - int start[BA_SYSTEM_DIMENSIONS]; - int end[BA_SYSTEM_DIMENSIONS]; - ListIterator itr; - ba_node_t* ba_node = NULL; - - if(!bg_record->bg_block_list - || !list_count(bg_record->bg_block_list)) { - if(!bg_record->bg_block_list) { - bg_record->bg_block_list = - list_create(destroy_ba_node); - } - bg_record->bp_count = 0; - - while (bg_record->nodes[j] != '\0') { - if ((bg_record->nodes[j] == '[' - || bg_record->nodes[j] == ',') - && (bg_record->nodes[j+8] == ']' - || bg_record->nodes[j+8] == ',') - && (bg_record->nodes[j+4] == 'x' - || bg_record->nodes[j+4] == '-')) { - j++; - number = xstrntol(bg_record->nodes + j, - NULL, BA_SYSTEM_DIMENSIONS, - HOSTLIST_BASE); - start[X] = number / - (HOSTLIST_BASE * HOSTLIST_BASE); - start[Y] = (number % - (HOSTLIST_BASE * HOSTLIST_BASE)) - / HOSTLIST_BASE; - start[Z] = (number % HOSTLIST_BASE); - j += 4; - number = xstrntol(bg_record->nodes + j, - NULL, 3, HOSTLIST_BASE); - end[X] = number / - (HOSTLIST_BASE * HOSTLIST_BASE); - end[Y] = (number - % (HOSTLIST_BASE * HOSTLIST_BASE)) - / HOSTLIST_BASE; - end[Z] = (number % HOSTLIST_BASE); - j += 3; - if(!bg_record->bp_count) { - bg_record->start[X] = start[X]; - bg_record->start[Y] = start[Y]; - bg_record->start[Z] = start[Z]; - debug2("start is %dx%dx%d", - bg_record->start[X], - bg_record->start[Y], - bg_record->start[Z]); - } - bg_record->bp_count += _addto_node_list( - bg_record, - start, - end); - if(bg_record->nodes[j] != ',') - break; - j--; - } else if((bg_record->nodes[j] >= '0' - && bg_record->nodes[j] <= '9') - || (bg_record->nodes[j] >= 'A' - && bg_record->nodes[j] <= 'Z')) { - - number = xstrntol(bg_record->nodes + j, - NULL, BA_SYSTEM_DIMENSIONS, - HOSTLIST_BASE); - start[X] = number / - (HOSTLIST_BASE * HOSTLIST_BASE); - start[Y] = (number % - (HOSTLIST_BASE * HOSTLIST_BASE)) - / HOSTLIST_BASE; - start[Z] = (number % HOSTLIST_BASE); - j+=3; - if(!bg_record->bp_count) { - bg_record->start[X] = start[X]; - bg_record->start[Y] = start[Y]; - bg_record->start[Z] = start[Z]; - debug2("start is %dx%dx%d", - bg_record->start[X], - bg_record->start[Y], - bg_record->start[Z]); - } - bg_record->bp_count += _addto_node_list( - bg_record, - start, - start); - if(bg_record->nodes[j] != ',') - break; - j--; - } - j++; - } - } - - bg_record->geo[X] = 0; - bg_record->geo[Y] = 0; - bg_record->geo[Z] = 0; - end[X] = -1; - end[Y] = -1; - end[Z] = -1; - - list_sort(bg_record->bg_block_list, (ListCmpF) _ba_node_cmpf_inc); - - itr = 
list_iterator_create(bg_record->bg_block_list); - while ((ba_node = list_next(itr)) != NULL) { - if(!ba_node->used) - continue; - debug4("%c%c%c is included in this block", - alpha_num[ba_node->coord[X]], - alpha_num[ba_node->coord[Y]], - alpha_num[ba_node->coord[Z]]); - - if(ba_node->coord[X]>end[X]) { - bg_record->geo[X]++; - end[X] = ba_node->coord[X]; - } - if(ba_node->coord[Y]>end[Y]) { - bg_record->geo[Y]++; - end[Y] = ba_node->coord[Y]; - } - if(ba_node->coord[Z]>end[Z]) { - bg_record->geo[Z]++; - end[Z] = ba_node->coord[Z]; - } - } - list_iterator_destroy(itr); - debug3("geo = %c%c%c bp count is %d\n", - alpha_num[bg_record->geo[X]], - alpha_num[bg_record->geo[Y]], - alpha_num[bg_record->geo[Z]], - bg_record->bp_count); - - if ((bg_record->geo[X] == DIM_SIZE[X]) - && (bg_record->geo[Y] == DIM_SIZE[Y]) - && (bg_record->geo[Z] == DIM_SIZE[Z])) { - bg_record->full_block = 1; - } - -#ifndef HAVE_BG_FILES - max_dim[X] = MAX(max_dim[X], end[X]); - max_dim[Y] = MAX(max_dim[Y], end[Y]); - max_dim[Z] = MAX(max_dim[Z], end[Z]); -#endif - - if (node_name2bitmap(bg_record->nodes, - false, - &bg_record->bitmap)) { - fatal("1 Unable to convert nodes %s to bitmap", - bg_record->nodes); - } -#endif - return; -} - -extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record) -{ - int i; - ListIterator itr = NULL; - ba_node_t *ba_node = NULL, *new_ba_node = NULL; - - xfree(sec_record->bg_block_id); - sec_record->bg_block_id = xstrdup(fir_record->bg_block_id); - xfree(sec_record->nodes); - sec_record->nodes = xstrdup(fir_record->nodes); - xfree(sec_record->ionodes); - sec_record->ionodes = xstrdup(fir_record->ionodes); - xfree(sec_record->user_name); - sec_record->user_name = xstrdup(fir_record->user_name); - xfree(sec_record->target_name); - sec_record->target_name = xstrdup(fir_record->target_name); - - xfree(sec_record->blrtsimage); - sec_record->blrtsimage = xstrdup(fir_record->blrtsimage); - xfree(sec_record->linuximage); - sec_record->linuximage = xstrdup(fir_record->linuximage); - xfree(sec_record->mloaderimage); - sec_record->mloaderimage = xstrdup(fir_record->mloaderimage); - xfree(sec_record->ramdiskimage); - sec_record->ramdiskimage = xstrdup(fir_record->ramdiskimage); - - sec_record->user_uid = fir_record->user_uid; - sec_record->state = fir_record->state; - sec_record->conn_type = fir_record->conn_type; - sec_record->node_use = fir_record->node_use; - sec_record->bp_count = fir_record->bp_count; - sec_record->switch_count = fir_record->switch_count; - sec_record->boot_state = fir_record->boot_state; - sec_record->boot_count = fir_record->boot_count; - sec_record->full_block = fir_record->full_block; - - for(i=0;i<BA_SYSTEM_DIMENSIONS;i++) { - sec_record->geo[i] = fir_record->geo[i]; - sec_record->start[i] = fir_record->start[i]; - } - - FREE_NULL_BITMAP(sec_record->bitmap); - if(fir_record->bitmap - && (sec_record->bitmap = bit_copy(fir_record->bitmap)) == NULL) { - error("Unable to copy bitmap for %s", fir_record->nodes); - sec_record->bitmap = NULL; - } - FREE_NULL_BITMAP(sec_record->ionode_bitmap); - if(fir_record->ionode_bitmap - && (sec_record->ionode_bitmap - = bit_copy(fir_record->ionode_bitmap)) == NULL) { - error("Unable to copy ionode_bitmap for %s", - fir_record->nodes); - sec_record->ionode_bitmap = NULL; - } - if(sec_record->bg_block_list) - list_destroy(sec_record->bg_block_list); - sec_record->bg_block_list = list_create(destroy_ba_node); - if(fir_record->bg_block_list) { - itr = list_iterator_create(fir_record->bg_block_list); - while((ba_node = 
list_next(itr))) { - new_ba_node = ba_copy_node(ba_node); - list_push(sec_record->bg_block_list, new_ba_node); - } - list_iterator_destroy(itr); - } - sec_record->job_running = fir_record->job_running; - sec_record->cpus_per_bp = fir_record->cpus_per_bp; - sec_record->node_cnt = fir_record->node_cnt; - sec_record->quarter = fir_record->quarter; - sec_record->nodecard = fir_record->nodecard; -} - -extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id) -{ - ListIterator itr; - bg_record_t *bg_record = NULL; - - if(!bg_block_id) - return NULL; - - if(my_list) { - slurm_mutex_lock(&block_state_mutex); - itr = list_iterator_create(my_list); - while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { - if(bg_record->bg_block_id) - if (!strcmp(bg_record->bg_block_id, - bg_block_id)) - break; - } - list_iterator_destroy(itr); - slurm_mutex_unlock(&block_state_mutex); - if(bg_record) - return bg_record; - else - return NULL; - } else { - error("find_bg_record_in_list: no list"); - return NULL; - } - -} -/* All changes to the bg_list target_name must - be done before this function is called. - also slurm_conf_lock() must be called before calling this - function along with slurm_conf_unlock() afterwards. -*/ -extern int update_block_user(bg_record_t *bg_record, int set) -{ - struct passwd *pw_ent = NULL; - - if(!bg_record->target_name) { - error("Must set target_name to run update_block_user."); - return -1; - } - if(!bg_record->user_name) { - error("No user_name"); - bg_record->user_name = xstrdup(slurmctld_conf.slurm_user_name); - } -#ifdef HAVE_BG_FILES - int rc=0; - if(set) { - if((rc = remove_all_users(bg_record->bg_block_id, - bg_record->target_name)) - == REMOVE_USER_ERR) { - error("1 Something happened removing " - "users from block %s", - bg_record->bg_block_id); - return -1; - } else if (rc == REMOVE_USER_NONE) { - if (strcmp(bg_record->target_name, - slurmctld_conf.slurm_user_name)) { - info("Adding user %s to Block %s", - bg_record->target_name, - bg_record->bg_block_id); - - if ((rc = bridge_add_block_user( - bg_record->bg_block_id, - bg_record->target_name)) - != STATUS_OK) { - error("bridge_add_block_user" - "(%s,%s): %s", - bg_record->bg_block_id, - bg_record->target_name, - bg_err_str(rc)); - return -1; - } - } - } - } -#endif - - if(strcmp(bg_record->target_name, bg_record->user_name)) { - xfree(bg_record->user_name); - bg_record->user_name = xstrdup(bg_record->target_name); - if((pw_ent = getpwnam(bg_record->user_name)) == NULL) { - error("getpwnam(%s): %m", bg_record->user_name); - return -1; - } else { - bg_record->user_uid = pw_ent->pw_uid; - } - return 1; - } - - return 0; -} - -/* If any nodes in node_list are drained, draining, or down, - * then just return - * else drain all of the nodes - * This function lets us drain an entire bgblock only if - * we have not already identified a specific node as bad. 
*/ -extern void drain_as_needed(bg_record_t *bg_record, char *reason) -{ - bool needed = true; - hostlist_t hl; - char *host = NULL; - char bg_down_node[128]; - - if(bg_record->job_running > NO_JOB_RUNNING) - slurm_fail_job(bg_record->job_running); - - /* small blocks */ - if(bg_record->cpus_per_bp != procs_per_node) { - debug2("small block"); - goto end_it; - } - - /* at least one base partition */ - hl = hostlist_create(bg_record->nodes); - if (!hl) { - slurm_drain_nodes(bg_record->nodes, reason); - return; - } - while ((host = hostlist_shift(hl))) { - if (node_already_down(bg_down_node)) { - needed = false; - free(host); - break; - } - free(host); - } - hostlist_destroy(hl); - - if (needed) { - slurm_drain_nodes(bg_record->nodes, reason); - } -end_it: - while(bg_record->job_running > NO_JOB_RUNNING) { - debug2("block %s is still running job %d", - bg_record->bg_block_id, bg_record->job_running); - sleep(1); - } - - slurm_mutex_lock(&block_state_mutex); - error("Setting Block %s to ERROR state.", bg_record->bg_block_id); - bg_record->job_running = BLOCK_ERROR_STATE; - bg_record->state = RM_PARTITION_ERROR; - slurm_mutex_unlock(&block_state_mutex); - trigger_block_error(); - return; -} - -extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size) -{ - if(bg_record->ionodes) { - snprintf(buf, buf_size, "%s[%s]", - bg_record->nodes, - bg_record->ionodes); - } else { - snprintf(buf, buf_size, "%s", bg_record->nodes); - } - return SLURM_SUCCESS; -} - extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b) { bitstr_t *my_bitmap = NULL; @@ -886,10 +389,8 @@ extern char* convert_node_use(rm_partition_mode_t pt) extern void sort_bg_record_inc_size(List records){ if (records == NULL) return; - slurm_mutex_lock(&block_state_mutex); list_sort(records, (ListCmpF) _bg_record_cmpf_inc); last_bg_update = time(NULL); - slurm_mutex_unlock(&block_state_mutex); } /* @@ -925,659 +426,21 @@ extern void *bluegene_agent(void *args) if((rc = update_freeing_block_list()) == -1) error("Error with " - "update_block_list 2"); - } - } - } - - if (difftime(now, last_mmcs_test) >= MMCS_POLL_TIME) { - if (agent_fini) /* don't bother */ - return NULL; /* quit now */ - last_mmcs_test = now; - test_mmcs_failures(); /* can run for a while */ - } - - sleep(1); - } - return NULL; -} - -/* - * create_defined_blocks - create the static blocks that will be used - * for scheduling, all partitions must be able to be created and booted - * at once. - * IN - int overlapped, 1 if partitions are to be overlapped, 0 if they are - * static. 
- * RET - success of fitting all configurations - */ -extern int create_defined_blocks(bg_layout_t overlapped) -{ - int rc = SLURM_SUCCESS; - - ListIterator itr; - bg_record_t *bg_record = NULL; - ListIterator itr_found; - int i; - bg_record_t *found_record = NULL; - int geo[BA_SYSTEM_DIMENSIONS]; - char temp[256]; - List results = NULL; - -#ifdef HAVE_BG_FILES - init_wires(); -#endif - slurm_mutex_lock(&block_state_mutex); - reset_ba_system(false); - if(bg_list) { - itr = list_iterator_create(bg_list); - while((bg_record = list_next(itr))) { - if(bg_found_block_list) { - itr_found = list_iterator_create( - bg_found_block_list); - while ((found_record = (bg_record_t*) - list_next(itr_found)) != NULL) { -/* info("%s.%d.%d ?= %s.%d.%d\n", */ -/* bg_record->nodes, */ -/* bg_record->quarter, */ -/* bg_record->nodecard, */ -/* found_record->nodes, */ -/* found_record->quarter, */ -/* found_record->nodecard); */ - - if ((bit_equal(bg_record->bitmap, - found_record->bitmap)) - && (bg_record->quarter == - found_record->quarter) - && (bg_record->nodecard == - found_record->nodecard)) { - /* don't reboot this one */ - break; - } - } - list_iterator_destroy(itr_found); - } else { - error("create_defined_blocks: " - "no bg_found_block_list 1"); - } - if(bg_record->bp_count>0 - && !bg_record->full_block - && bg_record->cpus_per_bp == procs_per_node) { - char *name = NULL; - if(overlapped == LAYOUT_OVERLAP) { - reset_ba_system(false); - set_all_bps_except(bg_record->nodes); - } - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - geo[i] = bg_record->geo[i]; - debug2("adding %s %c%c%c %c%c%c", - bg_record->nodes, - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]], - alpha_num[geo[X]], - alpha_num[geo[Y]], - alpha_num[geo[Z]]); - if(bg_record->bg_block_list - && list_count(bg_record->bg_block_list)) { - if(check_and_set_node_list( - bg_record->bg_block_list) - == SLURM_ERROR) { - debug2("something happened in " - "the load of %s" - "Did you use smap to " - "make the " - "bluegene.conf file?", - bg_record->bg_block_id); - list_iterator_destroy(itr); - slurm_mutex_unlock( - &block_state_mutex); - return SLURM_ERROR; - } - } else { - results = list_create(NULL); - name = set_bg_block( - results, - bg_record->start, - geo, - bg_record->conn_type); - if(!name) { - error("I was unable to " - "make the " - "requested block."); - list_destroy(results); - list_iterator_destroy(itr); - slurm_mutex_unlock( - &block_state_mutex); - return SLURM_ERROR; - } - slurm_conf_lock(); - snprintf(temp, sizeof(temp), "%s%s", - slurmctld_conf.node_prefix, - name); - slurm_conf_unlock(); - xfree(name); - if(strcmp(temp, bg_record->nodes)) { - fatal("given list of %s " - "but allocated %s, " - "your order might be " - "wrong in the " - "bluegene.conf", - bg_record->nodes, - temp); - } - if(bg_record->bg_block_list) - list_destroy(bg_record-> - bg_block_list); - bg_record->bg_block_list = - list_create(destroy_ba_node); - copy_node_path( - results, - bg_record->bg_block_list); - list_destroy(results); - } - } - if(found_record == NULL) { - if(bg_record->full_block) { - /* if this is defined we need - to remove it since we are - going to try to create it - later on overlap systems - this doesn't matter, but - since we don't clear the - table on static mode we - can't do it here or it just - won't work since other - wires will be or are - already set - */ - list_remove(itr); - continue; - } - if((rc = configure_block(bg_record)) - == SLURM_ERROR) { - list_iterator_destroy(itr); - 
slurm_mutex_unlock(&block_state_mutex); - return rc; - } - print_bg_record(bg_record); - } - } - list_iterator_destroy(itr); - } else { - error("create_defined_blocks: no bg_list 2"); - slurm_mutex_unlock(&block_state_mutex); - return SLURM_ERROR; - } - slurm_mutex_unlock(&block_state_mutex); - create_full_system_block(); - sort_bg_record_inc_size(bg_list); - -#ifdef _PRINT_BLOCKS_AND_EXIT - if(bg_list) { - itr = list_iterator_create(bg_list); - debug("\n\n"); - while ((found_record = (bg_record_t *) list_next(itr)) - != NULL) { - print_bg_record(found_record); - } - list_iterator_destroy(itr); - } else { - error("create_defined_blocks: no bg_list 5"); - } - exit(0); -#endif /* _PRINT_BLOCKS_AND_EXIT */ - rc = SLURM_SUCCESS; - //exit(0); - return rc; -} - - - -/* - * create_dynamic_block - create a new block to be used for a new - * job allocation. This will be added to the booted and job bg_lists. - * RET - success of fitting configuration in the running system. - */ -extern int create_dynamic_block(ba_request_t *request, List my_block_list) -{ - int rc = SLURM_SUCCESS; - - ListIterator itr; - bg_record_t *bg_record = NULL; - List results = NULL; - List requests = NULL; - uint16_t num_quarter=0, num_nodecard=0; - bitstr_t *my_bitmap = NULL; - int geo[BA_SYSTEM_DIMENSIONS]; - int i; - blockreq_t blockreq; - - slurm_mutex_lock(&block_state_mutex); - - if(my_block_list) { - reset_ba_system(true); - itr = list_iterator_create(my_block_list); - while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { - if(!my_bitmap) { - my_bitmap = - bit_alloc(bit_size(bg_record->bitmap)); - } - - if(!bit_super_set(bg_record->bitmap, my_bitmap)) { - bit_or(my_bitmap, bg_record->bitmap); - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - geo[i] = bg_record->geo[i]; - debug2("adding %s %c%c%c %c%c%c", - bg_record->nodes, - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]], - alpha_num[geo[X]], - alpha_num[geo[Y]], - alpha_num[geo[Z]]); - - if(check_and_set_node_list( - bg_record->bg_block_list) - == SLURM_ERROR) { - debug2("something happened in " - "the load of %s", - bg_record->bg_block_id); - list_iterator_destroy(itr); - slurm_mutex_unlock(&block_state_mutex); - FREE_NULL_BITMAP(my_bitmap); - return SLURM_ERROR; - } - //set_node_list(bg_record->bg_block_list); -/* #endif */ - } - } - list_iterator_destroy(itr); - FREE_NULL_BITMAP(my_bitmap); - } else { - reset_ba_system(false); - debug("No list was given"); - } - - if(request->avail_node_bitmap) { - int j=0, number; - int x,y,z; - char *nodes = NULL; - bitstr_t *bitmap = bit_alloc(node_record_count); - int start[BA_SYSTEM_DIMENSIONS]; - int end[BA_SYSTEM_DIMENSIONS]; - - /* we want the bps that aren't in this partition to - * mark them as used - */ - bit_or(bitmap, request->avail_node_bitmap); - bit_not(bitmap); - nodes = bitmap2node_name(bitmap); - //info("not using %s", nodes); - while(nodes[j] != '\0') { - if ((nodes[j] == '[' || nodes[j] == ',') - && (nodes[j+8] == ']' || nodes[j+8] == ',') - && (nodes[j+4] == 'x' || nodes[j+4] == '-')) { - j++; - number = xstrntol(nodes + j, - NULL, BA_SYSTEM_DIMENSIONS, - HOSTLIST_BASE); - start[X] = number / - (HOSTLIST_BASE * HOSTLIST_BASE); - start[Y] = (number % - (HOSTLIST_BASE * HOSTLIST_BASE)) - / HOSTLIST_BASE; - start[Z] = (number % HOSTLIST_BASE); - j += 4; - number = xstrntol(nodes + j, - NULL, 3, HOSTLIST_BASE); - end[X] = number / - (HOSTLIST_BASE * HOSTLIST_BASE); - end[Y] = (number - % (HOSTLIST_BASE * HOSTLIST_BASE)) - / HOSTLIST_BASE; - end[Z] = (number % 
HOSTLIST_BASE); - j += 3; - for (x = start[X]; x <= end[X]; x++) { - for (y = start[Y]; y <= end[Y]; y++) { - for (z = start[Z]; - z <= end[Z]; z++) { - ba_system_ptr-> - grid[x] -#ifdef HAVE_BG - [y][z] -#endif - .used = 1; - } - } - } - - if(nodes[j] != ',') - break; - j--; - } else if((nodes[j] >= '0' && nodes[j] <= '9') - || (nodes[j] >= 'A' && nodes[j] <= 'Z')) { - - number = xstrntol(nodes + j, - NULL, BA_SYSTEM_DIMENSIONS, - HOSTLIST_BASE); - x = number / (HOSTLIST_BASE * HOSTLIST_BASE); - y = (number % (HOSTLIST_BASE * HOSTLIST_BASE)) - / HOSTLIST_BASE; - z = (number % HOSTLIST_BASE); - j+=3; - ba_system_ptr->grid[x] -#ifdef HAVE_BG - [y][z] -#endif - .used = 1; - - if(nodes[j] != ',') - break; - j--; - } - j++; - } - xfree(nodes); - FREE_NULL_BITMAP(bitmap); - } - - if(request->size==1 && request->procs < bluegene_bp_node_cnt) { - request->conn_type = SELECT_SMALL; - if(request->procs == (procs_per_node/16)) { - if(!bluegene_nodecard_ionode_cnt) { - error("can't create this size %d " - "on this system numpsets is %d", - request->procs, - bluegene_numpsets); - goto finished; - } - - num_nodecard=4; - num_quarter=3; - } else { - if(!bluegene_quarter_ionode_cnt) { - error("can't create this size %d " - "on this system numpsets is %d", - request->procs, - bluegene_numpsets); - goto finished; - } - num_quarter=4; - } - - if(_breakup_blocks(request, my_block_list) != SLURM_SUCCESS) { - debug2("small block not able to be placed"); - //rc = SLURM_ERROR; - } else - goto finished; - } - - if(request->conn_type == SELECT_NAV) - request->conn_type = SELECT_TORUS; - - if(!new_ba_request(request)) { - error("Problems with request for size %d geo %dx%dx%d", - request->size, - request->geometry[X], - request->geometry[Y], - request->geometry[Z]); - rc = SLURM_ERROR; - goto finished; - } - - if(!list_count(bg_list) || !my_block_list) { - bg_record = NULL; - goto no_list; - } - - /*Try to put block starting in the smallest of the exisiting blocks*/ - if(!request->start_req) { - itr = list_iterator_create(bg_list); - while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { - request->rotate_count = 0; - request->elongate_count = 1; - - if(bg_record->job_running == NO_JOB_RUNNING - && (bg_record->quarter == (uint16_t) NO_VAL - || (bg_record->quarter == 0 - && (bg_record->nodecard == (uint16_t) NO_VAL - || bg_record->nodecard == 0)))) { - - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - request->start[i] = - bg_record->start[i]; - debug2("allocating %s %c%c%c %d", - bg_record->nodes, - alpha_num[request->start[X]], - alpha_num[request->start[Y]], - alpha_num[request->start[Z]], - request->size); - request->start_req = 1; - rc = SLURM_SUCCESS; - if(results) - list_delete_all( - results, - &empty_null_destroy_list, ""); - else - results = list_create(NULL); - if (!allocate_block(request, results)){ - debug2("allocate failure for size %d " - "base partitions", - request->size); - rc = SLURM_ERROR; - } else - break; - } - } - list_iterator_destroy(itr); - - request->start_req = 0; - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - request->start[i] = (uint16_t) NO_VAL; - } -no_list: - if(!bg_record) { - rc = SLURM_SUCCESS; - if(results) - list_delete_all(results, - &empty_null_destroy_list, ""); - else - results = list_create(NULL); - if (!allocate_block(request, results)) { - debug("allocate failure for size %d base partitions", - request->size); - rc = SLURM_ERROR; - } - } - - if(rc == SLURM_ERROR || !my_block_list) { - goto finished; - } - /*set up bg_record(s) here */ - requests = 
list_create(destroy_bg_record); - - blockreq.block = request->save_name; - blockreq.blrtsimage = request->blrtsimage; - blockreq.linuximage = request->linuximage; - blockreq.mloaderimage = request->mloaderimage; - blockreq.ramdiskimage = request->ramdiskimage; - blockreq.conn_type = request->conn_type; - blockreq.nodecards = num_nodecard; - blockreq.quarters = num_quarter; - - add_bg_record(requests, results, &blockreq); - - while((bg_record = (bg_record_t *) list_pop(requests)) != NULL) { - if(block_exist_in_list(bg_list, bg_record)) - destroy_bg_record(bg_record); - else { - if(configure_block(bg_record) == SLURM_ERROR) { - destroy_bg_record(bg_record); - error("create_dynamic_block: " - "unable to configure block in api"); - goto finished; - } - - list_append(bg_list, bg_record); - print_bg_record(bg_record); - } - } - -finished: - if(my_block_list) - xfree(request->save_name); - if(request->elongate_geos) - list_destroy(request->elongate_geos); - if(results) - list_destroy(results); - if(requests) - list_destroy(requests); - - slurm_mutex_unlock(&block_state_mutex); - sort_bg_record_inc_size(bg_list); - - return rc; -} - -extern int create_full_system_block() -{ - int rc = SLURM_SUCCESS; - ListIterator itr; - bg_record_t *bg_record = NULL; - char *name = NULL; - List records = NULL; - int geo[BA_SYSTEM_DIMENSIONS]; - int i; - blockreq_t blockreq; - List results = NULL; - - /* Here we are adding a block that in for the entire machine - just in case it isn't in the bluegene.conf file. - */ - slurm_mutex_lock(&block_state_mutex); - -#ifdef HAVE_BG_FILES - geo[X] = DIM_SIZE[X] - 1; - geo[Y] = DIM_SIZE[Y] - 1; - geo[Z] = DIM_SIZE[Z] - 1; -#else - geo[X] = max_dim[X]; - geo[Y] = max_dim[Y]; - geo[Z] = max_dim[Z]; -#endif - slurm_conf_lock(); - i = (10+strlen(slurmctld_conf.node_prefix)); - name = xmalloc(i); - if((geo[X] == 0) && (geo[Y] == 0) && (geo[Z] == 0)) - snprintf(name, i, "%s000", - slurmctld_conf.node_prefix); - else - snprintf(name, i, "%s[000x%c%c%c]", - slurmctld_conf.node_prefix, - alpha_num[geo[X]], alpha_num[geo[Y]], - alpha_num[geo[Z]]); - slurm_conf_unlock(); - - if(bg_found_block_list) { - itr = list_iterator_create(bg_found_block_list); - while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { - if (!strcmp(name, bg_record->nodes)) { - xfree(name); - list_iterator_destroy(itr); - /* don't create total already there */ - goto no_total; - } - } - list_iterator_destroy(itr); - } else { - error("create_full_system_block: no bg_found_block_list 2"); - } - - if(bg_list) { - itr = list_iterator_create(bg_list); - while ((bg_record = (bg_record_t *) list_next(itr)) - != NULL) { - if (!strcmp(name, bg_record->nodes)) { - xfree(name); - list_iterator_destroy(itr); - /* don't create total already there */ - goto no_total; + "update_block_list 2"); + } } } - list_iterator_destroy(itr); - } else { - xfree(name); - error("create_overlapped_blocks: no bg_list 3"); - rc = SLURM_ERROR; - goto no_total; - } - records = list_create(destroy_bg_record); - blockreq.block = name; - blockreq.blrtsimage = NULL; - blockreq.linuximage = NULL; - blockreq.mloaderimage = NULL; - blockreq.ramdiskimage = NULL; - blockreq.conn_type = SELECT_TORUS; - blockreq.nodecards = 0; - blockreq.quarters = 0; - add_bg_record(records, NULL, &blockreq); - xfree(name); - - bg_record = (bg_record_t *) list_pop(records); - if(!bg_record) { - error("Nothing was returned from full system create"); - rc = SLURM_ERROR; - goto no_total; - } - reset_ba_system(false); - for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) - geo[i] = 
bg_record->geo[i]; - debug2("adding %s %c%c%c %c%c%c", - bg_record->nodes, - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]], - alpha_num[geo[X]], - alpha_num[geo[Y]], - alpha_num[geo[Z]]); - results = list_create(NULL); - name = set_bg_block(results, - bg_record->start, - geo, - bg_record->conn_type); - if(!name) { - error("I was unable to make the " - "requested block."); - list_destroy(results); - list_iterator_destroy(itr); - slurm_mutex_unlock(&block_state_mutex); - return SLURM_ERROR; - } - xfree(name); - if(bg_record->bg_block_list) - list_destroy(bg_record->bg_block_list); - bg_record->bg_block_list = - list_create(destroy_ba_node); - copy_node_path(results, - bg_record->bg_block_list); - list_destroy(results); + if (difftime(now, last_mmcs_test) >= MMCS_POLL_TIME) { + if (agent_fini) /* don't bother */ + return NULL; /* quit now */ + last_mmcs_test = now; + test_mmcs_failures(); /* can run for a while */ + } - if((rc = configure_block(bg_record)) == SLURM_ERROR) { - error("create_full_system_block: " - "unable to configure block in api"); - destroy_bg_record(bg_record); - goto no_total; + sleep(1); } - - print_bg_record(bg_record); - list_append(bg_list, bg_record); - -no_total: - if(records) - list_destroy(records); - slurm_mutex_unlock(&block_state_mutex); - return rc; + return NULL; } /* must set the protecting mutex if any before this function is called */ @@ -1607,6 +470,57 @@ extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record) return rc; } +/* This is here to remove from the orignal list when dealing with + * copies like above all locks need to be set. This function does not + * free anything you must free it when you are done */ +extern bg_record_t *find_and_remove_org_from_bg_list(List my_list, + bg_record_t *bg_record) +{ + ListIterator itr = list_iterator_create(my_list); + bg_record_t *found_record = NULL; + + while ((found_record = (bg_record_t *) list_next(itr)) != NULL) { + /* check for full node bitmap compare */ + if(bit_equal(bg_record->bitmap, found_record->bitmap) + && bit_equal(bg_record->ionode_bitmap, + found_record->ionode_bitmap)) { + + if(!strcmp(bg_record->bg_block_id, + found_record->bg_block_id)) { + list_remove(itr); + debug2("got the block"); + break; + } + } + } + list_iterator_destroy(itr); + return found_record; +} + +/* This is here to remove from the orignal list when dealing with + * copies like above all locks need to be set */ +extern bg_record_t *find_org_in_bg_list(List my_list, bg_record_t *bg_record) +{ + ListIterator itr = list_iterator_create(my_list); + bg_record_t *found_record = NULL; + + while ((found_record = (bg_record_t *) list_next(itr)) != NULL) { + /* check for full node bitmap compare */ + if(bit_equal(bg_record->bitmap, found_record->bitmap) + && bit_equal(bg_record->ionode_bitmap, + found_record->ionode_bitmap)) { + + if(!strcmp(bg_record->bg_block_id, + found_record->bg_block_id)) { + debug2("got the block"); + break; + } + } + } + list_iterator_destroy(itr); + return found_record; +} + extern int bg_free_block(bg_record_t *bg_record) { #ifdef HAVE_BG_FILES @@ -1744,19 +658,13 @@ extern void *mult_destroy_block(void *args) } slurm_mutex_lock(&block_state_mutex); remove_from_bg_list(bg_list, bg_record); - slurm_mutex_unlock(&block_state_mutex); - - slurm_mutex_lock(&block_state_mutex); list_push(bg_freeing_list, bg_record); - slurm_mutex_unlock(&block_state_mutex); /* * we only are sorting this so when we send it to a * tool such as smap it will be in a 
nice order */ sort_bg_record_inc_size(bg_freeing_list); - - slurm_mutex_lock(&block_state_mutex); if(remove_from_bg_list(bg_job_block_list, bg_record) == SLURM_SUCCESS) { num_unused_cpus += @@ -1914,7 +822,9 @@ extern int read_bg_conf(void) static time_t last_config_update = (time_t) 0; struct stat config_stat; ListIterator itr = NULL; - + /* found bg blocks already on system */ + List bg_found_block_list = list_create(NULL); + debug("Reading the bluegene.conf file"); /* check if config file has changed */ @@ -2128,8 +1038,7 @@ extern int read_bg_conf(void) &count, "BPs", tbl)) { info("WARNING: no blocks defined in bluegene.conf, " "only making full system block"); - i = 0; - create_full_system_block(&i); + create_full_system_block(NULL); } for (i = 0; i < count; i++) { @@ -2138,8 +1047,8 @@ extern int read_bg_conf(void) } //#if 0 /* Check to see if the configs we have are correct */ - if (_validate_config_nodes() == SLURM_ERROR) { - _delete_old_blocks(); + if (_validate_config_nodes(&bg_found_block_list) == SLURM_ERROR) { + _delete_old_blocks(bg_found_block_list); } //#endif /* looking for blocks only I created */ @@ -2147,7 +1056,8 @@ extern int read_bg_conf(void) init_wires(); info("No blocks created until jobs are submitted"); } else { - if (create_defined_blocks(bluegene_layout_mode) + if (create_defined_blocks(bluegene_layout_mode, + bg_found_block_list) == SLURM_ERROR) { /* error in creating the static blocks, so * blocks referenced by submitted jobs won't @@ -2161,314 +1071,24 @@ extern int read_bg_conf(void) slurm_mutex_lock(&block_state_mutex); list_destroy(bg_curr_block_list); bg_curr_block_list = NULL; - list_destroy(bg_found_block_list); - bg_found_block_list = NULL; + if(bg_found_block_list) { + list_destroy(bg_found_block_list); + bg_found_block_list = NULL; + } last_bg_update = time(NULL); blocks_are_created = 1; - slurm_mutex_unlock(&block_state_mutex); sort_bg_record_inc_size(bg_list); + slurm_mutex_unlock(&block_state_mutex); debug("Blocks have finished being created."); s_p_hashtbl_destroy(tbl); return SLURM_SUCCESS; } -extern int set_ionodes(bg_record_t *bg_record) -{ - int i = 0; - int start_bit = 0; - int size = 0; - char bitstring[BITSIZE]; - - if(!bg_record) - return SLURM_ERROR; - /* set the bitmap blank here if it is a full node we don't - want anything set we also don't want the bg_record->ionodes set. 
- */ - bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets); - if(bg_record->quarter == (uint16_t)NO_VAL) { - return SLURM_SUCCESS; - } - - start_bit = bluegene_quarter_ionode_cnt*bg_record->quarter; - - if(bg_record->nodecard != (uint16_t)NO_VAL - && bluegene_nodecard_ionode_cnt) { - start_bit += bluegene_nodecard_ionode_cnt*bg_record->nodecard; - size = bluegene_nodecard_ionode_cnt; - } else - size = bluegene_quarter_ionode_cnt; - size += start_bit; - - if(size == start_bit) { - error("start bit is the same as the end bit %d", size); - return SLURM_ERROR; - } - for(i=start_bit; i<size; i++) - bit_set(bg_record->ionode_bitmap, i); - - bit_fmt(bitstring, BITSIZE, bg_record->ionode_bitmap); - bg_record->ionodes = xstrdup(bitstring); - - return SLURM_SUCCESS; -} - -extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq) -{ - bg_record_t *bg_record = NULL; - bg_record_t *found_record = NULL; - ba_node_t *ba_node = NULL; - ListIterator itr; - struct passwd *pw_ent = NULL; - int i, len; - int small_size = 0; - int small_count = 0; - uint16_t quarter = 0; - uint16_t nodecard = 0; - int node_cnt = 0; - - if(!records) { - fatal("add_bg_record: no records list given"); - } - bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t)); - - slurm_conf_lock(); - bg_record->user_name = - xstrdup(slurmctld_conf.slurm_user_name); - bg_record->target_name = - xstrdup(slurmctld_conf.slurm_user_name); - slurm_conf_unlock(); - if((pw_ent = getpwnam(bg_record->user_name)) == NULL) { - error("getpwnam(%s): %m", bg_record->user_name); - } else { - bg_record->user_uid = pw_ent->pw_uid; - } - - bg_record->bg_block_list = list_create(destroy_ba_node); - if(used_nodes) { - if(copy_node_path(used_nodes, bg_record->bg_block_list) - == SLURM_ERROR) - error("couldn't copy the path for the allocation"); - bg_record->bp_count = list_count(used_nodes); - } - bg_record->quarter = (uint16_t)NO_VAL; - bg_record->nodecard = (uint16_t)NO_VAL; - if(set_ionodes(bg_record) == SLURM_ERROR) { - fatal("add_bg_record: problem creating ionodes"); - } - /* bg_record->boot_state = 0; Implicit */ - /* bg_record->state = 0; Implicit */ - debug2("asking for %s %d %d %s", - blockreq->block, blockreq->quarters, blockreq->nodecards, - convert_conn_type(blockreq->conn_type)); - len = strlen(blockreq->block); - i=0; - while(i<len - && blockreq->block[i] != '[' - && (blockreq->block[i] < '0' || blockreq->block[i] > 'Z' - || (blockreq->block[i] > '9' && blockreq->block[i] < 'A'))) - i++; - - if(i<len) { - len -= i; - slurm_conf_lock(); - len += strlen(slurmctld_conf.node_prefix)+1; - bg_record->nodes = xmalloc(len); - snprintf(bg_record->nodes, len, "%s%s", - slurmctld_conf.node_prefix, blockreq->block+i); - slurm_conf_unlock(); - - } else - fatal("BPs=%s is in a weird format", blockreq->block); - - process_nodes(bg_record); - - bg_record->node_use = SELECT_COPROCESSOR_MODE; - bg_record->conn_type = blockreq->conn_type; - bg_record->cpus_per_bp = procs_per_node; - bg_record->node_cnt = bluegene_bp_node_cnt * bg_record->bp_count; - bg_record->job_running = NO_JOB_RUNNING; - - if(blockreq->blrtsimage) - bg_record->blrtsimage = xstrdup(blockreq->blrtsimage); - else - bg_record->blrtsimage = xstrdup(default_blrtsimage); - - if(blockreq->linuximage) - bg_record->linuximage = xstrdup(blockreq->linuximage); - else - bg_record->linuximage = xstrdup(default_linuximage); - - if(blockreq->mloaderimage) - bg_record->mloaderimage = xstrdup(blockreq->mloaderimage); - else - bg_record->mloaderimage = xstrdup(default_mloaderimage); - - 
if(blockreq->ramdiskimage) - bg_record->ramdiskimage = xstrdup(blockreq->ramdiskimage); - else - bg_record->ramdiskimage = xstrdup(default_ramdiskimage); - - if(bg_record->conn_type != SELECT_SMALL) { - /* this needs to be an append so we keep things in the - order we got them, they will be sorted later */ - list_append(records, bg_record); - /* this isn't a correct list so we need to set it later for - now we just used it to be the bp number */ - if(!used_nodes) { - debug4("we didn't get a request list so we are " - "destroying this bp list"); - list_destroy(bg_record->bg_block_list); - bg_record->bg_block_list = NULL; - } - } else { - debug("adding a small block"); - /* if the ionode cnt for nodecards is 0 then don't - allow a nodecard allocation - */ - if(!bluegene_nodecard_ionode_cnt) - blockreq->nodecards = 0; - - if(blockreq->nodecards==0 && blockreq->quarters==0) { - info("No specs given for this small block, " - "I am spliting this block into 4 quarters"); - blockreq->quarters=4; - } - - i = (blockreq->nodecards*bluegene_nodecard_node_cnt) + - (blockreq->quarters*bluegene_quarter_node_cnt); - if(i != bluegene_bp_node_cnt) - fatal("There is an error in your bluegene.conf file.\n" - "I am unable to request %d nodes in one " - "base partition with %d nodes.", - i, bluegene_bp_node_cnt); - small_count = blockreq->nodecards+blockreq->quarters; - - /* Automatically create 4-way split if - * conn_type == SELECT_SMALL in bluegene.conf - * Here we go through each node listed and do the same thing - * for each node. - */ - itr = list_iterator_create(bg_record->bg_block_list); - while ((ba_node = list_next(itr)) != NULL) { - /* break base partition up into 16 parts */ - small_size = 16; - node_cnt = 0; - quarter = 0; - nodecard = 0; - for(i=0; i<small_count; i++) { - if(i == blockreq->nodecards) { - /* break base partition - up into 4 parts */ - small_size = 4; - } - - if(small_size == 4) - nodecard = (uint16_t)NO_VAL; - else - nodecard = i%4; - found_record = _create_small_record(bg_record, - quarter, - nodecard); - - /* this needs to be an append so we - keep things in the order we got - them, they will be sorted later */ - list_append(records, found_record); - node_cnt += bluegene_bp_node_cnt/small_size; - if(node_cnt == 128) { - node_cnt = 0; - quarter++; - } - } - } - list_iterator_destroy(itr); - destroy_bg_record(bg_record); - } - return SLURM_SUCCESS; -} - -#ifdef HAVE_BG -static int _addto_node_list(bg_record_t *bg_record, int *start, int *end) -{ - int node_count=0; - int x,y,z; - char node_name_tmp[255]; - ba_node_t *ba_node = NULL; - - if ((start[X] < 0) || (start[Y] < 0) || (start[Z] < 0)) { - fatal("bluegene.conf starting coordinate is invalid: %d%d%d", - start[X], start[Y], start[Z]); - } - if ((end[X] >= DIM_SIZE[X]) || (end[Y] >= DIM_SIZE[Y]) - || (end[Z] >= DIM_SIZE[Z])) { - fatal("bluegene.conf matrix size exceeds space defined in " - "slurm.conf %c%c%cx%d%d%d => %c%c%c", - alpha_num[start[X]], alpha_num[start[Y]], - alpha_num[start[Z]], - end[X], end[Y], end[Z], - alpha_num[DIM_SIZE[X]], alpha_num[DIM_SIZE[Y]], - alpha_num[DIM_SIZE[Z]]); - } - debug3("adding bps: %c%c%cx%c%c%c", - alpha_num[start[X]], alpha_num[start[Y]], alpha_num[start[Z]], - alpha_num[end[X]], alpha_num[end[Y]], alpha_num[end[Z]]); - debug3("slurm.conf: %c%c%c", - alpha_num[DIM_SIZE[X]], alpha_num[DIM_SIZE[Y]], - alpha_num[DIM_SIZE[Z]]); - - for (x = start[X]; x <= end[X]; x++) { - for (y = start[Y]; y <= end[Y]; y++) { - for (z = start[Z]; z <= end[Z]; z++) { - slurm_conf_lock(); - 
snprintf(node_name_tmp, sizeof(node_name_tmp), - "%s%c%c%c", - slurmctld_conf.node_prefix, - alpha_num[x], alpha_num[y], - alpha_num[z]); - slurm_conf_unlock(); - ba_node = ba_copy_node( - &ba_system_ptr->grid[x][y][z]); - ba_node->used = 1; - list_append(bg_record->bg_block_list, ba_node); - node_count++; - } - } - } - return node_count; -} - -static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b) -{ - if (node_a->coord[X] < node_b->coord[X]) - return -1; - else if (node_a->coord[X] > node_b->coord[X]) - return 1; - - if (node_a->coord[Y] < node_b->coord[Y]) - return -1; - else if (node_a->coord[Y] > node_b->coord[Y]) - return 1; - - if (node_a->coord[Z] < node_b->coord[Z]) - return -1; - else if (node_a->coord[Z] > node_b->coord[Z]) - return 1; - - error("You have the node %c%c%c in the list twice", - alpha_num[node_a->coord[X]], - alpha_num[node_a->coord[Y]], - alpha_num[node_a->coord[Z]]); - return 0; -} -#endif //HAVE_BG static void _set_bg_lists() { slurm_mutex_lock(&block_state_mutex); - if(bg_found_block_list) - list_destroy(bg_found_block_list); - bg_found_block_list = list_create(NULL); if(bg_booted_block_list) list_destroy(bg_booted_block_list); bg_booted_block_list = list_create(NULL); @@ -2504,12 +1124,15 @@ static void _set_bg_lists() } /* - * Match slurm configuration information with current BG block - * configuration. Return SLURM_SUCCESS if they match, else an error + * _validate_config_nodes - Match slurm configuration information with + * current BG block configuration. + * IN/OUT bg_found_block_list - if NULL is created and then any blocks + * found on the system are then pushed on. + * RET - SLURM_SUCCESS if they match, else an error * code. Writes bg_block_id into bg_list records. */ -static int _validate_config_nodes(void) +static int _validate_config_nodes(List *bg_found_block_list) { int rc = SLURM_ERROR; #ifdef HAVE_BG_FILES @@ -2521,6 +1144,7 @@ static int _validate_config_nodes(void) ListIterator itr_curr; rm_partition_mode_t node_use; char tmp_char[256]; + /* read current bg block info into bg_curr_block_list */ if (read_bg_blocks() == SLURM_ERROR) return SLURM_ERROR; @@ -2535,7 +1159,11 @@ static int _validate_config_nodes(void) while ((init_bg_record = list_next(itr_curr))) if(init_bg_record->full_block) full_system_bg_record = init_bg_record; - + + if(!*bg_found_block_list) + (*bg_found_block_list) = list_create(NULL); + + itr_conf = list_iterator_create(bg_list); while ((bg_record = (bg_record_t*) list_next(itr_conf))) { /* translate hostlist to ranged @@ -2590,7 +1218,7 @@ static int _validate_config_nodes(void) if(bg_record->full_block) full_created = 1; - list_push(bg_found_block_list, bg_record); + list_push(*bg_found_block_list, bg_record); format_node_name(bg_record, tmp_char, sizeof(tmp_char)); info("Existing: BlockID:%s Nodes:%s Conn:%s", @@ -2613,7 +1241,7 @@ static int _validate_config_nodes(void) bg_record = xmalloc(sizeof(bg_record_t)); copy_bg_record(full_system_bg_record, bg_record); list_append(bg_list, bg_record); - list_push(bg_found_block_list, bg_record); + list_push(*bg_found_block_list, bg_record); format_node_name(bg_record, tmp_char, sizeof(tmp_char)); info("Existing: BlockID:%s Nodes:%s Conn:%s", bg_record->bg_block_id, @@ -2669,7 +1297,7 @@ static int _bg_record_cmpf_inc(bg_record_t* rec_a, bg_record_t* rec_b) return 0; } -static int _delete_old_blocks(void) +static int _delete_old_blocks(List bg_found_block_list) { #ifdef HAVE_BG_FILES ListIterator itr_curr, itr_found; @@ -2698,13 +1326,12 @@ static int 
_delete_old_blocks(void) } else { if(bg_curr_block_list) { itr_curr = list_iterator_create(bg_curr_block_list); - while ((init_record = (bg_record_t*) - list_next(itr_curr))) { + while ((init_record = list_next(itr_curr))) { if(bg_found_block_list) { itr_found = list_iterator_create( bg_found_block_list); - while ((found_record = (bg_record_t*) - list_next(itr_found)) + while ((found_record + = list_next(itr_found)) != NULL) { if (!strcmp(init_record-> bg_block_id, @@ -2818,360 +1445,6 @@ static char *_get_bg_conf(void) return rc; } -static int _split_block(bg_record_t *bg_record, int procs) -{ - bg_record_t *found_record = NULL; - bool full_bp = false; - int small_count = 0; - int small_size = 0; - uint16_t num_nodecard = 0, num_quarter = 0; - int i; - int node_cnt = 0; - uint16_t quarter = 0; - uint16_t nodecard = 0; - - if(bg_record->quarter == (uint16_t) NO_VAL) - full_bp = true; - - if(procs == (procs_per_node/16) && bluegene_nodecard_ionode_cnt) { - num_nodecard=4; - if(full_bp) - num_quarter=3; - } else if(full_bp) { - num_quarter = 4; - } else { - error("you asked for something that was already this size"); - return SLURM_ERROR; - } - debug2("asking for %d 32s from a %d block", - num_nodecard, bg_record->node_cnt); - small_count = num_nodecard+num_quarter; - - /* break base partition up into 16 parts */ - small_size = bluegene_bp_node_cnt/bluegene_nodecard_node_cnt; - node_cnt = 0; - if(!full_bp) - quarter = bg_record->quarter; - else - quarter = 0; - nodecard = 0; - for(i=0; i<small_count; i++) { - if(i == num_nodecard) { - /* break base partition up into 4 parts */ - small_size = 4; - } - - if(small_size == 4) - nodecard = (uint16_t)NO_VAL; - else - nodecard = i%4; - found_record = _create_small_record(bg_record, - quarter, - nodecard); - if(block_exist_in_list(bg_list, found_record)) { - destroy_bg_record(found_record); - } else { - if(configure_block(found_record) == SLURM_ERROR) { - destroy_bg_record(found_record); - error("_split_block: " - "unable to configure block in api"); - return SLURM_ERROR; - } - list_append(bg_list, found_record); - print_bg_record(found_record); - } - node_cnt += bluegene_bp_node_cnt/small_size; - if(node_cnt == 128) { - node_cnt = 0; - quarter++; - } - } - - return SLURM_SUCCESS; -} - -static int _breakup_blocks(ba_request_t *request, List my_block_list) -{ - int rc = SLURM_ERROR; - bg_record_t *bg_record = NULL; - ListIterator itr; - int proc_cnt=0; - int total_proc_cnt=0; - uint16_t last_quarter = (uint16_t) NO_VAL; - char tmp_char[256]; - - debug2("proc count = %d size = %d", - request->procs, request->size); - - itr = list_iterator_create(bg_list); - while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { - if(bg_record->job_running != NO_JOB_RUNNING) - continue; - if(bg_record->state != RM_PARTITION_FREE) - continue; - if (request->avail_node_bitmap && - !bit_super_set(bg_record->bitmap, - request->avail_node_bitmap)) { - debug2("bg block %s has nodes not usable by this job", - bg_record->bg_block_id); - continue; - } - - if(request->start_req) { - if ((request->start[X] != bg_record->start[X]) - || (request->start[Y] != bg_record->start[Y]) - || (request->start[Z] != bg_record->start[Z])) { - debug4("small got %c%c%c looking for %c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]], - alpha_num[request->start[X]], - alpha_num[request->start[Y]], - alpha_num[request->start[Z]]); - continue; - } - debug3("small found %c%c%c looking for %c%c%c", - alpha_num[bg_record->start[X]], - 
alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]], - alpha_num[request->start[X]], - alpha_num[request->start[Y]], - alpha_num[request->start[Z]]); - } - proc_cnt = bg_record->bp_count * - bg_record->cpus_per_bp; - if(proc_cnt == request->procs) { - debug("found it here %s, %s", - bg_record->bg_block_id, - bg_record->nodes); - request->save_name = xmalloc(4); - snprintf(request->save_name, - 4, - "%c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]]); - rc = SLURM_SUCCESS; - goto finished; - } - if(bg_record->node_cnt > bluegene_bp_node_cnt) - continue; - if(proc_cnt < request->procs) { - if(last_quarter != bg_record->quarter){ - last_quarter = bg_record->quarter; - total_proc_cnt = proc_cnt; - } else { - total_proc_cnt += proc_cnt; - } - debug2("1 got %d on quarter %d", - total_proc_cnt, last_quarter); - if(total_proc_cnt == request->procs) { - request->save_name = xmalloc(4); - snprintf(request->save_name, - 4, - "%c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]]); - if(!my_block_list) { - rc = SLURM_SUCCESS; - goto finished; - } - - bg_record = _create_small_record( - bg_record, - last_quarter, - (uint16_t) NO_VAL); - if(block_exist_in_list(bg_list, bg_record)) - destroy_bg_record(bg_record); - else { - if(configure_block(bg_record) - == SLURM_ERROR) { - destroy_bg_record(bg_record); - error("_breakup_blocks: " - "unable to configure " - "block in api"); - return SLURM_ERROR; - } - list_append(bg_list, bg_record); - print_bg_record(bg_record); - } - rc = SLURM_SUCCESS; - goto finished; - } - continue; - } - break; - } - if(bg_record) { - debug2("got one on the first pass"); - goto found_one; - } - list_iterator_reset(itr); - last_quarter = (uint16_t) NO_VAL; - while ((bg_record = (bg_record_t *) list_next(itr)) - != NULL) { - if(bg_record->job_running != NO_JOB_RUNNING) - continue; - if (request->avail_node_bitmap && - !bit_super_set(bg_record->bitmap, - request->avail_node_bitmap)) { - debug2("bg block %s has nodes not usable by this job", - bg_record->bg_block_id); - continue; - } - - if(request->start_req) { - if ((request->start[X] != bg_record->start[X]) - || (request->start[Y] != bg_record->start[Y]) - || (request->start[Z] != bg_record->start[Z])) { - debug4("small 2 got %c%c%c looking for %c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]], - alpha_num[request->start[X]], - alpha_num[request->start[Y]], - alpha_num[request->start[Z]]); - continue; - } - debug3("small 2 found %c%c%c looking for %c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]], - alpha_num[request->start[X]], - alpha_num[request->start[Y]], - alpha_num[request->start[Z]]); - } - - proc_cnt = bg_record->bp_count * bg_record->cpus_per_bp; - if(proc_cnt == request->procs) { - debug2("found it here %s, %s", - bg_record->bg_block_id, - bg_record->nodes); - request->save_name = xmalloc(4); - snprintf(request->save_name, - 4, - "%c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]]); - rc = SLURM_SUCCESS; - goto finished; - } - - if(bg_record->node_cnt > bluegene_bp_node_cnt) - continue; - if(proc_cnt < request->procs) { - if(last_quarter != bg_record->quarter){ - last_quarter = bg_record->quarter; - total_proc_cnt = proc_cnt; - } else { - total_proc_cnt += proc_cnt; - } - debug2("got %d on quarter %d", - 
total_proc_cnt, last_quarter); - if(total_proc_cnt == request->procs) { - request->save_name = xmalloc(4); - snprintf(request->save_name, - 4, - "%c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]]); - if(!my_block_list) { - rc = SLURM_SUCCESS; - goto finished; - } - bg_record = _create_small_record( - bg_record, - last_quarter, - (uint16_t) NO_VAL); - if(block_exist_in_list(bg_list, bg_record)) - destroy_bg_record(bg_record); - else { - if(configure_block(bg_record) - == SLURM_ERROR) { - destroy_bg_record(bg_record); - error("_breakup_blocks: " - "unable to configure " - "block in api 2"); - return SLURM_ERROR; - } - list_append(bg_list, bg_record); - print_bg_record(bg_record); - } - rc = SLURM_SUCCESS; - goto finished; - } - continue; - } - break; - } -found_one: - if(bg_record) { - format_node_name(bg_record, tmp_char, sizeof(tmp_char)); - - debug2("going to split %s, %s", - bg_record->bg_block_id, - tmp_char); - request->save_name = xmalloc(4); - snprintf(request->save_name, - 4, - "%c%c%c", - alpha_num[bg_record->start[X]], - alpha_num[bg_record->start[Y]], - alpha_num[bg_record->start[Z]]); - if(!my_block_list) { - rc = SLURM_SUCCESS; - goto finished; - } - _split_block(bg_record, request->procs); - rc = SLURM_SUCCESS; - goto finished; - } - -finished: - list_iterator_destroy(itr); - - return rc; -} - -static bg_record_t *_create_small_record(bg_record_t *bg_record, - uint16_t quarter, uint16_t nodecard) -{ - bg_record_t *found_record = NULL; - int small_size = 4; - - found_record = (bg_record_t*) xmalloc(sizeof(bg_record_t)); - - found_record->job_running = NO_JOB_RUNNING; - found_record->user_name = xstrdup(bg_record->user_name); - found_record->user_uid = bg_record->user_uid; - found_record->bg_block_list = list_create(destroy_ba_node); - found_record->nodes = xstrdup(bg_record->nodes); - found_record->blrtsimage = xstrdup(bg_record->blrtsimage); - found_record->linuximage = xstrdup(bg_record->linuximage); - found_record->mloaderimage = xstrdup(bg_record->mloaderimage); - found_record->ramdiskimage = xstrdup(bg_record->ramdiskimage); - - process_nodes(found_record); - - found_record->conn_type = SELECT_SMALL; - - found_record->node_use = SELECT_COPROCESSOR_MODE; - if(nodecard != (uint16_t) NO_VAL) - small_size = 16; - found_record->cpus_per_bp = procs_per_node/small_size; - found_record->node_cnt = bluegene_bp_node_cnt/small_size; - found_record->quarter = quarter; - found_record->nodecard = nodecard; - - if(set_ionodes(found_record) == SLURM_ERROR) - error("couldn't create ionode_bitmap for %d.%d", - found_record->quarter, found_record->nodecard); - return found_record; -} - static int _reopen_bridge_log(void) { int rc = SLURM_SUCCESS; diff --git a/src/plugins/select/bluegene/plugin/bluegene.h b/src/plugins/select/bluegene/plugin/bluegene.h index b19e4d1ec..e5afba4c0 100644 --- a/src/plugins/select/bluegene/plugin/bluegene.h +++ b/src/plugins/select/bluegene/plugin/bluegene.h @@ -1,7 +1,7 @@ /*****************************************************************************\ * bluegene.h - header for blue gene configuration processing module. * - * $Id: bluegene.h 10449 2006-12-12 16:34:51Z da $ + * $Id: bluegene.h 13924 2008-04-23 06:24:55Z da $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
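Several hunks above (process_nodes and the avail_node_bitmap handling in create_dynamic_block) decode base-partition coordinates packed into a node-name suffix by calling xstrntol and then dividing by HOSTLIST_BASE. The following is a minimal standalone sketch of that arithmetic only, assuming a base-36 digit set (0-9 then A-Z) to match the alpha_num table used throughout this patch; digit_val and decode_coord are hypothetical helper names for illustration, not SLURM functions.

#include <stdio.h>

#define BASE 36	/* assumed value of HOSTLIST_BASE for this sketch */

/* map one base-36 digit character (0-9, A-Z) to its value */
static int digit_val(char c)
{
	return (c >= '0' && c <= '9') ? (c - '0') : (c - 'A' + 10);
}

/* decode a 3-character coordinate suffix such as "133" into X/Y/Z,
 * mirroring the number / (HOSTLIST_BASE * HOSTLIST_BASE) and modulus
 * steps used in process_nodes() above */
static void decode_coord(const char *s, int *x, int *y, int *z)
{
	int number = (digit_val(s[0]) * BASE + digit_val(s[1])) * BASE
		+ digit_val(s[2]);

	*x = number / (BASE * BASE);
	*y = (number % (BASE * BASE)) / BASE;
	*z = number % BASE;
}

int main(void)
{
	int x, y, z;

	decode_coord("133", &x, &y, &z);
	printf("%d %d %d\n", x, y, z);	/* prints: 1 3 3 */
	return 0;
}

For example, the suffix 133 decodes to X=1, Y=3, Z=3, the same values the patched code stores in start[X], start[Y] and start[Z] from the identical division and modulus sequence.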
@@ -39,22 +39,7 @@ #ifndef _BLUEGENE_H_ #define _BLUEGENE_H_ -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <stdlib.h> -#include <sys/stat.h> -#include <pwd.h> - -#include "src/common/bitstring.h" -#include "src/common/hostlist.h" -#include "src/common/list.h" -#include "src/common/macros.h" -#include "src/common/node_select.h" -#include "src/common/parse_time.h" -#include "src/slurmctld/slurmctld.h" -#include "../block_allocator/block_allocator.h" +#include "bg_record_functions.h" typedef enum bg_layout_type { LAYOUT_STATIC, /* no overlaps, except for full system block @@ -64,53 +49,6 @@ typedef enum bg_layout_type { LAYOUT_DYNAMIC /* slurm will make all blocks */ } bg_layout_t; -typedef struct bg_record { - pm_partition_id_t bg_block_id; /* ID returned from MMCS */ - char *nodes; /* String of nodes in block */ - char *ionodes; /* String of ionodes in block - * NULL if not a small block*/ - char *user_name; /* user using the block */ - char *target_name; /* when a block is freed this - is the name of the user we - want on the block */ - int full_block; /* whether or not block is the full - block */ - int modifying; /* flag to say the block is - being modified or not at - job launch usually */ - uid_t user_uid; /* Owner of block uid */ - rm_partition_state_t state; /* the allocated block */ - int start[BA_SYSTEM_DIMENSIONS];/* start node */ - uint16_t geo[BA_SYSTEM_DIMENSIONS]; /* geometry */ - rm_connection_type_t conn_type; /* Mesh or Torus or NAV */ - rm_partition_mode_t node_use; /* either COPROCESSOR or VIRTUAL */ - rm_partition_t *bg_block; /* structure to hold info from db2 */ - List bg_block_list; /* node list of blocks in block */ - int bp_count; /* size */ - int switch_count; /* number of switches used. */ - int boot_state; /* check to see if boot failed. - -1 = fail, - 0 = not booting, - 1 = booting */ - int boot_count; /* number of attemts boot attempts */ - bitstr_t *bitmap; /* bitmap to check the name - of block */ - bitstr_t *ionode_bitmap; /* for small blocks bitmap to - keep track which ionodes we - are on. 
NULL if not a small block*/ - int job_running; /* job id if there is a job running - on the block */ - int cpus_per_bp; /* count of cpus per base part */ - uint32_t node_cnt; /* count of nodes per block */ - uint16_t quarter; /* used for small blocks - determine quarter of BP */ - uint16_t nodecard; /* used for small blocks - determine nodecard of quarter */ - char *blrtsimage; /* BlrtsImage for this block */ - char *linuximage; /* LinuxImage for this block */ - char *mloaderimage; /* mloaderImage for this block */ - char *ramdiskimage; /* RamDiskImage for this block */ -} bg_record_t; /* Global variables */ extern rm_BGL_t *bg; @@ -134,8 +72,6 @@ extern List bg_list; /* List of configured BG blocks */ extern List bg_job_block_list; /* jobs running in these blocks */ extern List bg_booted_block_list; /* blocks that are booted */ extern List bg_freeing_list; /* blocks that being freed */ -extern List bg_request_list; /* list of request that can't - be made just yet */ extern List bg_blrtsimage_list; extern List bg_linuximage_list; extern List bg_mloaderimage_list; @@ -154,6 +90,8 @@ extern int num_unused_cpus; #define BLOCK_ERROR_STATE -3 #define NO_JOB_RUNNING -1 #define MAX_AGENT_COUNT 30 +#define BUFSIZE 4096 +#define BITSIZE 128 #include "bg_block_info.h" #include "bg_job_place.h" @@ -169,22 +107,6 @@ extern int init_bg(void); /* Purge all plugin variables */ extern void fini_bg(void); -/* Log a bg_record's contents */ -extern void print_bg_record(bg_record_t *record); -extern void destroy_bg_record(void *object); -extern int block_exist_in_list(List my_list, bg_record_t *bg_record); -extern void process_nodes(bg_record_t *bg_reord); -extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record); - -/* return bg_record from a bg_list */ -extern bg_record_t *find_bg_record_in_list(List my_list, char *bg_block_id); - -/* change username of a block bg_record_t target_name needs to be - updated before call of function. -*/ -extern int update_block_user(bg_record_t *bg_block_id, int set); -extern void drain_as_needed(bg_record_t *bg_record, char *reason); -extern int format_node_name(bg_record_t *bg_record, char *buf, int buf_size); extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b); @@ -207,24 +129,15 @@ extern void sort_bg_record_inc_size(List records); * nodes and switches */ extern void *bluegene_agent(void *args); -/* - * create_*_block(s) - functions for creating blocks that will be used - * for scheduling. 
- * RET - success of fitting all configurations - */ -extern int create_defined_blocks(bg_layout_t overlapped); -extern int create_dynamic_block(ba_request_t *request, List my_block_list); -extern int create_full_system_block(); - extern int bg_free_block(bg_record_t *bg_record); extern int remove_from_bg_list(List my_bg_list, bg_record_t *bg_record); -extern int remove_from_request_list(); +extern bg_record_t *find_and_remove_org_from_bg_list(List my_list, + bg_record_t *bg_record); +extern bg_record_t *find_org_in_bg_list(List my_list, bg_record_t *bg_record); extern void *mult_free_block(void *args); extern void *mult_destroy_block(void *args); extern int free_block_list(List delete_list); extern int read_bg_conf(void); -extern int set_ionodes(bg_record_t *bg_record); -extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq); /* block_sys.c */ /*****************************************************/ diff --git a/src/plugins/select/bluegene/plugin/defined_block.c b/src/plugins/select/bluegene/plugin/defined_block.c new file mode 100644 index 000000000..4407f7019 --- /dev/null +++ b/src/plugins/select/bluegene/plugin/defined_block.c @@ -0,0 +1,370 @@ +/*****************************************************************************\ + * defined_block.c - functions for creating blocks in a static environment. + * + * $Id: defined_block.c 12954 2008-01-04 20:37:49Z da $ + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "defined_block.h" + +/* + * create_defined_blocks - create the static blocks that will be used + * for scheduling, all partitions must be able to be created and booted + * at once. 
+ * IN - int overlapped, 1 if partitions are to be overlapped, 0 if they are + * static. + * RET - success of fitting all configurations + */ +extern int create_defined_blocks(bg_layout_t overlapped, + List bg_found_block_list) +{ + int rc = SLURM_SUCCESS; + + ListIterator itr; + bg_record_t *bg_record = NULL; + ListIterator itr_found; + int i; + bg_record_t *found_record = NULL; + int geo[BA_SYSTEM_DIMENSIONS]; + char temp[256]; + List results = NULL; + +#ifdef HAVE_BG_FILES + init_wires(); +#endif + slurm_mutex_lock(&block_state_mutex); + reset_ba_system(false); + if(bg_list) { + itr = list_iterator_create(bg_list); + while((bg_record = list_next(itr))) { + if(bg_found_block_list) { + itr_found = list_iterator_create( + bg_found_block_list); + while ((found_record = (bg_record_t*) + list_next(itr_found)) != NULL) { +/* info("%s.%d.%d ?= %s.%d.%d\n", */ +/* bg_record->nodes, */ +/* bg_record->quarter, */ +/* bg_record->nodecard, */ +/* found_record->nodes, */ +/* found_record->quarter, */ +/* found_record->nodecard); */ + + if ((bit_equal(bg_record->bitmap, + found_record->bitmap)) + && (bg_record->quarter == + found_record->quarter) + && (bg_record->nodecard == + found_record->nodecard)) { + /* don't reboot this one */ + break; + } + } + list_iterator_destroy(itr_found); + } else { + error("create_defined_blocks: " + "no bg_found_block_list 1"); + } + if(bg_record->bp_count>0 + && !bg_record->full_block + && bg_record->cpus_per_bp == procs_per_node) { + char *name = NULL; + if(overlapped == LAYOUT_OVERLAP) { + reset_ba_system(false); + set_all_bps_except(bg_record->nodes); + } + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + geo[i] = bg_record->geo[i]; + debug2("adding %s %c%c%c %c%c%c", + bg_record->nodes, + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]], + alpha_num[geo[X]], + alpha_num[geo[Y]], + alpha_num[geo[Z]]); + if(bg_record->bg_block_list + && list_count(bg_record->bg_block_list)) { + if(check_and_set_node_list( + bg_record->bg_block_list) + == SLURM_ERROR) { + debug2("something happened in " + "the load of %s" + "Did you use smap to " + "make the " + "bluegene.conf file?", + bg_record->bg_block_id); + list_iterator_destroy(itr); + slurm_mutex_unlock( + &block_state_mutex); + return SLURM_ERROR; + } + } else { + results = list_create(NULL); + name = set_bg_block( + results, + bg_record->start, + geo, + bg_record->conn_type); + if(!name) { + error("I was unable to " + "make the " + "requested block."); + list_destroy(results); + list_iterator_destroy(itr); + slurm_mutex_unlock( + &block_state_mutex); + return SLURM_ERROR; + } + slurm_conf_lock(); + snprintf(temp, sizeof(temp), "%s%s", + slurmctld_conf.node_prefix, + name); + slurm_conf_unlock(); + xfree(name); + if(strcmp(temp, bg_record->nodes)) { + fatal("given list of %s " + "but allocated %s, " + "your order might be " + "wrong in bluegene.conf", + bg_record->nodes, + temp); + } + if(bg_record->bg_block_list) + list_destroy(bg_record-> + bg_block_list); + bg_record->bg_block_list = + list_create(destroy_ba_node); + copy_node_path( + results, + bg_record->bg_block_list); + list_destroy(results); + } + } + if(found_record == NULL) { + if(bg_record->full_block) { + /* if this is defined we need + to remove it since we are + going to try to create it + later on overlap systems + this doesn't matter, but + since we don't clear the + table on static mode we + can't do it here or it just + won't work since other + wires will be or are + already set + */ + list_remove(itr); + continue; + 
} + if((rc = configure_block(bg_record)) + == SLURM_ERROR) { + list_iterator_destroy(itr); + slurm_mutex_unlock(&block_state_mutex); + return rc; + } + print_bg_record(bg_record); + } + } + list_iterator_destroy(itr); + } else { + error("create_defined_blocks: no bg_list 2"); + slurm_mutex_unlock(&block_state_mutex); + return SLURM_ERROR; + } + slurm_mutex_unlock(&block_state_mutex); + create_full_system_block(bg_found_block_list); + + slurm_mutex_lock(&block_state_mutex); + sort_bg_record_inc_size(bg_list); + slurm_mutex_unlock(&block_state_mutex); + +#ifdef _PRINT_BLOCKS_AND_EXIT + if(bg_list) { + itr = list_iterator_create(bg_list); + debug("\n\n"); + while ((found_record = (bg_record_t *) list_next(itr)) + != NULL) { + print_bg_record(found_record); + } + list_iterator_destroy(itr); + } else { + error("create_defined_blocks: no bg_list 5"); + } + exit(0); +#endif /* _PRINT_BLOCKS_AND_EXIT */ + rc = SLURM_SUCCESS; + //exit(0); + return rc; +} + +extern int create_full_system_block(List bg_found_block_list) +{ + int rc = SLURM_SUCCESS; + ListIterator itr; + bg_record_t *bg_record = NULL; + char *name = NULL; + List records = NULL; + int geo[BA_SYSTEM_DIMENSIONS]; + int i; + blockreq_t blockreq; + List results = NULL; + + /* Here we are adding a block that in for the entire machine + just in case it isn't in the bluegene.conf file. + */ + slurm_mutex_lock(&block_state_mutex); + +//#ifdef HAVE_BG_FILES + geo[X] = DIM_SIZE[X] - 1; + geo[Y] = DIM_SIZE[Y] - 1; + geo[Z] = DIM_SIZE[Z] - 1; +/* #else */ +/* geo[X] = max_dim[X]; */ +/* geo[Y] = max_dim[Y]; */ +/* geo[Z] = max_dim[Z]; */ +/* #endif */ + slurm_conf_lock(); + i = (10+strlen(slurmctld_conf.node_prefix)); + name = xmalloc(i); + if((geo[X] == 0) && (geo[Y] == 0) && (geo[Z] == 0)) + snprintf(name, i, "%s000", + slurmctld_conf.node_prefix); + else + snprintf(name, i, "%s[000x%c%c%c]", + slurmctld_conf.node_prefix, + alpha_num[geo[X]], alpha_num[geo[Y]], + alpha_num[geo[Z]]); + slurm_conf_unlock(); + + if(bg_found_block_list) { + itr = list_iterator_create(bg_found_block_list); + while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { + if (!strcmp(name, bg_record->nodes)) { + xfree(name); + list_iterator_destroy(itr); + /* don't create total already there */ + goto no_total; + } + } + list_iterator_destroy(itr); + } else { + error("create_full_system_block: no bg_found_block_list 2"); + } + + if(bg_list) { + itr = list_iterator_create(bg_list); + while ((bg_record = (bg_record_t *) list_next(itr)) + != NULL) { + if (!strcmp(name, bg_record->nodes)) { + xfree(name); + list_iterator_destroy(itr); + /* don't create total already there */ + goto no_total; + } + } + list_iterator_destroy(itr); + } else { + xfree(name); + error("create_overlapped_blocks: no bg_list 3"); + rc = SLURM_ERROR; + goto no_total; + } + + records = list_create(destroy_bg_record); + blockreq.block = name; + blockreq.blrtsimage = NULL; + blockreq.linuximage = NULL; + blockreq.mloaderimage = NULL; + blockreq.ramdiskimage = NULL; + blockreq.conn_type = SELECT_TORUS; + blockreq.nodecards = 0; + blockreq.quarters = 0; + add_bg_record(records, NULL, &blockreq); + xfree(name); + + bg_record = (bg_record_t *) list_pop(records); + if(!bg_record) { + error("Nothing was returned from full system create"); + rc = SLURM_ERROR; + goto no_total; + } + reset_ba_system(false); + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + geo[i] = bg_record->geo[i]; + debug2("adding %s %c%c%c %c%c%c", + bg_record->nodes, + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + 
alpha_num[bg_record->start[Z]], + alpha_num[geo[X]], + alpha_num[geo[Y]], + alpha_num[geo[Z]]); + results = list_create(NULL); + name = set_bg_block(results, + bg_record->start, + geo, + bg_record->conn_type); + if(!name) { + error("I was unable to make the " + "requested block."); + list_destroy(results); + list_iterator_destroy(itr); + slurm_mutex_unlock(&block_state_mutex); + return SLURM_ERROR; + } + xfree(name); + if(bg_record->bg_block_list) + list_destroy(bg_record->bg_block_list); + bg_record->bg_block_list = list_create(destroy_ba_node); + copy_node_path(results, bg_record->bg_block_list); + list_destroy(results); + + if((rc = configure_block(bg_record)) == SLURM_ERROR) { + error("create_full_system_block: " + "unable to configure block in api"); + destroy_bg_record(bg_record); + goto no_total; + } + + print_bg_record(bg_record); + list_append(bg_list, bg_record); + +no_total: + if(records) + list_destroy(records); + slurm_mutex_unlock(&block_state_mutex); + return rc; +} diff --git a/src/plugins/select/bluegene/plugin/defined_block.h b/src/plugins/select/bluegene/plugin/defined_block.h new file mode 100644 index 000000000..4255135f3 --- /dev/null +++ b/src/plugins/select/bluegene/plugin/defined_block.h @@ -0,0 +1,48 @@ +/*****************************************************************************\ + * defined_block.h - header for creating blocks in a static environment. + * + * $Id: defined_block.h 12954 2008-01-04 20:37:49Z da $ + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#ifndef _BLUEGENE_DEFINED_BLOCK_H_ +#define _BLUEGENE_DEFINED_BLOCK_H_ + +#include "bluegene.h" + +extern int create_defined_blocks(bg_layout_t overlapped, + List bg_found_block_list); +extern int create_full_system_block(List bg_found_block_list); + +#endif /* _BLUEGENE_DEFINED_BLOCK_H_ */ diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.c b/src/plugins/select/bluegene/plugin/dynamic_block.c new file mode 100644 index 000000000..941f4fc79 --- /dev/null +++ b/src/plugins/select/bluegene/plugin/dynamic_block.c @@ -0,0 +1,715 @@ +/*****************************************************************************\ + * dynamic_block.c - functions for creating blocks in a dynamic environment. + * + * $Id: dynamic_block.c 12954 2008-01-04 20:37:49Z da $ + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "dynamic_block.h" + +static int _split_block(List block_list, List new_blocks, + bg_record_t *bg_record, int procs); +static int _breakup_blocks(List block_list, List new_blocks, + ba_request_t *request, List my_block_list); + +/* + * create_dynamic_block - create new block(s) to be used for a new + * job allocation. + * RET - a list of created block(s) or NULL on failure errno is set. 
+ */ +extern List create_dynamic_block(List block_list, + ba_request_t *request, List my_block_list) +{ + int rc = SLURM_SUCCESS; + + ListIterator itr; + bg_record_t *bg_record = NULL; + List results = NULL; + List new_blocks = NULL; + uint16_t num_quarter=0, num_nodecard=0; + bitstr_t *my_bitmap = NULL; + int geo[BA_SYSTEM_DIMENSIONS]; + int i; + blockreq_t blockreq; + + slurm_mutex_lock(&block_state_mutex); + if(my_block_list) { + reset_ba_system(true); + itr = list_iterator_create(my_block_list); + while ((bg_record = list_next(itr))) { + if(!my_bitmap) { + my_bitmap = + bit_alloc(bit_size(bg_record->bitmap)); + } + + if(!bit_super_set(bg_record->bitmap, my_bitmap)) { + bit_or(my_bitmap, bg_record->bitmap); + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + geo[i] = bg_record->geo[i]; + debug2("adding %s %c%c%c %c%c%c", + bg_record->nodes, + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]], + alpha_num[geo[X]], + alpha_num[geo[Y]], + alpha_num[geo[Z]]); + + if(check_and_set_node_list( + bg_record->bg_block_list) + == SLURM_ERROR) { + debug2("something happened in " + "the load of %s", + bg_record->bg_block_id); + list_iterator_destroy(itr); + FREE_NULL_BITMAP(my_bitmap); + rc = SLURM_ERROR; + goto finished; + } + } + } + list_iterator_destroy(itr); + FREE_NULL_BITMAP(my_bitmap); + } else { + reset_ba_system(false); + debug("No list was given"); + } + + if(request->avail_node_bitmap) { + int j=0, number; + int x,y,z; + char *nodes = NULL; + bitstr_t *bitmap = bit_alloc(node_record_count); + int start[BA_SYSTEM_DIMENSIONS]; + int end[BA_SYSTEM_DIMENSIONS]; + + /* we want the bps that aren't in this partition to + * mark them as used + */ + bit_or(bitmap, request->avail_node_bitmap); + bit_not(bitmap); + nodes = bitmap2node_name(bitmap); + + //info("not using %s", nodes); + while(nodes[j] != '\0') { + if ((nodes[j] == '[' || nodes[j] == ',') + && (nodes[j+8] == ']' || nodes[j+8] == ',') + && (nodes[j+4] == 'x' || nodes[j+4] == '-')) { + + j++; + number = xstrntol(nodes + j, + NULL, BA_SYSTEM_DIMENSIONS, + HOSTLIST_BASE); + start[X] = number / + (HOSTLIST_BASE * HOSTLIST_BASE); + start[Y] = (number % + (HOSTLIST_BASE * HOSTLIST_BASE)) + / HOSTLIST_BASE; + start[Z] = (number % HOSTLIST_BASE); + j += 4; + number = xstrntol(nodes + j, + NULL, 3, HOSTLIST_BASE); + end[X] = number / + (HOSTLIST_BASE * HOSTLIST_BASE); + end[Y] = (number + % (HOSTLIST_BASE * HOSTLIST_BASE)) + / HOSTLIST_BASE; + end[Z] = (number % HOSTLIST_BASE); + j += 3; + for (x = start[X]; x <= end[X]; x++) { + for (y = start[Y]; y <= end[Y]; y++) { + for (z = start[Z]; + z <= end[Z]; z++) { + ba_system_ptr-> + grid[x] +#ifdef HAVE_BG + [y][z] +#endif + .used = 1; + } + } + } + + if(nodes[j] != ',') + break; + j--; + } else if((nodes[j] >= '0' && nodes[j] <= '9') + || (nodes[j] >= 'A' && nodes[j] <= 'Z')) { + + number = xstrntol(nodes + j, + NULL, BA_SYSTEM_DIMENSIONS, + HOSTLIST_BASE); + x = number / (HOSTLIST_BASE * HOSTLIST_BASE); + y = (number % (HOSTLIST_BASE * HOSTLIST_BASE)) + / HOSTLIST_BASE; + z = (number % HOSTLIST_BASE); + j+=3; + + ba_system_ptr->grid[x] +#ifdef HAVE_BG + [y][z] +#endif + .used = 1; + + if(nodes[j] != ',') + break; + j--; + } + j++; + } + xfree(nodes); + FREE_NULL_BITMAP(bitmap); + } + + if(request->size==1 && request->procs < bluegene_bp_node_cnt) { + request->conn_type = SELECT_SMALL; + if(request->procs == (procs_per_node/16)) { + if(!bluegene_nodecard_ionode_cnt) { + error("can't create this size %d " + "on this system numpsets is %d", + request->procs, + 
bluegene_numpsets); + goto finished; + } + + num_nodecard=4; + num_quarter=3; + } else { + if(!bluegene_quarter_ionode_cnt) { + error("can't create this size %d " + "on this system numpsets is %d", + request->procs, + bluegene_numpsets); + goto finished; + } + num_quarter=4; + } + new_blocks = list_create(destroy_bg_record); + if(_breakup_blocks(block_list, new_blocks, + request, my_block_list) + != SLURM_SUCCESS) { + list_destroy(new_blocks); + new_blocks = NULL; + debug2("small block not able to be placed"); + //rc = SLURM_ERROR; + } else + goto finished; + } + + if(request->conn_type == SELECT_NAV) + request->conn_type = SELECT_TORUS; + + if(!new_ba_request(request)) { + error("Problems with request for size %d geo %dx%dx%d", + request->size, + request->geometry[X], + request->geometry[Y], + request->geometry[Z]); + rc = ESLURM_INTERCONNECT_FAILURE; + goto finished; + } + + if(!list_count(block_list) || !my_block_list) { + bg_record = NULL; + goto no_list; + } + + /*Try to put block starting in the smallest of the exisiting blocks*/ + if(!request->start_req) { + itr = list_iterator_create(block_list); + while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { + request->rotate_count = 0; + request->elongate_count = 1; + + if(bg_record->job_running == NO_JOB_RUNNING + && (bg_record->quarter == (uint16_t) NO_VAL + || (bg_record->quarter == 0 + && (bg_record->nodecard == (uint16_t) NO_VAL + || bg_record->nodecard == 0)))) { + + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + request->start[i] = + bg_record->start[i]; + debug2("allocating %s %c%c%c %d", + bg_record->nodes, + alpha_num[request->start[X]], + alpha_num[request->start[Y]], + alpha_num[request->start[Z]], + request->size); + request->start_req = 1; + rc = SLURM_SUCCESS; + if(results) + list_delete_all( + results, + &empty_null_destroy_list, ""); + else + results = list_create(NULL); + if (!allocate_block(request, results)){ + debug2("1 allocate failure for size %d " + "base partitions", + request->size); + rc = SLURM_ERROR; + } else + break; + } + } + list_iterator_destroy(itr); + + request->start_req = 0; + for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) + request->start[i] = (uint16_t) NO_VAL; + } + +no_list: + if(!bg_record) { + rc = SLURM_SUCCESS; + if(results) + list_delete_all(results, + &empty_null_destroy_list, ""); + else + results = list_create(NULL); + if (!allocate_block(request, results)) { + debug("allocate failure for size %d base partitions", + request->size); + rc = SLURM_ERROR; + } + } + + if(rc != SLURM_SUCCESS) + goto finished; + + /*set up bg_record(s) here */ + new_blocks = list_create(destroy_bg_record); + + blockreq.block = request->save_name; + blockreq.blrtsimage = request->blrtsimage; + blockreq.linuximage = request->linuximage; + blockreq.mloaderimage = request->mloaderimage; + blockreq.ramdiskimage = request->ramdiskimage; + blockreq.conn_type = request->conn_type; + blockreq.nodecards = num_nodecard; + blockreq.quarters = num_quarter; + + add_bg_record(new_blocks, results, &blockreq); + +finished: + xfree(request->save_name); + + if(request->elongate_geos) { + list_destroy(request->elongate_geos); + request->elongate_geos = NULL; + } + if(results) + list_destroy(results); + errno = rc; + slurm_mutex_unlock(&block_state_mutex); + + return new_blocks; +} + +extern bg_record_t *create_small_record(bg_record_t *bg_record, + uint16_t quarter, uint16_t nodecard) +{ + bg_record_t *found_record = NULL; + int small_size = 4; + ListIterator itr = NULL; + ba_node_t *new_ba_node = NULL; + ba_node_t *ba_node = NULL; + 
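
The body of create_small_record() that follows sizes the small block from its parent base partition. As a rough worked example of that arithmetic (procs_per_node defaults to 512 in this plugin; bluegene_bp_node_cnt = 512 is an assumed BG/L midplane value, not stated in this patch, and the helper below is illustrative only):

/* Sketch of the small_size arithmetic used by create_small_record():
 * a nodecard slice is 1/16 of a base partition, a quarter is 1/4. */
static void _small_block_sizes(int is_nodecard, int *cpus_per_bp, int *node_cnt)
{
	const int procs_per_node = 512;	/* plugin default */
	const int bp_node_cnt = 512;	/* assumed BG/L midplane size */
	int small_size = is_nodecard ? 16 : 4;

	*cpus_per_bp = procs_per_node / small_size;	/* 32 or 128 */
	*node_cnt    = bp_node_cnt / small_size;	/* 32 or 128 */
}
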
found_record = (bg_record_t*) xmalloc(sizeof(bg_record_t)); + + found_record->job_running = NO_JOB_RUNNING; + found_record->user_name = xstrdup(bg_record->user_name); + found_record->user_uid = bg_record->user_uid; + found_record->bg_block_list = list_create(destroy_ba_node); + itr = list_iterator_create(bg_record->bg_block_list); + ba_node = list_next(itr); + list_iterator_destroy(itr); + if(!ba_node) + error("you gave me a list with no ba_nodes"); + else { + int i=0,j=0; + new_ba_node = ba_copy_node(ba_node); + for (i=0; i<BA_SYSTEM_DIMENSIONS; i++){ + for(j=0;j<NUM_PORTS_PER_NODE;j++) { + ba_node->axis_switch[i].int_wire[j].used = 0; + if(i!=X) { + if(j==3 || j==4) + ba_node->axis_switch[i]. + int_wire[j]. + used = 1; + } + ba_node->axis_switch[i].int_wire[j]. + port_tar = j; + } + } + list_append(found_record->bg_block_list, new_ba_node); + found_record->bp_count = 1; + } + found_record->nodes = xstrdup(bg_record->nodes); + found_record->blrtsimage = xstrdup(bg_record->blrtsimage); + found_record->linuximage = xstrdup(bg_record->linuximage); + found_record->mloaderimage = xstrdup(bg_record->mloaderimage); + found_record->ramdiskimage = xstrdup(bg_record->ramdiskimage); + + process_nodes(found_record, false); + + found_record->conn_type = SELECT_SMALL; + + found_record->node_use = SELECT_COPROCESSOR_MODE; + if(nodecard != (uint16_t) NO_VAL) + small_size = 16; + found_record->cpus_per_bp = procs_per_node/small_size; + found_record->node_cnt = bluegene_bp_node_cnt/small_size; + found_record->quarter = quarter; + found_record->nodecard = nodecard; + + if(set_ionodes(found_record) == SLURM_ERROR) + error("couldn't create ionode_bitmap for %d.%d", + found_record->quarter, found_record->nodecard); + return found_record; +} + +/*********************** Local Functions *************************/ + +static int _split_block(List block_list, List new_blocks, + bg_record_t *bg_record, int procs) +{ + bg_record_t *found_record = NULL; + bool full_bp = false; + int small_count = 0; + int small_size = 0; + uint16_t num_nodecard = 0, num_quarter = 0; + int i; + int node_cnt = 0; + uint16_t quarter = 0; + uint16_t nodecard = 0; + + if(bg_record->quarter == (uint16_t) NO_VAL) + full_bp = true; + + if(procs == (procs_per_node/16) && bluegene_nodecard_ionode_cnt) { + num_nodecard=4; + if(full_bp) + num_quarter=3; + } else if(full_bp) { + num_quarter = 4; + } else { + error("you asked for something that was already this size"); + return SLURM_ERROR; + } + debug2("asking for %d 32s from a %d block", + num_nodecard, bg_record->node_cnt); + small_count = num_nodecard+num_quarter; + + /* break base partition up into 16 parts */ + small_size = bluegene_bp_node_cnt/bluegene_nodecard_node_cnt; + node_cnt = 0; + if(!full_bp) + quarter = bg_record->quarter; + else + quarter = 0; + nodecard = 0; + for(i=0; i<small_count; i++) { + if(i == num_nodecard) { + /* break base partition up into 4 parts */ + small_size = 4; + } + + if(small_size == 4) + nodecard = (uint16_t)NO_VAL; + else + nodecard = i%4; + found_record = create_small_record(bg_record, + quarter, + nodecard); + list_append(new_blocks, found_record); + + node_cnt += bluegene_bp_node_cnt/small_size; + if(node_cnt == 128) { + node_cnt = 0; + quarter++; + } + } + + return SLURM_SUCCESS; +} + +static int _breakup_blocks(List block_list, List new_blocks, + ba_request_t *request, List my_block_list) +{ + int rc = SLURM_ERROR; + bg_record_t *bg_record = NULL; + ListIterator itr; + int proc_cnt=0; + int total_proc_cnt=0; + uint16_t last_quarter = (uint16_t) NO_VAL; + 
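
The save_name built in the loops below is simply the three-character coordinate of the block's start corner, one digit per axis. A minimal sketch of that encoding follows, assuming alpha_num[] maps 0-9 followed by A-Z (the inverse of the div/mod-by-HOSTLIST_BASE decoding seen in create_dynamic_block() above); _coord_name is a hypothetical helper, not code from the patch.

#include <stdio.h>

static void _coord_name(int x, int y, int z, char name[4])
{
	static const char alpha_num[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";

	/* e.g. (1, 3, 10) -> "13A" */
	snprintf(name, 4, "%c%c%c", alpha_num[x], alpha_num[y], alpha_num[z]);
}
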
char tmp_char[256]; + + debug2("proc count = %d size = %d", + request->procs, request->size); + + itr = list_iterator_create(block_list); + while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) { + if(bg_record->job_running != NO_JOB_RUNNING) + continue; + if(bg_record->state != RM_PARTITION_FREE) + continue; + if (request->avail_node_bitmap && + !bit_super_set(bg_record->bitmap, + request->avail_node_bitmap)) { + debug2("bg block %s has nodes not usable by this job", + bg_record->bg_block_id); + continue; + } + + if(request->start_req) { + if ((request->start[X] != bg_record->start[X]) + || (request->start[Y] != bg_record->start[Y]) + || (request->start[Z] != bg_record->start[Z])) { + debug4("small got %c%c%c looking for %c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]], + alpha_num[request->start[X]], + alpha_num[request->start[Y]], + alpha_num[request->start[Z]]); + continue; + } + debug3("small found %c%c%c looking for %c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]], + alpha_num[request->start[X]], + alpha_num[request->start[Y]], + alpha_num[request->start[Z]]); + } + proc_cnt = bg_record->bp_count * + bg_record->cpus_per_bp; + if(proc_cnt == request->procs) { + debug2("found it here %s, %s", + bg_record->bg_block_id, + bg_record->nodes); + request->save_name = xmalloc(4); + snprintf(request->save_name, + 4, + "%c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]]); + rc = SLURM_SUCCESS; + goto finished; + } + if(bg_record->node_cnt > bluegene_bp_node_cnt) + continue; + if(proc_cnt < request->procs) { + if(last_quarter != bg_record->quarter){ + last_quarter = bg_record->quarter; + total_proc_cnt = proc_cnt; + } else { + total_proc_cnt += proc_cnt; + } + debug2("1 got %d on quarter %d", + total_proc_cnt, last_quarter); + if(total_proc_cnt == request->procs) { + request->save_name = xmalloc(4); + snprintf(request->save_name, + 4, + "%c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]]); + if(!my_block_list) { + rc = SLURM_SUCCESS; + goto finished; + } + + bg_record = create_small_record( + bg_record, + last_quarter, + (uint16_t) NO_VAL); + list_append(new_blocks, bg_record); + + rc = SLURM_SUCCESS; + goto finished; + } + continue; + } + break; + } + if(bg_record) { + debug2("got one on the first pass"); + goto found_one; + } + list_iterator_reset(itr); + last_quarter = (uint16_t) NO_VAL; + while ((bg_record = (bg_record_t *) list_next(itr)) + != NULL) { + if(bg_record->job_running != NO_JOB_RUNNING) + continue; + if (request->avail_node_bitmap && + !bit_super_set(bg_record->bitmap, + request->avail_node_bitmap)) { + debug2("bg block %s has nodes not usable by this job", + bg_record->bg_block_id); + continue; + } + + if(request->start_req) { + if ((request->start[X] != bg_record->start[X]) + || (request->start[Y] != bg_record->start[Y]) + || (request->start[Z] != bg_record->start[Z])) { + debug4("small 2 got %c%c%c looking for %c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]], + alpha_num[request->start[X]], + alpha_num[request->start[Y]], + alpha_num[request->start[Z]]); + continue; + } + debug3("small 2 found %c%c%c looking for %c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]], + alpha_num[request->start[X]], + 
alpha_num[request->start[Y]], + alpha_num[request->start[Z]]); + } + + proc_cnt = bg_record->bp_count * bg_record->cpus_per_bp; + if(proc_cnt == request->procs) { + debug2("found it here %s, %s", + bg_record->bg_block_id, + bg_record->nodes); + request->save_name = xmalloc(4); + snprintf(request->save_name, + 4, + "%c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]]); + rc = SLURM_SUCCESS; + goto finished; + } + + if(bg_record->node_cnt > bluegene_bp_node_cnt) + continue; + if(proc_cnt < request->procs) { + if(last_quarter != bg_record->quarter){ + last_quarter = bg_record->quarter; + total_proc_cnt = proc_cnt; + } else { + total_proc_cnt += proc_cnt; + } + debug2("got %d on quarter %d", + total_proc_cnt, last_quarter); + if(total_proc_cnt == request->procs) { + request->save_name = xmalloc(4); + snprintf(request->save_name, + 4, + "%c%c%c", + alpha_num[bg_record->start[X]], + alpha_num[bg_record->start[Y]], + alpha_num[bg_record->start[Z]]); + if(!my_block_list) { + rc = SLURM_SUCCESS; + goto finished; + } + bg_record = create_small_record( + bg_record, + last_quarter, + (uint16_t) NO_VAL); + list_append(new_blocks, bg_record); + + rc = SLURM_SUCCESS; + goto finished; + } + continue; + } + break; + } +found_one: + if(bg_record) { + List temp_list = NULL; + bg_record_t *found_record = NULL; + + if(bg_record->original) { + debug3("This was a copy"); + found_record = bg_record->original; + } else { + debug3("looking for original"); + found_record = find_org_in_bg_list( + bg_list, bg_record); + } + if(!found_record) { + error("this record wasn't found in the list!"); + rc = SLURM_ERROR; + goto finished; + } + + format_node_name(found_record, tmp_char, sizeof(tmp_char)); + + debug2("going to split %s, %s", + found_record->bg_block_id, + tmp_char); + request->save_name = xmalloc(4); + snprintf(request->save_name, + 4, + "%c%c%c", + alpha_num[found_record->start[X]], + alpha_num[found_record->start[Y]], + alpha_num[found_record->start[Z]]); + if(!my_block_list) { + rc = SLURM_SUCCESS; + goto finished; + } + _split_block(block_list, new_blocks, + found_record, request->procs); + remove_from_bg_list(block_list, bg_record); + destroy_bg_record(bg_record); + remove_from_bg_list(bg_list, found_record); + temp_list = list_create(NULL); + list_push(temp_list, found_record); + num_block_to_free++; + free_block_list(temp_list); + list_destroy(temp_list); + rc = SLURM_SUCCESS; + goto finished; + } + +finished: + list_iterator_destroy(itr); + + return rc; +} diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.h b/src/plugins/select/bluegene/plugin/dynamic_block.h new file mode 100644 index 000000000..c98aadf1a --- /dev/null +++ b/src/plugins/select/bluegene/plugin/dynamic_block.h @@ -0,0 +1,49 @@ +/*****************************************************************************\ + * dynamic_block.h - header for creating blocks in a dynamic environment. + * + * $Id: dynamic_block.h 12954 2008-01-04 20:37:49Z da $ + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. 
+ * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef _BLUEGENE_DYNAMIC_BLOCK_H_ +#define _BLUEGENE_DYNAMIC_BLOCK_H_ + +#include "bluegene.h" + +extern List create_dynamic_block(List block_list, + ba_request_t *request, List my_block_list); + +extern bg_record_t *create_small_record(bg_record_t *bg_record, + uint16_t quarter, uint16_t nodecard); +#endif /* _BLUEGENE_DYNAMIC_BLOCK_H_ */ diff --git a/src/plugins/select/bluegene/plugin/opts.c b/src/plugins/select/bluegene/plugin/opts.c index e196a982e..4fa939229 100644 --- a/src/plugins/select/bluegene/plugin/opts.c +++ b/src/plugins/select/bluegene/plugin/opts.c @@ -1,11 +1,11 @@ /****************************************************************************\ * opts.c - sfree command line option processing functions - * $Id: opts.c 12403 2007-09-25 18:36:42Z da $ + * $Id: opts.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c index e23eca54c..1d17316b7 100644 --- a/src/plugins/select/bluegene/plugin/select_bluegene.c +++ b/src/plugins/select/bluegene/plugin/select_bluegene.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * select_bluegene.c - node selection plugin for Blue Gene system. * - * $Id: select_bluegene.c 13423 2008-02-29 17:30:38Z da $ + * $Id: select_bluegene.c 14091 2008-05-20 21:34:02Z da $ ***************************************************************************** * Copyright (C) 2004-2006 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Dan Phung <phung4@llnl.gov> Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -38,6 +38,11 @@ \*****************************************************************************/ #include "bluegene.h" + +#ifndef HAVE_BG +#include "defined_block.h" +#endif + #include "src/slurmctld/trigger_mgr.h" #include <fcntl.h> @@ -45,7 +50,7 @@ /* Change BLOCK_STATE_VERSION value when changing the state save * format i.e. pack_block() */ -#define BLOCK_STATE_VERSION "VER000" +#define BLOCK_STATE_VERSION "VER001" /* global */ int procs_per_node = 512; @@ -80,7 +85,7 @@ int procs_per_node = 512; */ const char plugin_name[] = "Blue Gene node selection plugin"; const char plugin_type[] = "select/bluegene"; -const uint32_t plugin_version = 90; +const uint32_t plugin_version = 100; /* pthread stuff for updating BG node status */ static pthread_t bluegene_thread = 0; @@ -91,6 +96,8 @@ static int _init_status_pthread(void); static int _wait_for_thread (pthread_t thread_id); static char *_block_state_str(int state); +extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data); + /* * init() is called when the plugin is loaded, before any other functions * are called. Put global initialization here. @@ -117,6 +124,7 @@ extern int init ( void ) if ((SELECT_COPROCESSOR_MODE != RM_PARTITION_COPROCESSOR_MODE) || (SELECT_VIRTUAL_NODE_MODE != RM_PARTITION_VIRTUAL_NODE_MODE)) fatal("enum node_use_type out of sync with rm_api.h"); + #endif verbose("%s loading...", plugin_name); @@ -213,9 +221,22 @@ extern int fini ( void ) fatal("Error, could not read the file"); return SLURM_ERROR; } + if(part_list) { + struct part_record *part_ptr = NULL; + ListIterator itr = list_iterator_create(part_list); + while((part_ptr = list_next(itr))) { + part_ptr->max_nodes = part_ptr->max_nodes_orig; + part_ptr->min_nodes = part_ptr->min_nodes_orig; + select_p_alter_node_cnt(SELECT_SET_BP_CNT, + &part_ptr->max_nodes); + select_p_alter_node_cnt(SELECT_SET_BP_CNT, + &part_ptr->min_nodes); + } + list_iterator_destroy(itr); + } #else /*looking for blocks only I created */ - if (create_defined_blocks(bluegene_layout_mode) + if (create_defined_blocks(bluegene_layout_mode, NULL) == SLURM_ERROR) { /* error in creating the static blocks, so * blocks referenced by submitted jobs won't @@ -225,7 +246,7 @@ extern int fini ( void ) return SLURM_ERROR; } #endif - + return SLURM_SUCCESS; } @@ -337,7 +358,7 @@ extern int select_p_state_restore(char *dir_name) List results = NULL; int data_allocated, data_read = 0; char *ver_str = NULL; - uint16_t ver_str_len; + uint32_t ver_str_len; struct passwd *pw_ent = NULL; int blocks = 0; @@ -385,10 +406,9 @@ extern int select_p_state_restore(char *dir_name) * we don't try to unpack data using the wrong format routines */ if(size_buf(buffer) - >= sizeof(uint16_t) + strlen(BLOCK_STATE_VERSION)) { + >= sizeof(uint32_t) + strlen(BLOCK_STATE_VERSION)) { char *ptr = get_buf_data(buffer); - - if (!memcmp(&ptr[sizeof(uint16_t)], BLOCK_STATE_VERSION, 3)) { + if (!memcmp(&ptr[sizeof(uint32_t)], BLOCK_STATE_VERSION, 3)) { safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer); debug3("Version string in block_state header is %s", ver_str); @@ -402,9 +422,10 @@ extern int select_p_state_restore(char *dir_name) return EFAULT; } xfree(ver_str); - if(select_g_unpack_node_info(&node_select_ptr, buffer) 
== SLURM_ERROR) + if(select_g_unpack_node_info(&node_select_ptr, buffer) == SLURM_ERROR) { + error("select_p_state_restore: problem unpacking node_info"); goto unpack_error; - + } reset_ba_system(false); node_bitmap = bit_alloc(node_record_count); @@ -416,6 +437,21 @@ extern int select_p_state_restore(char *dir_name) bit_nclear(node_bitmap, 0, bit_size(node_bitmap) - 1); bit_nclear(ionode_bitmap, 0, bit_size(ionode_bitmap) - 1); + j = 0; + while(bg_info_record->bp_inx[j] >= 0) { + if (bg_info_record->bp_inx[j+1] + >= node_record_count) { + fatal("Job state recovered incompatable with " + "bluegene.conf. bp=%u state=%d", + node_record_count, + bg_info_record->bp_inx[j+1]); + } + bit_nset(node_bitmap, + bg_info_record->bp_inx[j], + bg_info_record->bp_inx[j+1]); + j += 2; + } + j = 0; while(bg_info_record->ionode_inx[j] >= 0) { if (bg_info_record->ionode_inx[j+1] @@ -434,9 +470,10 @@ extern int select_p_state_restore(char *dir_name) while((bg_record = list_next(itr))) { if(bit_equal(bg_record->bitmap, node_bitmap) && bit_equal(bg_record->ionode_bitmap, - ionode_bitmap)) + ionode_bitmap)) break; } + list_iterator_reset(itr); if(bg_record) { slurm_mutex_lock(&block_state_mutex); @@ -456,9 +493,10 @@ extern int select_p_state_restore(char *dir_name) continue; #endif if(bluegene_layout_mode != LAYOUT_DYNAMIC) { - error("Only adding state save blocks in " - "Dynamic block creation Mode not " - "adding %s", + error("Evidently we found a block (%s) which " + "we had before but no longer care about. " + "We are not adding it since we aren't " + "using Dynamic mode", bg_info_record->bg_block_id); continue; } @@ -492,7 +530,7 @@ extern int select_p_state_restore(char *dir_name) bg_record->conn_type = bg_info_record->conn_type; bg_record->boot_state = 0; - process_nodes(bg_record); + process_nodes(bg_record, true); slurm_conf_lock(); bg_record->target_name = @@ -542,13 +580,18 @@ extern int select_p_state_restore(char *dir_name) xfree(name); if(strcmp(temp, bg_record->nodes)) { +#ifdef HAVE_BG_FILES fatal("given list of %s " "but allocated %s, " "your order might be " - "wrong in the " - "bluegene.conf", - bg_record->nodes, - temp); + "wrong in bluegene.conf", + bg_record->nodes, temp); +#else + fatal("bad wiring in preserved state " + "(found %s, but allocated %s) " + "YOU MUST COLDSTART", + bg_record->nodes, temp); +#endif } if(bg_record->bg_block_list) list_destroy(bg_record->bg_block_list); @@ -566,7 +609,9 @@ extern int select_p_state_restore(char *dir_name) FREE_NULL_BITMAP(node_bitmap); list_iterator_destroy(itr); + slurm_mutex_lock(&block_state_mutex); sort_bg_record_inc_size(bg_list); + slurm_mutex_unlock(&block_state_mutex); info("Recovered %d blocks", blocks); select_g_free_node_info(&node_select_ptr); @@ -599,33 +644,52 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt) * identify the nodes which "best" satify the request. The specified * nodes may be DOWN or BUSY at the time of this test as may be used * to deterime if a job could ever run. - * IN job_ptr - pointer to job being scheduled + * IN/OUT job_ptr - pointer to job being scheduled start_time is set + * when we can possibly start job. 
* IN/OUT bitmap - usable nodes are set on input, nodes not required to * satisfy the request are cleared, other left set * IN min_nodes - minimum count of nodes * IN max_nodes - maximum count of nodes (0==don't care) * IN req_nodes - requested (or desired) count of nodes - * IN test_only - if true, only test if ever could run, not necessarily now + * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now + * SELECT_MODE_TEST_ONLY: test if job can ever run + * SELECT_MODE_WILL_RUN: determine when and where job can run * RET zero on success, EINVAL otherwise * NOTE: bitmap must be a superset of req_nodes at the time that * select_p_job_test is called */ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, - uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes, bool test_only) + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int mode) { - /* bg block test - is there a block where we have: + /* submit_job - is there a block where we have: * 1) geometry requested * 2) min/max nodes (BPs) requested * 3) type: TORUS or MESH or NAV (torus else mesh) - * 4) use: VIRTUAL or COPROCESSOR * * note: we don't have to worry about security at this level * as the SLURM block logic will handle access rights. */ return submit_job(job_ptr, bitmap, min_nodes, max_nodes, - req_nodes, test_only); + req_nodes, mode); +} + +/* + * select_p_job_list_test - Given a list of select_will_run_t's in + * accending priority order we will see if we can start and + * finish all the jobs without increasing the start times of the + * jobs specified and fill in the est_start of requests with no + * est_start. If you are looking to see if one job will ever run + * then use select_p_job_test instead. + * IN/OUT req_list - list of select_will_run_t's in asscending + * priority order on success of placement fill in + * est_start of request with time. + * RET zero on success, EINVAL otherwise + */ +extern int select_p_job_list_test(List req_list) +{ + return test_job_list(req_list); } extern int select_p_job_begin(struct job_record *job_ptr) @@ -648,6 +712,11 @@ extern int select_p_job_resume(struct job_record *job_ptr) return ESLURM_NOT_SUPPORTED; } +extern int select_p_get_job_cores(uint32_t job_id, int alloc_index, int s) +{ + return ESLURM_NOT_SUPPORTED; +} + extern int select_p_job_ready(struct job_record *job_ptr) { #ifdef HAVE_BG_FILES @@ -1049,7 +1118,14 @@ extern int select_p_get_extra_jobinfo (struct node_record *node_ptr, enum select_data_info info, void *data) { - return SLURM_SUCCESS; + if (info == SELECT_AVAIL_CPUS) { + /* Needed to track CPUs allocated to jobs on whole nodes + * for sched/wiki2 (Moab scheduler). 
Small block allocations + * handled through use of job_ptr->num_procs in slurmctld */ + uint16_t *cpus_per_bp = (uint16_t *) data; + *cpus_per_bp = procs_per_node; + } + return SLURM_SUCCESS; } extern int select_p_get_info_from_plugin (enum select_data_info info, @@ -1091,15 +1167,32 @@ extern int select_p_update_node_state (int index, uint16_t state) extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data) { job_desc_msg_t *job_desc = (job_desc_msg_t *)data; - uint32_t *nodes = (uint32_t *)data; - int tmp, i; + uint32_t *nodes = (uint32_t *)data, tmp; + int i; uint16_t req_geometry[BA_SYSTEM_DIMENSIONS]; + if(!bluegene_bp_node_cnt) { + fatal("select_g_alter_node_cnt: This can't be called " + "before select_g_block_init"); + } + switch (type) { case SELECT_GET_NODE_SCALING: if((*nodes) != INFINITE) (*nodes) = bluegene_bp_node_cnt; break; + case SELECT_SET_BP_CNT: + if(((*nodes) == INFINITE) || ((*nodes) == NO_VAL)) + tmp = (*nodes); + else if((*nodes) > bluegene_bp_node_cnt) { + tmp = (*nodes); + tmp /= bluegene_bp_node_cnt; + if(tmp < 1) + tmp = 1; + } else + tmp = 1; + (*nodes) = tmp; + break; case SELECT_APPLY_NODE_MIN_OFFSET: if((*nodes) == 1) { /* Job will actually get more than one c-node, @@ -1151,15 +1244,15 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data) /* See if min_nodes is greater than one base partition */ if(job_desc->min_nodes > bluegene_bp_node_cnt) { /* - if it is make sure it is a factor of - bluegene_bp_node_cnt, if it isn't make it - that way - */ + * if it is make sure it is a factor of + * bluegene_bp_node_cnt, if it isn't make it + * that way + */ tmp = job_desc->min_nodes % bluegene_bp_node_cnt; if(tmp > 0) job_desc->min_nodes += (bluegene_bp_node_cnt-tmp); - } + } tmp = job_desc->min_nodes / bluegene_bp_node_cnt; /* this means it is greater or equal to one bp */ @@ -1235,3 +1328,18 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data) return SLURM_SUCCESS; } + +extern int select_p_reconfigure(void) +{ + return SLURM_SUCCESS; +} + +extern int select_p_step_begin(struct step_record *step_ptr) +{ + return SLURM_SUCCESS; +} + +extern int select_p_step_fini(struct step_record *step_ptr) +{ + return SLURM_SUCCESS; +} diff --git a/src/plugins/select/bluegene/plugin/sfree.c b/src/plugins/select/bluegene/plugin/sfree.c index 98c41d2b0..0e49f57f1 100644 --- a/src/plugins/select/bluegene/plugin/sfree.c +++ b/src/plugins/select/bluegene/plugin/sfree.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * sfree.c - free specified block or all blocks. - * $Id: sfree.c 12858 2007-12-19 20:15:32Z da $ + * $Id: sfree.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/select/bluegene/plugin/sfree.h b/src/plugins/select/bluegene/plugin/sfree.h index 58dbfef0c..f8bad5a1c 100644 --- a/src/plugins/select/bluegene/plugin/sfree.h +++ b/src/plugins/select/bluegene/plugin/sfree.h @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
* Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/select/bluegene/plugin/slurm_epilog.c b/src/plugins/select/bluegene/plugin/slurm_epilog.c index 4e6115600..a281e5e73 100644 --- a/src/plugins/select/bluegene/plugin/slurm_epilog.c +++ b/src/plugins/select/bluegene/plugin/slurm_epilog.c @@ -3,12 +3,12 @@ * owned by this user. This is executed via SLURM to synchronize the * user's job execution with slurmctld configuration of partitions. * - * $Id: slurm_epilog.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: slurm_epilog.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/select/bluegene/plugin/slurm_prolog.c b/src/plugins/select/bluegene/plugin/slurm_prolog.c index 992bd8852..dbe8a9332 100644 --- a/src/plugins/select/bluegene/plugin/slurm_prolog.c +++ b/src/plugins/select/bluegene/plugin/slurm_prolog.c @@ -2,13 +2,11 @@ * slurm_ prolog.c - Wait until the specified partition is ready and owned by * this user. This is executed via SLURM to synchronize the user's job * execution with slurmctld configuration of partitions. - * - * $Id: slurm_prolog.c 10574 2006-12-15 23:38:29Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -61,15 +59,15 @@ /* * Check the bgblock's status every POLL_SLEEP seconds. - * Retry for a period of MIN_FREE_PERVIOUS_BLOCK_DELAY + MIN_DELAY + - * (INCR_DELAY * POLL_SLEEP * base partition count). - * For example if MIN_DELAY=300 and INCR_DELAY=20 and POLL_SLEEP=3, - * wait up to 1260 seconds. - * For a 16 base partition bgblock to be ready (300 + (20 * 3 * 16). 
+ * Retry for a period of + * MIN_FREE_PERVIOUS_BLOCK_DELAY + MIN_DELAY + (INCR_DELAY * base partition count) + * For example if MIN_FREE_PERVIOUS_BLOCK_DELAY=300 and MIN_DELAY=600 and + * INCR_DELAY=20 and job_size=4 base partitions then wait up to 980 seconds + * 300 + 600 + (20 * 4) */ #define POLL_SLEEP 3 /* retry interval in seconds */ #define MIN_FREE_PERVIOUS_BLOCK_DELAY 300 /* time in seconds */ -#define MIN_DELAY 300 /* time in seconds */ +#define MIN_DELAY 600 /* time in seconds */ #define INCR_DELAY 20 /* time in seconds per BP */ int max_delay = MIN_DELAY + MIN_FREE_PERVIOUS_BLOCK_DELAY; @@ -115,7 +113,7 @@ static int _wait_part_ready(uint32_t job_id) { int is_ready = 0, i, rc; - max_delay = MIN_DELAY + MIN_FREE_PERVIOUS_BLOCK_DELAY + + max_delay = MIN_FREE_PERVIOUS_BLOCK_DELAY + MIN_DELAY + (INCR_DELAY * _get_job_size(job_id)); #if _DEBUG diff --git a/src/plugins/select/bluegene/plugin/state_test.c b/src/plugins/select/bluegene/plugin/state_test.c index bb8a497af..ac651efa0 100644 --- a/src/plugins/select/bluegene/plugin/state_test.c +++ b/src/plugins/select/bluegene/plugin/state_test.c @@ -2,7 +2,7 @@ * state_test.c - Test state of Bluegene base partitions and switches. * DRAIN nodes in SLURM that are not usable. * - * $Id: state_test.c 11400 2007-04-24 18:50:38Z da $ + * $Id: state_test.c 12928 2007-12-28 21:59:29Z da $ ***************************************************************************** * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -309,6 +309,7 @@ extern bool node_already_down(char *node_name) { uint16_t base_state; struct node_record *node_ptr = find_node_record(node_name); + if (node_ptr) { base_state = node_ptr->node_state & (~NODE_STATE_NO_RESPOND); diff --git a/src/plugins/select/cons_res/Makefile.am b/src/plugins/select/cons_res/Makefile.am index cb6bd89e2..fc88fa6fe 100644 --- a/src/plugins/select/cons_res/Makefile.am +++ b/src/plugins/select/cons_res/Makefile.am @@ -12,4 +12,3 @@ pkglib_LTLIBRARIES = select_cons_res.la select_cons_res_la_SOURCES = select_cons_res.c select_cons_res.h \ dist_tasks.c dist_tasks.h select_cons_res_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -select_cons_res_la_LIBADD = $(top_builddir)/src/common/libcommon.la diff --git a/src/plugins/select/cons_res/Makefile.in b/src/plugins/select/cons_res/Makefile.in index 29b889301..4e1ec9662 100644 --- a/src/plugins/select/cons_res/Makefile.in +++ b/src/plugins/select/cons_res/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
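
To make the revised prolog timeout above concrete, here is a small standalone example of the max_delay computation performed in _wait_part_ready(), using the patch's new constants verbatim; the job size of 4 base partitions is just an example value, and this program is illustrative, not part of the patch.

#include <stdio.h>

#define POLL_SLEEP			3	/* retry interval in seconds */
#define MIN_FREE_PERVIOUS_BLOCK_DELAY	300	/* time in seconds */
#define MIN_DELAY			600	/* time in seconds */
#define INCR_DELAY			20	/* time in seconds per BP */

int main(void)
{
	int job_size = 4;	/* base partitions (example) */
	int max_delay = MIN_FREE_PERVIOUS_BLOCK_DELAY + MIN_DELAY +
			(INCR_DELAY * job_size);

	/* 300 + 600 + (20 * 4) = 980 seconds, polled every 3 seconds */
	printf("wait up to %d s, checking every %d s\n", max_delay, POLL_SLEEP);
	return 0;
}
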
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,14 +75,13 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -select_cons_res_la_DEPENDENCIES = \ - $(top_builddir)/src/common/libcommon.la +select_cons_res_la_LIBADD = am_select_cons_res_la_OBJECTS = select_cons_res.lo dist_tasks.lo select_cons_res_la_OBJECTS = $(am_select_cons_res_la_OBJECTS) select_cons_res_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(select_cons_res_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -120,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -263,7 +275,6 @@ select_cons_res_la_SOURCES = select_cons_res.c select_cons_res.h \ dist_tasks.c dist_tasks.h select_cons_res_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -select_cons_res_la_LIBADD = $(top_builddir)/src/common/libcommon.la all: all-am .SUFFIXES: @@ -303,8 +314,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) 
$(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -312,8 +323,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -368,8 +379,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -381,8 +392,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -392,13 +403,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c index 1f6e1f403..1a9dc8492 100644 --- a/src/plugins/select/cons_res/dist_tasks.c +++ b/src/plugins/select/cons_res/dist_tasks.c @@ -6,7 +6,7 @@ ***************************************************************************** * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -56,121 +56,24 @@ * CPUs/Logical processors resources we keep the initial set of * resources. * - * IN job_ptr - pointer to job being scheduled + * IN/OUT job_ptr - pointer to job being scheduled. The per-node + * job->alloc_cpus array is computed here. 
+ * */ -int compute_c_b_task_dist(struct select_cr_job *job, - const select_type_plugin_info_t cr_type, - const uint16_t fast_schedule) +int compute_c_b_task_dist(struct select_cr_job *job) { int i, j, rc = SLURM_SUCCESS; - uint16_t avail_cpus = 0, cpus, sockets, cores, threads; bool over_subscribe = false; uint32_t taskid = 0, last_taskid, maxtasks = job->nprocs; for (j = 0; (taskid < maxtasks); j++) { /* cycle counter */ bool space_remaining = false; last_taskid = taskid; - for (i = 0; - ((i < job->nhosts) && (taskid < maxtasks)); i++) { - struct node_cr_record *this_node; - this_node = find_cr_node_record (job->host[i]); - if (this_node == NULL) { - error(" cons_res: could not find node %s", - job->host[i]); - return SLURM_ERROR; - } - - switch(cr_type) { - case CR_MEMORY: - if (fast_schedule) { - avail_cpus = this_node->node_ptr->config_ptr->cpus; - } else { - avail_cpus = this_node->node_ptr->cpus; - } - case CR_CPU: - case CR_CPU_MEMORY: - if (fast_schedule) { - avail_cpus = this_node->node_ptr->config_ptr->cpus; - } else { - avail_cpus = this_node->node_ptr->cpus; - } - avail_cpus -= this_node->alloc_lps; - avail_cpus = MIN(job->cpus[i], avail_cpus); - break; - case CR_SOCKET: - case CR_SOCKET_MEMORY: - { - uint16_t alloc_sockets = 0; - uint16_t alloc_lps = 0; - get_resources_this_node(&cpus, &sockets, - &cores, &threads, - this_node, &alloc_sockets, - &alloc_lps, &job->job_id); - - avail_cpus = slurm_get_avail_procs(job->max_sockets, - job->max_cores, - job->max_threads, - job->min_sockets, - job->min_cores, - job->cpus_per_task, - job->ntasks_per_node, - job->ntasks_per_socket, - job->ntasks_per_core, - &cpus, - &sockets, - &cores, - &threads, - alloc_sockets, - this_node->alloc_cores, - alloc_lps, - cr_type, - job->job_id, - this_node->node_ptr->name); - break; - } - case CR_CORE: - case CR_CORE_MEMORY: - { - uint16_t alloc_sockets = 0; - uint16_t alloc_lps = 0; - get_resources_this_node(&cpus, &sockets, - &cores, &threads, - this_node, &alloc_sockets, - &alloc_lps, &job->job_id); - - avail_cpus = slurm_get_avail_procs(job->max_sockets, - job->max_cores, - job->max_threads, - job->min_sockets, - job->min_cores, - job->cpus_per_task, - job->ntasks_per_node, - job->ntasks_per_socket, - job->ntasks_per_core, - &cpus, - &sockets, - &cores, - &threads, - alloc_sockets, - this_node->alloc_cores, - alloc_lps, - cr_type, - job->job_id, - this_node->node_ptr->name); - break; - } - default: - /* We should never get in here. 
If we - do it is a bug */ - error (" cr_type not recognized "); - return SLURM_ERROR; - break; - } - avail_cpus = MIN(avail_cpus, job->cpus[i]); - if ((j < avail_cpus) || over_subscribe) { + for (i = 0; ((i < job->nhosts) && (taskid < maxtasks)); i++) { + if ((j < job->cpus[i]) || over_subscribe) { taskid++; - job->alloc_lps[i]++; - if ((j + 1) < avail_cpus) + job->alloc_cpus[i]++; + if ((j + 1) < job->cpus[i]) space_remaining = true; } } @@ -178,250 +81,282 @@ int compute_c_b_task_dist(struct select_cr_job *job, over_subscribe = true; if (last_taskid == taskid) { /* avoid infinite loop */ - fatal("compute_c_b_task_dist failure"); + error("compute_c_b_task_dist failure"); + rc = SLURM_ERROR; + break; } } #if (CR_DEBUG) for (i = 0; i < job->nhosts; i++) { - info("cons_res _c_b_task_dist %u host %s nprocs %u maxtasks %u cpus %u alloc_lps %u", - job->job_id, job->host[i], job->nprocs, - maxtasks, job->cpus[i], job->alloc_lps[i]); + info("cons_res _c_b_task_dist %u host_index %d nprocs %u " + "maxtasks %u cpus %u alloc_cpus %u", + job->job_id, i, job->nprocs, + maxtasks, job->cpus[i], job->alloc_cpus[i]); } #endif return rc; } +/* scan all rows looking for the best fit, and return the offset */ +static int _find_offset(struct select_cr_job *job, const int job_index, + uint16_t cores, uint16_t sockets, uint32_t maxcores, + const select_type_plugin_info_t cr_type, + struct node_cr_record *this_cr_node) +{ + struct part_cr_record *p_ptr; + int i, j, index, offset, skip; + uint16_t acores, asockets, freecpus, last_freecpus = 0; + struct multi_core_data *mc_ptr; + + p_ptr = get_cr_part_ptr(this_cr_node, job->job_ptr->part_ptr); + if (p_ptr == NULL) + abort(); + mc_ptr = job->job_ptr->details->mc_ptr; + + index = -1; + for (i = 0; i < p_ptr->num_rows; i++) { + acores = 0; + asockets = 0; + skip = 0; + offset = i * this_cr_node->sockets; + for (j = 0; j < this_cr_node->sockets; j++) { + if ((cores - p_ptr->alloc_cores[offset+j]) < + mc_ptr->min_cores) { + /* count the number of unusable sockets */ + skip++; + acores += cores; + } else { + acores += p_ptr->alloc_cores[offset+j]; + } + if (p_ptr->alloc_cores[offset+j]) + asockets++; + } + /* make sure we have the required number of usable sockets */ + if (skip && ((sockets - skip) < mc_ptr->min_sockets)) + continue; + /* CR_SOCKET needs UNALLOCATED sockets */ + if ((cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) { + if (sockets - asockets < mc_ptr->min_sockets) + continue; + } + + freecpus = (cores * sockets) - acores; + if (freecpus < maxcores) + continue; + + if (index < 0) { + index = i; + last_freecpus = freecpus; + } + if (freecpus < last_freecpus) { + index = i; + last_freecpus = freecpus; + } + } + if (index < 0) { + /* This may happen if a node has fewer nodes than + * configured and FastSchedule=2 */ + error("job_assign_task: failure in computing offset"); + index = 0; + } + + return index * this_cr_node->sockets; +} + /* _job_assign_tasks: Assign tasks to hardware for block and cyclic * distributions */ -void _job_assign_tasks(struct select_cr_job *job, - struct node_cr_record *this_cr_node, - const uint16_t usable_threads, - const uint16_t usable_cores, - const uint16_t usable_sockets, - const int job_index, - const uint32_t maxtasks, - const select_type_plugin_info_t cr_type) +static int _job_assign_tasks(struct select_cr_job *job, + struct node_cr_record *this_cr_node, + const int job_index, + const select_type_plugin_info_t cr_type, + const int cyclic) { - int i, j; - uint16_t nsockets = this_cr_node->node_ptr->sockets; - 
uint16_t acores, avail_cores[nsockets]; - uint16_t asockets, avail_sockets[nsockets]; - uint32_t taskcount = 0, last_taskcount; - uint16_t ncores = 0, total = 0; - - debug3("job_assign_task %u s_ m %u u %u c_ u %u min %u" - " t_ u %u min %u task %u ", - job->job_id, job->min_sockets, usable_sockets, - job->min_cores, usable_cores, job->min_threads, - usable_threads, maxtasks); + int i, j, rc = SLURM_SUCCESS; + uint16_t cores, cpus, sockets, threads; + uint16_t usable_cores, usable_sockets, usable_threads; + uint16_t *avail_cores = NULL; + uint32_t corecount, last_corecount; + uint16_t asockets, offset, total; + uint32_t maxcores, reqcores, maxtasks = job->alloc_cpus[job_index]; + struct part_cr_record *p_ptr; + struct multi_core_data *mc_ptr; + + p_ptr = get_cr_part_ptr(this_cr_node, job->job_ptr->part_ptr); + if (p_ptr == NULL) + return SLURM_ERROR; - for (i=0; i < nsockets; i++) { - avail_cores[i] = 0; - avail_sockets[i] = 0; + if ((job->job_ptr == NULL) || (job->job_ptr->details == NULL)) { + /* This should never happen */ + error("cons_res: job %u has no details", job->job_id); + return SLURM_ERROR; + } + if (!job->job_ptr->details->mc_ptr) + job->job_ptr->details->mc_ptr = create_default_mc(); + mc_ptr = job->job_ptr->details->mc_ptr; + + /* get hardware info for this node */ + get_resources_this_node(&cpus, &sockets, &cores, &threads, + this_cr_node, job->job_id); + + /* compute any job limits */ + usable_sockets = MIN(mc_ptr->max_sockets, sockets); + usable_cores = MIN(mc_ptr->max_cores, cores); + usable_threads = MIN(mc_ptr->max_threads, threads); + + /* determine the number of required cores. When multiple threads + * are available, the maxtasks value may not reflect the requested + * core count, which is what we are seeking here. */ + if (job->job_ptr->details->overcommit) { + maxcores = 1; + reqcores = 1; + } else { + maxcores = maxtasks / usable_threads; + while ((maxcores * usable_threads) < maxtasks) + maxcores++; + reqcores = mc_ptr->min_cores * mc_ptr->min_sockets; + if (maxcores < reqcores) + maxcores = reqcores; } + offset = _find_offset(job, job_index, cores, sockets, maxcores, cr_type, + this_cr_node); + job->node_offset[job_index] = offset; + + debug3("job_assign_task %u s_ min %u u %u c_ min %u u %u" + " t_ min %u u %u task %u core %u offset %u", + job->job_id, mc_ptr->min_sockets, usable_sockets, + mc_ptr->min_cores, usable_cores, mc_ptr->min_threads, + usable_threads, maxtasks, maxcores, offset); + + avail_cores = xmalloc(sizeof(uint16_t) * sockets); + /* initialized to zero by xmalloc */ + total = 0; asockets = 0; - for (i=0; i<nsockets; i++) { - if ((total >= maxtasks) && (asockets >= job->min_sockets)) { + for (i = 0; i < sockets; i++) { + if ((total >= maxcores) && (asockets >= mc_ptr->min_sockets)) { break; } - if (this_cr_node->node_ptr->cores <= - this_cr_node->alloc_cores[i]) { + if (this_cr_node->cores <= p_ptr->alloc_cores[offset+i]) { continue; } - acores = this_cr_node->node_ptr->cores - - this_cr_node->alloc_cores[i]; - if (usable_cores <= acores) { - ncores = usable_cores; - } else if (job->min_cores <= acores) { - ncores = job->min_cores; - } else { - ncores = 0; + /* for CR_SOCKET, we only want to allocate empty sockets */ + if ((cr_type == CR_SOCKET || cr_type == CR_SOCKET_MEMORY) && + (p_ptr->alloc_cores[offset+i] > 0)) + continue; + avail_cores[i] = this_cr_node->cores - + p_ptr->alloc_cores[offset+i]; + if (usable_cores <= avail_cores[i]) { + avail_cores[i] = usable_cores; + } else if (mc_ptr->min_cores > avail_cores[i]) { + avail_cores[i] = 0; } 
- if (ncores > 0) { - avail_cores[i] = ncores; - avail_sockets[i] = 1; - total += ncores*usable_threads; + if (avail_cores[i] > 0) { + total += avail_cores[i]; asockets++; } } +#if(CR_DEBUG) + for (i = 0; i < sockets; i+=2) { + info("cons_res: assign_task: avail_cores[%d]=%u, [%d]=%u", i, + avail_cores[i], i+1, avail_cores[i+1]); + } +#endif if (asockets == 0) { /* Should never get here but just in case */ error("cons_res: %u Zero sockets satisfy" " request -B %u:%u: Using alternative strategy", - job->job_id, job->min_sockets, job->min_cores); - for (i=0; i < nsockets; i++) { - if (this_cr_node->node_ptr->cores <= - this_cr_node->alloc_cores[i]) + job->job_id, mc_ptr->min_sockets, mc_ptr->min_cores); + for (i = 0; i < sockets; i++) { + if (this_cr_node->cores <= p_ptr->alloc_cores[offset+i]) continue; - acores = this_cr_node->node_ptr->cores - - this_cr_node->alloc_cores[i]; - avail_cores[i] = acores; - avail_sockets[i] = 1; + avail_cores[i] = this_cr_node->cores - + p_ptr->alloc_cores[offset+i]; } } - if (asockets < job->min_sockets) { - error("cons_res: %u maxtasks %u Cannot satisfy" + if (asockets < mc_ptr->min_sockets) { + error("cons_res: %u maxcores %u Cannot satisfy" " request -B %u:%u: Using -B %u:%u", - job->job_id, maxtasks, job->min_sockets, - job->min_cores, asockets, job->min_cores); + job->job_id, maxcores, mc_ptr->min_sockets, + mc_ptr->min_cores, asockets, mc_ptr->min_cores); } - for (i=0; taskcount<maxtasks; i++) { - last_taskcount = taskcount; - for (j=0; ((j<nsockets) && (taskcount<maxtasks)); j++) { - asockets = avail_sockets[j]; - if (asockets == 0) - continue; + corecount = 0; + if (cyclic) { + /* distribute tasks cyclically across the sockets */ + for (i=1; corecount<maxcores; i++) { + last_corecount = corecount; + for (j=0; ((j<sockets) && (corecount<maxcores)); j++) { + if (avail_cores[j] == 0) + continue; + if (i<=avail_cores[j]) { + job->alloc_cores[job_index][j]++; + corecount++; + } + } + if (last_corecount == corecount) { + /* Avoid possible infinite loop on error */ + error("_job_assign_tasks failure"); + rc = SLURM_ERROR; + goto fini; + } + } + } else { + /* distribute tasks in blocks across the sockets */ + for (j=0; ((j<sockets) && (corecount<maxcores)); j++) { + last_corecount = corecount; if (avail_cores[j] == 0) continue; - if (i == 0) - job->alloc_sockets[job_index]++; - if (i<avail_cores[j]) + for (i = 0; (i < avail_cores[j]) && + (corecount<maxcores); i++) { job->alloc_cores[job_index][j]++; - taskcount++; - } - if (last_taskcount == taskcount) { - /* Avoid possible infinite loop on error */ - fatal("_job_assign_tasks failure"); + corecount++; + } + if (last_corecount == corecount) { + /* Avoid possible infinite loop on error */ + error("_job_assign_tasks failure"); + rc = SLURM_ERROR; + goto fini; + } } } + fini: xfree(avail_cores); + return rc; } -/* _job_assign_tasks: Assign tasks to hardware for block and cyclic - * distributions */ -void _job_assign_tasks_plane(struct select_cr_job *job, - struct node_cr_record *this_cr_node, - const uint16_t usable_threads, - const uint16_t usable_cores, - const uint16_t usable_sockets, - const int job_index, - const uint32_t maxtasks, - const uint16_t plane_size, - const select_type_plugin_info_t cr_type) +static uint16_t _get_cpu_offset(struct select_cr_job *job, int index, + struct node_cr_record *this_node) { - int s, l, m, i, j; - uint16_t nsockets = this_cr_node->node_ptr->sockets; - uint16_t avail_cores[nsockets]; - uint16_t avail_sockets[nsockets]; - uint32_t taskcount, last_taskcount; - uint16_t 
total, ncores, acores, isocket; - uint16_t core_index, thread_index, ucores; - uint16_t max_plane_size = 0; - int last_socket_index = -1; - - debug3("job_assign_task %u _plane_ s_ m %u u %u c_ u %u" - " min %u t_ u %u min %u task %u", - job->job_id, job->min_sockets, usable_sockets, - job->min_cores, usable_cores, job->min_threads, - usable_threads, maxtasks); - - for (i=0; i < nsockets; i++) { - avail_cores[i] = 0; - avail_sockets[i] = 0; - } - - total = 0; - isocket = 0; - for (i=0; i<nsockets; i++) { - if ((total >= maxtasks) && (isocket >= job->min_sockets)) { - break; - } - /* sockets with the required available core count */ - if (this_cr_node->node_ptr->cores <= - this_cr_node->alloc_cores[i]) { - continue; - } - acores = this_cr_node->node_ptr->cores - - this_cr_node->alloc_cores[i]; - if (plane_size <= acores) { - ncores = plane_size; - } else if (usable_cores <= acores) { - ncores = usable_cores; - } else if (job->min_cores <= acores) { - ncores = job->min_cores; - } else { - ncores = 0; - } - if (ncores > 0) { - avail_cores[i] = ncores; - avail_sockets[i] = 1; - total += ncores*usable_threads; - isocket++; - } - } - - if (isocket == 0) { - /* Should never get here but just in case */ - error("cons_res: %u Zero sockets satisfy request" - " -B %u:%u: Using alternative strategy", - job->job_id, job->min_sockets, job->min_cores); - for (i=0; i < nsockets; i++) { - if (this_cr_node->node_ptr->cores <= - this_cr_node->alloc_cores[i]) - continue; - acores = this_cr_node->node_ptr->cores - - this_cr_node->alloc_cores[i]; - avail_cores[i] = acores; - avail_sockets[i] = 1; - } - } - - if (isocket < job->min_sockets) - error("cons_res: %u maxtasks %d Cannot satisfy" - " request -B %u:%u: Using -B %u:%u", - job->job_id, maxtasks, job->min_sockets, - job->min_cores, isocket, job->min_cores); - - last_socket_index = -1; - taskcount = 0; - for (j=0; taskcount<maxtasks; j++) { - last_taskcount = taskcount; - for (s=0; ((s<nsockets) && (taskcount<maxtasks)); - s++) { - if (avail_sockets[s] == 0) - continue; - ucores = avail_cores[s]; - max_plane_size = - (plane_size > ucores) - ? 
plane_size : ucores; - for (m=0; ((m<max_plane_size) - && (taskcount<maxtasks)); - m++) { - core_index = m%ucores; - if(m > ucores) - continue; - for(l=0; ((l<usable_threads) - && (taskcount<maxtasks)); - l++) { - thread_index = - l%usable_threads; - if(thread_index > usable_threads) - continue; - if (last_socket_index != s) { - job->alloc_sockets [job_index]++; - last_socket_index = s; - } - if ((l == 0) && (m < ucores)) { - if (job->alloc_cores[job_index][s] - < this_cr_node->node_ptr->cores) { - job->alloc_cores[job_index][s]++; - } - } - taskcount++; - } + int i, set = 0; + uint16_t cpus, sockets, cores, threads, besto = 0, offset = 0; + struct part_cr_record *p_ptr; + + p_ptr = get_cr_part_ptr(this_node, job->job_ptr->part_ptr); + if ((p_ptr == NULL) || (p_ptr->num_rows < 2)) + return offset; + + get_resources_this_node(&cpus, &sockets, &cores, &threads, + this_node, job->job_id); + /* scan all rows looking for the best row for job->alloc_cpus[index] */ + for (i = 0; i < p_ptr->num_rows; i++) { + if ((cpus - p_ptr->alloc_cores[offset]) >= + job->alloc_cpus[index]) { + if (!set) { + set = 1; + besto = offset; + } + if (p_ptr->alloc_cores[offset] > + p_ptr->alloc_cores[besto]) { + besto = offset; } } - if (last_taskcount == taskcount) { - /* avoid possible infinite loop on error */ - fatal("job_assign_task failure"); - } + offset += this_node->sockets; } + return besto; } /* To effectively deal with heterogeneous nodes, we fake a cyclic @@ -445,178 +380,69 @@ void _job_assign_tasks_plane(struct select_cr_job *job, * In the consumable resources environment we need to determine the * layout schema within slurmctld. */ -int cr_dist(struct select_cr_job *job, int cyclic, - const select_type_plugin_info_t cr_type, - const uint16_t fast_schedule) +extern int cr_dist(struct select_cr_job *job, int cyclic, + const select_type_plugin_info_t cr_type) { -#if(CR_DEBUG) - int i; -#endif - int j, rc = SLURM_SUCCESS; + int i, cr_cpu = 0, rc = SLURM_SUCCESS; uint32_t taskcount = 0; - uint32_t maxtasks = job->nprocs; int host_index; - uint16_t usable_cpus = 0; - uint16_t usable_sockets = 0, usable_cores = 0, usable_threads = 0; - int last_socket_index = -1; - int last_core_index = -1; int job_index = -1; - int error_code = compute_c_b_task_dist(job, cr_type, fast_schedule); + int error_code = compute_c_b_task_dist(job); if (error_code != SLURM_SUCCESS) { error(" Error in compute_c_b_task_dist"); return error_code; } - if ((cr_type == CR_CPU) - || (cr_type == CR_MEMORY) - || (cr_type == CR_CPU_MEMORY)) - return SLURM_SUCCESS; + if ((cr_type == CR_CPU) || (cr_type == CR_MEMORY) || + (cr_type == CR_CPU_MEMORY)) + cr_cpu = 1; for (host_index = 0; ((host_index < node_record_count) && (taskcount < job->nprocs)); host_index++) { struct node_cr_record *this_cr_node; - uint16_t alloc_sockets = 0; - uint16_t alloc_lps = 0; - uint16_t avail_cpus = 0; + if (bit_test(job->node_bitmap, host_index) == 0) continue; job_index++; - this_cr_node = find_cr_node_record( - node_record_table_ptr[host_index].name); - if (this_cr_node == NULL) { - error(" cons_res: could not find node %s", - node_record_table_ptr[host_index].name); + if (select_node_ptr == NULL) { + error("cons_res: select_node_ptr is NULL"); return SLURM_ERROR; } - - get_resources_this_node(&usable_cpus, &usable_sockets, - &usable_cores, &usable_threads, - this_cr_node, &alloc_sockets, - &alloc_lps, &job->job_id); + this_cr_node = &select_node_ptr[host_index]; - avail_cpus = slurm_get_avail_procs(job->max_sockets, - job->max_cores, - job->max_threads, - 
job->min_sockets, - job->min_cores, - job->cpus_per_task, - job->ntasks_per_node, - job->ntasks_per_socket, - job->ntasks_per_core, - &usable_cpus, - &usable_sockets, - &usable_cores, - &usable_threads, - alloc_sockets, - this_cr_node->alloc_cores, - alloc_lps, - cr_type, - job->job_id, - this_cr_node->node_ptr->name); - avail_cpus = MIN(avail_cpus, job->cpus[job_index]); - -#if(CR_DEBUG) - info("cons_res: _cr_dist %u avail_s %u _c %u _t %u" - " alloc_s %d lps %u", - job->job_id, usable_sockets, usable_cores, - usable_threads, - alloc_sockets, alloc_lps); - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) - for(i=0; i<usable_sockets;i++) - info("cons_res: _cr_dist alloc_cores %d = %u", - i, this_cr_node->alloc_cores[i]); -#endif - - if (avail_cpus == 0) { - error(" cons_res: %d no available cpus on node %s " - " s %u c %u t %u", - job->job_id, node_record_table_ptr[host_index].name, - usable_sockets, usable_cores, usable_threads); + if (job->cpus[job_index] == 0) { + error("cons_res: %d no available cpus on node %s ", + job->job_id, + node_record_table_ptr[host_index].name); + continue; } - - maxtasks = job->alloc_lps[job_index]; - job->alloc_sockets[job_index] = 0; - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - for (j = 0; - j < job->num_sockets[job_index]; - j++) - job->alloc_cores[job_index][j] = 0; - } - - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - _job_assign_tasks(job, this_cr_node, - usable_threads, usable_cores, - usable_sockets, job_index, - maxtasks, cr_type); - } else if (cyclic == 0) { /* block lllp distribution */ - /* CR _SOCKET or CR_SOCKET_MEMORY */ - int s, c, t; - last_socket_index = -1; - taskcount = 0; - for (s=0; - s < usable_sockets; - s++) { - last_core_index = -1; - if (maxtasks <= taskcount) - continue; - for (c=0; - c < usable_cores; - c++) { - if (maxtasks <= taskcount) - continue; - for (t=0; - t < usable_threads; t++) { - if (maxtasks <= taskcount) - continue; - if (last_socket_index != s) { - job->alloc_sockets[job_index]++; - last_socket_index = s; - } - taskcount++; - } - } - } - } else if (cyclic == 1) { /* cyclic lllp distribution */ - /* CR_SOCKET or CR_SOCKET_MEMORY */ - int s, c, t; - int last_socket_index = 0; - taskcount = 0; - for (t=0; - t < usable_threads; t++) { - if (maxtasks <= taskcount) - continue; - for (c=0; - c < usable_cores; c++) { - if (maxtasks <= taskcount) - continue; - for (s=0; - s < usable_sockets; s++) { - if (maxtasks <= taskcount) - continue; - if (last_socket_index == 0) { - job->alloc_sockets[job_index]++; - if(s == (usable_sockets-1)) - last_socket_index = 1; - } - taskcount++; - } - } - } + + if (cr_cpu) { + /* compute the offset */ + job->node_offset[job_index] = + _get_cpu_offset(job, job_index, this_cr_node); + } else { + for (i = 0; i < job->num_sockets[job_index]; i++) + job->alloc_cores[job_index][i] = 0; + + if (_job_assign_tasks(job, this_cr_node, job_index, + cr_type, cyclic) != SLURM_SUCCESS) + return SLURM_ERROR; } - #if(CR_DEBUG) - info("cons_res _cr_dist %u host %d %s alloc_ " - "sockets %u lps %u", - job->job_id, host_index, this_cr_node->node_ptr->name, - job->alloc_sockets[job_index], job->alloc_lps[job_index]); - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) - for(i=0; i<usable_sockets;i++) - info("cons_res: _cr_dist: %u alloc_cores[%d][%d] = %u", - job->job_id, i, job_index, - job->alloc_cores[job_index][i]); + info("cons_res _cr_dist %u host %d %s alloc_cpus %u", + job->job_id, host_index, this_cr_node->node_ptr->name, + job->alloc_cpus[job_index]); + 
for(i=0; !cr_cpu && i<job->num_sockets[job_index];i+=2) { + info("cons_res: _cr_dist: %u " + "alloc_cores[%d][%d]=%u, [%d][%d]=%u", + job->job_id, + job_index, i, job->alloc_cores[job_index][i], + job_index, i+1, job->alloc_cores[job_index][i+1]); + } #endif } return rc; @@ -627,38 +453,59 @@ int cr_dist(struct select_cr_job *job, int cyclic, * case we do not need to compute the number of tasks on each nodes * since it should be set to the number of cpus. */ -int cr_exclusive_dist(struct select_cr_job *job, - const select_type_plugin_info_t cr_type) +extern int cr_exclusive_dist(struct select_cr_job *job, + const select_type_plugin_info_t cr_type) { int i, j; - int host_index = 0; + int host_index = 0, get_cores = 0; - for (i = 0; i < node_record_count; i++) { - if (bit_test(job->node_bitmap, i) == 0) - continue; - job->alloc_lps[host_index] = node_record_table_ptr[i].cpus; - job->alloc_sockets[host_index] = - node_record_table_ptr[i].sockets; - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - for (j = 0; j < node_record_table_ptr[i].sockets; j++) - job->alloc_cores[host_index][j] = - node_record_table_ptr[i].cores; + if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY) || + (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) + get_cores = 1; + + if (select_fast_schedule) { + struct config_record *config_ptr; + for (i = 0; i < node_record_count; i++) { + if (bit_test(job->node_bitmap, i) == 0) + continue; + config_ptr = node_record_table_ptr[i].config_ptr; + job->alloc_cpus[host_index] = config_ptr->cpus; + if (get_cores) { + for (j=0; j<config_ptr->sockets; + j++) { + job->alloc_cores[host_index][j] = + config_ptr->cores; + } + } + host_index++; + } + } else { + for (i = 0; i < node_record_count; i++) { + if (bit_test(job->node_bitmap, i) == 0) + continue; + job->alloc_cpus[host_index] = node_record_table_ptr[i]. 
+ cpus; + if (get_cores) { + for (j=0; j<node_record_table_ptr[i].sockets; + j++) { + job->alloc_cores[host_index][j] = + node_record_table_ptr[i].cores; + } + } + host_index++; } - host_index++; } return SLURM_SUCCESS; } -int cr_plane_dist(struct select_cr_job *job, - const uint16_t plane_size, - const select_type_plugin_info_t cr_type) +extern int cr_plane_dist(struct select_cr_job *job, + const uint16_t plane_size, + const select_type_plugin_info_t cr_type) { - uint32_t maxtasks = job->nprocs; - uint16_t num_hosts = job->nhosts; - int i, j, k, s, m, l, host_index; - uint16_t usable_cpus, usable_sockets, usable_cores, usable_threads; + uint32_t maxtasks = job->nprocs; + uint32_t num_hosts = job->nhosts; + int i, j, k, host_index, cr_cpu = 0; uint32_t taskcount = 0, last_taskcount; - int last_socket_index = -1; int job_index = -1; bool count_done = false; @@ -683,149 +530,71 @@ int cr_plane_dist(struct select_cr_job *job, break; } taskcount++; - job->alloc_lps[i]++; + job->alloc_cpus[i]++; } } if (last_taskcount == taskcount) { /* avoid possible infinite loop on error */ - fatal("cr_plane_dist failure"); + error("cr_plane_dist failure"); + return SLURM_ERROR; } } #if(CR_DEBUG) for (i = 0; i < job->nhosts; i++) { - info("cons_res _cr_plane_dist %u host %s alloc_lps %u ", - job->job_id, job->host[i], job->alloc_lps[i]); + info("cons_res _cr_plane_dist %u host_index %d alloc_cpus %u ", + job->job_id, i, job->alloc_cpus[i]); } #endif + if ((cr_type == CR_CPU) || (cr_type == CR_MEMORY) || + (cr_type == CR_CPU_MEMORY)) + cr_cpu = 1; + taskcount = 0; for (host_index = 0; ((host_index < node_record_count) && (taskcount < job->nprocs)); host_index++) { struct node_cr_record *this_cr_node = NULL; - uint16_t alloc_sockets = 0; - uint16_t alloc_lps = 0; - uint16_t avail_cpus = 0; + if (bit_test(job->node_bitmap, host_index) == 0) continue; job_index++; - - this_cr_node = find_cr_node_record( - node_record_table_ptr[host_index].name); - if (this_cr_node == NULL) { - error("cons_res: could not find node %s", - node_record_table_ptr[host_index].name); + + if (select_node_ptr == NULL) { + error("cons_res: select_node_ptr is NULL"); return SLURM_ERROR; } + this_cr_node = &select_node_ptr[host_index]; - get_resources_this_node(&usable_cpus, &usable_sockets, - &usable_cores, &usable_threads, - this_cr_node, &alloc_sockets, - &alloc_lps, &job->job_id); - - avail_cpus = slurm_get_avail_procs(job->max_sockets, - job->max_cores, - job->max_threads, - job->min_sockets, - job->min_cores, - job->cpus_per_task, - job->ntasks_per_node, - job->ntasks_per_socket, - job->ntasks_per_core, - &usable_cpus, - &usable_sockets, - &usable_cores, - &usable_threads, - alloc_sockets, - this_cr_node->alloc_cores, - alloc_lps, - cr_type, - job->job_id, - this_cr_node->node_ptr->name); - avail_cpus = MIN(avail_cpus, job->cpus[job_index]); - if (avail_cpus == 0) { - error(" cons_res: no available cpus on node %s", + if (job->cpus[job_index] == 0) { + error("cons_res: no available cpus on node %s", node_record_table_ptr[host_index].name); + continue; } - job->alloc_sockets[job_index] = 0; - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - for (j = 0; - j < job->num_sockets[job_index]; - j++) - job->alloc_cores[job_index][j] = 0; - } - maxtasks = job->alloc_lps[job_index]; - - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - _job_assign_tasks_plane(job, this_cr_node, - usable_threads, - usable_cores, usable_sockets, - job_index, maxtasks, - plane_size, cr_type); + if (cr_cpu) { + /* compute the offset */ + 
job->node_offset[job_index] = + _get_cpu_offset(job, job_index, this_cr_node); } else { - /* CR _SOCKET or CR_SOCKET_MEMORY */ - int core_index; - int thread_index; - int max_plane_size; - last_socket_index = -1; - taskcount = 0; - for (j=0; taskcount<maxtasks; j++) { - last_taskcount = taskcount; - for (s=0; ((s<usable_sockets) && (taskcount<maxtasks)); - s++) { - max_plane_size = - (plane_size > usable_cores) - ? plane_size : usable_cores; - for (m=0; ((m<max_plane_size) && - (taskcount<maxtasks)); m++) { - core_index = m % usable_cores; - if(m > usable_cores) - continue; - for(l=0; ((l<usable_threads) && - (taskcount<maxtasks)); l++) { - thread_index = - l % usable_threads; - if(thread_index > usable_threads) - continue; - if (last_socket_index != s) { - job->alloc_sockets[job_index]++; - last_socket_index = s; - } - } - } - taskcount++; - } - if (last_taskcount == taskcount) { - /* avoid possible infinite loop on error */ - fatal("cr_plane_dist failure"); - } - } - } + for (j = 0; j < job->num_sockets[job_index]; j++) + job->alloc_cores[job_index][j] = 0; -#if(0) - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - job->alloc_lps[job_index] = 0; - for (i = 0; i < job->alloc_sockets[job_index]; i++) - job->alloc_lps[job_index] += - job->alloc_cores[job_index][i]; + if (_job_assign_tasks(job, this_cr_node, job_index, + cr_type, 0) != SLURM_SUCCESS) + return SLURM_ERROR; } -#endif - #if(CR_DEBUG) - info("cons_res _cr_plane_dist %u host %d %s alloc_ " - "s %u lps %u", - job->job_id, host_index, this_cr_node->node_ptr->name, - job->alloc_sockets[job_index], job->alloc_lps[job_index]); - int i = 0; - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - for (i = 0; i < this_cr_node->node_ptr->sockets; i++) - info("cons_res _cr_plane_dist %u host %d " - "%s alloc_cores %u", - job->job_id, host_index, - this_cr_node->node_ptr->name, - job->alloc_cores[job_index][i]); + info("cons_res _cr_plane_dist %u host %d %s alloc_cpus %u", + job->job_id, host_index, this_cr_node->node_ptr->name, + job->alloc_cpus[job_index]); + + for (i = 0; !cr_cpu && i < this_cr_node->sockets; i++) { + info("cons_res _cr_plane_dist %u host %d %s alloc_cores %u", + job->job_id, host_index, + this_cr_node->node_ptr->name, + job->alloc_cores[job_index][i]); } #endif diff --git a/src/plugins/select/cons_res/dist_tasks.h b/src/plugins/select/cons_res/dist_tasks.h index f500441b7..aea7e3f19 100644 --- a/src/plugins/select/cons_res/dist_tasks.h +++ b/src/plugins/select/cons_res/dist_tasks.h @@ -5,7 +5,7 @@ ***************************************************************************** * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
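[Editorial sketch, not part of the patch: the reworked _job_assign_tasks() above spreads a job's required cores over a node's sockets either cyclically (one core per socket per pass) or in blocks (fill a socket before moving on). The self-contained C program below models only that distribution step; the names avail, alloc, maxcores and the distribute() helper are hypothetical simplifications and ignore the partition rows, min_sockets/min_cores limits, and CR_SOCKET handling of the real plugin code.]

/* Minimal model of cyclic vs. block core distribution (assumed
 * simplification of _job_assign_tasks(); not SLURM code). */
#include <stdio.h>

static void distribute(const int *avail, int *alloc, int nsockets,
                       int maxcores, int cyclic)
{
	int assigned = 0;
	if (cyclic) {
		/* round-robin: one core from each socket that still has
		 * a core free at this pass depth */
		for (int pass = 1; assigned < maxcores; pass++) {
			int before = assigned;
			for (int s = 0; s < nsockets && assigned < maxcores; s++) {
				if (avail[s] >= pass) {
					alloc[s]++;
					assigned++;
				}
			}
			if (before == assigned)	/* nothing left to give */
				break;
		}
	} else {
		/* block: exhaust each socket before moving to the next */
		for (int s = 0; s < nsockets && assigned < maxcores; s++) {
			for (int c = 0; c < avail[s] && assigned < maxcores; c++) {
				alloc[s]++;
				assigned++;
			}
		}
	}
}

int main(void)
{
	int avail[] = {4, 2, 4};	/* free cores per socket (hypothetical node) */
	int cyc[3] = {0}, blk[3] = {0};

	distribute(avail, cyc, 3, 6, 1);
	distribute(avail, blk, 3, 6, 0);

	printf("cyclic: %d %d %d\n", cyc[0], cyc[1], cyc[2]);	/* 2 2 2 */
	printf("block : %d %d %d\n", blk[0], blk[1], blk[2]);	/* 4 2 0 */
	return 0;
}

[With the sample node above, a 6-core request lands as 2+2+2 cyclically but 4+2+0 in block mode, which is the layout difference the cyclic flag in the patched cr_dist()/_job_assign_tasks() path selects between.]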
@@ -53,15 +53,12 @@ int cr_exclusive_dist(struct select_cr_job *job, const select_type_plugin_info_t cr_type); int cr_dist(struct select_cr_job *job, int cyclic, - const select_type_plugin_info_t cr_type, - const uint16_t fast_schedule); + const select_type_plugin_info_t cr_type); int cr_plane_dist(struct select_cr_job *job, const uint16_t plane_size, const select_type_plugin_info_t cr_type); -int compute_c_b_task_dist(struct select_cr_job *job, - const select_type_plugin_info_t cr_type, - const uint16_t fast_schedule); +int compute_c_b_task_dist(struct select_cr_job *job); #endif /* !_CONS_RES_DIST_TASKS_H */ diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c index a246aefac..acca6dc9e 100644 --- a/src/plugins/select/cons_res/select_cons_res.c +++ b/src/plugins/select/cons_res/select_cons_res.c @@ -2,7 +2,7 @@ * select_cons_res.c - node selection plugin supporting consumable * resources policies. * - * $Id: select_cons_res.c 13373 2008-02-27 16:47:13Z jette $ + * $Id: select_cons_res.c 13814 2008-04-07 15:46:55Z jette $ *****************************************************************************\ * * The following example below illustrates how four jobs are allocated @@ -32,27 +32,27 @@ * * [<snip>]# squeue * JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) - * 5 lsf sleep root PD 0:00 1 (Resources) - * 2 lsf sleep root R 0:13 4 linux[01-04] - * 3 lsf sleep root R 0:09 3 linux[01-03] - * 4 lsf sleep root R 0:05 1 linux04 + * 5 lsf sleep root PD 0:00 1 (Resources) + * 2 lsf sleep root R 0:13 4 linux[01-04] + * 3 lsf sleep root R 0:09 3 linux[01-03] + * 4 lsf sleep root R 0:05 1 linux04 * [<snip>]# * * Once Job 2 finishes, Job 5, which was pending, is allocated * available resources and is then running as illustrated below: * * [<snip>]# squeue4 - * JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) - * 3 lsf sleep root R 1:58 3 linux[01-03] - * 4 lsf sleep root R 1:54 1 linux04 - * 5 lsf sleep root R 0:02 3 linux[01-03] + * JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) + * 3 lsf sleep root R 1:58 3 linux[01-03] + * 4 lsf sleep root R 1:54 1 linux04 + * 5 lsf sleep root R 0:02 3 linux[01-03] * [<snip>]# * * Job 3, Job 4, and Job 5 are now running concurrently on the cluster. * * [<snip>]# squeue4 * JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON) - * 5 lsf sleep root R 1:52 3 xc14n[13-15] + * 5 lsf sleep root R 1:52 3 xc14n[13-15] * [<snip>]# * * The advantage of the consumable resource scheduling policy is that @@ -137,333 +137,487 @@ * as 100 or 1000. Various SLURM versions will likely require a certain * minimum versions for their plugins as the node selection API matures. */ -const char plugin_name[] = - "Consumable Resources (CR) Node Selection plugin"; +const char plugin_name[] = "Consumable Resources (CR) Node Selection plugin"; const char plugin_type[] = "select/cons_res"; const uint32_t plugin_version = 90; -const uint32_t pstate_version = 3; /* version control on saved state */ +const uint32_t pstate_version = 6; /* version control on saved state */ -#define CR_JOB_STATE_SUSPENDED 1 +#define CR_JOB_ALLOCATED_CPUS 0x1 +#define CR_JOB_ALLOCATED_MEM 0x2 select_type_plugin_info_t cr_type = CR_CPU; /* cr_type is overwritten in init() */ /* Array of node_cr_record. 
One entry for each node in the cluster */ -static struct node_cr_record *select_node_ptr = NULL; +struct node_cr_record *select_node_ptr = NULL; +uint16_t select_fast_schedule; static int select_node_cnt = 0; -static struct node_cr_record **cr_node_hash_table = NULL; static time_t last_cr_update_time; static pthread_mutex_t cr_mutex = PTHREAD_MUTEX_INITIALIZER; -/* Restored node_cr_records - used by select_p_state_restore/node_init */ -static struct node_cr_record *prev_select_node_ptr = NULL; -static int prev_select_node_cnt = 0; +List select_cr_job_list = NULL; /* List of select_cr_job(s) that are still active */ +static uint32_t last_verified_job_id = 0; +/* verify the job list after every CR_VERIFY_JOB_CYCLE jobs have finished */ +#define CR_VERIFY_JOB_CYCLE 2000 + +static void _cr_job_list_del(void *x); +static int _cr_job_list_sort(void *x, void *y); +static struct node_cr_record *_dup_node_cr(struct node_cr_record *node_cr_ptr); +static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int mode, + enum node_cr_state job_node_req, + struct node_cr_record *select_node_ptr); +static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, enum node_cr_state job_node_req); + +#ifdef CR_DEBUG +static void _dump_state(struct node_cr_record *select_node_ptr) +{ + int i, j, cores; + struct part_cr_record *parts; + ListIterator job_iterator; + struct select_cr_job *job; -static uint16_t select_fast_schedule; + for (i=0; i<select_node_cnt; i++) { + info("node:%s sockets:%u alloc_memory:%u state:%d", + select_node_ptr[i].node_ptr->name, + select_node_ptr[i].sockets, + select_node_ptr[i].alloc_memory, + select_node_ptr[i].node_state); + parts = select_node_ptr[i].parts; + while (parts) { + info(" part:%s rows:%u", + parts->part_ptr->name, + parts->num_rows); + cores = select_node_ptr[i].sockets * + parts->num_rows; + for (j=0; j<cores; j++) { + info(" alloc_cores[%d]:%u", + j, parts->alloc_cores[j]); + } + parts = parts->next; + } + } -List select_cr_job_list = NULL; /* List of select_cr_job(s) that are still active */ + if (select_cr_job_list == NULL) + return; + job_iterator = list_iterator_create(select_cr_job_list); + while ((job = (struct select_cr_job *) list_next(job_iterator))) { + info("job:%u nprocs:%u nhosts:%u", + job->job_id, job->nprocs, job->nhosts); + if (job->job_ptr == NULL) + error(" job_ptr is NULL"); + else if (job->job_ptr->job_id != job->job_id) + error(" job_ptr is bad"); + for (i=0; ((i<job->nhosts)&&(i<2)); i++) { + info(" cpus:%u alloc_cpus:%u ", + job->cpus[i], job->alloc_cpus[i]); + info(" node_offset:%u", + job->node_offset[i]); + } + } + list_iterator_destroy(job_iterator); + return; +} +#endif -#if(0) -/* - * _cr_dump_hash - print the cr_node_hash_table contents, used for debugging - * or analysis of hash technique. 
See _hash_table in slurmctld/node_mgr.c - * global: select_node_ptr - table of node_cr_record - * cr_node_hash_table - table of hash indices - * Inspired from _dump_hash() in slurmctld/node_mgr.c - */ -static void _cr_dump_hash (void) +/* Create a duplicate node_cr_records structure */ +static struct node_cr_record *_dup_node_cr(struct node_cr_record *node_cr_ptr) { - int i, inx; - struct node_cr_record *this_node_ptr; + int i, j; + struct node_cr_record *new_node_cr_ptr; + struct part_cr_record *part_cr_ptr, *new_part_cr_ptr; - if (cr_node_hash_table == NULL) - return; - for (i = 0; i < select_node_cnt; i++) { - this_node_ptr = cr_node_hash_table[i]; - while (this_node_ptr) { - inx = this_node_ptr - select_node_ptr; - verbose("node_hash[%d]:%d", i, inx); - this_node_ptr = this_node_ptr->node_next; + if (node_cr_ptr == NULL) + return NULL; + + new_node_cr_ptr = xmalloc(select_node_cnt * + sizeof(struct node_cr_record)); + + for (i=0; i<select_node_cnt; i++) { + new_node_cr_ptr[i].node_ptr = select_node_ptr[i].node_ptr; + new_node_cr_ptr[i].cpus = select_node_ptr[i].cpus; + new_node_cr_ptr[i].sockets = select_node_ptr[i].sockets; + new_node_cr_ptr[i].cores = select_node_ptr[i].cores; + new_node_cr_ptr[i].threads = select_node_ptr[i].threads; + new_node_cr_ptr[i].real_memory = select_node_ptr[i].real_memory; + new_node_cr_ptr[i].alloc_memory = select_node_ptr[i].alloc_memory; + new_node_cr_ptr[i].node_state = select_node_ptr[i].node_state; + + part_cr_ptr = select_node_ptr[i].parts; + while (part_cr_ptr) { + new_part_cr_ptr = xmalloc(sizeof(struct part_cr_record)); + new_part_cr_ptr->part_ptr = part_cr_ptr->part_ptr; + new_part_cr_ptr->num_rows = part_cr_ptr->num_rows; + j = sizeof(uint16_t) * part_cr_ptr->num_rows * + select_node_ptr[i].sockets; + new_part_cr_ptr->alloc_cores = xmalloc(j); + memcpy(new_part_cr_ptr->alloc_cores, + part_cr_ptr->alloc_cores, j); + new_part_cr_ptr->next = new_node_cr_ptr[i].parts; + new_node_cr_ptr[i].parts = new_part_cr_ptr; + part_cr_ptr = part_cr_ptr->next; } } + return new_node_cr_ptr; } -#endif - -/* - * _cr_hash_index - return a hash table index for the given node name - * IN name = the node's name - * RET the hash table index - * Inspired from _hash_index(char *name) in slurmctld/node_mgr.c - */ -static int _cr_hash_index (const char *name) +static void _destroy_node_part_array(struct node_cr_record *this_cr_node) { - int index = 0; - int j; + struct part_cr_record *p_ptr; - if ((select_node_cnt == 0) - || (name == NULL)) - return 0; /* degenerate case */ + if (!this_cr_node) + return; + for (p_ptr = this_cr_node->parts; p_ptr; p_ptr = p_ptr->next) + xfree(p_ptr->alloc_cores); + xfree(this_cr_node->parts); +} - /* Multiply each character by its numerical position in the - * name string to add a bit of entropy, because host names such - * as cluster[0001-1000] can cause excessive index collisions. - */ - for (j = 1; *name; name++, j++) - index += (int)*name * j; - index %= select_node_cnt; - - return index; +static void _cr_job_list_del(void *x) +{ + xfree(x); +} +static int _cr_job_list_sort(void *x, void *y) +{ + struct job_record **job1_pptr = (struct job_record **) x; + struct job_record **job2_pptr = (struct job_record **) y; + return (int) difftime(job1_pptr[0]->end_time, job2_pptr[0]->end_time); } -/* - * _build_cr_node_hash_table - build a hash table of the node_cr_record entries. 
- * global: select_node_ptr - table of node_cr_record - * cr_node_hash_table - table of hash indices - * NOTE: manages memory for cr_node_hash_table - * Inspired from rehash_nodes() in slurmctld/node_mgr.c - */ -void _build_cr_node_hash_table (void) +static void _create_node_part_array(struct node_cr_record *this_cr_node) { - int i, inx; + struct node_record *node_ptr; + struct part_cr_record *p_ptr; + int i; - xfree (cr_node_hash_table); - cr_node_hash_table = xmalloc (sizeof (struct node_cr_record *) * - select_node_cnt); + if (!this_cr_node) + return; + node_ptr = this_cr_node->node_ptr; - for (i = 0; i < select_node_cnt; i++) { - if (strlen (select_node_ptr[i].node_ptr->name) == 0) - continue; /* vestigial record */ - inx = _cr_hash_index (select_node_ptr[i].node_ptr->name); - select_node_ptr[i].node_next = cr_node_hash_table[inx]; - cr_node_hash_table[inx] = &select_node_ptr[i]; - } + if (this_cr_node->parts) + _destroy_node_part_array(this_cr_node); -#if(0) - _cr_dump_hash(); + if (node_ptr->part_cnt < 1) + return; + this_cr_node->parts = xmalloc(sizeof(struct part_cr_record) * + node_ptr->part_cnt); + for (i = 0; i < node_ptr->part_cnt; i++) { + p_ptr = &(this_cr_node->parts[i]); + p_ptr->part_ptr = node_ptr->part_pptr[i]; + p_ptr->num_rows = node_ptr->part_pptr[i]->max_share; + if (p_ptr->num_rows & SHARED_FORCE) + p_ptr->num_rows &= (~SHARED_FORCE); + /* SHARED=EXCLUSIVE sets max_share = 0 */ + if (p_ptr->num_rows < 1) + p_ptr->num_rows = 1; +#if (CR_DEBUG) + info("cons_res: _create_node_part_array: part %s num_rows %d", + p_ptr->part_ptr->name, p_ptr->num_rows); #endif - return; + p_ptr->alloc_cores = xmalloc(sizeof(uint16_t) * + this_cr_node->sockets * + p_ptr->num_rows); + if (i+1 < node_ptr->part_cnt) + p_ptr->next = &(this_cr_node->parts[i+1]); + else + p_ptr->next = NULL; + } + } -/* - * find_cr_node_record - find a record for node with specified name - * input: name - name of the desired node - * output: return pointer to node record or NULL if not found - * global: select_node_ptr - pointer to global select_node_ptr - * cr_node_hash_table - table of hash indecies - * Inspired from find_node_record (char *name) in slurmctld/node_mgr.c - */ -struct node_cr_record * find_cr_node_record (const char *name) +static int _find_job_by_id(void *x, void *key) { - int i; + struct select_cr_job *cr_job_ptr = (struct select_cr_job *) x; + uint32_t *job_id = (uint32_t *) key; - if ((name == NULL) - || (name[0] == '\0')) { - info("find_cr_node_record passed NULL name"); - return NULL; - } + if (cr_job_ptr->job_id == *job_id) + return 1; + return 0; +} - /* try to find via hash table, if it exists */ - if (cr_node_hash_table) { - struct node_cr_record *this_node; +/* Find a partition record based upon pointer to slurmctld record */ +extern struct part_cr_record *get_cr_part_ptr(struct node_cr_record *this_node, + struct part_record *part_ptr) +{ + struct part_cr_record *p_ptr; - i = _cr_hash_index (name); - this_node = cr_node_hash_table[i]; - while (this_node) { - xassert(this_node->node_ptr->magic == NODE_MAGIC); - if (strncmp(this_node->node_ptr->name, name, MAX_SLURM_NAME) == 0) { - return this_node; - } - this_node = this_node->node_next; - } - error ("find_cr_node_record: lookup failure using hashtable for %s", - name); - } + if (part_ptr == NULL) + return NULL; - /* revert to sequential search */ - else { - for (i = 0; i < select_node_cnt; i++) { - if (strcmp (name, select_node_ptr[i].node_ptr->name) == 0) { - debug3("cons_res find_cr_node_record: linear %s", name); - return 
(&select_node_ptr[i]); - } - } - error ("find_cr_node_record: lookup failure with linear search for %s", - name); + if (!this_node->parts) + _create_node_part_array(this_node); + + for (p_ptr = this_node->parts; p_ptr; p_ptr = p_ptr->next) { + if (p_ptr->part_ptr == part_ptr) + return p_ptr; } - error ("find_cr_node_record: lookup failure with both method %s", name); - return (struct node_cr_record *) NULL; + error("cons_res: could not find partition %s", part_ptr->name); + + return NULL; } -void chk_resize_node(struct node_cr_record *node, uint16_t sockets) +/* This just resizes alloc_cores based on a potential change to + * the number of sockets on this node (if fast_schedule = 0 and the + * node checks in with a different node count after initialization). + * Any changes to the number of partition rows will be caught + * and adjusted in select_p_reconfigure() */ +static void _chk_resize_node(struct node_cr_record *node) { - if ((node->alloc_cores == NULL) || - (sockets > node->num_sockets)) { - debug3("cons_res: increasing node %s num_sockets from %u to %u", - node->node_ptr->name, node->num_sockets, sockets); - xrealloc(node->alloc_cores, sockets * sizeof(uint16_t)); + struct part_cr_record *p_ptr; + + if ((select_fast_schedule > 0) || + (node->cpus >= node->node_ptr->cpus)) + return; + + verbose("cons_res: increasing node %s cpus from %u to %u", + node->node_ptr->name, node->cpus, node->node_ptr->cpus); + node->cpus = node->node_ptr->cpus; + node->sockets = node->node_ptr->sockets; + node->cores = node->node_ptr->cores; + node->threads = node->node_ptr->threads; + node->real_memory = node->node_ptr->real_memory; + for (p_ptr = node->parts; p_ptr; p_ptr = p_ptr->next) { + xrealloc(p_ptr->alloc_cores, (sizeof(uint16_t) * + node->sockets * p_ptr->num_rows)); /* NOTE: xrealloc zero fills added memory */ - node->num_sockets = sockets; } } -void chk_resize_job(struct select_cr_job *job, uint16_t node_id, uint16_t sockets) +static void _chk_resize_job(struct select_cr_job *job, uint16_t node_id, + uint16_t sockets) { if ((job->alloc_cores[node_id] == NULL) || (sockets > job->num_sockets[node_id])) { - debug3("cons_res: increasing job %u node %u num_sockets from %u to %u", - job->job_id, node_id, job->num_sockets[node_id], sockets); + debug3("cons_res: increasing job %u node %u " + "num_sockets from %u to %u", + job->job_id, node_id, + job->num_sockets[node_id], sockets); xrealloc(job->alloc_cores[node_id], sockets * sizeof(uint16_t)); /* NOTE: xrealloc zero fills added memory */ job->num_sockets[node_id] = sockets; } } -void get_resources_this_node(uint16_t *cpus, - uint16_t *sockets, - uint16_t *cores, - uint16_t *threads, - struct node_cr_record *this_cr_node, - uint16_t *alloc_sockets, - uint16_t *alloc_lps, - uint32_t *jobid) +extern void get_resources_this_node(uint16_t *cpus, uint16_t *sockets, + uint16_t *cores, uint16_t *threads, + struct node_cr_record *this_cr_node, + uint32_t jobid) { - if (select_fast_schedule) { - *cpus = this_cr_node->node_ptr->config_ptr->cpus; - *sockets = this_cr_node->node_ptr->config_ptr->sockets; - *cores = this_cr_node->node_ptr->config_ptr->cores; - *threads = this_cr_node->node_ptr->config_ptr->threads; - } else { - *cpus = this_cr_node->node_ptr->cpus; - *sockets = this_cr_node->node_ptr->sockets; - *cores = this_cr_node->node_ptr->cores; - *threads = this_cr_node->node_ptr->threads; - } - *alloc_sockets = this_cr_node->alloc_sockets; - *alloc_lps = this_cr_node->alloc_lps; + _chk_resize_node(this_cr_node); + + *cpus = this_cr_node->cpus; + *sockets = 
this_cr_node->sockets; + *cores = this_cr_node->cores; + *threads = this_cr_node->threads; debug3("cons_res %u _get_resources host %s HW_ " "cpus %u sockets %u cores %u threads %u ", - *jobid, this_cr_node->node_ptr->name, + jobid, this_cr_node->node_ptr->name, *cpus, *sockets, *cores, *threads); - debug3("cons_res %u _get_resources host %s Alloc_ sockets %u lps %u", - *jobid, this_cr_node->node_ptr->name, - *alloc_sockets, *alloc_lps); } -/* - * _get_avail_memory returns the amount of available real memory in MB - * for this node. +/* _get_cpu_data + * determine the number of available free cores/cpus/sockets + * IN - p_ptr: pointer to a node's part_cr_record for a specific partition + * IN - num_sockets: number of sockets on this node + * IN - max_cpus: the total number of cores/cpus/sockets on this node + * OUT- row_index: the row index from which the returned value was obtained + * (if -1 then nothing is allocated in this partition) + * OUT- free_row: the row index of an unallocated row (if -1 then all rows + * contain allocated cores) + * RETURN - the maximum number of free cores/cpus/sockets found in the given + * row_index (if 0 then node is full; if 'max_cpus' then node is free) */ -static uint32_t _get_avail_memory(int index, int all_available) +static uint16_t _get_cpu_data (struct part_cr_record *p_ptr, int num_sockets, + uint16_t max_cpus, int *row_index, int *free_row) { - uint32_t avail_memory = 0; - struct node_cr_record *this_cr_node; - - if (select_fast_schedule) { - avail_memory = select_node_ptr[index].node_ptr->config_ptr->real_memory; - } else { - avail_memory = select_node_ptr[index].node_ptr->real_memory; + int i, j, index; + uint16_t alloc_count = 0; + bool counting_sockets = 0; + if ((cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) + counting_sockets = 1; + + *free_row = -1; + *row_index = -1; + + for (i = 0, index = 0; i < p_ptr->num_rows; i++) { + uint16_t cpu_count = 0; + uint16_t socket_count = 0; + for (j = 0; j < num_sockets; j++, index++) { + if (p_ptr->alloc_cores[index]) { + socket_count++; + cpu_count += p_ptr->alloc_cores[index]; + } + } + if (socket_count > 0) { + if (counting_sockets) { + if ((alloc_count == 0) || + (socket_count < alloc_count)) { + alloc_count = socket_count; + *row_index = i; + } + } else { + if ((alloc_count == 0) || + (cpu_count < alloc_count)) { + alloc_count = cpu_count; + *row_index = i; + } + } + } + else if (*free_row < 0) { + *free_row = i; + } } - - if (all_available) - return avail_memory; - - this_cr_node = find_cr_node_record (select_node_ptr[index].node_ptr->name); - if (this_cr_node == NULL) { - error(" cons_res: could not find node %s", - select_node_ptr[index].node_ptr->name); - avail_memory = 0; - return avail_memory; - } - avail_memory -= this_cr_node->alloc_memory; - - return(avail_memory); + return max_cpus - alloc_count; } /* - * _get_avail_lps - Get the number of "available" cpus on a node - * given the number of cpus_per_task and - * maximum sockets, cores, threads. Note that the value of - * cpus is the lowest-level logical processor (LLLP). 
+ * _get_task_count - Given the job requirements, compute the number of tasks + * this node can run + * * IN job_ptr - pointer to job being scheduled * IN index - index of node's configuration information in select_node_ptr */ -static uint16_t _get_avail_lps(struct job_record *job_ptr, - const int index, - const bool all_available) +static uint16_t _get_task_count(struct node_cr_record *select_node_ptr, + struct job_record *job_ptr, const int index, + const bool all_available, bool try_partial_idle, + enum node_cr_state job_node_req) { - uint16_t avail_cpus, cpus_per_task = 0; + uint16_t numtasks, cpus_per_task = 0; uint16_t max_sockets = 0, max_cores = 0, max_threads = 0; uint16_t min_sockets = 0, min_cores = 0, min_threads = 0; uint16_t ntasks_per_node = 0, ntasks_per_socket = 0, ntasks_per_core = 0; - uint16_t cpus, sockets, cores, threads; - uint16_t alloc_sockets = 0, alloc_lps = 0; - struct node_cr_record *this_cr_node; + uint16_t i, cpus, sockets, cores, threads, *alloc_cores = NULL; + struct node_cr_record *this_node; + struct part_cr_record *p_ptr; struct multi_core_data *mc_ptr = NULL; - if (job_ptr->details) { - cpus_per_task = job_ptr->details->cpus_per_task; - ntasks_per_node = job_ptr->details->ntasks_per_node; - mc_ptr = job_ptr->details->mc_ptr; - } - if (mc_ptr) { - min_sockets = mc_ptr->min_sockets; - max_sockets = mc_ptr->max_sockets; - min_cores = mc_ptr->min_cores; - max_cores = mc_ptr->max_cores; - min_threads = mc_ptr->min_threads; - max_threads = mc_ptr->max_threads; - ntasks_per_socket = mc_ptr->ntasks_per_socket; - ntasks_per_core = mc_ptr->ntasks_per_core; - } - - this_cr_node = find_cr_node_record (select_node_ptr[index].node_ptr->name); - if (this_cr_node == NULL) { - error(" cons_res: could not find node %s", - select_node_ptr[index].node_ptr->name); - avail_cpus = 0; - return avail_cpus; - } + cpus_per_task = job_ptr->details->cpus_per_task; + ntasks_per_node = job_ptr->details->ntasks_per_node; + + mc_ptr = job_ptr->details->mc_ptr; + min_sockets = mc_ptr->min_sockets; + max_sockets = mc_ptr->max_sockets; + min_cores = mc_ptr->min_cores; + max_cores = mc_ptr->max_cores; + min_threads = mc_ptr->min_threads; + max_threads = mc_ptr->max_threads; + ntasks_per_socket = mc_ptr->ntasks_per_socket; + ntasks_per_core = mc_ptr->ntasks_per_core; + + this_node = &(select_node_ptr[index]); get_resources_this_node(&cpus, &sockets, &cores, &threads, - this_cr_node, &alloc_sockets, - &alloc_lps, &job_ptr->job_id); - if (all_available) { - alloc_sockets = 0; - alloc_lps = 0; - } - - chk_resize_node(this_cr_node, sockets); - avail_cpus = slurm_get_avail_procs(max_sockets, - max_cores, - max_threads, - min_sockets, - min_cores, - cpus_per_task, - ntasks_per_node, - ntasks_per_socket, - ntasks_per_core, - &cpus, &sockets, &cores, - &threads, alloc_sockets, - this_cr_node->alloc_cores, - alloc_lps, cr_type, - job_ptr->job_id, - this_cr_node->node_ptr->name); - return(avail_cpus); + this_node, job_ptr->job_id); + + alloc_cores = xmalloc(sockets * sizeof(uint16_t)); + /* array is zero filled by xmalloc() */ + + if (!all_available) { + p_ptr = get_cr_part_ptr(this_node, job_ptr->part_ptr); + if (!p_ptr) { + error("cons_res: _get_task_count: could not find part %s", + job_ptr->part_ptr->name); + } else { + if (job_node_req == NODE_CR_ONE_ROW) { + /* need to scan over all partitions with + * num_rows = 1 */ + for (p_ptr = this_node->parts; p_ptr; + p_ptr = p_ptr->next) { + if (p_ptr->num_rows > 1) + continue; + for (i = 0; i < sockets; i++) { + if ((cr_type == CR_SOCKET) || + 
(cr_type == CR_SOCKET_MEMORY)) { + if (p_ptr->alloc_cores[i]) + alloc_cores[i] = cores; + } else { + alloc_cores[i] = + p_ptr->alloc_cores[i]; + } + } + } + } else { + /* job_node_req == EXCLUSIVE | AVAILABLE + * if EXCLUSIVE, then node *should* be free and + * this code should fall through with + * alloc_cores all set to zero. + * if AVAILABLE then scan partition rows based + * on 'try_partial_idle' setting. Note that + * if 'try_partial_idle' is FALSE then this + * code should use a 'free' row and this is + * where a new row will first be evaluated. + */ + uint16_t count, max_cpus; + int alloc_row, free_row; + + max_cpus = cpus; + if ((cr_type == CR_SOCKET) || + (cr_type == CR_SOCKET_MEMORY)) + max_cpus = sockets; + if ((cr_type == CR_CORE) || + (cr_type == CR_CORE_MEMORY)) + max_cpus = cores * sockets; + + count = _get_cpu_data(p_ptr, sockets, max_cpus, + &alloc_row, &free_row); + if ((count == 0) && (free_row == -1)) { + /* node is completely allocated */ + xfree(alloc_cores); + return 0; + } + if ((free_row == -1) && (!try_partial_idle)) { + /* no free rows, so partial idle is + * all that is left! */ + try_partial_idle = 1; + } + if (try_partial_idle && (alloc_row > -1)) { + alloc_row *= sockets; + for (i = 0; i < sockets; i++) + alloc_cores[i] = + p_ptr->alloc_cores[alloc_row+i]; + } + } + } + } +#if (CR_DEBUG) + for (i = 0; i < sockets; i+=2) { + info("cons_res: _get_task_count: %s alloc_cores[%d]=%d, [%d]=%d", + this_node->node_ptr->name, i, alloc_cores[i], + i+1, alloc_cores[i+1]); + } +#endif + + numtasks = slurm_get_avail_procs(max_sockets, max_cores, max_threads, + min_sockets, min_cores, + cpus_per_task, + ntasks_per_node, + ntasks_per_socket, + ntasks_per_core, + &cpus, &sockets, &cores, + &threads, alloc_cores, + cr_type, job_ptr->job_id, + this_node->node_ptr->name); +#if (CR_DEBUG) + info("cons_res: _get_task_count computed a_tasks %d s %d c %d " + "t %d on %s for job %d", + numtasks, sockets, cores, + threads, this_node->node_ptr->name, job_ptr->job_id); +#endif + xfree(alloc_cores); + return(numtasks); } /* xfree an array of node_cr_record */ -static void _xfree_select_nodes(struct node_cr_record *ptr, int select_node_cnt) +static void _xfree_select_nodes(struct node_cr_record *ptr, int count) { int i; if (ptr == NULL) return; - for (i = 0; i < select_node_cnt; i++) { - xfree(ptr[i].alloc_cores); - xfree(ptr[i].name); - ptr[i].num_sockets = 0; - } + for (i = 0; i < count; i++) + _destroy_node_part_array(&(ptr[i])); xfree(ptr); } @@ -475,16 +629,12 @@ static void _xfree_select_cr_job(struct select_cr_job *job) if (job == NULL) return; - if (job->host) { - for (i=0; i<job->nhosts; i++) - xfree(job->host[i]); - xfree(job->host); - } xfree(job->cpus); - xfree(job->alloc_lps); - xfree(job->alloc_sockets); + xfree(job->alloc_cpus); + xfree(job->node_offset); xfree(job->alloc_memory); - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { + if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY) || + (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) { for (i = 0; i < job->nhosts; i++) xfree(job->alloc_cores[i]); xfree(job->alloc_cores); @@ -502,9 +652,8 @@ static void _clear_job_list(void) ListIterator job_iterator; struct select_cr_job *job; - if (select_cr_job_list == NULL) { + if (select_cr_job_list == NULL) return; - } slurm_mutex_lock(&cr_mutex); job_iterator = list_iterator_create(select_cr_job_list); @@ -516,6 +665,36 @@ static void _clear_job_list(void) slurm_mutex_unlock(&cr_mutex); } +static void _verify_select_job_list(uint32_t job_id) +{ + ListIterator 
job_iterator; + struct select_cr_job *job; + + if (list_count(select_cr_job_list) < 1) { + last_verified_job_id = job_id; + return; + } + if ((job_id > last_verified_job_id) && + (job_id < (last_verified_job_id + CR_VERIFY_JOB_CYCLE))) { + return; + } + + last_verified_job_id = job_id; + slurm_mutex_lock(&cr_mutex); + job_iterator = list_iterator_create(select_cr_job_list); + while ((job = (struct select_cr_job *) list_next(job_iterator))) { + if (find_job_record(job->job_id) == NULL) { + list_remove(job_iterator); + debug2("cons_res: _verify_job_list: removing " + "nonexistent job %u", job->job_id); + _xfree_select_cr_job(job); + } + } + list_iterator_destroy(job_iterator); + slurm_mutex_unlock(&cr_mutex); + last_cr_update_time = time(NULL); +} + /* Append a specific select_cr_job to select_cr_job_list. If the * select_job already exists then it is deleted and re-added otherwise * it is just added to the list. @@ -524,8 +703,8 @@ static void _append_to_job_list(struct select_cr_job *new_job) { int job_id = new_job->job_id; struct select_cr_job *old_job = NULL; - ListIterator iterator = list_iterator_create(select_cr_job_list); + slurm_mutex_lock(&cr_mutex); while ((old_job = (struct select_cr_job *) list_next(iterator))) { if (old_job->job_id != job_id) @@ -534,103 +713,93 @@ static void _append_to_job_list(struct select_cr_job *new_job) _xfree_select_cr_job(old_job); /* xfree job structure */ break; } - list_iterator_destroy(iterator); list_append(select_cr_job_list, new_job); slurm_mutex_unlock(&cr_mutex); - debug3 (" cons_res: _append_to_job_list job_id %u to list. " + debug3 ("cons_res: _append_to_job_list job_id %u to list. " "list_count %d ", job_id, list_count(select_cr_job_list)); } -/* - * _count_cpus - report how many cpus are available with the identified nodes - */ -static void _count_cpus(bitstr_t *bitmap, uint16_t *sum) +/* find the maximum number of idle cpus from all partitions */ +static uint16_t _count_idle_cpus(struct node_cr_record *this_node) { - int i, allocated_lps; - *sum = 0; + struct part_cr_record *p_ptr; + int i, j, index, idlecpus; + uint16_t cpus, sockets, cores, threads; - for (i = 0; i < node_record_count; i++) { - struct node_cr_record *this_node; - allocated_lps = 0; - if (bit_test(bitmap, i) != 1) - continue; + if (this_node->node_state == NODE_CR_RESERVED) + return (uint16_t) 0; - this_node = find_cr_node_record(node_record_table_ptr[i].name); - if (this_node == NULL) { - error(" cons_res: Invalid Node reference %s ", - node_record_table_ptr[i].name); - *sum = 0; - return; - } + get_resources_this_node(&cpus, &sockets, &cores, &threads, + this_node, 0); - switch(cr_type) { - case CR_SOCKET: - case CR_SOCKET_MEMORY: - if (slurmctld_conf.fast_schedule) { - (*sum) += (node_record_table_ptr[i].config_ptr->sockets - - this_node->alloc_sockets) * - node_record_table_ptr[i].config_ptr->cores * - node_record_table_ptr[i].config_ptr->threads; - } else { - (*sum) += (node_record_table_ptr[i].sockets - - this_node->alloc_sockets) - * node_record_table_ptr[i].cores - * node_record_table_ptr[i].threads; - } - break; - case CR_CORE: - case CR_CORE_MEMORY: - { - int core_cnt = 0; - chk_resize_node(this_node, this_node->node_ptr->sockets); - for (i = 0; i < this_node->node_ptr->sockets; i++) - core_cnt += this_node->alloc_cores[i]; - if (slurmctld_conf.fast_schedule) { - (*sum) += ((node_record_table_ptr[i].config_ptr->sockets - * node_record_table_ptr[i].config_ptr->cores) - - core_cnt) - * node_record_table_ptr[i].config_ptr->threads; - } else { - (*sum) += 
((node_record_table_ptr[i].sockets - * node_record_table_ptr[i].cores) - - core_cnt) - * node_record_table_ptr[i].threads; + if (!this_node->parts) + return cpus; + + idlecpus = cpus; + if (this_node->node_state == NODE_CR_ONE_ROW) { + /* check single-row partitions for idle CPUs */ + for (p_ptr = this_node->parts; p_ptr; p_ptr = p_ptr->next) { + if (p_ptr->num_rows > 1) + continue; + for (i = 0; i < this_node->sockets; i++) { + if ((cr_type == CR_SOCKET) || + (cr_type == CR_SOCKET_MEMORY)) { + if (p_ptr->alloc_cores[i]) + idlecpus -= cores; + } else { + idlecpus -= p_ptr->alloc_cores[i]; + } } - break; + if (idlecpus < 1) + return (uint16_t) 0; } - case CR_MEMORY: - if (slurmctld_conf.fast_schedule) { - (*sum) += node_record_table_ptr[i].config_ptr->cpus; - } else { - (*sum) += node_record_table_ptr[i].cpus; - } - break; - case CR_CPU: - case CR_CPU_MEMORY: - default: - if (slurmctld_conf.fast_schedule) { - (*sum) += node_record_table_ptr[i].config_ptr->cpus - - this_node->alloc_lps; - } else { - (*sum) += node_record_table_ptr[i].cpus - - this_node->alloc_lps; + return (uint16_t) idlecpus; + } + + if (this_node->node_state == NODE_CR_AVAILABLE) { + /* check all partitions for idle CPUs */ + int tmpcpus, max_idle = 0; + for (p_ptr = this_node->parts; p_ptr; p_ptr = p_ptr->next) { + for (i = 0, index = 0; i < p_ptr->num_rows; i++) { + tmpcpus = idlecpus; + for (j = 0; j < this_node->sockets; + j++, index++) { + if ((cr_type == CR_SOCKET) || + (cr_type == CR_SOCKET_MEMORY)) { + if (p_ptr->alloc_cores[index]) + tmpcpus -= cores; + } else { + tmpcpus -= p_ptr-> + alloc_cores[index]; + } + } + if (tmpcpus > max_idle) { + max_idle = tmpcpus; + if (max_idle == idlecpus) + break; + } } - break; + if (max_idle == idlecpus) + break; } + if (this_node->parts) + idlecpus = max_idle; } + return (uint16_t) idlecpus; } static int _synchronize_bitmaps(bitstr_t ** partially_idle_bitmap) { - int rc = SLURM_SUCCESS, i; - bitstr_t *bitmap = bit_alloc(bit_size(avail_node_bitmap)); + int rc = SLURM_SUCCESS; + int size, i, idlecpus = bit_set_count(avail_node_bitmap); + size = bit_size(avail_node_bitmap); + bitstr_t *bitmap = bit_alloc(size); - debug3(" cons_res: Synch size avail %d size idle %d ", - bit_size(avail_node_bitmap), bit_size(idle_node_bitmap)); + debug3("cons_res: synch_bm: size avail %d (%d set) size idle %d ", + size, idlecpus, bit_size(idle_node_bitmap)); - for (i = 0; i < node_record_count; i++) { - uint16_t allocated_cpus; + for (i = 0; i < select_node_cnt; i++) { if (bit_test(avail_node_bitmap, i) != 1) continue; @@ -638,205 +807,291 @@ static int _synchronize_bitmaps(bitstr_t ** partially_idle_bitmap) bit_set(bitmap, i); continue; } - - allocated_cpus = 0; - rc = select_g_get_select_nodeinfo(&node_record_table_ptr - [i], SELECT_ALLOC_CPUS, - &allocated_cpus); - if (rc != SLURM_SUCCESS) { - error(" cons_res: Invalid Node reference %s", - node_record_table_ptr[i].name); - goto cleanup; - } - - if (allocated_cpus < node_record_table_ptr[i].cpus) + + idlecpus = _count_idle_cpus(&(select_node_ptr[i])); + if (idlecpus) bit_set(bitmap, i); - else - bit_clear(bitmap, i); } + idlecpus = bit_set_count(bitmap); + debug3("cons_res: synch found %d partially idle nodes", idlecpus); *partially_idle_bitmap = bitmap; - if (rc == SLURM_SUCCESS) + if (rc != SLURM_SUCCESS) + FREE_NULL_BITMAP(bitmap); + return rc; +} + +/* allocate resources to the given job + * + * if suspend = 0 then fully add job + * if suspend = 1 then only add memory + */ +static int _add_job_to_nodes(struct select_cr_job *job, char 
*pre_err, + int suspend) +{ + int host_index, i, j, rc = SLURM_SUCCESS; + uint16_t add_memory = 0; + uint16_t memset = job->state & CR_JOB_ALLOCATED_MEM; + uint16_t cpuset = job->state & CR_JOB_ALLOCATED_CPUS; + + if (memset && cpuset) + return rc; + if (job->node_bitmap == NULL) { /* likely still starting up */ + error("job %u has no node_bitmap", job->job_id); return rc; + } + if (!memset && + ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) || + (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) { + job->state |= CR_JOB_ALLOCATED_MEM; + add_memory = 1; + } + if (!cpuset && !suspend) + job->state |= CR_JOB_ALLOCATED_CPUS; + + i = -1; + for (host_index = 0; host_index < select_node_cnt; host_index++) { + struct node_cr_record *this_node; + struct part_cr_record *p_ptr; + uint16_t offset = 0; + + if (bit_test(job->node_bitmap, host_index) == 0) + continue; + + this_node = &select_node_ptr[host_index]; + i++; + + /* Update this node's allocated resources, starting with + * memory (if applicable) */ + + if (add_memory) + this_node->alloc_memory += job->alloc_memory[i]; + + if (cpuset || suspend) + continue; + + this_node->node_state = job->node_req; + + p_ptr = get_cr_part_ptr(this_node, job->job_ptr->part_ptr); + if (p_ptr == NULL) { + error("%s: could not find part %s", pre_err, + job->job_ptr->partition); + continue; + } + + /* The offset could be invalid if the sysadmin reduced the + * number of shared rows after this job was allocated. In + * this case, we *should* attempt to place this job in + * other rows. However, this may be futile if they are all + * currently full. + * For now, we're going to be lazy and simply NOT "allocate" + * this job on the node(s) (hey - you get what you pay for). ;-) + * This just means that we will not be accounting for this + * job when determining available space for future jobs, + * which is relatively harmless (hey, there was space when + * this job was first scheduled - if the sysadmin doesn't + * like it, then (s)he can terminate the job). ;-) + * Note that we are still "allocating" memory for this job + * (if requested). 
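+	 * (Editor's note, a hedged illustration rather than part of the
+	 *  upstream change, using hypothetical values: with sockets = 2
+	 *  and num_rows reduced from 4 to 2, the largest valid offset is
+	 *  sockets * (num_rows - 1) = 2, so a job saved earlier with
+	 *  node_offset = 6 fails the range check below and is simply
+	 *  skipped, as described above.)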
+ */ + offset = job->node_offset[i]; + if (offset > (this_node->sockets * (p_ptr->num_rows - 1))) { + rc = SLURM_ERROR; + continue; + } + + switch (cr_type) { + case CR_SOCKET_MEMORY: + case CR_SOCKET: + case CR_CORE_MEMORY: + case CR_CORE: + _chk_resize_job(job, i, this_node->sockets); + for (j = 0; j < this_node->sockets; j++) { + p_ptr->alloc_cores[offset+j] += + job->alloc_cores[i][j]; + if (p_ptr->alloc_cores[offset+j] > + this_node->cores) + error("%s: Job %u Host %s offset %u " + "too many allocated " + "cores %u for socket %d", + pre_err, job->job_id, + this_node->node_ptr->name, offset, + p_ptr->alloc_cores[offset+j], j); + } + break; + case CR_CPU_MEMORY: + case CR_CPU: + /* "CPU" count is stored in the first "core" */ + p_ptr->alloc_cores[offset] += job->alloc_cpus[i]; + break; + default: + break; + } - cleanup: - FREE_NULL_BITMAP(bitmap); + /* Remove debug only */ + debug3("cons_res: %s: Job %u (+) node %s alloc_mem %u state %d", + pre_err, job->job_id, + node_record_table_ptr[host_index].name, + this_node->alloc_memory, this_node->node_state); + debug3("cons_res: %s: Job %u (+) alloc_ cpus %u offset %u mem %u", + pre_err, job->job_id, job->alloc_cpus[i], + job->node_offset[i], job->alloc_memory[i]); + for (j = 0; j < this_node->sockets; j++) + debug3("cons_res: %s: Job %u (+) node %s alloc_cores[%d] %u", + pre_err, job->job_id, + node_record_table_ptr[host_index].name, + j, p_ptr->alloc_cores[offset+j]); + } + last_cr_update_time = time(NULL); return rc; } -static int _clear_select_jobinfo(struct job_record *job_ptr) +/* deallocate resources that were assigned to this job + * + * if remove_all = 1: deallocate all resources + * if remove_all = 0: the job has been suspended, so just deallocate CPUs + */ +static int _rm_job_from_nodes(struct node_cr_record *select_node_ptr, + struct select_cr_job *job, char *pre_err, + int remove_all) { - int rc = SLURM_SUCCESS, i, j, nodes, job_id; - struct select_cr_job *job = NULL; - ListIterator iterator; + int host_index, i, j, k, rc = SLURM_SUCCESS; - xassert(job_ptr); - xassert(job_ptr->magic == JOB_MAGIC); + uint16_t memset = job->state & CR_JOB_ALLOCATED_MEM; + uint16_t cpuset = job->state & CR_JOB_ALLOCATED_CPUS; + uint16_t remove_memory = 0; - if (list_count(select_cr_job_list) == 0) + if (!memset && !cpuset) return rc; + if (!cpuset && !remove_all) + return rc; + if (memset && remove_all && + ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) || + (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) { + remove_memory = 1; + job->state &= ~CR_JOB_ALLOCATED_MEM; + } + if (cpuset) + job->state &= ~CR_JOB_ALLOCATED_CPUS; - job_id = job_ptr->job_id; - iterator = list_iterator_create(select_cr_job_list); - while ((job = (struct select_cr_job *) list_next(iterator))) { - if (job->job_id != job_id) + i = -1; + for (host_index = 0; host_index < select_node_cnt; host_index++) { + struct node_cr_record *this_node; + struct part_cr_record *p_ptr; + uint16_t offset; + + if (bit_test(job->node_bitmap, host_index) == 0) continue; - if (job->state & CR_JOB_STATE_SUSPENDED) - nodes = 0; - else - nodes = job->nhosts; - for (i = 0; i < nodes; i++) { - struct node_cr_record *this_node; - this_node = find_cr_node_record(job->host[i]); - if (this_node == NULL) { - error("cons_res: could not find node %s", - job->host[i]); - rc = SLURM_ERROR; - goto out; + + this_node = &select_node_ptr[host_index]; + i++; + + /* Update this nodes allocated resources, beginning with + * memory (if applicable) */ + if (remove_memory) { + if 
(this_node->alloc_memory >= job->alloc_memory[i]) + this_node->alloc_memory -= job->alloc_memory[i]; + else { + error("%s: alloc_memory underflow on %s", + pre_err, this_node->node_ptr->name); + this_node->alloc_memory = 0; + rc = SLURM_ERROR; } - - /* Updating this node allocated resources */ - switch(cr_type) { - case CR_SOCKET: - case CR_SOCKET_MEMORY: - if (this_node->alloc_lps >= job->alloc_lps[i]) - this_node->alloc_lps -= job->alloc_lps[i]; - else { - error("cons_res: alloc_lps underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - if (this_node->alloc_sockets >= job->alloc_sockets[i]) - this_node->alloc_sockets -= job->alloc_sockets[i]; + } + + if (!cpuset) + continue; + + p_ptr = get_cr_part_ptr(this_node, job->job_ptr->part_ptr); + if (p_ptr == NULL) { + error("%s: could not find part %s", pre_err, + job->job_ptr->partition); + continue; + } + + /* If the offset is no longer valid, then the job was never + * "allocated" on these cores (see add_job_to_nodes). + * Therefore just continue. */ + offset = job->node_offset[i]; + if (offset > (this_node->sockets * (p_ptr->num_rows - 1))) { + rc = SLURM_ERROR; + continue; + } + + switch(cr_type) { + case CR_SOCKET_MEMORY: + case CR_SOCKET: + case CR_CORE_MEMORY: + case CR_CORE: + _chk_resize_job(job, i, this_node->sockets); + for (j = 0; j < this_node->sockets; j++) { + if (p_ptr->alloc_cores[offset+j] >= + job->alloc_cores[i][j]) + p_ptr->alloc_cores[offset+j] -= + job->alloc_cores[i][j]; else { - error("cons_res: alloc_sockets underflow on %s", - this_node->node_ptr->name); + error("%s: alloc_cores underflow on %s", + pre_err, + node_record_table_ptr[host_index].name); + p_ptr->alloc_cores[offset+j] = 0; rc = SLURM_ERROR; } - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - if (rc == SLURM_ERROR) { - this_node->alloc_lps = 0; - this_node->alloc_sockets = 0; - this_node->alloc_memory = 0; - goto out; - } - break; - case CR_CORE: - case CR_CORE_MEMORY: - if (this_node->alloc_lps >= job->alloc_lps[i]) - this_node->alloc_lps -= job->alloc_lps[i]; - else { - error("cons_res: alloc_lps underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - chk_resize_node(this_node, - this_node->node_ptr->sockets); - chk_resize_job(job, i, this_node->num_sockets); - for (j =0; j < this_node->num_sockets; j++) { - if (this_node->alloc_cores[j] >= - job->alloc_cores[i][j]) - this_node->alloc_cores[j] -= - job->alloc_cores[i][j]; - else { - error("cons_res: alloc_cores underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - } - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - if (rc == SLURM_ERROR) { - this_node->alloc_lps = 0; - for (j =0; j < this_node->num_sockets; j++) { - this_node->alloc_cores[j] = 0; - } - this_node->alloc_memory = 0; - goto out; - } - break; - case CR_MEMORY: - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - this_node->alloc_memory = 0; - rc = SLURM_ERROR; - goto out; - } - break; - case CR_CPU: - case CR_CPU_MEMORY: - if (this_node->alloc_lps >= job->alloc_lps[i]) - this_node->alloc_lps -= 
job->alloc_lps[i]; - else { - error("cons_res: alloc_lps underflow on %s", - this_node->node_ptr->name); - this_node->alloc_lps = 0; - rc = SLURM_ERROR; - goto out; - } - if (cr_type == CR_CPU) - break; + } + break; + case CR_CPU_MEMORY: + case CR_CPU: + /* CPU count is stored in the first "core" */ + if (p_ptr->alloc_cores[offset] >= job->alloc_cpus[i]) + p_ptr->alloc_cores[offset] -= + job->alloc_cpus[i]; + else { + error("%s: CPU underflow (%u - %u) on %s", + pre_err, p_ptr->alloc_cores[offset], + job->alloc_cpus[i], + node_record_table_ptr[host_index].name); + p_ptr->alloc_cores[offset] = 0; + rc = SLURM_ERROR; + } + break; + default: + break; + } - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - this_node->alloc_memory = 0; - rc = SLURM_ERROR; - goto out; + /* if all cores are available, set NODE_CR_AVAILABLE */ + if (this_node->node_state != NODE_CR_AVAILABLE) { + /* need to scan all partitions */ + struct part_cr_record *pptr; + int count = 0; + for (pptr = this_node->parts; pptr; pptr = pptr->next) { + /* just need to check single row partitions */ + if (pptr->num_rows > 1) + continue; + k = pptr->num_rows * this_node->sockets; + for (j = 0; j < k; j++) { + count += p_ptr->alloc_cores[j]; } - break; - default: - break; + if (count) + break; } -#if(CR_DEBUG) - info("cons_res %u _clear_select_jobinfo (-) node %s " - "alloc_ s %u lps %u", - job->job_id, this_node->node_ptr->name, - this_node->alloc_sockets, - this_node->alloc_lps); - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) - for (j =0; j < this_node->num_sockets; j++) - info("cons_res %u _clear_select_jobinfo (-) " - " node %s alloc_ c %u", - job->job_id, this_node->node_ptr->name, - this_node->alloc_cores[j]); -#endif + if (count == 0) + this_node->node_state = NODE_CR_AVAILABLE; } - out: - slurm_mutex_lock(&cr_mutex); - list_remove(iterator); - slurm_mutex_unlock(&cr_mutex); - _xfree_select_cr_job(job); - break; - } - list_iterator_destroy(iterator); - - debug3("cons_res: _clear_select_jobinfo Job_id %u: " - "list_count: %d", job_ptr->job_id, - list_count(select_cr_job_list)); + debug3("%s: Job %u (-) node %s alloc_mem %u offset %d", + pre_err, job->job_id, this_node->node_ptr->name, + this_node->alloc_memory, offset); + for (j = 0; j < this_node->sockets; j++) + debug3("cons_res: %s: Job %u (-) node %s alloc_cores[%d] %u", + pre_err, job->job_id, + node_record_table_ptr[host_index].name, + j, p_ptr->alloc_cores[offset+j]); + } + last_cr_update_time = time(NULL); return rc; } -static bool -_enough_nodes(int avail_nodes, int rem_nodes, - uint32_t min_nodes, uint32_t req_nodes) +static bool _enough_nodes(int avail_nodes, int rem_nodes, + uint32_t min_nodes, uint32_t req_nodes) { int needed_nodes; @@ -855,10 +1110,13 @@ _enough_nodes(int avail_nodes, int rem_nodes, extern int init(void) { #ifdef HAVE_XCPU - error("%s presently incompatible with XCPU use", plugin_name); - return SLURM_ERROR; + error("%s is incompatible with XCPU use", plugin_name); + fatal("Use SelectType=select/linear"); +#endif +#ifdef HAVE_BG + error("%s is incompatable with BlueGene", plugin_name); + fatal("Use SelectType=select/bluegene"); #endif - cr_type = (select_type_plugin_info_t) slurmctld_conf.select_type_param; info("%s loaded with argument %d ", plugin_name, cr_type); @@ -877,11 +1135,6 @@ extern int fini(void) _xfree_select_nodes(select_node_ptr, select_node_cnt); select_node_ptr = NULL; 
select_node_cnt = 0; - xfree(cr_node_hash_table); - - _xfree_select_nodes(prev_select_node_ptr, prev_select_node_cnt); - prev_select_node_ptr = NULL; - prev_select_node_cnt = 0; verbose("%s shutting down ...", plugin_name); return SLURM_SUCCESS; @@ -964,17 +1217,19 @@ static int _cr_read_state_buffer(int fd, char **data_p, int *data_size_p) static int _cr_pack_job(struct select_cr_job *job, Buf buffer) { int i; - uint16_t nhosts = job->nhosts; + uint32_t nhosts = job->nhosts; + /* Do not write job->state since we re-establish + * the job's state on the nodes at restart time. + * Likewise for job_ptr and node_bitmap. */ pack32(job->job_id, buffer); - pack16(job->state, buffer); pack32(job->nprocs, buffer); - pack16(job->nhosts, buffer); + pack32(job->nhosts, buffer); + pack16(job->node_req, buffer); - packstr_array(job->host, nhosts, buffer); pack16_array(job->cpus, nhosts, buffer); - pack16_array(job->alloc_lps, nhosts, buffer); - pack16_array(job->alloc_sockets, nhosts, buffer); + pack16_array(job->alloc_cpus, nhosts, buffer); + pack16_array(job->node_offset, nhosts, buffer); if (job->alloc_cores) { pack16((uint16_t) 1, buffer); @@ -988,48 +1243,27 @@ static int _cr_pack_job(struct select_cr_job *job, Buf buffer) } pack32_array(job->alloc_memory, nhosts, buffer); - pack16(job->max_sockets, buffer); - pack16(job->max_cores, buffer); - pack16(job->max_threads, buffer); - pack16(job->min_sockets, buffer); - pack16(job->min_cores, buffer); - pack16(job->min_threads, buffer); - pack16(job->ntasks_per_node, buffer); - pack16(job->ntasks_per_socket, buffer); - pack16(job->ntasks_per_core, buffer); - pack16(job->cpus_per_task, buffer); - - pack_bit_fmt(job->node_bitmap, buffer); - pack16(_bitstr_bits(job->node_bitmap), buffer); - return 0; } static int _cr_unpack_job(struct select_cr_job *job, Buf buffer) { int i; - uint16_t len16, have_alloc_cores; + uint16_t have_alloc_cores; uint32_t len32; - int32_t nhosts = 0; - char *bit_fmt = NULL; - uint16_t bit_cnt; + uint32_t nhosts = 0; + uint16_t bit_cnt; safe_unpack32(&job->job_id, buffer); - safe_unpack16(&job->state, buffer); safe_unpack32(&job->nprocs, buffer); - safe_unpack16(&job->nhosts, buffer); + safe_unpack32(&job->nhosts, buffer); + safe_unpack16(&bit_cnt, buffer); nhosts = job->nhosts; - - safe_unpackstr_array(&job->host, &len16, buffer); - if (len16 != nhosts) { - error("cons_res unpack_job: expected %u hosts, saw %u", - nhosts, len16); - goto unpack_error; - } + job->node_req = bit_cnt; safe_unpack16_array(&job->cpus, &len32, buffer); - safe_unpack16_array(&job->alloc_lps, &len32, buffer); - safe_unpack16_array(&job->alloc_sockets, &len32, buffer); + safe_unpack16_array(&job->alloc_cpus, &len32, buffer); + safe_unpack16_array(&job->node_offset, &len32, buffer); safe_unpack16(&have_alloc_cores, buffer); if (have_alloc_cores) { @@ -1048,34 +1282,10 @@ static int _cr_unpack_job(struct select_cr_job *job, Buf buffer) if (len32 != nhosts) goto unpack_error; - safe_unpack16(&job->max_sockets, buffer); - safe_unpack16(&job->max_cores, buffer); - safe_unpack16(&job->max_threads, buffer); - safe_unpack16(&job->min_sockets, buffer); - safe_unpack16(&job->min_cores, buffer); - safe_unpack16(&job->min_threads, buffer); - safe_unpack16(&job->ntasks_per_node, buffer); - safe_unpack16(&job->ntasks_per_socket, buffer); - safe_unpack16(&job->ntasks_per_core, buffer); - safe_unpack16(&job->cpus_per_task, buffer); - - safe_unpackstr_xmalloc(&bit_fmt, &len16, buffer); - safe_unpack16(&bit_cnt, buffer); - if (bit_fmt) { - job->node_bitmap = 
bit_alloc(bit_cnt); - if (job->node_bitmap == NULL) - fatal("bit_alloc: %m"); - if (bit_unfmt(job->node_bitmap, bit_fmt)) { - error("error recovering exit_node_bitmap from %s", - bit_fmt); - } - xfree(bit_fmt); - } return 0; unpack_error: _xfree_select_cr_job(job); - xfree(bit_fmt); return -1; } @@ -1085,7 +1295,7 @@ extern int select_p_state_save(char *dir_name) ListIterator job_iterator; struct select_cr_job *job = NULL; Buf buffer = NULL; - int state_fd, i; + int state_fd; uint16_t job_cnt; char *file_name = NULL; static time_t last_save_time; @@ -1101,8 +1311,8 @@ extern int select_p_state_save(char *dir_name) (void) unlink(file_name); state_fd = creat (file_name, 0600); if (state_fd < 0) { - error ("Can't save state, error creating file %s", - file_name); + error("Can't save state, error creating file %s", file_name); + xfree(file_name); return SLURM_ERROR; } @@ -1126,25 +1336,6 @@ extern int select_p_state_save(char *dir_name) list_iterator_destroy(job_iterator); } else pack16((uint16_t) 0, buffer); /* job count */ - - /*** pack the node_cr_record array ***/ - pack32((uint32_t)select_node_cnt, buffer); - for (i = 0; i < select_node_cnt; i++) { - /*** don't save select_node_ptr[i].node_ptr ***/ - packstr((char*)select_node_ptr[i].node_ptr->name, buffer); - pack16(select_node_ptr[i].alloc_lps, buffer); - pack16(select_node_ptr[i].alloc_sockets, buffer); - pack32(select_node_ptr[i].alloc_memory, buffer); - pack16(select_node_ptr[i].num_sockets, buffer); - if (select_node_ptr[i].alloc_cores) { - uint16_t nsockets = select_node_ptr[i].num_sockets; - pack16((uint16_t) 1, buffer); - pack16_array(select_node_ptr[i].alloc_cores, - nsockets, buffer); - } else { - pack16((uint16_t) 0, buffer); - } - } slurm_mutex_unlock(&cr_mutex); /*** close the state file ***/ @@ -1160,101 +1351,10 @@ extern int select_p_state_save(char *dir_name) } -/* _cr_find_prev_node - * Return the index in the previous node list for the host - * with the given name. The previous index matched is used - * as a starting point in to achieve O(1) performance when - * matching node data in sequence between two identical lists - * of hosts +/* This is Part 2 of a 4-part procedure which can be found in + * src/slurmctld/read_config.c. See select_p_node_init for the + * whole story. 
*/ -static int _cr_find_prev_node(char *name, int prev_i) -{ - int i, cnt = 0; - if (prev_i < 0) { - prev_i = -1; - } - - /* scan forward from previous index for a match */ - for (i = prev_i + 1; i < prev_select_node_cnt; i++) { - cnt++; - if (strcmp(name, prev_select_node_ptr[i].name) == 0) { - debug3("_cr_find_prev_node fwd: %d %d cmp", i, cnt); - return i; - } - } - - /* if not found, scan from beginning to previous index for a match */ - for (i = 0; i < MIN(prev_i + 1, prev_select_node_cnt); i++) { - cnt++; - if (strcmp(name, prev_select_node_ptr[i].name) == 0) { - debug3("_cr_find_prev_node beg: %d %d cmp", i, cnt); - return i; - } - } - - debug3("_cr_find_prev_node none: %d %d cmp", -1, cnt); - return -1; /* no match found */ -} - -static void _cr_restore_node_data(void) -{ - int i, j, tmp, prev_i; - - if ((select_node_ptr == NULL) || (select_node_cnt <= 0)) { - /* can't restore, nodes not yet initialized */ - /* will attempt restore later in select_p_node_init */ - return; - } - - if ((prev_select_node_ptr == NULL) || (prev_select_node_cnt <= 0)) { - /* can't restore, node restore data not present */ - /* will attempt restore later in select_p_state_restore */ - return; - } - - prev_i = -1; /* index of previous matched node */ - for (i = 0; i < select_node_cnt; i++) { - tmp = _cr_find_prev_node(select_node_ptr[i].name, prev_i); - if (tmp < 0) { /* not found in prev node list */ - continue; /* skip update for this node */ - } - prev_i = tmp; /* found a match */ - - debug2("recovered cons_res node data for %s", - select_node_ptr[i].name); - - /* set alloc_lps/sockets/memory/cores to 0, and let - * select_p_update_nodeinfo to recover the current info - * from jobs (update_nodeinfo is called from reset_job_bitmaps) */ - select_node_ptr[i].alloc_lps = 0; - select_node_ptr[i].alloc_sockets = 0; - select_node_ptr[i].alloc_memory = 0; - /* select_node_ptr[i].alloc_lps */ - /* = prev_select_node_ptr[prev_i].alloc_lps; */ - /* select_node_ptr[i].alloc_sockets */ - /* = prev_select_node_ptr[prev_i].alloc_sockets; */ - /* select_node_ptr[i].alloc_memory */ - /* = prev_select_node_ptr[prev_i].alloc_memory; */ - if (select_node_ptr[i].alloc_cores && - prev_select_node_ptr[prev_i].alloc_cores) { - chk_resize_node(&(select_node_ptr[i]), - prev_select_node_ptr[prev_i].num_sockets); - select_node_ptr[i].num_sockets = - prev_select_node_ptr[prev_i].num_sockets; - for (j = 0; j < select_node_ptr[i].num_sockets; j++) { - select_node_ptr[i].alloc_cores[j] = 0; - /* select_node_ptr[i].alloc_cores[j] */ - /* = prev_select_node_ptr[prev_i].alloc_cores[j]; */ - } - } - } - - /* Release any previous node data */ - _xfree_select_nodes(prev_select_node_ptr, prev_select_node_cnt); - prev_select_node_ptr = NULL; - prev_select_node_cnt = 0; -} - extern int select_p_state_restore(char *dir_name) { int error_code = SLURM_SUCCESS; @@ -1262,7 +1362,6 @@ extern int select_p_state_restore(char *dir_name) char *file_name = NULL; struct select_cr_job *job; Buf buffer = NULL; - uint16_t len16; uint32_t len32; char *data = NULL; int data_size = 0; @@ -1274,6 +1373,9 @@ extern int select_p_state_restore(char *dir_name) info("cons_res: select_p_state_restore"); + if (select_cr_job_list) /* preserve current job info */ + return SLURM_SUCCESS; + if (!dir_name) { info("Starting cons_res with clean slate"); return SLURM_SUCCESS; @@ -1304,7 +1406,7 @@ extern int select_p_state_restore(char *dir_name) data = NULL; /* now in buffer, don't xfree() */ /*** retrieve the plugin type ***/ - safe_unpackstr_xmalloc(&restore_plugin_type, 
&len16, buffer); + safe_unpackstr_xmalloc(&restore_plugin_type, &len32, buffer); safe_unpack32(&restore_plugin_version, buffer); safe_unpack16(&restore_plugin_crtype, buffer); safe_unpack32(&restore_pstate_version, buffer); @@ -1315,7 +1417,7 @@ extern int select_p_state_restore(char *dir_name) (restore_plugin_version != plugin_version) || (restore_plugin_crtype != cr_type) || (restore_pstate_version != pstate_version)) { - error ("Can't restore state, state version mismtach: " + error ("Can't restore state, state version mismatch: " "saw %s/%u/%u/%u, expected %s/%u/%u/%u", restore_plugin_type, restore_plugin_version, @@ -1346,39 +1448,17 @@ extern int select_p_state_restore(char *dir_name) job = xmalloc(sizeof(struct select_cr_job)); if (_cr_unpack_job(job, buffer) != 0) goto unpack_error; - if (find_job_record(job->job_id) != NULL) { - list_append(select_cr_job_list, job); - debug2("recovered cons_res job data for job %u", job->job_id); - } else { - debug2("recovered cons_res job data for unexistent job %u", + job->job_ptr = find_job_record(job->job_id); + if (job->job_ptr == NULL) { + error("cons_res: recovered non-existent job %u", job->job_id); _xfree_select_cr_job(job); - } - } - - /*** unpack the node_cr_record array ***/ - if (prev_select_node_ptr) { /* clear any existing data */ - _xfree_select_nodes(prev_select_node_ptr, prev_select_node_cnt); - prev_select_node_ptr = NULL; - prev_select_node_cnt = 0; - } - safe_unpack32((uint32_t*)&prev_select_node_cnt, buffer); - prev_select_node_ptr = xmalloc(sizeof(struct node_cr_record) * - (prev_select_node_cnt)); - for (i = 0; i < prev_select_node_cnt; i++) { - uint16_t have_alloc_cores = 0; - /*** don't restore prev_select_node_ptr[i].node_ptr ***/ - safe_unpackstr_xmalloc(&(prev_select_node_ptr[i].name), - &len16, buffer); - safe_unpack16(&prev_select_node_ptr[i].alloc_lps, buffer); - safe_unpack16(&prev_select_node_ptr[i].alloc_sockets, buffer); - safe_unpack32(&prev_select_node_ptr[i].alloc_memory, buffer); - safe_unpack16(&prev_select_node_ptr[i].num_sockets, buffer); - safe_unpack16(&have_alloc_cores, buffer); - if (have_alloc_cores) { - safe_unpack16_array( - &prev_select_node_ptr[i].alloc_cores, - &len32, buffer); + } else { + /* NOTE: Nodes can be added or removed from the + * system on a restart */ + list_append(select_cr_job_list, job); + debug2("recovered cons_res job data for job %u", + job->job_id); } } @@ -1388,8 +1468,6 @@ extern int select_p_state_restore(char *dir_name) xfree(restore_plugin_type); xfree(file_name); - _cr_restore_node_data(); /* if nodes already initialized */ - return SLURM_SUCCESS; unpack_error: @@ -1397,29 +1475,78 @@ unpack_error: free_buf(buffer); xfree(restore_plugin_type); - /* don't keep possibly invalid prev_select_node_ptr */ - _xfree_select_nodes(prev_select_node_ptr, prev_select_node_cnt); - prev_select_node_ptr = NULL; - prev_select_node_cnt = 0; - error ("Can't restore state, error unpacking file %s", file_name); error ("Starting cons_res with clean slate"); return SLURM_SUCCESS; } +/* This is Part 3 of a 4-part procedure which can be found in + * src/slurmctld/read_config.c. See select_p_node_init for the + * whole story. 
+ */ extern int select_p_job_init(List job_list) { + struct select_cr_job *job = NULL; + ListIterator iterator; + int suspend; + info("cons_res: select_p_job_init"); - if (!select_cr_job_list) { + /* Note: select_cr_job_list restored in select_p_state_restore + * except on a cold-start */ + if (!select_cr_job_list) { select_cr_job_list = list_create(NULL); + return SLURM_SUCCESS; } - /* Note: select_cr_job_list restored in select_p_state_restore */ + /* Now synchronize the node information to the active jobs */ + if (list_count(select_cr_job_list) == 0) + return SLURM_SUCCESS; + + iterator = list_iterator_create(select_cr_job_list); + while ((job = (struct select_cr_job *) list_next(iterator))) { + job->job_ptr = find_job_record(job->job_id); + if (job->job_ptr == NULL) { + error("select_p_job_init: could not find job %u", + job->job_id); + list_remove(iterator); + continue; + } + if (job->job_ptr->job_state == JOB_SUSPENDED) + suspend = 1; + else + suspend = 0; + if ((job->job_ptr->nodes == NULL) || + (node_name2bitmap(job->job_ptr->nodes, true, + &job->node_bitmap))) { + error("cons_res: job %u has no allocated nodes", + job->job_id); + job->node_bitmap = bit_alloc(node_record_count); + } + _add_job_to_nodes(job, "select_p_job_init", suspend); + } + list_iterator_destroy(iterator); + last_cr_update_time = time(NULL); return SLURM_SUCCESS; } +/* This is Part 1 of a 4-part procedure which can be found in + * src/slurmctld/read_config.c. The whole story goes like this: + * + * Step 1: select_g_node_init : initializes 'select_node_ptr' global array + * sets node_ptr, node_name, and num_sockets + * Step 2: select_g_state_restore : IFF a cons_res state file exists: + * loads global 'select_cr_job_list' with + * saved job data + * Step 3: select_g_job_init : creates global 'select_cr_job_list' if + * nothing was recovered from state file. + * Rebuilds select_node_ptr global array. 
+ * Step 4: select_g_update_nodeinfo : called from reset_job_bitmaps() with each + * valid recovered job_ptr AND from + * select_nodes(), this procedure adds job + * data to the 'select_node_ptr' global array + */ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt) { int i; @@ -1441,31 +1568,31 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt) select_node_cnt = node_cnt; select_node_ptr = xmalloc(sizeof(struct node_cr_record) * select_node_cnt); + select_fast_schedule = slurm_get_fast_schedule(); for (i = 0; i < select_node_cnt; i++) { select_node_ptr[i].node_ptr = &node_ptr[i]; - select_node_ptr[i].name = xstrdup(node_ptr[i].name); - select_node_ptr[i].alloc_lps = 0; - select_node_ptr[i].alloc_sockets = 0; - select_node_ptr[i].alloc_memory = 0; - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - info("select_g_node_init node:%s sockets:%u", - select_node_ptr[i].name, - select_node_ptr[i].node_ptr->sockets); - select_node_ptr[i].num_sockets = - select_node_ptr[i].node_ptr->sockets; - select_node_ptr[i].alloc_cores = - xmalloc(sizeof(int) * - select_node_ptr[i].num_sockets); + if (select_fast_schedule) { + struct config_record *config_ptr; + config_ptr = node_ptr[i].config_ptr; + select_node_ptr[i].cpus = config_ptr->cpus; + select_node_ptr[i].sockets = config_ptr->sockets; + select_node_ptr[i].cores = config_ptr->cores; + select_node_ptr[i].threads = config_ptr->threads; + select_node_ptr[i].real_memory = config_ptr->real_memory; + } else { + select_node_ptr[i].cpus = node_ptr[i].cpus; + select_node_ptr[i].sockets = node_ptr[i].sockets; + select_node_ptr[i].cores = node_ptr[i].cores; + select_node_ptr[i].threads = node_ptr[i].threads; + select_node_ptr[i].real_memory = node_ptr[i].real_memory; } + select_node_ptr[i].node_state = NODE_CR_AVAILABLE; + /* xmalloc initialized everything to zero, + * including alloc_memory and parts */ + _create_node_part_array(&(select_node_ptr[i])); } - _cr_restore_node_data(); /* if restore data present */ - - select_fast_schedule = slurm_get_fast_schedule(); - - _build_cr_node_hash_table(); - return SLURM_SUCCESS; } @@ -1474,36 +1601,38 @@ extern int select_p_block_init(List part_list) return SLURM_SUCCESS; } -/* - * select_p_job_test - Given a specification of scheduling requirements, - * identify the nodes which "best" satisfy the request. 
- * "best" is defined as either single set of consecutive nodes satisfying - * the request and leaving the minimum number of unused nodes OR - * the fewest number of consecutive node sets - * IN job_ptr - pointer to job being scheduled - * IN/OUT bitmap - usable nodes are set on input, nodes not required to - * satisfy the request are cleared, other left set - * IN min_nodes - minimum count of nodes - * IN req_nodes - requested (or desired) count of nodes - * IN max_nodes - maximum count of nodes (0==don't care) - * IN test_only - if true, only test if ever could run, not necessarily now, - * not used in this implementation - * RET zero on success, EINVAL otherwise - * globals (passed via select_p_node_init): - * node_record_count - count of nodes configured - * node_record_table_ptr - pointer to global node table - * NOTE: the job information that is considered for scheduling includes: - * req_node_bitmap: bitmap of specific nodes required by the job - * contiguous: allocated nodes must be sequentially located - * num_procs: minimum number of processors required by the job - * NOTE: bitmap must be a superset of req_nodes at the time that - * select_p_job_test is called - */ -extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, - uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes, bool test_only) +/* return the number of tasks that the given + * job can run on the indexed node */ +static int _get_task_cnt(struct job_record *job_ptr, const int node_index, + int *task_cnt, int *freq, int size) +{ + int i, pos, tasks; + uint16_t * layout_ptr = NULL; + + layout_ptr = job_ptr->details->req_node_layout; + + pos = 0; + for (i = 0; i < size; i++) { + if (pos+freq[i] > node_index) + break; + pos += freq[i]; + } + tasks = task_cnt[i]; + if (layout_ptr && bit_test(job_ptr->details->req_node_bitmap, i)) { + pos = bit_get_pos_num(job_ptr->details->req_node_bitmap, i); + tasks = MIN(tasks, layout_ptr[pos]); + } else if (layout_ptr) { + tasks = 0; /* should not happen? 
*/ + } + return tasks; +} + +static int _eval_nodes(struct job_record *job_ptr, bitstr_t * bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int *task_cnt, int *freq, + int array_size) { - int i, index, error_code = SLURM_ERROR, sufficient; + int i, f, index, error_code = SLURM_ERROR; int *consec_nodes; /* how many nodes we can add from this * consecutive set of nodes */ int *consec_cpus; /* how many nodes we can add from this @@ -1512,24 +1641,23 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, int *consec_end; /* where this consecutive set ends (index) */ int *consec_req; /* are nodes from this set required * (in req_bitmap) */ - int consec_index, consec_size; + int consec_index, consec_size, sufficient; int rem_cpus, rem_nodes; /* remaining resources desired */ int best_fit_nodes, best_fit_cpus, best_fit_req; - int best_fit_location = 0, best_fit_sufficient; - int avail_cpus; - uint16_t plane_size = 0; - //int asockets, acores, athreads, acpus; - bool all_avail = false; + int best_fit_sufficient, best_fit_index = 0; + int avail_cpus, ll; /* ll = layout array index */ struct multi_core_data *mc_ptr = NULL; - int ll; /* layout array index */ uint16_t * layout_ptr = NULL; - - if (job_ptr->details) - layout_ptr = job_ptr->details->req_node_layout; + bool required_node; xassert(bitmap); + + if (bit_set_count(bitmap) < min_nodes) + return error_code; + + layout_ptr = job_ptr->details->req_node_layout; + mc_ptr = job_ptr->details->mc_ptr; - consec_index = 0; consec_size = 50; /* start allocation for 50 sets of * consecutive nodes */ consec_cpus = xmalloc(sizeof(int) * consec_size); @@ -1539,53 +1667,50 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, consec_req = xmalloc(sizeof(int) * consec_size); /* Build table with information about sets of consecutive nodes */ + consec_index = 0; consec_cpus[consec_index] = consec_nodes[consec_index] = 0; consec_req[consec_index] = -1; /* no required nodes here by default */ - if (job_ptr->details) - mc_ptr = job_ptr->details->mc_ptr; - - /* This is the case if -O/--overcommit is true */ - debug3("job_ptr->num_procs %u", job_ptr->num_procs); - if (mc_ptr && (job_ptr->num_procs == job_ptr->details->min_nodes)) { - job_ptr->num_procs *= MAX(1,mc_ptr->min_threads); - job_ptr->num_procs *= MAX(1,mc_ptr->min_cores); - job_ptr->num_procs *= MAX(1,mc_ptr->min_sockets); - } - rem_cpus = job_ptr->num_procs; if (req_nodes > min_nodes) rem_nodes = req_nodes; else rem_nodes = min_nodes; - for (index = 0, ll = -1; index < select_node_cnt; index++) { - if (layout_ptr && bit_test(job_ptr->details->req_node_bitmap, index)) + + i = 0; + f = 0; + for (index = 0, ll = -1; index < select_node_cnt; index++, f++) { + if (f >= freq[i]) { + f = 0; + i++; + } + if (job_ptr->details->req_node_bitmap) { + required_node = + bit_test(job_ptr->details->req_node_bitmap, + index); + } else + required_node = false; + if (layout_ptr && required_node) ll++; if (bit_test(bitmap, index)) { if (consec_nodes[consec_index] == 0) consec_start[consec_index] = index; - if (!test_only) - all_avail = false; - else - all_avail = true; - avail_cpus = _get_avail_lps(job_ptr, index, all_avail); - if (layout_ptr - && bit_test(job_ptr->details->req_node_bitmap, index)) { + avail_cpus = task_cnt[i]; + if (layout_ptr && required_node){ avail_cpus = MIN(avail_cpus, layout_ptr[ll]); } else if (layout_ptr) { avail_cpus = 0; /* should not happen? 
*/ } - if (job_ptr->details->req_node_bitmap - && bit_test(job_ptr->details->req_node_bitmap, index) - && (max_nodes > 0)) { + if ((max_nodes > 0) && required_node) { if (consec_req[consec_index] == -1) { /* first required node in set */ consec_req[consec_index] = index; } rem_cpus -= avail_cpus; rem_nodes--; + /* leaving bitmap set, decrement max limit */ max_nodes--; - } else { /* node not required (yet) */ + } else { /* node not selected (yet) */ bit_clear(bitmap, index); consec_cpus[consec_index] += avail_cpus; consec_nodes[consec_index]++; @@ -1598,16 +1723,11 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, consec_end[consec_index] = index - 1; if (++consec_index >= consec_size) { consec_size *= 2; - xrealloc(consec_cpus, - sizeof(int) * consec_size); - xrealloc(consec_nodes, - sizeof(int) * consec_size); - xrealloc(consec_start, - sizeof(int) * consec_size); - xrealloc(consec_end, - sizeof(int) * consec_size); - xrealloc(consec_req, - sizeof(int) * consec_size); + xrealloc(consec_cpus, sizeof(int)*consec_size); + xrealloc(consec_nodes, sizeof(int)*consec_size); + xrealloc(consec_start, sizeof(int)*consec_size); + xrealloc(consec_end, sizeof(int)*consec_size); + xrealloc(consec_req, sizeof(int)*consec_size); } consec_cpus[consec_index] = 0; consec_nodes[consec_index] = 0; @@ -1617,6 +1737,12 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, if (consec_nodes[consec_index] != 0) consec_end[consec_index++] = index - 1; + for (i = 0; i < consec_index; i++) { + debug3("cons_res: eval_nodes: %d consec c=%d n=%d b=%d e=%d r=%d", + i, consec_cpus[i], consec_nodes[i], consec_start[i], + consec_end[i], consec_req[i]); + } + /* accumulate nodes from these sets of consecutive nodes until */ /* sufficient resources have been accumulated */ while (consec_index && (max_nodes > 0)) { @@ -1635,15 +1761,13 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, /* tightest fit (less resource waste) OR */ /* nothing yet large enough, but this is biggest */ if ((best_fit_nodes == 0) || - ((best_fit_req == -1) && (consec_req[i] != -1)) - || (sufficient && (best_fit_sufficient == 0)) - || (sufficient - && (consec_cpus[i] < best_fit_cpus)) - || ((sufficient == 0) - && (consec_cpus[i] > best_fit_cpus))) { + ((best_fit_req == -1) && (consec_req[i] != -1)) || + (sufficient && (best_fit_sufficient == 0)) || + (sufficient && (consec_cpus[i] < best_fit_cpus)) || + (!sufficient && (consec_cpus[i] > best_fit_cpus))) { best_fit_cpus = consec_cpus[i]; best_fit_nodes = consec_nodes[i]; - best_fit_location = i; + best_fit_index = i; best_fit_req = consec_req[i]; best_fit_sufficient = sufficient; } @@ -1660,7 +1784,7 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, * select nodes from this set, first working up * then down from the required nodes */ for (i = best_fit_req; - i <= consec_end[best_fit_location]; i++) { + i <= consec_end[best_fit_index]; i++) { if ((max_nodes <= 0) || ((rem_nodes <= 0) && (rem_cpus <= 0))) break; @@ -1669,41 +1793,21 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, bit_set(bitmap, i); rem_nodes--; max_nodes--; - if (!test_only) - all_avail = false; - else - all_avail = true; - avail_cpus = _get_avail_lps(job_ptr, i, all_avail); - if (layout_ptr - && bit_test(job_ptr->details->req_node_bitmap, i)) { - ll = bit_get_pos_num(job_ptr->details-> - req_node_bitmap, i); - avail_cpus = MIN(avail_cpus, layout_ptr[ll]); - } else if (layout_ptr) { - avail_cpus = 
0; /* should not happen? */ - } + avail_cpus = _get_task_cnt(job_ptr, i, + task_cnt, freq, + array_size); rem_cpus -= avail_cpus; } for (i = (best_fit_req - 1); - i >= consec_start[best_fit_location]; i--) { + i >= consec_start[best_fit_index]; i--) { if ((max_nodes <= 0) || ((rem_nodes <= 0) && (rem_cpus <= 0))) break; if (bit_test(bitmap, i)) continue; - if (!test_only) - all_avail = false; - else - all_avail = true; - avail_cpus = _get_avail_lps(job_ptr, i, all_avail); - if (layout_ptr - && bit_test(job_ptr->details->req_node_bitmap, i)) { - ll = bit_get_pos_num(job_ptr->details-> - req_node_bitmap, i); - avail_cpus = MIN(avail_cpus, layout_ptr[ll]); - } else if (layout_ptr) { - avail_cpus = 0; /* should not happen? */ - } + avail_cpus = _get_task_cnt(job_ptr, i, + task_cnt, freq, + array_size); if(avail_cpus <= 0) continue; rem_cpus -= avail_cpus; @@ -1712,26 +1816,16 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, max_nodes--; } } else { - for (i = consec_start[best_fit_location]; - i <= consec_end[best_fit_location]; i++) { + for (i = consec_start[best_fit_index]; + i <= consec_end[best_fit_index]; i++) { if ((max_nodes <= 0) || ((rem_nodes <= 0) && (rem_cpus <= 0))) break; if (bit_test(bitmap, i)) continue; - if (!test_only) - all_avail = false; - else - all_avail = true; - avail_cpus = _get_avail_lps(job_ptr, i, all_avail); - if (layout_ptr - && bit_test(job_ptr->details->req_node_bitmap, i)) { - ll = bit_get_pos_num(job_ptr->details-> - req_node_bitmap, i); - avail_cpus = MIN(avail_cpus, layout_ptr[ll]); - } else if (layout_ptr) { - avail_cpus = 0; /* should not happen? */ - } + avail_cpus = _get_task_cnt(job_ptr, i, + task_cnt, freq, + array_size); if(avail_cpus <= 0) continue; rem_cpus -= avail_cpus; @@ -1746,162 +1840,14 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, error_code = SLURM_SUCCESS; break; } - consec_cpus[best_fit_location] = 0; - consec_nodes[best_fit_location] = 0; + consec_cpus[best_fit_index] = 0; + consec_nodes[best_fit_index] = 0; } if (error_code && (rem_cpus <= 0) && _enough_nodes(0, rem_nodes, min_nodes, req_nodes)) error_code = SLURM_SUCCESS; - if (error_code != SLURM_SUCCESS) - goto cleanup; - - if (!test_only) { - int jobid, job_nodecnt, j, k; - bitoff_t size; - static struct select_cr_job *job; - job = xmalloc(sizeof(struct select_cr_job)); - jobid = job_ptr->job_id; - job->job_id = jobid; - job_nodecnt = bit_set_count(bitmap); - job->nhosts = job_nodecnt; - job->nprocs = MAX(job_ptr->num_procs, job_nodecnt); - job->cpus_per_task = job_ptr->details->cpus_per_task; - job->ntasks_per_node = job_ptr->details->ntasks_per_node; - if (mc_ptr) { - plane_size = mc_ptr->plane_size; - job->max_sockets = mc_ptr->max_sockets; - job->max_cores = mc_ptr->max_cores; - job->max_threads = mc_ptr->max_threads; - job->min_sockets = mc_ptr->min_sockets; - job->min_cores = mc_ptr->min_cores; - job->min_threads = mc_ptr->min_threads; - job->ntasks_per_socket = mc_ptr->ntasks_per_socket; - job->ntasks_per_core = mc_ptr->ntasks_per_core; - } else { - job->max_sockets = 0xffff; - job->max_cores = 0xffff; - job->max_threads = 0xffff; - job->min_sockets = 1; - job->min_cores = 1; - job->min_threads = 1; - job->ntasks_per_socket = 0; - job->ntasks_per_core = 0; - } - - size = bit_size(bitmap); - job->node_bitmap = (bitstr_t *) bit_alloc(size); - if (job->node_bitmap == NULL) - fatal("bit_alloc malloc failure"); - for (i = 0; i < size; i++) { - if (!bit_test(bitmap, i)) - continue; - bit_set(job->node_bitmap, i); - } - 
- job->host = (char **) xmalloc(job->nhosts * sizeof(char *)); - job->cpus = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t)); - job->alloc_lps = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t)); - job->alloc_sockets = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t)); - job->alloc_memory = (uint32_t *) xmalloc(job->nhosts * sizeof(uint32_t)); - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - job->num_sockets = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t)); - job->alloc_cores = (uint16_t **) xmalloc(job->nhosts * sizeof(uint16_t *)); - for (i = 0; i < job->nhosts; i++) { - job->num_sockets[i] = - node_record_table_ptr[i].sockets; - job->alloc_cores[i] = (uint16_t *) xmalloc( - job->num_sockets[i] * sizeof(uint16_t)); - } - } - - j = 0; - for (i = 0, ll = -1; i < node_record_count; i++) { - if (layout_ptr - && bit_test(job_ptr->details->req_node_bitmap, i)) { - ll ++; - } - if (bit_test(bitmap, i) == 0) - continue; - if (j >= job->nhosts) { - error("select_cons_res: job nhosts too small\n"); - break; - } - job->host[j] = xstrdup(node_record_table_ptr[i].name); - job->cpus[j] = node_record_table_ptr[i].cpus; - if (layout_ptr - && bit_test(job_ptr->details->req_node_bitmap, i)) { - job->cpus[j] = MIN(job->cpus[j], layout_ptr[ll]); - } else if (layout_ptr) { - job->cpus[j] = 0; - } - job->alloc_lps[j] = 0; - job->alloc_sockets[j] = 0; - job->alloc_memory[j] = job_ptr->details->job_max_memory; - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) { - chk_resize_job(job, j, node_record_table_ptr[i].sockets); - job->num_sockets[j] = node_record_table_ptr[i].sockets; - for (k = 0; k < job->num_sockets[j]; k++) - job->alloc_cores[j][k] = 0; - } - j++; - } - - if (job_ptr->details->shared == 0) { - /* Nodes need to be allocated in dedicated - mode. User has specified the --exclusive - switch */ - error_code = cr_exclusive_dist(job, cr_type); - } else { - /* Determine the number of logical processors - per node needed for this job */ - /* Make sure below matches the layouts in - * lllp_distribution in - * plugins/task/affinity/dist_task.c */ - switch(job_ptr->details->task_dist) { - case SLURM_DIST_BLOCK_BLOCK: - case SLURM_DIST_CYCLIC_BLOCK: - error_code = cr_dist(job, 0, - cr_type, - select_fast_schedule); - break; - case SLURM_DIST_BLOCK: - case SLURM_DIST_CYCLIC: - case SLURM_DIST_BLOCK_CYCLIC: - case SLURM_DIST_CYCLIC_CYCLIC: - case SLURM_DIST_UNKNOWN: - error_code = cr_dist(job, 1, - cr_type, - select_fast_schedule); - break; - case SLURM_DIST_PLANE: - error_code = cr_plane_dist(job, - plane_size, - cr_type); - break; - case SLURM_DIST_ARBITRARY: - default: - error_code = compute_c_b_task_dist(job, - cr_type, - select_fast_schedule); - if (error_code != SLURM_SUCCESS) { - error(" Error in compute_c_b_task_dist"); - return error_code; - } - break; - } - } - if (error_code != SLURM_SUCCESS) { - _xfree_select_cr_job(job); - goto cleanup; - } - - _append_to_job_list(job); - last_cr_update_time = time(NULL); - } - - cleanup: xfree(consec_cpus); xfree(consec_nodes); xfree(consec_start); @@ -1910,256 +1856,912 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, return error_code; } -extern int select_p_job_begin(struct job_record *job_ptr) +/* this is an intermediary step between select_p_job_test and _eval_nodes + * to tackle the knapsack problem. 
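+ * (Editor's sketch of the retry loop that follows, with made-up task
+ *  counts: if _eval_nodes() fails with every candidate node included,
+ *  the loop retries with count = 0, 1, 2, ... up to the largest
+ *  per-node task count, each time clearing candidate nodes whose task
+ *  count is <= count before calling _eval_nodes() again; e.g. with
+ *  per-node task counts {4, 1, 4, 1}, the count = 1 pass drops the two
+ *  single-task nodes and re-evaluates using only the 4-task nodes.)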
This code incrementally removes nodes + * with low task counts for the job and re-evaluates each result */ +static int _select_nodes(struct job_record *job_ptr, bitstr_t * bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int *task_cnt, int *freq, + int array_size) { - return SLURM_SUCCESS; -} + int i, b, count, ec, most_tasks = 0; + bitstr_t *origmap, *reqmap = NULL; -extern int select_p_job_ready(struct job_record *job_ptr) -{ - return SLURM_SUCCESS; -} + /* allocated node count should never exceed num_procs, right? + * if so, then this should be done earlier and max_nodes + * could be used to make this process more efficient (truncate + * # of available nodes when (# of idle nodes == max_nodes)*/ + if (max_nodes > job_ptr->num_procs) + max_nodes = job_ptr->num_procs; -extern int select_p_job_fini(struct job_record *job_ptr) -{ - int rc = SLURM_SUCCESS; - rc = _clear_select_jobinfo(job_ptr); - last_cr_update_time = time(NULL); - if (rc != SLURM_SUCCESS) { - error(" error for %u in select/cons_res: " - "_clear_select_jobinfo", - job_ptr->job_id); + origmap = bit_copy(bitmap); + if (origmap == NULL) + fatal("bit_copy malloc failure"); + + ec = _eval_nodes(job_ptr, bitmap, min_nodes, max_nodes, + req_nodes, task_cnt, freq, array_size); + + if (ec == SLURM_SUCCESS) { + bit_free(origmap); + return ec; } - return rc; -} + /* This nodeset didn't work. To avoid a possible knapsack problem, + * incrementally remove nodes with low task counts and retry */ -extern int select_p_job_suspend(struct job_record *job_ptr) -{ - ListIterator job_iterator; - struct select_cr_job *job; - int i, j, rc = ESLURM_INVALID_JOB_ID; - - xassert(job_ptr); - xassert(select_cr_job_list); + for (i = 0; i < array_size; i++) { + if (task_cnt[i] > most_tasks) + most_tasks = task_cnt[i]; + } - job_iterator = list_iterator_create(select_cr_job_list); - if (job_iterator == NULL) - fatal("list_iterator_create: %m"); - while ((job = (struct select_cr_job *) list_next(job_iterator))) { - if (job->job_id != job_ptr->job_id) + if (job_ptr->details->req_node_bitmap) + reqmap = job_ptr->details->req_node_bitmap; + + for (count = 0; count < most_tasks; count++) { + int nochange = 1; + bit_or(bitmap, origmap); + for (i = 0, b = 0; i < array_size; i++) { + if (task_cnt[i] != -1 && task_cnt[i] <= count) { + int j = 0, x = b; + for (; j < freq[i]; j++, x++) { + if (!bit_test(bitmap, x)) + continue; + if (reqmap && bit_test(reqmap, x)) { + bit_free(origmap); + return SLURM_ERROR; + } + nochange = 0; + bit_clear(bitmap, x); + bit_clear(origmap, x); + } + } + b += freq[i]; + } + if (nochange) continue; - if (job->state & CR_JOB_STATE_SUSPENDED) { - error("cons_res: job %u already suspended", - job->job_id); - break; + ec = _eval_nodes(job_ptr, bitmap, min_nodes, max_nodes, + req_nodes, task_cnt, freq, array_size); + if (ec == SLURM_SUCCESS) { + bit_free(origmap); + return ec; } + } + bit_free(origmap); + return ec; +} - rc = SLURM_SUCCESS; - last_cr_update_time = time(NULL); - job->state |= CR_JOB_STATE_SUSPENDED; - for (i = 0; i < job->nhosts; i++) { - struct node_cr_record *this_node; - this_node = find_cr_node_record(job->host[i]); - if (this_node == NULL) { - error("cons_res: could not find node %s", - job->host[i]); - rc = SLURM_ERROR; - break; - } - - /* Updating this node allocated resources */ - switch(cr_type) { - case CR_SOCKET: - case CR_SOCKET_MEMORY: - if (this_node->alloc_lps >= job->alloc_lps[i]) - this_node->alloc_lps -= job->alloc_lps[i]; - else { - error("cons_res: alloc_lps underflow on %s", - 
this_node->node_ptr->name); - rc = SLURM_ERROR; - } - if (this_node->alloc_sockets >= - job->alloc_sockets[i]) { - this_node->alloc_sockets -= - job->alloc_sockets[i]; - } else { - error("cons_res: alloc_sockets underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - if (rc == SLURM_ERROR) { - this_node->alloc_lps = 0; - this_node->alloc_sockets = 0; - this_node->alloc_memory = 0; - } +/* test to see if any shared partitions are running jobs */ +static int _is_node_sharing(struct node_cr_record *this_node) +{ + int i, size; + struct part_cr_record *p_ptr = this_node->parts; + for (; p_ptr; p_ptr = p_ptr->next) { + if (p_ptr->num_rows < 2) + continue; + size = p_ptr->num_rows * this_node->sockets; + for (i = 0; i < size; i++) { + if (p_ptr->alloc_cores[i]) + return 1; + } + } + return 0; + +} + +/* test to see if the given node has any jobs running on it */ +static int _is_node_busy(struct node_cr_record *this_node) +{ + int i, size; + struct part_cr_record *p_ptr = this_node->parts; + for (; p_ptr; p_ptr = p_ptr->next) { + size = p_ptr->num_rows * this_node->sockets; + for (i = 0; i < size; i++) { + if (p_ptr->alloc_cores[i]) + return 1; + } + } + return 0; +} + +/* + * Determine which of these nodes are usable by this job + * + * Remove nodes from the bitmap that don't have enough memory to + * support the job. Return SLURM_ERROR if a required node doesn't + * have enough memory. + * + * if node_state = NODE_CR_RESERVED, clear bitmap (if node is required + * then should we return NODE_BUSY!?!) + * + * if node_state = NODE_CR_ONE_ROW, then this node can only be used by + * another NODE_CR_ONE_ROW job + * + * if node_state = NODE_CR_AVAILABLE AND: + * - job_node_req = NODE_CR_RESERVED, then we need idle nodes + * - job_node_req = NODE_CR_ONE_ROW, then we need idle or non-sharing nodes + */ +static int _verify_node_state(struct node_cr_record *select_node_ptr, + struct job_record *job_ptr, bitstr_t * bitmap, + enum node_cr_state job_node_req) +{ + int i; + uint32_t free_mem; + + for (i = 0; i < select_node_cnt; i++) { + if (!bit_test(bitmap, i)) + continue; + + if ((job_ptr->details->job_min_memory) && + ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) || + (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) { + free_mem = select_node_ptr[i].real_memory; + free_mem -= select_node_ptr[i].alloc_memory; + if (free_mem < job_ptr->details->job_min_memory) + goto clear_bit; + } + + if (select_node_ptr[i].node_state == NODE_CR_RESERVED) { + goto clear_bit; + } else if (select_node_ptr[i].node_state == NODE_CR_ONE_ROW) { + if ((job_node_req == NODE_CR_RESERVED) || + (job_node_req == NODE_CR_AVAILABLE)) + goto clear_bit; + /* cannot use this node if it is running jobs + * in sharing partitions */ + if ( _is_node_sharing(&(select_node_ptr[i])) ) + goto clear_bit; + } else { /* node_state = NODE_CR_AVAILABLE */ + if (job_node_req == NODE_CR_RESERVED) { + if ( _is_node_busy(&(select_node_ptr[i])) ) + goto clear_bit; + } else if (job_node_req == NODE_CR_ONE_ROW) { + if ( _is_node_sharing(&(select_node_ptr[i])) ) + goto clear_bit; + } + } + continue; /* node is usable, test next node */ + + /* This node is not usable by this job */ + clear_bit: bit_clear(bitmap, i); + if (job_ptr->details->req_node_bitmap && + bit_test(job_ptr->details->req_node_bitmap, i)) 
+ return SLURM_ERROR; + + } + + return SLURM_SUCCESS; +} + +/* Determine the node requirements for the job: + * - does the job need exclusive nodes? (NODE_CR_RESERVED) + * - can the job run on shared nodes? (NODE_CR_ONE_ROW) + * - can the job run on overcommitted resources? (NODE_CR_AVAILABLE) + */ +static enum node_cr_state _get_job_node_req(struct job_record *job_ptr) +{ + int max_share = job_ptr->part_ptr->max_share; + + if (max_share == 0) + return NODE_CR_RESERVED; + + if (max_share & SHARED_FORCE) + return NODE_CR_AVAILABLE; + + /* Shared=NO or Shared=YES */ + if (job_ptr->details->shared == 0) + /* user has requested exclusive nodes */ + return NODE_CR_RESERVED; + if ((max_share > 1) && (job_ptr->details->shared == 1)) + /* part allows sharing, and + * the user has requested it */ + return NODE_CR_AVAILABLE; + return NODE_CR_ONE_ROW; +} + +/* for a given node and partition return the count of rows (time slices) + * that have resources allocated */ +static int _get_allocated_rows(struct node_cr_record *select_node_ptr, + struct job_record *job_ptr, int n, + enum node_cr_state job_node_req) +{ + struct part_cr_record *p_ptr; + int i, j, rows = 0; + + p_ptr = get_cr_part_ptr(&(select_node_ptr[n]), job_ptr->part_ptr); + if (p_ptr == NULL) + return rows; + + for (i = 0; i < p_ptr->num_rows; i++) { + int offset = i * select_node_ptr[n].sockets; + for (j = 0; j < select_node_ptr[n].sockets; j++){ + if (p_ptr->alloc_cores[offset+j]) { + rows++; break; - case CR_CORE: - case CR_CORE_MEMORY: - if (this_node->alloc_lps >= job->alloc_lps[i]) - this_node->alloc_lps -= job->alloc_lps[i]; - else { - error("cons_res: alloc_lps underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - chk_resize_node(this_node, - this_node->node_ptr->sockets); - chk_resize_job(job, i, this_node->num_sockets); - for (j =0; j < this_node->num_sockets; j++) { - if (this_node->alloc_cores[j] >= - job->alloc_cores[i][j]) { - this_node->alloc_cores[j] -= - job->alloc_cores[i][j]; - } else { - error("cons_res: alloc_cores underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; + } + } + } + return rows; +} + +static int _load_arrays(struct node_cr_record *select_node_ptr, + struct job_record *job_ptr, bitstr_t *bitmap, + int **a_rows, int **s_tasks, int **a_tasks, + int **freq, bool test_only, + enum node_cr_state job_node_req) +{ + int i, index = 0, size = 32; + int *busy_rows, *shr_tasks, *all_tasks, *num_nodes; + + busy_rows = xmalloc (sizeof(int)*size); /* allocated rows */ + shr_tasks = xmalloc (sizeof(int)*size); /* max free cpus */ + all_tasks = xmalloc (sizeof(int)*size); /* all cpus */ + num_nodes = xmalloc (sizeof(int)*size); /* number of nodes */ + /* above arrays are all zero filled by xmalloc() */ + + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(bitmap, i)) { + int rows; + uint16_t atasks, ptasks; + rows = _get_allocated_rows(select_node_ptr, job_ptr, + i, job_node_req); + /* false = use free rows (if available) */ + atasks = _get_task_count(select_node_ptr, job_ptr, i, + test_only, false, + job_node_req); + if (test_only) { + ptasks = atasks; + } else { + /* true = try using an already allocated row */ + ptasks = _get_task_count(select_node_ptr, + job_ptr, i, test_only, + true, job_node_req); + } + if (rows != busy_rows[index] || + ptasks != shr_tasks[index] || + atasks != all_tasks[index]) { + if (num_nodes[index]) { + index++; + if (index >= size) { + size *= 2; + xrealloc(busy_rows, + sizeof(int)*size); + xrealloc(shr_tasks, + sizeof(int)*size); + xrealloc(all_tasks, + 
sizeof(int)*size); + xrealloc(num_nodes, + sizeof(int)*size); } + num_nodes[index] = 0; } - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - rc = SLURM_ERROR; - } - if (rc == SLURM_ERROR) { - this_node->alloc_lps = 0; - for (j =0; j < this_node->num_sockets; j++) { - this_node->alloc_cores[j] = 0; + busy_rows[index] = rows; + shr_tasks[index] = ptasks; + all_tasks[index] = atasks; + } + } else { + if (busy_rows[index] != -1) { + if (num_nodes[index] > 0) { + index++; + if (index >= size) { + size *= 2; + xrealloc(busy_rows, + sizeof(int)*size); + xrealloc(shr_tasks, + sizeof(int)*size); + xrealloc(all_tasks, + sizeof(int)*size); + xrealloc(num_nodes, + sizeof(int)*size); } - this_node->alloc_memory = 0; + num_nodes[index] = 0; } - break; - case CR_MEMORY: - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - this_node->alloc_memory = 0; - rc = SLURM_ERROR; - } - break; - case CR_CPU: - case CR_CPU_MEMORY: - if (this_node->alloc_lps >= job->alloc_lps[i]) - this_node->alloc_lps -= job->alloc_lps[i]; - else { - error("cons_res: alloc_lps underflow on %s", - this_node->node_ptr->name); - this_node->alloc_lps = 0; - rc = SLURM_ERROR; - } - if (cr_type == CR_CPU) - break; - - if (this_node->alloc_memory >= job->alloc_memory[i]) - this_node->alloc_memory -= job->alloc_memory[i]; - else { - error("cons_res: alloc_memory underflow on %s", - this_node->node_ptr->name); - this_node->alloc_memory = 0; - rc = SLURM_ERROR; - } - break; - default: - break; + busy_rows[index] = -1; + shr_tasks[index] = -1; + all_tasks[index] = -1; } } - rc = SLURM_SUCCESS; - break; + num_nodes[index]++; } - list_iterator_destroy(job_iterator); + /* array_index becomes "array size" */ + index++; - return rc; +#if (CR_DEBUG) + for (i = 0; i < index; i++) { + info("cons_res: i %d row %d ptasks %d atasks %d freq %d", + i, busy_rows[i], shr_tasks[i], all_tasks[i], num_nodes[i]); + } +#endif + + *a_rows = busy_rows; + *s_tasks = shr_tasks; + *a_tasks = all_tasks; + *freq = num_nodes; + + return index; } -extern int select_p_job_resume(struct job_record *job_ptr) +/* + * select_p_job_test - Given a specification of scheduling requirements, + * identify the nodes which "best" satisfy the request. + * "best" is defined as either a minimal number of consecutive nodes + * or if sharing resources then sharing them with a job of similar size. 
+ * IN/OUT job_ptr - pointer to job being considered for initiation, + * sets start_time when job expected to start + * IN/OUT bitmap - usable nodes are set on input, nodes not required to + * satisfy the request are cleared, others left set + * IN min_nodes - minimum count of nodes + * IN req_nodes - requested (or desired) count of nodes + * IN max_nodes - maximum count of nodes (0==don't care) + * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now + * SELECT_MODE_TEST_ONLY: test if job can ever run + * SELECT_MODE_WILL_RUN: determine when and where job can run + * RET zero on success, EINVAL otherwise + * globals (passed via select_p_node_init): + * node_record_count - count of nodes configured + * node_record_table_ptr - pointer to global node table + * NOTE: the job information that is considered for scheduling includes: + * req_node_bitmap: bitmap of specific nodes required by the job + * contiguous: allocated nodes must be sequentially located + * num_procs: minimum number of processors required by the job + * NOTE: bitmap must be a superset of req_nodes at the time that + * select_p_job_test is called + */ +extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int mode) { - ListIterator job_iterator; - struct select_cr_job *job; - int i, j, rc = ESLURM_INVALID_JOB_ID; + enum node_cr_state job_node_req; - xassert(job_ptr); - xassert(select_cr_job_list); + xassert(bitmap); - job_iterator = list_iterator_create(select_cr_job_list); - if (job_iterator == NULL) - fatal("list_iterator_create: %m"); + if (!job_ptr->details) + return EINVAL; - while ((job = (struct select_cr_job *) list_next(job_iterator))) { - if (job->job_id != job_ptr->job_id) + if (!job_ptr->details->mc_ptr) + job_ptr->details->mc_ptr = create_default_mc(); + job_node_req = _get_job_node_req(job_ptr); + + debug3("cons_res: select_p_job_test: job %d node_req %d, mode %d", + job_ptr->job_id, job_node_req, mode); + debug3("cons_res: select_p_job_test: min_n %u max_n %u req_n %u", + min_nodes, max_nodes, req_nodes); + +#if (CR_DEBUG) + _dump_state(select_node_ptr); +#endif + if (mode == SELECT_MODE_WILL_RUN) { + return _will_run_test(job_ptr, bitmap, min_nodes, max_nodes, + req_nodes, job_node_req); + } + + return _job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes, + mode, job_node_req, select_node_ptr); +} + +/* + * select_p_job_list_test - Given a list of select_will_run_t's in + * ascending priority order we will see if we can start and + * finish all the jobs without increasing the start times of the + * jobs specified and fill in the est_start of requests with no + * est_start. If you are looking to see if one job will ever run + * then use select_p_job_test instead. + * IN/OUT req_list - list of select_will_run_t's in ascending + * priority order on success of placement fill in + * est_start of request with time. + * RET zero on success, EINVAL otherwise + */ +extern int select_p_job_list_test(List req_list) +{ + /* not currently supported */ + return EINVAL; +} + + +/* _will_run_test - determine when and where a pending job can start, removes + * jobs from node table at termination time and runs _job_test() after + * each one.
*/ +static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, enum node_cr_state job_node_req) +{ + struct node_cr_record *exp_node_cr; + struct job_record *tmp_job_ptr, **tmp_job_pptr; + struct select_cr_job *job; + List cr_job_list; + ListIterator job_iterator; + bitstr_t *orig_map; + int rc = SLURM_ERROR; + uint16_t saved_state; + + orig_map = bit_copy(bitmap); + + /* Try to run with currently available nodes */ + rc = _job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes, + SELECT_MODE_WILL_RUN, job_node_req, select_node_ptr); + if (rc == SLURM_SUCCESS) { + bit_free(orig_map); + job_ptr->start_time = time(NULL); + return SLURM_SUCCESS; + } + + /* Job is still pending. Simulate termination of jobs one at a time + * to determine when and where the job can start. */ + exp_node_cr = _dup_node_cr(select_node_ptr); + if (exp_node_cr == NULL) { + bit_free(orig_map); + return SLURM_ERROR; + } + + /* Build list of running jobs */ + cr_job_list = list_create(_cr_job_list_del); + job_iterator = list_iterator_create(job_list); + while ((tmp_job_ptr = (struct job_record *) list_next(job_iterator))) { + if (tmp_job_ptr->job_state != JOB_RUNNING) continue; - if ((job->state & CR_JOB_STATE_SUSPENDED) == 0) { - error("select: job %s not suspended", - job->job_id); + if (tmp_job_ptr->end_time == 0) { + error("Job %u has zero end_time", tmp_job_ptr->job_id); + continue; + } + tmp_job_pptr = xmalloc(sizeof(struct job_record *)); + *tmp_job_pptr = tmp_job_ptr; + list_append(cr_job_list, tmp_job_pptr); + } + list_iterator_destroy(job_iterator); + list_sort(cr_job_list, _cr_job_list_sort); + + /* Remove the running jobs one at a time from exp_node_cr and try + * scheduling the pending job after each one */ + job_iterator = list_iterator_create(cr_job_list); + while ((tmp_job_pptr = (struct job_record **) list_next(job_iterator))) { + tmp_job_ptr = *tmp_job_pptr; + job = list_find_first(select_cr_job_list, _find_job_by_id, + &tmp_job_ptr->job_id); + if (!job) { + error("cons_res: could not find job %u", + tmp_job_ptr->job_id); + continue; + } + saved_state = job->state; + _rm_job_from_nodes(exp_node_cr, job, "_will_run_test", 1); + job->state = saved_state; + bit_or(bitmap, orig_map); + rc = _job_test(job_ptr, bitmap, min_nodes, max_nodes, + req_nodes, SELECT_MODE_WILL_RUN, job_node_req, + exp_node_cr); + if (rc == SLURM_SUCCESS) { + job_ptr->start_time = tmp_job_ptr->end_time; break; } + } + list_iterator_destroy(job_iterator); + list_destroy(cr_job_list); + _destroy_node_part_array(exp_node_cr); + bit_free(orig_map); + return rc; +} - rc = SLURM_SUCCESS; - last_cr_update_time = time(NULL); - job->state &= (~CR_JOB_STATE_SUSPENDED); +/* _job_test - does most of the real work for select_p_job_test(), which + * pretty much just handles load-leveling and max_share logic */ +static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes, int mode, + enum node_cr_state job_node_req, + struct node_cr_record *select_node_ptr) +{ + int a, f, i, j, k, error_code, ll; /* ll = layout array index */ + struct multi_core_data *mc_ptr = NULL; + static struct select_cr_job *job; + uint16_t * layout_ptr = NULL; + int array_size; + int *busy_rows, *sh_tasks, *al_tasks, *freq; + bitstr_t *origmap, *reqmap = NULL; + int row, rows, try; + bool test_only; + uint32_t save_mem = 0; + + layout_ptr = job_ptr->details->req_node_layout; + mc_ptr = job_ptr->details->mc_ptr; + reqmap = 
job_ptr->details->req_node_bitmap; + + /* check node_state and update bitmap as necessary */ + if (mode == SELECT_MODE_TEST_ONLY) { + test_only = true; + save_mem = job_ptr->details->job_min_memory; + job_ptr->details->job_min_memory = 0; + } else /* SELECT_MODE_RUN_NOW || SELECT_MODE_WILL_RUN */ + test_only = false; - for (i = 0; i < job->nhosts; i++) { - struct node_cr_record *this_node; - this_node = find_cr_node_record(job->host[i]); - if (this_node == NULL) { - error("cons_res: could not find node %s", - job->host[i]); - rc = SLURM_ERROR; - break; - } - - /* Updating this node allocated resources */ - switch(cr_type) { - case CR_SOCKET: - case CR_SOCKET_MEMORY: - this_node->alloc_lps += job->alloc_lps[i]; - this_node->alloc_sockets += - job->alloc_sockets[i]; - this_node->alloc_memory += job->alloc_memory[i]; - break; - case CR_CORE: - case CR_CORE_MEMORY: - this_node->alloc_lps += job->alloc_lps[i]; - chk_resize_node(this_node, - this_node->node_ptr->sockets); - chk_resize_job(job, i, this_node->num_sockets); - for (j =0; j < this_node->num_sockets; j++) { - this_node->alloc_cores[j] += - job->alloc_cores[i][j]; + if (!test_only) { + error_code = _verify_node_state(select_node_ptr, job_ptr, + bitmap, job_node_req); + if (error_code != SLURM_SUCCESS) + return error_code; + } + + /* This is the case if -O/--overcommit is true */ + if (job_ptr->num_procs == job_ptr->details->min_nodes) { + job_ptr->num_procs *= MAX(1, mc_ptr->min_threads); + job_ptr->num_procs *= MAX(1, mc_ptr->min_cores); + job_ptr->num_procs *= MAX(1, mc_ptr->min_sockets); + } + + /* compute condensed arrays of node allocation data */ + array_size = _load_arrays(select_node_ptr, job_ptr, bitmap, &busy_rows, + &sh_tasks, &al_tasks, &freq, test_only, + job_node_req); + + if (test_only) { + /* try with all nodes and all possible cpus */ + error_code = _select_nodes(job_ptr, bitmap, min_nodes, + max_nodes, req_nodes, al_tasks, freq, + array_size); + xfree(busy_rows); + xfree(sh_tasks); + xfree(al_tasks); + xfree(freq); + if (save_mem) + job_ptr->details->job_min_memory = save_mem; + return error_code; + } + + origmap = bit_copy(bitmap); + if (origmap == NULL) + fatal("bit_copy malloc failure"); + + error_code = SLURM_ERROR; + rows = job_ptr->part_ptr->max_share & ~SHARED_FORCE; + rows = MAX(1, rows); /* max_share == 0 for EXCLUSIVE */ + for (row = 1; row <= rows; row++) { + + /* + * first try : try "as is" + * second try: only add a row to nodes with no free cpus + * third try : add a row to nodes with some alloc cpus + */ + for (try = 0; try < 3; try++) { + bit_or(bitmap, origmap); + + debug3("cons_res: cur row = %d, try = %d", row, try); + + for (i = 0, f = 0; i < array_size; i++) { + + /* Step 1: + * remove nodes from bitmap (unless required) + * who's busy_rows value is bigger than 'row'. + * Why? to enforce "least-loaded" over + * "contiguous" */ + if ((busy_rows[i] > row) || + (busy_rows[i] == row && sh_tasks[i] == 0)) { + for (j = f; j < f+freq[i]; j++) { + if (reqmap && + bit_test(reqmap, j)) + continue; + bit_clear(bitmap, j); + } } - this_node->alloc_memory += job->alloc_memory[i]; - break; - case CR_MEMORY: - this_node->alloc_memory += job->alloc_memory[i]; - break; - case CR_CPU: - case CR_CPU_MEMORY: - this_node->alloc_lps += job->alloc_lps[i]; - if (cr_type == CR_CPU) - break; - this_node->alloc_memory += job->alloc_memory[i]; + f += freq[i]; + + if (try == 0) + continue; + /* Step 2: + * set sh_tasks = al_tasks for nodes who's + * busy_rows value is < 'row'. + * Why? 
to select a new row for these + * nodes when appropriate */ + if ((busy_rows[i] == -1) || + (busy_rows[i] >= row)) + continue; + if (sh_tasks[i] == al_tasks[i]) + continue; + if ((try == 1) && (sh_tasks[i] != 0)) + continue; + sh_tasks[i] = al_tasks[i]; + } + if (bit_set_count(bitmap) < min_nodes) break; - default: + +#if (CR_DEBUG) + for (i = 0; i < array_size; i++) { + info("cons_res: try:%d i:%d busy_rows:%d " + "sh_tasks:%d al_tasks:%d freq:%d", + try, i, busy_rows[i], sh_tasks[i], + al_tasks[i], freq[i]); + } +#endif + + if (row > 1) { + /* We need to share resources. + * Try to find suitable job to share nodes with. */ + + /* FIXME: To be added. There is some simple logic + * to do this in select/linear.c:_find_job_mate(), + * but the data structures here are very different */ + } + + error_code = _select_nodes(job_ptr, bitmap, min_nodes, + max_nodes, req_nodes, + sh_tasks, freq, array_size); + if (error_code == SLURM_SUCCESS) break; + } + if (error_code == SLURM_SUCCESS) + break; + } + + bit_free(origmap); + + if ((mode != SELECT_MODE_WILL_RUN) && (job_ptr->part_ptr == NULL)) + error_code = EINVAL; + if ((error_code == SLURM_SUCCESS) && (mode == SELECT_MODE_WILL_RUN)) { + if (job_ptr->details->shared == 0) { + uint16_t procs; + job_ptr->total_procs = 0; + for (i = 0; i < select_node_cnt; i++) { + if (!bit_test(bitmap, i)) + continue; + procs = select_node_ptr[i].cpus; + job_ptr->total_procs += procs; + } + } else { + job_ptr->total_procs = job_ptr->num_procs; + if (job_ptr->details->cpus_per_task && + (job_ptr->details->cpus_per_task != + (uint16_t) NO_VAL)) { + job_ptr->total_procs *= job_ptr->details-> + cpus_per_task; } } - rc = SLURM_SUCCESS; - break; } - list_iterator_destroy(job_iterator); + if ((error_code != SLURM_SUCCESS) || (mode != SELECT_MODE_RUN_NOW)) { + xfree(busy_rows); + xfree(sh_tasks); + xfree(al_tasks); + xfree(freq); + return error_code; + } + + /* allocate the job and distribute the tasks appropriately */ + job = xmalloc(sizeof(struct select_cr_job)); + job->job_ptr = job_ptr; + job->job_id = job_ptr->job_id; + job->nhosts = bit_set_count(bitmap); + job->nprocs = MAX(job_ptr->num_procs, job->nhosts); + job->node_req = job_node_req; + + job->node_bitmap = bit_copy(bitmap); + if (job->node_bitmap == NULL) + fatal("bit_copy malloc failure"); + + job->cpus = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t)); + job->alloc_cpus = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t)); + job->node_offset = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t)); + job->alloc_memory = (uint32_t *) xmalloc(job->nhosts * sizeof(uint32_t)); + + if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY) || + (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) { + job->num_sockets = (uint16_t *) xmalloc(job->nhosts * + sizeof(uint16_t)); + job->alloc_cores = (uint16_t **) xmalloc(job->nhosts * + sizeof(uint16_t *)); + j = 0; + for (i = 0; i < select_node_cnt; i++) { + if (!bit_test(job->node_bitmap, i)) + continue; + job->num_sockets[j] = select_node_ptr[i].sockets; + job->alloc_cores[j] = (uint16_t *) xmalloc( + job->num_sockets[j] * sizeof(uint16_t)); + j++; + } + } - return rc; + j = 0; + a = 0; + f = 0; + row = 0; /* total up all available cpus for --overcommit scenarios */ + for (i = 0, ll = -1; i < node_record_count; i++, f++) { + if (f >= freq[a]) { + f = 0; + a++; + } + if (layout_ptr + && bit_test(job_ptr->details->req_node_bitmap, i)) { + ll++; + } + if (bit_test(bitmap, i) == 0) + continue; + if (j >= job->nhosts) { + error("select_cons_res: job nhosts too 
small\n"); + break; + } + job->cpus[j] = sh_tasks[a]; + row += sh_tasks[a]; + if (layout_ptr + && bit_test(job_ptr->details->req_node_bitmap, i)) { + job->cpus[j] = MIN(job->cpus[j], layout_ptr[ll]); + } else if (layout_ptr) { + job->cpus[j] = 0; + } + job->alloc_cpus[j] = 0; + if ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) || + (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY)) + job->alloc_memory[j] = job_ptr->details->job_min_memory; + if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)|| + (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) { + _chk_resize_job(job, j, job->num_sockets[j]); + for (k = 0; k < job->num_sockets[j]; k++) + job->alloc_cores[j][k] = 0; + } + j++; + } + + xfree(busy_rows); + xfree(sh_tasks); + xfree(al_tasks); + xfree(freq); + + /* When 'srun --overcommit' is used, nprocs is set to a minimum value + * in order to allocate the appropriate number of nodes based on the + * job request. + * For cons_res, all available logical processors will be allocated on + * each allocated node in order to accommodate the overcommit request. + */ + if (job_ptr->details->overcommit) + job->nprocs = MIN(row, job_ptr->details->num_tasks); + + if (job_ptr->details->shared == 0) { + /* Nodes need to be allocated in dedicated + mode. User has specified the --exclusive switch */ + error_code = cr_exclusive_dist(job, cr_type); + } else { + /* Determine the number of logical processors + * per node needed for this job. + * Make sure below matches the layouts in + * lllp_distribution in + * plugins/task/affinity/dist_task.c */ + switch(job_ptr->details->task_dist) { + case SLURM_DIST_BLOCK_BLOCK: + case SLURM_DIST_CYCLIC_BLOCK: + error_code = cr_dist(job, 0, cr_type); + break; + case SLURM_DIST_BLOCK: + case SLURM_DIST_CYCLIC: + case SLURM_DIST_BLOCK_CYCLIC: + case SLURM_DIST_CYCLIC_CYCLIC: + case SLURM_DIST_UNKNOWN: + error_code = cr_dist(job, 1, cr_type); + break; + case SLURM_DIST_PLANE: + error_code = cr_plane_dist(job, mc_ptr->plane_size, cr_type); + break; + case SLURM_DIST_ARBITRARY: + default: + error_code = compute_c_b_task_dist(job); + if (error_code != SLURM_SUCCESS) { + error(" Error in compute_c_b_task_dist"); + } + break; + } + } + if (error_code != SLURM_SUCCESS) { + _xfree_select_cr_job(job); + return error_code; + } + + _append_to_job_list(job); + last_cr_update_time = time(NULL); + + return error_code; +} + +extern int select_p_job_begin(struct job_record *job_ptr) +{ + return SLURM_SUCCESS; +} + +extern int select_p_job_ready(struct job_record *job_ptr) +{ + return SLURM_SUCCESS; +} + +extern int select_p_job_fini(struct job_record *job_ptr) +{ + struct select_cr_job *job = NULL; + ListIterator iterator; + + xassert(job_ptr); + xassert(job_ptr->magic == JOB_MAGIC); + + if (list_count(select_cr_job_list) == 0) + return SLURM_SUCCESS; + + iterator = list_iterator_create(select_cr_job_list); + while ((job = (struct select_cr_job *) list_next(iterator))) { + if (job->job_id == job_ptr->job_id) + break; + } + if (!job) { + error("select_p_job_fini: could not find data for job %d", + job_ptr->job_id); + list_iterator_destroy(iterator); + return SLURM_ERROR; + } + + _rm_job_from_nodes(select_node_ptr, job, "select_p_job_fini", 1); + + slurm_mutex_lock(&cr_mutex); + list_remove(iterator); + slurm_mutex_unlock(&cr_mutex); + _xfree_select_cr_job(job); + list_iterator_destroy(iterator); + + debug3("cons_res: select_p_job_fini Job_id %u: list_count: %d", + job_ptr->job_id, list_count(select_cr_job_list)); + + _verify_select_job_list(job_ptr->job_id); 
+ last_cr_update_time = time(NULL); + + return SLURM_SUCCESS; +} + +/* NOTE: This function is not called with sched/gang because it needs + * to track how many jobs are running or suspended on each node. + * This sum is compared with the partition's Shared parameter */ +extern int select_p_job_suspend(struct job_record *job_ptr) +{ + struct select_cr_job *job; + int rc; + + xassert(job_ptr); + xassert(select_cr_job_list); + + job = list_find_first(select_cr_job_list, _find_job_by_id, + &job_ptr->job_id); + if (!job) + return ESLURM_INVALID_JOB_ID; + + rc = _rm_job_from_nodes(select_node_ptr, job, + "select_p_job_suspend", 0); + return SLURM_SUCCESS; +} + +/* See NOTE with select_p_job_suspend above */ +extern int select_p_job_resume(struct job_record *job_ptr) +{ + struct select_cr_job *job; + int rc; + + xassert(job_ptr); + xassert(select_cr_job_list); + + job = list_find_first(select_cr_job_list, _find_job_by_id, + &job_ptr->job_id); + if (!job) + return ESLURM_INVALID_JOB_ID; + + rc = _add_job_to_nodes(job, "select_p_job_resume", 0); + return SLURM_SUCCESS; +} + +extern uint16_t select_p_get_job_cores(uint32_t job_id, int alloc_index, int s) +{ + struct select_cr_job *job = list_find_first(select_cr_job_list, + _find_job_by_id, &job_id); + if (!job || alloc_index >= job->nhosts) + return 0; + if (cr_type == CR_CORE || cr_type == CR_CORE_MEMORY || + cr_type == CR_SOCKET || cr_type == CR_SOCKET_MEMORY) { + if (job->num_sockets == NULL || job->alloc_cores == NULL) + return 0; + if (s >= job->num_sockets[alloc_index]) + return 0; + + return job->alloc_cores[alloc_index][s]; + } + /* else return the total cpu count for the given node */ + if (job->alloc_cpus == NULL) + return 0; + + return job->alloc_cpus[alloc_index]; } extern int select_p_pack_node_info(time_t last_query_time, @@ -2174,108 +2776,59 @@ extern int select_p_get_extra_jobinfo(struct node_record *node_ptr, enum select_data_info cr_info, void *data) { - int rc = SLURM_SUCCESS, i, avail = 0; - uint32_t *tmp_32 = (uint32_t *) data; + int rc = SLURM_SUCCESS, i, index, node_offset, node_inx; + struct select_cr_job *job; + struct node_cr_record *this_cr_node; uint16_t *tmp_16 = (uint16_t *) data; xassert(job_ptr); xassert(job_ptr->magic == JOB_MAGIC); + xassert(node_ptr); switch (cr_info) { - case SELECT_AVAIL_MEMORY: - { - switch(cr_type) { - case CR_MEMORY: - case CR_CPU_MEMORY: - case CR_SOCKET_MEMORY: - case CR_CORE_MEMORY: - *tmp_32 = 0; - for (i = 0; i < node_record_count; i++) { - if (bit_test(job_ptr->details->req_node_bitmap, i) != 1) - continue; - avail = _get_avail_memory(i, false); - if (avail < 0) { - rc = SLURM_ERROR; - return rc; - } - } + case SELECT_AVAIL_CPUS: + *tmp_16 = 0; + job = list_find_first(select_cr_job_list, _find_job_by_id, + &job_ptr->job_id); + if (job == NULL) { + error("cons_res: job %u not active", job_ptr->job_id); break; - default: - *tmp_32 = 0; } - break; - } - case SELECT_CPU_COUNT: - { - struct multi_core_data *mc_ptr = job_ptr->details->mc_ptr; - - if (mc_ptr && - ((job_ptr->details->cpus_per_task > 1) || - (mc_ptr->max_sockets > 1) || - (mc_ptr->max_cores > 1) || - (mc_ptr->max_threads > 1))) { - *tmp_16 = 0; - for (i = 0; i < node_record_count; i++) { - if (bit_test(job_ptr->details->req_node_bitmap, i) != 1) - continue; - /* req_node_layout info is not supported for - * socket/core/threads/cpus_per task, but - * probably there should be something in - * here... 
*/ - *tmp_16 += _get_avail_lps(job_ptr, i, false); - } - } else { - _count_cpus(job_ptr->details-> - req_node_bitmap, tmp_16); - } - break; - } - case SELECT_AVAIL_CPUS: - { - struct select_cr_job *job = NULL; - ListIterator iterator = - list_iterator_create(select_cr_job_list); - xassert(node_ptr); - xassert(node_ptr->magic == NODE_MAGIC); - *tmp_16 = 0; - while ((job = - (struct select_cr_job *) list_next(iterator)) != NULL) { - if (job->job_id != job_ptr->job_id) + node_offset = -1; + node_inx = node_ptr - node_record_table_ptr; + for (i = 0; i < node_record_count; i++) { + if (bit_test(job->node_bitmap, i) == 0) continue; - for (i = 0; i < job->nhosts; i++) { - if (strcmp(node_ptr->name, job->host[i]) != 0) - continue; - /* Usable and "allocated" resources for this - * given job for a specific node --> based - * on the output from _cr_dist */ - switch(cr_type) { - case CR_MEMORY: - *tmp_16 = node_ptr->cpus; - break; - case CR_SOCKET: - case CR_SOCKET_MEMORY: - case CR_CORE: - case CR_CORE_MEMORY: - case CR_CPU: - case CR_CPU_MEMORY: - default: - *tmp_16 = job->alloc_lps[i]; - break; - } - goto cleanup; + node_offset++; + if (i != node_inx) + continue; + /* Usable and "allocated" resources for this + * given job for a specific node --> based + * on the output from _cr_dist */ + switch(cr_type) { + case CR_MEMORY: + index = node_ptr - node_record_table_ptr; + this_cr_node = select_node_ptr + index; + *tmp_16 = this_cr_node->cpus; + break; + case CR_SOCKET: + case CR_SOCKET_MEMORY: + case CR_CORE: + case CR_CORE_MEMORY: + case CR_CPU: + case CR_CPU_MEMORY: + default: + *tmp_16 = job->alloc_cpus[node_offset]; + break; } + break; + } + if (i >= node_record_count) { error("cons_res could not find %s", node_ptr->name); rc = SLURM_ERROR; } - if (!job) { - debug3("cons_res: job %u not active", job_ptr->job_id); - *tmp_16 = 0; - } - cleanup: - list_iterator_destroy(iterator); break; - } default: error("select_g_get_extra_jobinfo cr_info %d invalid", cr_info); rc = SLURM_ERROR; @@ -2289,76 +2842,34 @@ extern int select_p_get_select_nodeinfo(struct node_record *node_ptr, enum select_data_info dinfo, void *data) { - int rc = SLURM_SUCCESS, i; + int index, i, j, rc = SLURM_SUCCESS; struct node_cr_record *this_cr_node; + struct part_cr_record *p_ptr; + uint16_t *tmp_16; xassert(node_ptr); - xassert(node_ptr->magic == NODE_MAGIC); switch (dinfo) { - case SELECT_AVAIL_MEMORY: - case SELECT_ALLOC_MEMORY: - { - uint32_t *tmp_32 = (uint32_t *) data; - - *tmp_32 = 0; - switch(cr_type) { - case CR_MEMORY: - case CR_SOCKET_MEMORY: - case CR_CORE_MEMORY: - case CR_CPU_MEMORY: - this_cr_node = find_cr_node_record (node_ptr->name); - if (this_cr_node == NULL) { - error(" cons_res: could not find node %s", - node_ptr->name); - rc = SLURM_ERROR; - return rc; - } - if (dinfo == SELECT_ALLOC_MEMORY) { - *tmp_32 = this_cr_node->alloc_memory; - } else - *tmp_32 = - this_cr_node->node_ptr->real_memory - - this_cr_node->alloc_memory; - break; - default: - *tmp_32 = 0; - break; - } - break; - } case SELECT_ALLOC_CPUS: - { - uint16_t *tmp_16 = (uint16_t *) data; + tmp_16 = (uint16_t *) data; *tmp_16 = 0; - this_cr_node = find_cr_node_record (node_ptr->name); - if (this_cr_node == NULL) { - error(" cons_res: could not find node %s", - node_ptr->name); - rc = SLURM_ERROR; - return rc; - } - switch(cr_type) { - case CR_SOCKET: - case CR_SOCKET_MEMORY: - *tmp_16 = this_cr_node->alloc_sockets * - node_ptr->cores * node_ptr->threads; - break; - case CR_CORE: - case CR_CORE_MEMORY: - for (i = 0; i < this_cr_node->num_sockets; 
i++) - *tmp_16 += this_cr_node->alloc_cores[i] * - node_ptr->threads; - break; - case CR_MEMORY: - case CR_CPU: - case CR_CPU_MEMORY: - default: - *tmp_16 = this_cr_node->alloc_lps; - break; + index = node_ptr - node_record_table_ptr; + this_cr_node = select_node_ptr + index; + + /* determine the highest number of allocated cores from */ + /* all rows of all partitions */ + for (p_ptr = this_cr_node->parts; p_ptr; p_ptr = p_ptr->next) { + i = 0; + for (j = 0; j < p_ptr->num_rows; j++) { + uint16_t tmp = 0; + for (; i < this_cr_node->sockets; i++) + tmp += p_ptr->alloc_cores[i] * + this_cr_node->threads; + if (tmp > *tmp_16) + *tmp_16 = tmp; + } } break; - } default: error("select_g_get_select_nodeinfo info %d invalid", dinfo); rc = SLURM_ERROR; @@ -2369,108 +2880,22 @@ extern int select_p_get_select_nodeinfo(struct node_record *node_ptr, extern int select_p_update_nodeinfo(struct job_record *job_ptr) { - int rc = SLURM_SUCCESS, i, j, job_id, nodes; - struct select_cr_job *job = NULL; - ListIterator iterator; + int rc = SLURM_SUCCESS; + struct select_cr_job *job; xassert(job_ptr); xassert(job_ptr->magic == JOB_MAGIC); if ((job_ptr->job_state != JOB_RUNNING) && (job_ptr->job_state != JOB_SUSPENDED)) - return rc; - - job_id = job_ptr->job_id; - - iterator = list_iterator_create(select_cr_job_list); - while ((job = (struct select_cr_job *) list_next(iterator)) - != NULL) { - if (job->job_id != job_id) - continue; + return SLURM_SUCCESS; - if (job_ptr->job_state == JOB_SUSPENDED) { - job->state |= CR_JOB_STATE_SUSPENDED; - nodes = 0; - } else { - job->state &= (~CR_JOB_STATE_SUSPENDED); - nodes = job->nhosts; - } - - for (i = 0; i < nodes; i++) { - struct node_cr_record *this_node; - this_node = find_cr_node_record (job->host[i]); - if (this_node == NULL) { - error(" cons_res: could not find node %s", - job->host[i]); - rc = SLURM_ERROR; - goto cleanup; - } - /* Updating this node's allocated resources */ - switch (cr_type) { - case CR_SOCKET_MEMORY: - this_node->alloc_memory += job->alloc_memory[i]; - case CR_SOCKET: - this_node->alloc_lps += job->alloc_lps[i]; - this_node->alloc_sockets += job->alloc_sockets[i]; - if (this_node->alloc_sockets > this_node->node_ptr->sockets) - error("Job %u Host %s too many allocated sockets %u", - job->job_id, this_node->node_ptr->name, - this_node->alloc_sockets); - break; - case CR_CORE_MEMORY: - this_node->alloc_memory += job->alloc_memory[i]; - case CR_CORE: - this_node->alloc_lps += job->alloc_lps[i]; - if (this_node->alloc_lps > this_node->node_ptr->cpus) - error("Job %u Host %s too many allocated lps %u", - job->job_id, this_node->node_ptr->name, - this_node->alloc_lps); - chk_resize_node(this_node, this_node->node_ptr->sockets); - chk_resize_job(job, i, this_node->num_sockets); - for (j = 0; j < this_node->num_sockets; j++) - this_node->alloc_cores[j] += job->alloc_cores[i][j]; - for (j = 0; j < this_node->num_sockets; j++) - if (this_node->alloc_cores[j] <= - this_node->node_ptr->cores) - continue; - else - error("Job %u Host %s too many allocated " - "cores %u for socket %d", - job->job_id, this_node->node_ptr->name, - this_node->alloc_cores[j], j); - break; - case CR_CPU_MEMORY: - this_node->alloc_memory += job->alloc_memory[i]; - case CR_CPU: - this_node->alloc_lps += job->alloc_lps[i]; - break; - case CR_MEMORY: - this_node->alloc_memory += job->alloc_memory[i]; - break; - default: - error("select_g_update_nodeinfo info %d invalid", cr_type); - rc = SLURM_ERROR; - break; - } -#if(CR_DEBUG) - /* Remove debug only */ - info("cons_res %u 
update_nodeinfo (+) node %s " - "alloc_ lps %u sockets %u mem %u", - job->job_id, this_node->node_ptr->name, this_node->alloc_lps, - this_node->alloc_sockets, this_node->alloc_memory); - if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)) - for (j = 0; j < this_node->num_sockets; j++) - info("cons_res %u update_nodeinfo (+) " - "node %s alloc_ cores %u", - job->job_id, this_node->node_ptr->name, - this_node->alloc_cores[j]); -#endif - } - break; - } - cleanup: - list_iterator_destroy(iterator); + job = list_find_first(select_cr_job_list, _find_job_by_id, + &job_ptr->job_id); + if (!job) + return SLURM_SUCCESS; + rc = _add_job_to_nodes(job, "select_p_update_nodeinfo", 0); return rc; } @@ -2530,3 +2955,207 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data) { return SLURM_SUCCESS; } + +extern int select_p_reconfigure(void) +{ + ListIterator job_iterator; + struct select_cr_job *job; + struct job_record *job_ptr; + int rc, suspend; + + info("cons_res: select_p_reconfigure"); + select_fast_schedule = slurm_get_fast_schedule(); + + /* Refresh the select_node_ptr global array in case nodes + * have been added or removed. This procedure will clear all + * partition information and all allocated resource usage. + */ + rc = select_p_node_init(node_record_table_ptr, node_record_count); + + /* reload all of the allocated resource usage from job data */ + if (select_cr_job_list == NULL) + return SLURM_SUCCESS; + + slurm_mutex_lock(&cr_mutex); + job_iterator = list_iterator_create(select_cr_job_list); + while ((job = (struct select_cr_job *) list_next(job_iterator))) { + suspend = 0; + job_ptr = find_job_record(job->job_id); + if ((job_ptr == NULL) || + (job_ptr->part_ptr == NULL) || + ((job_ptr->job_state != JOB_RUNNING) && + (job_ptr->job_state != JOB_SUSPENDED))) { + list_remove(job_iterator); + error("cons_res: select_p_reconfigure: removing " + "nonexistent/invalid job %u", job->job_id); + _xfree_select_cr_job(job); + continue; + } + + if (job_ptr->job_state == JOB_SUSPENDED) + suspend = 1; + if ((job->state & CR_JOB_ALLOCATED_MEM) || + (job->state & CR_JOB_ALLOCATED_CPUS)) { + job->state = 0; + _add_job_to_nodes(job, "select_p_reconfigure", suspend); + /* ignore any errors. 
partition and/or node config + * may have changed while jobs remain running */ + } + } + list_iterator_destroy(job_iterator); + slurm_mutex_unlock(&cr_mutex); + last_cr_update_time = time(NULL); + + return SLURM_SUCCESS; +} + +extern struct multi_core_data * create_default_mc(void) +{ + struct multi_core_data *mc_ptr; + mc_ptr = xmalloc(sizeof(struct multi_core_data)); + mc_ptr->min_sockets = 1; + mc_ptr->max_sockets = 0xffff; + mc_ptr->min_cores = 1; + mc_ptr->max_cores = 0xffff; + mc_ptr->min_threads = 1; + mc_ptr->max_threads = 0xffff; +/* mc_ptr is initialized to zero by xmalloc*/ +/* mc_ptr->ntasks_per_socket = 0; */ +/* mc_ptr->ntasks_per_core = 0; */ +/* mc_ptr->plane_size = 0; */ + return mc_ptr; +} + +extern int select_p_step_begin(struct step_record *step_ptr) +{ + slurm_step_layout_t *step_layout = step_ptr->step_layout; + struct select_cr_job *job; + struct node_cr_record *this_node; + int job_node_inx, step_node_inx, host_index; + uint32_t avail_mem, step_mem; + + xassert(select_cr_job_list); + xassert(step_ptr->job_ptr); + xassert(step_ptr->job_ptr->details); + xassert(step_ptr->step_node_bitmap); + + if (step_layout == NULL) + return SLURM_SUCCESS; /* batch script */ + if (step_ptr->job_ptr->details->job_min_memory) + return SLURM_SUCCESS; + if ((cr_type != CR_CORE_MEMORY) && (cr_type != CR_CPU_MEMORY) && + (cr_type != CR_MEMORY) && (cr_type != CR_SOCKET_MEMORY)) + return SLURM_SUCCESS; + + job = list_find_first(select_cr_job_list, _find_job_by_id, + &step_ptr->job_ptr->job_id); + if (!job) { + error("select_p_step_begin: could not find step %u.%u", + step_ptr->job_ptr->job_id, step_ptr->step_id); + return ESLURM_INVALID_JOB_ID; + } + + /* test if there is sufficient memory */ + step_node_inx = -1; + for (host_index = 0; host_index < select_node_cnt; host_index++) { + if (bit_test(step_ptr->step_node_bitmap, host_index) == 0) + continue; + step_node_inx++; + + this_node = &select_node_ptr[host_index]; + step_mem = step_layout->tasks[step_node_inx] * + step_ptr->mem_per_task; + avail_mem = select_node_ptr[host_index].real_memory; + if ((this_node->alloc_memory + step_mem) > avail_mem) + return SLURM_ERROR; /* no room */ + } + + /* reserve the memory */ + job_node_inx = -1; + step_node_inx = -1; + for (host_index = 0; host_index < select_node_cnt; host_index++) { + if (bit_test(job->node_bitmap, host_index) == 0) + continue; + job_node_inx++; + if (bit_test(step_ptr->step_node_bitmap, host_index) == 0) + continue; + step_node_inx++; + + this_node = &select_node_ptr[host_index]; + step_mem = step_layout->tasks[step_node_inx] * + step_ptr->mem_per_task; + job->alloc_memory[job_node_inx] += step_mem; + this_node->alloc_memory += step_mem; + } + last_cr_update_time = time(NULL); + return SLURM_SUCCESS; +} + +extern int select_p_step_fini(struct step_record *step_ptr) +{ + slurm_step_layout_t *step_layout = step_ptr->step_layout; + struct select_cr_job *job; + struct node_cr_record *this_node; + int job_node_inx, step_node_inx, host_index, rc = SLURM_SUCCESS; + uint32_t step_mem; + + xassert(select_cr_job_list); + xassert(step_ptr->job_ptr); + xassert(step_ptr->job_ptr->details); + xassert(step_ptr->step_node_bitmap); + + if (step_layout == NULL) + return SLURM_SUCCESS; /* batch script */ + if (step_ptr->job_ptr->details->job_min_memory) + return SLURM_SUCCESS; + if ((cr_type != CR_CORE_MEMORY) && (cr_type != CR_CPU_MEMORY) && + (cr_type != CR_MEMORY) && (cr_type != CR_SOCKET_MEMORY)) + return SLURM_SUCCESS; + + job = list_find_first(select_cr_job_list, _find_job_by_id, + 
&step_ptr->job_ptr->job_id); + if (!job) { + error("select_p_step_fini: could not find step %u.%u", + step_ptr->job_ptr->job_id, step_ptr->step_id); + return ESLURM_INVALID_JOB_ID; + } + + job_node_inx = -1; + step_node_inx = -1; + for (host_index = 0; host_index < select_node_cnt; host_index++) { + if (bit_test(job->node_bitmap, host_index) == 0) + continue; + job_node_inx++; + if (bit_test(step_ptr->step_node_bitmap, host_index) == 0) + continue; + step_node_inx++; + + this_node = &select_node_ptr[host_index]; + step_mem = step_layout->tasks[step_node_inx] * + step_ptr->mem_per_task; + if (job->alloc_memory[job_node_inx] >= step_mem) + job->alloc_memory[job_node_inx] -= step_mem; + else { + if (rc == SLURM_SUCCESS) { + error("select_p_step_fini: job alloc_memory " + "underflow on %s", + this_node->node_ptr->name); + rc = SLURM_ERROR; + } + job->alloc_memory[host_index] = 0; + } + if (this_node->alloc_memory >= step_mem) + this_node->alloc_memory -= step_mem; + else { + if (rc == SLURM_SUCCESS) { + error("select_p_step_fini: node alloc_memory " + "underflow on %s", + this_node->node_ptr->name); + rc = SLURM_ERROR; + } + this_node->alloc_memory = 0; + } + } + last_cr_update_time = time(NULL); + return rc; +} diff --git a/src/plugins/select/cons_res/select_cons_res.h b/src/plugins/select/cons_res/select_cons_res.h index 3a494f505..63f50c6d6 100644 --- a/src/plugins/select/cons_res/select_cons_res.h +++ b/src/plugins/select/cons_res/select_cons_res.h @@ -5,7 +5,7 @@ ***************************************************************************** * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -41,12 +41,14 @@ #include <fcntl.h> #include <stdio.h> +#include <stdlib.h> #include <slurm/slurm.h> #include <slurm/slurm_errno.h> #include "src/common/list.h" #include "src/common/log.h" #include "src/common/node_select.h" +#include "src/common/pack.h" #include "src/common/slurm_protocol_api.h" #include "src/common/xassert.h" #include "src/common/xmalloc.h" @@ -56,31 +58,54 @@ #include "src/slurmd/slurmd/slurmd.h" +/* part_cr_record keeps track of the allocated cores of a node that + * has been assigned to a partition. SLURM allows a node to be + * assigned to more than one partition. One or more partitions + * may be configured to share the cores with more than one job. + */ +struct part_cr_record { + struct part_record *part_ptr; /* ptr to slurmctld partition record */ + uint16_t *alloc_cores; /* core count per socket reserved by + * already scheduled jobs */ + uint16_t num_rows; /* number of rows in alloc_cores. The + * length of alloc_cores is + * num_sockets * num_rows. */ + struct part_cr_record *next; /* ptr to next part_cr_record */ +}; + +/* + * node_cr_record.node_state assists with the unique state of each node. + * NOTES: + * - If node is in use by Shared=NO part, some CPUs/memory may be available + * - Caution with NODE_CR_AVAILABLE: a Sharing partition could be full!! + */ +enum node_cr_state { + NODE_CR_RESERVED, /* node is NOT available for use by any other jobs */ + NODE_CR_ONE_ROW, /* node is in use by Shared=NO part */ + NODE_CR_AVAILABLE /* The node may be IDLE or IN USE by Sharing part(s)*/ +}; + /* node_cr_record keeps track of the resources within a node which * have been reserved by already scheduled jobs. 
+ * + * NOTE: The locations of these entries are synchronized with the + * job records in slurmctld (entry X in both tables are the same). */ -/*** NOTE: If any changes are made here, the following data structure has - *** persistent state which is maintained by select_cons_res.c: - *** select_p_state_save - *** select_p_state_restore - *** select_p_node_init - *** - *** as well as tracked by version control - *** select_cons_res.c:pstate_version - *** which should be incremented if any changes are made. - **/ struct node_cr_record { - struct node_record *node_ptr; /* ptr to the node that own these resources */ - char *name; /* reference copy of node_ptr name */ - uint16_t alloc_lps; /* cpu count reserved by already scheduled jobs */ - uint16_t alloc_sockets; /* socket count reserved by already scheduled jobs */ - uint16_t num_sockets; /* number of sockets in alloc_cores */ - uint16_t *alloc_cores; /* core count per socket reserved by - * already scheduled jobs */ - uint32_t alloc_memory; /* real memory reserved by already scheduled jobs */ - struct node_cr_record *node_next;/* next entry with same hash index */ + struct node_record *node_ptr; /* ptr to the actual node */ + uint16_t cpus; /* count of processors configured */ + uint16_t sockets; /* count of sockets configured */ + uint16_t cores; /* count of cores configured */ + uint16_t threads; /* count of threads configured */ + uint32_t real_memory; /* MB of real memory configured */ + enum node_cr_state node_state; /* see node_cr_state comments */ + struct part_cr_record *parts; /* ptr to singly-linked part_cr_record + * list that contains alloc_core info */ + uint32_t alloc_memory; /* real memory reserved by already + * scheduled jobs */ }; - +extern struct node_cr_record *select_node_ptr; +extern uint16_t select_fast_schedule; /*** NOTE: If any changes are made here, the following data structure has *** persistent state which is maintained by select_cons_res.c: @@ -92,46 +117,48 @@ struct node_cr_record { *** which should be incremented if any changes are made. 
**/ struct select_cr_job { + /* Information preserved across reboots */ uint32_t job_id; /* job ID, default set by SLURM */ - uint16_t state; /* job state information */ + enum node_cr_state node_req; /* see node_cr_state comments */ uint32_t nprocs; /* --nprocs=n, -n n */ - uint16_t nhosts; /* number of hosts allocated to job */ - char **host; /* hostname vector */ + uint32_t nhosts; /* number of hosts allocated to job */ uint16_t *cpus; /* number of processors on each host, * if using Moab scheduler (sched/wiki2) * then this will be initialized to the * number of CPUs desired on the node */ - uint16_t *alloc_lps; /* number of allocated threads/lps on + uint16_t *alloc_cpus; /* number of allocated threads/cpus on * each host */ - uint16_t *alloc_sockets;/* number of allocated sockets on each - * host */ uint16_t *num_sockets; /* number of sockets in alloc_cores[node] */ uint16_t **alloc_cores; /* number of allocated cores on each * host */ uint32_t *alloc_memory; /* number of allocated MB of real * memory on each host */ - uint16_t max_sockets; - uint16_t max_cores; - uint16_t max_threads; - uint16_t min_sockets; - uint16_t min_cores; - uint16_t min_threads; - uint16_t ntasks_per_node; - uint16_t ntasks_per_socket; - uint16_t ntasks_per_core; - uint16_t cpus_per_task; - bitstr_t *node_bitmap; /* bitmap of nodes allocated to job */ + uint16_t *node_offset; /* the node_cr_record->alloc_cores row to + * which this job was assigned */ + + /* Information re-established after reboot */ + struct job_record *job_ptr; /* pointer to slurmctld job record */ + uint16_t state; /* job state information */ + bitstr_t *node_bitmap; /* bitmap of nodes allocated to job, + * NOTE: The node_bitmap in slurmctld's job + * structure clears bits as on completion. + * This bitmap is persistent through lifetime + * of the job. */ }; struct node_cr_record * find_cr_node_record (const char *name); +/* Find a partition record based upon pointer to slurmctld record */ +struct part_cr_record *get_cr_part_ptr(struct node_cr_record *this_node, + struct part_record *part_ptr); + void get_resources_this_node(uint16_t *cpus, uint16_t *sockets, uint16_t *cores, uint16_t *threads, struct node_cr_record *this_cr_node, - uint16_t *alloc_sockets, - uint16_t *alloc_lps, - uint32_t *jobid); + uint32_t jobid); + +extern struct multi_core_data * create_default_mc(void); #endif /* !_CONS_RES_H */ diff --git a/src/plugins/select/linear/Makefile.am b/src/plugins/select/linear/Makefile.am index 97fd5647b..b398747c9 100644 --- a/src/plugins/select/linear/Makefile.am +++ b/src/plugins/select/linear/Makefile.am @@ -9,7 +9,6 @@ INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common pkglib_LTLIBRARIES = select_linear.la # Linear node selection plugin. -select_linear_la_SOURCES = select_linear.c +select_linear_la_SOURCES = select_linear.c select_linear.h select_linear_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -select_linear_la_LIBADD = $(top_builddir)/src/common/libcommon.la diff --git a/src/plugins/select/linear/Makefile.in b/src/plugins/select/linear/Makefile.in index b18586a04..b97714262 100644 --- a/src/plugins/select/linear/Makefile.in +++ b/src/plugins/select/linear/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,14 +75,13 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -select_linear_la_DEPENDENCIES = \ - $(top_builddir)/src/common/libcommon.la +select_linear_la_LIBADD = am_select_linear_la_OBJECTS = select_linear.lo select_linear_la_OBJECTS = $(am_select_linear_la_OBJECTS) select_linear_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(select_linear_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -120,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -259,9 +271,8 @@ INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common pkglib_LTLIBRARIES = select_linear.la # Linear node selection plugin. 
-select_linear_la_SOURCES = select_linear.c +select_linear_la_SOURCES = select_linear.c select_linear.h select_linear_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -select_linear_la_LIBADD = $(top_builddir)/src/common/libcommon.la all: all-am .SUFFIXES: @@ -301,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -310,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -365,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -378,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -389,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c index ec46310a6..28c8ddc8e 100644 --- a/src/plugins/select/linear/select_linear.c +++ b/src/plugins/select/linear/select_linear.c @@ -3,12 +3,13 @@ * address space. Selects nodes for a job so as to minimize the number * of sets of consecutive nodes using a best-fit algorithm. 
* - * $Id: select_linear.c 13767 2008-04-02 17:29:59Z jette $ + * $Id: select_linear.c 14103 2008-05-21 20:31:22Z jette $ ***************************************************************************** - * Copyright (C) 2004-2006 The Regents of the University of California. + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -61,15 +62,46 @@ #include "src/common/node_select.h" #include "src/common/parse_time.h" #include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_resource_info.h" #include "src/common/xassert.h" #include "src/common/xmalloc.h" -#include "src/common/slurm_resource_info.h" - #include "src/slurmctld/slurmctld.h" #include "src/slurmctld/proc_req.h" - -#define SELECT_DEBUG 0 +#include "src/plugins/select/linear/select_linear.h" + +#define SELECT_DEBUG 0 +#define NO_SHARE_LIMIT 0xfffe + +static int _add_job_to_nodes(struct node_cr_record *node_cr_ptr, + struct job_record *job_ptr, char *pre_err, + int suspended); +static int _add_step(struct step_record *step_ptr); +static void _cr_job_list_del(void *x); +static int _cr_job_list_sort(void *x, void *y); +static void _del_list_step(void *x); +static void _dump_node_cr(struct node_cr_record *node_cr_ptr); +static struct node_cr_record *_dup_node_cr(struct node_cr_record *node_cr_ptr); +static int _find_job_mate(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes); +static int _find_step(struct step_record *step_ptr); +static void _free_node_cr(struct node_cr_record *node_cr_ptr); +static void _init_node_cr(void); +static int _job_count_bitmap(struct node_cr_record *node_cr_ptr, + struct job_record *job_ptr, + bitstr_t * bitmap, bitstr_t * jobmap, + int run_job_cnt, int tot_job_cnt); +static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes); +static int _remove_step(struct step_record *step_ptr); +static int _rm_job_from_nodes(struct node_cr_record *node_cr_ptr, + struct job_record *job_ptr, char *pre_err, + int remove_all); +static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + int max_share, uint32_t req_nodes); /* * These variables are required by the generic plugin interface. 
If they @@ -106,6 +138,11 @@ const uint32_t plugin_version = 90; static struct node_record *select_node_ptr = NULL; static int select_node_cnt = 0; static uint16_t select_fast_schedule; +static uint16_t cr_type; + +static struct node_cr_record *node_cr_ptr = NULL; +static pthread_mutex_t cr_mutex = PTHREAD_MUTEX_INITIALIZER; +static List step_cr_list = NULL; #ifdef HAVE_XCPU #define XCPU_POLL_TIME 120 @@ -186,7 +223,7 @@ static int _init_status_pthread(void) static int _fini_status_pthread(void) { - int i, rc=SLURM_SUCCESS; + int i, rc = SLURM_SUCCESS; slurm_mutex_lock( &thread_flag_mutex ); if ( xcpu_thread ) { @@ -208,8 +245,7 @@ static int _fini_status_pthread(void) } #endif -static bool -_enough_nodes(int avail_nodes, int rem_nodes, +static bool _enough_nodes(int avail_nodes, int rem_nodes, uint32_t min_nodes, uint32_t req_nodes) { int needed_nodes; @@ -232,6 +268,12 @@ extern int init ( void ) #ifdef HAVE_XCPU rc = _init_status_pthread(); #endif +#ifdef HAVE_BG + error("%s is incompatable with BlueGene", plugin_name); + fatal("Use SelectType=select/bluegene"); +#endif + cr_type = (select_type_plugin_info_t) + slurmctld_conf.select_type_param; return rc; } @@ -241,6 +283,13 @@ extern int fini ( void ) #ifdef HAVE_XCPU rc = _fini_status_pthread(); #endif + slurm_mutex_lock(&cr_mutex); + _free_node_cr(node_cr_ptr); + node_cr_ptr = NULL; + if (step_cr_list) + list_destroy(step_cr_list); + step_cr_list = NULL; + slurm_mutex_unlock(&cr_mutex); return rc; } @@ -276,6 +325,17 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt) return SLURM_ERROR; } + /* NOTE: We free the consumable resources info here, but + * can't rebuild it since the partition and node structures + * have not yet had node bitmaps reset. */ + slurm_mutex_lock(&cr_mutex); + _free_node_cr(node_cr_ptr); + node_cr_ptr = NULL; + if (step_cr_list) + list_destroy(step_cr_list); + step_cr_list = NULL; + slurm_mutex_unlock(&cr_mutex); + select_node_ptr = node_ptr; select_node_cnt = node_cnt; select_fast_schedule = slurm_get_fast_schedule(); @@ -307,19 +367,19 @@ static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index) multi_core_data_t *mc_ptr = NULL; int min_sockets = 0, min_cores = 0; - if (job_ptr->details) { - if (job_ptr->details->cpus_per_task) - cpus_per_task = job_ptr->details->cpus_per_task; - if (job_ptr->details->ntasks_per_node) - ntasks_per_node = job_ptr->details->ntasks_per_node; - mc_ptr = job_ptr->details->mc_ptr; - } - if (mc_ptr) { - max_sockets = job_ptr->details->mc_ptr->max_sockets; - max_cores = job_ptr->details->mc_ptr->max_cores; - max_threads = job_ptr->details->mc_ptr->max_threads; - ntasks_per_socket = job_ptr->details->mc_ptr->ntasks_per_socket; - ntasks_per_core = job_ptr->details->mc_ptr->ntasks_per_core; + if (job_ptr->details == NULL) + return (uint16_t) 0; + + if (job_ptr->details->cpus_per_task) + cpus_per_task = job_ptr->details->cpus_per_task; + if (job_ptr->details->ntasks_per_node) + ntasks_per_node = job_ptr->details->ntasks_per_node; + if ((mc_ptr = job_ptr->details->mc_ptr)) { + max_sockets = mc_ptr->max_sockets; + max_cores = mc_ptr->max_cores; + max_threads = mc_ptr->max_threads; + ntasks_per_socket = mc_ptr->ntasks_per_socket; + ntasks_per_core = mc_ptr->ntasks_per_core; } node_ptr = &(select_node_ptr[index]); @@ -347,8 +407,7 @@ static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index) max_sockets, max_cores, max_threads, min_sockets, min_cores, cpus_per_task, ntasks_per_node, ntasks_per_socket, ntasks_per_core, - &cpus, &sockets, 
&cores, &threads, - (uint16_t) 0, NULL, (uint16_t) 0, + &cpus, &sockets, &cores, &threads, NULL, SELECT_TYPE_INFO_NONE, job_ptr->job_id, node_ptr->name); @@ -366,14 +425,16 @@ static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index) * "best" is defined as either single set of consecutive nodes satisfying * the request and leaving the minimum number of unused nodes OR * the fewest number of consecutive node sets - * IN job_ptr - pointer to job being scheduled + * IN/OUT job_ptr - pointer to job being considered for initiation, + * set's start_time when job expected to start * IN/OUT bitmap - usable nodes are set on input, nodes not required to * satisfy the request are cleared, other left set * IN min_nodes - minimum count of nodes * IN req_nodes - requested (or desired) count of nodes * IN max_nodes - maximum count of nodes (0==don't care) - * IN test_only - if true, only test if ever could run, not necessarily now, - * not used in this implementation of plugin + * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now + * SELECT_MODE_TEST_ONLY: test if job can ever run + * SELECT_MODE_WILL_RUN: determine when and where job can run * RET zero on success, EINVAL otherwise * globals (passed via select_p_node_init): * node_record_count - count of nodes configured @@ -387,7 +448,224 @@ static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index) */ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes, bool test_only) + uint32_t req_nodes, int mode) +{ + bitstr_t *orig_map; + int max_run_job, j, sus_jobs, rc = EINVAL, prev_cnt = -1; + int min_share = 0, max_share = 0; + uint32_t save_mem = 0; + + xassert(bitmap); + if (job_ptr->details == NULL) + return EINVAL; + + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) { + _init_node_cr(); + if (node_cr_ptr == NULL) { + slurm_mutex_unlock(&cr_mutex); + error("select_p_job_test: node_cr_ptr not initialized"); + return SLURM_ERROR; + } + } + + if (bit_set_count(bitmap) < min_nodes) { + slurm_mutex_unlock(&cr_mutex); + return EINVAL; + } + + if (mode != SELECT_MODE_TEST_ONLY) { + if (job_ptr->details->shared == 1) { + max_share = job_ptr->part_ptr->max_share & + ~SHARED_FORCE; + } else /* ((shared == 0) || (shared == (uint16_t) NO_VAL)) */ + max_share = 1; + } + + if (mode == SELECT_MODE_WILL_RUN) { + rc = _will_run_test(job_ptr, bitmap, min_nodes, max_nodes, + max_share, req_nodes); + slurm_mutex_unlock(&cr_mutex); + return rc; + } else if (mode == SELECT_MODE_TEST_ONLY) { + min_share = NO_SHARE_LIMIT; + max_share = min_share + 1; + save_mem = job_ptr->details->job_min_memory; + job_ptr->details->job_min_memory = 0; + } + + orig_map = bit_copy(bitmap); + for (max_run_job=min_share; max_run_job<max_share; max_run_job++) { + bool last_iteration = (max_run_job == (max_share -1)); + for (sus_jobs=0; ((sus_jobs<5) && (rc != SLURM_SUCCESS)); + sus_jobs++) { + if (last_iteration) + sus_jobs = NO_SHARE_LIMIT; + j = _job_count_bitmap(node_cr_ptr, job_ptr, + orig_map, bitmap, + max_run_job, + max_run_job + sus_jobs); + if ((j == prev_cnt) || (j < min_nodes)) + continue; + prev_cnt = j; + if ((mode == SELECT_MODE_RUN_NOW) && (max_run_job > 0)) { + /* We need to share. 
+ * Try to find suitable job to share nodes with */ + rc = _find_job_mate(job_ptr, bitmap, min_nodes, + max_nodes, req_nodes); + if (rc == SLURM_SUCCESS) + break; + } + rc = _job_test(job_ptr, bitmap, min_nodes, max_nodes, + req_nodes); + if (rc == SLURM_SUCCESS) + break; + continue; + } + } + bit_free(orig_map); + slurm_mutex_unlock(&cr_mutex); + if (save_mem) + job_ptr->details->job_min_memory = save_mem; + return rc; +} + +/* + * select_p_job_list_test - Given a list of select_will_run_t's in + * accending priority order we will see if we can start and + * finish all the jobs without increasing the start times of the + * jobs specified and fill in the est_start of requests with no + * est_start. If you are looking to see if one job will ever run + * then use select_p_job_test instead. + * IN/OUT req_list - list of select_will_run_t's in asscending + * priority order on success of placement fill in + * est_start of request with time. + * RET zero on success, EINVAL otherwise + */ +extern int select_p_job_list_test(List req_list) +{ + /* not currently supported */ + return EINVAL; +} + +/* + * Set the bits in 'jobmap' that correspond to bits in the 'bitmap' + * that are running 'run_job_cnt' jobs or less, and clear the rest. + */ +static int _job_count_bitmap(struct node_cr_record *node_cr_ptr, + struct job_record *job_ptr, + bitstr_t * bitmap, bitstr_t * jobmap, + int run_job_cnt, int tot_job_cnt) +{ + int i, count = 0, total_jobs, total_run_jobs; + struct part_cr_record *part_cr_ptr; + uint32_t job_memory = 0; + bool exclusive; + + xassert(node_cr_ptr); + + /* Jobs submitted to a partition with + * Shared=FORCE:1 may share resources with jobs in other partitions + * Shared=NO may not share resources with jobs in other partitions */ + if (run_job_cnt || (job_ptr->part_ptr->max_share & SHARED_FORCE)) + exclusive = false; + else + exclusive = true; + + if (job_ptr->details->job_min_memory && (cr_type == CR_MEMORY)) + job_memory = job_ptr->details->job_min_memory; + + for (i = 0; i < node_record_count; i++) { + if (!bit_test(bitmap, i)) { + bit_clear(jobmap, i); + continue; + } + + if (select_fast_schedule) { + if ((node_cr_ptr[i].alloc_memory + job_memory) > + node_record_table_ptr[i].config_ptr->real_memory) { + bit_clear(jobmap, i); + continue; + } + } else { + if ((node_cr_ptr[i].alloc_memory + job_memory) > + node_record_table_ptr[i].real_memory) { + bit_clear(jobmap, i); + continue; + } + } + + if ((run_job_cnt != NO_SHARE_LIMIT) && + (node_cr_ptr[i].exclusive_jobid != 0)) { + /* already reserved by some exclusive job */ + bit_clear(jobmap, i); + continue; + } + + total_jobs = 0; + total_run_jobs = 0; + part_cr_ptr = node_cr_ptr[i].parts; + while (part_cr_ptr) { + if (exclusive) { /* count jobs in all partitions */ + total_run_jobs += part_cr_ptr->run_job_cnt; + total_jobs += part_cr_ptr->tot_job_cnt; + } else if (part_cr_ptr->part_ptr == job_ptr->part_ptr) { + total_run_jobs += part_cr_ptr->run_job_cnt; + total_jobs += part_cr_ptr->tot_job_cnt; + break; + } + part_cr_ptr = part_cr_ptr->next; + } + if ((run_job_cnt != 0) && (part_cr_ptr == NULL)) { + error("_job_count_bitmap: could not find " + "partition %s for node %s", + job_ptr->part_ptr->name, + node_record_table_ptr[i].name); + } + if ((total_run_jobs <= run_job_cnt) && + (total_jobs <= tot_job_cnt)) { + bit_set(jobmap, i); + count++; + } else { + bit_clear(jobmap, i); + } + + } + return count; +} + +/* _find_job_mate - does most of the real work for select_p_job_test(), + * in trying to find a suitable job to mate this one 
with. This is + * a pretty simple algorithm now, but could try to match the job + * with multiple jobs that add up to the proper size or a single + * job plus a few idle nodes. */ +static int _find_job_mate(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes) +{ + ListIterator job_iterator; + struct job_record *job_scan_ptr; + + job_iterator = list_iterator_create(job_list); + while ((job_scan_ptr = (struct job_record *) list_next(job_iterator))) { + if ((job_scan_ptr->part_ptr == job_ptr->part_ptr) && + (job_scan_ptr->job_state == JOB_RUNNING) && + (job_scan_ptr->node_cnt == req_nodes) && + (job_scan_ptr->total_procs >= job_ptr->num_procs) && + bit_super_set(job_scan_ptr->node_bitmap, bitmap)) { + bit_and(bitmap, job_scan_ptr->node_bitmap); + return SLURM_SUCCESS; + } + } + list_iterator_destroy(job_iterator); + return EINVAL; +} + +/* _job_test - does most of the real work for select_p_job_test(), which + * pretty much just handles load-leveling and max_share logic */ +static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + uint32_t req_nodes) { int i, index, error_code = EINVAL, sufficient; int *consec_nodes; /* how many nodes we can add from this @@ -402,20 +680,11 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, int rem_cpus, rem_nodes; /* remaining resources desired */ int best_fit_nodes, best_fit_cpus, best_fit_req; int best_fit_location = 0, best_fit_sufficient; - int avail_cpus; - multi_core_data_t *mc_ptr = job_ptr->details->mc_ptr; + int avail_cpus, alloc_cpus = 0; - xassert(bitmap); - if (mc_ptr) { - debug3("job min-[max]: -N %u-[%u]:%u-[%u]:%u-[%u]:%u-[%u]", - job_ptr->details->min_nodes, job_ptr->details->max_nodes, - mc_ptr->min_sockets, mc_ptr->max_sockets, - mc_ptr->min_cores, mc_ptr->max_cores, - mc_ptr->min_threads, mc_ptr->max_threads); - debug3("job ntasks-per: -node=%u -socket=%u -core=%u", - job_ptr->details->ntasks_per_node, - mc_ptr->ntasks_per_socket, mc_ptr->ntasks_per_core); - } + if ((job_ptr->details->req_node_bitmap) && + (!bit_super_set(job_ptr->details->req_node_bitmap, bitmap))) + return error_code; consec_index = 0; consec_size = 50; /* start allocation for 50 sets of @@ -450,7 +719,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, /* first required node in set */ consec_req[consec_index] = index; } - rem_cpus -= avail_cpus; + rem_cpus -= avail_cpus; + alloc_cpus += avail_cpus; rem_nodes--; max_nodes--; } else { /* node not required (yet) */ @@ -556,7 +826,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, rem_nodes--; max_nodes--; avail_cpus = _get_avail_cpus(job_ptr, i); - rem_cpus -= avail_cpus; + rem_cpus -= avail_cpus; + alloc_cpus += avail_cpus; } for (i = (best_fit_req - 1); i >= consec_start[best_fit_location]; i--) { @@ -569,7 +840,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, rem_nodes--; max_nodes--; avail_cpus = _get_avail_cpus(job_ptr, i); - rem_cpus -= avail_cpus; + rem_cpus -= avail_cpus; + alloc_cpus += avail_cpus; } } else { for (i = consec_start[best_fit_location]; @@ -583,7 +855,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, rem_nodes--; max_nodes--; avail_cpus = _get_avail_cpus(job_ptr, i); - rem_cpus -= avail_cpus; + rem_cpus -= avail_cpus; + alloc_cpus += avail_cpus; } } if (job_ptr->details->contiguous || @@ -599,6 +872,10 @@ extern int select_p_job_test(struct 
job_record *job_ptr, bitstr_t *bitmap, && _enough_nodes(0, rem_nodes, min_nodes, req_nodes)) { error_code = SLURM_SUCCESS; } + if (error_code == SLURM_SUCCESS) { + /* job's total_procs is needed for SELECT_MODE_WILL_RUN */ + job_ptr->total_procs = alloc_cpus; + } xfree(consec_cpus); xfree(consec_nodes); @@ -610,9 +887,9 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap, extern int select_p_job_begin(struct job_record *job_ptr) { + int rc = SLURM_SUCCESS; #ifdef HAVE_XCPU int i; - int rc = SLURM_SUCCESS; char clone_path[128]; xassert(job_ptr); @@ -633,10 +910,13 @@ extern int select_p_job_begin(struct job_record *job_ptr) job_ptr->user_id); } } - return rc; -#else - return SLURM_SUCCESS; #endif + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) + _init_node_cr(); + _add_job_to_nodes(node_cr_ptr, job_ptr, "select_p_job_begin", 1); + slurm_mutex_unlock(&cr_mutex); + return rc; } extern int select_p_job_fini(struct job_record *job_ptr) @@ -660,19 +940,39 @@ extern int select_p_job_fini(struct job_record *job_ptr) } } #endif + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) + _init_node_cr(); + _rm_job_from_nodes(node_cr_ptr, job_ptr, "select_p_job_fini", 1); + slurm_mutex_unlock(&cr_mutex); return rc; } extern int select_p_job_suspend(struct job_record *job_ptr) { + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) + _init_node_cr(); + _rm_job_from_nodes(node_cr_ptr, job_ptr, "select_p_job_suspend", 0); + slurm_mutex_unlock(&cr_mutex); return SLURM_SUCCESS; } extern int select_p_job_resume(struct job_record *job_ptr) { + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) + _init_node_cr(); + _add_job_to_nodes(node_cr_ptr, job_ptr, "select_p_job_resume", 0); + slurm_mutex_unlock(&cr_mutex); return SLURM_SUCCESS; } +extern int select_p_get_job_cores(uint32_t job_id, int alloc_index, int s) +{ + return 0; +} + extern int select_p_job_ready(struct job_record *job_ptr) { if (job_ptr->job_state != JOB_RUNNING) @@ -696,7 +996,52 @@ extern int select_p_get_select_nodeinfo (struct node_record *node_ptr, extern int select_p_update_nodeinfo (struct job_record *job_ptr) { - return SLURM_SUCCESS; + int i, node_inx; + ListIterator step_iterator; + struct step_record *step_ptr; + uint32_t step_mem; + + xassert(job_ptr); + + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) + _init_node_cr(); + slurm_mutex_unlock(&cr_mutex); + + if ((job_ptr->job_state != JOB_RUNNING) + && (job_ptr->job_state != JOB_SUSPENDED)) + return SLURM_SUCCESS; + if ((cr_type != CR_MEMORY) || (job_ptr->details == NULL) || + (job_ptr->details->shared == 0) || job_ptr->details->job_min_memory) + return SLURM_SUCCESS; + + slurm_mutex_lock(&cr_mutex); + step_iterator = list_iterator_create (job_ptr->step_list); + while ((step_ptr = (struct step_record *) list_next (step_iterator))) { + if ((step_ptr->step_node_bitmap == NULL) || + (step_ptr->step_layout == NULL) || + (step_ptr->mem_per_task == 0) || + (_find_step(step_ptr))) /* already added */ + continue; +#if SELECT_DEBUG + info("select_p_update_nodeinfo: %u.%u mem:%u", + step_ptr->job_ptr->job_id, step_ptr->step_id, + step_ptr->mem_per_task); +#endif + node_inx = -1; + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(step_ptr->step_node_bitmap, i) == 0) + continue; + node_inx++; + step_mem = step_ptr->step_layout->tasks[node_inx] * + step_ptr->mem_per_task; + node_cr_ptr[i].alloc_memory += step_mem; + } + _add_step(step_ptr); + } + list_iterator_destroy (step_iterator); + slurm_mutex_unlock(&cr_mutex); + return 
SLURM_SUCCESS; } extern int select_p_update_block (update_part_msg_t *part_desc_ptr) @@ -714,14 +1059,14 @@ extern int select_p_get_extra_jobinfo (struct node_record *node_ptr, void *data) { int rc = SLURM_SUCCESS; + uint16_t *tmp_16; xassert(job_ptr); xassert(job_ptr->magic == JOB_MAGIC); switch (info) { case SELECT_AVAIL_CPUS: - { - uint16_t *tmp_16 = (uint16_t *) data; + tmp_16 = (uint16_t *) data; if (job_ptr->details && ((job_ptr->details->cpus_per_task > 1) || @@ -736,7 +1081,6 @@ extern int select_p_get_extra_jobinfo (struct node_record *node_ptr, } } break; - } default: error("select_g_get_extra_jobinfo info %d invalid", info); rc = SLURM_ERROR; @@ -761,3 +1105,635 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data) { return SLURM_SUCCESS; } + +extern int select_p_reconfigure(void) +{ + slurm_mutex_lock(&cr_mutex); + _free_node_cr(node_cr_ptr); + node_cr_ptr = NULL; + if (step_cr_list) + list_destroy(step_cr_list); + step_cr_list = NULL; + _init_node_cr(); + slurm_mutex_unlock(&cr_mutex); + + return SLURM_SUCCESS; +} + +/* + * deallocate resources that were assigned to this job + * + * if remove_all = 0: the job has been suspended, so just deallocate CPUs + * if remove_all = 1: deallocate all resources + */ +static int _rm_job_from_nodes(struct node_cr_record *node_cr_ptr, + struct job_record *job_ptr, char *pre_err, + int remove_all) +{ + int i, rc = SLURM_SUCCESS; + struct part_cr_record *part_cr_ptr; + uint32_t job_memory = 0; + + if (node_cr_ptr == NULL) { + error("%s: node_cr_ptr not initialized", pre_err); + return SLURM_ERROR; + } + + if (remove_all && job_ptr->details && + job_ptr->details->job_min_memory && (cr_type == CR_MEMORY)) + job_memory = job_ptr->details->job_min_memory; + + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(job_ptr->node_bitmap, i) == 0) + continue; + if (node_cr_ptr[i].alloc_memory >= job_memory) + node_cr_ptr[i].alloc_memory -= job_memory; + else { + node_cr_ptr[i].alloc_memory = 0; + error("%s: memory underflow for node %s", + pre_err, node_record_table_ptr[i].name); + } + if (node_cr_ptr[i].exclusive_jobid == job_ptr->job_id) + node_cr_ptr[i].exclusive_jobid = 0; + part_cr_ptr = node_cr_ptr[i].parts; + while (part_cr_ptr) { + if (part_cr_ptr->part_ptr != job_ptr->part_ptr) { + part_cr_ptr = part_cr_ptr->next; + continue; + } + if (part_cr_ptr->run_job_cnt > 0) + part_cr_ptr->run_job_cnt--; + else { + error("%s: run_job_cnt underflow for node %s", + pre_err, node_record_table_ptr[i].name); + } + if (remove_all) { + if (part_cr_ptr->tot_job_cnt > 0) + part_cr_ptr->tot_job_cnt--; + else { + error("%s: tot_job_cnt underflow " + "for node %s", pre_err, + node_record_table_ptr[i].name); + } + if ((part_cr_ptr->tot_job_cnt == 0) && + (part_cr_ptr->run_job_cnt)) { + part_cr_ptr->run_job_cnt = 0; + error("%s: run_job_count out of sync " + "for node %s", pre_err, + node_record_table_ptr[i].name); + } + } + break; + } + if (part_cr_ptr == NULL) { + error("%s: could not find partition %s for node %s", + pre_err, job_ptr->part_ptr->name, + node_record_table_ptr[i].name); + rc = SLURM_ERROR; + } + } + + return rc; +} + +/* + * allocate resources to the given job + * + * if alloc_all = 0: the job has been suspended, so just re-allocate CPUs + * if alloc_all = 1: allocate all resources (CPUs and memory) + */ +static int _add_job_to_nodes(struct node_cr_record *node_cr_ptr, + struct job_record *job_ptr, char *pre_err, + int alloc_all) +{ + int i, rc = SLURM_SUCCESS, exclusive = 0; + struct part_cr_record *part_cr_ptr; + uint32_t 
job_memory = 0; + + if (node_cr_ptr == NULL) { + error("%s: node_cr_ptr not initialized", pre_err); + return SLURM_ERROR; + } + + if (alloc_all && job_ptr->details && + job_ptr->details->job_min_memory && (cr_type == CR_MEMORY)) + job_memory = job_ptr->details->job_min_memory; + if (job_ptr->details->shared == 0) + exclusive = 1; + + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(job_ptr->node_bitmap, i) == 0) + continue; + node_cr_ptr[i].alloc_memory += job_memory; + if (exclusive) { + if (node_cr_ptr[i].exclusive_jobid) { + error("select/linear: conflicting exclusive " + "jobs %u and %u on %s", + job_ptr->job_id, + node_cr_ptr[i].exclusive_jobid, + node_record_table_ptr[i].name); + } + node_cr_ptr[i].exclusive_jobid = job_ptr->job_id; + } + + part_cr_ptr = node_cr_ptr[i].parts; + while (part_cr_ptr) { + if (part_cr_ptr->part_ptr != job_ptr->part_ptr) { + part_cr_ptr = part_cr_ptr->next; + continue; + } + if (alloc_all) + part_cr_ptr->tot_job_cnt++; + part_cr_ptr->run_job_cnt++; + break; + } + if (part_cr_ptr == NULL) { + error("%s: could not find partition %s for node %s", + pre_err, job_ptr->part_ptr->name, + node_record_table_ptr[i].name); + rc = SLURM_ERROR; + } + } + + return rc; +} + +static void _free_node_cr(struct node_cr_record *node_cr_ptr) +{ + int i; + struct part_cr_record *part_cr_ptr1, *part_cr_ptr2; + + if (node_cr_ptr == NULL) + return; + + for (i = 0; i < select_node_cnt; i++) { + part_cr_ptr1 = node_cr_ptr[i].parts; + while (part_cr_ptr1) { + part_cr_ptr2 = part_cr_ptr1->next; + xfree(part_cr_ptr1); + part_cr_ptr1 = part_cr_ptr2; + } + } + xfree(node_cr_ptr); +} + +static inline void _dump_node_cr(struct node_cr_record *node_cr_ptr) +{ +#if SELECT_DEBUG + int i; + struct part_cr_record *part_cr_ptr; + + if (node_cr_ptr == NULL) + return; + + for (i = 0; i < select_node_cnt; i++) { + info("Node:%s exclusive:%u alloc_mem:%u", + node_record_table_ptr[i].name, + node_cr_ptr[i].exclusive_jobid, + node_cr_ptr[i].alloc_memory); + + part_cr_ptr = node_cr_ptr[i].parts; + while (part_cr_ptr) { + info(" Part:%s run:%u tot:%u", + part_cr_ptr->part_ptr->name, + part_cr_ptr->run_job_cnt, + part_cr_ptr->tot_job_cnt); + part_cr_ptr = part_cr_ptr->next; + } + } +#endif +} + +static struct node_cr_record *_dup_node_cr(struct node_cr_record *node_cr_ptr) +{ + int i; + struct node_cr_record *new_node_cr_ptr; + struct part_cr_record *part_cr_ptr, *new_part_cr_ptr; + + if (node_cr_ptr == NULL) + return NULL; + + new_node_cr_ptr = xmalloc(select_node_cnt * + sizeof(struct node_cr_record)); + + for (i = 0; i < select_node_cnt; i++) { + new_node_cr_ptr[i].alloc_memory = node_cr_ptr[i].alloc_memory; + new_node_cr_ptr[i].exclusive_jobid = + node_cr_ptr[i].exclusive_jobid; + part_cr_ptr = node_cr_ptr[i].parts; + while (part_cr_ptr) { + new_part_cr_ptr = xmalloc(sizeof(struct part_cr_record)); + new_part_cr_ptr->part_ptr = part_cr_ptr->part_ptr; + new_part_cr_ptr->run_job_cnt = part_cr_ptr->run_job_cnt; + new_part_cr_ptr->tot_job_cnt = part_cr_ptr->tot_job_cnt; + new_part_cr_ptr->next = new_node_cr_ptr[i].parts; + new_node_cr_ptr[i].parts = new_part_cr_ptr; + part_cr_ptr = part_cr_ptr->next; + } + } + return new_node_cr_ptr; +} + +static void _init_node_cr(void) +{ + struct part_record *part_ptr; + struct part_cr_record *part_cr_ptr; + ListIterator part_iterator; + struct job_record *job_ptr; + ListIterator job_iterator; + uint32_t job_memory, step_mem; + int exclusive, i, node_inx; + ListIterator step_iterator; + struct step_record *step_ptr; + + if (node_cr_ptr) + return; + + node_cr_ptr 
= xmalloc(select_node_cnt * sizeof(struct node_cr_record)); + + /* build partition records */ + part_iterator = list_iterator_create(part_list); + while ((part_ptr = (struct part_record *) list_next(part_iterator))) { + for (i = 0; i < select_node_cnt; i++) { + if (part_ptr->node_bitmap == NULL) + break; + if (!bit_test(part_ptr->node_bitmap, i)) + continue; + part_cr_ptr = xmalloc(sizeof(struct part_cr_record)); + part_cr_ptr->next = node_cr_ptr[i].parts; + part_cr_ptr->part_ptr = part_ptr; + node_cr_ptr[i].parts = part_cr_ptr; + } + + } + list_iterator_destroy(part_iterator); + + /* record running and suspended jobs in node_cr_records */ + job_iterator = list_iterator_create(job_list); + while ((job_ptr = (struct job_record *) list_next(job_iterator))) { + if ((job_ptr->job_state != JOB_RUNNING) && + (job_ptr->job_state != JOB_SUSPENDED)) + continue; + + if (job_ptr->details && + job_ptr->details->job_min_memory && (cr_type == CR_MEMORY)) + job_memory = job_ptr->details->job_min_memory; + else + job_memory = 0; + if (job_ptr->details->shared == 0) + exclusive = 1; + else + exclusive = 0; + + for (i = 0; i < select_node_cnt; i++) { + if (job_ptr->node_bitmap == NULL) + break; + if (!bit_test(job_ptr->node_bitmap, i)) + continue; + if (exclusive) { + if (node_cr_ptr[i].exclusive_jobid) { + error("select/linear: conflicting " + "exclusive jobs %u and %u on %s", + job_ptr->job_id, + node_cr_ptr[i].exclusive_jobid, + node_record_table_ptr[i].name); + } + node_cr_ptr[i].exclusive_jobid = job_ptr->job_id; + } + node_cr_ptr[i].alloc_memory += job_memory; + part_cr_ptr = node_cr_ptr[i].parts; + while (part_cr_ptr) { + if (part_cr_ptr->part_ptr != job_ptr->part_ptr) { + part_cr_ptr = part_cr_ptr->next; + continue; + } + part_cr_ptr->tot_job_cnt++; + if (job_ptr->job_state == JOB_RUNNING) + part_cr_ptr->run_job_cnt++; + break; + } + if (part_cr_ptr == NULL) { + error("_init_node_cr: could not find " + "partition %s for node %s", + job_ptr->part_ptr->name, + node_record_table_ptr[i].name); + } + } + + if (job_ptr->details->job_min_memory || + (job_ptr->details->shared == 0) || (cr_type != CR_MEMORY)) + continue; + + step_iterator = list_iterator_create (job_ptr->step_list); + while ((step_ptr = (struct step_record *) list_next (step_iterator))) { + if ((step_ptr->step_node_bitmap == NULL) || + (step_ptr->step_layout == NULL)) + continue; + + if (_find_step(step_ptr)) { + slurm_mutex_unlock(&cr_mutex); + error("_init_node_cr: duplicate for step %u.%u", + job_ptr->job_id, step_ptr->step_id); + continue; + } + + node_inx = -1; + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(step_ptr->step_node_bitmap, i) == 0) + continue; + node_inx++; + step_mem = step_ptr->step_layout->tasks[node_inx] * + step_ptr->mem_per_task; + node_cr_ptr[i].alloc_memory += step_mem; + } +#if SELECT_DEBUG + info("_init_node_cr: added %u.%u mem:%u", + job_ptr->job_id, step_ptr->step_id, step_mem); +#endif + _add_step(step_ptr); + } + list_iterator_destroy (step_iterator); + } + list_iterator_destroy(job_iterator); + _dump_node_cr(node_cr_ptr); +} + +/* Determine where and when the job at job_ptr can begin execution by updating + * a scratch node_cr_record structure to reflect each job terminating at the + * end of its time limit and use this to show where and when the job at job_ptr + * will begin execution. Used by Moab for backfill scheduling. 
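The approach described in this comment boils down to: take the currently running jobs in ascending end_time order, pretend they complete one at a time, and report the first completion time at which the pending job would fit. A minimal standalone sketch of that idea follows; sim_job and estimate_start() are illustrative names only, whereas the real _will_run_test() below duplicates the node_cr_record table and re-runs _job_count_bitmap()/_job_test() after each simulated completion, so treat this strictly as an editorial aid, not part of the patch.

/* Editorial sketch only -- not part of the upstream patch. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct sim_job {                /* stand-in for a running job_record */
	time_t end_time;        /* when its time limit expires */
	int    node_cnt;        /* nodes it will give back */
};

static int _by_end_time(const void *x, const void *y)
{
	const struct sim_job *a = x, *b = y;
	return (int) difftime(a->end_time, b->end_time);
}

/* Earliest start for a job needing "need" nodes, given "idle" nodes free
 * now and "cnt" running jobs. 0 means "can start now", -1 "never fits". */
static time_t estimate_start(int need, int idle,
			     struct sim_job *jobs, int cnt)
{
	int i;

	if (idle >= need)
		return (time_t) 0;
	qsort(jobs, cnt, sizeof(struct sim_job), _by_end_time);
	for (i = 0; i < cnt; i++) {
		idle += jobs[i].node_cnt;    /* simulate this job ending */
		if (idle >= need)
			return jobs[i].end_time;
	}
	return (time_t) -1;
}

int main(void)
{
	time_t now = time(NULL);
	struct sim_job jobs[] = {
		{ now + 3600, 4 },
		{ now + 600,  2 },
	};
	time_t when = estimate_start(5, 1, jobs, 2);

	printf("estimated start: %s", when ? ctime(&when) : "now\n");
	return 0;
}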
*/ +static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap, + uint32_t min_nodes, uint32_t max_nodes, + int max_share, uint32_t req_nodes) +{ + struct node_cr_record *exp_node_cr; + struct job_record *tmp_job_ptr, **tmp_job_pptr; + List cr_job_list; + ListIterator job_iterator; + bitstr_t *orig_map; + int i, rc = SLURM_ERROR; + int max_run_jobs = max_share - 1; /* exclude this job */ + + orig_map = bit_copy(bitmap); + + /* Try to run with currently available nodes */ + i = _job_count_bitmap(node_cr_ptr, job_ptr, orig_map, bitmap, + max_run_jobs, NO_SHARE_LIMIT); + if (i >= min_nodes) { + rc = _job_test(job_ptr, bitmap, min_nodes, max_nodes, + req_nodes); + if (rc == SLURM_SUCCESS) { + bit_free(orig_map); + job_ptr->start_time = time(NULL); + return SLURM_SUCCESS; + } + } + + /* Job is still pending. Simulate termination of jobs one at a time + * to determine when and where the job can start. */ + exp_node_cr = _dup_node_cr(node_cr_ptr); + if (exp_node_cr == NULL) { + bit_free(orig_map); + return SLURM_ERROR; + } + + /* Build list of running jobs */ + cr_job_list = list_create(_cr_job_list_del); + if (!cr_job_list) + fatal("list_create: memory allocation failure"); + job_iterator = list_iterator_create(job_list); + while ((tmp_job_ptr = (struct job_record *) list_next(job_iterator))) { + if (tmp_job_ptr->job_state != JOB_RUNNING) + continue; + if (tmp_job_ptr->end_time == 0) { + error("Job %u has zero end_time", tmp_job_ptr->job_id); + continue; + } + tmp_job_pptr = xmalloc(sizeof(struct job_record *)); + *tmp_job_pptr = tmp_job_ptr; + list_append(cr_job_list, tmp_job_pptr); + } + list_iterator_destroy(job_iterator); + list_sort(cr_job_list, _cr_job_list_sort); + + /* Remove the running jobs one at a time from exp_node_cr and try + * scheduling the pending job after each one */ + job_iterator = list_iterator_create(cr_job_list); + while ((tmp_job_pptr = (struct job_record **) list_next(job_iterator))) { + tmp_job_ptr = *tmp_job_pptr; + _rm_job_from_nodes(exp_node_cr, tmp_job_ptr, + "_will_run_test", 1); + i = _job_count_bitmap(exp_node_cr, job_ptr, orig_map, bitmap, + max_run_jobs, NO_SHARE_LIMIT); + if (i < min_nodes) + continue; + rc = _job_test(job_ptr, bitmap, min_nodes, max_nodes, + req_nodes); + if (rc != SLURM_SUCCESS) + continue; + job_ptr->start_time = tmp_job_ptr->end_time; + break; + } + list_iterator_destroy(job_iterator); + list_destroy(cr_job_list); + _free_node_cr(exp_node_cr); + bit_free(orig_map); + return rc; +} + +static void _cr_job_list_del(void *x) +{ + xfree(x); +} +static int _cr_job_list_sort(void *x, void *y) +{ + struct job_record **job1_pptr = (struct job_record **) x; + struct job_record **job2_pptr = (struct job_record **) y; + return (int) difftime(job1_pptr[0]->end_time, job2_pptr[0]->end_time); +} + +extern int select_p_step_begin(struct step_record *step_ptr) +{ + slurm_step_layout_t *step_layout = step_ptr->step_layout; + int i, node_inx = -1; + uint32_t avail_mem, step_mem; + + xassert(step_ptr->job_ptr); + xassert(step_ptr->job_ptr->details); + xassert(step_ptr->step_node_bitmap); + +#if SELECT_DEBUG + info("select_p_step_begin: mem:%u", step_ptr->mem_per_task); +#endif + if (step_layout == NULL) + return SLURM_SUCCESS; /* batch script */ + /* Don't track step memory use if job has reserved memory OR + * job has whole node OR we don't track memory usage */ + if (step_ptr->job_ptr->details->job_min_memory || + (step_ptr->job_ptr->details->shared == 0) || + (cr_type != CR_MEMORY)) + return SLURM_SUCCESS; + + /* test if there is sufficient 
memory */ + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) + _init_node_cr(); + if (_find_step(step_ptr)) { + slurm_mutex_unlock(&cr_mutex); + error("select_p_step_begin: duplicate for step %u.%u", + step_ptr->job_ptr->job_id, step_ptr->step_id); + return SLURM_SUCCESS; + } + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(step_ptr->step_node_bitmap, i) == 0) + continue; + node_inx++; + step_mem = step_layout->tasks[node_inx] * step_ptr->mem_per_task; + if (select_fast_schedule) + avail_mem = node_record_table_ptr[i]. + config_ptr->real_memory; + else + avail_mem = node_record_table_ptr[i].real_memory; +#if SELECT_DEBUG + info("alloc %u need %u avail %u", + node_cr_ptr[i].alloc_memory, step_mem, avail_mem); +#endif + if ((node_cr_ptr[i].alloc_memory + step_mem) > avail_mem) { + slurm_mutex_unlock(&cr_mutex); + return SLURM_ERROR; /* no room */ + } + } + + /* reserve the memory */ + node_inx = -1; + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(step_ptr->step_node_bitmap, i) == 0) + continue; + node_inx++; + step_mem = step_layout->tasks[node_inx] * step_ptr->mem_per_task; + node_cr_ptr[i].alloc_memory += step_mem; + } + _add_step(step_ptr); + slurm_mutex_unlock(&cr_mutex); + return SLURM_SUCCESS; +} + +extern int select_p_step_fini(struct step_record *step_ptr) +{ + slurm_step_layout_t *step_layout = step_ptr->step_layout; + int i, node_inx = -1; + uint32_t step_mem; + + xassert(step_ptr->job_ptr); + xassert(step_ptr->job_ptr->details); + xassert(step_ptr->step_node_bitmap); + +#if SELECT_DEBUG + info("select_p_step_fini: mem:%u", step_ptr->mem_per_task); +#endif + if (step_layout == NULL) + return SLURM_SUCCESS; /* batch script */ + /* Don't track step memory use if job has reserved memory OR + * job has whole node OR we don't track memory usage */ + if (step_ptr->job_ptr->details->job_min_memory || + (step_ptr->job_ptr->details->shared == 0) || + (cr_type != CR_MEMORY)) + return SLURM_SUCCESS; + + /* release the memory */ + slurm_mutex_lock(&cr_mutex); + if (node_cr_ptr == NULL) + _init_node_cr(); + if (!_find_step(step_ptr)) { + slurm_mutex_unlock(&cr_mutex); + error("select_p_step_fini: could not find step %u.%u", + step_ptr->job_ptr->job_id, step_ptr->step_id); + return SLURM_ERROR; + } + for (i = 0; i < select_node_cnt; i++) { + if (bit_test(step_ptr->step_node_bitmap, i) == 0) + continue; + node_inx++; + step_mem = step_layout->tasks[node_inx] * step_ptr->mem_per_task; + if (node_cr_ptr[i].alloc_memory >= step_mem) + node_cr_ptr[i].alloc_memory -= step_mem; + else { + node_cr_ptr[i].alloc_memory = 0; + error("select_p_step_fini: alloc_memory underflow on %s", + node_record_table_ptr[i].name); + } + } + _remove_step(step_ptr); + slurm_mutex_unlock(&cr_mutex); + return SLURM_SUCCESS; +} + +/* return 1 if found, 0 otherwise */ +static int _find_step(struct step_record *step_ptr) +{ + ListIterator step_iterator; + struct step_cr_record *step; + int found = 0; + + if (!step_cr_list) + return found; + step_iterator = list_iterator_create(step_cr_list); + if (step_iterator == NULL) { + fatal("list_iterator_create: memory allocation failure"); + return found; + } + while ((step = list_next(step_iterator))) { + if ((step->job_id == step_ptr->job_ptr->job_id) && + (step->step_id == step_ptr->step_id)) { + found = 1; + break; + } + } + list_iterator_destroy(step_iterator); + return found; +} +static int _add_step(struct step_record *step_ptr) +{ + struct step_cr_record *step = xmalloc(sizeof(struct step_cr_record)); + + step->job_id = step_ptr->job_ptr->job_id; + 
step->step_id = step_ptr->step_id; + if (!step_cr_list) { + step_cr_list = list_create(_del_list_step); + if (!step_cr_list) + fatal("list_create: memory allocation failure"); + } + if (list_append(step_cr_list, step) == NULL) { + fatal("list_append: memory allocation failure"); + return SLURM_ERROR; + } + return SLURM_SUCCESS; +} +static int _remove_step(struct step_record *step_ptr) +{ + ListIterator step_iterator; + struct step_cr_record *step; + int found = 0; + + if (!step_cr_list) + return found; + step_iterator = list_iterator_create(step_cr_list); + if (step_iterator == NULL) { + fatal("list_iterator_create: memory allocation failure"); + return found; + } + while ((step = list_next(step_iterator))) { + if ((step->job_id == step_ptr->job_ptr->job_id) && + (step->step_id == step_ptr->step_id)) { + found = 1; + list_delete_item(step_iterator); + break; + } + } + list_iterator_destroy(step_iterator); + return found; +} +static void _del_list_step(void *x) +{ + xfree(x); +} diff --git a/src/plugins/select/linear/select_linear.h b/src/plugins/select/linear/select_linear.h new file mode 100644 index 000000000..cd40a7fd9 --- /dev/null +++ b/src/plugins/select/linear/select_linear.h @@ -0,0 +1,81 @@ +/*****************************************************************************\ + * select_linear.h + ***************************************************************************** + * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. + * Written by Susanne M. Balle, <susanne.balle@hp.com> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef _SELECT_LINEAR_H +#define _SELECT_LINEAR_H + +#include "src/slurmctld/slurmctld.h" + +/* + * part_cr_record keeps track of the number of running jobs on + * this node in this partition. SLURM allows a node to be + * assigned to more than one partition. One or more partitions + * may be configured to share the cores with more than one job. 
+ */ + +struct part_cr_record { + struct part_record *part_ptr; /* pointer to partition in slurmctld */ + uint16_t run_job_cnt; /* number of running jobs on this node + * for this partition */ + uint16_t tot_job_cnt; /* number of jobs allocated to this node + * for this partition */ + struct part_cr_record *next; /* ptr to next part_cr_record */ +}; + +/* + * node_cr_record keeps track of the resources within a node which + * have been reserved by already scheduled jobs. + */ +struct node_cr_record { + struct part_cr_record *parts; /* ptr to singly-linked part_cr_record + * list that contains alloc_core info */ + uint32_t alloc_memory; /* real memory reserved by already + * scheduled jobs */ + uint32_t exclusive_jobid; /* if the node is allocated exclusively + * to some job, put its jobid here, + * otherwise value is zero */ +}; + +/* + * step_cr_record keeps track of the steps which have been allocated memory + * independently of the job (ie. the job itself has no reserved memory + */ +struct step_cr_record { + uint32_t job_id; + uint32_t step_id; +}; + +#endif /* !_SELECT_LINEAR_H */ diff --git a/src/plugins/switch/Makefile.in b/src/plugins/switch/Makefile.in index ab6e17b4f..a7d38ed25 100644 --- a/src/plugins/switch/Makefile.in +++ b/src/plugins/switch/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
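To make the new select_linear.h records above easier to picture: every node carries a singly linked list of part_cr_record entries, one per partition the node belongs to, and _job_count_bitmap() walks that list to decide whether the node already runs too many jobs for the requested share level. The following standalone sketch uses trimmed, illustrative copies of those structures (count_jobs() and part_name are invented; the real code matches on part_ptr): an exclusive request counts jobs across every partition on the node, a shared request counts only its own partition.

/* Editorial sketch only -- trimmed copies of the select_linear.h records. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

struct part_cr_record {
	const char *part_name;        /* stands in for part_ptr */
	uint16_t run_job_cnt;         /* running jobs on this node/partition */
	uint16_t tot_job_cnt;         /* running + suspended jobs */
	struct part_cr_record *next;
};

struct node_cr_record {
	struct part_cr_record *parts; /* per-partition usage list */
	uint32_t alloc_memory;        /* memory already reserved on the node */
	uint32_t exclusive_jobid;     /* non-zero if node is held exclusively */
};

/* Count jobs roughly the way _job_count_bitmap() does: an exclusive
 * request looks at every partition on the node, a shared one only at
 * the job's own partition. */
static void count_jobs(struct node_cr_record *node, const char *my_part,
		       bool exclusive, int *run_jobs, int *tot_jobs)
{
	struct part_cr_record *p;

	*run_jobs = *tot_jobs = 0;
	for (p = node->parts; p; p = p->next) {
		if (exclusive || (strcmp(p->part_name, my_part) == 0)) {
			*run_jobs += p->run_job_cnt;
			*tot_jobs += p->tot_job_cnt;
			if (!exclusive)
				break;
		}
	}
}

int main(void)
{
	struct part_cr_record debug = { "debug", 1, 2, NULL };
	struct part_cr_record batch = { "batch", 3, 3, &debug };
	struct node_cr_record node  = { &batch, 512, 0 };
	int run, tot;

	count_jobs(&node, "debug", false, &run, &tot);
	printf("debug only: run=%d tot=%d\n", run, tot);   /* 1, 2 */
	count_jobs(&node, "debug", true, &run, &tot);
	printf("all parts : run=%d tot=%d\n", run, tot);   /* 4, 5 */
	return 0;
}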
@@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/src/plugins/switch/elan/Makefile.in b/src/plugins/switch/elan/Makefile.in index 5310a895e..6fbf77a27 100644 --- a/src/plugins/switch/elan/Makefile.in +++ b/src/plugins/switch/elan/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -84,7 +86,7 @@ switch_elan_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(switch_elan_la_LDFLAGS) $(LDFLAGS) -o $@ @HAVE_ELAN_TRUE@am_switch_elan_la_rpath = -rpath $(pkglibdir) -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -125,6 +127,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -138,10 +141,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -161,7 +167,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -172,6 +181,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -187,6 +198,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -202,6 +214,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -320,8 +333,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) 
$(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -329,8 +342,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -385,8 +398,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -398,8 +411,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -409,13 +422,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/switch/elan/qsw.c b/src/plugins/switch/elan/qsw.c index 3df3847ac..98d317af6 100644 --- a/src/plugins/switch/elan/qsw.c +++ b/src/plugins/switch/elan/qsw.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * qsw.c - Library routines for initiating jobs on QsNet. - * $Id: qsw.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: qsw.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -747,7 +747,7 @@ _free_hwcontext(uint32_t prog_num) if (prog_num != step_ctx_p->st_prognum) continue; _dump_step_ctx("_free_hwcontext", step_ctx_p); - list_delete(iter); + list_delete_item(iter); break; } if (!step_ctx_p) { diff --git a/src/plugins/switch/elan/qsw.h b/src/plugins/switch/elan/qsw.h index eab4eebe6..d82345791 100644 --- a/src/plugins/switch/elan/qsw.h +++ b/src/plugins/switch/elan/qsw.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jim Garlick <garlick@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/switch/elan/switch_elan.c b/src/plugins/switch/elan/switch_elan.c index 445b8246c..2e89fec60 100644 --- a/src/plugins/switch/elan/switch_elan.c +++ b/src/plugins/switch/elan/switch_elan.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * switch_elan.c - Library routines for initiating jobs on QsNet. - * $Id: switch_elan.c 10751 2007-01-11 22:19:34Z jette $ + * $Id: switch_elan.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2003-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Kevin Tew <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -233,7 +233,7 @@ int switch_p_libstate_restore (char *dir_name, bool recover) int error_code = SLURM_SUCCESS; int state_fd, data_allocated = 0, data_read = 0, data_size = 0; char *ver_str = NULL; - uint16_t ver_str_len; + uint32_t ver_str_len; if (!recover) /* clean start, no recovery */ return qsw_init(NULL); @@ -271,11 +271,11 @@ int switch_p_libstate_restore (char *dir_name, bool recover) if (error_code == SLURM_SUCCESS) { buffer = create_buf (data, data_size); data = NULL; /* now in buffer, don't xfree() */ - if (buffer && (size_buf(buffer) >= sizeof(uint16_t) + + if (buffer && (size_buf(buffer) >= sizeof(uint32_t) + strlen(QSW_STATE_VERSION))) { char *ptr = get_buf_data(buffer); - if (!memcmp(&ptr[sizeof(uint16_t)], + if (!memcmp(&ptr[sizeof(uint32_t)], QSW_STATE_VERSION, 3)) { unpackstr_xmalloc(&ver_str, &ver_str_len, buffer); diff --git a/src/plugins/switch/federation/Makefile.am b/src/plugins/switch/federation/Makefile.am index 254d85f8d..f1517f6ff 100644 --- a/src/plugins/switch/federation/Makefile.am +++ b/src/plugins/switch/federation/Makefile.am @@ -21,9 +21,8 @@ switch_federation_la_SOURCES = \ federation.c federation.h \ federation_keys.h \ switch_federation.c -switch_federation_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) - -switch_federation_la_LIBADD = $(top_builddir)/src/common/libcommon.la +switch_federation_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) \ + $(FEDERATION_LDFLAGS) else EXTRA_switch_federation_la_SOURCES = \ diff --git a/src/plugins/switch/federation/Makefile.in b/src/plugins/switch/federation/Makefile.in index bfc93e746..11c2462b2 100644 --- a/src/plugins/switch/federation/Makefile.in +++ b/src/plugins/switch/federation/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. 
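The switch_elan.c hunk above widens ver_str_len and the sizeof() offsets from 16 to 32 bits, matching the wider length fields used by the unpack calls elsewhere in this patch (federation.c's size variables become uint32_t, and unprotected unpackmem_ptr() calls become safe_unpackmem_ptr(), which bails out to unpack_error on truncated input). A standalone, purely illustrative sketch of that length-prefixed, bounds-checked read follows; unpack_mem32() is an invented name, not SLURM's pack API.

/* Editorial sketch of a length-prefixed, bounds-checked read.  SLURM's
 * real helpers (safe_unpack32, safe_unpackmem_ptr, unpackstr_xmalloc)
 * live in src/common/pack.h; the names below are illustrative only. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohl */

/* Read a 32-bit length followed by that many bytes.  Returns a pointer
 * into buf and advances *offset on success, or NULL if the record would
 * run past buf_size (the equivalent of jumping to unpack_error). */
static const char *unpack_mem32(const char *buf, size_t buf_size,
				size_t *offset, uint32_t *len)
{
	uint32_t nl;

	if ((*offset + sizeof(uint32_t)) > buf_size)
		return NULL;
	memcpy(&nl, buf + *offset, sizeof(uint32_t));
	*len = ntohl(nl);
	*offset += sizeof(uint32_t);
	if ((*offset + *len) > buf_size)
		return NULL;
	*offset += *len;
	return buf + *offset - *len;
}

int main(void)
{
	/* 4-byte big-endian length (5) followed by an illustrative tag */
	const char state[] = { 0, 0, 0, 5, 'V', 'E', 'R', '0', '3' };
	size_t offset = 0;
	uint32_t len;
	const char *ver = unpack_mem32(state, sizeof(state), &offset, &len);

	if (ver && (len == 5) && (memcmp(ver, "VER03", 5) == 0))
		printf("state version ok (%u bytes)\n", len);
	else
		printf("unpack_error\n");
	return 0;
}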
# @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,8 +75,7 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -@HAVE_FEDERATION_TRUE@switch_federation_la_DEPENDENCIES = \ -@HAVE_FEDERATION_TRUE@ $(top_builddir)/src/common/libcommon.la +switch_federation_la_LIBADD = am__switch_federation_la_SOURCES_DIST = federation.c federation.h \ federation_keys.h switch_federation.c @HAVE_FEDERATION_TRUE@am_switch_federation_la_OBJECTS = federation.lo \ @@ -87,7 +88,7 @@ switch_federation_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(switch_federation_la_LDFLAGS) $(LDFLAGS) -o $@ @HAVE_FEDERATION_TRUE@am_switch_federation_la_rpath = -rpath \ @HAVE_FEDERATION_TRUE@ $(pkglibdir) -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -129,6 +130,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -142,10 +144,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -165,7 +170,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -176,6 +184,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -191,6 +201,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -206,6 +217,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -275,8 +287,9 @@ pkglib_LTLIBRARIES = $(federation_lib) 
@HAVE_FEDERATION_TRUE@ federation_keys.h \ @HAVE_FEDERATION_TRUE@ switch_federation.c -@HAVE_FEDERATION_TRUE@switch_federation_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -@HAVE_FEDERATION_TRUE@switch_federation_la_LIBADD = $(top_builddir)/src/common/libcommon.la +@HAVE_FEDERATION_TRUE@switch_federation_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) \ +@HAVE_FEDERATION_TRUE@ $(FEDERATION_LDFLAGS) + @HAVE_FEDERATION_FALSE@EXTRA_switch_federation_la_SOURCES = \ @HAVE_FEDERATION_FALSE@ federation.c federation.h \ @HAVE_FEDERATION_FALSE@ federation_keys.h \ @@ -321,8 +334,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -330,8 +343,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -386,8 +399,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -399,8 +412,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -410,13 +423,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/switch/federation/federation.c b/src/plugins/switch/federation/federation.c index f741b44f7..3010298bf 100644 --- a/src/plugins/switch/federation/federation.c +++ b/src/plugins/switch/federation/federation.c @@ -1,11 +1,11 @@ 
/*****************************************************************************\ ** federation.c - Library routines for initiating jobs on IBM Federation - ** $Id: federation.c 12736 2007-11-29 21:53:34Z jette $ + ** $Id: federation.c 13702 2008-03-22 00:13:35Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jason King <jking@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -1134,8 +1134,8 @@ static int _fake_unpack_adapters(Buf buf) safe_unpack32(&adapter_count, buf); for (i = 0; i < adapter_count; i++) { /* no copy, just advances buf counters */ - unpackmem_ptr(&dummyptr, &dummy16, buf); - if (dummy16 != FED_ADAPTERNAME_LEN) + safe_unpackmem_ptr(&dummyptr, &dummy32, buf); + if (dummy32 != FED_ADAPTERNAME_LEN) goto unpack_error; safe_unpack16(&dummy16, buf); safe_unpack16(&dummy16, buf); @@ -1171,7 +1171,7 @@ _unpack_nodeinfo(fed_nodeinfo_t *n, Buf buf, bool believe_window_status) int i, j; fed_adapter_t *tmp_a = NULL; fed_window_t *tmp_w = NULL; - uint16_t size; + uint32_t size; fed_nodeinfo_t *tmp_n = NULL; char *name_ptr, name[FED_HOSTLEN]; int magic; @@ -1186,7 +1186,7 @@ _unpack_nodeinfo(fed_nodeinfo_t *n, Buf buf, bool believe_window_status) safe_unpack32(&magic, buf); if(magic != FED_NODEINFO_MAGIC) slurm_seterrno_ret(EBADMAGIC_FEDNODEINFO); - unpackmem_ptr(&name_ptr, &size, buf); + safe_unpackmem_ptr(&name_ptr, &size, buf); if(size != FED_HOSTLEN) goto unpack_error; memcpy(name, name_ptr, size); @@ -1234,7 +1234,7 @@ _unpack_nodeinfo(fed_nodeinfo_t *n, Buf buf, bool believe_window_status) safe_unpack32(&tmp_n->adapter_count, buf); for(i = 0; i < tmp_n->adapter_count; i++) { tmp_a = tmp_n->adapter_list + i; - unpackmem_ptr(&name_ptr, &size, buf); + safe_unpackmem_ptr(&name_ptr, &size, buf); if(size != FED_ADAPTERNAME_LEN) goto unpack_error; memcpy(tmp_a->name, name_ptr, size); @@ -1964,7 +1964,7 @@ fed_pack_jobinfo(fed_jobinfo_t *j, Buf buf) static int _unpack_tableinfo(fed_tableinfo_t *tableinfo, Buf buf) { - uint16_t size; + uint32_t size; char *name_ptr; int i; @@ -1978,7 +1978,7 @@ _unpack_tableinfo(fed_tableinfo_t *tableinfo, Buf buf) safe_unpack16(&tableinfo->table[i]->lid, buf); safe_unpack16(&tableinfo->table[i]->window_id, buf); } - unpackmem_ptr(&name_ptr, &size, buf); + safe_unpackmem_ptr(&name_ptr, &size, buf); if (size != FED_ADAPTERNAME_LEN) goto unpack_error; memcpy(tableinfo->adapter_name, name_ptr, size); @@ -1993,7 +1993,7 @@ unpack_error: /* safe_unpackXX are macros which jump to unpack_error */ int fed_unpack_jobinfo(fed_jobinfo_t *j, Buf buf) { - uint16_t size; + uint32_t size; int i, k; assert(j); @@ -2003,7 +2003,7 @@ fed_unpack_jobinfo(fed_jobinfo_t *j, Buf buf) safe_unpack32(&j->magic, buf); assert(j->magic == FED_JOBINFO_MAGIC); safe_unpack16(&j->job_key, buf); - unpackmem(j->job_desc, &size, buf); + safe_unpackmem(j->job_desc, &size, buf); if(size != DESCLEN) goto unpack_error; safe_unpack32(&j->window_memory, buf); @@ -2234,7 +2234,8 @@ _wait_for_all_windows(fed_tableinfo_t *tableinfo) if (err != SLURM_SUCCESS) { error("Window %hu adapter %s did not become" " free within %d seconds", - lid, tableinfo->table[i]->window_id, i); + lid, tableinfo->table[i]->window_id, + retry); rc = err; retry = 2; } diff --git 
a/src/plugins/switch/federation/federation.h b/src/plugins/switch/federation/federation.h index 0d8ddb700..7b321609c 100644 --- a/src/plugins/switch/federation/federation.h +++ b/src/plugins/switch/federation/federation.h @@ -1,11 +1,11 @@ /*****************************************************************************\ ** federation.h - Library routines for initiating jobs on IBM Federation - ** $Id: federation.h 10574 2006-12-15 23:38:29Z jette $ + ** $Id: federation.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jason King <jking@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/switch/federation/federation_keys.h b/src/plugins/switch/federation/federation_keys.h index 315ab1e0e..9f785dcda 100644 --- a/src/plugins/switch/federation/federation_keys.h +++ b/src/plugins/switch/federation/federation_keys.h @@ -1,11 +1,11 @@ /*****************************************************************************\ ** federation_keys.h - Key definitions used by the get_jobinfo functions - ** $Id: federation_keys.h 10574 2006-12-15 23:38:29Z jette $ + ** $Id: federation_keys.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jason King <jking@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/switch/federation/switch_federation.c b/src/plugins/switch/federation/switch_federation.c index 04484e991..1e8bd41c2 100644 --- a/src/plugins/switch/federation/switch_federation.c +++ b/src/plugins/switch/federation/switch_federation.c @@ -1,12 +1,12 @@ /***************************************************************************** \ ** switch_federation.c - Library routines for initiating jobs on IBM ** Federation - ** $Id: switch_federation.c 10574 2006-12-15 23:38:29Z jette $ + ** $Id: switch_federation.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jason King <jking@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/switch/none/Makefile.am b/src/plugins/switch/none/Makefile.am index 2d74f777c..58a8ad437 100644 --- a/src/plugins/switch/none/Makefile.am +++ b/src/plugins/switch/none/Makefile.am @@ -11,7 +11,3 @@ pkglib_LTLIBRARIES = switch_none.la # Null switch plugin. 
switch_none_la_SOURCES = switch_none.c switch_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) - -if HAVE_AIX -switch_none_la_LIBADD = $(top_builddir)/src/common/libcommon.la -endif diff --git a/src/plugins/switch/none/Makefile.in b/src/plugins/switch/none/Makefile.in index 7c080e967..ba48b744f 100644 --- a/src/plugins/switch/none/Makefile.in +++ b/src/plugins/switch/none/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,14 +75,13 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -@HAVE_AIX_TRUE@switch_none_la_DEPENDENCIES = \ -@HAVE_AIX_TRUE@ $(top_builddir)/src/common/libcommon.la +switch_none_la_LIBADD = am_switch_none_la_OBJECTS = switch_none.lo switch_none_la_OBJECTS = $(am_switch_none_la_OBJECTS) switch_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(switch_none_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -120,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -261,7 +273,6 @@ pkglib_LTLIBRARIES = switch_none.la # Null switch plugin. 
switch_none_la_SOURCES = switch_none.c switch_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -@HAVE_AIX_TRUE@switch_none_la_LIBADD = $(top_builddir)/src/common/libcommon.la all: all-am .SUFFIXES: @@ -301,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -310,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -365,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -378,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -389,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/switch/none/switch_none.c b/src/plugins/switch/none/switch_none.c index f30cb0c3e..7d0b614f5 100644 --- a/src/plugins/switch/none/switch_none.c +++ b/src/plugins/switch/none/switch_none.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/src/plugins/task/Makefile.in b/src/plugins/task/Makefile.in index 1ed780c15..56b248579 100644 --- a/src/plugins/task/Makefile.in +++ b/src/plugins/task/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -101,6 +103,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -114,10 +117,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -137,7 +143,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -148,6 +157,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -163,6 +174,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -178,6 +190,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -349,8 +362,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else 
\ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/task/affinity/Makefile.am b/src/plugins/task/affinity/Makefile.am index c3eb2f7fb..8fa366399 100644 --- a/src/plugins/task/affinity/Makefile.am +++ b/src/plugins/task/affinity/Makefile.am @@ -16,7 +16,7 @@ task_affinity_la_SOURCES = \ schedutils.c \ task_affinity.c task_affinity_la_LDFLAGS = $(SO_LDFLAGS) $(NUMA_LIBS) $(PLPA_LIBS) $(PLUGIN_FLAGS) -task_affinity_la_LIBADD = $(top_builddir)/src/common/libcommon.la + else pkglib_LTLIBRARIES = EXTRA_task_affinity_la_SOURCES = \ diff --git a/src/plugins/task/affinity/Makefile.in b/src/plugins/task/affinity/Makefile.in index e780ed3f8..7b4b935a6 100644 --- a/src/plugins/task/affinity/Makefile.in +++ b/src/plugins/task/affinity/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,7 +75,7 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -@HAVE_SCHED_SETAFFINITY_TRUE@task_affinity_la_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la +task_affinity_la_LIBADD = am__task_affinity_la_SOURCES_DIST = affinity.c affinity.h cpuset.c \ dist_tasks.c dist_tasks.h numa.c schedutils.c task_affinity.c @HAVE_SCHED_SETAFFINITY_TRUE@am_task_affinity_la_OBJECTS = \ @@ -89,7 +91,7 @@ task_affinity_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(task_affinity_la_LDFLAGS) $(LDFLAGS) -o $@ @HAVE_SCHED_SETAFFINITY_TRUE@am_task_affinity_la_rpath = -rpath \ @HAVE_SCHED_SETAFFINITY_TRUE@ $(pkglibdir) -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -131,6 +133,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -144,10 +147,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -167,7 +173,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -178,6 +187,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -193,6 +204,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -208,6 +220,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -278,7 +291,6 @@ INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common @HAVE_SCHED_SETAFFINITY_TRUE@ task_affinity.c @HAVE_SCHED_SETAFFINITY_TRUE@task_affinity_la_LDFLAGS = $(SO_LDFLAGS) $(NUMA_LIBS) $(PLPA_LIBS) $(PLUGIN_FLAGS) -@HAVE_SCHED_SETAFFINITY_TRUE@task_affinity_la_LIBADD = $(top_builddir)/src/common/libcommon.la @HAVE_SCHED_SETAFFINITY_FALSE@EXTRA_task_affinity_la_SOURCES = \ @HAVE_SCHED_SETAFFINITY_FALSE@ affinity.c affinity.h \ @HAVE_SCHED_SETAFFINITY_FALSE@ cpuset.c \ @@ -326,8 +338,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -335,8 +347,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -395,8 +407,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -408,8 +420,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -419,13 +431,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/task/affinity/affinity.c b/src/plugins/task/affinity/affinity.c index ef6593c1f..f6fb34144 100644 --- a/src/plugins/task/affinity/affinity.c +++ b/src/plugins/task/affinity/affinity.c @@ -41,9 +41,7 @@ void slurm_chkaffinity(cpu_set_t *mask, slurmd_job_t *job, int statval) { - char bind_type[42]; - char action[42]; - char status[42]; + char *bind_type, *action, *status, *units; char mstr[1 + CPU_SETSIZE / 4]; int task_gid = job->envtp->procid; int task_lid = job->envtp->localid; @@ -52,33 +50,42 @@ void slurm_chkaffinity(cpu_set_t *mask, slurmd_job_t *job, int statval) if (!(job->cpu_bind_type & CPU_BIND_VERBOSE)) return; - action[0] = '\0'; - status[0] = '\0'; if (statval) - strcpy(status, " FAILED"); + status = " FAILED"; + else + status = ""; if (job->cpu_bind_type & CPU_BIND_NONE) { - strcpy(action, ""); - strcpy(bind_type, "=NONE"); + action = ""; + units = ""; + bind_type = "NONE"; } else { - strcpy(action, " set"); + action = " set"; + if (job->cpu_bind_type & CPU_BIND_TO_THREADS) + units = "_threads"; + else if (job->cpu_bind_type & CPU_BIND_TO_CORES) + units = "_cores"; + else if (job->cpu_bind_type & CPU_BIND_TO_SOCKETS) + units = "_sockets"; + else + units = ""; if (job->cpu_bind_type & CPU_BIND_RANK) { - strcpy(bind_type, "=RANK"); + bind_type = "RANK"; } else if (job->cpu_bind_type & CPU_BIND_MAP) { - strcpy(bind_type, "=MAP "); + bind_type = "MAP "; } else if (job->cpu_bind_type & CPU_BIND_MASK) { - strcpy(bind_type, "=MASK"); + bind_type = "MASK"; } else if (job->cpu_bind_type & (~CPU_BIND_VERBOSE)) { - strcpy(bind_type, "=UNK "); + bind_type = "UNK "; } else { - strcpy(action, ""); - strcpy(bind_type, "=NULL"); + action = ""; + bind_type = "NULL"; } } - fprintf(stderr, "cpu_bind%s - " + fprintf(stderr, "cpu_bind%s=%s - " "%s, task %2u %2u [%u]: mask 0x%s%s%s\n", - bind_type, + units, bind_type, conf->hostname, task_gid, task_lid, diff --git a/src/plugins/task/affinity/affinity.h 
b/src/plugins/task/affinity/affinity.h index 8f213ae36..4ee3d27db 100644 --- a/src/plugins/task/affinity/affinity.h +++ b/src/plugins/task/affinity/affinity.h @@ -61,8 +61,13 @@ #include <sys/utsname.h> #include <unistd.h> -#define _GNU_SOURCE -#define __USE_GNU +#ifndef _GNU_SOURCE +# define _GNU_SOURCE +#endif +#ifndef __USE_GNU +#define __USE_GNU +#endif + #include <sched.h> /* SMB */ #ifdef HAVE_STDLIB_H @@ -80,7 +85,6 @@ #include "src/common/node_select.h" #include "src/common/fd.h" #include "src/common/safeopen.h" -#include "src/common/slurm_jobacct.h" #include "src/common/switch.h" #include "src/common/xsignal.h" #include "src/common/xstring.h" diff --git a/src/plugins/task/affinity/cpuset.c b/src/plugins/task/affinity/cpuset.c index 83bcc211d..dfd0a632f 100644 --- a/src/plugins/task/affinity/cpuset.c +++ b/src/plugins/task/affinity/cpuset.c @@ -5,7 +5,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Written by Don Albert <Don.Albert@Bull.com> and * Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/task/affinity/dist_tasks.c b/src/plugins/task/affinity/dist_tasks.c index f1ba95232..9a725dc1a 100644 --- a/src/plugins/task/affinity/dist_tasks.c +++ b/src/plugins/task/affinity/dist_tasks.c @@ -1,7 +1,7 @@ /*****************************************************************************\ * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -35,13 +35,12 @@ #include "src/plugins/task/affinity/dist_tasks.h" static slurm_lllp_ctx_t *lllp_ctx = NULL; /* binding context */ -static struct node_gids *lllp_tasks = NULL; /* Keep track of the task count for - * logical processors - * socket/core/thread. - */ -static uint32_t lllp_reserved_size = 0;/* size of lllp reserved array */ -static uint32_t *lllp_reserved = NULL; /* count of Reserved lllps (socket, - * core, threads) */ +static struct node_gids *lllp_tasks = NULL; /* Keep track of the task count + * for logical processors + * socket/core/thread. 
*/ +static uint32_t lllp_reserved_size = 0; /* lllp reserved array size */ +static uint32_t *lllp_reserved = NULL; /* count of Reserved lllps + * (socket, core, threads) */ static void _task_layout_display_masks(launch_tasks_request_msg_t *req, @@ -92,10 +91,8 @@ static void _get_resources_this_node(uint16_t *cpus, uint16_t *sockets, uint16_t *cores, uint16_t *threads, - uint16_t *alloc_sockets, uint16_t *alloc_cores, - uint16_t *alloc_lps, - uint32_t *jobid); + uint32_t jobid); static void _cr_update_reservation(int reserve, uint32_t *reserved, bitstr_t *mask); @@ -132,8 +129,8 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id) int rc = SLURM_SUCCESS; bitstr_t **masks = NULL; char buf_type[100]; - int maxtasks = req->tasks_to_launch[node_id]; - const uint32_t *gtid = req->global_task_ids[node_id]; + int maxtasks = req->tasks_to_launch[(int)node_id]; + const uint32_t *gtid = req->global_task_ids[(int)node_id]; slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type); if(req->cpu_bind_type >= CPU_BIND_NONE) { @@ -164,24 +161,24 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id) switch (req->task_dist) { case SLURM_DIST_BLOCK_BLOCK: case SLURM_DIST_CYCLIC_BLOCK: - _task_layout_lllp_block(req, gtid, maxtasks, &masks); + rc = _task_layout_lllp_block(req, gtid, maxtasks, &masks); break; case SLURM_DIST_CYCLIC: case SLURM_DIST_BLOCK: case SLURM_DIST_CYCLIC_CYCLIC: case SLURM_DIST_BLOCK_CYCLIC: - _task_layout_lllp_cyclic(req, gtid, maxtasks, &masks); + rc = _task_layout_lllp_cyclic(req, gtid, maxtasks, &masks); break; case SLURM_DIST_PLANE: - _task_layout_lllp_plane(req, gtid, maxtasks, &masks); + rc = _task_layout_lllp_plane(req, gtid, maxtasks, &masks); break; default: - _task_layout_lllp_cyclic(req, gtid, maxtasks, &masks); + rc = _task_layout_lllp_cyclic(req, gtid, maxtasks, &masks); req->task_dist = SLURM_DIST_BLOCK_CYCLIC; break; } - if (masks) { + if (rc == SLURM_SUCCESS) { _task_layout_display_masks(req, gtid, maxtasks, masks); if (req->cpus_per_task > 1) { _lllp_enlarge_masks(req, maxtasks, masks); @@ -192,12 +189,8 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id) _lllp_map_abstract_masks(maxtasks, masks); _task_layout_display_masks(req, gtid, maxtasks, masks); _lllp_generate_cpu_bind(req, maxtasks, masks); - _lllp_free_masks(req, maxtasks, masks); } - - if(rc != SLURM_SUCCESS) - error (" Error in lllp_distribution_create %s ", - req->task_dist); + _lllp_free_masks(req, maxtasks, masks); } static @@ -585,9 +578,7 @@ static void _lllp_free_masks (launch_tasks_request_msg_t *req, } /* - * _task_layout_lllp_init - * - * task_layout_lllp_init performs common initialization required by: + * _task_layout_lllp_init performs common initialization required by: * _task_layout_lllp_cyclic * _task_layout_lllp_block * _task_layout_lllp_plane @@ -607,8 +598,7 @@ static int _task_layout_lllp_init(launch_tasks_request_msg_t *req, uint16_t *hw_threads, uint16_t *avail_cpus) { - int i; - uint16_t alloc_sockets = 0, alloc_lps = 0; + int min_sockets = 1, min_cores = 1; uint16_t alloc_cores[conf->sockets]; if (req->cpu_bind_type & CPU_BIND_TO_THREADS) { @@ -625,15 +615,12 @@ static int _task_layout_lllp_init(launch_tasks_request_msg_t *req, } _get_resources_this_node(usable_cpus, usable_sockets, usable_cores, - usable_threads, &alloc_sockets, alloc_cores, - &alloc_lps, &req->job_id); + usable_threads, alloc_cores, req->job_id); *hw_sockets = *usable_sockets; *hw_cores = *usable_cores; *hw_threads = *usable_threads; - int 
min_sockets = 1; - int min_cores = 1; *avail_cpus = slurm_get_avail_procs(req->max_sockets, req->max_cores, req->max_threads, @@ -645,15 +632,10 @@ static int _task_layout_lllp_init(launch_tasks_request_msg_t *req, req->ntasks_per_core, usable_cpus, usable_sockets, usable_cores, usable_threads, - alloc_sockets, alloc_cores, - alloc_lps, conf->cr_type, - req->job_id, - conf->hostname); + alloc_cores, conf->cr_type, + req->job_id, conf->hostname); /* Allocate masks array */ *masks_p = xmalloc(maxtasks * sizeof(bitstr_t*)); - for (i = 0; i < maxtasks; i++) { - (*masks_p)[i] = NULL; - } return SLURM_SUCCESS; } @@ -671,14 +653,11 @@ static void _get_resources_this_node(uint16_t *cpus, uint16_t *sockets, uint16_t *cores, uint16_t *threads, - uint16_t *alloc_sockets, uint16_t *alloc_cores, - uint16_t *alloc_lps, - uint32_t *jobid) + uint32_t jobid) { int bit_index = 0; - int i, j , k; - int this_socket = 0, cr_core_enabled = 0; + int i, j, k; /* FIX for heterogeneous socket/core/thread count per system * in future releases */ @@ -687,50 +666,30 @@ static void _get_resources_this_node(uint16_t *cpus, *cores = conf->cores; *threads = conf->threads; - switch(conf->cr_type) { - case CR_CORE: - case CR_CORE_MEMORY: - for(i = 0; i < *sockets; i++) - alloc_cores[i] = 0; - cr_core_enabled = 1; - case CR_SOCKET: - case CR_SOCKET_MEMORY: - case CR_CPU: - case CR_CPU_MEMORY: - for(i = 0; i < *sockets; i++) { - this_socket = 0; - for(j = 0; j < *cores; j++) { - for(k = 0; k < *threads; k++) { - info("jobid %d lllp_reserved[%d]=%d", - *jobid, bit_index, lllp_reserved[bit_index]); - if(lllp_reserved[bit_index] > 0) { - *alloc_lps += 1; - if ((k == 0) && (cr_core_enabled)) { - alloc_cores[i]++; - } - this_socket++; + for(i = 0; i < *sockets; i++) + alloc_cores[i] = 0; + + for(i = 0; i < *sockets; i++) { + for(j = 0; j < *cores; j++) { + for(k = 0; k < *threads; k++) { + info("jobid %u lllp_reserved[%d]=%d", jobid, + bit_index, lllp_reserved[bit_index]); + if(lllp_reserved[bit_index] > 0) { + if (k == 0) { + alloc_cores[i]++; } - bit_index++; } - } - if (this_socket > 0) { - *alloc_sockets += 1; + bit_index++; } } - - xassert(bit_index == (*sockets * *cores * *threads)); - break; - default: - break; } + + xassert(bit_index == (*sockets * *cores * *threads)); #if(0) - info("_get_resources jobid %d hostname %s alloc_sockets %d alloc_lps %d ", - *jobid, conf->hostname, *alloc_sockets, *alloc_lps); - if (cr_core_enabled) - for (i = 0; i < *sockets; i++) - info("_get_resources %d hostname %s socket id %d cores %d ", - *jobid, conf->hostname, i, alloc_cores[i]); + for (i = 0; i < *sockets; i++) + info("_get_resources jobid:%u hostname:%s socket id:%d cores:%u", + jobid, conf->hostname, i, alloc_cores[i]); #endif } @@ -761,8 +720,7 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req, const uint32_t maxtasks, bitstr_t ***masks_p) { - int retval, i, taskcount = 0, taskid = 0; - int over_subscribe = 0, space_remaining = 0; + int retval, i, last_taskcount = -1, taskcount = 0, taskid = 0; uint16_t socket_index = 0, core_index = 0, thread_index = 0; uint16_t hw_sockets = 0, hw_cores = 0, hw_threads = 0; uint16_t usable_cpus = 0, avail_cpus = 0; @@ -787,54 +745,41 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req, &hw_cores, &hw_threads, &avail_cpus); - if (retval != SLURM_SUCCESS) { + if (retval != SLURM_SUCCESS) return retval; - } masks = *masks_p; - for (i=0; taskcount<maxtasks; i++) { - space_remaining = 0; - socket_index = 0; - for (thread_index=0; ((thread_index<usable_threads) - 
&& (taskcount<maxtasks)); thread_index++) { - for (core_index=0; ((core_index<usable_cores) - && (taskcount<maxtasks)); core_index++) { - for (socket_index=0; ((socket_index<usable_sockets) - && (taskcount<maxtasks)); socket_index++) { - if ((socket_index<usable_sockets) || over_subscribe) { - if ((core_index<usable_cores) || over_subscribe) { - if ((thread_index<usable_threads) - || over_subscribe) { - bitstr_t *bitmask = NULL; - taskid = gtid[taskcount]; - _single_mask(hw_sockets, - hw_cores, - hw_threads, - socket_index, - core_index, - thread_index, - bind_to_exact_socket, - bind_to_exact_core, - bind_to_exact_thread, - &bitmask); - xassert(masks[taskcount] == NULL); - xassert(taskcount < maxtasks); - masks[taskcount] = bitmask; - taskcount++; - if ((thread_index+1) < usable_threads) - space_remaining = 1; - } - if ((core_index+1) < usable_cores) - space_remaining = 1; - } - } + for (i=0; taskcount<maxtasks; i++) { + if (taskcount == last_taskcount) { + error("_task_layout_lllp_cyclic failure"); + return SLURM_ERROR; + } + last_taskcount = taskcount; + for (thread_index=0; thread_index<usable_threads; thread_index++) { + for (core_index=0; core_index<usable_cores; core_index++) { + for (socket_index=0; socket_index<usable_sockets; + socket_index++) { + bitstr_t *bitmask = NULL; + taskid = gtid[taskcount]; + _single_mask(hw_sockets, + hw_cores, + hw_threads, + socket_index, + core_index, + thread_index, + bind_to_exact_socket, + bind_to_exact_core, + bind_to_exact_thread, + &bitmask); + xassert(masks[taskcount] == NULL); + masks[taskcount] = bitmask; + if (++taskcount >= maxtasks) + goto fini; } - if (!space_remaining) - over_subscribe = 1; } } } - return SLURM_SUCCESS; + fini: return SLURM_SUCCESS; } /* @@ -864,7 +809,7 @@ static int _task_layout_lllp_block(launch_tasks_request_msg_t *req, const uint32_t maxtasks, bitstr_t ***masks_p) { - int retval, j, k, l, m, taskcount = 0, taskid = 0; + int retval, j, k, l, m, last_taskcount = -1, taskcount = 0, taskid = 0; int over_subscribe = 0, space_remaining = 0; uint16_t core_index = 0, thread_index = 0; uint16_t hw_sockets = 0, hw_cores = 0, hw_threads = 0; @@ -901,6 +846,11 @@ static int _task_layout_lllp_block(launch_tasks_request_msg_t *req, } while(taskcount < maxtasks) { + if (taskcount == last_taskcount) { + error("_task_layout_lllp_block failure"); + return SLURM_ERROR; + } + last_taskcount = taskcount; for (j=0; j<usable_sockets; j++) { for(core_index=0; core_index < usable_cores; core_index++) { if((core_index < usable_cores) || (over_subscribe)) { @@ -940,7 +890,7 @@ static int _task_layout_lllp_block(launch_tasks_request_msg_t *req, } /* Distribute the tasks and create masks for the task - affinity plug-in */ + * affinity plug-in */ taskid = 0; taskcount = 0; for (j=0; j<usable_sockets; j++) { @@ -1003,7 +953,7 @@ static int _task_layout_lllp_plane(launch_tasks_request_msg_t *req, const uint32_t maxtasks, bitstr_t ***masks_p) { - int retval, j, k, l, m, taskid = 0, next = 0; + int retval, j, k, l, m, taskid = 0, last_taskcount = -1, next = 0; uint16_t core_index = 0, thread_index = 0; uint16_t hw_sockets = 0, hw_cores = 0, hw_threads = 0; uint16_t usable_cpus = 0, avail_cpus = 0; @@ -1039,6 +989,11 @@ static int _task_layout_lllp_plane(launch_tasks_request_msg_t *req, next = 0; for (j=0; next<maxtasks; j++) { + if (next == last_taskcount) { + error("_task_layout_lllp_plan failure"); + return SLURM_ERROR; + } + last_taskcount = next; for (k=0; k<usable_sockets; k++) { max_plane_size = (plane_size > usable_cores) ? 
plane_size : usable_cores; for (m=0; m<max_plane_size; m++) { @@ -1147,7 +1102,7 @@ _remove_lllp_job_state(uint32_t jobid) i = list_iterator_create(lllp_ctx->job_list); while ((j = list_next(i)) && (j->jobid != jobid)) {;} if (j) { - list_delete(i); + list_delete_item(i); } list_iterator_destroy(i); } @@ -1162,9 +1117,7 @@ _append_lllp_job_state(lllp_job_state_t *j) void lllp_ctx_destroy(void) { - if (lllp_reserved) { - xfree(lllp_reserved); - } + xfree(lllp_reserved); if (lllp_ctx == NULL) return; @@ -1189,16 +1142,13 @@ lllp_ctx_alloc(void) debug3("alloc LLLP"); - if (lllp_reserved) { - xfree(lllp_reserved); - } + xfree(lllp_reserved); num_lllp = conf->sockets * conf->cores * conf->threads; if (conf->cpus > num_lllp) { num_lllp = conf->cpus; } lllp_reserved_size = num_lllp; lllp_reserved = xmalloc(num_lllp * sizeof(uint32_t)); - memset(lllp_reserved, 0, num_lllp * sizeof(uint32_t)); if (lllp_ctx) { lllp_ctx_destroy(); @@ -1255,7 +1205,7 @@ int _cleanup_lllp(void) } xfree(lllp_tasks->sockets[i].cores); } - xfree(lllp_tasks->sockets);; + xfree(lllp_tasks->sockets); xfree(lllp_tasks); return SLURM_SUCCESS; } @@ -1558,8 +1508,7 @@ static void _cr_update_reservation(int reserve, uint32_t *reserved, int num_bits = bit_size(mask); for(i=0; i < num_bits; i++) { - if (bit_test(mask,i)) - { + if (bit_test(mask,i)) { if (reserve) { /* reserve LLLP */ reserved[i]++; @@ -1597,8 +1546,7 @@ static void _cr_update_lllp(int reserve, uint32_t job_id, uint32_t job_step_id, _cr_reserve_unit(bitmap_test, conf->cr_type); - _cr_update_reservation(reserve, lllp_reserved, - bitmap_test); + _cr_update_reservation(reserve, lllp_reserved, bitmap_test); bit_free(bitmap_test); /* not currently stored with job_id */ @@ -1638,7 +1586,7 @@ void cr_reserve_lllp(uint32_t job_id, debug3("reserve LLLP job [%u.%u]\n", job_id, req->job_step_id); if (req->tasks_to_launch) { - numtasks = req->tasks_to_launch[node_id]; + numtasks = req->tasks_to_launch[(int)node_id]; } slurm_sprint_cpu_bind_type(buf_type, cpu_bind_type); @@ -1658,7 +1606,7 @@ void cr_reserve_lllp(uint32_t job_id, if (j) { _append_lllp_job_state(j); _cr_update_lllp(1, job_id, req->job_step_id, - cpu_bind_type, cpu_bind, numtasks); + cpu_bind_type, cpu_bind, numtasks); } slurm_mutex_unlock(&lllp_ctx->mutex); } @@ -1691,7 +1639,7 @@ void cr_release_lllp(uint32_t job_id) cpu_bind_type, cpu_bind, numtasks); /* done with saved state, remove entry */ - list_delete(i); + list_delete_item(i); } } list_iterator_destroy(i); diff --git a/src/plugins/task/affinity/dist_tasks.h b/src/plugins/task/affinity/dist_tasks.h index a5c0fc314..92b82a91f 100644 --- a/src/plugins/task/affinity/dist_tasks.h +++ b/src/plugins/task/affinity/dist_tasks.h @@ -1,7 +1,7 @@ /*****************************************************************************\ * Copyright (C) 2006 Hewlett-Packard Development Company, L.P. * Written by Susanne M. Balle, <susanne.balle@hp.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/task/affinity/numa.c b/src/plugins/task/affinity/numa.c index 0e62b3f9c..a3d7f57b1 100644 --- a/src/plugins/task/affinity/numa.c +++ b/src/plugins/task/affinity/numa.c @@ -5,7 +5,7 @@ * Copyright (C) 2006 The Regents of the University of California and * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/plugins/task/affinity/task_affinity.c b/src/plugins/task/affinity/task_affinity.c index aa78c13b6..466622d18 100644 --- a/src/plugins/task/affinity/task_affinity.c +++ b/src/plugins/task/affinity/task_affinity.c @@ -2,12 +2,12 @@ * task_affinity.c - Library for task pre-launch and post_termination * functions for task affinity support ***************************************************************************** - * Copyright (C) 2005 Hewlett-Packard Development Company, L.P. + * Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P. * Modified by Hewlett-Packard for task affinity support using task_none.c - * Copyright (C) 2005 The Regents of the University of California and + * Copyright (C) 2005-2007 The Regents of the University of California + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * task_none.c Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -42,6 +42,7 @@ # include "config.h" #endif +#include <ctype.h> #include <signal.h> #include <sys/types.h> @@ -83,7 +84,7 @@ const uint32_t plugin_version = 100; * init() is called when the plugin is loaded, before any other functions * are called. Put global initialization here. */ -int init ( void ) +extern int init (void) { lllp_ctx_alloc(); verbose("%s loaded", plugin_name); @@ -94,20 +95,112 @@ int init ( void ) * fini() is called when the plugin is removed. Clear any allocated * storage here. */ -int fini ( void ) +extern int fini (void) { lllp_ctx_destroy(); verbose("%s unloaded", plugin_name); return SLURM_SUCCESS; } +/* + * _isvalue_task + * returns 1 is the argument appears to be a value, 0 otherwise + * this should be identical to _isvalue in src/srun/opt.c + */ +static int _isvalue_task(char *arg) +{ + if (isdigit(*arg)) { /* decimal values and 0x.. 
hex values */ + return 1; + } + + while (isxdigit(*arg)) { /* hex values not preceded by 0x */ + arg++; + } + + if ((*arg == ',') || (*arg == '\0')) { /* end of field or string */ + return 1; + } + + return 0; /* not a value */ +} + +/* cpu bind enforcement, update binding type based upon SLURM_ENFORCED_CPU_BIND + * environment variable */ +static void _update_bind_type(launch_tasks_request_msg_t *req) +{ + char *buf, *p, *tok; + char buf_type[100]; + cpu_bind_type_t cpu_bind_type; + int cpu_bind_type_is_valid = 0; + char* cpu_bind_type_str = getenv("SLURM_ENFORCED_CPU_BIND"); + + if (cpu_bind_type_str == NULL) + return; + + buf = xstrdup(cpu_bind_type_str); + p = buf; + + /* change all ',' delimiters not followed by a digit to ';' */ + /* simplifies parsing tokens while keeping map/mask together */ + while (p[0] != '\0') { + if ((p[0] == ',') && (!_isvalue_task(&(p[1])))) + p[0] = ';'; + p++; + } + + p = buf; + cpu_bind_type = 0; + while ((tok = strsep(&p, ";")) && !cpu_bind_type_is_valid) { + if ((strcasecmp(tok, "q") == 0) || + (strcasecmp(tok, "quiet") == 0)) { + cpu_bind_type &= ~CPU_BIND_VERBOSE; + } else if ((strcasecmp(tok, "v") == 0) || + (strcasecmp(tok, "verbose") == 0)) { + cpu_bind_type |= CPU_BIND_VERBOSE; + } else if ((strcasecmp(tok, "no") == 0) || + (strcasecmp(tok, "none") == 0)) { + cpu_bind_type |= CPU_BIND_NONE; + cpu_bind_type_is_valid = 1; + } else if ((strcasecmp(tok, "socket") == 0) || + (strcasecmp(tok, "sockets") == 0)) { + cpu_bind_type |= CPU_BIND_TO_SOCKETS; + cpu_bind_type_is_valid = 1; + } else if ((strcasecmp(tok, "core") == 0) || + (strcasecmp(tok, "cores") == 0)) { + cpu_bind_type |= CPU_BIND_TO_CORES; + cpu_bind_type_is_valid = 1; + } else if ((strcasecmp(tok, "thread") == 0) || + (strcasecmp(tok, "threads") == 0)) { + cpu_bind_type |= CPU_BIND_TO_THREADS; + cpu_bind_type_is_valid = 1; + } else { + error("task affinity : invalid enforced cpu bind " + "method '%s': none or an auto binding " + "(cores,sockets,threads) is required", + cpu_bind_type_str); + cpu_bind_type_is_valid = 0; + break; + } + } + xfree(buf); + + if (cpu_bind_type_is_valid) { + req->cpu_bind_type = cpu_bind_type; + slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type); + info("task affinity : enforcing '%s' cpu bind method", + cpu_bind_type_str); + } +} + /* * task_slurmd_launch_request() */ -int task_slurmd_launch_request ( uint32_t job_id, - launch_tasks_request_msg_t *req, uint32_t node_id) +extern int task_slurmd_launch_request (uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id) { int hw_sockets, hw_cores, hw_threads; + char buf_type[100]; debug("task_slurmd_launch_request: %u %u", job_id, node_id); hw_sockets = conf->sockets; @@ -115,33 +208,62 @@ int task_slurmd_launch_request ( uint32_t job_id, hw_threads = conf->threads; if (((hw_sockets >= 1) && ((hw_cores > 1) || (hw_threads > 1))) - || (!(req->cpu_bind_type & CPU_BIND_NONE))) + || (!(req->cpu_bind_type & CPU_BIND_NONE))) { + _update_bind_type(req); + + slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type); + info("task affinity : before lllp distribution cpu bind " + "method is '%s' (%s)", buf_type, req->cpu_bind); + lllp_distribution(req, node_id); + + slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type); + info("task affinity : after lllp distribution cpu bind " + "method is '%s' (%s)", buf_type, req->cpu_bind); + } + /* Remove the slurm msg timeout needs to be investigated some more */ /* req->cpu_bind_type = CPU_BIND_NONE; */ - + return SLURM_SUCCESS; } /* * task_slurmd_reserve_resources() 
*/ -int task_slurmd_reserve_resources ( uint32_t job_id, - launch_tasks_request_msg_t *req, uint32_t node_id) +extern int task_slurmd_reserve_resources (uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id) { - debug("task_slurmd_reserve_resources: %u", - job_id); + debug("task_slurmd_reserve_resources: %u", job_id); cr_reserve_lllp(job_id, req, node_id); return SLURM_SUCCESS; } +/* + * task_slurmd_suspend_job() + */ +extern int task_slurmd_suspend_job (uint32_t job_id) +{ + debug("task_slurmd_suspend_job: %u", job_id); + return SLURM_SUCCESS; +} + +/* + * task_slurmd_resume_job() + */ +extern int task_slurmd_resume_job (uint32_t job_id) +{ + debug("task_slurmd_resume_job: %u", job_id); + return SLURM_SUCCESS; +} + /* * task_slurmd_release_resources() */ -int task_slurmd_release_resources ( uint32_t job_id ) +extern int task_slurmd_release_resources (uint32_t job_id) { - debug("task_slurmd_release_resources: %u", - job_id); + debug("task_slurmd_release_resources: %u", job_id); cr_release_lllp(job_id); return SLURM_SUCCESS; } @@ -151,7 +273,7 @@ int task_slurmd_release_resources ( uint32_t job_id ) * user to launch his jobs. Use this to create the CPUSET directory * and set the owner appropriately. */ -int task_pre_setuid ( slurmd_job_t *job ) +extern int task_pre_setuid (slurmd_job_t *job) { char path[PATH_MAX]; @@ -172,7 +294,7 @@ int task_pre_setuid ( slurmd_job_t *job ) * It is followed by TaskProlog program (from slurm.conf) and * --task-prolog (from srun command line). */ -int task_pre_launch ( slurmd_job_t *job ) +extern int task_pre_launch (slurmd_job_t *job) { char base[PATH_MAX], path[PATH_MAX]; @@ -256,7 +378,7 @@ int task_pre_launch ( slurmd_job_t *job ) * It is preceeded by --task-epilog (from srun command line) * followed by TaskEpilog program (from slurm.conf). */ -int task_post_term ( slurmd_job_t *job ) +extern int task_post_term (slurmd_job_t *job) { debug("affinity task_post_term: %u.%u, task %d", job->jobid, job->stepid, job->envtp->procid); diff --git a/src/plugins/task/none/Makefile.am b/src/plugins/task/none/Makefile.am index d04c1207c..186541c6f 100644 --- a/src/plugins/task/none/Makefile.am +++ b/src/plugins/task/none/Makefile.am @@ -11,7 +11,3 @@ pkglib_LTLIBRARIES = task_none.la # Null task plugin. task_none_la_SOURCES = task_none.c task_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) - -if HAVE_AIX -task_none_la_LIBADD = $(top_builddir)/src/common/libcommon.la -endif diff --git a/src/plugins/task/none/Makefile.in b/src/plugins/task/none/Makefile.in index 74738401d..11fc8d070 100644 --- a/src/plugins/task/none/Makefile.in +++ b/src/plugins/task/none/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -44,6 +44,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,14 +75,13 @@ am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; am__installdirs = "$(DESTDIR)$(pkglibdir)" pkglibLTLIBRARIES_INSTALL = $(INSTALL) LTLIBRARIES = $(pkglib_LTLIBRARIES) -@HAVE_AIX_TRUE@task_none_la_DEPENDENCIES = \ -@HAVE_AIX_TRUE@ $(top_builddir)/src/common/libcommon.la +task_none_la_LIBADD = am_task_none_la_OBJECTS = task_none.lo task_none_la_OBJECTS = $(am_task_none_la_OBJECTS) task_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(task_none_la_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -120,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -133,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -156,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -167,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -182,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -197,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -261,7 +273,6 @@ pkglib_LTLIBRARIES = task_none.la # Null task plugin. 
task_none_la_SOURCES = task_none.c task_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) -@HAVE_AIX_TRUE@task_none_la_LIBADD = $(top_builddir)/src/common/libcommon.la all: all-am .SUFFIXES: @@ -301,8 +312,8 @@ install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ if test -f $$p; then \ f=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ - $(LIBTOOL) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \ else :; fi; \ done @@ -310,8 +321,8 @@ uninstall-pkglibLTLIBRARIES: @$(NORMAL_UNINSTALL) @list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \ p=$(am__strip_dir) \ - echo " $(LIBTOOL) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ - $(LIBTOOL) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \ done clean-pkglibLTLIBRARIES: @@ -365,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -378,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -389,13 +400,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/plugins/task/none/task_none.c b/src/plugins/task/none/task_none.c index d45690f19..5e852eab6 100644 --- a/src/plugins/task/none/task_none.c +++ b/src/plugins/task/none/task_none.c @@ -2,10 +2,11 @@ * task_none.c - Library for task pre-launch and post_termination functions * with no actions ***************************************************************************** - * Copyright (C) 2005 The Regents of the University of California. + * Copyright (C) 2005-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -82,7 +83,7 @@ const uint32_t plugin_version = 100; * init() is called when the plugin is loaded, before any other functions * are called. Put global initialization here. */ -int init ( void ) +extern int init (void) { verbose("%s loaded", plugin_name); return SLURM_SUCCESS; @@ -92,7 +93,7 @@ int init ( void ) * fini() is called when the plugin is removed. Clear any allocated * storage here. */ -int fini ( void ) +extern int fini (void) { return SLURM_SUCCESS; } @@ -100,30 +101,49 @@ int fini ( void ) /* * task_slurmd_launch_request() */ -int task_slurmd_launch_request ( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id) +extern int task_slurmd_launch_request (uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id) { - debug("task_slurmd_launch_request: %u %u", - job_id, node_id); + debug("task_slurmd_launch_request: %u %u", job_id, node_id); return SLURM_SUCCESS; } /* * task_slurmd_reserve_resources() */ -int task_slurmd_reserve_resources ( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id ) +extern int task_slurmd_reserve_resources (uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id) { - debug("task_slurmd_reserve_resources: %u %u", - job_id, node_id); + debug("task_slurmd_reserve_resources: %u %u", job_id, node_id); + return SLURM_SUCCESS; +} + +/* + * task_slurmd_suspend_job() + */ +extern int task_slurmd_suspend_job (uint32_t job_id) +{ + debug("task_slurmd_suspend_job: %u", job_id); + return SLURM_SUCCESS; +} + +/* + * task_slurmd_resume_job() + */ +extern int task_slurmd_resume_job (uint32_t job_id) +{ + debug("task_slurmd_resume_job: %u", job_id); return SLURM_SUCCESS; } /* * task_slurmd_release_resources() */ -int task_slurmd_release_resources ( uint32_t job_id ) +extern int task_slurmd_release_resources (uint32_t job_id) { - debug("task_slurmd_release_resources: %u", - job_id); + debug("task_slurmd_release_resources: %u", job_id); return SLURM_SUCCESS; } @@ -132,7 +152,7 @@ int task_slurmd_release_resources ( uint32_t job_id ) * user to launch his jobs. Use this to create the CPUSET directory * and set the owner appropriately. */ -int task_pre_setuid ( slurmd_job_t *job ) +extern int task_pre_setuid (slurmd_job_t *job) { return SLURM_SUCCESS; } @@ -142,7 +162,7 @@ int task_pre_setuid ( slurmd_job_t *job ) * It is followed by TaskProlog program (from slurm.conf) and * --task-prolog (from srun command line). */ -int task_pre_launch ( slurmd_job_t *job ) +extern int task_pre_launch (slurmd_job_t *job) { debug("task_pre_launch: %u.%u, task %d", job->jobid, job->stepid, job->envtp->procid); @@ -154,7 +174,7 @@ int task_pre_launch ( slurmd_job_t *job ) * It is preceeded by --task-epilog (from srun command line) * followed by TaskEpilog program (from slurm.conf). 
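As an aside on the hunk above: every entry point in task_none.c gains an explicit `extern` and a consistently wrapped argument list (for example "int init ( void )" becomes "extern int init (void)"). A short illustrative note on what the added `extern` conveys, using placeholder names that are not from the SLURM sources:

    /* illustrative only -- not part of the patch */
    int fini(void);           /* functions already have external linkage   */
    extern int init(void);    /* 'extern' changes nothing semantically; it
                                 simply spells out that these are symbols
                                 the plugin loader is expected to resolve  */
    static int helper(void);  /* by contrast, 'static' would hide a symbol
                                 from the loader                           */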
*/ -int task_post_term ( slurmd_job_t *job ) +extern int task_post_term (slurmd_job_t *job) { debug("task_post_term: %u.%u, task %d", job->jobid, job->stepid, job->envtp->procid); diff --git a/src/sacct/Makefile.am b/src/sacct/Makefile.am index 0b7a3a771..4b25bcca7 100644 --- a/src/sacct/Makefile.am +++ b/src/sacct/Makefile.am @@ -6,7 +6,8 @@ INCLUDES = -I$(top_srcdir) bin_PROGRAMS = sacct -sacct_LDADD = $(top_builddir)/src/api/libslurmhelper.la +sacct_LDADD = $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la noinst_HEADERS = sacct.c sacct_SOURCES = sacct.c process.c print.c options.c sacct_stat.c diff --git a/src/sacct/Makefile.in b/src/sacct/Makefile.in index 339e1e9be..c3a7b5a9b 100644 --- a/src/sacct/Makefile.in +++ b/src/sacct/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -47,6 +47,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -73,11 +75,12 @@ PROGRAMS = $(bin_PROGRAMS) am_sacct_OBJECTS = sacct.$(OBJEXT) process.$(OBJEXT) print.$(OBJEXT) \ options.$(OBJEXT) sacct_stat.$(OBJEXT) sacct_OBJECTS = $(am_sacct_OBJECTS) -sacct_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la +sacct_DEPENDENCIES = $(top_builddir)/src/common/libcommon.o \ + $(top_builddir)/src/api/libslurmhelper.la sacct_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sacct_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -118,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -131,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -154,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -165,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -180,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -195,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -253,7 +267,9 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign INCLUDES = -I$(top_srcdir) -sacct_LDADD = $(top_builddir)/src/api/libslurmhelper.la +sacct_LDADD = $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la + noinst_HEADERS = sacct.c sacct_SOURCES = sacct.c process.c print.c options.c sacct_stat.c sacct_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) @@ -299,8 +315,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -366,8 +382,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -379,8 +395,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; 
then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -390,13 +406,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/sacct/options.c b/src/sacct/options.c index 6ebb7c1b1..330b62548 100644 --- a/src/sacct/options.c +++ b/src/sacct/options.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -41,28 +41,17 @@ #include "sacct.h" #include <time.h> -typedef struct expired_rec { /* table of expired jobs */ - uint32_t job; - time_t job_submit; - char *line; -} expired_rec_t; - void _destroy_parts(void *object); void _destroy_steps(void *object); -void _destroy_exp(void *object); -char *_convert_type(int rec_type); -int _cmp_jrec(const void *a1, const void *a2); -void _dump_header(acct_header_t header); -FILE *_open_log_file(void); void _help_fields_msg(void); void _help_msg(void); void _usage(void); void _init_params(); -char *_prefix_filename(char *path, char *prefix); -int selected_status[STATUS_COUNT]; +int selected_state[STATE_COUNT]; List selected_parts = NULL; List selected_steps = NULL; +void *acct_db_conn = NULL; void _destroy_parts(void *object) { @@ -72,35 +61,13 @@ void _destroy_parts(void *object) void _destroy_steps(void *object) { - selected_step_t *step = (selected_step_t *)object; + jobacct_selected_step_t *step = (jobacct_selected_step_t *)object; if(step) { xfree(step->job); xfree(step->step); xfree(step); } } -void _destroy_exp(void *object) -{ - expired_rec_t *exp_rec = (expired_rec_t *)object; - if(exp_rec) { - xfree(exp_rec->line); - xfree(exp_rec); - } -} - -char *_convert_type(int rec_type) -{ - switch(rec_type) { - case JOB_START: - return "JOB_START"; - case JOB_STEP: - return "JOB_STEP"; - case JOB_TERMINATED: - return "JOB_TERMINATED"; - default: - return "UNKNOWN"; - } -} void _show_rec(char *f[]) { @@ -112,63 +79,6 @@ void _show_rec(char *f[]) return; } -int _cmp_jrec(const void *a1, const void *a2) { - expired_rec_t *j1 = (expired_rec_t *) a1; - expired_rec_t *j2 = (expired_rec_t *) a2; - - if (j1->job < j2->job) - return -1; - else if (j1->job == j2->job) { - if(j1->job_submit == j2->job_submit) - return 0; - else - return 1; - } - return 1; -} - -/* _dump_header() -- dump the common fields of a record - * - * In: Index into the jobs table - * Out: Nothing. 
- */ -void _dump_header(acct_header_t header) -{ - struct tm ts; - gmtime_r(&header.timestamp, &ts); - printf("%u %s %04d%02d%02d%02d%02d%02d %d %s %s ", - header.jobnum, - header.partition, - 1900+(ts.tm_year), - 1+(ts.tm_mon), - ts.tm_mday, - ts.tm_hour, - ts.tm_min, - ts.tm_sec, - (int)header.job_submit, - header.blockid, /* block id */ - "-"); /* reserved 1 */ -} -/* _open_log_file() -- find the current or specified log file, and open it - * - * IN: Nothing - * RETURNS: Nothing - * - * Side effects: - * - Sets opt_filein to the current system accounting log unless - * the user specified another file. - */ - -FILE *_open_log_file(void) -{ - FILE *fd = fopen(params.opt_filein, "r"); - if (fd == NULL) { - perror(params.opt_filein); - exit(1); - } - return fd; -} - void _help_fields_msg(void) { int i; @@ -205,17 +115,17 @@ void _help_msg(void) "\n" "Options:\n" "\n" - "-A, --Account\n" - " Equivalent to \"--fields=jobid,jobname,start,end,cpu,\n" - " vsize_short,status,exitcode\". This option has no effect\n" - " if --dump is specified.\n" "-a, --all\n" " Display job accounting data for all users. By default, only\n" " data for the current user is displayed for users other than\n" " root.\n" "-b, --brief\n" - " Equivalent to \"--fields=jobstep,status,error\". This option\n" + " Equivalent to \"--fields=jobstep,state,error\". This option\n" " has no effect if --dump is specified.\n" + "-c, --completion\n" + " Use job completion instead of accounting data.\n" + "-C, --cluster\n" + " Only send data about this cluster.\n" "-d, --dump\n" " Dump the raw data records\n" "--duplicates\n" @@ -223,7 +133,7 @@ void _help_msg(void) " isn't reset at the same time (with -e, for example), some\n" " job numbers will probably appear more than once in the\n" " accounting log file to refer to different jobs; such jobs\n" - " can be distinguished by the \"job_submit\" time stamp in the\n" + " can be distinguished by the \"submit\" time stamp in the\n" " data records.\n" " When data for specific jobs are requested with\n" " the --jobs option, we assume that the user\n" @@ -248,14 +158,14 @@ void _help_msg(void) "-F <field-list>, --fields=<field-list>\n" " Display the specified data (use \"--help-fields\" for a\n" " list of available fields). 
If no field option is specified,\n" - " we use \"--fields=jobstep,jobname,partition,ncpus,status,error\".\n" + " we use \"--fields=jobstep,jobname,partition,alloc_cpus,state,error\".\n" "-f<file>, --file=<file>\n" " Read data from the specified file, rather than SLURM's current\n" " accounting log file.\n" "-l, --long\n" " Equivalent to specifying\n" " \"--fields=jobstep,usercpu,systemcpu,minflt,majflt,nprocs,\n" - " ncpus,elapsed,status,exitcode\"\n" + " alloc_cpus,elapsed,state,exitcode\"\n" "-O, --formatted_dump\n" " Dump accounting records in an easy-to-read format, primarily\n" " for debugging.\n" @@ -281,20 +191,18 @@ void _help_msg(void) "-P --purge\n" " Used in conjunction with --expire to remove invalid data\n" " from the job accounting log.\n" - "-r --raw\n" - " don't format data leave in raw format\n" "-s <state-list>, --state=<state-list>\n" - " Select jobs based on their current status: running (r),\n" + " Select jobs based on their current state: running (r),\n" " completed (cd), failed (f), timeout (to), and node_fail (nf).\n" "-S, --stat\n" - " Get real time status of a jobstep supplied by the -j\n" + " Get real time state of a jobstep supplied by the -j\n" " option\n" "-t, --total\n" " Only show cumulative statistics for each job, not the\n" " intermediate steps\n" "-u <uid>, --uid <uid>\n" " Select only jobs submitted by the user with uid <uid>. Only\n" - " root users are allowed to specify a uid other than their own.\n" + " root users are allowed to specify a uid other than their own -1 for all users.\n" "--usage\n" " Pointer to this message.\n" "-v, --verbose\n" @@ -313,6 +221,8 @@ void _usage(void) void _init_params() { + params.opt_cluster = NULL; /* --cluster */ + params.opt_completion = 0; /* --completion */ params.opt_dump = 0; /* --dump */ params.opt_dup = -1; /* --duplicates; +1 = explicitly set */ params.opt_fdump = 0; /* --formattted_dump */ @@ -323,9 +233,9 @@ void _init_params() params.opt_long = 0; /* --long */ params.opt_lowmem = 0; /* --low_memory */ params.opt_purge = 0; /* --purge */ - params.opt_raw = 0; /* --raw */ params.opt_total = 0; /* --total */ params.opt_uid = -1; /* --uid (-1=wildcard, 0=root) */ + params.opt_uid_set = 0; params.opt_verbose = 0; /* --verbose */ params.opt_expire_timespec = NULL; /* --expire= */ params.opt_field_list = NULL; /* --fields= */ @@ -335,237 +245,86 @@ void _init_params() params.opt_state_list = NULL; /* --states */ } -/* prefix_filename() -- insert a filename prefix into a path - * - * IN: path = fully-qualified path+file name - * prefix = the prefix to insert into the file name - * RETURNS: pointer to the updated path+file name - */ - -char *_prefix_filename(char *path, char *prefix) { - char *out; - int i, - plen; - - plen = strlen(path); - out = xmalloc(plen+strlen(prefix)+1); - for (i=plen-1; i>=0; i--) - if (path[i]=='/') { - break; - } - i++; - *out = 0; - strncpy(out, path, i); - out[i] = 0; - strcat(out, prefix); - strcat(out, path+i); - return(out); -} - -int decode_status_char(char *status) +int decode_state_char(char *state) { - if (!strcasecmp(status, "p")) + if (!strcasecmp(state, "p")) return JOB_PENDING; /* we should never see this */ - else if (!strcasecmp(status, "r")) + else if (!strcasecmp(state, "r")) return JOB_RUNNING; - else if (!strcasecmp(status, "su")) + else if (!strcasecmp(state, "su")) return JOB_SUSPENDED; - else if (!strcasecmp(status, "cd")) + else if (!strcasecmp(state, "cd")) return JOB_COMPLETE; - else if (!strcasecmp(status, "ca")) + else if (!strcasecmp(state, "ca")) return 
JOB_CANCELLED; - else if (!strcasecmp(status, "f")) + else if (!strcasecmp(state, "f")) return JOB_FAILED; - else if (!strcasecmp(status, "to")) + else if (!strcasecmp(state, "to")) return JOB_TIMEOUT; - else if (!strcasecmp(status, "nf")) + else if (!strcasecmp(state, "nf")) return JOB_NODE_FAIL; else return -1; // unknown } -char *decode_status_int(int status) +int get_data(void) { - switch(status & ~JOB_COMPLETING) { - case JOB_PENDING: - return "PENDING"; /* we should never see this */ - case JOB_RUNNING: - return "RUNNING"; - case JOB_SUSPENDED: - return "SUSPENDED"; - case JOB_COMPLETE: - return "COMPLETED"; - case JOB_CANCELLED: - return "CANCELLED"; - case JOB_FAILED: - return "FAILED"; - case JOB_TIMEOUT: - return "TIMEOUT"; - case JOB_NODE_FAIL: - return "NODE_FAILED"; - default: - return "UNKNOWN"; - } -} + jobacct_job_rec_t *job = NULL; + jobacct_step_rec_t *step = NULL; -char *decode_status_int_abbrev(int status) -{ - switch(status & ~JOB_COMPLETING) { - case JOB_PENDING: - return "PD"; /* we should never see this */ - case JOB_RUNNING: - return "R"; - case JOB_SUSPENDED: - return "S"; - case JOB_COMPLETE: - return "CD"; - case JOB_CANCELLED: - return "CA"; - case JOB_FAILED: - return "F"; - case JOB_TIMEOUT: - return "TO"; - case JOB_NODE_FAIL: - return "NF"; - case JOB_END: - return "JOB_END"; - default: - return "UNKNOWN"; + ListIterator itr = NULL; + ListIterator itr_step = NULL; + + if(params.opt_completion) { + jobs = g_slurm_jobcomp_get_jobs(selected_steps, + selected_parts, ¶ms); + return SLURM_SUCCESS; + } else { + jobs = jobacct_storage_g_get_jobs(acct_db_conn, + selected_steps, + selected_parts, ¶ms); } -} -int get_data(void) -{ - char line[BUFFER_SIZE]; - char *f[MAX_RECORD_FIELDS+1]; /* End list with null entry and, - possibly, more data than we - expected */ - char *fptr; - int i; - FILE *fd = NULL; - int lc = 0; - int rec_type = -1; - selected_step_t *selected_step = NULL; - char *selected_part = NULL; - ListIterator itr = NULL; - int show_full = 0; + if (params.opt_fdump) + return SLURM_SUCCESS; - fd = _open_log_file(); - - while (fgets(line, BUFFER_SIZE, fd)) { - lc++; - fptr = line; /* break the record into NULL- - terminated strings */ - - for (i = 0; i < MAX_RECORD_FIELDS; i++) { - f[i] = fptr; - fptr = strstr(fptr, " "); - if (fptr == NULL) { - fptr = strstr(f[i], "\n"); - if (fptr) - *fptr = 0; - break; - } else - *fptr++ = 0; - } - f[++i] = 0; - - if(i < HEADER_LENGTH) { - continue; - } - - rec_type = atoi(f[F_RECTYPE]); - - if (list_count(selected_steps)) { - itr = list_iterator_create(selected_steps); - while((selected_step = list_next(itr))) { - if (strcmp(selected_step->job, f[F_JOB])) - continue; - /* job matches; does the step? 
*/ - if(selected_step->step == NULL) { - show_full = 1; - list_iterator_destroy(itr); - goto foundjob; - } else if (rec_type != JOB_STEP - || !strcmp(f[F_JOBSTEP], - selected_step->step)) { - list_iterator_destroy(itr); - goto foundjob; - } - } - list_iterator_destroy(itr); - continue; /* no match */ - } else { - show_full = 1; - } - foundjob: - - if (list_count(selected_parts)) { - itr = list_iterator_create(selected_parts); - while((selected_part = list_next(itr))) - if (!strcasecmp(f[F_PARTITION], - selected_part)) { - list_iterator_destroy(itr); - goto foundp; - } - list_iterator_destroy(itr); - continue; /* no match */ + if(!jobs) + return SLURM_ERROR; + + itr = list_iterator_create(jobs); + while((job = list_next(itr))) { + if(job->user) { + struct passwd *pw = NULL; + if ((pw=getpwnam(job->user))) + job->uid = pw->pw_uid; } - foundp: - if (params.opt_fdump) { - do_fdump(f, lc); + if(!list_count(job->steps)) continue; - } - /* Build suitable tables with all the data */ - switch(rec_type) { - case JOB_START: - if(i < F_JOB_ACCOUNT) { - printf("Bad data on a Job Start\n"); - _show_rec(f); - } else - process_start(f, lc, show_full, i); - break; - case JOB_STEP: - if(i < F_MAX_VSIZE) { - printf("Bad data on a Step entry\n"); - _show_rec(f); - } else - process_step(f, lc, show_full, i); - break; - case JOB_SUSPEND: - if(i < JOB_TERM_LENGTH) { - printf("Bad data on a Suspend entry\n"); - _show_rec(f); - } else - process_suspend(f, lc, show_full, i); - break; - case JOB_TERMINATED: - if(i < JOB_TERM_LENGTH) { - printf("Bad data on a Job Term\n"); - _show_rec(f); - } else - process_terminated(f, lc, show_full, i); - break; - default: - if (params.opt_verbose > 1) - fprintf(stderr, - "Invalid record at line %d of " - "input file\n", - lc); - if (params.opt_verbose > 2) - _show_rec(f); - input_error++; - break; + itr_step = list_iterator_create(job->steps); + while((step = list_next(itr_step)) != NULL) { + /* now aggregate the aggregatable */ + job->alloc_cpus = MAX(job->alloc_cpus, step->ncpus); + + if(step->state < JOB_COMPLETE) + continue; + job->tot_cpu_sec += step->tot_cpu_sec; + job->tot_cpu_usec += step->tot_cpu_usec; + job->user_cpu_sec += + step->user_cpu_sec; + job->user_cpu_usec += + step->user_cpu_usec; + job->sys_cpu_sec += + step->sys_cpu_sec; + job->sys_cpu_usec += + step->sys_cpu_usec; + /* get the max for all the sacct_t struct */ + aggregate_sacct(&job->sacct, &step->sacct); } + list_iterator_destroy(itr_step); } - - if (ferror(fd)) { - perror(params.opt_filein); - exit(1); - } - fclose(fd); + list_iterator_destroy(itr); return SLURM_SUCCESS; } @@ -575,15 +334,17 @@ void parse_command_line(int argc, char **argv) extern int optind; int c, i, optionIndex = 0; char *end = NULL, *start = NULL, *acct_type = NULL; - selected_step_t *selected_step = NULL; + jobacct_selected_step_t *selected_step = NULL; ListIterator itr = NULL; struct stat stat_buf; char *dot = NULL; + bool brief_output = FALSE, long_output = FALSE; static struct option long_options[] = { {"all", 0,0, 'a'}, - {"Account", 0,0, 'A'}, {"brief", 0, 0, 'b'}, + {"cluster", 1, 0, 'C'}, + {"completion", 0, ¶ms.opt_completion, 'c'}, {"duplicates", 0, ¶ms.opt_dup, 1}, {"dump", 0, 0, 'd'}, {"expire", 1, 0, 'e'}, @@ -602,7 +363,6 @@ void parse_command_line(int argc, char **argv) {"noheader", 0, ¶ms.opt_header, 0}, {"partition", 1, 0, 'p'}, {"purge", 0, 0, 'P'}, - {"raw", 0, 0, 'r'}, {"state", 1, 0, 's'}, {"total", 0, 0, 't'}, {"uid", 1, 0, 'u'}, @@ -614,13 +374,14 @@ void parse_command_line(int argc, char **argv) 
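As an aside on the rewritten get_data() shown above: the accounting records now carry a user name, and the code resolves it back to a numeric uid with getpwnam() before the per-step totals are folded into the job record. A minimal, self-contained sketch of that lookup; the helper name and error handling are illustrative only, not part of the patch:

    #include <pwd.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Hypothetical helper: map a user name to a uid, returning -1 when the
     * name is unknown.  Mirrors the getpwnam() call used in get_data(). */
    static int lookup_uid(const char *name, uid_t *uid)
    {
        struct passwd *pw;

        if (name == NULL)
            return -1;
        pw = getpwnam(name);    /* consults /etc/passwd, NSS, etc. */
        if (pw == NULL)
            return -1;          /* unknown user */
        *uid = pw->pw_uid;
        return 0;
    }

    int main(void)
    {
        uid_t uid;
        if (lookup_uid("root", &uid) == 0)
            printf("root -> %u\n", (unsigned) uid);
        return 0;
    }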
_init_params(); - if ((i=getuid())) /* default to current user unless root*/ + if ((i=getuid())) + /* default to current user unless root*/ params.opt_uid = i; opterr = 1; /* Let getopt report problems to the user */ while (1) { /* now cycle through the command line */ - c = getopt_long(argc, argv, "aAbde:F:f:g:hj:J:lOPp:rs:StUu:Vv", + c = getopt_long(argc, argv, "abcC:de:F:f:g:hj:J:lOPp:s:StUu:Vv", long_options, &optionIndex); if (c == -1) break; @@ -628,26 +389,15 @@ void parse_command_line(int argc, char **argv) case 'a': params.opt_uid = -1; break; - case 'A': - params.opt_field_list = - xrealloc(params.opt_field_list, - (params.opt_field_list==NULL? 0 : - sizeof(params.opt_field_list)) + - sizeof(ACCOUNT_FIELDS)+1); - strcat(params.opt_field_list, ACCOUNT_FIELDS); - strcat(params.opt_field_list, ","); - break; - case 'b': - params.opt_field_list = - xrealloc(params.opt_field_list, - (params.opt_field_list==NULL? 0 : - sizeof(params.opt_field_list)) + - sizeof(BRIEF_FIELDS)+1); - strcat(params.opt_field_list, BRIEF_FIELDS); - strcat(params.opt_field_list, ","); + brief_output = true; + break; + case 'c': + params.opt_completion = 1; + break; + case 'C': + params.opt_cluster = xstrdup(optarg); break; - case 'd': params.opt_dump = 1; break; @@ -655,7 +405,7 @@ void parse_command_line(int argc, char **argv) case 'e': { /* decode the time spec */ long acc=0; - params.opt_expire_timespec = strdup(optarg); + params.opt_expire_timespec = xstrdup(optarg); for (i=0; params.opt_expire_timespec[i]; i++) { char c = params.opt_expire_timespec[i]; if (isdigit(c)) { @@ -755,13 +505,7 @@ void parse_command_line(int argc, char **argv) break; case 'l': - params.opt_field_list = - xrealloc(params.opt_field_list, - (params.opt_field_list==NULL? 0 : - strlen(params.opt_field_list)) + - sizeof(LONG_FIELDS)+1); - strcat(params.opt_field_list, LONG_FIELDS); - strcat(params.opt_field_list, ","); + long_output = true; break; case 'O': @@ -781,9 +525,7 @@ void parse_command_line(int argc, char **argv) strcat(params.opt_partition_list, optarg); strcat(params.opt_partition_list, ","); break; - case 'r': - params.opt_raw = 1; - break; + case 's': params.opt_state_list = xrealloc(params.opt_state_list, @@ -795,15 +537,12 @@ void parse_command_line(int argc, char **argv) break; case 'S': - params.opt_field_list = - xrealloc(params.opt_field_list, - (params.opt_field_list==NULL? 
0 : - strlen(params.opt_field_list)) + - sizeof(STAT_FIELDS)+1); - - strcat(params.opt_field_list, STAT_FIELDS); - strcat(params.opt_field_list, ","); - + if(!params.opt_field_list) { + params.opt_field_list = + xmalloc(sizeof(STAT_FIELDS)+1); + strcat(params.opt_field_list, STAT_FIELDS); + strcat(params.opt_field_list, ","); + } params.opt_stat = 1; break; @@ -816,7 +555,7 @@ void parse_command_line(int argc, char **argv) break; case 'u': - if (isdigit((int) *optarg)) + if (isdigit((int) *optarg) || atoi(optarg) == -1) params.opt_uid = atoi(optarg); else { struct passwd *pwd; @@ -869,6 +608,8 @@ void parse_command_line(int argc, char **argv) if (params.opt_verbose) { fprintf(stderr, "Options selected:\n" + "\topt_cluster=%s\n" + "\topt_completion=%d\n" "\topt_dump=%d\n" "\topt_dup=%d\n" "\topt_expire=%s (%lu seconds)\n" @@ -883,11 +624,12 @@ void parse_command_line(int argc, char **argv) "\topt_lowmem=%d\n" "\topt_partition_list=%s\n" "\topt_purge=%d\n" - "\topt_raw=%d\n" "\topt_state_list=%s\n" "\topt_total=%d\n" "\topt_uid=%d\n" "\topt_verbose=%d\n", + params.opt_cluster, + params.opt_completion, params.opt_dump, params.opt_dup, params.opt_expire_timespec, params.opt_expire, @@ -902,7 +644,6 @@ void parse_command_line(int argc, char **argv) params.opt_lowmem, params.opt_partition_list, params.opt_purge, - params.opt_raw, params.opt_state_list, params.opt_total, params.opt_uid, @@ -910,15 +651,36 @@ void parse_command_line(int argc, char **argv) } /* check if we have accounting data to view */ - if (params.opt_filein == NULL) - params.opt_filein = slurm_get_jobacct_loc(); - acct_type = slurm_get_jobacct_type(); - if ((strcmp(acct_type, "jobacct/none") == 0) - && (stat(params.opt_filein, &stat_buf) != 0)) { - fprintf(stderr, "SLURM accounting is disabled\n"); - exit(1); + if (params.opt_filein == NULL) { + if(params.opt_completion) + params.opt_filein = slurm_get_jobcomp_loc(); + else + params.opt_filein = slurm_get_accounting_storage_loc(); + } + + if(params.opt_completion) { + g_slurm_jobcomp_init(params.opt_filein); + + acct_type = slurm_get_jobcomp_type(); + if ((strcmp(acct_type, "jobcomp/none") == 0) + && (stat(params.opt_filein, &stat_buf) != 0)) { + fprintf(stderr, "SLURM job completion is disabled\n"); + exit(1); + } + xfree(acct_type); + } else { + slurm_acct_storage_init(params.opt_filein); + acct_db_conn = acct_storage_g_get_connection(false, false); + + acct_type = slurm_get_accounting_storage_type(); + if ((strcmp(acct_type, "accounting_storage/none") == 0) + && (stat(params.opt_filein, &stat_buf) != 0)) { + fprintf(stderr, + "SLURM accounting storage is disabled\n"); + exit(1); + } + xfree(acct_type); } - xfree(acct_type); /* specific partitions requested? */ if (params.opt_partition_list) { @@ -952,18 +714,22 @@ void parse_command_line(int argc, char **argv) start++; /* discard whitespace */ if(!(int)*start) continue; - selected_step = xmalloc(sizeof(selected_step_t)); + selected_step = + xmalloc(sizeof(jobacct_selected_step_t)); list_append(selected_steps, selected_step); dot = strstr(start, "."); if (dot == NULL) { debug2("No jobstep requested"); selected_step->step = NULL; + selected_step->stepid = (uint32_t)NO_VAL; } else { *dot++ = 0; selected_step->step = xstrdup(dot); + selected_step->stepid = atoi(dot); } selected_step->job = xstrdup(start); + selected_step->jobid = atoi(start); start = end + 1; } if (params.opt_verbose) { @@ -982,7 +748,7 @@ void parse_command_line(int argc, char **argv) } } - /* specific states (completion status) requested? 
*/ + /* specific states (completion state) requested? */ if (params.opt_state_list) { start = params.opt_state_list; while ((end = strstr(start, ",")) && start) { @@ -992,18 +758,18 @@ void parse_command_line(int argc, char **argv) start++; /* discard whitespace */ if(!(int)*start) continue; - c = decode_status_char(start); + c = decode_state_char(start); if (c == -1) fatal("unrecognized job state value"); - selected_status[c] = 1; + selected_state[c] = 1; start = end + 1; } if (params.opt_verbose) { fprintf(stderr, "States requested:\n"); - for(i=0; i< STATUS_COUNT; i++) { - if(selected_status[i]) { + for(i=0; i< STATE_COUNT; i++) { + if(selected_state[i]) { fprintf(stderr, "\t: %s\n", - decode_status_int(i)); + job_state_string(i)); break; } } @@ -1011,13 +777,48 @@ void parse_command_line(int argc, char **argv) } /* select the output fields */ + if(brief_output) { + if(params.opt_completion) + dot = BRIEF_COMP_FIELDS; + else + dot = BRIEF_FIELDS; + + params.opt_field_list = + xrealloc(params.opt_field_list, + (params.opt_field_list==NULL? 0 : + sizeof(params.opt_field_list)) + + strlen(dot)+1); + strcat(params.opt_field_list, dot); + strcat(params.opt_field_list, ","); + } + + if(long_output) { + if(params.opt_completion) + dot = LONG_COMP_FIELDS; + else + dot = LONG_FIELDS; + + params.opt_field_list = + xrealloc(params.opt_field_list, + (params.opt_field_list==NULL? 0 : + strlen(params.opt_field_list)) + + strlen(dot)+1); + strcat(params.opt_field_list, dot); + strcat(params.opt_field_list, ","); + } + if (params.opt_field_list==NULL) { if (params.opt_dump || params.opt_expire) goto endopt; - params.opt_field_list = xmalloc(sizeof(DEFAULT_FIELDS)+1); - strcpy(params.opt_field_list, DEFAULT_FIELDS); + if(params.opt_completion) + dot = DEFAULT_COMP_FIELDS; + else + dot = DEFAULT_FIELDS; + params.opt_field_list = xmalloc(strlen(dot)+1); + strcpy(params.opt_field_list, dot); strcat(params.opt_field_list, ","); } + start = params.opt_field_list; while ((end = strstr(start, ","))) { *end = 0; @@ -1075,24 +876,14 @@ void do_dump(void) { ListIterator itr = NULL; ListIterator itr_step = NULL; - job_rec_t *job = NULL; - step_rec_t *step = NULL; + jobacct_job_rec_t *job = NULL; + jobacct_step_rec_t *step = NULL; struct tm ts; itr = list_iterator_create(jobs); while((job = list_next(itr))) { - if (!params.opt_dup) - if (job->jobnum_superseded) { - if (params.opt_verbose > 1) - fprintf(stderr, - "Note: Skipping older" - " job %u dated %d\n", - job->header.jobnum, - (int)job->header.job_submit); - continue; - } if (params.opt_uid>=0) - if (job->header.uid != params.opt_uid) + if (job->uid != params.opt_uid) continue; if(job->sacct.min_cpu == (float)NO_VAL) job->sacct.min_cpu = 0; @@ -1106,71 +897,67 @@ void do_dump(void) /* JOB_START */ if (job->show_full) { - if (!job->job_start_seen && job->job_step_seen) { - /* If we only saw JOB_TERMINATED, the - * job was probably canceled. 
*/ - fprintf(stderr, - "Error: No JOB_START record for " - "job %u\n", - job->header.jobnum); - } - _dump_header(job->header); + gmtime_r(&job->start, &ts); + printf("%u %s %04d%02d%02d%02d%02d%02d %d %s %s ", + job->jobid, + job->partition, + 1900+(ts.tm_year), + 1+(ts.tm_mon), + ts.tm_mday, + ts.tm_hour, + ts.tm_min, + ts.tm_sec, + (int)job->submit, + job->blockid, /* block id */ + "-"); /* reserved 1 */ + printf("JOB_START 1 16 %d %d %s %d %d %d %s %s\n", - job->header.uid, - job->header.gid, + job->uid, + job->gid, job->jobname, job->track_steps, job->priority, - job->ncpus, + job->alloc_cpus, job->nodes, job->account); } /* JOB_STEP */ itr_step = list_iterator_create(job->steps); while((step = list_next(itr_step))) { - if (step->status == JOB_RUNNING && - job->job_terminated_seen) { - step->status = JOB_FAILED; - step->exitcode=1; - } - _dump_header(step->header); + gmtime_r(&step->start, &ts); + printf("%u %s %04d%02d%02d%02d%02d%02d %d %s %s ", + job->jobid, + job->partition, + 1900+(ts.tm_year), + 1+(ts.tm_mon), + ts.tm_mday, + ts.tm_hour, + ts.tm_min, + ts.tm_sec, + (int)job->submit, + job->blockid, /* block id */ + "-"); /* reserved 1 */ if(step->end == 0) step->end = job->end; gmtime_r(&step->end, &ts); printf("JOB_STEP 1 50 %u %04d%02d%02d%02d%02d%02d ", - step->stepnum, + step->stepid, 1900+(ts.tm_year), 1+(ts.tm_mon), ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec); printf("%s %d %d %d %d ", - decode_status_int_abbrev(step->status), + job_state_string_compact(step->state), step->exitcode, - step->ntasks, + step->ncpus, step->ncpus, step->elapsed); - printf("%d %d %d %d %d %d ", + printf("%d %d %d %d %d %d %d %d", step->tot_cpu_sec, step->tot_cpu_usec, - (int)step->rusage.ru_utime.tv_sec, - (int)step->rusage.ru_utime.tv_usec, - (int)step->rusage.ru_stime.tv_sec, - (int)step->rusage.ru_stime.tv_usec); - printf("%d %d %d %d %d %d %d %d %d " - "%d %d %d %d %d %d %d ", - (int)step->rusage.ru_maxrss, - (int)step->rusage.ru_ixrss, - (int)step->rusage.ru_idrss, - (int)step->rusage.ru_isrss, - (int)step->rusage.ru_minflt, - (int)step->rusage.ru_majflt, - (int)step->rusage.ru_nswap, - (int)step->rusage.ru_inblock, - (int)step->rusage.ru_oublock, - (int)step->rusage.ru_msgsnd, - (int)step->rusage.ru_msgrcv, - (int)step->rusage.ru_nsignals, - (int)step->rusage.ru_nvcsw, - (int)step->rusage.ru_nivcsw, + (int)step->user_cpu_sec, + (int)step->user_cpu_usec, + (int)step->sys_cpu_sec, + (int)step->sys_cpu_usec, step->sacct.max_vsize/1024, step->sacct.max_rss/1024); /* Data added in Slurm v1.1 */ @@ -1197,7 +984,19 @@ void do_dump(void) list_iterator_destroy(itr_step); /* JOB_TERMINATED */ if (job->show_full) { - _dump_header(job->header); + gmtime_r(&job->start, &ts); + printf("%u %s %04d%02d%02d%02d%02d%02d %d %s %s ", + job->jobid, + job->partition, + 1900+(ts.tm_year), + 1+(ts.tm_mon), + ts.tm_mday, + ts.tm_hour, + ts.tm_min, + ts.tm_sec, + (int)job->submit, + job->blockid, /* block id */ + "-"); /* reserved 1 */ gmtime_r(&job->end, &ts); printf("JOB_TERMINATED 1 50 %d ", job->elapsed); @@ -1205,34 +1004,18 @@ void do_dump(void) 1900+(ts.tm_year), 1+(ts.tm_mon), ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec); printf("%s %d %d %d %d ", - decode_status_int_abbrev(job->status), + job_state_string_compact(job->state), job->exitcode, - job->ntasks, - job->ncpus, + job->alloc_cpus, + job->alloc_cpus, job->elapsed); - printf("%d %d %d %d %d %d ", + printf("%d %d %d %d %d %d %d %d", job->tot_cpu_sec, job->tot_cpu_usec, - (int)job->rusage.ru_utime.tv_sec, - (int)job->rusage.ru_utime.tv_usec, - 
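As an aside on the JOB_START and JOB_STEP dump records above: with the old _dump_header() helper gone, the timestamp is now formatted inline from a struct tm filled in by gmtime_r(); the +1900 and +1 adjustments are needed because tm_year counts years since 1900 and tm_mon runs 0-11. A minimal sketch of the same conversion, with an illustrative helper name that is not from the patch:

    #include <stdio.h>
    #include <time.h>

    /* Hypothetical helper: render a time_t as the 14-character UTC stamp
     * (YYYYMMDDHHMMSS) used by the sacct dump records. */
    static void utc_stamp(time_t t, char *buf, size_t len)
    {
        struct tm ts;

        gmtime_r(&t, &ts);      /* thread-safe UTC breakdown */
        snprintf(buf, len, "%04d%02d%02d%02d%02d%02d",
                 1900 + ts.tm_year,     /* tm_year counts from 1900 */
                 1 + ts.tm_mon,         /* tm_mon is 0..11 */
                 ts.tm_mday, ts.tm_hour, ts.tm_min, ts.tm_sec);
    }

    int main(void)
    {
        char buf[32];
        utc_stamp(time(NULL), buf, sizeof(buf));
        printf("%s\n", buf);
        return 0;
    }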
(int)job->rusage.ru_stime.tv_sec, - (int)job->rusage.ru_stime.tv_usec); - printf("%d %d %d %d %d %d %d %d %d " - "%d %d %d %d %d %d %d ", - (int)job->rusage.ru_maxrss, - (int)job->rusage.ru_ixrss, - (int)job->rusage.ru_idrss, - (int)job->rusage.ru_isrss, - (int)job->rusage.ru_minflt, - (int)job->rusage.ru_majflt, - (int)job->rusage.ru_nswap, - (int)job->rusage.ru_inblock, - (int)job->rusage.ru_oublock, - (int)job->rusage.ru_msgsnd, - (int)job->rusage.ru_msgrcv, - (int)job->rusage.ru_nsignals, - (int)job->rusage.ru_nvcsw, - (int)job->rusage.ru_nivcsw, + (int)job->user_cpu_sec, + (int)job->user_cpu_usec, + (int)job->sys_cpu_sec, + (int)job->sys_cpu_usec, job->sacct.max_vsize/1024, job->sacct.max_rss/1024); /* Data added in Slurm v1.1 */ @@ -1261,429 +1044,46 @@ void do_dump(void) list_iterator_destroy(itr); } -/* do_expire() -- purge expired data from the accounting log file - * - * What we're doing: - * 1. Open logfile.orig - * 2. stat logfile.orig - * - confirm that it's not a sym link - * - capture the ownership and permissions - * 3. scan logfile.orig for JOB_TERMINATED records with F_TIMESTAMP dates - * that precede the specified expiration date. Build exp_table as - * a list of expired jobs. - * 4. Open logfile.expired for append - * 5. Create logfile.new as ".new.<logfile>" (output with line buffering) - * 6. Re-scan logfile.orig, writing - * - Expired job records to logfile.expired - * - Other job records to logfile.new - * 7. Rename logfile.orig as ".old.<logfile>" - * 8. Rename logfile.new as "<logfile>" - * 9. Execute "scontrol reconfigure" which will cause slurmctld to - * start writing to logfile.new - * 10. fseek(ftell(logfile.orig)) to clear EOF - * 11. Copy any new records from logfile.orig to logfile.new - * 12. Close logfile.expired, logfile.new - * 13. 
Unlink .old.<logfile> - */ - -void do_expire(void) +void do_dump_completion(void) { - char line[BUFFER_SIZE], - *f[EXPIRE_READ_LENGTH], - *fptr = NULL, - *logfile_name = NULL, - *old_logfile_name = NULL; - int file_err=0, - new_file, - i = 0; - expired_rec_t *exp_rec = NULL; - expired_rec_t *exp_rec2 = NULL; - List keep_list = list_create(_destroy_exp); - List exp_list = list_create(_destroy_exp); - List other_list = list_create(_destroy_exp); - struct stat statbuf; - mode_t prot = 0600; - uid_t uid; - gid_t gid; - FILE *expired_logfile = NULL, - *new_logfile = NULL; - FILE *fd = NULL; - int lc=0; - int rec_type = -1; ListIterator itr = NULL; - ListIterator itr2 = NULL; - char *temp = NULL; - - /* Figure out our expiration date */ - time_t expiry; - expiry = time(NULL)-params.opt_expire; - if (params.opt_verbose) - fprintf(stderr, "Purging jobs completed prior to %d\n", - (int)expiry); - - /* Open the current or specified logfile, or quit */ - fd = _open_log_file(); - if (stat(params.opt_filein, &statbuf)) { - perror("stat'ing logfile"); - goto finished; - } - if ((statbuf.st_mode & S_IFLNK) == S_IFLNK) { - fprintf(stderr, "%s is a symbolic link; --expire requires " - "a hard-linked file name\n", params.opt_filein); - goto finished; - } - if (!(statbuf.st_mode & S_IFREG)) { - fprintf(stderr, "%s is not a regular file; --expire " - "only works on accounting log files\n", - params.opt_filein); - goto finished; - } - prot = statbuf.st_mode & 0777; - gid = statbuf.st_gid; - uid = statbuf.st_uid; - old_logfile_name = _prefix_filename(params.opt_filein, ".old."); - if (stat(old_logfile_name, &statbuf)) { - if (errno != ENOENT) { - fprintf(stderr,"Error checking for %s: ", - old_logfile_name); - perror(""); - goto finished; - } - } else { - fprintf(stderr, "Warning! 
%s exists -- please remove " - "or rename it before proceeding\n", - old_logfile_name); - goto finished; - } - - /* create our initial buffer */ - while (fgets(line, BUFFER_SIZE, fd)) { - lc++; - fptr = line; /* break the record into NULL- - terminated strings */ - exp_rec = xmalloc(sizeof(expired_rec_t)); - exp_rec->line = xstrdup(line); - - for (i = 0; i < EXPIRE_READ_LENGTH; i++) { - f[i] = fptr; - fptr = strstr(fptr, " "); - if (fptr == NULL) - break; - else - *fptr++ = 0; - } + jobcomp_job_rec_t *job = NULL; - exp_rec->job = atoi(f[F_JOB]); - exp_rec->job_submit = atoi(f[F_JOB_SUBMIT]); - - rec_type = atoi(f[F_RECTYPE]); - /* Odd, but complain some other time */ - if (rec_type == JOB_TERMINATED) { - if (expiry < atoi(f[F_TIMESTAMP])) { - list_append(keep_list, exp_rec); - continue; - } - if (list_count(selected_parts)) { - itr = list_iterator_create(selected_parts); - while((temp = list_next(itr))) - if(!strcasecmp(f[F_PARTITION], temp)) - break; - list_iterator_destroy(itr); - if(!temp) { - list_append(keep_list, exp_rec); - continue; - } /* no match */ - } - list_append(exp_list, exp_rec); - if (params.opt_verbose > 2) - fprintf(stderr, "Selected: %8d %d\n", - exp_rec->job, - (int)exp_rec->job_submit); - } else { - list_append(other_list, exp_rec); - } - } - if (!list_count(exp_list)) { - printf("No job records were purged.\n"); - goto finished; - } - logfile_name = xmalloc(strlen(params.opt_filein)+sizeof(".expired")); - sprintf(logfile_name, "%s.expired", params.opt_filein); - new_file = stat(logfile_name, &statbuf); - if ((expired_logfile = fopen(logfile_name, "a"))==NULL) { - fprintf(stderr, "Error while opening %s", - logfile_name); - perror(""); - xfree(logfile_name); - goto finished; - } - - if (new_file) { /* By default, the expired file looks like the log */ - chmod(logfile_name, prot); - chown(logfile_name, uid, gid); - } - xfree(logfile_name); - - logfile_name = _prefix_filename(params.opt_filein, ".new."); - if ((new_logfile = fopen(logfile_name, "w"))==NULL) { - fprintf(stderr, "Error while opening %s", - logfile_name); - perror(""); - fclose(expired_logfile); - goto finished; - } - chmod(logfile_name, prot); /* preserve file protection */ - chown(logfile_name, uid, gid); /* and ownership */ - /* Use line buffering to allow us to safely write - * to the log file at the same time as slurmctld. 
*/ - if (setvbuf(new_logfile, NULL, _IOLBF, 0)) { - perror("setvbuf()"); - fclose(expired_logfile); - goto finished2; - } - - list_sort(exp_list, (ListCmpF) _cmp_jrec); - list_sort(keep_list, (ListCmpF) _cmp_jrec); - - if (params.opt_verbose > 2) { - fprintf(stderr, "--- contents of exp_list ---"); - itr = list_iterator_create(exp_list); - while((exp_rec = list_next(itr))) { - if (!(i%5)) - fprintf(stderr, "\n"); - else - fprintf(stderr, "\t"); - fprintf(stderr, "%d", exp_rec->job); - } - fprintf(stderr, "\n---- end of exp_list ---\n"); - list_iterator_destroy(itr); - } - /* write the expired file */ - itr = list_iterator_create(exp_list); - while((exp_rec = list_next(itr))) { - itr2 = list_iterator_create(other_list); - while((exp_rec2 = list_next(itr2))) { - if((exp_rec2->job != exp_rec->job) - || (exp_rec2->job_submit != exp_rec->job_submit)) - continue; - if (fputs(exp_rec2->line, expired_logfile)<0) { - perror("writing expired_logfile"); - list_iterator_destroy(itr2); - list_iterator_destroy(itr); - fclose(expired_logfile); - goto finished2; - } - list_remove(itr2); - _destroy_exp(exp_rec2); - } - list_iterator_destroy(itr2); - if (fputs(exp_rec->line, expired_logfile)<0) { - perror("writing expired_logfile"); - list_iterator_destroy(itr); - fclose(expired_logfile); - goto finished2; - } - } - list_iterator_destroy(itr); - fclose(expired_logfile); - - /* write the new log */ - itr = list_iterator_create(keep_list); - while((exp_rec = list_next(itr))) { - itr2 = list_iterator_create(other_list); - while((exp_rec2 = list_next(itr2))) { - if(exp_rec2->job != exp_rec->job) - continue; - if (fputs(exp_rec2->line, new_logfile)<0) { - perror("writing keep_logfile"); - list_iterator_destroy(itr2); - list_iterator_destroy(itr); - goto finished2; - } - list_remove(itr2); - _destroy_exp(exp_rec2); - } - list_iterator_destroy(itr2); - if (fputs(exp_rec->line, new_logfile)<0) { - perror("writing keep_logfile"); - list_iterator_destroy(itr); - goto finished2; - } + itr = list_iterator_create(jobs); + while((job = list_next(itr))) { + printf("JOB %u %s %s %s %s(%u) %u(%s) %u %s %s %s %s", + job->jobid, job->partition, job->start_time, + job->end_time, job->uid_name, job->uid, job->gid, + job->gid_name, job->node_cnt, job->nodelist, + job->jobname, job->state, + job->timelimit); +#ifdef HAVE_BG + if(job->blockid) + printf(" %s %s %s %s %u %s %s", + job->blockid, job->connection, job->reboot, + job->rotate, job->max_procs, job->geo, + job->bg_start_point); +#endif + printf("\n"); } list_iterator_destroy(itr); - - if (rename(params.opt_filein, old_logfile_name)) { - perror("renaming logfile to .old."); - goto finished2; - } - if (rename(logfile_name, params.opt_filein)) { - perror("renaming new logfile"); - /* undo it? 
*/ - if (!rename(old_logfile_name, params.opt_filein)) - fprintf(stderr, "Please correct the problem " - "and try again"); - else - fprintf(stderr, "SEVERE ERROR: Current accounting " - "log may have been renamed %s;\n" - "please rename it to \"%s\" if necessary, " - "and try again\n", - old_logfile_name, params.opt_filein); - goto finished2; - } - fflush(new_logfile); /* Flush the buffers before forking */ - fflush(fd); - - file_err = slurm_reconfigure (); - - if (file_err) { - file_err = 1; - fprintf(stderr, "Error: Attempt to reconfigure " - "SLURM failed.\n"); - if (rename(old_logfile_name, params.opt_filein)) { - perror("renaming logfile from .old."); - goto finished2; - } - - } - if (fseek(fd, 0, SEEK_CUR)) { /* clear EOF */ - perror("looking for late-arriving records"); - goto finished2; - } - while (fgets(line, BUFFER_SIZE, fd)) { - if (fputs(line, new_logfile)<0) { - perror("writing final records"); - goto finished2; - } - } - - printf("%d jobs expired.\n", list_count(exp_list)); -finished2: - fclose(new_logfile); - if (!file_err) { - if (unlink(old_logfile_name) == -1) - error("Unable to unlink old logfile %s: %m", - old_logfile_name); - } -finished: - fclose(fd); - list_destroy(exp_list); - list_destroy(keep_list); - list_destroy(other_list); - xfree(old_logfile_name); - xfree(logfile_name); } -void do_fdump(char* f[], int lc) +/* do_expire() -- purge expired data from the accounting log file + */ + +void do_expire(int dummy) { - int i=0, j=0; - char **type; - char *header[] = {"job", /* F_JOB */ - "partition", /* F_PARTITION */ - "job_submit", /* F_JOB_SUBMIT */ - "timestamp", /* F_TIMESTAMP */ - "uid", /* F_UIDGID */ - "gid", /* F_UIDGID */ - "BlockID", /* F_BLOCKID */ - "reserved-2",/* F_RESERVED1 */ - "recordType",/* F_RECTYPE */ - NULL}; - - char *start[] = {"jobName", /* F_JOBNAME */ - "TrackSteps", /* F_TRACK_STEPS */ - "priority", /* F_PRIORITY */ - "ncpus", /* F_NCPUS */ - "nodeList", /* F_NODES */ - "account", /* F_JOB_ACCOUNT */ - NULL}; - - char *step[] = {"jobStep", /* F_JOBSTEP */ - "status", /* F_STATUS */ - "exitcode", /* F_EXITCODE */ - "ntasks", /* F_NTASKS */ - "ncpus", /* F_STEPNCPUS */ - "elapsed", /* F_ELAPSED */ - "cpu_sec", /* F_CPU_SEC */ - "cpu_usec", /* F_CPU_USEC */ - "user_sec", /* F_USER_SEC */ - "user_usec", /* F_USER_USEC */ - "sys_sec", /* F_SYS_SEC */ - "sys_usec", /* F_SYS_USEC */ - "rss", /* F_RSS */ - "ixrss", /* F_IXRSS */ - "idrss", /* F_IDRSS */ - "isrss", /* F_ISRSS */ - "minflt", /* F_MINFLT */ - "majflt", /* F_MAJFLT */ - "nswap", /* F_NSWAP */ - "inblocks", /* F_INBLOCKS */ - "oublocks", /* F_OUTBLOCKS */ - "msgsnd", /* F_MSGSND */ - "msgrcv", /* F_MSGRCV */ - "nsignals", /* F_NSIGNALS */ - "nvcsw", /* F_VCSW */ - "nivcsw", /* F_NIVCSW */ - "max_vsize", /* F_MAX_VSIZE */ - "max_vsize_task", /* F_MAX_VSIZE_TASK */ - "ave_vsize", /* F_AVE_VSIZE */ - "max_rss", /* F_MAX_RSS */ - "max_rss_task", /* F_MAX_RSS_TASK */ - "ave_rss", /* F_AVE_RSS */ - "max_pages", /* F_MAX_PAGES */ - "max_pages_task", /* F_MAX_PAGES_TASK */ - "ave_pages", /* F_AVE_PAGES */ - "min_cputime", /* F_MIN_CPU */ - "min_cputime_task", /* F_MIN_CPU_TASK */ - "ave_cputime", /* F_AVE_RSS */ - "StepName", /* F_STEPNAME */ - "StepNodes", /* F_STEPNODES */ - "max_vsize_node", /* F_MAX_VSIZE_NODE */ - "max_rss_node", /* F_MAX_RSS_NODE */ - "max_pages_node", /* F_MAX_PAGES_NODE */ - "min_cputime_node", /* F_MIN_CPU_NODE */ - "account", /* F_STEP_ACCOUNT */ - "requid", /* F_STEP_REQUID */ - NULL}; - - char *suspend[] = {"Suspend/Run time", /* F_TOT_ELAPSED */ - "status", /* 
F_STATUS */ - NULL}; - - char *term[] = {"totElapsed", /* F_TOT_ELAPSED */ - "status", /* F_STATUS */ - "requid", /* F_JOB_REQUID */ - NULL}; - - i = atoi(f[F_RECTYPE]); - printf("\n------- Line %d %s -------\n", lc, _convert_type(i)); - - for(j=0; j < HEADER_LENGTH; j++) - printf("%12s: %s\n", header[j], f[j]); - switch(i) { - case JOB_START: - type = start; - j = JOB_START_LENGTH; - break; - case JOB_STEP: - type = step; - j = JOB_STEP_LENGTH; - break; - case JOB_SUSPEND: - type = suspend; - j = JOB_TERM_LENGTH; - case JOB_TERMINATED: - type = term; - j = JOB_TERM_LENGTH; - break; - default: - while(f[j]) { - printf(" Field[%02d]: %s\n", j, f[j]); - j++; - } - return; + if (dummy == NO_VAL) { + /* just load the symbol, don't want to execute */ + slurm_reconfigure(); } - - for(i=HEADER_LENGTH; i < j; i++) - printf("%12s: %s\n", type[i-HEADER_LENGTH], f[i]); + + if(params.opt_completion) + g_slurm_jobcomp_archive(selected_parts, ¶ms); + else + jobacct_storage_g_archive(acct_db_conn, + selected_parts, ¶ms); } void do_help(void) @@ -1715,61 +1115,26 @@ void do_help(void) void do_list(void) { int do_jobsteps = 1; - int rc = 0; ListIterator itr = NULL; ListIterator itr_step = NULL; - job_rec_t *job = NULL; - step_rec_t *step = NULL; + jobacct_job_rec_t *job = NULL; + jobacct_step_rec_t *step = NULL; if (params.opt_total) do_jobsteps = 0; - itr = list_iterator_create(jobs); while((job = list_next(itr))) { - if (!params.opt_dup) - if (job->jobnum_superseded) { - if (params.opt_verbose > 1) - fprintf(stderr, - "Note: Skipping older" - " job %u dated %d\n", - job->header.jobnum, - (int)job->header.job_submit); - continue; - } - if (!job->job_start_seen && job->job_step_seen) { - /* If we only saw JOB_TERMINATED, the job was - * probably canceled. */ - fprintf(stderr, - "Error: No JOB_START record for job %u\n", - job->header.jobnum); - if (rc<ERROR) - rc = ERROR; - } - if (params.opt_verbose > 1) { - if (!job->job_start_seen) - fprintf(stderr, - "Note: No JOB_START record for " - "job %u\n", - job->header.jobnum); - if (!job->job_step_seen) - fprintf(stderr, - "Note: No JOB_STEP record for " - "job %u\n", - job->header.jobnum); - if (!job->job_terminated_seen) - fprintf(stderr, - "Note: No JOB_TERMINATED record for " - "job %u\n", - job->header.jobnum); - } - if (params.opt_uid >= 0 && (job->header.uid != params.opt_uid)) + /* FIX ME: this should be handled while getting the + data, not afterwards. 
+ */ + if (params.opt_uid >= 0 && (job->uid != params.opt_uid)) continue; - if (params.opt_gid >= 0 && (job->header.gid != params.opt_gid)) + if (params.opt_gid >= 0 && (job->gid != params.opt_gid)) continue; if(job->sacct.min_cpu == NO_VAL) job->sacct.min_cpu = 0; - + if(list_count(job->steps)) { job->sacct.ave_cpu /= list_count(job->steps); job->sacct.ave_rss /= list_count(job->steps); @@ -1779,7 +1144,7 @@ void do_list(void) if (job->show_full) { if (params.opt_state_list) { - if(!selected_status[job->status]) + if(!selected_state[job->state]) continue; } print_fields(JOB, job); @@ -1788,12 +1153,8 @@ void do_list(void) if (do_jobsteps && (job->track_steps || !job->show_full)) { itr_step = list_iterator_create(job->steps); while((step = list_next(itr_step))) { - if (step->status == JOB_RUNNING - && job->job_terminated_seen) { - step->status = JOB_FAILED; - } if (params.opt_state_list) { - if(!selected_status[step->status]) + if(!selected_state[step->state]) continue; } if(step->end == 0) @@ -1806,12 +1167,36 @@ void do_list(void) list_iterator_destroy(itr); } +/* do_list_completion() -- List the assembled data + * + * In: Nothing explicit. + * Out: void. + * + * At this point, we have already selected the desired data, + * so we just need to print it for the user. + */ +void do_list_completion(void) +{ + ListIterator itr = NULL; + jobcomp_job_rec_t *job = NULL; + + itr = list_iterator_create(jobs); + while((job = list_next(itr))) { + if (params.opt_uid >= 0 && (job->uid != params.opt_uid)) + continue; + if (params.opt_gid >= 0 && (job->gid != params.opt_gid)) + continue; + print_fields(JOBCOMP, job); + } + list_iterator_destroy(itr); +} + void do_stat() { ListIterator itr = NULL; uint32_t jobid = 0; uint32_t stepid = 0; - selected_step_t *selected_step = NULL; + jobacct_selected_step_t *selected_step = NULL; itr = list_iterator_create(selected_steps); while((selected_step = list_next(itr))) { @@ -1827,16 +1212,22 @@ void do_stat() void sacct_init() { int i=0; - jobs = list_create(destroy_job); selected_parts = list_create(_destroy_parts); selected_steps = list_create(_destroy_steps); - for(i=0; i<STATUS_COUNT; i++) - selected_status[i] = 0; + for(i=0; i<STATE_COUNT; i++) + selected_state[i] = 0; } void sacct_fini() { - list_destroy(jobs); + if(jobs) + list_destroy(jobs); list_destroy(selected_parts); list_destroy(selected_steps); + if(params.opt_completion) + g_slurm_jobcomp_fini(); + else { + acct_storage_g_close_connection(&acct_db_conn); + slurm_acct_storage_fini(); + } } diff --git a/src/sacct/print.c b/src/sacct/print.c index 6a55de7a3..61a70f8e4 100644 --- a/src/sacct/print.c +++ b/src/sacct/print.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
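As an aside before the print.c hunks that follow: the field printers switch from right- to left-aligned columns ("%8s" becomes "%-8s"), and print_exitcode() stops printing the raw integer, instead splitting it into an "exitstatus:signal" pair with the wait(2) status macros. A minimal sketch of that decoding; the function name and sample status values are illustrative only, not taken from the patch:

    #include <stdio.h>
    #include <sys/wait.h>

    /* Hypothetical illustration of the "exitstatus:signal" formatting used
     * by print_exitcode(): the wait-style status is split into the exit
     * status and, when the process died on a signal, the terminating signal. */
    static void format_exitcode(int status, char *buf, size_t len)
    {
        unsigned term_sig = 0;

        if (WIFSIGNALED(status))
            term_sig = WTERMSIG(status);
        snprintf(buf, len, "%u:%u", WEXITSTATUS(status), term_sig);
    }

    int main(void)
    {
        char buf[16];
        format_exitcode(0, buf, sizeof(buf));   /* yields "0:0" - clean exit        */
        printf("%s\n", buf);
        format_exitcode(9, buf, sizeof(buf));   /* yields "0:9" - killed by signal 9 */
        printf("%s\n", buf);
        return 0;
    }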
@@ -40,7 +40,7 @@ #include "sacct.h" #include "src/common/parse_time.h" #include "slurm.h" -#define FORMAT_STRING_SIZE 50 +#define FORMAT_STRING_SIZE 34 void _elapsed_time(long secs, long usecs, char *str); @@ -97,98 +97,129 @@ void print_fields(type_t type, void *object) void print_cpu(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char str[FORMAT_STRING_SIZE]; switch(type) { case HEADLINE: - printf("%15s", "Cpu"); + printf("%-15s", "Cpu"); break; case UNDERSCORE: - printf("%15s", "---------------"); + printf("%-15s", "---------------"); break; case JOB: _elapsed_time(job->tot_cpu_sec, job->tot_cpu_usec, str); - printf("%15s", str); + printf("%-15s", str); break; case JOBSTEP: _elapsed_time(step->tot_cpu_sec, step->tot_cpu_usec, str); - printf("%15s", str); + printf("%-15s", str); + break; + default: + printf("%-15s", "n/a"); break; } } void print_elapsed(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char str[FORMAT_STRING_SIZE]; switch(type) { case HEADLINE: - printf("%15s", "Elapsed"); + printf("%-15s", "Elapsed"); break; case UNDERSCORE: - printf("%15s", "---------------"); + printf("%-15s", "---------------"); break; case JOB: _elapsed_time(job->elapsed, 0, str); - printf("%15s", str); + printf("%-15s", str); break; case JOBSTEP: _elapsed_time(step->elapsed, 0, str); - printf("%15s", str); + printf("%-15s", str); + break; + default: + printf("%-15s", "n/a"); break; } } void print_exitcode(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + char tmp[9]; + uint16_t term_sig = 0; switch(type) { case HEADLINE: - printf("%8s", "ExitCode"); + printf("%-8s", "ExitCode"); break; case UNDERSCORE: - printf("%8s", "--------"); + printf("%-8s", "--------"); break; case JOB: - printf("%8d", job->exitcode); + if (WIFSIGNALED(job->exitcode)) + term_sig = WTERMSIG(job->exitcode); + + snprintf(tmp, sizeof(tmp), "%u:%u", + WEXITSTATUS(job->exitcode), term_sig); + printf("%-8s", tmp); break; case JOBSTEP: - printf("%8d", step->exitcode); + if (WIFSIGNALED(step->exitcode)) + term_sig = WTERMSIG(step->exitcode); + + snprintf(tmp, sizeof(tmp), "%u:%u", + WEXITSTATUS(step->exitcode), term_sig); + printf("%-8s", tmp); + break; + default: + printf("%-8s", "n/a"); break; } } void print_gid(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + int32_t gid = -1; switch(type) { case HEADLINE: - printf("%5s", "Gid"); + printf("%-5s", "Gid"); break; case UNDERSCORE: - printf("%5s", "-----"); + printf("%-5s", "-----"); break; case JOB: - printf("%5d", job->header.gid); + gid = job->gid; + break; + case JOBCOMP: + printf("%-5u", jobcomp->gid); break; case JOBSTEP: - printf("s%5d", step->header.gid); + printf("%-5s", " "); + break; + default: + printf("%-5s", "n/a"); break; } + + if(gid != -1) + printf("%-5d", gid); } void print_group(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t 
*step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; int gid = -1; char *tmp="(unknown)"; struct group *gr = NULL; @@ -201,10 +232,16 @@ void print_group(type_t type, void *object) printf("%-9s", "---------"); break; case JOB: - gid = job->header.gid; + gid = job->gid; + break; + case JOBCOMP: + printf("%-9s", jobcomp->gid_name); break; case JOBSTEP: - gid = step->header.gid; + printf("%-9s", " "); + break; + default: + printf("%-9s", "n/a"); break; } if(gid != -1) { @@ -214,125 +251,34 @@ void print_group(type_t type, void *object) } } -void print_idrss(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - struct rusage rusage; - char outbuf[FORMAT_STRING_SIZE]; - rusage.ru_idrss = 0; - - switch(type) { - case HEADLINE: - printf("%8s", "Idrss"); - return; - break; - case UNDERSCORE: - printf("%8s", "------"); - return; - break; - case JOB: - rusage = job->rusage; - break; - case JOBSTEP: - rusage = step->rusage; - break; - } - convert_num_unit((float)rusage.ru_idrss, outbuf, sizeof(outbuf), - UNIT_NONE); - printf("%8s", outbuf); -} - -void print_inblocks(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%9s", "Inblocks"); - break; - case UNDERSCORE: - printf("%9s", "---------"); - break; - case JOB: - printf("%9ld", job->rusage.ru_inblock); - break; - case JOBSTEP: - printf("%9ld", step->rusage.ru_inblock); - break; - } -} - -void print_isrss(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%8s", "Isrss"); - break; - case UNDERSCORE: - printf("%8s", "------"); - break; - case JOB: - printf("%8ld", job->rusage.ru_isrss); - break; - case JOBSTEP: - printf("%8ld", step->rusage.ru_isrss); - break; - } - -} - -void print_ixrss(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%8s", "Ixrss"); - break; - case UNDERSCORE: - printf("%8s", "------"); - break; - case JOB: - printf("%8ld", job->rusage.ru_ixrss); - break; - case JOBSTEP: - printf("%8ld", step->rusage.ru_ixrss); - break; - } - -} - void print_job(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; switch(type) { case HEADLINE: - printf("%8s", "Job"); + printf("%-8s", "Job"); break; case UNDERSCORE: - printf("%8s", "--------"); + printf("%-8s", "--------"); break; case JOB: - printf("%8d", job->header.jobnum); + printf("%-8u", job->jobid); break; case JOBSTEP: - printf("%8d", step->header.jobnum); + printf("%-8s", " "); + break; + default: + printf("%-8s", "n/a"); break; } } void print_name(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; switch(type) { case HEADLINE: @@ -349,6 +295,15 @@ void print_name(type_t type, void *object) else printf("%-15.15s...", job->jobname); + break; + case JOBCOMP: + if(!jobcomp->jobname) + printf("%-18s", "unknown"); + else if(strlen(jobcomp->jobname)<19) + printf("%-18s", 
jobcomp->jobname); + else + printf("%-15.15s...", jobcomp->jobname); + break; case JOBSTEP: if(!step->stepname) @@ -358,13 +313,17 @@ void print_name(type_t type, void *object) else printf("%-15.15s...", step->stepname); break; + default: + printf("%-18s", "n/a"); + break; } } void print_jobid(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char outbuf[10]; switch(type) { @@ -375,148 +334,54 @@ void print_jobid(type_t type, void *object) printf("%-10s", "----------"); break; case JOB: - printf("%-10d", job->header.jobnum); + printf("%-10u", job->jobid); + break; + case JOBCOMP: + printf("%-10u", jobcomp->jobid); break; case JOBSTEP: snprintf(outbuf, sizeof(outbuf), "%u.%u", - step->header.jobnum, - step->stepnum); + step->jobid, + step->stepid); printf("%-10s", outbuf); break; - } - -} - -void print_majflt(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%8s", "Majflt"); - break; - case UNDERSCORE: - printf("%8s", "------"); - break; - case JOB: - printf("%8ld", job->rusage.ru_majflt); - break; - case JOBSTEP: - printf("%8ld", step->rusage.ru_majflt); - break; - } -} - -void print_minflt(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%8s", "Minflt"); - break; - case UNDERSCORE: - printf("%8s", "------"); - break; - case JOB: - printf("%8ld", job->rusage.ru_minflt); - break; - case JOBSTEP: - printf("%8ld", step->rusage.ru_minflt); + default: + printf("%-10s", "n/a"); break; } -} -void print_msgrcv(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%9s", "Msgrcv"); - break; - case UNDERSCORE: - printf("%9s", "---------"); - break; - case JOB: - printf("%9ld", job->rusage.ru_msgrcv); - break; - case JOBSTEP: - printf("%9ld", step->rusage.ru_msgrcv); - break; - } -} - -void print_msgsnd(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%9s", "Msgsnd"); - break; - case UNDERSCORE: - printf("%9s", "---------"); - break; - case JOB: - printf("%9ld", job->rusage.ru_msgsnd); - break; - case JOBSTEP: - printf("%9ld", step->rusage.ru_msgsnd); - break; - } } void print_ncpus(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; switch(type) { case HEADLINE: - printf("%7s", "Ncpus"); + printf("%-7s", "Ncpus"); break; case UNDERSCORE: - printf("%7s", "-------"); + printf("%-7s", "-------"); break; case JOB: - printf("%7d", job->ncpus); + printf("%-7u", job->alloc_cpus); break; case JOBSTEP: - printf("%7d", step->ncpus); - break; - } -} - -void print_nivcsw(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%9s", "Nivcsw"); - break; - case UNDERSCORE: - printf("%9s", "---------"); + printf("%-7u", step->ncpus); break; - case JOB: - printf("%9ld", 
job->rusage.ru_nivcsw); - break; - case JOBSTEP: - printf("%9ld", step->rusage.ru_nivcsw); + default: + printf("%-7s", "n/a"); break; } } void print_nodes(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + switch(type) { case HEADLINE: printf("%-30s", "Nodes"); @@ -527,121 +392,69 @@ void print_nodes(type_t type, void *object) case JOB: printf("%-30s", job->nodes); break; - case JOBSTEP: - printf("%-30s", " "); - break; - } -} - -void print_nsignals(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%9s", "Nsignals"); - break; - case UNDERSCORE: - printf("%9s", "---------"); - break; - case JOB: - printf("%9ld", job->rusage.ru_nsignals); + case JOBCOMP: + printf("%-30s", jobcomp->nodelist); break; case JOBSTEP: - printf("%9ld", step->rusage.ru_nsignals); - break; - } -} - -void print_nswap(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%8s", "Nswap"); - break; - case UNDERSCORE: - printf("%8s", "------"); - break; - case JOB: - printf("%8ld", job->rusage.ru_nswap); + printf("%-30s", step->nodes); break; - case JOBSTEP: - printf("%8ld", step->rusage.ru_nswap); + default: + printf("%-30s", "n/a"); break; } } -void print_ntasks(type_t type, void *object) +void print_nnodes(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + char temp[FORMAT_STRING_SIZE]; switch(type) { case HEADLINE: - printf("%7s", "Ntasks"); + printf("%-8s", "Node Cnt"); break; case UNDERSCORE: - printf("%7s", "-------"); + printf("%-8s", "--------"); break; - case JOB: - printf("%7d", job->ntasks); + case JOBCOMP: + convert_num_unit((float)jobcomp->node_cnt, temp, + sizeof(temp), UNIT_NONE); + printf("%-8s", temp); break; - case JOBSTEP: - printf("%7d", step->ntasks); + default: + printf("%-8s", "n/a"); break; } } -void print_nvcsw(type_t type, void *object) +void print_ntasks(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; switch(type) { case HEADLINE: - printf("%9s", "Nvcsw"); + printf("%-7s", "Ntasks"); break; case UNDERSCORE: - printf("%9s", "---------"); + printf("%-7s", "-------"); break; case JOB: - printf("%9ld", job->rusage.ru_nvcsw); + printf("%-7u", job->alloc_cpus); break; case JOBSTEP: - printf("%9ld", step->rusage.ru_nvcsw); - break; - } -} - -void print_outblocks(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - - switch(type) { - case HEADLINE: - printf("%9s", "Outblocks"); + printf("%-7u", step->ncpus); break; - case UNDERSCORE: - printf("%9s", "---------"); - break; - case JOB: - printf("%9ld", job->rusage.ru_oublock); - break; - case JOBSTEP: - printf("%9ld", step->rusage.ru_oublock); + default: + printf("%-7s", "n/a"); break; } } void print_partition(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t 
*jobcomp = (jobcomp_job_rec_t *)object; switch(type) { case HEADLINE: @@ -651,30 +464,37 @@ void print_partition(type_t type, void *object) printf("%-10s", "----------"); break; case JOB: - if(!job->header.partition) + if(!job->partition) printf("%-10s", "unknown"); - else if(strlen(job->header.partition)<11) - printf("%-10s", job->header.partition); + else if(strlen(job->partition)<11) + printf("%-10s", job->partition); else - printf("%-7.7s...", job->header.partition); + printf("%-7.7s...", job->partition); break; - case JOBSTEP: - if(!step->header.partition) + case JOBCOMP: + if(!jobcomp->partition) printf("%-10s", "unknown"); - else if(strlen(step->header.partition)<11) - printf("%-10s", step->header.partition); + else if(strlen(jobcomp->partition)<11) + printf("%-10s", jobcomp->partition); else - printf("%-7.7s...", step->header.partition); - + printf("%-7.7s...", jobcomp->partition); + + break; + case JOBSTEP: + printf("%-10s", " "); + break; + default: + printf("%-10s", "n/a"); break; } } +#ifdef HAVE_BG void print_blockid(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; switch(type) { case HEADLINE: @@ -684,44 +504,51 @@ void print_blockid(type_t type, void *object) printf("%-16s", "----------------"); break; case JOB: - if(!job->header.blockid) + if(!job->blockid) printf("%-16s", "unknown"); - else if(strlen(job->header.blockid)<17) - printf("%-16s", job->header.blockid); + else if(strlen(job->blockid)<17) + printf("%-16s", job->blockid); else - printf("%-13.13s...", job->header.blockid); + printf("%-13.13s...", job->blockid); break; - case JOBSTEP: - if(!step->header.blockid) + case JOBCOMP: + if(!jobcomp->blockid) printf("%-16s", "unknown"); - else if(strlen(step->header.blockid)<17) - printf("%-16s", step->header.blockid); + else if(strlen(jobcomp->blockid)<17) + printf("%-16s", jobcomp->blockid); else - printf("%-13.13s...", step->header.blockid); - + printf("%-13.13s...", jobcomp->blockid); + + break; + case JOBSTEP: + printf("%-16s", " "); + break; + default: + printf("%-16s", "n/a"); break; } } +#endif void print_pages(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char outbuf[FORMAT_STRING_SIZE]; char buf1[FORMAT_STRING_SIZE]; char buf2[FORMAT_STRING_SIZE]; - char buf3[50]; + char buf3[FORMAT_STRING_SIZE]; sacct_t sacct; char *nodes = NULL; uint32_t pos; switch(type) { case HEADLINE: - printf("%-50s", "MaxPages/Node:Task - Ave"); + printf("%-34s", "MaxPages/Node:Task - Ave"); break; case UNDERSCORE: - printf("%-50s", "----------------------------------"); + printf("%-34s", "----------------------------------"); break; case JOB: sacct = job->sacct; @@ -742,7 +569,7 @@ void print_pages(type_t type, void *object) sacct.max_pages_id.taskid, buf2); } - printf("%-50s", outbuf); + printf("%-34s", outbuf); break; case JOBSTEP: sacct = step->sacct; @@ -758,29 +585,32 @@ void print_pages(type_t type, void *object) buf3, sacct.max_pages_id.taskid, buf2); - printf("%-50s", outbuf); + printf("%-34s", outbuf); + break; + default: + printf("%-34s", "n/a"); break; } } void print_rss(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = 
(jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char outbuf[FORMAT_STRING_SIZE]; char buf1[FORMAT_STRING_SIZE]; char buf2[FORMAT_STRING_SIZE]; - char buf3[50]; + char buf3[FORMAT_STRING_SIZE]; sacct_t sacct; char *nodes = NULL; uint32_t pos; switch(type) { case HEADLINE: - printf("%-50s", "MaxRSS/Node:Task - Ave"); + printf("%-34s", "MaxRSS/Node:Task - Ave"); break; case UNDERSCORE: - printf("%-50s", "--------------------------------"); + printf("%-34s", "----------------------------------"); break; case JOB: sacct = job->sacct; @@ -801,7 +631,7 @@ void print_rss(type_t type, void *object) sacct.max_rss_id.taskid, buf2); } - printf("%-50s", outbuf); + printf("%-34s", outbuf); break; case JOBSTEP: sacct = step->sacct; @@ -817,48 +647,58 @@ void print_rss(type_t type, void *object) buf3, sacct.max_rss_id.taskid, buf2); - printf("%-50s", outbuf); + printf("%-34s", outbuf); + break; + default: + printf("%-34s", "n/a"); break; } } -void print_status(type_t type, void *object) +void print_state(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; switch(type) { case HEADLINE: - printf("%-20s", "Status"); + printf("%-20s", "State"); break; case UNDERSCORE: printf("%-20s", "--------------------"); break; case JOB: - if ( job->status == JOB_CANCELLED) { + if ( job->state == JOB_CANCELLED) { printf ("%-10s by %6d", - decode_status_int(job->status), job->requid); + job_state_string(job->state), job->requid); } else { - printf("%-20s", decode_status_int(job->status)); + printf("%-20s", job_state_string(job->state)); } break; + case JOBCOMP: + printf("%-20s", jobcomp->state); + break; case JOBSTEP: - if ( step->status == JOB_CANCELLED) { + if ( step->state == JOB_CANCELLED) { printf ("%-10s by %6d", - decode_status_int(step->status), step->requid); + job_state_string(step->state), step->requid); } else { - printf("%-20s", decode_status_int(step->status)); + printf("%-20s", job_state_string(step->state)); } break; + default: + printf("%-20s", "n/a"); + break; } } void print_submit(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char time_str[32]; switch(type) { @@ -869,148 +709,183 @@ void print_submit(type_t type, void *object) printf("%-14s", "--------------"); break; case JOB: - slurm_make_time_str(&job->header.job_submit, + slurm_make_time_str(&job->submit, time_str, sizeof(time_str)); printf("%-14s", time_str); break; case JOBSTEP: - slurm_make_time_str(&step->header.timestamp, + slurm_make_time_str(&step->start, time_str, sizeof(time_str)); printf("%-14s", time_str); break; + default: + printf("%-14s", "n/a"); + break; } } void print_start(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char time_str[32]; switch(type) { case HEADLINE: - printf("%-14s", "Start Time"); + printf("%-19s", "Start Time"); break; case UNDERSCORE: - printf("%-14s", "--------------"); + printf("%-19s", "--------------------"); break; case JOB: 
- if(params.opt_raw) { - printf("%14d", (int)job->header.timestamp); - break; - } - slurm_make_time_str(&job->header.timestamp, + slurm_make_time_str(&job->start, time_str, sizeof(time_str)); - printf("%-14s", time_str); + printf("%-19s", time_str); + break; + case JOBCOMP: + printf("%-19s", jobcomp->start_time); break; case JOBSTEP: - if(params.opt_raw) { - printf("%14d", (int)step->header.timestamp); - break; - } - slurm_make_time_str(&step->header.timestamp, + slurm_make_time_str(&step->start, time_str, sizeof(time_str)); - printf("%-14s", time_str); + printf("%-19s", time_str); + break; + default: + printf("%-19s", "n/a"); + break; + } +} + +void print_timelimit(type_t type, void *object) +{ + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-10s", "Time Limit"); + break; + case UNDERSCORE: + printf("%-10s", "----------"); + break; + case JOBCOMP: + printf("%-10s", jobcomp->timelimit); + break; + default: + printf("%-10s", "n/a"); break; } } void print_end(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char time_str[32]; switch(type) { case HEADLINE: - printf("%-14s", "End Time"); + printf("%-19s", "End Time"); break; case UNDERSCORE: - printf("%-14s", "--------------"); + printf("%-19s", "--------------------"); break; case JOB: - if(params.opt_raw) { - printf("%14d", (int)job->end); - break; - } slurm_make_time_str(&job->end, time_str, sizeof(time_str)); - printf("%-14s", time_str); + printf("%-19s", time_str); + break; + case JOBCOMP: + printf("%-19s", jobcomp->end_time); break; case JOBSTEP: - if(params.opt_raw) { - printf("%14d", (int)step->end); - break; - } slurm_make_time_str(&step->end, time_str, sizeof(time_str)); - printf("%-14s", time_str); + printf("%-19s", time_str); + break; + default: + printf("%-19s", "n/a"); break; } } void print_systemcpu(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char str[FORMAT_STRING_SIZE]; switch(type) { case HEADLINE: - printf("%15s", "SystemCpu"); + printf("%-15s", "SystemCpu"); break; case UNDERSCORE: - printf("%15s", "---------------"); + printf("%-15s", "---------------"); break; case JOB: - _elapsed_time(job->rusage.ru_stime.tv_sec, - job->rusage.ru_stime.tv_usec, str); - printf("%15s", str); + _elapsed_time(job->sys_cpu_sec, + job->sys_cpu_usec, str); + printf("%-15s", str); break; case JOBSTEP: - _elapsed_time(step->rusage.ru_stime.tv_sec, - step->rusage.ru_stime.tv_usec, str); - printf("%15s", str); + _elapsed_time(step->sys_cpu_sec, + step->sys_cpu_usec, str); + printf("%-15s", str); + break; + default: + printf("%-15s", "n/a"); break; } - } void print_uid(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + int32_t uid = -1; + struct passwd *pw = NULL; switch(type) { case HEADLINE: - printf("%5s", "Uid"); + printf("%-5s", "Uid"); break; case UNDERSCORE: - printf("%5s", "-----"); + printf("%-5s", "-----"); break; case JOB: - printf("%5d", job->header.uid); + if(job->user) { + if 
((pw=getpwnam(job->user))) + uid = pw->pw_uid; + } else + uid = job->uid; + break; + case JOBCOMP: + printf("%-5u", jobcomp->uid); break; case JOBSTEP: - printf("%5d", step->header.uid); + printf("%-5s", " "); break; } + + if(uid != -1) + printf("%-5d", uid); } void print_user(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; int uid = -1; char *tmp="(unknown)"; - struct passwd *pw = NULL; - + struct passwd *pw = NULL; + switch(type) { case HEADLINE: printf("%-9s", "User"); @@ -1019,10 +894,19 @@ void print_user(type_t type, void *object) printf("%-9s", "---------"); break; case JOB: - uid = job->header.uid; + if(job->user) + printf("%-9s", job->user); + else + uid = job->uid; + break; + case JOBCOMP: + printf("%-9s", jobcomp->uid_name); break; case JOBSTEP: - uid = step->header.uid; + printf("%-9s", " "); + break; + default: + printf("%-9s", "n/a"); break; } if(uid != -1) { @@ -1034,26 +918,29 @@ void print_user(type_t type, void *object) void print_usercpu(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char str[FORMAT_STRING_SIZE]; switch(type) { case HEADLINE: - printf("%15s", "UserCpu"); + printf("%-15s", "UserCpu"); break; case UNDERSCORE: - printf("%15s", "---------------"); + printf("%-15s", "---------------"); break; case JOB: - _elapsed_time(job->rusage.ru_utime.tv_sec, - job->rusage.ru_utime.tv_usec, str); - printf("%15s", str); + _elapsed_time(job->user_cpu_sec, + job->user_cpu_usec, str); + printf("%-15s", str); break; case JOBSTEP: - _elapsed_time(step->rusage.ru_utime.tv_sec, - step->rusage.ru_utime.tv_usec, str); - printf("%15s", str); + _elapsed_time(step->user_cpu_sec, + step->user_cpu_usec, str); + printf("%-15s", str); + break; + default: + printf("%-15s", "n/a"); break; } @@ -1061,22 +948,22 @@ void print_usercpu(type_t type, void *object) void print_vsize(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char outbuf[FORMAT_STRING_SIZE]; char buf1[FORMAT_STRING_SIZE]; char buf2[FORMAT_STRING_SIZE]; - char buf3[50]; + char buf3[FORMAT_STRING_SIZE]; sacct_t sacct; char *nodes = NULL; uint32_t pos; switch(type) { case HEADLINE: - printf("%-50s", "MaxVSIZE/Node:Task - Ave"); + printf("%-34s", "MaxVSIZE/Node:Task - Ave"); break; case UNDERSCORE: - printf("%-50s", "----------------------------------"); + printf("%-34s", "----------------------------------"); break; case JOB: sacct = job->sacct; @@ -1096,7 +983,7 @@ void print_vsize(type_t type, void *object) sacct.max_vsize_id.taskid, buf2); } - printf("%-50s", outbuf); + printf("%-34s", outbuf); break; case JOBSTEP: sacct = step->sacct; @@ -1112,69 +999,32 @@ void print_vsize(type_t type, void *object) buf3, sacct.max_vsize_id.taskid, buf2); - printf("%-50s", outbuf); - break; - } -} - -void print_vsize_short(type_t type, void *object) -{ - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; - char outbuf[FORMAT_STRING_SIZE]; - char buf1[FORMAT_STRING_SIZE]; - sacct_t sacct; - - switch(type) { - case HEADLINE: - printf("%10s", "MaxVSIZE"); - break; - case UNDERSCORE: - 
printf("%10s", "----------"); + printf("%-34s", outbuf); break; - case JOB: - sacct = job->sacct; - if(params.opt_raw) { - printf("%10d", sacct.max_vsize); - break; - } - convert_num_unit((float)sacct.max_vsize, - buf1, sizeof(buf1),UNIT_NONE); - snprintf(outbuf, FORMAT_STRING_SIZE, "%s", buf1); - printf("%10s", outbuf); - break; - case JOBSTEP: - sacct = step->sacct; - if(params.opt_raw) { - printf("%10d", sacct.max_vsize); - break; - } - convert_num_unit((float)sacct.max_vsize, - buf1, sizeof(buf1),UNIT_NONE); - snprintf(outbuf, FORMAT_STRING_SIZE, "%s", buf1); - printf("%10s", outbuf); + default: + printf("%-34s", "n/a"); break; } } void print_cputime(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; char outbuf[FORMAT_STRING_SIZE]; char buf1[FORMAT_STRING_SIZE]; char buf2[FORMAT_STRING_SIZE]; - char buf3[50]; + char buf3[FORMAT_STRING_SIZE]; sacct_t sacct; char *nodes = NULL; uint32_t pos; switch(type) { case HEADLINE: - printf("%-50s", "MinCPUtime/Node:Task - Ave"); + printf("%-37s", "MinCPUtime/Node:Task - Ave"); break; case UNDERSCORE: - printf("%-50s", "------------------------------------"); + printf("%-37s", "-------------------------------------"); break; case JOB: sacct = job->sacct; @@ -1194,7 +1044,7 @@ void print_cputime(type_t type, void *object) sacct.min_cpu_id.taskid, buf2); } - printf("%-50s", outbuf); + printf("%-37s", outbuf); break; case JOBSTEP: sacct = step->sacct; @@ -1209,15 +1059,17 @@ void print_cputime(type_t type, void *object) buf3, sacct.min_cpu_id.taskid, buf2); - printf("%-50s", outbuf); + printf("%-37s", outbuf); + break; + default: + printf("%-37s", "n/a"); break; } } void print_account(type_t type, void *object) { - job_rec_t *job = (job_rec_t *)object; - step_rec_t *step = (step_rec_t *)object; + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; switch(type) { case HEADLINE: @@ -1235,13 +1087,129 @@ void print_account(type_t type, void *object) printf("%-13.13s...", job->account); break; case JOBSTEP: - if(!step->account) - printf("%-16s", "unknown"); - else if(strlen(step->account)<17) - printf("%-16s", step->account); - else - printf("%-13.13s...", step->account); + printf("%-16s", " "); + default: + printf("%-16s", "n/a"); + break; + break; + } +} + + +#ifdef HAVE_BG +void print_connection(type_t type, void *object) +{ + jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-10s", "Connection"); + break; + case UNDERSCORE: + printf("%-10s", "----------"); + break; + case JOBCOMP: + printf("%-10s", job->connection); + break; + default: + printf("%-10s", "n/a"); + break; + } +} +void print_geo(type_t type, void *object) +{ + jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-8s", "Geometry"); + break; + case UNDERSCORE: + printf("%-8s", "--------"); + break; + case JOBCOMP: + printf("%-8s", job->geo); + break; + default: + printf("%-8s", "n/a"); + break; + } +} +void print_max_procs(type_t type, void *object) +{ + jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-9s", "Max Procs"); + break; + case UNDERSCORE: + printf("%-9s", "---------"); + break; + case JOBCOMP: + printf("%-9d", job->max_procs); + break; + default: + printf("%-9s", "n/a"); + break; + } +} +void print_reboot(type_t type, void *object) +{ + jobcomp_job_rec_t 
*job = (jobcomp_job_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-6s", "Reboot"); + break; + case UNDERSCORE: + printf("%-6s", "------"); + break; + case JOBCOMP: + printf("%-6s", job->reboot); + break; + default: + printf("%-6s", "n/a"); + break; + } +} +void print_rotate(type_t type, void *object) +{ + jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-6s", "Rotate"); + break; + case UNDERSCORE: + printf("%-6s", "------"); + break; + case JOBCOMP: + printf("%-6s", job->rotate); + break; + default: + printf("%-6s", "n/a"); + break; + } +} +void print_bg_start_point(type_t type, void *object) +{ + jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-14s", "BG Start Point"); + break; + case UNDERSCORE: + printf("%-14s", "--------------"); + break; + case JOBCOMP: + printf("%-14s", job->bg_start_point); + break; + default: + printf("%-14s", "n/a"); break; } } +#endif diff --git a/src/sacct/process.c b/src/sacct/process.c index d09346eff..52808e35e 100644 --- a/src/sacct/process.c +++ b/src/sacct/process.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -39,503 +39,6 @@ #include "sacct.h" -job_rec_t *_find_job_record(acct_header_t header, int type); -int _remove_job_record(uint32_t jobnum); -step_rec_t *_find_step_record(job_rec_t *job, long jobstep); -job_rec_t *_init_job_rec(acct_header_t header); -step_rec_t *_init_step_rec(acct_header_t header); -int _parse_line(char *f[], void **data, int len); - -job_rec_t *_find_job_record(acct_header_t header, int type) -{ - job_rec_t *job = NULL; - ListIterator itr = list_iterator_create(jobs); - - while((job = (job_rec_t *)list_next(itr)) != NULL) { - if (job->header.jobnum == header.jobnum) { - if(job->header.job_submit == 0 && type == JOB_START) { - list_remove(itr); - destroy_job(job); - job = NULL; - break; - } - - if(job->header.job_submit == BATCH_JOB_TIMESTAMP) { - job->header.job_submit = header.job_submit; - break; - } - - if(job->header.job_submit == header.job_submit) - break; - else { - /* If we're looking for a later - * record with this job number, we - * know that this one is an older, - * duplicate record. - * We assume that the newer record - * will be created if it doesn't - * already exist. 
*/ - job->jobnum_superseded = 1; - } - } - } - list_iterator_destroy(itr); - return job; -} - -int _remove_job_record(uint32_t jobnum) -{ - job_rec_t *job = NULL; - int rc = SLURM_ERROR; - ListIterator itr = list_iterator_create(jobs); - - while((job = (job_rec_t *)list_next(itr)) != NULL) { - if (job->header.jobnum == jobnum) { - list_remove(itr); - destroy_job(job); - rc = SLURM_SUCCESS; - } - } - list_iterator_destroy(itr); - return rc; -} - -step_rec_t *_find_step_record(job_rec_t *job, long stepnum) -{ - step_rec_t *step = NULL; - ListIterator itr = NULL; - - if(!list_count(job->steps)) - return step; - - itr = list_iterator_create(job->steps); - while((step = (step_rec_t *)list_next(itr)) != NULL) { - if (step->stepnum == stepnum) - break; - } - list_iterator_destroy(itr); - return step; -} - -job_rec_t *_init_job_rec(acct_header_t header) -{ - job_rec_t *job = xmalloc(sizeof(job_rec_t)); - memcpy(&job->header, &header, sizeof(acct_header_t)); - memset(&job->rusage, 0, sizeof(struct rusage)); - memset(&job->sacct, 0, sizeof(sacct_t)); - job->sacct.min_cpu = (float)NO_VAL; - job->job_start_seen = 0; - job->job_step_seen = 0; - job->job_terminated_seen = 0; - job->jobnum_superseded = 0; - job->jobname = xstrdup("(unknown)"); - job->status = JOB_PENDING; - job->nodes = NULL; - job->jobname = NULL; - job->exitcode = 0; - job->priority = 0; - job->ntasks = 0; - job->ncpus = 0; - job->elapsed = 0; - job->tot_cpu_sec = 0; - job->tot_cpu_usec = 0; - job->steps = list_create(destroy_step); - job->nodes = NULL; - job->track_steps = 0; - job->account = NULL; - job->requid = -1; - - return job; -} - -step_rec_t *_init_step_rec(acct_header_t header) -{ - step_rec_t *step = xmalloc(sizeof(job_rec_t)); - memcpy(&step->header, &header, sizeof(acct_header_t)); - memset(&step->rusage, 0, sizeof(struct rusage)); - memset(&step->sacct, 0, sizeof(sacct_t)); - step->stepnum = (uint32_t)NO_VAL; - step->nodes = NULL; - step->stepname = NULL; - step->status = NO_VAL; - step->exitcode = NO_VAL; - step->ntasks = (uint32_t)NO_VAL; - step->ncpus = (uint32_t)NO_VAL; - step->elapsed = (uint32_t)NO_VAL; - step->tot_cpu_sec = (uint32_t)NO_VAL; - step->tot_cpu_usec = (uint32_t)NO_VAL; - step->account = NULL; - step->requid = -1; - - return step; -} - -int _parse_header(char *f[], acct_header_t *header) -{ - header->jobnum = atoi(f[F_JOB]); - header->partition = xstrdup(f[F_PARTITION]); - header->job_submit = atoi(f[F_JOB_SUBMIT]); - header->timestamp = atoi(f[F_TIMESTAMP]); - header->uid = atoi(f[F_UID]); - header->gid = atoi(f[F_GID]); - header->blockid = xstrdup(f[F_BLOCKID]); - return SLURM_SUCCESS; -} - -int _parse_line(char *f[], void **data, int len) -{ - int i = atoi(f[F_RECTYPE]); - job_rec_t **job = (job_rec_t **)data; - step_rec_t **step = (step_rec_t **)data; - acct_header_t header; - _parse_header(f, &header); - - switch(i) { - case JOB_START: - *job = _init_job_rec(header); - (*job)->jobname = xstrdup(f[F_JOBNAME]); - (*job)->track_steps = atoi(f[F_TRACK_STEPS]); - (*job)->priority = atoi(f[F_PRIORITY]); - (*job)->ncpus = atoi(f[F_NCPUS]); - (*job)->nodes = xstrdup(f[F_NODES]); - for (i=0; (*job)->nodes[i]; i++) { /* discard trailing <CR> */ - if (isspace((*job)->nodes[i])) - (*job)->nodes[i] = '\0'; - } - if (!strcmp((*job)->nodes, "(null)")) { - xfree((*job)->nodes); - (*job)->nodes = xstrdup("(unknown)"); - } - if (len > F_JOB_ACCOUNT) { - (*job)->account = xstrdup(f[F_JOB_ACCOUNT]); - for (i=0; (*job)->account[i]; i++) { - /* discard trailing <CR> */ - if (isspace((*job)->account[i])) - 
(*job)->account[i] = '\0'; - } - } - break; - case JOB_STEP: - *step = _init_step_rec(header); - (*step)->stepnum = atoi(f[F_JOBSTEP]); - (*step)->status = atoi(f[F_STATUS]); - (*step)->exitcode = atoi(f[F_EXITCODE]); - (*step)->ntasks = atoi(f[F_NTASKS]); - (*step)->ncpus = atoi(f[F_STEPNCPUS]); - (*step)->elapsed = atoi(f[F_ELAPSED]); - (*step)->tot_cpu_sec = atoi(f[F_CPU_SEC]); - (*step)->tot_cpu_usec = atoi(f[F_CPU_USEC]); - (*step)->rusage.ru_utime.tv_sec = atoi(f[F_USER_SEC]); - (*step)->rusage.ru_utime.tv_usec = atoi(f[F_USER_USEC]); - (*step)->rusage.ru_stime.tv_sec = atoi(f[F_SYS_SEC]); - (*step)->rusage.ru_stime.tv_usec = atoi(f[F_SYS_USEC]); - (*step)->rusage.ru_maxrss = atoi(f[F_RSS]); - (*step)->rusage.ru_ixrss = atoi(f[F_IXRSS]); - (*step)->rusage.ru_idrss = atoi(f[F_IDRSS]); - (*step)->rusage.ru_isrss = atoi(f[F_ISRSS]); - (*step)->rusage.ru_minflt = atoi(f[F_MINFLT]); - (*step)->rusage.ru_majflt = atoi(f[F_MAJFLT]); - (*step)->rusage.ru_nswap = atoi(f[F_NSWAP]); - (*step)->rusage.ru_inblock = atoi(f[F_INBLOCKS]); - (*step)->rusage.ru_oublock = atoi(f[F_OUBLOCKS]); - (*step)->rusage.ru_msgsnd = atoi(f[F_MSGSND]); - (*step)->rusage.ru_msgrcv = atoi(f[F_MSGRCV]); - (*step)->rusage.ru_nsignals = atoi(f[F_NSIGNALS]); - (*step)->rusage.ru_nvcsw = atoi(f[F_NVCSW]); - (*step)->rusage.ru_nivcsw = atoi(f[F_NIVCSW]); - (*step)->sacct.max_vsize = atoi(f[F_MAX_VSIZE]) * 1024; - if(len > F_STEPNODES) { - (*step)->sacct.max_vsize_id.taskid = - atoi(f[F_MAX_VSIZE_TASK]); - (*step)->sacct.ave_vsize = atof(f[F_AVE_VSIZE]) * 1024; - (*step)->sacct.max_rss = atoi(f[F_MAX_RSS]) * 1024; - (*step)->sacct.max_rss_id.taskid = - atoi(f[F_MAX_RSS_TASK]); - (*step)->sacct.ave_rss = atof(f[F_AVE_RSS]) * 1024; - (*step)->sacct.max_pages = atoi(f[F_MAX_PAGES]); - (*step)->sacct.max_pages_id.taskid = - atoi(f[F_MAX_PAGES_TASK]); - (*step)->sacct.ave_pages = atof(f[F_AVE_PAGES]); - (*step)->sacct.min_cpu = atof(f[F_MIN_CPU]); - (*step)->sacct.min_cpu_id.taskid = - atoi(f[F_MIN_CPU_TASK]); - (*step)->sacct.ave_cpu = atof(f[F_AVE_CPU]); - (*step)->stepname = xstrdup(f[F_STEPNAME]); - (*step)->nodes = xstrdup(f[F_STEPNODES]); - } else { - (*step)->sacct.max_vsize_id.taskid = (uint16_t)NO_VAL; - (*step)->sacct.ave_vsize = (float)NO_VAL; - (*step)->sacct.max_rss = (uint32_t)NO_VAL; - (*step)->sacct.max_rss_id.taskid = (uint16_t)NO_VAL; - (*step)->sacct.ave_rss = (float)NO_VAL; - (*step)->sacct.max_pages = (uint32_t)NO_VAL; - (*step)->sacct.max_pages_id.taskid = (uint16_t)NO_VAL; - (*step)->sacct.ave_pages = (float)NO_VAL; - (*step)->sacct.min_cpu = (uint32_t)NO_VAL; - (*step)->sacct.min_cpu_id.taskid = (uint16_t)NO_VAL; - (*step)->sacct.ave_cpu = (float)NO_VAL; - (*step)->stepname = NULL; - (*step)->nodes = NULL; - } - if(len > F_MIN_CPU_NODE) { - (*step)->sacct.max_vsize_id.nodeid = - atoi(f[F_MAX_VSIZE_NODE]); - (*step)->sacct.max_rss_id.nodeid = - atoi(f[F_MAX_RSS_NODE]); - (*step)->sacct.max_pages_id.nodeid = - atoi(f[F_MAX_PAGES_NODE]); - (*step)->sacct.min_cpu_id.nodeid = - atoi(f[F_MIN_CPU_NODE]); - } else { - (*step)->sacct.max_vsize_id.nodeid = - (uint32_t)NO_VAL; - (*step)->sacct.max_rss_id.nodeid = - (uint32_t)NO_VAL; - (*step)->sacct.max_pages_id.nodeid = - (uint32_t)NO_VAL; - (*step)->sacct.min_cpu_id.nodeid = - (uint32_t)NO_VAL; - } - if(len > F_STEP_ACCOUNT) - (*step)->account = xstrdup(f[F_STEP_ACCOUNT]); - if(len > F_STEP_REQUID) - (*step)->requid = atoi(f[F_STEP_REQUID]); - break; - case JOB_SUSPEND: - case JOB_TERMINATED: - *job = _init_job_rec(header); - (*job)->elapsed = 
atoi(f[F_TOT_ELAPSED]); - (*job)->status = atoi(f[F_STATUS]); - if(len > F_JOB_REQUID) - (*job)->requid = atoi(f[F_JOB_REQUID]); - break; - default: - printf("UNKOWN TYPE %d",i); - break; - } - return SLURM_SUCCESS; -} - -void process_start(char *f[], int lc, int show_full, int len) -{ - job_rec_t *job = NULL; - job_rec_t *temp = NULL; - - _parse_line(f, (void **)&temp, len); - job = _find_job_record(temp->header, JOB_START); - if (job) { /* Hmmm... that's odd */ - printf("job->header.job_submit = %d", (int)job->header.job_submit); - if(job->header.job_submit == 0) - _remove_job_record(job->header.jobnum); - else { - fprintf(stderr, - "Conflicting JOB_START for job %u at" - " line %d -- ignoring it\n", - job->header.jobnum, lc); - input_error++; - destroy_job(temp); - return; - } - } - - job = temp; - job->show_full = show_full; - list_append(jobs, job); - job->job_start_seen = 1; - -} - -void process_step(char *f[], int lc, int show_full, int len) -{ - job_rec_t *job = NULL; - - step_rec_t *step = NULL; - step_rec_t *temp = NULL; - - _parse_line(f, (void **)&temp, len); - - job = _find_job_record(temp->header, JOB_STEP); - - if (temp->stepnum == -2) { - destroy_step(temp); - return; - } - if (!job) { /* fake it for now */ - job = _init_job_rec(temp->header); - if (params.opt_verbose > 1) - fprintf(stderr, - "Note: JOB_STEP record %u.%u preceded " - "JOB_START record at line %d\n", - temp->header.jobnum, temp->stepnum, lc); - } - job->show_full = show_full; - - if ((step = _find_step_record(job, temp->stepnum))) { - - if (temp->status == JOB_RUNNING) { - destroy_step(temp); - return;/* if "R" record preceded by F or CD; unusual */ - } - if (step->status != JOB_RUNNING) { /* if not JOB_RUNNING */ - fprintf(stderr, - "Conflicting JOB_STEP record for " - "jobstep %u.%u at line %d " - "-- ignoring it\n", - step->header.jobnum, - step->stepnum, lc); - input_error++; - - destroy_step(temp); - return; - } - step->status = temp->status; - step->exitcode = temp->exitcode; - step->ntasks = temp->ntasks; - step->ncpus = temp->ncpus; - step->elapsed = temp->elapsed; - step->tot_cpu_sec = temp->tot_cpu_sec; - step->tot_cpu_usec = temp->tot_cpu_usec; - job->requid = temp->requid; - step->requid = temp->requid; - memcpy(&step->rusage, &temp->rusage, sizeof(struct rusage)); - memcpy(&step->sacct, &temp->sacct, sizeof(sacct_t)); - xfree(step->stepname); - step->stepname = xstrdup(temp->stepname); - step->end = temp->header.timestamp; - destroy_step(temp); - goto got_step; - } - step = temp; - temp = NULL; - list_append(job->steps, step); - if(job->header.timestamp == 0) - job->header.timestamp = step->header.timestamp; - job->job_step_seen = 1; - job->ntasks += step->ntasks; - if(!job->nodes || !strcmp(job->nodes, "(unknown)")) { - xfree(job->nodes); - job->nodes = xstrdup(step->nodes); - } - -got_step: - - if ( job->exitcode == 0 ) - job->exitcode = step->exitcode; - - if (job->job_terminated_seen == 0) { /* If the job is still running, - this is the most recent - status */ - job->status = JOB_RUNNING; - job->elapsed = step->header.timestamp - job->header.timestamp; - } - /* now aggregate the aggregatable */ - job->ncpus = MAX(job->ncpus, step->ncpus); - if(step->status < JOB_COMPLETE) - return; - job->tot_cpu_sec += step->tot_cpu_sec; - job->tot_cpu_usec += step->tot_cpu_usec; - job->rusage.ru_utime.tv_sec += step->rusage.ru_utime.tv_sec; - job->rusage.ru_utime.tv_usec += step->rusage.ru_utime.tv_usec; - job->rusage.ru_stime.tv_sec += step->rusage.ru_stime.tv_sec; - job->rusage.ru_stime.tv_usec += 
step->rusage.ru_stime.tv_usec; - job->rusage.ru_inblock += step->rusage.ru_inblock; - job->rusage.ru_oublock += step->rusage.ru_oublock; - job->rusage.ru_msgsnd += step->rusage.ru_msgsnd; - job->rusage.ru_msgrcv += step->rusage.ru_msgrcv; - job->rusage.ru_nsignals += step->rusage.ru_nsignals; - job->rusage.ru_nvcsw += step->rusage.ru_nvcsw; - job->rusage.ru_nivcsw += step->rusage.ru_nivcsw; - - /* and finally the maximums for any process */ - job->rusage.ru_maxrss = MAX(job->rusage.ru_maxrss, - step->rusage.ru_maxrss); - job->rusage.ru_ixrss = MAX(job->rusage.ru_ixrss, - step->rusage.ru_ixrss); - job->rusage.ru_idrss = MAX(job->rusage.ru_idrss, - step->rusage.ru_idrss); - job->rusage.ru_isrss = MAX(job->rusage.ru_isrss, - step->rusage.ru_isrss); - job->rusage.ru_minflt = MAX(job->rusage.ru_minflt, - step->rusage.ru_minflt); - job->rusage.ru_majflt = MAX(job->rusage.ru_majflt, - step->rusage.ru_majflt); - job->rusage.ru_nswap = MAX(job->rusage.ru_nswap, - step->rusage.ru_nswap); - - /* get the max for all the sacct_t struct */ - aggregate_sacct(&job->sacct, &step->sacct); -} - -void process_suspend(char *f[], int lc, int show_full, int len) -{ - job_rec_t *job = NULL; - job_rec_t *temp = NULL; - - _parse_line(f, (void **)&temp, len); - job = _find_job_record(temp->header, JOB_SUSPEND); - if (!job) - job = _init_job_rec(temp->header); - - job->show_full = show_full; - if (job->status == JOB_SUSPENDED) - job->elapsed -= temp->elapsed; - - //job->header.timestamp = temp->header.timestamp; - job->status = temp->status; - destroy_job(temp); -} - -void process_terminated(char *f[], int lc, int show_full, int len) -{ - job_rec_t *job = NULL; - job_rec_t *temp = NULL; - - _parse_line(f, (void **)&temp, len); - job = _find_job_record(temp->header, JOB_TERMINATED); - if (!job) { /* fake it for now */ - job = _init_job_rec(temp->header); - if (params.opt_verbose > 1) - fprintf(stderr, "Note: JOB_TERMINATED record for job " - "%u preceded " - "other job records at line %d\n", - temp->header.jobnum, lc); - } else if (job->job_terminated_seen) { - if (temp->status == JOB_NODE_FAIL) { - /* multiple node failures - extra TERMINATED records */ - if (params.opt_verbose > 1) - fprintf(stderr, - "Note: Duplicate JOB_TERMINATED " - "record (nf) for job %u at " - "line %d\n", - temp->header.jobnum, lc); - /* JOB_TERMINATED/NF records may be preceded - * by a JOB_TERMINATED/CA record; NF is much - * more interesting. 
- */ - job->status = temp->status; - goto finished; - } - - fprintf(stderr, - "Conflicting JOB_TERMINATED record (%s) for " - "job %u at line %d -- ignoring it\n", - decode_status_int(temp->status), - job->header.jobnum, lc); - input_error++; - goto finished; - } - job->job_terminated_seen = 1; - job->elapsed = temp->elapsed; - job->end = temp->header.timestamp; - job->status = temp->status; - job->requid = temp->requid; - if(list_count(job->steps) > 1) - job->track_steps = 1; - job->show_full = show_full; - -finished: - destroy_job(temp); -} void find_hostname(uint32_t pos, char *hosts, char *host) { @@ -554,6 +57,7 @@ void find_hostname(uint32_t pos, char *hosts, char *host) } else { snprintf(host, 50, "'N/A'"); } + hostlist_destroy(hostlist); return; } @@ -584,35 +88,3 @@ void aggregate_sacct(sacct_t *dest, sacct_t *from) } dest->ave_cpu += from->ave_cpu; } - -void destroy_acct_header(void *object) -{ - acct_header_t *header = (acct_header_t *)object; - if(header) { - xfree(header->partition); - xfree(header->blockid); - } -} -void destroy_job(void *object) -{ - job_rec_t *job = (job_rec_t *)object; - if (job) { - if(job->steps) - list_destroy(job->steps); - destroy_acct_header(&job->header); - xfree(job->jobname); - xfree(job->nodes); - xfree(job); - } -} - -void destroy_step(void *object) -{ - step_rec_t *step = (step_rec_t *)object; - if (step) { - destroy_acct_header(&step->header); - xfree(step->stepname); - xfree(step->nodes); - xfree(step); - } -} diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c index 680028464..8206aaf84 100644 --- a/src/sacct/sacct.c +++ b/src/sacct/sacct.c @@ -145,7 +145,6 @@ #include "sacct.h" - void invalidSwitchCombo(char *good, char *bad); void _print_header(void); @@ -154,7 +153,6 @@ void _print_header(void); */ sacct_parameters_t params; fields_t fields[] = {{"account", print_account}, - {"blockid", print_blockid}, {"cpu", print_cpu}, {"cputime", print_cputime}, {"elapsed", print_elapsed}, @@ -163,43 +161,39 @@ fields_t fields[] = {{"account", print_account}, {"finished", print_end}, /* Defunct name */ {"gid", print_gid}, {"group", print_group}, - {"idrss", print_idrss}, - {"inblocks", print_inblocks}, - {"isrss", print_isrss}, - {"ixrss", print_ixrss}, {"job", print_job}, {"jobid", print_jobid}, {"jobname", print_name}, - {"majflt", print_majflt}, - {"minflt", print_minflt}, - {"msgrcv", print_msgrcv}, - {"msgsnd", print_msgsnd}, {"ncpus", print_ncpus}, - {"nivcsw", print_nivcsw}, {"nodes", print_nodes}, + {"nnodes", print_nnodes}, {"nprocs", print_ntasks}, - {"nsignals", print_nsignals}, - {"nswap", print_nswap}, {"ntasks", print_ntasks}, - {"nvcsw", print_nvcsw}, - {"outblocks", print_outblocks}, {"pages", print_pages}, {"partition", print_partition}, {"rss", print_rss}, {"start", print_start}, - {"status", print_status}, + {"state", print_state}, + {"status", print_state}, {"submit", print_submit}, + {"timelimit", print_timelimit}, {"submitted", print_submit}, /* Defunct name */ {"systemcpu", print_systemcpu}, {"uid", print_uid}, {"user", print_user}, {"usercpu", print_usercpu}, {"vsize", print_vsize}, - {"vsize_short", print_vsize_short}, +#ifdef HAVE_BG + {"blockid", print_blockid}, + {"connection", print_connection}, + {"geo", print_geo}, + {"max_procs", print_max_procs}, + {"reboot", print_reboot}, + {"rotate", print_rotate}, + {"bg_start_point", print_bg_start_point}, +#endif {NULL, NULL}}; -long input_error = 0; /* Muddle through bad data, but complain! 
*/ - List jobs = NULL; int printfields[MAX_PRINTFIELDS], /* Indexed into fields[] */ @@ -208,13 +202,13 @@ int printfields[MAX_PRINTFIELDS], /* Indexed into fields[] */ int main(int argc, char **argv) { enum { - DUMP, - EXPIRE, - FDUMP, - LIST, - STAT, - HELP, - USAGE + SACCT_DUMP, + SACCT_EXPIRE, + SACCT_FDUMP, + SACCT_LIST, + SACCT_STAT, + SACCT_HELP, + SACCT_USAGE } op; int rc = 0; @@ -226,9 +220,9 @@ int main(int argc, char **argv) */ if (params.opt_help) - op = HELP; + op = SACCT_HELP; else if (params.opt_dump) { - op = DUMP; + op = SACCT_DUMP; if (params.opt_long || params.opt_total || params.opt_field_list || params.opt_expire) { if (params.opt_verbose) @@ -247,11 +241,11 @@ int main(int argc, char **argv) goto finished; } } else if (params.opt_fdump) { - op = FDUMP; + op = SACCT_FDUMP; } else if (params.opt_stat) { - op = STAT; + op = SACCT_STAT; } else if (params.opt_expire) { - op = EXPIRE; + op = SACCT_EXPIRE; if (params.opt_long || params.opt_total || params.opt_field_list || (params.opt_gid>=0) || (params.opt_uid>=0) || @@ -281,32 +275,46 @@ int main(int argc, char **argv) goto finished; } } else - op = LIST; + op = SACCT_LIST; switch (op) { - case DUMP: - get_data(); - do_dump(); + case SACCT_DUMP: + if(get_data() == SLURM_ERROR) + exit(errno); + if(params.opt_completion) + do_dump_completion(); + else + do_dump(); break; - case EXPIRE: - do_expire(); + case SACCT_EXPIRE: + do_expire(0); break; - case FDUMP: - get_data(); + case SACCT_FDUMP: + if(get_data() == SLURM_ERROR) + exit(errno); break; - case LIST: + case SACCT_LIST: if (params.opt_header) /* give them something to look */ _print_header();/* at while we think... */ - get_data(); - do_list(); + if(get_data() == SLURM_ERROR) + exit(errno); + if(params.opt_completion) + do_list_completion(); + else + do_list(); break; - case STAT: + case SACCT_STAT: + fprintf(stderr, + "This functionality has been replaced with 'sstat' " + "in the future please make note this will " + "not be supported."); + if (params.opt_header) /* give them something to look */ _print_header();/* at while we think... */ do_stat(); break; - case HELP: + case SACCT_HELP: do_help(); break; default: diff --git a/src/sacct/sacct.h b/src/sacct/sacct.h index aa36d0b8d..c340bc9ee 100644 --- a/src/sacct/sacct.h +++ b/src/sacct/sacct.h @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -57,245 +57,60 @@ #include "src/common/xstring.h" #include "src/common/list.h" #include "src/common/hostlist.h" - -#include "src/sacct/sacct_stat.h" +#include "src/common/slurm_jobacct_gather.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurm_jobcomp.h" #define ERROR 2 -/* slurmd uses "(uint32_t) -2" to track data for batch allocations - * which have no logical jobsteps. 
*/ -#define BATCH_JOB_TIMESTAMP 0 +#define BRIEF_FIELDS "jobid,state,exitcode" +#define BRIEF_COMP_FIELDS "jobid,uid,state" +#define DEFAULT_FIELDS "jobid,jobname,partition,ncpus,state,exitcode" +#define DEFAULT_COMP_FIELDS "jobid,uid,jobname,partition,nnodes,nodes,state,end" +#define STAT_FIELDS "jobid,vsize,rss,pages,cputime,ntasks,state" +#define LONG_FIELDS "jobid,jobname,partition,vsize,rss,pages,cputime,ntasks,ncpus,elapsed,state,exitcode" -#define ACCOUNT_FIELDS "jobid,jobname,start,end,cpu,vsize_short,status,exitcode" -#define BRIEF_FIELDS "jobid,status,exitcode" -#define DEFAULT_FIELDS "jobid,jobname,partition,ncpus,status,exitcode" -#define STAT_FIELDS "jobid,vsize,rss,pages,cputime,ntasks,status" -#define LONG_FIELDS "jobid,jobname,partition,vsize,rss,pages,cputime,ntasks,ncpus,elapsed,status,exitcode" +#ifdef HAVE_BG +#define LONG_COMP_FIELDS "jobid,uid,jobname,partition,blockid,nnodes,nodes,state,start,end,timelimit,connection,reboot,rotate,max_procs,geo,bg_start_point" +#else +#define LONG_COMP_FIELDS "jobid,uid,jobname,partition,nnodes,nodes,state,start,end,timelimit" +#endif #define BUFFER_SIZE 4096 -#define STATUS_COUNT 10 +#define STATE_COUNT 10 #define MAX_PRINTFIELDS 100 -#define EXPIRE_READ_LENGTH 10 -#define MAX_RECORD_FIELDS 100 #define SECONDS_IN_MINUTE 60 #define SECONDS_IN_HOUR (60*SECONDS_IN_MINUTE) #define SECONDS_IN_DAY (24*SECONDS_IN_HOUR) -#define TIMESTAMP_LENGTH 15 - -/* Map field names to positions */ - -/* Fields common to all records */ -enum { F_JOB = 0, - F_PARTITION, - F_JOB_SUBMIT, - F_TIMESTAMP, - F_UID, - F_GID, - F_BLOCKID, - F_RESERVED2, - F_RECTYPE, - HEADER_LENGTH -}; - -/* JOB_START fields */ -enum { F_JOBNAME = HEADER_LENGTH, - F_TRACK_STEPS, - F_PRIORITY, - F_NCPUS, - F_NODES, - F_JOB_ACCOUNT, - JOB_START_LENGTH -}; - -/* JOB_STEP fields */ -enum { F_JOBSTEP = HEADER_LENGTH, - F_STATUS, - F_EXITCODE, - F_NTASKS, - F_STEPNCPUS, - F_ELAPSED, - F_CPU_SEC, - F_CPU_USEC, - F_USER_SEC, - F_USER_USEC, - F_SYS_SEC, - F_SYS_USEC, - F_RSS, - F_IXRSS, - F_IDRSS, - F_ISRSS, - F_MINFLT, - F_MAJFLT, - F_NSWAP, - F_INBLOCKS, - F_OUBLOCKS, - F_MSGSND, - F_MSGRCV, - F_NSIGNALS, - F_NVCSW, - F_NIVCSW, - F_MAX_VSIZE, - F_MAX_VSIZE_TASK, - F_AVE_VSIZE, - F_MAX_RSS, - F_MAX_RSS_TASK, - F_AVE_RSS, - F_MAX_PAGES, - F_MAX_PAGES_TASK, - F_AVE_PAGES, - F_MIN_CPU, - F_MIN_CPU_TASK, - F_AVE_CPU, - F_STEPNAME, - F_STEPNODES, - F_MAX_VSIZE_NODE, - F_MAX_RSS_NODE, - F_MAX_PAGES_NODE, - F_MIN_CPU_NODE, - F_STEP_ACCOUNT, - F_STEP_REQUID, - JOB_STEP_LENGTH -}; - -/* JOB_TERM / JOB_SUSPEND fields */ -enum { F_TOT_ELAPSED = HEADER_LENGTH, - F_TERM_STATUS, - F_JOB_REQUID, - JOB_TERM_LENGTH -}; - /* On output, use fields 12-37 from JOB_STEP */ typedef enum { HEADLINE, UNDERSCORE, JOB, - JOBSTEP + JOBSTEP, + JOBCOMP } type_t; -enum { CANCELLED, - COMPLETED, - COMPLETING, - FAILED, - NODEFAILED, - PENDING, - RUNNING, - TIMEDOUT -}; - -typedef struct header { - uint32_t jobnum; - char *partition; - char *blockid; - time_t job_submit; - time_t timestamp; - uint32_t uid; - uint32_t gid; - uint16_t rec_type; -} acct_header_t; - -typedef struct job_rec { - uint32_t job_start_seen, /* useful flags */ - job_step_seen, - job_terminated_seen, - jobnum_superseded; /* older jobnum was reused */ - acct_header_t header; - uint16_t show_full; - char *nodes; - char *jobname; - uint16_t track_steps; - int32_t priority; - uint32_t ncpus; - uint32_t ntasks; - int32_t status; - int32_t exitcode; - uint32_t elapsed; - time_t end; - uint32_t tot_cpu_sec; - uint32_t tot_cpu_usec; - struct rusage 
rusage; - sacct_t sacct; - List steps; - char *account; - uint32_t requid; -} job_rec_t; - -typedef struct step_rec { - acct_header_t header; - uint32_t stepnum; /* job's step number */ - char *nodes; - char *stepname; - int32_t status; - int32_t exitcode; - uint32_t ntasks; - uint32_t ncpus; - uint32_t elapsed; - time_t end; - uint32_t tot_cpu_sec; - uint32_t tot_cpu_usec; - struct rusage rusage; - sacct_t sacct; - char *account; - uint32_t requid; -} step_rec_t; - -typedef struct selected_step_t { - char *job; - char *step; -} selected_step_t; typedef struct fields { char *name; /* Specified in --fields= */ void (*print_routine) (); /* Who gets to print it? */ } fields_t; -/* Input parameters */ -typedef struct sacct_parameters { - int opt_dump; /* --dump */ - int opt_dup; /* --duplicates; +1 = explicitly set */ - int opt_fdump; /* --formattted_dump */ - int opt_stat; /* --stat */ - int opt_gid; /* --gid (-1=wildcard, 0=root) */ - int opt_header; /* can only be cleared */ - int opt_help; /* --help */ - int opt_long; /* --long */ - int opt_lowmem; /* --low_memory */ - int opt_purge; /* --purge */ - int opt_raw; /* --raw */ - int opt_total; /* --total */ - int opt_uid; /* --uid (-1=wildcard, 0=root) */ - int opt_verbose; /* --verbose */ - long opt_expire; /* --expire= */ - char *opt_expire_timespec; /* --expire= */ - char *opt_field_list; /* --fields= */ - char *opt_filein; /* --file */ - char *opt_job_list; /* --jobs */ - char *opt_partition_list;/* --partitions */ - char *opt_state_list; /* --states */ -} sacct_parameters_t; - extern fields_t fields[]; extern sacct_parameters_t params; -extern long input_error; /* Muddle through bad data, but complain! */ - extern List jobs; extern int printfields[MAX_PRINTFIELDS], /* Indexed into fields[] */ nprintfields; /* process.c */ -void process_start(char *f[], int lc, int show_full, int len); -void process_step(char *f[], int lc, int show_full, int len); -void process_suspend(char *f[], int lc, int show_full, int len); -void process_terminated(char *f[], int lc, int show_full, int len); void find_hostname(uint32_t pos, char *hosts, char *host); void aggregate_sacct(sacct_t *dest, sacct_t *from); -void destroy_acct_header(void *object); -void destroy_job(void *object); -void destroy_step(void *object); /* print.c */ void print_fields(type_t type, void *object); @@ -304,54 +119,55 @@ void print_elapsed(type_t type, void *object); void print_exitcode(type_t type, void *object); void print_gid(type_t type, void *object); void print_group(type_t type, void *object); -void print_idrss(type_t type, void *object); -void print_inblocks(type_t type, void *object); -void print_isrss(type_t type, void *object); -void print_ixrss(type_t type, void *object); void print_job(type_t type, void *object); void print_name(type_t type, void *object); void print_jobid(type_t type, void *object); -void print_majflt(type_t type, void *object); -void print_minflt(type_t type, void *object); -void print_msgrcv(type_t type, void *object); -void print_msgsnd(type_t type, void *object); void print_ncpus(type_t type, void *object); -void print_nivcsw(type_t type, void *object); void print_nodes(type_t type, void *object); -void print_nsignals(type_t type, void *object); -void print_nswap(type_t type, void *object); +void print_nnodes(type_t type, void *object); void print_ntasks(type_t type, void *object); -void print_nvcsw(type_t type, void *object); -void print_outblocks(type_t type, void *object); void print_partition(type_t type, void *object); void print_blockid(type_t 
type, void *object); void print_pages(type_t type, void *object); void print_rss(type_t type, void *object); -void print_status(type_t type, void *object); +void print_state(type_t type, void *object); void print_submit(type_t type, void *object); void print_start(type_t type, void *object); void print_end(type_t type, void *object); void print_systemcpu(type_t type, void *object); +void print_timelimit(type_t type, void *object); void print_uid(type_t type, void *object); void print_user(type_t type, void *object); void print_usercpu(type_t type, void *object); void print_vsize(type_t type, void *object); -void print_vsize_short(type_t type, void *object); void print_cputime(type_t type, void *object); void print_account(type_t type, void *object); +#ifdef HAVE_BG +void print_connection(type_t type, void *object); +void print_geo(type_t type, void *object); +void print_max_procs(type_t type, void *object); +void print_reboot(type_t type, void *object); +void print_rotate(type_t type, void *object); +void print_bg_start_point(type_t type, void *object); +#endif + /* options.c */ -int decode_status_char(char *status); -char *decode_status_int(int status); +int decode_state_char(char *state); +char *decode_state_int(int state); int get_data(void); void parse_command_line(int argc, char **argv); void do_dump(void); -void do_expire(void); -void do_fdump(char* fields[], int lc); +void do_dump_completion(void); +void do_expire(int dummy); void do_help(void); void do_list(void); +void do_list_completion(void); void do_stat(void); void sacct_init(); void sacct_fini(); +/* sacct_stat.c */ +extern int sacct_stat(uint32_t jobid, uint32_t stepid); + #endif /* !_SACCT_H */ diff --git a/src/sacct/sacct_stat.c b/src/sacct/sacct_stat.c index 5c4b29812..427fd1e82 100644 --- a/src/sacct/sacct_stat.c +++ b/src/sacct/sacct_stat.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -17,7 +17,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -39,11 +39,10 @@ #include "sacct.h" #include <pthread.h> -#include "src/common/slurm_jobacct.h" #include "src/common/forward.h" #include "src/common/slurm_auth.h" -step_rec_t step; +jobacct_step_rec_t step; int thr_finished = 0; @@ -65,7 +64,7 @@ int _sacct_query(slurm_step_layout_t *step_layout, uint32_t job_id, ret_data_info_t *ret_data_info = NULL; int rc = SLURM_SUCCESS; int ntasks = 0; - + int tot_tasks = 0; debug("getting the stat of job %d on %d nodes", job_id, step_layout->node_cnt); @@ -74,20 +73,15 @@ int _sacct_query(slurm_step_layout_t *step_layout, uint32_t job_id, memset(&step.sacct, 0, sizeof(sacct_t)); step.sacct.min_cpu = (float)NO_VAL; - step.header.jobnum = job_id; - step.header.partition = NULL; - step.header.blockid = NULL; - step.stepnum = step_id; + step.stepid = step_id; step.nodes = step_layout->node_list; step.stepname = NULL; - step.status = JOB_RUNNING; - step.ntasks = 0; + step.state = JOB_RUNNING; slurm_msg_t_init(&msg); - /* Common message contents */ r.job_id = job_id; r.step_id = step_id; - r.jobacct = jobacct_g_alloc(NULL); + r.jobacct = jobacct_gather_g_create(NULL); msg.msg_type = MESSAGE_STAT_JOBACCT; msg.data = &r; @@ -107,7 +101,7 @@ int _sacct_query(slurm_step_layout_t *step_layout, uint32_t job_id, if(jobacct_msg) { debug2("got it back for job %d", jobacct_msg->job_id); - jobacct_g_2_sacct( + jobacct_gather_g_2_sacct( &temp_sacct, jobacct_msg->jobacct); ntasks += jobacct_msg->num_tasks; @@ -131,23 +125,23 @@ int _sacct_query(slurm_step_layout_t *step_layout, uint32_t job_id, list_iterator_destroy(itr); list_destroy(ret_list); - step.ntasks += ntasks; + tot_tasks += ntasks; cleanup: - if(step.ntasks) { + if(tot_tasks) { step.sacct.ave_rss *= 1024; step.sacct.max_rss *= 1024; step.sacct.ave_vsize *= 1024; step.sacct.max_vsize *= 1024; - step.sacct.ave_cpu /= step.ntasks; + step.sacct.ave_cpu /= tot_tasks; step.sacct.ave_cpu /= 100; step.sacct.min_cpu /= 100; - step.sacct.ave_rss /= step.ntasks; - step.sacct.ave_vsize /= step.ntasks; - step.sacct.ave_pages /= step.ntasks; + step.sacct.ave_rss /= tot_tasks; + step.sacct.ave_vsize /= tot_tasks; + step.sacct.ave_pages /= tot_tasks; } - jobacct_g_free(r.jobacct); + jobacct_gather_g_destroy(r.jobacct); return SLURM_SUCCESS; } @@ -198,8 +192,10 @@ int sacct_stat(uint32_t jobid, uint32_t stepid) } _sacct_query(step_layout, jobid, stepid); - slurm_step_layout_destroy(step_layout); _process_results(); + + slurm_step_layout_destroy(step_layout); + return rc; } diff --git a/src/sacctmgr/Makefile.am b/src/sacctmgr/Makefile.am new file mode 100644 index 000000000..bf354ad3e --- /dev/null +++ b/src/sacctmgr/Makefile.am @@ -0,0 +1,27 @@ +# Makefile for sacctmgr + +AUTOMAKE_OPTIONS = foreign + +INCLUDES = -I$(top_srcdir) + +bin_PROGRAMS = sacctmgr + +sacctmgr_SOURCES = \ + account_functions.c \ + association_functions.c \ + cluster_functions.c \ + common.c \ + print.c print.h \ + sacctmgr.c sacctmgr.h \ + user_functions.c + +sacctmgr_LDADD = \ + $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la \ + $(READLINE_LIBS) + +sacctmgr_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) + +force: +$(convenience_libs) : force + @cd `dirname $@` && $(MAKE) `basename $@` diff --git a/src/sacctmgr/Makefile.in b/src/sacctmgr/Makefile.in new file mode 100644 index 000000000..585445fa5 --- /dev/null +++ b/src/sacctmgr/Makefile.in @@ -0,0 +1,576 @@ +# Makefile.in generated by automake 1.10.1 from 
Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Makefile for sacctmgr + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +bin_PROGRAMS = sacctmgr$(EXEEXT) +subdir = src/sacctmgr +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) +PROGRAMS = $(bin_PROGRAMS) +am_sacctmgr_OBJECTS = account_functions.$(OBJEXT) \ + association_functions.$(OBJEXT) cluster_functions.$(OBJEXT) \ + common.$(OBJEXT) print.$(OBJEXT) sacctmgr.$(OBJEXT) \ + user_functions.$(OBJEXT) +sacctmgr_OBJECTS = $(am_sacctmgr_OBJECTS) +am__DEPENDENCIES_1 = +sacctmgr_DEPENDENCIES = $(top_builddir)/src/common/libcommon.o \ + $(top_builddir)/src/api/libslurmhelper.la \ + $(am__DEPENDENCIES_1) +sacctmgr_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sacctmgr_LDFLAGS) \ + $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC 
$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(sacctmgr_SOURCES) +DIST_SOURCES = $(sacctmgr_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC 
= @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +INCLUDES = -I$(top_srcdir) +sacctmgr_SOURCES = \ + account_functions.c \ + association_functions.c \ + cluster_functions.c \ + common.c \ + print.c print.h \ + sacctmgr.c sacctmgr.h \ + user_functions.c + +sacctmgr_LDADD = \ + $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la \ + $(READLINE_LIBS) + +sacctmgr_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/sacctmgr/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/sacctmgr/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + || test -f $$p1 \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +uninstall-binPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ + rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f $$p $$f"; \ + rm -f $$p $$f ; \ + done +sacctmgr$(EXEEXT): $(sacctmgr_OBJECTS) $(sacctmgr_DEPENDENCIES) + @rm -f sacctmgr$(EXEEXT) + $(sacctmgr_LINK) $(sacctmgr_OBJECTS) $(sacctmgr_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/account_functions.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/association_functions.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cluster_functions.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sacctmgr.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user_functions.Po@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' 
object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f 
$(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-binPROGRAMS + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic clean-libtool ctags distclean distclean-compile \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-binPROGRAMS install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am \ + uninstall-binPROGRAMS + + +force: +$(convenience_libs) : force + @cd `dirname $@` && $(MAKE) `basename $@` +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c new file mode 100644 index 000000000..ed0e3d916 --- /dev/null +++ b/src/sacctmgr/account_functions.c @@ -0,0 +1,1174 @@ +/*****************************************************************************\ + * account_functions.c - functions dealing with accounts in the + * accounting system. + ***************************************************************************** + * Copyright (C) 2002-2008 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/sacctmgr/sacctmgr.h" +#include "src/sacctmgr/print.h" + +static int _set_cond(int *start, int argc, char *argv[], + acct_account_cond_t *acct_cond, + List format_list) +{ + int i; + int a_set = 0; + int u_set = 0; + int end = 0; + + for (i=(*start); i<argc; i++) { + end = parse_option_end(argv[i]); + if (strncasecmp (argv[i], "Set", 3) == 0) { + i--; + break; + } else if (strncasecmp (argv[i], "WithAssoc", 4) == 0) { + acct_cond->with_assocs = 1; + } else if(!end) { + addto_char_list(acct_cond->acct_list, argv[i]); + addto_char_list(acct_cond->assoc_cond->acct_list, + argv[i]); + u_set = 1; + } else if (strncasecmp (argv[i], "Clusters", 1) == 0) { + addto_char_list(acct_cond->assoc_cond->cluster_list, + argv[i]+end); + a_set = 1; + } else if (strncasecmp (argv[i], "Descriptions", 1) == 0) { + addto_char_list(acct_cond->description_list, + argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "Format", 1) == 0) { + if(format_list) + addto_char_list(format_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Names", 1) == 0 + || strncasecmp (argv[i], "Accouts", 1) == 0) { + addto_char_list(acct_cond->acct_list, argv[i]+end); + addto_char_list(acct_cond->assoc_cond->acct_list, + argv[i]); + u_set = 1; + } else if (strncasecmp (argv[i], "Organizations", 1) == 0) { + addto_char_list(acct_cond->organization_list, + argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "Parent", 1) == 0) { + acct_cond->assoc_cond->parent_acct = + xstrdup(argv[i]+end); + a_set = 1; + } else if (strncasecmp (argv[i], "QosLevel", 1) == 0) { + acct_cond->qos = str_2_acct_qos(argv[i]+end); + u_set = 1; + } else { + printf(" Unknown condition: %s\n" + " Use keyword 'set' to modify " + "SLURM_PRINT_VALUE\n", argv[i]); + } + } + (*start) = i; + + if(a_set) + return 2; + else if(u_set) + return 1; + + return 0; +} + +static int _set_rec(int *start, int argc, char *argv[], + acct_account_rec_t *acct, + acct_association_rec_t *assoc) +{ + int i, mins; + int u_set = 0; + int a_set = 0; + int end = 0; + + for (i=(*start); i<argc; i++) { + end = parse_option_end(argv[i]); + if (strncasecmp (argv[i], "Where", 5) == 0) { + i--; + break; + } else if(!end) { + printf(" Bad format on %s: End your option with " + "an '=' sign\n", argv[i]); + } else if (strncasecmp (argv[i], "Description", 1) == 0) { + acct->description = xstrdup(argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "FairShare", 1) == 0) { + if (get_uint(argv[i]+end, &assoc->fairshare, + "FairShare") == SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxCPUSec", 4) == 0) { + if (get_uint(argv[i]+end, &assoc->max_cpu_secs_per_job, + "MaxCPUSec") 
== SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxJobs", 4) == 0) { + if (get_uint(argv[i]+end, &assoc->max_jobs, + "MaxJobs") == SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxNodes", 4) == 0) { + if (get_uint(argv[i]+end, &assoc->max_nodes_per_job, + "MaxNodes") == SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxWall", 4) == 0) { + mins = time_str2mins(argv[i]+end); + if (mins != NO_VAL) { + assoc->max_wall_duration_per_job + = (uint32_t) mins; + a_set = 1; + } else { + printf(" Bad MaxWall time format: %s\n", + argv[i]); + } + } else if (strncasecmp (argv[i], "Organization", 1) == 0) { + acct->organization = xstrdup(argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "Parent", 1) == 0) { + assoc->parent_acct = xstrdup(argv[i]+end); + a_set = 1; + } else if (strncasecmp (argv[i], "QosLevel=", 1) == 0) { + acct->qos = str_2_acct_qos(argv[i]+end); + u_set = 1; + } else { + printf(" Unknown option: %s\n" + " Use keyword 'where' to modify condition\n", + argv[i]); + } + } + (*start) = i; + + if(u_set && a_set) + return 3; + else if(a_set) + return 2; + else if(u_set) + return 1; + return 0; +} + +/* static void _print_cond(acct_account_cond_t *acct_cond) */ +/* { */ +/* ListIterator itr = NULL; */ +/* char *tmp_char = NULL; */ + +/* if(!acct_cond) { */ +/* error("no acct_account_cond_t * given"); */ +/* return; */ +/* } */ + +/* if(acct_cond->acct_list && list_count(acct_cond->acct_list)) { */ +/* itr = list_iterator_create(acct_cond->acct_list); */ +/* printf(" Names = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(acct_cond->description_list */ +/* && list_count(acct_cond->description_list)) { */ +/* itr = list_iterator_create(acct_cond->description_list); */ +/* printf(" Description = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(acct_cond->organization_list */ +/* && list_count(acct_cond->organization_list)) { */ +/* itr = list_iterator_create(acct_cond->organization_list); */ +/* printf(" Organization = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(acct_cond->qos != ACCT_QOS_NOTSET) */ +/* printf(" Qos = %s\n", */ +/* acct_qos_str(acct_cond->qos)); */ +/* } */ + +/* static void _print_rec(acct_account_rec_t *acct) */ +/* { */ +/* if(!acct) { */ +/* error("no acct_account_rec_t * given"); */ +/* return; */ +/* } */ + +/* if(acct->name) */ +/* printf(" Name = %s\n", acct->name); */ + +/* if(acct->description) */ +/* printf(" Description = %s\n", acct->description); */ + +/* if(acct->organization) */ +/* printf(" Organization = %s\n", acct->organization); */ + +/* if(acct->qos != ACCT_QOS_NOTSET) */ +/* printf(" Qos = %s\n", */ +/* acct_qos_str(acct->qos)); */ + +/* } */ + +extern int sacctmgr_add_account(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + int i=0, mins; + ListIterator itr = NULL, itr_c = NULL; + acct_account_rec_t *acct = NULL; + acct_association_rec_t *assoc = NULL; + acct_association_cond_t assoc_cond; + List name_list = list_create(slurm_destroy_char); + List cluster_list = list_create(slurm_destroy_char); + char *description = NULL; + char *organization = NULL; + char *parent = NULL; + char *cluster = NULL; + char *name = NULL; + acct_qos_level_t qos = ACCT_QOS_NOTSET; + List acct_list = NULL; + List assoc_list = 
NULL; + List local_assoc_list = NULL; + List local_account_list = NULL; + uint32_t fairshare = NO_VAL; + uint32_t max_jobs = NO_VAL; + uint32_t max_nodes_per_job = NO_VAL; + uint32_t max_wall_duration_per_job = NO_VAL; + uint32_t max_cpu_secs_per_job = NO_VAL; + char *acct_str = NULL; + char *assoc_str = NULL; + int limit_set = 0; + + for (i=0; i<argc; i++) { + int end = parse_option_end(argv[i]); + if(!end) { + addto_char_list(name_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Cluster", 1) == 0) { + addto_char_list(cluster_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Description", 1) == 0) { + description = xstrdup(argv[i]+end); + } else if (strncasecmp (argv[i], "FairShare", 1) == 0) { + if (get_uint(argv[i]+end, &fairshare, + "FairShare") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxCPUSecs", 4) == 0) { + if (get_uint(argv[i]+end, &max_cpu_secs_per_job, + "MaxCPUSecs") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxJobs", 4) == 0) { + if (get_uint(argv[i]+end, &max_jobs, + "MaxJobs") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxNodes", 4) == 0) { + if (get_uint(argv[i]+end, &max_nodes_per_job, + "MaxNodes") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxWall", 4) == 0) { + mins = time_str2mins(argv[i]+end); + if (mins != NO_VAL) { + max_wall_duration_per_job = (uint32_t) mins; + limit_set = 1; + } else { + printf(" Bad MaxWall time format: %s\n", + argv[i]); + } + } else if (strncasecmp (argv[i], "Names", 1) == 0) { + addto_char_list(name_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Organization", 1) == 0) { + organization = xstrdup(argv[i]+end); + } else if (strncasecmp (argv[i], "Parent", 1) == 0) { + parent = xstrdup(argv[i]+end); + } else if (strncasecmp (argv[i], "QosLevel", 1) == 0) { + qos = str_2_acct_qos(argv[i]+end); + } else { + printf(" Unknown option: %s\n", argv[i]); + } + } + + if(!list_count(name_list)) { + list_destroy(name_list); + list_destroy(cluster_list); + xfree(parent); + xfree(description); + xfree(organization); + printf(" Need name of account to add.\n"); + return SLURM_SUCCESS; + } else { + acct_account_cond_t account_cond; + + memset(&account_cond, 0, sizeof(acct_account_cond_t)); + account_cond.acct_list = name_list; + + local_account_list = acct_storage_g_get_accounts( + db_conn, &account_cond); + + } + if(!local_account_list) { + printf(" Problem getting accounts from database. " + "Contact your admin.\n"); + list_destroy(name_list); + list_destroy(cluster_list); + xfree(parent); + xfree(description); + xfree(organization); + return SLURM_ERROR; + } + + if(!parent) + parent = xstrdup("root"); + + if(!list_count(cluster_list)) { + List temp_list = NULL; + acct_cluster_rec_t *cluster_rec = NULL; + + temp_list = acct_storage_g_get_clusters(db_conn, NULL); + if(!cluster_list) { + printf(" Problem getting clusters from database. 
" + "Contact your admin.\n"); + list_destroy(name_list); + list_destroy(cluster_list); + list_destroy(local_account_list); + xfree(parent); + xfree(description); + xfree(organization); + return SLURM_ERROR; + } + + itr_c = list_iterator_create(temp_list); + while((cluster_rec = list_next(itr_c))) { + list_append(cluster_list, xstrdup(cluster_rec->name)); + } + list_iterator_destroy(itr_c); + + if(!list_count(cluster_list)) { + printf(" Can't add accounts, no cluster defined yet.\n" + " Please contact your administrator.\n"); + list_destroy(name_list); + list_destroy(cluster_list); + list_destroy(local_account_list); + xfree(parent); + xfree(description); + xfree(organization); + return SLURM_ERROR; + } + } else { + List temp_list = NULL; + acct_cluster_cond_t cluster_cond; + + memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t)); + cluster_cond.cluster_list = cluster_list; + + temp_list = acct_storage_g_get_clusters(db_conn, &cluster_cond); + + itr_c = list_iterator_create(cluster_list); + itr = list_iterator_create(temp_list); + while((cluster = list_next(itr_c))) { + acct_cluster_rec_t *cluster_rec = NULL; + + list_iterator_reset(itr); + while((cluster_rec = list_next(itr))) { + if(!strcasecmp(cluster_rec->name, cluster)) + break; + } + if(!cluster_rec) { + printf(" error: This cluster '%s' " + "doesn't exist.\n" + " Contact your admin " + "to add it to accounting.\n", + cluster); + list_delete_item(itr_c); + } + } + list_iterator_destroy(itr); + list_iterator_destroy(itr_c); + list_destroy(temp_list); + if(!list_count(cluster_list)) { + list_destroy(name_list); + list_destroy(cluster_list); + list_destroy(local_account_list); + return SLURM_ERROR; + } + } + + + acct_list = list_create(destroy_acct_account_rec); + assoc_list = list_create(destroy_acct_association_rec); + + memset(&assoc_cond, 0, sizeof(acct_association_cond_t)); + + assoc_cond.acct_list = list_create(NULL); + itr = list_iterator_create(name_list); + while((name = list_next(itr))) { + list_append(assoc_cond.acct_list, name); + } + list_iterator_destroy(itr); + list_append(assoc_cond.acct_list, parent); + + assoc_cond.cluster_list = cluster_list; + local_assoc_list = acct_storage_g_get_associations( + db_conn, &assoc_cond); + list_destroy(assoc_cond.acct_list); + if(!local_assoc_list) { + printf(" Problem getting associations from database. 
" + "Contact your admin.\n"); + list_destroy(name_list); + list_destroy(cluster_list); + list_destroy(local_account_list); + xfree(parent); + xfree(description); + xfree(organization); + return SLURM_ERROR; + } + + itr = list_iterator_create(name_list); + while((name = list_next(itr))) { + acct = NULL; + if(!sacctmgr_find_account_from_list(local_account_list, name)) { + acct = xmalloc(sizeof(acct_account_rec_t)); + acct->assoc_list = list_create(NULL); + acct->name = xstrdup(name); + if(description) + acct->description = xstrdup(description); + else + acct->description = xstrdup(name); + + if(organization) + acct->organization = xstrdup(organization); + else if(strcmp(parent, "root")) + acct->organization = xstrdup(parent); + else + acct->organization = xstrdup(name); + + acct->qos = qos; + xstrfmtcat(acct_str, " %s\n", name); + list_append(acct_list, acct); + } + + itr_c = list_iterator_create(cluster_list); + while((cluster = list_next(itr_c))) { + if(sacctmgr_find_account_base_assoc_from_list( + local_assoc_list, name, cluster)) { + //printf(" already have this assoc\n"); + continue; + } + if(!sacctmgr_find_account_base_assoc_from_list( + local_assoc_list, parent, cluster)) { + printf(" error: Parent account '%s' " + "doesn't exist on " + "cluster %s\n" + " Contact your admin " + "to add this account.\n", + parent, cluster); + continue; + } + + assoc = xmalloc(sizeof(acct_association_rec_t)); + assoc->acct = xstrdup(name); + assoc->cluster = xstrdup(cluster); + assoc->parent_acct = xstrdup(parent); + assoc->fairshare = fairshare; + assoc->max_jobs = max_jobs; + assoc->max_nodes_per_job = max_nodes_per_job; + assoc->max_wall_duration_per_job = + max_wall_duration_per_job; + assoc->max_cpu_secs_per_job = + max_cpu_secs_per_job; + if(acct) + list_append(acct->assoc_list, assoc); + else + list_append(assoc_list, assoc); + xstrfmtcat(assoc_str, + " A = %-10.10s" + " C = %-10.10s\n", + assoc->acct, + assoc->cluster); + + } + list_iterator_destroy(itr_c); + } + list_iterator_destroy(itr); + list_destroy(local_account_list); + list_destroy(local_assoc_list); + list_destroy(name_list); + list_destroy(cluster_list); + + if(!list_count(acct_list) && !list_count(assoc_list)) { + printf(" Nothing new added.\n"); + goto end_it; + } else if(!assoc_str) { + printf(" Error: no associations created.\n"); + goto end_it; + } + + if(acct_str) { + printf(" Adding Account(s)\n%s", acct_str); + printf(" Settings\n"); + if(description) + printf(" Description = %s\n", description); + else + printf(" Description = %s\n", "Account Name"); + + if(organization) + printf(" Organization = %s\n", organization); + else + printf(" Organization = %s\n", + "Parent/Account Name"); + + if(qos != ACCT_QOS_NOTSET) + printf(" Qos = %s\n", acct_qos_str(qos)); + xfree(acct_str); + } + + if(assoc_str) { + printf(" Associations\n%s", assoc_str); + xfree(assoc_str); + } + + if(limit_set) { + printf(" Settings\n"); + if(fairshare == INFINITE) + printf(" Fairshare = NONE\n"); + else if(fairshare != NO_VAL) + printf(" Fairshare = %u\n", fairshare); + + if(max_cpu_secs_per_job == INFINITE) + printf(" MaxCPUSecs = NONE\n"); + else if(max_cpu_secs_per_job != NO_VAL) + printf(" MaxCPUSecs = %u\n", + max_cpu_secs_per_job); + + if(max_jobs == INFINITE) + printf(" MaxJobs = NONE\n"); + else if(max_jobs != NO_VAL) + printf(" MaxJobs = %u\n", max_jobs); + + if(max_nodes_per_job == INFINITE) + printf(" MaxNodes = NONE\n"); + else if(max_nodes_per_job != NO_VAL) + printf(" MaxNodes = %u\n", max_nodes_per_job); + + if(max_wall_duration_per_job == 
INFINITE) + printf(" MaxWall = NONE\n"); + else if(max_wall_duration_per_job != NO_VAL) { + char time_buf[32]; + mins2time_str((time_t) max_wall_duration_per_job, + time_buf, sizeof(time_buf)); + printf(" MaxWall = %s\n", time_buf); + } + } + + notice_thread_init(); + if(list_count(acct_list)) + rc = acct_storage_g_add_accounts(db_conn, my_uid, acct_list); + + + if(rc == SLURM_SUCCESS) { + if(list_count(assoc_list)) + rc = acct_storage_g_add_associations(db_conn, my_uid, + assoc_list); + } else { + printf(" error: Problem adding accounts\n"); + rc = SLURM_ERROR; + notice_thread_fini(); + goto end_it; + } + notice_thread_fini(); + + if(rc == SLURM_SUCCESS) { + if(commit_check("Would you like to commit changes?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } else { + printf(" error: Problem adding account associations\n"); + rc = SLURM_ERROR; + } + +end_it: + list_destroy(acct_list); + list_destroy(assoc_list); + + xfree(parent); + xfree(description); + xfree(organization); + + return rc; +} + +extern int sacctmgr_list_account(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_account_cond_t *acct_cond = + xmalloc(sizeof(acct_account_cond_t)); + List acct_list; + int i=0; + ListIterator itr = NULL; + ListIterator itr2 = NULL; + acct_account_rec_t *acct = NULL; + acct_association_rec_t *assoc = NULL; + char *object; + + print_field_t *field = NULL; + + List format_list = list_create(slurm_destroy_char); + List print_fields_list; /* types are of print_field_t */ + + enum { + PRINT_ACCOUNT, + PRINT_CLUSTER, + PRINT_DESC, + PRINT_FAIRSHARE, + PRINT_ID, + PRINT_MAXC, + PRINT_MAXJ, + PRINT_MAXN, + PRINT_MAXW, + PRINT_ORG, + PRINT_QOS, + PRINT_PID, + PRINT_PNAME, + PRINT_PART, + PRINT_USER + }; + + acct_cond->acct_list = list_create(slurm_destroy_char); + acct_cond->description_list = list_create(slurm_destroy_char); + acct_cond->organization_list = list_create(slurm_destroy_char); + acct_cond->with_assocs = with_assoc_flag; + + acct_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + acct_cond->assoc_cond->user_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->acct_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->partition_list = list_create(slurm_destroy_char); + + _set_cond(&i, argc, argv, acct_cond, format_list); + + if(!list_count(format_list)) { + addto_char_list(format_list, "A,D,O,Q"); + if(acct_cond->with_assocs) + addto_char_list(format_list, + "C,ParentN,U,F,MaxC,MaxJ,MaxN,MaxW"); + + } + acct_list = acct_storage_g_get_accounts(db_conn, acct_cond); + destroy_acct_account_cond(acct_cond); + + if(!acct_list) { + list_destroy(format_list); + return SLURM_ERROR; + } + print_fields_list = list_create(destroy_print_field); + + itr = list_iterator_create(format_list); + while((object = list_next(itr))) { + field = xmalloc(sizeof(print_field_t)); + if(!strncasecmp("Account", object, 1)) { + field->type = PRINT_ACCOUNT; + field->name = xstrdup("Account"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("Cluster", object, 1)) { + field->type = PRINT_CLUSTER; + field->name = xstrdup("Cluster"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("Description", object, 1)) { + field->type = PRINT_DESC; + field->name = xstrdup("Descr"); + field->len = 20; + field->print_routine = print_str; + } else if(!strncasecmp("FairShare", object, 
1)) { + field->type = PRINT_FAIRSHARE; + field->name = xstrdup("FairShare"); + field->len = 9; + field->print_routine = print_uint; + } else if(!strncasecmp("ID", object, 1)) { + field->type = PRINT_ID; + field->name = xstrdup("ID"); + field->len = 6; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxCPUSecs", object, 4)) { + field->type = PRINT_MAXC; + field->name = xstrdup("MaxCPUSecs"); + field->len = 11; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxJobs", object, 4)) { + field->type = PRINT_MAXJ; + field->name = xstrdup("MaxJobs"); + field->len = 7; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxNodes", object, 4)) { + field->type = PRINT_MAXN; + field->name = xstrdup("MaxNodes"); + field->len = 8; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxWall", object, 4)) { + field->type = PRINT_MAXW; + field->name = xstrdup("MaxWall"); + field->len = 11; + field->print_routine = print_time; + } else if(!strncasecmp("Organization", object, 1)) { + field->type = PRINT_ORG; + field->name = xstrdup("Org"); + field->len = 20; + field->print_routine = print_str; + } else if(!strncasecmp("QOS", object, 1)) { + field->type = PRINT_QOS; + field->name = xstrdup("QOS"); + field->len = 9; + field->print_routine = print_str; + } else if(!strncasecmp("ParentID", object, 7)) { + field->type = PRINT_PID; + field->name = xstrdup("Par ID"); + field->len = 6; + field->print_routine = print_uint; + } else if(!strncasecmp("ParentName", object, 7)) { + field->type = PRINT_PNAME; + field->name = xstrdup("Par Name"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("User", object, 1)) { + field->type = PRINT_USER; + field->name = xstrdup("User"); + field->len = 10; + field->print_routine = print_str; + } else { + printf("Unknown field '%s'\n", object); + xfree(field); + continue; + } + list_append(print_fields_list, field); + } + list_iterator_destroy(itr); + + itr = list_iterator_create(acct_list); + itr2 = list_iterator_create(print_fields_list); + print_header(print_fields_list); + + while((acct = list_next(itr))) { + if(acct->assoc_list && list_count(acct->assoc_list)) { + ListIterator itr3 = + list_iterator_create(acct->assoc_list); + + while((assoc = list_next(itr3))) { + while((field = list_next(itr2))) { + switch(field->type) { + case PRINT_ACCOUNT: + field->print_routine( + SLURM_PRINT_VALUE, + field, acct->name); + break; + case PRINT_CLUSTER: + field->print_routine( + SLURM_PRINT_VALUE, + field, assoc->cluster); + break; + case PRINT_DESC: + field->print_routine( + SLURM_PRINT_VALUE, + field, + acct->description); + break; + case PRINT_FAIRSHARE: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->fairshare); + break; + case PRINT_ID: + field->print_routine( + SLURM_PRINT_VALUE, + field, assoc->id); + break; + case PRINT_MAXC: + field->print_routine( + SLURM_PRINT_VALUE, + field, assoc-> + max_cpu_secs_per_job); + break; + case PRINT_MAXJ: + field->print_routine( + SLURM_PRINT_VALUE, + field, assoc->max_jobs); + break; + case PRINT_MAXN: + field->print_routine( + SLURM_PRINT_VALUE, + field, assoc-> + max_nodes_per_job); + break; + case PRINT_MAXW: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc-> + max_wall_duration_per_job); + break; + case PRINT_ORG: + field->print_routine( + SLURM_PRINT_VALUE, + field, + acct->organization); + break; + case PRINT_QOS: + field->print_routine( + SLURM_PRINT_VALUE, + field, + acct_qos_str(acct->qos)); + break; + case PRINT_PID: + 
field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->parent_id); + break; + case PRINT_PNAME: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->parent_acct); + break; + case PRINT_PART: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->partition); + break; + case PRINT_USER: + field->print_routine( + SLURM_PRINT_VALUE, + field, assoc->user); + break; + default: + break; + } + } + list_iterator_reset(itr2); + printf("\n"); + } + list_iterator_destroy(itr3); + } else { + while((field = list_next(itr2))) { + switch(field->type) { + case PRINT_ACCOUNT: + field->print_routine( + SLURM_PRINT_VALUE, + field, acct->name); + break; + case PRINT_CLUSTER: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_DESC: + field->print_routine( + SLURM_PRINT_VALUE, + field, acct->description); + break; + case PRINT_FAIRSHARE: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_ID: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_MAXC: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_MAXJ: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_MAXN: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_MAXW: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_ORG: + field->print_routine( + SLURM_PRINT_VALUE, + field, acct->organization); + break; + case PRINT_QOS: + field->print_routine( + SLURM_PRINT_VALUE, + field, acct_qos_str(acct->qos)); + break; + case PRINT_PID: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_PNAME: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_PART: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + case PRINT_USER: + field->print_routine( + SLURM_PRINT_VALUE, + field, NULL); + break; + default: + break; + } + } + list_iterator_reset(itr2); + printf("\n"); + } + } + + printf("\n"); + + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + list_destroy(acct_list); + list_destroy(print_fields_list); + + return rc; +} + +extern int sacctmgr_modify_account(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_account_cond_t *acct_cond = + xmalloc(sizeof(acct_account_cond_t)); + acct_account_rec_t *acct = xmalloc(sizeof(acct_account_rec_t)); + acct_association_rec_t *assoc = xmalloc(sizeof(acct_association_rec_t)); + + int i=0; + int cond_set = 0, rec_set = 0, set = 0; + List ret_list = NULL; + + acct_cond->acct_list = list_create(slurm_destroy_char); + acct_cond->description_list = list_create(slurm_destroy_char); + acct_cond->organization_list = list_create(slurm_destroy_char); + + acct_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + acct_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->acct_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->fairshare = NO_VAL; + acct_cond->assoc_cond->max_cpu_secs_per_job = NO_VAL; + acct_cond->assoc_cond->max_jobs = NO_VAL; + acct_cond->assoc_cond->max_nodes_per_job = NO_VAL; + acct_cond->assoc_cond->max_wall_duration_per_job = NO_VAL; + + assoc->fairshare = NO_VAL; + assoc->max_cpu_secs_per_job = NO_VAL; + assoc->max_jobs = NO_VAL; + assoc->max_nodes_per_job = NO_VAL; + assoc->max_wall_duration_per_job = NO_VAL; + + for (i=0; i<argc; i++) { + if (strncasecmp (argv[i], "Where", 5) == 0) { + i++; + cond_set = _set_cond(&i, argc, 
argv, acct_cond, NULL); + } else if (strncasecmp (argv[i], "Set", 3) == 0) { + i++; + rec_set = _set_rec(&i, argc, argv, acct, assoc); + } else { + cond_set = _set_cond(&i, argc, argv, acct_cond, NULL); + } + } + + if(!rec_set) { + printf(" You didn't give me anything to set\n"); + destroy_acct_account_cond(acct_cond); + destroy_acct_account_rec(acct); + destroy_acct_association_rec(assoc); + return SLURM_ERROR; + } else if(!cond_set) { + if(!commit_check("You didn't set any conditions with 'WHERE'.\n" + "Are you sure you want to continue?")) { + printf("Aborted\n"); + destroy_acct_account_cond(acct_cond); + destroy_acct_account_rec(acct); + destroy_acct_association_rec(assoc); + return SLURM_SUCCESS; + } + } + + notice_thread_init(); + if(rec_set == 3 || rec_set == 1) { // process the account changes + if(cond_set == 2) { + rc = SLURM_ERROR; + goto assoc_start; + } + ret_list = acct_storage_g_modify_accounts( + db_conn, my_uid, acct_cond, acct); + if(ret_list && list_count(ret_list)) { + char *object = NULL; + ListIterator itr = list_iterator_create(ret_list); + printf(" Modified accounts...\n"); + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + } else if(ret_list) { + printf(" Nothing modified\n"); + rc = SLURM_ERROR; + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + if(ret_list) + list_destroy(ret_list); + } + +assoc_start: + if(rec_set == 3 || rec_set == 2) { // process the association changes + ret_list = acct_storage_g_modify_associations( + db_conn, my_uid, acct_cond->assoc_cond, assoc); + + if(ret_list && list_count(ret_list)) { + char *object = NULL; + ListIterator itr = list_iterator_create(ret_list); + printf(" Modified account associations...\n"); + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + set = 1; + } else if(ret_list) { + printf(" Nothing modified\n"); + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + if(ret_list) + list_destroy(ret_list); + } + + notice_thread_fini(); + if(set) { + if(commit_check("Would you like to commit changes?")) + acct_storage_g_commit(db_conn, 1); + else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } + destroy_acct_account_cond(acct_cond); + destroy_acct_account_rec(acct); + destroy_acct_association_rec(assoc); + + return rc; +} + +extern int sacctmgr_delete_account(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_account_cond_t *acct_cond = + xmalloc(sizeof(acct_account_cond_t)); + int i=0; + List ret_list = NULL; + int set = 0; + + acct_cond->acct_list = list_create(slurm_destroy_char); + acct_cond->description_list = list_create(slurm_destroy_char); + acct_cond->organization_list = list_create(slurm_destroy_char); + + acct_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + acct_cond->assoc_cond->user_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->acct_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char); + acct_cond->assoc_cond->partition_list = + list_create(slurm_destroy_char); + + if(!(set = _set_cond(&i, argc, argv, acct_cond, NULL))) { + printf(" No conditions given to remove, not executing.\n"); + destroy_acct_account_cond(acct_cond); + return SLURM_ERROR; + } + + notice_thread_init(); + if(set == 1) { + ret_list = acct_storage_g_remove_accounts( + db_conn, my_uid, acct_cond); + } else if(set == 2) { + ret_list = acct_storage_g_remove_associations( + db_conn, 
my_uid, acct_cond->assoc_cond); + } + notice_thread_fini(); + destroy_acct_account_cond(acct_cond); + + if(ret_list && list_count(ret_list)) { + char *object = NULL; + ListIterator itr = list_iterator_create(ret_list); + if(set == 1) { + printf(" Deleting accounts...\n"); + } else if(set == 2) { + printf(" Deleting account associations...\n"); + } + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + if(commit_check("Would you like to commit changes?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } else if(ret_list) { + printf(" Nothing deleted\n"); + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + if(ret_list) + list_destroy(ret_list); + + return rc; +} diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c new file mode 100644 index 000000000..cec4ace3a --- /dev/null +++ b/src/sacctmgr/association_functions.c @@ -0,0 +1,402 @@ +/*****************************************************************************\ + * association_functions.c - functions dealing with associations in the + * accounting system. + ***************************************************************************** + * Copyright (C) 2002-2008 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#include "src/sacctmgr/sacctmgr.h" +#include "src/sacctmgr/print.h" + +static int _set_cond(int *start, int argc, char *argv[], + acct_association_cond_t *association_cond, + List format_list) +{ + int i, end = 0; + int set = 0; + + for (i=(*start); i<argc; i++) { + end = parse_option_end(argv[i]); + if(!end) { + addto_char_list(association_cond->id_list, argv[i]); + set = 1; + } else if (strncasecmp (argv[i], "Id", 1) == 0) { + addto_char_list(association_cond->id_list, argv[i]+end); + set = 1; + } else if (strncasecmp (argv[i], "Associations", 2) == 0) { + addto_char_list(association_cond->id_list, argv[i]+end); + set = 1; + } else if (strncasecmp (argv[i], "Users", 1) == 0) { + addto_char_list(association_cond->user_list, + argv[i]+end); + set = 1; + } else if (strncasecmp (argv[i], "Accounts", 2) == 0) { + addto_char_list(association_cond->acct_list, + argv[i]+end); + set = 1; + } else if (strncasecmp (argv[i], "Clusters", 1) == 0) { + addto_char_list(association_cond->cluster_list, + argv[i]+end); + set = 1; + } else if (strncasecmp (argv[i], "Format", 1) == 0) { + if(format_list) + addto_char_list(format_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Partitions", 4) == 0) { + addto_char_list(association_cond->partition_list, + argv[i]+end); + set = 1; + } else if (strncasecmp (argv[i], "Parent", 4) == 0) { + association_cond->parent_acct = + strip_quotes(argv[i]+end, NULL); + set = 1; + } else { + printf(" Unknown condition: %s\n", argv[i]); + } + } + (*start) = i; + + return set; +} + +/* static void _print_cond(acct_association_cond_t *association_cond) */ +/* { */ +/* ListIterator itr = NULL; */ +/* char *tmp_char = NULL; */ + +/* if(!association_cond) { */ +/* error("no acct_association_cond_t * given"); */ +/* return; */ +/* } */ + +/* if(association_cond->id_list && list_count(association_cond->id_list)) { */ +/* itr = list_iterator_create(association_cond->id_list); */ +/* printf(" Id = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(association_cond->user_list */ +/* && list_count(association_cond->user_list)) { */ +/* itr = list_iterator_create(association_cond->user_list); */ +/* printf(" User = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(association_cond->acct_list */ +/* && list_count(association_cond->acct_list)) { */ +/* itr = list_iterator_create(association_cond->acct_list); */ +/* printf(" Account = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(association_cond->cluster_list */ +/* && list_count(association_cond->cluster_list)) { */ +/* itr = list_iterator_create(association_cond->cluster_list); */ +/* printf(" Cluster = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(association_cond->partition_list */ +/* && list_count(association_cond->partition_list)) { */ +/* itr = list_iterator_create(association_cond->partition_list); */ +/* printf(" Partition = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(association_cond->parent_account) */ +/* printf(" Parent = %s\n", association_cond->parent_account); */ + 
+/* } */ + +/* static void _print_rec(acct_association_rec_t *association) */ +/* { */ +/* if(!association) { */ +/* error("no acct_association_rec_t * given"); */ +/* return; */ +/* } */ + +/* if(association->id) */ +/* printf(" Id = %u\n", association->id); */ + +/* if(association->user) */ +/* printf(" User = %s\n", association->user); */ +/* if(association->account) */ +/* printf(" Account = %s\n", association->account); */ +/* if(association->cluster) */ +/* printf(" Cluster = %s\n", association->cluster); */ +/* if(association->partition) */ +/* printf(" Partition = %s\n", association->partition); */ +/* if(association->parent_account) */ +/* printf(" Parent = %s\n", association->parent_account); */ +/* if(association->fairshare) */ +/* printf(" FairShare = %u\n", association->fairshare); */ +/* if(association->max_jobs) */ +/* printf(" MaxJobs = %u\n", association->max_jobs); */ +/* if(association->max_nodes_per_job) */ +/* printf(" MaxNodes = %u\n", association->max_nodes_per_job); */ +/* if(association->max_wall_duration_per_job) { */ +/* char time_buf[32]; */ +/* mins2time_str((time_t) association->max_wall_duration_per_job, */ +/* time_buf, sizeof(time_buf)); */ +/* printf(" MaxWall = %s\n", time_buf); */ +/* } */ +/* if(association->max_cpu_seconds_per_job) */ +/* printf(" MaxCPUSecs = %u\n", */ +/* association->max_cpu_seconds_per_job); */ +/* } */ + +/* extern int sacctmgr_add_association(int argc, char *argv[]) */ +/* { */ +/* int rc = SLURM_SUCCESS; */ + +/* return rc; */ +/* } */ + +extern int sacctmgr_list_association(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_association_cond_t *assoc_cond = + xmalloc(sizeof(acct_association_cond_t)); + List assoc_list = NULL; + acct_association_rec_t *assoc = NULL; + int i=0; + ListIterator itr = NULL; + ListIterator itr2 = NULL; + char *object; + + print_field_t *field = NULL; + + List format_list = list_create(slurm_destroy_char); + List print_fields_list; /* types are of print_field_t */ + + enum { + PRINT_ACCOUNT, + PRINT_CLUSTER, + PRINT_FAIRSHARE, + PRINT_ID, + PRINT_MAXC, + PRINT_MAXJ, + PRINT_MAXN, + PRINT_MAXW, + PRINT_PID, + PRINT_PNAME, + PRINT_PART, + PRINT_USER + }; + + assoc_cond->id_list = list_create(slurm_destroy_char); + assoc_cond->user_list = list_create(slurm_destroy_char); + assoc_cond->acct_list = list_create(slurm_destroy_char); + assoc_cond->cluster_list = list_create(slurm_destroy_char); + + _set_cond(&i, argc, argv, assoc_cond, format_list); + + assoc_list = acct_storage_g_get_associations(db_conn, assoc_cond); + destroy_acct_association_cond(assoc_cond); + + if(!assoc_list) { + list_destroy(format_list); + return SLURM_ERROR; + } + print_fields_list = list_create(destroy_print_field); + + if(!list_count(format_list)) + addto_char_list(format_list, "C,A,U,F,MaxC,MaxJ,MaxN,MaxW"); + + itr = list_iterator_create(format_list); + while((object = list_next(itr))) { + field = xmalloc(sizeof(print_field_t)); + if(!strncasecmp("Account", object, 1)) { + field->type = PRINT_ACCOUNT; + field->name = xstrdup("Account"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("Cluster", object, 1)) { + field->type = PRINT_CLUSTER; + field->name = xstrdup("Cluster"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("FairShare", object, 1)) { + field->type = PRINT_FAIRSHARE; + field->name = xstrdup("FairShare"); + field->len = 9; + field->print_routine = print_uint; + } else if(!strncasecmp("ID", object, 1)) { + field->type = PRINT_ID; + field->name = 
xstrdup("ID"); + field->len = 6; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxCPUSecs", object, 4)) { + field->type = PRINT_MAXC; + field->name = xstrdup("MaxCPUSecs"); + field->len = 11; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxJobs", object, 4)) { + field->type = PRINT_MAXJ; + field->name = xstrdup("MaxJobs"); + field->len = 7; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxNodes", object, 4)) { + field->type = PRINT_MAXN; + field->name = xstrdup("MaxNodes"); + field->len = 8; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxWall", object, 4)) { + field->type = PRINT_MAXW; + field->name = xstrdup("MaxWall"); + field->len = 11; + field->print_routine = print_time; + } else if(!strncasecmp("ParentID", object, 7)) { + field->type = PRINT_PID; + field->name = xstrdup("Par ID"); + field->len = 6; + field->print_routine = print_uint; + } else if(!strncasecmp("ParentName", object, 7)) { + field->type = PRINT_PNAME; + field->name = xstrdup("Par Name"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("Partition", object, 4)) { + field->type = PRINT_PART; + field->name = xstrdup("Partition"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("User", object, 1)) { + field->type = PRINT_USER; + field->name = xstrdup("User"); + field->len = 10; + field->print_routine = print_str; + } else { + printf("Unknown field '%s'\n", object); + xfree(field); + continue; + } + list_append(print_fields_list, field); + } + list_iterator_destroy(itr); + + itr = list_iterator_create(assoc_list); + itr2 = list_iterator_create(print_fields_list); + print_header(print_fields_list); + + while((assoc = list_next(itr))) { + while((field = list_next(itr2))) { + switch(field->type) { + case PRINT_ACCOUNT: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->acct); + break; + case PRINT_CLUSTER: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->cluster); + break; + case PRINT_FAIRSHARE: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->fairshare); + break; + case PRINT_ID: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->id); + break; + case PRINT_MAXC: + field->print_routine( + SLURM_PRINT_VALUE, field, + assoc->max_cpu_secs_per_job); + break; + case PRINT_MAXJ: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->max_jobs); + break; + case PRINT_MAXN: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->max_nodes_per_job); + break; + case PRINT_MAXW: + field->print_routine( + SLURM_PRINT_VALUE, field, + assoc->max_wall_duration_per_job); + break; + case PRINT_PID: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->parent_id); + break; + case PRINT_PNAME: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->parent_acct); + break; + case PRINT_PART: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->partition); + break; + case PRINT_USER: + field->print_routine(SLURM_PRINT_VALUE, field, + assoc->user); + break; + default: + break; + } + } + list_iterator_reset(itr2); + printf("\n"); + } + + printf("\n"); + + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + list_destroy(assoc_list); + list_destroy(print_fields_list); + return rc; +} + +/* extern int sacctmgr_modify_association(int argc, char *argv[]) */ +/* { */ +/* int rc = SLURM_SUCCESS; */ +/* return rc; */ +/* } */ + +/* extern int sacctmgr_delete_association(int argc, char *argv[]) */ +/* { */ +/* int rc = SLURM_SUCCESS; */ +/* return rc; */ +/* } */ diff 
--git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c new file mode 100644 index 000000000..8a099cbce --- /dev/null +++ b/src/sacctmgr/cluster_functions.c @@ -0,0 +1,646 @@ +/*****************************************************************************\ + * cluster_functions.c - functions dealing with clusters in the + * accounting system. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#include "src/sacctmgr/sacctmgr.h" +#include "src/sacctmgr/print.h" + +static int _set_cond(int *start, int argc, char *argv[], + List cluster_list, + List format_list) +{ + int i; + int set = 0; + int end = 0; + + for (i=(*start); i<argc; i++) { + end = parse_option_end(argv[i]); + if (strncasecmp (argv[i], "Set", 3) == 0) { + i--; + break; + } else if(!end) { + addto_char_list(cluster_list, argv[i]); + set = 1; + } else if (strncasecmp (argv[i], "Format", 1) == 0) { + if(format_list) + addto_char_list(format_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Names", 1) == 0) { + addto_char_list(cluster_list, + argv[i]+end); + set = 1; + } else { + printf(" Unknown condition: %s\n" + "Use keyword set to modify value\n", argv[i]); + } + } + (*start) = i; + + return set; +} + +static int _set_rec(int *start, int argc, char *argv[], + acct_association_rec_t *assoc) +{ + int i, mins; + int set = 0; + int end = 0; + + for (i=(*start); i<argc; i++) { + end = parse_option_end(argv[i]); + if (strncasecmp (argv[i], "Where", 5) == 0) { + i--; + break; + } else if(!end) { + printf(" Bad format on %s: End your option with " + "an '=' sign\n", argv[i]); + } else if (strncasecmp (argv[i], "FairShare", 1) == 0) { + if (get_uint(argv[i]+end, &assoc->fairshare, + "FairShare") == SLURM_SUCCESS) + set = 1; + } else if (strncasecmp (argv[i], "MaxJobs", 4) == 0) { + if (get_uint(argv[i]+end, &assoc->max_jobs, + "MaxJobs") == SLURM_SUCCESS) + set = 1; + } else if (strncasecmp (argv[i], "MaxNodes", 4) == 0) { + if (get_uint(argv[i]+end, + &assoc->max_nodes_per_job, + "MaxNodes") == SLURM_SUCCESS) + set = 1; + } else if (strncasecmp (argv[i], "MaxWall", 4) == 0) { + mins = time_str2mins(argv[i]+end); + if (mins != NO_VAL) { + assoc->max_wall_duration_per_job + = (uint32_t) mins; + set = 1; + } else { + printf(" Bad MaxWall time format: %s\n", + argv[i]); + } + } else if (strncasecmp (argv[i], "MaxCPUSecs", 4) == 0) { + if (get_uint(argv[i]+end, + &assoc->max_cpu_secs_per_job, + "MaxCPUSecs") == SLURM_SUCCESS) + set = 1; + } else { + printf(" Unknown option: %s\n" + " Use keyword 'where' to modify condition\n", + argv[i]); + } + } + (*start) = i; + + return set; + +} + +extern int sacctmgr_add_cluster(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + int i = 0, mins; + acct_cluster_rec_t *cluster = NULL; + List name_list = list_create(slurm_destroy_char); + List cluster_list = NULL; + uint32_t fairshare = NO_VAL; + uint32_t max_cpu_secs_per_job = NO_VAL; + uint32_t max_jobs = NO_VAL; + uint32_t max_nodes_per_job = NO_VAL; + uint32_t max_wall_duration_per_job = NO_VAL; + int limit_set = 0; + ListIterator itr = NULL, itr_c = NULL; + char *name = NULL; + + for (i=0; i<argc; i++) { + int end = parse_option_end(argv[i]); + if(!end) { + addto_char_list(name_list, argv[i]+end); + } else if (strncasecmp (argv[i], "FairShare", 1) == 0) { + fairshare = atoi(argv[i]+end); + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxCPUSecs", 4) == 0) { + max_cpu_secs_per_job = atoi(argv[i]+end); + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxJobs=", 4) == 0) { + max_jobs = atoi(argv[i]+end); + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxNodes", 4) == 0) { + max_nodes_per_job = atoi(argv[i]+end); + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxWall", 4) == 0) { + mins = time_str2mins(argv[i]+end); + if (mins != NO_VAL) { + max_wall_duration_per_job = (uint32_t) mins; + limit_set = 1; + } else { + printf(" Bad 
MaxWall time format: %s\n", + argv[i]); + } + } else if (strncasecmp (argv[i], "Names", 1) == 0) { + addto_char_list(name_list, argv[i]+end); + } else { + printf(" Unknown option: %s\n", argv[i]); + } + } + + if(!list_count(name_list)) { + list_destroy(name_list); + printf(" Need name of cluster to add.\n"); + return SLURM_ERROR; + } else { + List temp_list = NULL; + acct_cluster_cond_t cluster_cond; + char *name = NULL; + + memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t)); + cluster_cond.cluster_list = name_list; + + temp_list = acct_storage_g_get_clusters(db_conn, &cluster_cond); + if(!temp_list) { + printf(" Problem getting clusters from database. " + "Contact your admin.\n"); + return SLURM_ERROR; + } + + itr_c = list_iterator_create(name_list); + itr = list_iterator_create(temp_list); + while((name = list_next(itr_c))) { + acct_cluster_rec_t *cluster_rec = NULL; + + list_iterator_reset(itr); + while((cluster_rec = list_next(itr))) { + if(!strcasecmp(cluster_rec->name, name)) + break; + } + if(cluster_rec) { + printf(" This cluster %s already exists. " + "Not adding.\n", name); + list_delete_item(itr_c); + } + } + list_iterator_destroy(itr); + list_iterator_destroy(itr_c); + list_destroy(temp_list); + if(!list_count(name_list)) { + list_destroy(name_list); + return SLURM_ERROR; + } + } + + printf(" Adding Cluster(s)\n"); + cluster_list = list_create(destroy_acct_cluster_rec); + itr = list_iterator_create(name_list); + while((name = list_next(itr))) { + cluster = xmalloc(sizeof(acct_cluster_rec_t)); + cluster->name = xstrdup(name); + list_append(cluster_list, cluster); + + printf(" Name = %s\n", cluster->name); + + cluster->default_fairshare = fairshare; + cluster->default_max_cpu_secs_per_job = max_cpu_secs_per_job; + cluster->default_max_jobs = max_jobs; + cluster->default_max_nodes_per_job = max_nodes_per_job; + cluster->default_max_wall_duration_per_job = + max_wall_duration_per_job; + } + list_iterator_destroy(itr); + list_destroy(name_list); + + if(limit_set) { + printf(" User Defaults\n"); + if(fairshare == INFINITE) + printf(" Fairshare = NONE\n"); + else if(fairshare != NO_VAL) + printf(" Fairshare = %u\n", fairshare); + + if(max_cpu_secs_per_job == INFINITE) + printf(" MaxCPUSecs = NONE\n"); + else if(max_cpu_secs_per_job != NO_VAL) + printf(" MaxCPUSecs = %u\n", + max_cpu_secs_per_job); + + if(max_jobs == INFINITE) + printf(" MaxJobs = NONE\n"); + else if(max_jobs != NO_VAL) + printf(" MaxJobs = %u\n", max_jobs); + + if(max_nodes_per_job == INFINITE) + printf(" MaxNodes = NONE\n"); + else if(max_nodes_per_job != NO_VAL) + printf(" MaxNodes = %u\n", max_nodes_per_job); + + if(max_wall_duration_per_job == INFINITE) + printf(" MaxWall = NONE\n"); + else if(max_wall_duration_per_job != NO_VAL) { + char time_buf[32]; + mins2time_str((time_t) max_wall_duration_per_job, + time_buf, sizeof(time_buf)); + printf(" MaxWall = %s\n", time_buf); + } + } + + if(!list_count(cluster_list)) { + printf(" Nothing new added.\n"); + goto end_it; + } + + notice_thread_init(); + rc = acct_storage_g_add_clusters(db_conn, my_uid, cluster_list); + notice_thread_fini(); + if(rc == SLURM_SUCCESS) { + if(commit_check("Would you like to commit changes?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } else { + printf(" error: problem adding clusters\n"); + } +end_it: + list_destroy(cluster_list); + + return rc; +} + +extern int sacctmgr_list_cluster(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + 
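/* Listing flow: _set_cond() fills cluster_cond->cluster_list from the
 * command line, acct_storage_g_get_clusters() fetches the matching
 * records, a print_field_t list is built from any Format= request
 * (defaulting to "Cl,Controlh,Controlp,F,MaxC,MaxJ,MaxN,MaxW"), and
 * each cluster is then printed one row at a time through the per-field
 * print routines from print.c. */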
acct_cluster_cond_t *cluster_cond = + xmalloc(sizeof(acct_cluster_cond_t)); + List cluster_list; + int i=0; + ListIterator itr = NULL; + ListIterator itr2 = NULL; + acct_cluster_rec_t *cluster = NULL; + char *object; + + print_field_t *field = NULL; + + List format_list = list_create(slurm_destroy_char); + List print_fields_list; /* types are of print_field_t */ + + enum { + PRINT_CLUSTER, + PRINT_CHOST, + PRINT_CPORT, + PRINT_FAIRSHARE, + PRINT_MAXC, + PRINT_MAXJ, + PRINT_MAXN, + PRINT_MAXW + }; + + + cluster_cond->cluster_list = list_create(slurm_destroy_char); + _set_cond(&i, argc, argv, cluster_cond->cluster_list, format_list); + + cluster_list = acct_storage_g_get_clusters(db_conn, cluster_cond); + destroy_acct_cluster_cond(cluster_cond); + + if(!cluster_list) { + list_destroy(format_list); + return SLURM_ERROR; + } + + print_fields_list = list_create(destroy_print_field); + + if(!list_count(format_list)) { + addto_char_list(format_list, + "Cl,Controlh,Controlp,F,MaxC,MaxJ,MaxN,MaxW"); + } + + itr = list_iterator_create(format_list); + while((object = list_next(itr))) { + field = xmalloc(sizeof(print_field_t)); + if(!strncasecmp("Cluster", object, 2)) { + field->type = PRINT_CLUSTER; + field->name = xstrdup("Cluster"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("ControlHost", object, 8)) { + field->type = PRINT_CHOST; + field->name = xstrdup("Control Host"); + field->len = 12; + field->print_routine = print_str; + } else if(!strncasecmp("ControlPort", object, 8)) { + field->type = PRINT_CPORT; + field->name = xstrdup("Control Port"); + field->len = 12; + field->print_routine = print_uint; + } else if(!strncasecmp("FairShare", object, 1)) { + field->type = PRINT_FAIRSHARE; + field->name = xstrdup("FairShare"); + field->len = 9; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxCPUSecs", object, 4)) { + field->type = PRINT_MAXC; + field->name = xstrdup("MaxCPUSecs"); + field->len = 11; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxJobs", object, 4)) { + field->type = PRINT_MAXJ; + field->name = xstrdup("MaxJobs"); + field->len = 7; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxNodes", object, 4)) { + field->type = PRINT_MAXN; + field->name = xstrdup("MaxNodes"); + field->len = 8; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxWall", object, 4)) { + field->type = PRINT_MAXW; + field->name = xstrdup("MaxWall"); + field->len = 11; + field->print_routine = print_time; + } else { + printf("Unknown field '%s'\n", object); + xfree(field); + continue; + } + list_append(print_fields_list, field); + } + list_iterator_destroy(itr); + + itr = list_iterator_create(cluster_list); + itr2 = list_iterator_create(print_fields_list); + print_header(print_fields_list); + + while((cluster = list_next(itr))) { + while((field = list_next(itr2))) { + switch(field->type) { + case PRINT_CLUSTER: + field->print_routine(SLURM_PRINT_VALUE, field, + cluster->name); + break; + case PRINT_CHOST: + field->print_routine(SLURM_PRINT_VALUE, field, + cluster->control_host); + break; + case PRINT_CPORT: + field->print_routine(SLURM_PRINT_VALUE, field, + cluster->control_port); + break; + case PRINT_FAIRSHARE: + field->print_routine( + SLURM_PRINT_VALUE, field, + cluster->default_fairshare); + break; + case PRINT_MAXC: + field->print_routine( + SLURM_PRINT_VALUE, field, + cluster->default_max_cpu_secs_per_job); + break; + case PRINT_MAXJ: + field->print_routine( + SLURM_PRINT_VALUE, field, + 
cluster->default_max_jobs); + break; + case PRINT_MAXN: + field->print_routine( + SLURM_PRINT_VALUE, field, + cluster->default_max_nodes_per_job); + break; + case PRINT_MAXW: + field->print_routine( + SLURM_PRINT_VALUE, field, + cluster-> + default_max_wall_duration_per_job); + break; + default: + break; + } + } + list_iterator_reset(itr2); + printf("\n"); + } + + printf("\n"); + + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + list_destroy(cluster_list); + list_destroy(print_fields_list); + + return rc; +} + +extern int sacctmgr_modify_cluster(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + int i=0; + acct_association_rec_t *assoc = xmalloc(sizeof(acct_association_rec_t)); + acct_association_cond_t *assoc_cond = + xmalloc(sizeof(acct_association_cond_t)); + int cond_set = 0, rec_set = 0, set = 0; + List ret_list = NULL; + + assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + assoc_cond->cluster_list = list_create(slurm_destroy_char); + assoc_cond->acct_list = list_create(NULL); + assoc_cond->fairshare = NO_VAL; + assoc_cond->max_cpu_secs_per_job = NO_VAL; + assoc_cond->max_jobs = NO_VAL; + assoc_cond->max_nodes_per_job = NO_VAL; + assoc_cond->max_wall_duration_per_job = NO_VAL; + + assoc->fairshare = NO_VAL; + assoc->max_cpu_secs_per_job = NO_VAL; + assoc->max_jobs = NO_VAL; + assoc->max_nodes_per_job = NO_VAL; + assoc->max_wall_duration_per_job = NO_VAL; + + for (i=0; i<argc; i++) { + if (strncasecmp (argv[i], "Where", 5) == 0) { + i++; + if(_set_cond(&i, argc, argv, + assoc_cond->cluster_list, NULL)) + cond_set = 1; + } else if (strncasecmp (argv[i], "Set", 3) == 0) { + i++; + if(_set_rec(&i, argc, argv, assoc)) + rec_set = 1; + } else { + if(_set_cond(&i, argc, argv, + assoc_cond->cluster_list, NULL)) + cond_set = 1; + } + } + + if(!rec_set) { + printf(" You didn't give me anything to set\n"); + destroy_acct_association_rec(assoc); + destroy_acct_association_cond(assoc_cond); + return SLURM_ERROR; + } else if(!cond_set) { + if(!commit_check("You didn't set any conditions with 'WHERE'.\n" + "Are you sure you want to continue?")) { + printf("Aborted\n"); + destroy_acct_association_rec(assoc); + destroy_acct_association_cond(assoc); + return SLURM_SUCCESS; + } + } + + printf(" Setting\n"); + if(rec_set) + printf(" User Defaults =\n"); + + if(assoc->fairshare == INFINITE) + printf(" Fairshare = NONE\n"); + else if(assoc->fairshare != NO_VAL) + printf(" Fairshare = %u\n", assoc->fairshare); + + if(assoc->max_cpu_secs_per_job == INFINITE) + printf(" MaxCPUSecs = NONE\n"); + else if(assoc->max_cpu_secs_per_job != NO_VAL) + printf(" MaxCPUSecs = %u\n", + assoc->max_cpu_secs_per_job); + + if(assoc->max_jobs == INFINITE) + printf(" MaxJobs = NONE\n"); + else if(assoc->max_jobs != NO_VAL) + printf(" MaxJobs = %u\n", assoc->max_jobs); + + if(assoc->max_nodes_per_job == INFINITE) + printf(" MaxNodes = NONE\n"); + else if(assoc->max_nodes_per_job != NO_VAL) + printf(" MaxNodes = %u\n", + assoc->max_nodes_per_job); + + if(assoc->max_wall_duration_per_job == INFINITE) + printf(" MaxWall = NONE\n"); + else if(assoc->max_wall_duration_per_job != NO_VAL) { + char time_buf[32]; + mins2time_str((time_t) + assoc->max_wall_duration_per_job, + time_buf, sizeof(time_buf)); + printf(" MaxWall = %s\n", time_buf); + } + + list_append(assoc_cond->acct_list, "root"); + notice_thread_init(); + ret_list = acct_storage_g_modify_associations( + db_conn, my_uid, assoc_cond, assoc); + + if(ret_list && list_count(ret_list)) { + char *object = NULL; + ListIterator itr = 
list_iterator_create(ret_list); + printf(" Modified cluster defaults for associations...\n"); + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + set = 1; + } else if(ret_list) { + printf(" Nothing modified\n"); + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + if(ret_list) + list_destroy(ret_list); + notice_thread_fini(); + + if(set) { + if(commit_check("Would you like to commit changes?")) + acct_storage_g_commit(db_conn, 1); + else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } + destroy_acct_association_cond(assoc_cond); + destroy_acct_association_rec(assoc); + + return rc; +} + +extern int sacctmgr_delete_cluster(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_cluster_cond_t *cluster_cond = + xmalloc(sizeof(acct_cluster_cond_t)); + int i=0; + List ret_list = NULL; + + cluster_cond->cluster_list = list_create(slurm_destroy_char); + + if(!_set_cond(&i, argc, argv, cluster_cond->cluster_list, NULL)) { + printf(" No conditions given to remove, not executing.\n"); + destroy_acct_cluster_cond(cluster_cond); + return SLURM_ERROR; + } + + if(!list_count(cluster_cond->cluster_list)) { + destroy_acct_cluster_cond(cluster_cond); + return SLURM_SUCCESS; + } + notice_thread_init(); + ret_list = acct_storage_g_remove_clusters( + db_conn, my_uid, cluster_cond); + notice_thread_fini(); + + destroy_acct_cluster_cond(cluster_cond); + + if(ret_list && list_count(ret_list)) { + char *object = NULL; + ListIterator itr = list_iterator_create(ret_list); + printf(" Deleting clusters...\n"); + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + if(commit_check("Would you like to commit changes?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } else if(ret_list) { + printf(" Nothing deleted\n"); + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + if(ret_list) + list_destroy(ret_list); + + return rc; +} diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c new file mode 100644 index 000000000..36b204125 --- /dev/null +++ b/src/sacctmgr/common.c @@ -0,0 +1,605 @@ +/*****************************************************************************\ + * common.c - definitions for functions common to all modules in sacctmgr. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/sacctmgr/sacctmgr.h" +#include <unistd.h> +#include <termios.h> + +#define FORMAT_STRING_SIZE 32 + +static pthread_t lock_warning_thread; + +static void *_print_lock_warn(void *no_data) +{ + sleep(2); + printf(" Waiting for lock from other user.\n"); + + return NULL; +} + +static void nonblock(int state) +{ + struct termios ttystate; + + //get the terminal state + tcgetattr(STDIN_FILENO, &ttystate); + + switch(state) { + case 1: + //turn off canonical mode + ttystate.c_lflag &= ~ICANON; + //minimum of number input read. + ttystate.c_cc[VMIN] = 1; + break; + default: + //turn on canonical mode + ttystate.c_lflag |= ICANON; + } + //set the terminal attributes. + tcsetattr(STDIN_FILENO, TCSANOW, &ttystate); + +} + +extern int parse_option_end(char *option) +{ + int end = 0; + + if(!option) + return 0; + + while(option[end] && option[end] != '=') + end++; + if(!option[end]) + return 0; + end++; + return end; +} + +/* you need to xfree whatever is sent from here */ +extern char *strip_quotes(char *option, int *increased) +{ + int end = 0; + int i=0, start=0; + char *meat = NULL; + + if(!option) + return NULL; + + /* first strip off the ("|')'s */ + if (option[i] == '\"' || option[i] == '\'') + i++; + start = i; + + while(option[i]) { + if(option[i] == '\"' || option[i] == '\'') { + end++; + break; + } + i++; + } + end += i; + + meat = xmalloc((i-start)+1); + memcpy(meat, option+start, (i-start)); + + if(increased) + (*increased) += end; + + return meat; +} + +extern void addto_char_list(List char_list, char *names) +{ + int i=0, start=0; + char *name = NULL, *tmp_char = NULL; + ListIterator itr = list_iterator_create(char_list); + + if(names && char_list) { + if (names[i] == '\"' || names[i] == '\'') + i++; + start = i; + while(names[i]) { + if(names[i] == '\"' || names[i] == '\'') + break; + else if(names[i] == ',') { + if((i-start) > 0) { + name = xmalloc((i-start+1)); + memcpy(name, names+start, (i-start)); + + while((tmp_char = list_next(itr))) { + if(!strcasecmp(tmp_char, name)) + break; + } + + if(!tmp_char) + list_append(char_list, name); + else + xfree(name); + list_iterator_reset(itr); + } + i++; + start = i; + } + i++; + } + if((i-start) > 0) { + name = xmalloc((i-start)+1); + memcpy(name, names+start, (i-start)); + while((tmp_char = list_next(itr))) { + if(!strcasecmp(tmp_char, name)) + break; + } + + if(!tmp_char) + list_append(char_list, name); + else + xfree(name); + } + } + list_iterator_destroy(itr); +} + +extern void destroy_sacctmgr_action(void *object) +{ + sacctmgr_action_t *action = (sacctmgr_action_t *)object; + + if(action) { + if(action->list) + list_destroy(action->list); + + 
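/* Free whatever type-specific data the action carries: the *_CREATE
 * actions only own the list destroyed above, the *_MODIFY actions own
 * both a record and a condition, the *_DELETE actions own only a
 * condition, and the coordinator actions xfree() the record and
 * destroy the user condition. */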
switch(action->type) { + case SACCTMGR_ACTION_NOTSET: + case SACCTMGR_USER_CREATE: + case SACCTMGR_ACCOUNT_CREATE: + case SACCTMGR_CLUSTER_CREATE: + case SACCTMGR_ASSOCIATION_CREATE: + /* These only have a list so there isn't + * anything else to free + */ + break; + case SACCTMGR_USER_MODIFY: + destroy_acct_user_rec(action->rec); + destroy_acct_user_cond(action->cond); + break; + case SACCTMGR_USER_DELETE: + destroy_acct_user_cond(action->cond); + break; + case SACCTMGR_ACCOUNT_MODIFY: + destroy_acct_account_rec(action->rec); + destroy_acct_account_cond(action->cond); + break; + case SACCTMGR_ACCOUNT_DELETE: + destroy_acct_account_cond(action->cond); + break; + case SACCTMGR_CLUSTER_MODIFY: + destroy_acct_cluster_rec(action->rec); + destroy_acct_cluster_cond(action->cond); + break; + case SACCTMGR_CLUSTER_DELETE: + destroy_acct_cluster_cond(action->cond); + break; + case SACCTMGR_ASSOCIATION_MODIFY: + destroy_acct_association_rec(action->rec); + destroy_acct_association_cond(action->cond); + break; + case SACCTMGR_ASSOCIATION_DELETE: + destroy_acct_association_cond(action->cond); + break; + case SACCTMGR_COORD_CREATE: + xfree(action->rec); + destroy_acct_user_cond(action->cond); + break; + case SACCTMGR_COORD_DELETE: + xfree(action->rec); + destroy_acct_user_cond(action->cond); + break; + default: + error("unknown action %d", action->type); + break; + } + xfree(action); + } +} + +extern int notice_thread_init() +{ + pthread_attr_t attr; + + slurm_attr_init(&attr); + if(pthread_create(&lock_warning_thread, &attr, &_print_lock_warn, NULL)) + error ("pthread_create error %m"); + slurm_attr_destroy(&attr); + return SLURM_SUCCESS; +} + +extern int notice_thread_fini() +{ + return pthread_cancel(lock_warning_thread); +} + +extern int commit_check(char *warning) +{ + int ans = 0; + char c = '\0'; + int fd = fileno(stdin); + fd_set rfds; + struct timeval tv; + + if(!rollback_flag) + return 1; + + printf("%s (You have 30 seconds to decide)\n", warning); + nonblock(1); + while(c != 'Y' && c != 'y' + && c != 'N' && c != 'n' + && c != '\n') { + if(c) { + printf("Y or N please\n"); + } + printf("(N/y): "); + fflush(stdout); + FD_ZERO(&rfds); + FD_SET(fd, &rfds); + /* Wait up to 30 seconds. 
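select() watches stdin with this timeout; if it returns zero or less
   (timeout or error) the loop breaks, "timeout" is printed and
   commit_check() returns 0, otherwise getchar() reads the single raw
   character made available by nonblock(1).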
*/ + tv.tv_sec = 30; + tv.tv_usec = 0; + if((ans = select(fd+1, &rfds, NULL, NULL, &tv)) <= 0) + break; + + c = getchar(); + printf("\n"); + } + nonblock(0); + if(ans <= 0) + printf("timeout\n"); + else if(c == 'Y' || c == 'y') + return 1; + + return 0; +} + +extern acct_association_rec_t *sacctmgr_find_association(char *user, + char *account, + char *cluster, + char *partition) +{ + acct_association_rec_t * assoc = NULL; + acct_association_cond_t assoc_cond; + List assoc_list = NULL; + + memset(&assoc_cond, 0, sizeof(acct_association_cond_t)); + if(account) { + assoc_cond.acct_list = list_create(NULL); + list_append(assoc_cond.acct_list, account); + } else { + error("need an account to find association"); + return NULL; + } + if(cluster) { + assoc_cond.cluster_list = list_create(NULL); + list_append(assoc_cond.cluster_list, cluster); + } else { + if(assoc_cond.acct_list) + list_destroy(assoc_cond.acct_list); + error("need an cluster to find association"); + return NULL; + } + + assoc_cond.user_list = list_create(NULL); + if(user) + list_append(assoc_cond.user_list, user); + else + list_append(assoc_cond.user_list, ""); + + assoc_cond.partition_list = list_create(NULL); + if(partition) + list_append(assoc_cond.partition_list, partition); + else + list_append(assoc_cond.partition_list, ""); + + assoc_list = acct_storage_g_get_associations(db_conn, &assoc_cond); + + list_destroy(assoc_cond.acct_list); + list_destroy(assoc_cond.cluster_list); + list_destroy(assoc_cond.user_list); + list_destroy(assoc_cond.partition_list); + + if(assoc_list) + assoc = list_pop(assoc_list); + + list_destroy(assoc_list); + + return assoc; +} + +extern acct_association_rec_t *sacctmgr_find_account_base_assoc(char *account, + char *cluster) +{ + acct_association_rec_t *assoc = NULL; + char *temp = "root"; + acct_association_cond_t assoc_cond; + List assoc_list = NULL; + + if(!cluster) + return NULL; + + if(account) + temp = account; + + memset(&assoc_cond, 0, sizeof(acct_association_cond_t)); + assoc_cond.acct_list = list_create(NULL); + list_append(assoc_cond.cluster_list, temp); + assoc_cond.cluster_list = list_create(NULL); + list_append(assoc_cond.cluster_list, cluster); + assoc_cond.user_list = list_create(NULL); + list_append(assoc_cond.user_list, ""); + +// info("looking for %s %s in %d", account, cluster, +// list_count(sacctmgr_association_list)); + + assoc_list = acct_storage_g_get_associations(db_conn, &assoc_cond); + + list_destroy(assoc_cond.acct_list); + list_destroy(assoc_cond.cluster_list); + list_destroy(assoc_cond.user_list); + + if(assoc_list) + assoc = list_pop(assoc_list); + + list_destroy(assoc_list); + + return assoc; +} + +extern acct_association_rec_t *sacctmgr_find_root_assoc(char *cluster) +{ + return sacctmgr_find_account_base_assoc(NULL, cluster); +} + +extern acct_user_rec_t *sacctmgr_find_user(char *name) +{ + acct_user_rec_t *user = NULL; + acct_user_cond_t user_cond; + List user_list = NULL; + + if(!name) + return NULL; + + memset(&user_cond, 0, sizeof(acct_user_cond_t)); + user_cond.user_list = list_create(NULL); + list_append(user_cond.user_list, name); + + user_list = acct_storage_g_get_users(db_conn, &user_cond); + + list_destroy(user_cond.user_list); + + if(user_list) + user = list_pop(user_list); + + list_destroy(user_list); + + return user; +} + +extern acct_account_rec_t *sacctmgr_find_account(char *name) +{ + acct_account_rec_t *account = NULL; + acct_account_cond_t account_cond; + List account_list = NULL; + + if(!name) + return NULL; + + memset(&account_cond, 0, 
sizeof(acct_account_cond_t)); + account_cond.acct_list = list_create(NULL); + list_append(account_cond.acct_list, name); + + account_list = acct_storage_g_get_accounts(db_conn, &account_cond); + + list_destroy(account_cond.acct_list); + + if(account_list) + account = list_pop(account_list); + + list_destroy(account_list); + + return account; +} + +extern acct_cluster_rec_t *sacctmgr_find_cluster(char *name) +{ + acct_cluster_rec_t *cluster = NULL; + acct_cluster_cond_t cluster_cond; + List cluster_list = NULL; + + if(!name) + return NULL; + + memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t)); + cluster_cond.cluster_list = list_create(NULL); + list_append(cluster_cond.cluster_list, name); + + cluster_list = acct_storage_g_get_clusters(db_conn, &cluster_cond); + + list_destroy(cluster_cond.cluster_list); + + if(cluster_list) + cluster = list_pop(cluster_list); + + list_destroy(cluster_list); + + return cluster; +} + +extern acct_association_rec_t *sacctmgr_find_association_from_list( + List assoc_list, char *user, char *account, + char *cluster, char *partition) +{ + ListIterator itr = NULL; + acct_association_rec_t * assoc = NULL; + + if(!assoc_list) + return NULL; + + itr = list_iterator_create(assoc_list); + while((assoc = list_next(itr))) { + if((user && (!assoc->user || strcasecmp(user, assoc->user))) + || (account && (!assoc->acct + || strcasecmp(account, assoc->acct))) + || (cluster && (!assoc->cluster + || strcasecmp(cluster, assoc->cluster))) + || (partition && (!assoc->partition + || strcasecmp(partition, + assoc->partition)))) + continue; + break; + } + list_iterator_destroy(itr); + + return assoc; +} + +extern acct_association_rec_t *sacctmgr_find_account_base_assoc_from_list( + List assoc_list, char *account, char *cluster) +{ + ListIterator itr = NULL; + acct_association_rec_t *assoc = NULL; + char *temp = "root"; + + if(!cluster || !assoc_list) + return NULL; + + if(account) + temp = account; + /* info("looking for %s %s in %d", account, cluster, */ +/* list_count(assoc_list)); */ + itr = list_iterator_create(assoc_list); + while((assoc = list_next(itr))) { + /* info("is it %s %s %s", assoc->user, assoc->acct, assoc->cluster); */ + if(assoc->user + || strcasecmp(temp, assoc->acct) + || strcasecmp(cluster, assoc->cluster)) + continue; + /* info("found it"); */ + break; + } + list_iterator_destroy(itr); + + return assoc; +} +extern acct_user_rec_t *sacctmgr_find_user_from_list( + List user_list, char *name) +{ + ListIterator itr = NULL; + acct_user_rec_t *user = NULL; + + if(!name || !user_list) + return NULL; + + itr = list_iterator_create(user_list); + while((user = list_next(itr))) { + if(!strcasecmp(name, user->name)) + break; + } + list_iterator_destroy(itr); + + return user; + +} + +extern acct_account_rec_t *sacctmgr_find_account_from_list( + List acct_list, char *name) +{ + ListIterator itr = NULL; + acct_account_rec_t *account = NULL; + + if(!name || !acct_list) + return NULL; + + itr = list_iterator_create(acct_list); + while((account = list_next(itr))) { + if(!strcasecmp(name, account->name)) + break; + } + list_iterator_destroy(itr); + + return account; + +} + +extern acct_cluster_rec_t *sacctmgr_find_cluster_from_list( + List cluster_list, char *name) +{ + ListIterator itr = NULL; + acct_cluster_rec_t *cluster = NULL; + + if(!name || !cluster_list) + return NULL; + + itr = list_iterator_create(cluster_list); + while((cluster = list_next(itr))) { + if(!strcasecmp(name, cluster->name)) + break; + } + list_iterator_destroy(itr); + + return cluster; +} + +extern int 
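/* get_uint() parses a numeric option value: strip_quotes() removes any
 * surrounding quotes, strtol() converts the result, a non-numeric
 * string is reported and rejected with SLURM_ERROR, a negative value
 * is stored as INFINITE to flag that the limit should be cleared, and
 * any other value is stored in *out_value as a uint32_t. */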
get_uint(char *in_value, uint32_t *out_value, char *type) +{ + char *ptr = NULL, *meat = NULL; + long num; + + if(!(meat = strip_quotes(in_value, NULL))) + return SLURM_ERROR; + + num = strtol(meat, &ptr, 10); + if ((num == 0) && ptr && ptr[0]) { + error("Invalid value for %s (%s)", type, meat); + xfree(meat); + return SLURM_ERROR; + } + xfree(meat); + + if (num < 0) + *out_value = INFINITE; /* flag to clear */ + else + *out_value = (uint32_t) num; + return SLURM_SUCCESS; +} diff --git a/src/sacctmgr/print.c b/src/sacctmgr/print.c new file mode 100644 index 000000000..e852a4dfd --- /dev/null +++ b/src/sacctmgr/print.c @@ -0,0 +1,195 @@ +/*****************************************************************************\ + * print.c - definitions for all printing functions. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ +#include "src/sacctmgr/print.h" +#include "src/common/parse_time.h" +int parsable_print = 0; +int have_header = 1; + +extern void destroy_print_field(void *object) +{ + print_field_t *field = (print_field_t *)object; + + if(field) { + xfree(field->name); + xfree(field); + } +} + +extern void print_header(List print_fields_list) +{ + ListIterator itr = NULL; + print_field_t *object = NULL; + + if(!print_fields_list || !have_header) + return; + + itr = list_iterator_create(print_fields_list); + while((object = list_next(itr))) { + (object->print_routine)(SLURM_PRINT_HEADLINE, object, 0); + } + list_iterator_reset(itr); + printf("\n"); + if(parsable_print) + return; + while((object = list_next(itr))) { + (object->print_routine)(SLURM_PRINT_UNDERSCORE, object, 0); + } + list_iterator_destroy(itr); + printf("\n"); +} + +extern void print_date(void) +{ + time_t now; + + now = time(NULL); + printf("%s", ctime(&now)); + +} + +extern void print_str(type_t type, print_field_t *field, char *value) +{ + char *print_this = value; + + switch(type) { + case SLURM_PRINT_HEADLINE: + if(parsable_print) + printf("%s|", field->name); + else + printf("%-*.*s ", field->len, field->len, field->name); + break; + case SLURM_PRINT_UNDERSCORE: + if(!parsable_print) + printf("%-*.*s ", field->len, field->len, + "---------------------------------------"); + break; + case SLURM_PRINT_VALUE: + if(!print_this) { + if(parsable_print) + print_this = ""; + else + print_this = " "; + } + + if(parsable_print) + printf("%s|", print_this); + else + printf("%-*.*s ", field->len, field->len, print_this); + break; + default: + if(parsable_print) + printf("%s|", "n/a"); + else + printf("%-*s ", field->len, "n/a"); + break; + } +} + +extern void print_uint(type_t type, print_field_t *field, uint32_t value) +{ + switch(type) { + case SLURM_PRINT_HEADLINE: + if(parsable_print) + printf("%s|", field->name); + else + printf("%-*.*s ", field->len, field->len, field->name); + break; + case SLURM_PRINT_UNDERSCORE: + if(!parsable_print) + printf("%-*.*s ", field->len, field->len, + "---------------------------------------"); + break; + case SLURM_PRINT_VALUE: + /* (value == unset) || (value == cleared) */ + if((value == NO_VAL) || (value == INFINITE)) { + if(parsable_print) + printf("|"); + else + printf("%-*s ", field->len, " "); + } else { + if(parsable_print) + printf("%u|", value); + else + printf("%*u ", field->len, value); + } + break; + default: + if(parsable_print) + printf("%s|", "n/a"); + else + printf("%-*.*s ", field->len, field->len, "n/a"); + break; + } +} + +extern void print_time(type_t type, print_field_t *field, uint32_t value) +{ + switch(type) { + case SLURM_PRINT_HEADLINE: + if(parsable_print) + printf("%s|", field->name); + else + printf("%-*.*s ", field->len, field->len, field->name); + break; + case SLURM_PRINT_UNDERSCORE: + if(!parsable_print) + printf("%-*.*s ", field->len, field->len, + "---------------------------------------"); + break; + case SLURM_PRINT_VALUE: + /* (value == unset) || (value == cleared) */ + if((value == NO_VAL) || (value == INFINITE)) { + if(parsable_print) + printf("|"); + else + printf("%-*s ", field->len, " "); + } else { + char time_buf[32]; + mins2time_str((time_t) value, + time_buf, sizeof(time_buf)); + if(parsable_print) + printf("%s|", time_buf); + else + printf("%*s ", field->len, time_buf); + } + break; + default: + printf("%-*.*s ", field->len, field->len, "n/a"); + break; + } +} diff --git 
a/src/sacctmgr/print.h b/src/sacctmgr/print.h new file mode 100644 index 000000000..a4f0bf760 --- /dev/null +++ b/src/sacctmgr/print.h @@ -0,0 +1,93 @@ +/*****************************************************************************\ + * print.h - definitions for all printing functions. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ +#ifndef __SACCTMGR_PRINT_H__ +#define __SACCTMGR_PRINT_H__ + +#if HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_GETOPT_H +# include <getopt.h> +#else +# include "src/common/getopt.h" +#endif + +#include <ctype.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#ifdef HAVE_STRING_H +# include <string.h> +#endif +#ifdef HAVE_STRINGS_H +# include <strings.h> +#endif +#include <time.h> +#include <unistd.h> + +#include <slurm/slurm.h> + +#include "src/common/xstring.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/jobacct_common.h" + +typedef enum { + SLURM_PRINT_HEADLINE, + SLURM_PRINT_UNDERSCORE, + SLURM_PRINT_VALUE +} type_t; + +typedef struct { + uint16_t len; /* what is the width of the print */ + char *name; /* name to be printed in header */ + void (*print_routine) (); /* what is the function to print with */ + uint16_t type; /* defined in the local function */ +} print_field_t; + +extern int parsable_print; +extern int have_header; + +extern void destroy_print_field(void *object); +extern void print_header(List print_fields_list); +extern void print_date(void); +extern void print_str(type_t type, print_field_t *field, char *value); +extern void print_uint(type_t type, print_field_t *field, uint32_t value); +extern void print_time(type_t type, print_field_t *field, uint32_t value); + +#endif diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c new file mode 100644 index 000000000..c56e76dde --- /dev/null +++ b/src/sacctmgr/sacctmgr.c @@ -0,0 +1,1482 @@ +/*****************************************************************************\ + * sacctmgr.c - administration tool for slurm's accounting. + * provides interface to read, write, update, and configure + * accounting. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/sacctmgr/sacctmgr.h" +#include "src/sacctmgr/print.h" +#include "src/common/xsignal.h" + +#define OPT_LONG_HIDE 0x102 +#define BUFFER_SIZE 4096 + +typedef struct { + uint16_t admin; + char *def_acct; + char *desc; + uint32_t fairshare; + uint32_t max_cpu_secs_per_job; + uint32_t max_jobs; + uint32_t max_nodes_per_job; + uint32_t max_wall_duration_per_job; + char *name; + char *org; + char *part; + uint16_t qos; +} sacctmgr_file_opts_t; + + +char *command_name; +int all_flag; /* display even hidden partitions */ +int exit_code; /* sacctmgr's exit code, =1 on any error at any time */ +int exit_flag; /* program to terminate if =1 */ +int input_words; /* number of words of input permitted */ +int one_liner; /* one record per line if =1 */ +int quiet_flag; /* quiet=1, verbose=-1, normal=0 */ +int rollback_flag; /* immediate execute=1, else = 0 */ +int with_assoc_flag = 0; +void *db_conn = NULL; +uint32_t my_uid = 0; + +static void _show_it (int argc, char *argv[]); +static void _add_it (int argc, char *argv[]); +static void _modify_it (int argc, char *argv[]); +static void _delete_it (int argc, char *argv[]); +static void _load_file (int argc, char *argv[]); +static int _get_command (int *argc, char *argv[]); +static void _print_version( void ); +static int _process_command (int argc, char *argv[]); +static void _usage (); + +int +main (int argc, char *argv[]) +{ + int error_code = SLURM_SUCCESS, i, opt_char, input_field_count; + char **input_fields; + log_options_t opts = LOG_OPTS_STDERR_ONLY ; + + int option_index; + static struct option long_options[] = { + {"all", 0, 0, 'a'}, + {"help", 0, 0, 'h'}, + {"hide", 0, 0, OPT_LONG_HIDE}, + {"immediate",0, 0, 'i'}, + {"oneliner", 0, 0, 'o'}, + {"no_header", 0, 0, 'n'}, + {"parsable", 0, 0, 'p'}, + {"quiet", 0, 0, 'q'}, + {"usage", 0, 0, 'h'}, + {"verbose", 0, 0, 'v'}, + {"version", 0, 0, 'V'}, + {NULL, 0, 0, 0} + }; + + command_name = argv[0]; + all_flag = 0; + rollback_flag = 1; + exit_code = 0; + exit_flag = 0; + input_field_count = 0; + quiet_flag = 0; + log_init("sacctmgr", opts, SYSLOG_FACILITY_DAEMON, NULL); + + if (getenv ("SACCTMGR_ALL")) + all_flag= 1; + + while((opt_char = getopt_long(argc, argv, "ahionpqsvV", + long_options, &option_index)) != -1) { + switch (opt_char) { + case (int)'?': + fprintf(stderr, "Try \"sacctmgr --help\" " + "for more information\n"); + exit(1); + break; + case (int)'a': + all_flag = 1; + break; + case (int)'h': + _usage (); + exit(exit_code); + break; + case OPT_LONG_HIDE: + all_flag = 0; + break; + case (int)'i': + rollback_flag = 0; + break; + case (int)'o': + one_liner = 1; + break; + case (int)'n': + have_header = 0; + break; + case (int)'p': + parsable_print = 1; + break; + case (int)'q': + quiet_flag = 1; + break; + case (int)'s': + with_assoc_flag = 1; + break; + case (int)'v': + quiet_flag = -1; + break; + case (int)'V': + _print_version(); + exit(exit_code); + break; + default: + exit_code = 1; + fprintf(stderr, "getopt error, returned %c\n", + opt_char); + exit(exit_code); + } + } + + if (argc > MAX_INPUT_FIELDS) /* bogus input, but continue anyway */ + input_words = argc; + else + input_words = 128; + input_fields = (char **) xmalloc 
(sizeof (char *) * input_words); + if (optind < argc) { + for (i = optind; i < argc; i++) { + input_fields[input_field_count++] = argv[i]; + } + } + + db_conn = acct_storage_g_get_connection(false, rollback_flag); + my_uid = getuid(); + + if (input_field_count) + exit_flag = 1; + else + error_code = _get_command (&input_field_count, input_fields); + while (error_code == SLURM_SUCCESS) { + error_code = _process_command (input_field_count, + input_fields); + if (error_code || exit_flag) + break; + error_code = _get_command (&input_field_count, input_fields); + } + + acct_storage_g_close_connection(&db_conn); + slurm_acct_storage_fini(); + printf("\n"); + exit(exit_code); +} + +#if !HAVE_READLINE +/* + * Alternative to readline if readline is not available + */ +static char * +getline(const char *prompt) +{ + char buf[4096]; + char *line; + int len; + printf("%s", prompt); + + fgets(buf, 4096, stdin); + len = strlen(buf); + if ((len > 0) && (buf[len-1] == '\n')) + buf[len-1] = '\0'; + else + len++; + line = malloc (len * sizeof(char)); + return strncpy(line, buf, len); +} +#endif + +/* + * _get_command - get a command from the user + * OUT argc - location to store count of arguments + * OUT argv - location to store the argument list + */ +static int +_get_command (int *argc, char **argv) +{ + char *in_line; + static char *last_in_line = NULL; + int i, in_line_size; + static int last_in_line_size = 0; + + *argc = 0; + +#if HAVE_READLINE + in_line = readline ("sacctmgr: "); +#else + in_line = getline("sacctmgr: "); +#endif + if (in_line == NULL) + return 0; + else if (strcmp (in_line, "!!") == 0) { + free (in_line); + in_line = last_in_line; + in_line_size = last_in_line_size; + } else { + if (last_in_line) + free (last_in_line); + last_in_line = in_line; + last_in_line_size = in_line_size = strlen (in_line); + } + +#if HAVE_READLINE + add_history(in_line); +#endif + + /* break in_line into tokens */ + for (i = 0; i < in_line_size; i++) { + bool double_quote = false, single_quote = false; + if (in_line[i] == '\0') + break; + if (isspace ((int) in_line[i])) + continue; + if (((*argc) + 1) > MAX_INPUT_FIELDS) { /* bogus input line */ + exit_code = 1; + fprintf (stderr, + "%s: can not process over %d words\n", + command_name, input_words); + return E2BIG; + } + argv[(*argc)++] = &in_line[i]; + for (i++; i < in_line_size; i++) { + if (in_line[i] == '\042') { + double_quote = !double_quote; + continue; + } + if (in_line[i] == '\047') { + single_quote = !single_quote; + continue; + } + if (in_line[i] == '\0') + break; + if (double_quote || single_quote) + continue; + if (isspace ((int) in_line[i])) { + in_line[i] = '\0'; + break; + } + } + } + return 0; +} + + +static void _print_version(void) +{ + printf("%s %s\n", PACKAGE, SLURM_VERSION); + if (quiet_flag == -1) { + long version = slurm_api_version(); + printf("slurm_api_version: %ld, %ld.%ld.%ld\n", version, + SLURM_VERSION_MAJOR(version), + SLURM_VERSION_MINOR(version), + SLURM_VERSION_MICRO(version)); + } +} + +/* + * _process_command - process the user's command + * IN argc - count of arguments + * IN argv - the arguments + * RET 0 or errno (only for errors fatal to sacctmgr) + */ +static int +_process_command (int argc, char *argv[]) +{ + if (argc < 1) { + exit_code = 1; + if (quiet_flag == -1) + fprintf(stderr, "no input"); + } else if (strncasecmp (argv[0], "all", 3) == 0) { + all_flag = 1; + } else if (strncasecmp (argv[0], "associations", 3) == 0) { + with_assoc_flag = 1; + } else if (strncasecmp (argv[0], "help", 2) == 0) { + if (argc > 
1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for keyword:%s\n", + argv[0]); + } + _usage (); + } else if (strncasecmp (argv[0], "hide", 2) == 0) { + all_flag = 0; + } else if (strncasecmp (argv[0], "load", 2) == 0) { + if (argc < 2) { + exit_code = 1; + if (quiet_flag != 1) + fprintf(stderr, + "too few arguments for keyword:%s\n", + argv[0]); + } else + _load_file((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "oneliner", 1) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for keyword:%s\n", + argv[0]); + } + one_liner = 1; + } else if (strncasecmp (argv[0], "quiet", 4) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, "too many arguments for keyword:%s\n", + argv[0]); + } + quiet_flag = 1; + } else if ((strncasecmp (argv[0], "exit", 4) == 0) || + (strncasecmp (argv[0], "quit", 4) == 0)) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for keyword:%s\n", + argv[0]); + } + exit_flag = 1; + } else if ((strncasecmp (argv[0], "add", 3) == 0) || + (strncasecmp (argv[0], "create", 3) == 0)) { + if (argc < 2) { + exit_code = 1; + if (quiet_flag != 1) + fprintf(stderr, + "too few arguments for keyword:%s\n", + argv[0]); + } else + _add_it((argc - 1), &argv[1]); + } else if ((strncasecmp (argv[0], "show", 3) == 0) || + (strncasecmp (argv[0], "list", 3) == 0)) { + if (argc < 2) { + exit_code = 1; + if (quiet_flag != 1) + fprintf(stderr, + "too few arguments for keyword:%s\n", + argv[0]); + } else + _show_it((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "modify", 1) == 0) { + if (argc < 2) { + exit_code = 1; + fprintf (stderr, "too few arguments for %s keyword\n", + argv[0]); + return 0; + } else + _modify_it((argc - 1), &argv[1]); + } else if ((strncasecmp (argv[0], "delete", 3) == 0) || + (strncasecmp (argv[0], "remove", 3) == 0)) { + if (argc < 2) { + exit_code = 1; + fprintf (stderr, "too few arguments for %s keyword\n", + argv[0]); + return 0; + } else + _delete_it((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "verbose", 4) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for %s keyword\n", + argv[0]); + } + quiet_flag = -1; + } else if (strncasecmp (argv[0], "rollup", 2) == 0) { + time_t my_time = 0; + if (argc > 2) { + exit_code = 1; + fprintf (stderr, + "too many arguments for %s keyword\n", + argv[0]); + } + + if(argc > 1) + my_time = parse_time(argv[1]); + if(acct_storage_g_roll_usage(db_conn, my_time) + == SLURM_SUCCESS) { + if(commit_check("Would you like to commit rollup?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Rollup Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } + } else if (strncasecmp (argv[0], "version", 4) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for %s keyword\n", + argv[0]); + } + _print_version(); + } else { + exit_code = 1; + fprintf (stderr, "invalid keyword: %s\n", argv[0]); + } + + return 0; +} + +/* + * _add_it - add the entity per the supplied arguments + * IN argc - count of arguments + * IN argv - list of arguments + */ +static void _add_it (int argc, char *argv[]) +{ + int error_code = SLURM_SUCCESS; + + /* First identify the entity to add */ + if (strncasecmp (argv[0], "User", 1) == 0) { + error_code = sacctmgr_add_user((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Account", 1) == 0) { + error_code = sacctmgr_add_account((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Cluster", 1) == 0) { + error_code = 
sacctmgr_add_cluster((argc - 1), &argv[1]); + } else { + exit_code = 1; + fprintf(stderr, "No valid entity in add command\n"); + fprintf(stderr, "Input line must include, "); + fprintf(stderr, "\"User\", \"Account\", "); + fprintf(stderr, "or \"Cluster\"\n"); + } + + if (error_code) { + exit_code = 1; + } +} + +/* + * _show_it - list the slurm configuration per the supplied arguments + * IN argc - count of arguments + * IN argv - list of arguments + */ +static void _show_it (int argc, char *argv[]) +{ + int error_code = SLURM_SUCCESS; + + /* First identify the entity to list */ + if (strncasecmp (argv[0], "User", 1) == 0) { + error_code = sacctmgr_list_user((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Account", 2) == 0) { + error_code = sacctmgr_list_account((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Association", 2) == 0) { + error_code = sacctmgr_list_association((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Cluster", 1) == 0) { + error_code = sacctmgr_list_cluster((argc - 1), &argv[1]); + } else { + exit_code = 1; + fprintf(stderr, "No valid entity in list command\n"); + fprintf(stderr, "Input line must include "); + fprintf(stderr, "\"User\", \"Account\", \"Association\", "); + fprintf(stderr, "or \"Cluster\"\n"); + } + + if (error_code) { + exit_code = 1; + } +} + + +/* + * _modify_it - modify the slurm configuration per the supplied arguments + * IN argc - count of arguments + * IN argv - list of arguments + */ +static void _modify_it (int argc, char *argv[]) +{ + int error_code = SLURM_SUCCESS; + + /* First identify the entity to modify */ + if (strncasecmp (argv[0], "User", 1) == 0) { + error_code = sacctmgr_modify_user((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Account", 1) == 0) { + error_code = sacctmgr_modify_account((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Cluster", 1) == 0) { + error_code = sacctmgr_modify_cluster((argc - 1), &argv[1]); + } else { + exit_code = 1; + fprintf(stderr, "No valid entity in modify command\n"); + fprintf(stderr, "Input line must include "); + fprintf(stderr, "\"User\", \"Account\", "); + fprintf(stderr, "or \"Cluster\"\n"); + } + + if (error_code) { + exit_code = 1; + } +} + +/* + * _delete_it - delete the slurm configuration per the supplied arguments + * IN argc - count of arguments + * IN argv - list of arguments + */ +static void _delete_it (int argc, char *argv[]) +{ + int error_code = SLURM_SUCCESS; + + /* First identify the entity to delete */ + if (strncasecmp (argv[0], "User", 1) == 0) { + error_code = sacctmgr_delete_user((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Account", 1) == 0) { + error_code = sacctmgr_delete_account((argc - 1), &argv[1]); + } else if (strncasecmp (argv[0], "Cluster", 1) == 0) { + error_code = sacctmgr_delete_cluster((argc - 1), &argv[1]); + } else { + exit_code = 1; + fprintf(stderr, "No valid entity in delete command\n"); + fprintf(stderr, "Input line must include "); + fprintf(stderr, "\"User\", \"Account\", "); + fprintf(stderr, "or \"Cluster\"\n"); + } + + if (error_code) { + exit_code = 1; + } +} + +static int _strip_continuation(char *buf, int len) +{ + char *ptr; + int bs = 0; + + for (ptr = buf+len-1; ptr >= buf; ptr--) { + if (*ptr == '\\') + bs++; + else if (isspace(*ptr) && bs == 0) + continue; + else + break; + } + /* Check for an odd number of contiguous backslashes at + the end of the line */ + if (bs % 2 == 1) { + ptr = ptr + bs; + *ptr = '\0'; + return (ptr - buf); + } else { + return len; /* no continuation 
*/ + } +} + +/* Strip comments from a line by terminating the string + * where the comment begins. + * Everything after a non-escaped "#" is a comment. + */ +static void _strip_comments(char *line) +{ + int i; + int len = strlen(line); + int bs_count = 0; + + for (i = 0; i < len; i++) { + /* if # character is preceded by an even number of + * escape characters '\' */ + if (line[i] == '#' && (bs_count%2) == 0) { + line[i] = '\0'; + break; + } else if (line[i] == '\\') { + bs_count++; + } else { + bs_count = 0; + } + } +} + +/* + * Strips any escape characters, "\". If you WANT a back-slash, + * it must be escaped, "\\". + */ +static void _strip_escapes(char *line) +{ + int i, j; + int len = strlen(line); + + for (i = 0, j = 0; i < len+1; i++, j++) { + if (line[i] == '\\') + i++; + line[j] = line[i]; + } +} + +/* + * Reads the next line from the "file" into buffer "buf". + * + * Concatonates together lines that are continued on + * the next line by a trailing "\". Strips out comments, + * replaces escaped "\#" with "#", and replaces "\\" with "\". + */ +static int _get_next_line(char *buf, int buf_size, FILE *file) +{ + char *ptr = buf; + int leftover = buf_size; + int read_size, new_size; + int lines = 0; + + while (fgets(ptr, leftover, file)) { + lines++; + _strip_comments(ptr); + read_size = strlen(ptr); + new_size = _strip_continuation(ptr, read_size); + if (new_size < read_size) { + ptr += new_size; + leftover -= new_size; + } else { /* no continuation */ + break; + } + } + /* _strip_cr_nl(buf); */ /* not necessary */ + _strip_escapes(buf); + + return lines; +} + +static void _destroy_sacctmgr_file_opts(void *object) +{ + sacctmgr_file_opts_t *file_opts = (sacctmgr_file_opts_t *)object; + + if(file_opts) { + xfree(file_opts->def_acct); + xfree(file_opts->desc); + xfree(file_opts->name); + xfree(file_opts->org); + xfree(file_opts->part); + xfree(file_opts); + } +} + +static sacctmgr_file_opts_t *_parse_options(char *options) +{ + int start=0, i=0, end=0, mins, quote = 0; + char *sub = NULL; + sacctmgr_file_opts_t *file_opts = xmalloc(sizeof(sacctmgr_file_opts_t)); + char *option = NULL; + + file_opts->fairshare = NO_VAL; + file_opts->max_cpu_secs_per_job = NO_VAL; + file_opts->max_jobs = NO_VAL; + file_opts->max_nodes_per_job = NO_VAL; + file_opts->max_wall_duration_per_job = NO_VAL; + + while(options[i]) { + quote = 0; + start=i; + + while(options[i] && options[i] != ':' && options[i] != '\n') { + if(options[i] == '"') { + if(quote) + quote = 0; + else + quote = 1; + } + i++; + } + if(quote) { + while(options[i] && options[i] != '"') + i++; + if(!options[i]) + fatal("There is a problem with option " + "%s with quotes.", option); + i++; + } + sub = xstrndup(options+start, i-start); + end = parse_option_end(sub); + + option = strip_quotes(sub+end, NULL); + + if(!end) { + if(file_opts->name) { + printf(" Bad format on %s: " + "End your option with " + "an '=' sign\n", sub); + _destroy_sacctmgr_file_opts(file_opts); + break; + } + file_opts->name = xstrdup(option); + } else if (strncasecmp (sub, "AdminLevel", 2) == 0) { + file_opts->admin = str_2_acct_admin_level(option); + } else if (strncasecmp (sub, "DefaultAccount", 3) == 0) { + file_opts->def_acct = xstrdup(option); + } else if (strncasecmp (sub, "Description", 3) == 0) { + file_opts->desc = xstrdup(option); + } else if (strncasecmp (sub, "FairShare", 1) == 0) { + if (get_uint(option, &file_opts->fairshare, + "FairShare") != SLURM_SUCCESS) { + printf(" Bad FairShare value: %s\n", option); + _destroy_sacctmgr_file_opts(file_opts); + 
break; + } + } else if (strncasecmp (sub, "MaxCPUSec", 4) == 0 + || strncasecmp (sub, "MaxProcSec", 4) == 0) { + if (get_uint(option, &file_opts->max_cpu_secs_per_job, + "MaxCPUSec") != SLURM_SUCCESS) { + printf(" Bad MaxCPUSec value: %s\n", option); + _destroy_sacctmgr_file_opts(file_opts); + break; + } + } else if (strncasecmp (sub, "MaxJobs", 4) == 0) { + if (get_uint(option, &file_opts->max_jobs, + "MaxJobs") != SLURM_SUCCESS) { + printf(" Bad MaxJobs value: %s\n", option); + _destroy_sacctmgr_file_opts(file_opts); + break; + } + } else if (strncasecmp (sub, "MaxNodes", 4) == 0) { + if (get_uint(option, &file_opts->max_nodes_per_job, + "MaxNodes") != SLURM_SUCCESS) { + printf(" Bad MaxNodes value: %s\n", option); + _destroy_sacctmgr_file_opts(file_opts); + break; + } + } else if (strncasecmp (sub, "MaxWall", 4) == 0) { + mins = time_str2mins(option); + if (mins >= 0) { + file_opts->max_wall_duration_per_job + = (uint32_t) mins; + } else if (strcmp(option, "-1") == 0) { + file_opts->max_wall_duration_per_job = -1; + } else { + printf(" Bad MaxWall time format: %s\n", + option); + _destroy_sacctmgr_file_opts(file_opts); + break; + } + } else if (strncasecmp (sub, "Organization", 1) == 0) { + file_opts->org = xstrdup(option); + } else if (strncasecmp (sub, "QosLevel", 1) == 0 + || strncasecmp (sub, "Expedite", 1) == 0) { + file_opts->qos = str_2_acct_qos(option); + } else { + printf(" Unknown option: %s\n", sub); + } + + xfree(sub); + xfree(option); + + if(options[i] == ':') + i++; + else + break; + } + + xfree(sub); + xfree(option); + + if(!file_opts->name) { + printf(" error: No name given\n"); + _destroy_sacctmgr_file_opts(file_opts); + } + return file_opts; +} + +static void _load_file (int argc, char *argv[]) +{ + DEF_TIMERS; + char line[BUFFER_SIZE]; + FILE *fd = NULL; + char *parent = NULL; + char *cluster_name = NULL; + char object[25]; + int start = 0, len = 0, i = 0; + int lc=0, num_lines=0; + int rc = SLURM_SUCCESS; + + sacctmgr_file_opts_t *file_opts = NULL; + acct_association_rec_t *assoc = NULL; + acct_account_rec_t *acct = NULL; + acct_cluster_rec_t *cluster = NULL; + acct_user_rec_t *user = NULL; + + List curr_assoc_list = NULL; + List curr_acct_list = acct_storage_g_get_accounts(db_conn, NULL); + List curr_cluster_list = acct_storage_g_get_clusters(db_conn, NULL); + List curr_user_list = acct_storage_g_get_users(db_conn, NULL); + + /* This will be freed in their local counter parts */ + List acct_list = list_create(NULL); + List acct_assoc_list = list_create(NULL); + List user_list = list_create(NULL); + List user_assoc_list = list_create(NULL); + + ListIterator itr; + + List print_fields_list; + + print_field_t name_field; + print_field_t acct_field; + print_field_t parent_field; + print_field_t fs_field; + print_field_t mc_field; + print_field_t mj_field; + print_field_t mn_field; + print_field_t mw_field; + + print_field_t desc_field; + print_field_t org_field; + print_field_t qos_field; + + print_field_t admin_field; + print_field_t dacct_field; + + int set = 0; + + fd = fopen(argv[0], "r"); + if (fd == NULL) { + printf(" error: Unable to read \"%s\": %m\n", argv[0]); + return; + } + + while((num_lines = _get_next_line(line, BUFFER_SIZE, fd)) > 0) { + lc += num_lines; + /* skip empty lines */ + if (line[0] == '\0') { + continue; + } + len = strlen(line); + + memset(object, 0, sizeof(object)); + + /* first find the object */ + start=0; + for(i=0; i<len; i++) { + if(line[i] == '-') { + start = i; + if(line[i-1] == ' ') + i--; + if(i<sizeof(object)) + strncpy(object, 
line, i); + break; + } + } + if(!object[0]) { + printf(" error: Misformatted line(%d): %s\n", lc, line); + rc = SLURM_ERROR; + break; + } + while(line[start] != ' ' && start<len) + start++; + if(start>=len) { + printf(" error: Nothing after object " + "name '%s'. line(%d)\n", + object, lc); + rc = SLURM_ERROR; + break; + + } + start++; + + if(!strcasecmp("Machine", object) + || !strcasecmp("Cluster", object)) { + acct_association_cond_t assoc_cond; + + if(cluster_name) { + printf(" You can only add one cluster " + "at a time.\n"); + rc = SLURM_ERROR; + break; + } + file_opts = _parse_options(line+start); + + if(!file_opts) { + printf(" error: Problem with line(%d)\n", lc); + rc = SLURM_ERROR; + break; + } + cluster_name = xstrdup(file_opts->name); + if(!sacctmgr_find_cluster_from_list( + curr_cluster_list, cluster_name)) { + List cluster_list = + list_create(destroy_acct_cluster_rec); + cluster = xmalloc(sizeof(acct_cluster_rec_t)); + list_append(cluster_list, cluster); + cluster->name = xstrdup(cluster_name); + cluster->default_fairshare = + file_opts->fairshare; + cluster->default_max_cpu_secs_per_job = + file_opts->max_cpu_secs_per_job; + cluster->default_max_jobs = file_opts->max_jobs; + cluster->default_max_nodes_per_job = + file_opts->max_nodes_per_job; + cluster->default_max_wall_duration_per_job = + file_opts->max_wall_duration_per_job; + notice_thread_init(); + rc = acct_storage_g_add_clusters( + db_conn, my_uid, cluster_list); + notice_thread_fini(); + list_destroy(cluster_list); + + if(rc != SLURM_SUCCESS) { + printf(" Problem adding machine\n"); + rc = SLURM_ERROR; + break; + } + } + info("For cluster %s", cluster_name); + _destroy_sacctmgr_file_opts(file_opts); + + memset(&assoc_cond, 0, sizeof(acct_association_cond_t)); + assoc_cond.cluster_list = list_create(NULL); + list_append(assoc_cond.cluster_list, cluster_name); + curr_assoc_list = acct_storage_g_get_associations( + db_conn, &assoc_cond); + list_destroy(assoc_cond.cluster_list); + + if(!curr_assoc_list) { + printf(" Problem getting associations " + "for this cluster\n"); + rc = SLURM_ERROR; + break; + } + //info("got %d assocs", list_count(curr_assoc_list)); + continue; + } else if(!cluster_name) { + printf(" error: You need to specify a cluster name " + "first with 'Cluster - name' in your file\n"); + break; + } + + if(!strcasecmp("Parent", object)) { + if(parent) + xfree(parent); + + i = start; + while(line[i] != '\n' && i<len) + i++; + + if(i >= len) { + printf(" error: No parent name " + "given line(%d)\n", + lc); + rc = SLURM_ERROR; + break; + } + parent = xstrndup(line+start, i-start); + //info("got parent %s", parent); + if(!sacctmgr_find_account_base_assoc_from_list( + curr_assoc_list, parent, cluster_name) + && !sacctmgr_find_account_base_assoc_from_list( + acct_assoc_list, parent, cluster_name)) { + printf(" error: line(%d) You need to add " + "this parent (%s) as a child before " + "you can add childern to it.\n", + lc, parent); + break; + } + continue; + } else if(!parent) { + parent = xstrdup("root"); + printf(" No parent given creating off root, " + "If incorrect specify 'Parent - name' " + "before any childern in your file\n"); + } + + if(!strcasecmp("Project", object) + || !strcasecmp("Account", object)) { + file_opts = _parse_options(line+start); + + if(!file_opts) { + printf(" error: Problem with line(%d)\n", lc); + rc = SLURM_ERROR; + break; + } + + //info("got a project %s of %s", file_opts->name, parent); + if(!sacctmgr_find_account_from_list( + curr_acct_list, file_opts->name)) { + acct = 
xmalloc(sizeof(acct_account_rec_t)); + acct->assoc_list = NULL; + acct->name = xstrdup(file_opts->name); + if(file_opts->desc) + acct->description = + xstrdup(file_opts->desc); + else + acct->description = + xstrdup(file_opts->name); + if(file_opts->org) + acct->organization = + xstrdup(file_opts->org); + else if(strcmp(parent, "root")) + acct->organization = xstrdup(parent); + else + acct->organization = + xstrdup(file_opts->name); + /* info("adding acct %s (%s) (%s)", */ +/* acct->name, acct->description, */ +/* acct->organization); */ + acct->qos = file_opts->qos; + list_append(acct_list, acct); + list_append(curr_acct_list, acct); + + assoc = xmalloc(sizeof(acct_association_rec_t)); + assoc->acct = xstrdup(file_opts->name); + assoc->cluster = xstrdup(cluster_name); + assoc->parent_acct = xstrdup(parent); + assoc->fairshare = file_opts->fairshare; + assoc->max_jobs = file_opts->max_jobs; + assoc->max_nodes_per_job = + file_opts->max_nodes_per_job; + assoc->max_wall_duration_per_job = + file_opts->max_wall_duration_per_job; + assoc->max_cpu_secs_per_job = + file_opts->max_cpu_secs_per_job; + list_append(acct_assoc_list, assoc); + /* don't add anything to the + curr_assoc_list */ + } else if(!sacctmgr_find_account_base_assoc_from_list( + curr_assoc_list, file_opts->name, + cluster_name) && + !sacctmgr_find_account_base_assoc_from_list( + acct_assoc_list, file_opts->name, + cluster_name)) { + assoc = xmalloc(sizeof(acct_association_rec_t)); + assoc->acct = xstrdup(file_opts->name); + assoc->cluster = xstrdup(cluster_name); + assoc->parent_acct = xstrdup(parent); + assoc->fairshare = file_opts->fairshare; + assoc->max_jobs = file_opts->max_jobs; + assoc->max_nodes_per_job = + file_opts->max_nodes_per_job; + assoc->max_wall_duration_per_job = + file_opts->max_wall_duration_per_job; + assoc->max_cpu_secs_per_job = + file_opts->max_cpu_secs_per_job; + list_append(acct_assoc_list, assoc); + /* don't add anything to the + curr_assoc_list */ + } + _destroy_sacctmgr_file_opts(file_opts); + continue; + } else if(!strcasecmp("User", object)) { + file_opts = _parse_options(line+start); + + if(!file_opts) { + printf(" error: Problem with line(%d)\n", lc); + rc = SLURM_ERROR; + break; + } + if(!sacctmgr_find_user_from_list( + curr_user_list, file_opts->name) + && !sacctmgr_find_user_from_list( + user_list, file_opts->name)) { + user = xmalloc(sizeof(acct_user_rec_t)); + user->assoc_list = NULL; + user->name = xstrdup(file_opts->name); + if(file_opts->def_acct) + user->default_acct = + xstrdup(file_opts->def_acct); + else + user->default_acct = xstrdup(parent); + + user->qos = file_opts->qos; + user->admin_level = file_opts->admin; + + list_append(user_list, user); + /* don't add anything to the + curr_user_list */ + + assoc = xmalloc(sizeof(acct_association_rec_t)); + assoc->acct = xstrdup(parent); + assoc->cluster = xstrdup(cluster_name); + assoc->fairshare = file_opts->fairshare; + assoc->max_jobs = file_opts->max_jobs; + assoc->max_nodes_per_job = + file_opts->max_nodes_per_job; + assoc->max_wall_duration_per_job = + file_opts->max_wall_duration_per_job; + assoc->max_cpu_secs_per_job = + file_opts->max_cpu_secs_per_job; + assoc->partition = xstrdup(file_opts->part); + assoc->user = xstrdup(file_opts->name); + + list_append(user_assoc_list, assoc); + /* don't add anything to the + curr_assoc_list */ + } else if(!sacctmgr_find_association_from_list( + curr_assoc_list, + file_opts->name, parent, + cluster_name, file_opts->part) + && !sacctmgr_find_association_from_list( + user_assoc_list, + 
file_opts->name, parent, + cluster_name, file_opts->part)) { + assoc = xmalloc(sizeof(acct_association_rec_t)); + assoc->acct = xstrdup(parent); + assoc->cluster = xstrdup(cluster_name); + assoc->fairshare = file_opts->fairshare; + assoc->max_jobs = file_opts->max_jobs; + assoc->max_nodes_per_job = + file_opts->max_nodes_per_job; + assoc->max_wall_duration_per_job = + file_opts->max_wall_duration_per_job; + assoc->max_cpu_secs_per_job = + file_opts->max_cpu_secs_per_job; + assoc->partition = xstrdup(file_opts->part); + assoc->user = xstrdup(file_opts->name); + + list_append(user_assoc_list, assoc); + /* don't add anything to the + curr_assoc_list */ + } + //info("got a user %s", file_opts->name); + _destroy_sacctmgr_file_opts(file_opts); + continue; + } else { + printf(" error: Misformatted line(%d): %s\n", lc, line); + rc = SLURM_ERROR; + break; + } + } + fclose(fd); + xfree(cluster_name); + xfree(parent); + + admin_field.name = "Admin"; + admin_field.len = 9; + admin_field.print_routine = print_str; + + name_field.name = "Name"; + name_field.len = 10; + name_field.print_routine = print_str; + + parent_field.name = "Parent"; + parent_field.len = 10; + parent_field.print_routine = print_str; + + acct_field.name = "Account"; + acct_field.len = 10; + acct_field.print_routine = print_str; + + dacct_field.name = "Def Acct"; + dacct_field.len = 10; + dacct_field.print_routine = print_str; + + desc_field.name = "Descr"; + desc_field.len = 10; + desc_field.print_routine = print_str; + + org_field.name = "Org"; + org_field.len = 10; + org_field.print_routine = print_str; + + qos_field.name = "QOS"; + qos_field.len = 9; + qos_field.print_routine = print_str; + + fs_field.name = "FairShare"; + fs_field.len = 10; + fs_field.print_routine = print_uint; + + mc_field.name = "MaxCPUSecs"; + mc_field.len = 10; + mc_field.print_routine = print_uint; + + mj_field.name = "MaxJobs"; + mj_field.len = 7; + mj_field.print_routine = print_uint; + + mn_field.name = "MaxNodes"; + mn_field.len = 8; + mn_field.print_routine = print_uint; + + mw_field.name = "MaxWall"; + mw_field.len = 7; + mw_field.print_routine = print_time; + + START_TIMER; + if(rc == SLURM_SUCCESS && list_count(acct_list)) { + printf("Accounts\n"); + + print_fields_list = list_create(NULL); + list_append(print_fields_list, &name_field); + list_append(print_fields_list, &desc_field); + list_append(print_fields_list, &org_field); + list_append(print_fields_list, &qos_field); + + print_header(print_fields_list); + + itr = list_iterator_create(acct_list); + while((acct = list_next(itr))) { + print_str(SLURM_PRINT_VALUE, &name_field, + acct->name); + print_str(SLURM_PRINT_VALUE, &desc_field, + acct->description); + print_str(SLURM_PRINT_VALUE, &org_field, + acct->organization); + print_str(SLURM_PRINT_VALUE, &qos_field, + acct_qos_str(acct->qos)); + printf("\n"); + } + list_iterator_destroy(itr); + list_destroy(print_fields_list); + rc = acct_storage_g_add_accounts(db_conn, my_uid, acct_list); + printf("---------------------------------------------------\n"); + set = 1; + } + + if(rc == SLURM_SUCCESS && list_count(acct_assoc_list)) { + printf("Account Associations\n"); + + print_fields_list = list_create(NULL); + list_append(print_fields_list, &name_field); + list_append(print_fields_list, &parent_field); + list_append(print_fields_list, &fs_field); + list_append(print_fields_list, &mc_field); + list_append(print_fields_list, &mj_field); + list_append(print_fields_list, &mn_field); + list_append(print_fields_list, &mw_field); + + 
print_header(print_fields_list); + + itr = list_iterator_create(acct_assoc_list); + while((assoc = list_next(itr))) { + print_str(SLURM_PRINT_VALUE, &name_field, assoc->acct); + print_str(SLURM_PRINT_VALUE, &parent_field, + assoc->parent_acct); + print_uint(SLURM_PRINT_VALUE, &fs_field, + assoc->fairshare); + print_uint(SLURM_PRINT_VALUE, &mc_field, + assoc->max_cpu_secs_per_job); + print_uint(SLURM_PRINT_VALUE, &mj_field, + assoc->max_jobs); + print_uint(SLURM_PRINT_VALUE, &mn_field, + assoc->max_nodes_per_job); + print_time(SLURM_PRINT_VALUE, &mw_field, + assoc->max_wall_duration_per_job); + printf("\n"); + } + list_iterator_destroy(itr); + list_destroy(print_fields_list); + + rc = acct_storage_g_add_associations( + db_conn, my_uid, acct_assoc_list); + printf("---------------------------------------------------\n"); + set = 1; + } + if(rc == SLURM_SUCCESS && list_count(user_list)) { + printf("Users\n"); + + print_fields_list = list_create(NULL); + list_append(print_fields_list, &name_field); + list_append(print_fields_list, &dacct_field); + list_append(print_fields_list, &qos_field); + list_append(print_fields_list, &admin_field); + + print_header(print_fields_list); + + itr = list_iterator_create(user_list); + while((acct = list_next(itr))) { + print_str(SLURM_PRINT_VALUE, &name_field, user->name); + print_str(SLURM_PRINT_VALUE, &dacct_field, + user->default_acct); + print_str(SLURM_PRINT_VALUE, &qos_field, + acct_qos_str(user->qos)); + print_str(SLURM_PRINT_VALUE, &admin_field, + acct_admin_level_str(user->admin_level)); + printf("\n"); + } + list_iterator_destroy(itr); + list_destroy(print_fields_list); + + rc = acct_storage_g_add_users(db_conn, my_uid, user_list); + printf("---------------------------------------------------\n"); + set = 1; + } + + if(rc == SLURM_SUCCESS && list_count(user_assoc_list)) { + printf("User Associations\n"); + + print_fields_list = list_create(NULL); + list_append(print_fields_list, &name_field); + list_append(print_fields_list, &acct_field); + list_append(print_fields_list, &fs_field); + list_append(print_fields_list, &mc_field); + list_append(print_fields_list, &mj_field); + list_append(print_fields_list, &mn_field); + list_append(print_fields_list, &mw_field); + + print_header(print_fields_list); + + itr = list_iterator_create(user_assoc_list); + while((assoc = list_next(itr))) { + print_str(SLURM_PRINT_VALUE, &name_field, assoc->user); + print_str(SLURM_PRINT_VALUE, &acct_field, assoc->acct); + print_uint(SLURM_PRINT_VALUE, &fs_field, + assoc->fairshare); + print_uint(SLURM_PRINT_VALUE, &mc_field, + assoc->max_cpu_secs_per_job); + print_uint(SLURM_PRINT_VALUE, &mj_field, + assoc->max_jobs); + print_uint(SLURM_PRINT_VALUE, &mn_field, + assoc->max_nodes_per_job); + print_uint(SLURM_PRINT_VALUE, &mw_field, + assoc->max_wall_duration_per_job); + printf("\n"); + } + list_iterator_destroy(itr); + list_destroy(print_fields_list); + + rc = acct_storage_g_add_associations( + db_conn, my_uid, user_assoc_list); + printf("---------------------------------------------------\n"); + set = 1; + } + END_TIMER2("add cluster"); + + info("Done adding cluster in %s", TIME_STR); + + if(rc == SLURM_SUCCESS) { + if(set) { + if(commit_check("Would you like to commit changes?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } else { + printf(" Nothing new added.\n"); + } + } else { + printf(" error: Problem with requests.\n"); + } + + list_destroy(acct_list); + list_destroy(acct_assoc_list); + 
list_destroy(user_list); + list_destroy(user_assoc_list); + if(curr_acct_list) + list_destroy(curr_acct_list); + if(curr_assoc_list) + list_destroy(curr_assoc_list); + if(curr_cluster_list) + list_destroy(curr_cluster_list); + if(curr_user_list) + list_destroy(curr_user_list); +} + +/* _usage - show the valid sacctmgr commands */ +void _usage () { + printf ("\ +sacctmgr [<OPTION>] [<COMMAND>] \n\ + Valid <OPTION> values are: \n\ + -a or --all: equivalent to \"all\" command \n\ + -h or --help: equivalent to \"help\" command \n\ + --hide: equivalent to \"hide\" command \n\ + -i or --immediate: commit changes immediately \n\ + -o or --oneliner: equivalent to \"oneliner\" command \n\ + -q or --quiet: equivalent to \"quiet\" command \n\ + -s or --associations: equivalent to \"associations\" command \n\ + -v or --verbose: equivalent to \"verbose\" command \n\ + -V or --version: equivalent to \"version\" command \n\ + \n\ + <keyword> may be omitted from the execute line and sacctmgr will execute \n\ + in interactive mode. It will process commands as entered until explicitly\n\ + terminated. \n\ + \n\ + Valid <COMMAND> values are: \n\ + all display information about all entities, \n\ + including hidden/deleted ones. \n\ + add <ENTITY> <SPECS> add entity \n\ + associations when using show/list will list the \n\ + associations associated with the entity. \n\ + delete <ENTITY> <SPECS> delete the specified entity(s) \n\ + exit terminate sacctmgr \n\ + help print this description of use. \n\ + hide do not display information about \n\ + hidden/deleted entities. \n\ + list <ENTITY> [<SPECS>] display info of identified entity, default \n\ + is display all. \n\ + modify <ENTITY> <SPECS> modify entity \n\ + no_header no header will be added to the beginning of \n\ + output. \n\ + oneliner report output one record per line. \n\ + quiet print no messages other than error messages. \n\ + quit terminate this command. \n\ + parsable output will be | delimited \n\ + show same as list \n\ + verbose enable detailed logging. \n\ + version display tool version number. \n\ + !! Repeat the last command entered. \n\ + \n\ + <ENTITY> may be \"cluster\", \"account\", or \"user\". \n\ + \n\ + <SPECS> are different for each command entity pair. 
\n\ + list account - Clusters=, Descriptions=, Format=, Names=, \n\ + Organizations=, Parents=, and WithAssocs \n\ + add account - Clusters=, Description=, Fairshare=, \n\ + MaxCPUSecs=, MaxJobs=, MaxNodes=, MaxWall=, \n\ + Names=, Organization=, Parent=, and QosLevel \n\ + modify account - (set options) Description=, Fairshare=, \n\ + MaxCPUSecs=, MaxJobs=, MaxNodes=, MaxWall=, \n\ + Organization=, Parent=, and QosLevel= \n\ + (where options) Clusters=, Descriptions=, \n\ + Names=, Organizations=, Parent=, and QosLevel= \n\ + delete account - Clusters=, Descriptions=, Names=, \n\ + Organizations=, and Parents= \n\ + \n\ + list associations - Accounts=, Clusters=, Format=, ID=, \n\ + Partitions=, Parent=, Users= \n\ + \n\ + list cluster - Names= Format= \n\ + add cluster - Fairshare=, MaxCPUSecs=, \n\ + MaxJobs=, MaxNodes=, MaxWall=, and Names= \n\ + modify cluster - (set options) Fairshare=, MaxCPUSecs=, \n\ + MaxJobs=, MaxNodes=, and MaxWall= \n\ + (where options) Names= \n\ + delete cluster - Names= \n\ + \n\ + list user - AdminLevel=, DefaultAccounts=, Format=, Names=,\n\ + QosLevel=, and WithAssocs \n\ + add user - Accounts=, AdminLevel=, Clusters=, \n\ + DefaultAccount=, Fairshare=, MaxCPUSecs=, \n\ + MaxJobs=, MaxNodes=, MaxWall=, Names=, \n\ + Partitions=, and QosLevel= \n\ + modify user - (set options) AdminLevel=, DefaultAccount=, \n\ + Fairshare=, MaxCPUSecs=, MaxJobs=, \n\ + MaxNodes=, MaxWall=, and QosLevel= \n\ + (where options) Accounts=, AdminLevel=, \n\ + Clusters=, DefaultAccounts=, Names=, \n\ + Partitions=, and QosLevel= \n\ + delete user - Accounts=, AdminLevel=, Clusters=, \n\ + DefaultAccounts=, and Names= \n\ + \n\ + \n\ + All commands entitys, and options are case-insensitive. \n\n"); + +} + diff --git a/src/sacctmgr/sacctmgr.h b/src/sacctmgr/sacctmgr.h new file mode 100644 index 000000000..00b665f56 --- /dev/null +++ b/src/sacctmgr/sacctmgr.h @@ -0,0 +1,185 @@ +/*****************************************************************************\ + * sacctmgr.h - definitions for all sacctmgr modules. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. 
+ * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef __SACCTMGR_H__ +#define __SACCTMGR_H__ + +#if HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_GETOPT_H +# include <getopt.h> +#else +# include "src/common/getopt.h" +#endif + +#include <ctype.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#ifdef HAVE_STRING_H +# include <string.h> +#endif +#ifdef HAVE_STRINGS_H +# include <strings.h> +#endif +#include <time.h> +#include <unistd.h> + +#if HAVE_READLINE +# include <readline/readline.h> +# include <readline/history.h> +#endif + +#if HAVE_INTTYPES_H +# include <inttypes.h> +#else /* !HAVE_INTTYPES_H */ +# if HAVE_STDINT_H +# include <stdint.h> +# endif +#endif /* HAVE_INTTYPES_H */ + +#include <slurm/slurm.h> + +#include "src/common/jobacct_common.h" +#include "src/common/parse_time.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/xstring.h" + +#define CKPT_WAIT 10 +#define MAX_INPUT_FIELDS 128 + +typedef enum { + SACCTMGR_ACTION_NOTSET, + SACCTMGR_USER_CREATE, + SACCTMGR_USER_MODIFY, + SACCTMGR_USER_DELETE, + SACCTMGR_ACCOUNT_CREATE, + SACCTMGR_ACCOUNT_MODIFY, + SACCTMGR_ACCOUNT_DELETE, + SACCTMGR_CLUSTER_CREATE, + SACCTMGR_CLUSTER_MODIFY, + SACCTMGR_CLUSTER_DELETE, + SACCTMGR_ASSOCIATION_CREATE, + SACCTMGR_ASSOCIATION_MODIFY, + SACCTMGR_ASSOCIATION_DELETE, + SACCTMGR_COORD_CREATE, + SACCTMGR_COORD_DELETE +} sacctmgr_action_type_t; + +typedef struct { + sacctmgr_action_type_t type; + void *cond; /* if the action has a condition typecast to an + * account_*_cond_t * */ + void *rec; /* if the action has a record typecast to an + * account_*_rec_t * or char * for type COORD */ + List list; /* if the action has a list */ +} sacctmgr_action_t; + +extern char *command_name; +extern int exit_code; /* sacctmgr's exit code, =1 on any error at any time */ +extern int exit_flag; /* program to terminate if =1 */ +extern int input_words; /* number of words of input permitted */ +extern int one_liner; /* one record per line if =1 */ +extern int quiet_flag; /* quiet=1, verbose=-1, normal=0 */ +extern int rollback_flag;/* immediate execute=0, else = 1 */ +extern int with_assoc_flag;/* show acct/user associations flag */ +extern void *db_conn; +extern uint32_t my_uid; + +extern int sacctmgr_add_association(int argc, char *argv[]); +extern int sacctmgr_add_user(int argc, char *argv[]); +extern int sacctmgr_add_account(int argc, char *argv[]); +extern int sacctmgr_add_cluster(int argc, char *argv[]); + +extern int sacctmgr_list_association(int argc, char *argv[]); +extern int sacctmgr_list_user(int argc, char *argv[]); +extern int sacctmgr_list_account(int argc, char *argv[]); +extern int sacctmgr_list_cluster(int argc, char *argv[]); + +extern int sacctmgr_modify_association(int argc, char *argv[]); +extern int sacctmgr_modify_user(int argc, char *argv[]); +extern int sacctmgr_modify_account(int argc, char *argv[]); +extern int sacctmgr_modify_cluster(int argc, char *argv[]); + +extern int sacctmgr_delete_association(int argc, char *argv[]); +extern int 
sacctmgr_delete_user(int argc, char *argv[]); +extern int sacctmgr_delete_account(int argc, char *argv[]); +extern int sacctmgr_delete_cluster(int argc, char *argv[]); + +/* common.c */ +extern int parse_option_end(char *option); +extern char *strip_quotes(char *option, int *increased); +extern void addto_char_list(List char_list, char *names); +extern void destroy_sacctmgr_action(void *object); +extern int notice_thread_init(); +extern int notice_thread_fini(); +extern int commit_check(char *warning); +extern int get_uint(char *in_value, uint32_t *out_value, char *type); + +/* you need to free the objects returned from these functions */ +extern acct_association_rec_t *sacctmgr_find_association(char *user, + char *account, + char *cluster, + char *partition); +extern acct_association_rec_t *sacctmgr_find_account_base_assoc( + char *account, char *cluster); +extern acct_association_rec_t *sacctmgr_find_root_assoc(char *cluster); +extern acct_user_rec_t *sacctmgr_find_user(char *name); +extern acct_account_rec_t *sacctmgr_find_account(char *name); +extern acct_cluster_rec_t *sacctmgr_find_cluster(char *name); + +/* do not free any of the object returned from these functions since + * they are pointing to an object in the list given + */ + +extern acct_association_rec_t *sacctmgr_find_association_from_list( + List assoc_list, char *user, char *account, + char *cluster, char *partition); +extern acct_association_rec_t *sacctmgr_find_account_base_assoc_from_list( + List assoc_list, char *account, char *cluster); +extern acct_user_rec_t *sacctmgr_find_user_from_list( + List user_list, char *name); +extern acct_account_rec_t *sacctmgr_find_account_from_list( + List acct_list, char *name); +extern acct_cluster_rec_t *sacctmgr_find_cluster_from_list( + List cluster_list, char *name); + + +#endif diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c new file mode 100644 index 000000000..5891753a8 --- /dev/null +++ b/src/sacctmgr/user_functions.c @@ -0,0 +1,1224 @@ +/*****************************************************************************\ + * user_functions.c - functions dealing with users in the accounting system. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. 
If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/sacctmgr/sacctmgr.h" +#include "src/sacctmgr/print.h" + +static int _set_cond(int *start, int argc, char *argv[], + acct_user_cond_t *user_cond, + List format_list) +{ + int i; + int u_set = 0; + int a_set = 0; + int end = 0; + + for (i=(*start); i<argc; i++) { + end = parse_option_end(argv[i]); + if (strncasecmp (argv[i], "Set", 3) == 0) { + i--; + break; + } else if (strncasecmp (argv[i], "WithAssoc", 4) == 0) { + user_cond->with_assocs = 1; + } else if(!end) { + addto_char_list(user_cond->user_list, argv[i]); + addto_char_list(user_cond->assoc_cond->user_list, + argv[i]); + u_set = 1; + } else if (strncasecmp (argv[i], "Account", 2) == 0) { + addto_char_list(user_cond->assoc_cond->acct_list, + argv[i]+end); + a_set = 1; + } else if (strncasecmp (argv[i], "AdminLevel", 2) == 0) { + user_cond->admin_level = + str_2_acct_admin_level(argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "Clusters", 1) == 0) { + addto_char_list(user_cond->assoc_cond->cluster_list, + argv[i]+end); + a_set = 1; + } else if (strncasecmp (argv[i], "DefaultAccount", 1) == 0) { + addto_char_list(user_cond->def_acct_list, + argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "Format", 1) == 0) { + if(format_list) + addto_char_list(format_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Names", 1) == 0 + || strncasecmp (argv[i], "Users", 1) == 0) { + addto_char_list(user_cond->user_list, argv[i]+end); + addto_char_list(user_cond->assoc_cond->user_list, + argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "Partition", 3) == 0) { + addto_char_list(user_cond->assoc_cond->partition_list, + argv[i]+end); + a_set = 1; + } else if (strncasecmp (argv[i], "QosLevel", 1) == 0) { + user_cond->qos = str_2_acct_qos(argv[i]+end); + u_set = 1; + } else { + printf(" Unknown condition: %s\n" + " Use keyword 'set' to modify value\n", argv[i]); + } + } + (*start) = i; + + if(a_set) { + return 2; + } else if(u_set) + return 1; + + return 0; +} + +static int _set_rec(int *start, int argc, char *argv[], + acct_user_rec_t *user, + acct_association_rec_t *association) +{ + int i, mins; + int u_set = 0; + int a_set = 0; + int end = 0; + + for (i=(*start); i<argc; i++) { + end = parse_option_end(argv[i]); + if (strncasecmp (argv[i], "Where", 5) == 0) { + i--; + break; + } else if(!end) { + printf(" Bad format on %s: End your option with " + "an '=' sign\n", argv[i]); + } else if (strncasecmp (argv[i], "AdminLevel", 2) == 0) { + user->admin_level = + str_2_acct_admin_level(argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "DefaultAccount", 1) == 0) { + user->default_acct = xstrdup(argv[i]+end); + u_set = 1; + } else if (strncasecmp (argv[i], "FairShare", 1) == 0) { + if (get_uint(argv[i]+end, &association->fairshare, + "FairShare") == SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxCPUSec", 4) == 0) { + if 
(get_uint(argv[i]+end, + &association->max_cpu_secs_per_job, + "MaxCPUSec") == SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxJobs", 4) == 0) { + if (get_uint(argv[i]+end, &association->max_jobs, + "MaxJobs") == SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxNodes", 4) == 0) { + if (get_uint(argv[i]+end, + &association->max_nodes_per_job, + "MaxNodes") == SLURM_SUCCESS) + a_set = 1; + } else if (strncasecmp (argv[i], "MaxWall", 4) == 0) { + mins = time_str2mins(argv[i]+end); + if (mins != NO_VAL) { + association->max_wall_duration_per_job + = (uint32_t) mins; + a_set = 1; + } else { + printf(" Bad MaxWall time format: %s\n", + argv[i]); + } + } else if (strncasecmp (argv[i], "QosLevel", 1) == 0) { + user->qos = str_2_acct_qos(argv[i]+end); + u_set = 1; + } else { + printf(" Unknown option: %s\n" + " Use keyword 'where' to modify condition\n", + argv[i]); + } + } + (*start) = i; + + if(u_set && a_set) + return 3; + else if(u_set) + return 1; + else if(a_set) + return 2; + return 0; +} + +/* static void _print_cond(acct_user_cond_t *user_cond) */ +/* { */ +/* ListIterator itr = NULL; */ +/* char *tmp_char = NULL; */ + +/* if(!user_cond) { */ +/* error("no acct_user_cond_t * given"); */ +/* return; */ +/* } */ + +/* if(user_cond->user_list && list_count(user_cond->user_list)) { */ +/* itr = list_iterator_create(user_cond->user_list); */ +/* printf(" Names = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(user_cond->def_acct_list */ +/* && list_count(user_cond->def_acct_list)) { */ +/* itr = list_iterator_create(user_cond->def_acct_list); */ +/* printf(" Default Account = %s\n", (char *)list_next(itr)); */ +/* while((tmp_char = list_next(itr))) { */ +/* printf(" or %s\n", tmp_char); */ +/* } */ +/* } */ + +/* if(user_cond->qos != ACCT_QOS_NOTSET) */ +/* printf(" Qos = %s\n", */ +/* acct_qos_str(user_cond->qos)); */ + +/* if(user_cond->admin_level != ACCT_ADMIN_NOTSET) */ +/* printf(" Admin Level = %s\n", */ +/* acct_admin_level_str(user_cond->admin_level)); */ +/* } */ + +/* static void _print_rec(acct_user_rec_t *user) */ +/* { */ +/* if(!user) { */ +/* error("no acct_user_rec_t * given"); */ +/* return; */ +/* } */ + +/* if(user->name) */ +/* printf(" Name = %s\n", user->name); */ + +/* if(user->default_acct) */ +/* printf(" Default Account = %s\n", user->default_acct); */ + +/* if(user->qos != ACCT_QOS_NOTSET) */ +/* printf(" Qos = %s\n", */ +/* acct_qos_str(user->qos)); */ + +/* if(user->admin_level != ACCT_ADMIN_NOTSET) */ +/* printf(" Admin Level = %s\n", */ +/* acct_admin_level_str(user->admin_level)); */ +/* } */ + +extern int sacctmgr_add_user(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + int i=0; + ListIterator itr = NULL; + ListIterator itr_a = NULL; + ListIterator itr_c = NULL; + ListIterator itr_p = NULL; + acct_user_rec_t *user = NULL; + acct_association_rec_t *assoc = NULL; + char *default_acct = NULL; + acct_association_cond_t *assoc_cond = NULL; + acct_association_cond_t query_assoc_cond; + acct_qos_level_t qos = ACCT_QOS_NOTSET; + acct_admin_level_t admin_level = ACCT_ADMIN_NOTSET; + char *name = NULL, *account = NULL, *cluster = NULL, *partition = NULL; + int partition_set = 0; + List user_list = NULL; + List assoc_list = NULL; + List local_assoc_list = NULL; + List local_acct_list = NULL; + List local_user_list = NULL; + uint32_t fairshare = NO_VAL; + uint32_t max_jobs = NO_VAL; + uint32_t max_nodes_per_job = NO_VAL; + uint32_t 
max_wall_duration_per_job = NO_VAL; + uint32_t max_cpu_secs_per_job = NO_VAL; + char *user_str = NULL; + char *assoc_str = NULL; + int limit_set = 0, mins; + int first = 1; + int acct_first = 1; + +/* if(!list_count(sacctmgr_cluster_list)) { */ +/* printf(" Can't add users, no cluster defined yet.\n" */ +/* " Please contact your administrator.\n"); */ +/* return SLURM_ERROR; */ +/* } */ + + assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + assoc_cond->user_list = list_create(slurm_destroy_char); + assoc_cond->acct_list = list_create(slurm_destroy_char); + assoc_cond->cluster_list = list_create(slurm_destroy_char); + assoc_cond->partition_list = list_create(slurm_destroy_char); + + for (i=0; i<argc; i++) { + int end = parse_option_end(argv[i]); + if(!end) { + addto_char_list(assoc_cond->user_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Accounts", 2) == 0) { + addto_char_list(assoc_cond->acct_list, + argv[i]+end); + } else if (strncasecmp (argv[i], "AdminLevel", 2) == 0) { + admin_level = str_2_acct_admin_level(argv[i]+end); + } else if (strncasecmp (argv[i], "Clusters", 1) == 0) { + addto_char_list(assoc_cond->cluster_list, + argv[i]+end); + } else if (strncasecmp (argv[i], "DefaultAccount", 1) == 0) { + default_acct = xstrdup(argv[i]+end); + addto_char_list(assoc_cond->acct_list, + argv[i]+end); + } else if (strncasecmp (argv[i], "FairShare", 1) == 0) { + if (get_uint(argv[i]+end, &fairshare, + "FairShare") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxCPUSecs", 4) == 0) { + if (get_uint(argv[i]+end, &max_cpu_secs_per_job, + "MaxCPUSecs") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxJobs", 4) == 0) { + if (get_uint(argv[i]+end, &max_jobs, + "MaxJobs") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxNodes", 4) == 0) { + if (get_uint(argv[i]+end, &max_nodes_per_job, + "MaxNodes") == SLURM_SUCCESS) + limit_set = 1; + } else if (strncasecmp (argv[i], "MaxWall", 4) == 0) { + mins = time_str2mins(argv[i]+end); + if (mins != NO_VAL) { + max_wall_duration_per_job = (uint32_t) mins; + limit_set = 1; + } else { + printf(" Bad MaxWall time format: %s\n", + argv[i]); + } + } else if (strncasecmp (argv[i], "Names", 1) == 0) { + addto_char_list(assoc_cond->user_list, argv[i]+end); + } else if (strncasecmp (argv[i], "Partitions", 1) == 0) { + addto_char_list(assoc_cond->partition_list, + argv[i]+end); + } else if (strncasecmp (argv[i], "QosLevel", 1) == 0) { + qos = str_2_acct_qos(argv[i]+end); + } else { + printf(" Unknown option: %s\n", argv[i]); + } + } + + if(!list_count(assoc_cond->user_list)) { + destroy_acct_association_cond(assoc_cond); + printf(" Need name of user to add.\n"); + return SLURM_ERROR; + } else { + acct_user_cond_t user_cond; + + memset(&user_cond, 0, sizeof(acct_user_cond_t)); + user_cond.user_list = assoc_cond->user_list; + + local_user_list = acct_storage_g_get_users( + db_conn, &user_cond); + + } + if(!local_user_list) { + printf(" Problem getting users from database. 
" + "Contact your admin.\n"); + destroy_acct_association_cond(assoc_cond); + return SLURM_ERROR; + } + + + if(!list_count(assoc_cond->acct_list)) { + destroy_acct_association_cond(assoc_cond); + printf(" Need name of acct to add user to.\n"); + return SLURM_ERROR; + } else { + acct_account_cond_t account_cond; + + memset(&account_cond, 0, sizeof(acct_account_cond_t)); + account_cond.acct_list = assoc_cond->acct_list; + + local_acct_list = acct_storage_g_get_accounts( + db_conn, &account_cond); + + } + + if(!local_acct_list) { + printf(" Problem getting accounts from database. " + "Contact your admin.\n"); + list_destroy(local_user_list); + destroy_acct_association_cond(assoc_cond); + return SLURM_ERROR; + } + + + if(!list_count(assoc_cond->cluster_list)) { + List cluster_list = NULL; + acct_cluster_rec_t *cluster_rec = NULL; + + cluster_list = acct_storage_g_get_clusters(db_conn, NULL); + if(!cluster_list) { + printf(" Problem getting clusters from database. " + "Contact your admin.\n"); + destroy_acct_association_cond(assoc_cond); + list_destroy(local_user_list); + list_destroy(local_acct_list); + return SLURM_ERROR; + } + + itr_c = list_iterator_create(cluster_list); + while((cluster_rec = list_next(itr_c))) { + list_append(assoc_cond->cluster_list, + xstrdup(cluster_rec->name)); + } + list_iterator_destroy(itr_c); + + if(!list_count(assoc_cond->cluster_list)) { + printf(" Can't add users, no cluster defined yet.\n" + " Please contact your administrator.\n"); + destroy_acct_association_cond(assoc_cond); + list_destroy(local_user_list); + list_destroy(local_acct_list); + return SLURM_ERROR; + } + } + + if(!default_acct) { + itr_a = list_iterator_create(assoc_cond->acct_list); + default_acct = xstrdup(list_next(itr_a)); + list_iterator_destroy(itr_a); + } + + /* we are adding these lists to the global lists and will be + freed when they are */ + user_list = list_create(destroy_acct_user_rec); + assoc_list = list_create(destroy_acct_association_rec); + + memset(&query_assoc_cond, 0, sizeof(acct_association_cond_t)); + query_assoc_cond.acct_list = assoc_cond->acct_list; + query_assoc_cond.cluster_list = assoc_cond->cluster_list; + local_assoc_list = acct_storage_g_get_associations( + db_conn, &query_assoc_cond); + + itr = list_iterator_create(assoc_cond->user_list); + while((name = list_next(itr))) { + user = NULL; + if(!sacctmgr_find_user_from_list(local_user_list, name)) { + if(!default_acct) { + printf(" Need a default account for " + "these users to add.\n"); + rc = SLURM_ERROR; + goto no_default; + } + if(first) { + if(!sacctmgr_find_account_from_list( + local_acct_list, default_acct)) { + printf(" error: This account '%s' " + "doesn't exist.\n" + " Contact your admin " + "to add this account.\n", + default_acct); + continue; + } + first = 0; + } + user = xmalloc(sizeof(acct_user_rec_t)); + user->assoc_list = list_create(NULL); + user->name = xstrdup(name); + user->default_acct = xstrdup(default_acct); + user->qos = qos; + user->admin_level = admin_level; + xstrfmtcat(user_str, " %s\n", name); + + list_append(user_list, user); + } + + itr_a = list_iterator_create(assoc_cond->acct_list); + while((account = list_next(itr_a))) { + if(acct_first) { + if(!sacctmgr_find_account_from_list( + local_acct_list, default_acct)) { + printf(" error: This account '%s' " + "doesn't exist.\n" + " Contact your admin " + "to add this account.\n", + account); + continue; + } + } + itr_c = list_iterator_create(assoc_cond->cluster_list); + while((cluster = list_next(itr_c))) { + 
if(!sacctmgr_find_account_base_assoc_from_list( + local_assoc_list, account, + cluster)) { + if(acct_first) + printf(" error: This " + "account '%s' " + "doesn't exist on " + "cluster %s\n" + " Contact your " + "admin " + "to add this account.\n", + account, cluster); + continue; + } + + itr_p = list_iterator_create( + assoc_cond->partition_list); + while((partition = list_next(itr_p))) { + partition_set = 1; + if(sacctmgr_find_association_from_list( + local_assoc_list, + name, account, + cluster, partition)) + continue; + assoc = xmalloc( + sizeof(acct_association_rec_t)); + assoc->user = xstrdup(name); + assoc->acct = xstrdup(account); + assoc->cluster = xstrdup(cluster); + assoc->partition = xstrdup(partition); + assoc->fairshare = fairshare; + assoc->max_jobs = max_jobs; + assoc->max_nodes_per_job = + max_nodes_per_job; + assoc->max_wall_duration_per_job = + max_wall_duration_per_job; + assoc->max_cpu_secs_per_job = + max_cpu_secs_per_job; + if(user) + list_append(user->assoc_list, + assoc); + else + list_append(assoc_list, assoc); + xstrfmtcat(assoc_str, + " U = %-9.9s" + " A = %-10.10s" + " C = %-10.10s" + " P = %-10.10s\n", + assoc->user, assoc->acct, + assoc->cluster, + assoc->partition); + } + list_iterator_destroy(itr_p); + if(partition_set) + continue; + + if(sacctmgr_find_association_from_list( + local_assoc_list, + name, account, cluster, NULL)) + continue; + + assoc = xmalloc(sizeof(acct_association_rec_t)); + assoc->user = xstrdup(name); + assoc->acct = xstrdup(account); + assoc->cluster = xstrdup(cluster); + assoc->fairshare = fairshare; + assoc->max_jobs = max_jobs; + assoc->max_nodes_per_job = max_nodes_per_job; + assoc->max_wall_duration_per_job = + max_wall_duration_per_job; + assoc->max_cpu_secs_per_job = + max_cpu_secs_per_job; + if(user) + list_append(user->assoc_list, assoc); + else + list_append(assoc_list, assoc); + xstrfmtcat(assoc_str, + " U = %-9.9s" + " A = %-10.10s" + " C = %-10.10s\n", + assoc->user, assoc->acct, + assoc->cluster); + } + list_iterator_destroy(itr_c); + } + list_iterator_destroy(itr_a); + acct_first = 0; + } +no_default: + list_iterator_destroy(itr); + list_destroy(local_user_list); + list_destroy(local_acct_list); + list_destroy(local_assoc_list); + destroy_acct_association_cond(assoc_cond); + + if(!list_count(user_list) && !list_count(assoc_list)) { + printf(" Nothing new added.\n"); + goto end_it; + } else if(!assoc_str) { + printf(" Error: no associations created.\n"); + goto end_it; + } + + if(user_str) { + printf(" Adding User(s)\n%s", user_str); + printf(" Settings =\n"); + printf(" Default Account = %s\n", default_acct); + if(qos != ACCT_QOS_NOTSET) + printf(" Qos = %s\n", acct_qos_str(qos)); + + if(admin_level != ACCT_ADMIN_NOTSET) + printf(" Admin Level = %s\n", + acct_admin_level_str(admin_level)); + xfree(user_str); + } + + if(assoc_str) { + printf(" Associations =\n%s", assoc_str); + xfree(assoc_str); + } + + if(limit_set) { + printf(" Non Default Settings\n"); + if(fairshare == INFINITE) + printf(" Fairshare = NONE\n"); + else if(fairshare != NO_VAL) + printf(" Fairshare = %u\n", fairshare); + + if(max_cpu_secs_per_job == INFINITE) + printf(" MaxCPUSecs = NONE\n"); + else if(max_cpu_secs_per_job != NO_VAL) + printf(" MaxCPUSecs = %u\n", + max_cpu_secs_per_job); + + if(max_jobs == INFINITE) + printf(" MaxJobs = NONE\n"); + else if(max_jobs != NO_VAL) + printf(" MaxJobs = %u\n", max_jobs); + + if(max_nodes_per_job == INFINITE) + printf(" MaxNodes = NONE\n"); + else if(max_nodes_per_job != NO_VAL) + printf(" MaxNodes = %u\n", 
max_nodes_per_job); + + if(max_wall_duration_per_job == INFINITE) + printf(" MaxWall = NONE\n"); + else if(max_wall_duration_per_job != NO_VAL) { + char time_buf[32]; + mins2time_str((time_t) max_wall_duration_per_job, + time_buf, sizeof(time_buf)); + printf(" MaxWall = %s\n", time_buf); + } + } + + notice_thread_init(); + if(list_count(user_list)) { + rc = acct_storage_g_add_users(db_conn, my_uid, + user_list); + } + + if(rc == SLURM_SUCCESS) { + if(list_count(assoc_list)) + rc = acct_storage_g_add_associations(db_conn, my_uid, + assoc_list); + } else { + printf(" error: Problem adding users\n"); + rc = SLURM_ERROR; + notice_thread_fini(); + goto end_it; + } + notice_thread_fini(); + + if(rc == SLURM_SUCCESS) { + if(commit_check("Would you like to commit changes?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } else { + printf(" error: Problem adding user associations\n"); + rc = SLURM_ERROR; + } + +end_it: + list_destroy(user_list); + list_destroy(assoc_list); + xfree(default_acct); + + return rc; +} + +extern int sacctmgr_list_user(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_user_cond_t *user_cond = xmalloc(sizeof(acct_user_cond_t)); + List user_list; + int i=0; + ListIterator itr = NULL; + ListIterator itr2 = NULL; + acct_user_rec_t *user = NULL; + acct_association_rec_t *assoc = NULL; + char *object; + + print_field_t *field = NULL; + + List format_list = list_create(slurm_destroy_char); + List print_fields_list; /* types are of print_field_t */ + + enum { + PRINT_ACCOUNT, + PRINT_ADMIN, + PRINT_CLUSTER, + PRINT_DACCT, + PRINT_FAIRSHARE, + PRINT_ID, + PRINT_MAXC, + PRINT_MAXJ, + PRINT_MAXN, + PRINT_MAXW, + PRINT_QOS, + PRINT_PID, + PRINT_PNAME, + PRINT_PART, + PRINT_USER + }; + + user_cond->user_list = list_create(slurm_destroy_char); + user_cond->def_acct_list = list_create(slurm_destroy_char); + user_cond->with_assocs = with_assoc_flag; + + user_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + user_cond->assoc_cond->user_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->acct_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->partition_list = list_create(slurm_destroy_char); + + _set_cond(&i, argc, argv, user_cond, format_list); + + if(!list_count(format_list)) { + addto_char_list(format_list, "U,D,Q,Ad"); + if(user_cond->with_assocs) + addto_char_list(format_list, + "C,Ac,Part,F,MaxC,MaxJ,MaxN,MaxW"); + + } + + user_list = acct_storage_g_get_users(db_conn, user_cond); + destroy_acct_user_cond(user_cond); + + if(!user_list) { + list_destroy(format_list); + return SLURM_ERROR; + } + + print_fields_list = list_create(destroy_print_field); + + itr = list_iterator_create(format_list); + while((object = list_next(itr))) { + field = xmalloc(sizeof(print_field_t)); + if(!strncasecmp("Account", object, 2)) { + field->type = PRINT_ACCOUNT; + field->name = xstrdup("Account"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("AdminLevel", object, 2)) { + field->type = PRINT_ADMIN; + field->name = xstrdup("Admin"); + field->len = 9; + field->print_routine = print_str; + } else if(!strncasecmp("Cluster", object, 1)) { + field->type = PRINT_CLUSTER; + field->name = xstrdup("Cluster"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("Default", object, 1)) { + field->type = PRINT_DACCT; + field->name = xstrdup("Def Acct"); + 
field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("FairShare", object, 1)) { + field->type = PRINT_FAIRSHARE; + field->name = xstrdup("FairShare"); + field->len = 9; + field->print_routine = print_uint; + } else if(!strncasecmp("ID", object, 1)) { + field->type = PRINT_ID; + field->name = xstrdup("ID"); + field->len = 6; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxCPUSecs", object, 4)) { + field->type = PRINT_MAXC; + field->name = xstrdup("MaxCPUSecs"); + field->len = 11; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxJobs", object, 4)) { + field->type = PRINT_MAXJ; + field->name = xstrdup("MaxJobs"); + field->len = 7; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxNodes", object, 4)) { + field->type = PRINT_MAXN; + field->name = xstrdup("MaxNodes"); + field->len = 8; + field->print_routine = print_uint; + } else if(!strncasecmp("MaxWall", object, 4)) { + field->type = PRINT_MAXW; + field->name = xstrdup("MaxWall"); + field->len = 11; + field->print_routine = print_time; + } else if(!strncasecmp("QOS", object, 1)) { + field->type = PRINT_QOS; + field->name = xstrdup("QOS"); + field->len = 9; + field->print_routine = print_str; + } else if(!strncasecmp("ParentID", object, 7)) { + field->type = PRINT_PID; + field->name = xstrdup("Par ID"); + field->len = 6; + field->print_routine = print_uint; + } else if(!strncasecmp("Partition", object, 4)) { + field->type = PRINT_PART; + field->name = xstrdup("Partition"); + field->len = 10; + field->print_routine = print_str; + } else if(!strncasecmp("User", object, 1)) { + field->type = PRINT_USER; + field->name = xstrdup("User"); + field->len = 10; + field->print_routine = print_str; + } else { + printf("Unknown field '%s'\n", object); + xfree(field); + continue; + } + list_append(print_fields_list, field); + } + list_iterator_destroy(itr); + + itr = list_iterator_create(user_list); + itr2 = list_iterator_create(print_fields_list); + print_header(print_fields_list); + + while((user = list_next(itr))) { + if(user->assoc_list && list_count(user->assoc_list)) { + ListIterator itr3 = + list_iterator_create(user->assoc_list); + + while((assoc = list_next(itr3))) { + while((field = list_next(itr2))) { + switch(field->type) { + case PRINT_ACCOUNT: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->acct); + break; + case PRINT_ADMIN: + field->print_routine( + SLURM_PRINT_VALUE, + field, + acct_admin_level_str( + user-> + admin_level)); + break; + case PRINT_CLUSTER: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->cluster); + break; + case PRINT_DACCT: + field->print_routine( + SLURM_PRINT_VALUE, + field, + user->default_acct); + break; + case PRINT_FAIRSHARE: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->fairshare); + break; + case PRINT_ID: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->id); + break; + case PRINT_MAXC: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc-> + max_cpu_secs_per_job); + break; + case PRINT_MAXJ: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->max_jobs); + break; + case PRINT_MAXN: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc-> + max_nodes_per_job); + break; + case PRINT_MAXW: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc-> + max_wall_duration_per_job); + break; + case PRINT_QOS: + field->print_routine( + SLURM_PRINT_VALUE, + field, + acct_qos_str( + user->qos)); + break; + case PRINT_PID: + field->print_routine( + 
SLURM_PRINT_VALUE, + field, + assoc->parent_id); + break; + case PRINT_PNAME: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->parent_acct); + break; + case PRINT_PART: + field->print_routine( + SLURM_PRINT_VALUE, + field, + assoc->partition); + break; + case PRINT_USER: + field->print_routine( + SLURM_PRINT_VALUE, + field, + user->name); + break; + default: + break; + } + } + list_iterator_reset(itr2); + printf("\n"); + } + list_iterator_destroy(itr3); + } else { + while((field = list_next(itr2))) { + switch(field->type) { + case PRINT_ACCOUNT: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_ADMIN: + field->print_routine( + SLURM_PRINT_VALUE, field, + acct_admin_level_str( + user->admin_level)); + break; + case PRINT_CLUSTER: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_DACCT: + field->print_routine( + SLURM_PRINT_VALUE, field, + user->default_acct); + break; + case PRINT_FAIRSHARE: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_ID: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_MAXC: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_MAXJ: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_MAXN: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_MAXW: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_QOS: + field->print_routine( + SLURM_PRINT_VALUE, field, + acct_qos_str(user->qos)); + break; + case PRINT_PID: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_PART: + field->print_routine( + SLURM_PRINT_VALUE, field, + NULL); + break; + case PRINT_USER: + field->print_routine( + SLURM_PRINT_VALUE, field, + user->name); + break; + default: + break; + } + } + list_iterator_reset(itr2); + printf("\n"); + } + } + + printf("\n"); + + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + list_destroy(user_list); + list_destroy(print_fields_list); + + return rc; +} + +extern int sacctmgr_modify_user(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_user_cond_t *user_cond = xmalloc(sizeof(acct_user_cond_t)); + acct_user_rec_t *user = xmalloc(sizeof(acct_user_rec_t)); + acct_association_rec_t *assoc = xmalloc(sizeof(acct_association_rec_t)); + int i=0; + int cond_set = 0, rec_set = 0, set = 0; + List ret_list = NULL; + + user_cond->user_list = list_create(slurm_destroy_char); + user_cond->def_acct_list = list_create(slurm_destroy_char); + + user_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + user_cond->assoc_cond->user_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->acct_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->partition_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->fairshare = NO_VAL; + user_cond->assoc_cond->max_cpu_secs_per_job = NO_VAL; + user_cond->assoc_cond->max_jobs = NO_VAL; + user_cond->assoc_cond->max_nodes_per_job = NO_VAL; + user_cond->assoc_cond->max_wall_duration_per_job = NO_VAL; + + assoc->fairshare = NO_VAL; + assoc->max_cpu_secs_per_job = NO_VAL; + assoc->max_jobs = NO_VAL; + assoc->max_nodes_per_job = NO_VAL; + assoc->max_wall_duration_per_job = NO_VAL; + + for (i=0; i<argc; i++) { + if (strncasecmp (argv[i], "Where", 5) == 0) { + i++; + cond_set = _set_cond(&i, argc, argv, user_cond, NULL); + + } 
else if (strncasecmp (argv[i], "Set", 3) == 0) { + i++; + rec_set = _set_rec(&i, argc, argv, user, assoc); + } else { + cond_set = _set_cond(&i, argc, argv, user_cond, NULL); + } + } + + if(!rec_set) { + printf(" You didn't give me anything to set\n"); + destroy_acct_user_cond(user_cond); + destroy_acct_user_rec(user); + destroy_acct_association_rec(assoc); + return SLURM_ERROR; + } else if(!cond_set) { + if(!commit_check("You didn't set any conditions with 'WHERE'.\n" + "Are you sure you want to continue?")) { + printf("Aborted\n"); + destroy_acct_user_cond(user_cond); + destroy_acct_user_rec(user); + destroy_acct_association_rec(assoc); + return SLURM_SUCCESS; + } + } + + notice_thread_init(); + if(rec_set == 3 || rec_set == 1) { // process the account changes + if(cond_set == 2) { + rc = SLURM_ERROR; + goto assoc_start; + } + ret_list = acct_storage_g_modify_users( + db_conn, my_uid, user_cond, user); + if(ret_list && list_count(ret_list)) { + char *object = NULL; + ListIterator itr = list_iterator_create(ret_list); + printf(" Modified users...\n"); + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + list_destroy(ret_list); + set = 1; + } else if(ret_list) { + printf(" Nothing modified\n"); + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + if(ret_list) + list_destroy(ret_list); + } + +assoc_start: + if(rec_set == 3 || rec_set == 2) { // process the association changes + ret_list = acct_storage_g_modify_associations( + db_conn, my_uid, user_cond->assoc_cond, assoc); + + if(ret_list && list_count(ret_list)) { + char *object = NULL; + ListIterator itr = list_iterator_create(ret_list); + printf(" Modified account associations...\n"); + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + set = 1; + } else if(ret_list) { + printf(" Nothing modified\n"); + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + if(ret_list) + list_destroy(ret_list); + } + + notice_thread_fini(); + if(set) { + if(commit_check("Would you like to commit changes?")) + acct_storage_g_commit(db_conn, 1); + else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } + + destroy_acct_user_cond(user_cond); + destroy_acct_user_rec(user); + destroy_acct_association_rec(assoc); + + return rc; +} + +extern int sacctmgr_delete_user(int argc, char *argv[]) +{ + int rc = SLURM_SUCCESS; + acct_user_cond_t *user_cond = xmalloc(sizeof(acct_user_cond_t)); + int i=0; + List ret_list = NULL; + int set = 0; + + user_cond->user_list = list_create(slurm_destroy_char); + user_cond->def_acct_list = list_create(slurm_destroy_char); + + user_cond->assoc_cond = xmalloc(sizeof(acct_association_cond_t)); + user_cond->assoc_cond->user_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->acct_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char); + user_cond->assoc_cond->partition_list = list_create(slurm_destroy_char); + + if(!(set = _set_cond(&i, argc, argv, user_cond, NULL))) { + printf(" No conditions given to remove, not executing.\n"); + destroy_acct_user_cond(user_cond); + return SLURM_ERROR; + } + + notice_thread_init(); + if(set == 1) { + ret_list = acct_storage_g_remove_users( + db_conn, my_uid, user_cond); + } else if(set == 2) { + ret_list = acct_storage_g_remove_associations( + db_conn, my_uid, user_cond->assoc_cond); + } + notice_thread_fini(); + + destroy_acct_user_cond(user_cond); + + if(ret_list && 
list_count(ret_list)) { + char *object = NULL; + ListIterator itr = list_iterator_create(ret_list); + if(set == 1) { + printf(" Deleting users...\n"); + } else if(set == 2) { + printf(" Deleting user associations...\n"); + } + while((object = list_next(itr))) { + printf(" %s\n", object); + } + list_iterator_destroy(itr); + if(commit_check("Would you like to commit changes?")) { + acct_storage_g_commit(db_conn, 1); + } else { + printf(" Changes Discarded\n"); + acct_storage_g_commit(db_conn, 0); + } + } else if(ret_list) { + printf(" Nothing deleted\n"); + } else { + printf(" Error with request\n"); + rc = SLURM_ERROR; + } + + + if(ret_list) + list_destroy(ret_list); + + return rc; +} diff --git a/src/salloc/Makefile.am b/src/salloc/Makefile.am index 5f65b5e67..a27f098f6 100644 --- a/src/salloc/Makefile.am +++ b/src/salloc/Makefile.am @@ -6,14 +6,14 @@ INCLUDES = -I$(top_srcdir) bin_PROGRAMS = salloc -salloc_SOURCES = salloc.c salloc.h opt.c opt.h msg.c msg.h +salloc_SOURCES = salloc.c salloc.h opt.c opt.h convenience_libs = $(top_builddir)/src/api/libslurmhelper.la salloc_LDADD = \ $(convenience_libs) -salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) force: $(convenience_libs) : force diff --git a/src/salloc/Makefile.in b/src/salloc/Makefile.in index 938fae8b1..1c34e396f 100644 --- a/src/salloc/Makefile.in +++ b/src/salloc/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -68,13 +70,13 @@ CONFIG_CLEAN_FILES = am__installdirs = "$(DESTDIR)$(bindir)" binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) PROGRAMS = $(bin_PROGRAMS) -am_salloc_OBJECTS = salloc.$(OBJEXT) opt.$(OBJEXT) msg.$(OBJEXT) +am_salloc_OBJECTS = salloc.$(OBJEXT) opt.$(OBJEXT) salloc_OBJECTS = $(am_salloc_OBJECTS) salloc_DEPENDENCIES = $(convenience_libs) salloc_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(salloc_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -114,6 +116,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -127,10 +130,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -150,7 +156,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -161,6 +170,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -176,6 +187,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -191,6 +203,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -249,12 +262,12 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign INCLUDES = -I$(top_srcdir) -salloc_SOURCES = salloc.c salloc.h opt.c opt.h msg.c msg.h +salloc_SOURCES = salloc.c salloc.h opt.c opt.h convenience_libs = $(top_builddir)/src/api/libslurmhelper.la salloc_LDADD = \ $(convenience_libs) -salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +salloc_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) all: all-am .SUFFIXES: @@ -297,8 +310,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -326,7 +339,6 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msg.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opt.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/salloc.Po@am__quote@ @@ -362,8 +374,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - 
$(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +387,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +398,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/salloc/msg.h b/src/salloc/msg.h deleted file mode 100644 index 71267db9e..000000000 --- a/src/salloc/msg.h +++ /dev/null @@ -1,39 +0,0 @@ -/*****************************************************************************\ - * src/salloc/msg.h - Message handler for salloc - * - * $Id: salloc.c 8570 2006-07-13 21:12:58Z morrone $ - ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#ifndef _SALLOC_MSG_H -#define _SALLOC_MSG_H - -#include <stdint.h> - -typedef struct salloc_msg_thread salloc_msg_thread_t; - -extern salloc_msg_thread_t *msg_thr_create(uint16_t *port); -extern void msg_thr_destroy(salloc_msg_thread_t *msg_thr); - -#endif /* _SALLOC_MSG_H */ diff --git a/src/salloc/opt.c b/src/salloc/opt.c index fc630c8c8..652c91eb6 100644 --- a/src/salloc/opt.c +++ b/src/salloc/opt.c @@ -5,7 +5,7 @@ * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -61,6 +61,7 @@ #include "src/common/list.h" #include "src/common/log.h" #include "src/common/parse_time.h" +#include "src/common/proc_args.h" #include "src/common/slurm_protocol_api.h" #include "src/common/uid.h" #include "src/common/xmalloc.h" @@ -87,6 +88,7 @@ #define OPT_JOBID 0x11 #define OPT_EXCLUSIVE 0x12 #define OPT_OVERCOMMIT 0x13 +#define OPT_ACCTG_FREQ 0x14 /* generic getopt_long flags, integers and *not* valid characters */ #define LONG_OPT_JOBID 0x105 @@ -116,6 +118,15 @@ #define LONG_OPT_RAMDISK_IMAGE 0x123 #define LONG_OPT_NOSHELL 0x124 #define LONG_OPT_GET_USER_ENV 0x125 +#define LONG_OPT_SOCKETSPERNODE 0x130 +#define LONG_OPT_CORESPERSOCKET 0x131 +#define LONG_OPT_THREADSPERCORE 0x132 +#define LONG_OPT_NTASKSPERNODE 0x136 +#define LONG_OPT_NTASKSPERSOCKET 0x137 +#define LONG_OPT_NTASKSPERCORE 0x138 +#define LONG_OPT_TASK_MEM 0x13a +#define LONG_OPT_HINT 0x13b +#define LONG_OPT_ACCTG_FREQ 0x13c /*---- global variables, defined in opt.h ----*/ opt_t opt; @@ -124,9 +135,6 @@ opt_t opt; typedef struct env_vars env_vars_t; -/* return command name from its full path name */ -static char * _base_name(char* command); - /* Get a decimal integer from arg */ static int _get_int(const char *arg, const char *what); @@ -147,16 +155,9 @@ static void _opt_list(void); static bool _opt_verify(void); static void _proc_get_user_env(char *optarg); -static void _print_version(void); static void _process_env_var(env_vars_t *e, const char *val); -static uint16_t _parse_mail_type(const char *arg); -static char *_print_mail_type(const uint16_t type); static int _parse_signal(const char *signal_name); -static long _to_bytes(const char *arg); static void _usage(void); -static bool _verify_node_count(const char *arg, int *min, int *max); -static int _verify_geometry(const char *arg, uint16_t *geometry); -static int _verify_conn_type(const char *arg); /*---[ end forward declarations of static functions ]---------------------*/ @@ -178,216 +179,6 @@ int initialize_and_process_args(int argc, char *argv[]) } -static void _print_version(void) -{ - printf("%s %s\n", PACKAGE, SLURM_VERSION); -} - -/* - * verify that a connection type in arg is of known form - * returns the connection_type or -1 if not recognized - */ -static int _verify_conn_type(const char *arg) -{ - int len = strlen(arg); - - if (!strncasecmp(arg, "MESH", len)) - return SELECT_MESH; - else if (!strncasecmp(arg, "TORUS", len)) - return SELECT_TORUS; - else if (!strncasecmp(arg, "NAV", len)) - return SELECT_NAV; - - error("invalid --conn-type argument %s ignored.", arg); - return -1; -} - -/* - * verify geometry arguments, must have proper count - * returns -1 on error, 0 otherwise - */ -static int _verify_geometry(const char *arg, uint16_t *geometry) -{ - char* token, *delimiter = ",x", *next_ptr; - int i, rc = 0; - char* geometry_tmp = xstrdup(arg); - char* original_ptr = geometry_tmp; - - token = strtok_r(geometry_tmp, delimiter, &next_ptr); - for (i=0; i<SYSTEM_DIMENSIONS; i++) { - if (token == NULL) { - error("insufficient dimensions in --geometry"); - rc = -1; - break; - } - geometry[i] = (uint16_t)atoi(token); - if (geometry[i] == 0 || geometry[i] == (uint16_t)NO_VAL) { - error("invalid --geometry argument"); - rc = -1; - break; - } - geometry_tmp = next_ptr; - token = strtok_r(geometry_tmp, delimiter, &next_ptr); - } - if (token != NULL) { - error("too many dimensions in --geometry"); - rc = -1; - } - - if 
(original_ptr) - xfree(original_ptr); - - return rc; -} - -/* Convert a string into a node count */ -static int -_str_to_nodes(const char *num_str, char **leftover) -{ - long int num; - char *endptr; - - num = strtol(num_str, &endptr, 10); - if (endptr == num_str) { /* no valid digits */ - *leftover = (char *)num_str; - return 0; - } - if (*endptr != '\0' && (*endptr == 'k' || *endptr == 'K')) { - num *= 1024; - endptr++; - } - *leftover = endptr; - - return (int)num; -} - -/* - * verify that a node count in arg is of a known form (count or min-max) - * OUT min, max specified minimum and maximum node counts - * RET true if valid - */ -static bool -_verify_node_count(const char *arg, int *min_nodes, int *max_nodes) -{ - char *ptr, *min_str, *max_str; - char *leftover; - - /* Does the string contain a "-" character? If so, treat as a range. - * otherwise treat as an absolute node count. */ - if ((ptr = index(arg, '-')) != NULL) { - min_str = xstrndup(arg, ptr-arg); - *min_nodes = _str_to_nodes(min_str, &leftover); - if (!xstring_is_whitespace(leftover)) { - error("\"%s\" is not a valid node count", min_str); - xfree(min_str); - return false; - } - xfree(min_str); - if (*min_nodes == 0) - *min_nodes = 1; - - max_str = xstrndup(ptr+1, strlen(arg)-((ptr+1)-arg)); - *max_nodes = _str_to_nodes(max_str, &leftover); - if (!xstring_is_whitespace(leftover)) { - error("\"%s\" is not a valid node count", max_str); - xfree(max_str); - return false; - } - xfree(max_str); - } else { - *min_nodes = *max_nodes = _str_to_nodes(arg, &leftover); - if (!xstring_is_whitespace(leftover)) { - error("\"%s\" is not a valid node count", arg); - return false; - } - if (*min_nodes == 0) { - /* whitespace does not a valid node count make */ - error("\"%s\" is not a valid node count", arg); - return false; - } - } - - if ((*max_nodes != 0) && (*max_nodes < *min_nodes)) { - error("Maximum node count %d is less than" - " minimum node count %d", - *max_nodes, *min_nodes); - return false; - } - - return true; -} - -/* return command name from its full path name */ -static char * _base_name(char* command) -{ - char *char_ptr, *name; - int i; - - if (command == NULL) - return NULL; - - char_ptr = strrchr(command, (int)'/'); - if (char_ptr == NULL) - char_ptr = command; - else - char_ptr++; - - i = strlen(char_ptr); - name = xmalloc(i+1); - strcpy(name, char_ptr); - return name; -} - -/* - * _to_bytes(): verify that arg is numeric with optional "G" or "M" at end - * if "G" or "M" is there, multiply by proper power of 2 and return - * number in bytes - */ -static long _to_bytes(const char *arg) -{ - char *buf; - char *endptr; - int end; - int multiplier = 1; - long result; - - buf = xstrdup(arg); - - end = strlen(buf) - 1; - - if (isdigit(buf[end])) { - result = strtol(buf, &endptr, 10); - - if (*endptr != '\0') - result = -result; - - } else { - - switch (toupper(buf[end])) { - - case 'G': - multiplier = 1024; - break; - - case 'M': - /* do nothing */ - break; - - default: - multiplier = -1; - } - - buf[end] = '\0'; - - result = multiplier * strtol(buf, &endptr, 10); - - if (*endptr != '\0') - result = -result; - } - - return result; -} - /* * print error message to stderr with opt.progname prepended */ @@ -435,16 +226,29 @@ static void _opt_default() opt.min_nodes = 1; opt.max_nodes = 0; opt.nodes_set = false; + opt.min_sockets_per_node = NO_VAL; /* requested min/maxsockets */ + opt.max_sockets_per_node = NO_VAL; + opt.min_cores_per_socket = NO_VAL; /* requested min/maxcores */ + opt.max_cores_per_socket = NO_VAL; + 
opt.min_threads_per_core = NO_VAL; /* requested min/maxthreads */ + opt.max_threads_per_core = NO_VAL; + opt.ntasks_per_node = NO_VAL; /* ntask max limits */ + opt.ntasks_per_socket = NO_VAL; + opt.ntasks_per_core = NO_VAL; + opt.cpu_bind_type = 0; /* local dummy variable for now */ opt.time_limit = NO_VAL; opt.time_limit_str = NULL; opt.partition = NULL; opt.job_name = NULL; opt.jobid = NO_VAL; - opt.dependency = NO_VAL; + opt.dependency = NULL; opt.account = NULL; opt.comment = NULL; + opt.distribution = SLURM_DIST_UNKNOWN; + opt.plane_size = NO_VAL; + opt.shared = (uint16_t)NO_VAL; opt.no_kill = false; opt.kill_command_signal = SIGTERM; @@ -462,6 +266,7 @@ static void _opt_default() opt.minsockets = -1; opt.mincores = -1; opt.minthreads = -1; + opt.task_mem = -1; opt.realmem = -1; opt.tmpdisk = -1; @@ -481,6 +286,7 @@ static void _opt_default() opt.egid = (gid_t) -1; opt.bell = BELL_AFTER_DELAY; + opt.acctg_freq = -1; opt.no_shell = false; opt.get_user_env_time = -1; opt.get_user_env_mode = -1; @@ -519,6 +325,7 @@ env_vars_t env_vars[] = { {"SALLOC_NO_BELL", OPT_NO_BELL, NULL, NULL }, {"SALLOC_EXCLUSIVE", OPT_EXCLUSIVE, NULL, NULL }, {"SALLOC_OVERCOMMIT", OPT_OVERCOMMIT, NULL, NULL }, + {"SALLOC_ACCTG_FREQ", OPT_INT, &opt.acctg_freq, NULL }, {NULL, 0, NULL, NULL} }; @@ -591,16 +398,16 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_NODES: - opt.nodes_set = _verify_node_count( val, - &opt.min_nodes, - &opt.max_nodes ); + opt.nodes_set = verify_node_count( val, + &opt.min_nodes, + &opt.max_nodes ); if (opt.nodes_set == false) { error("invalid node count in env variable, ignoring"); } break; case OPT_CONN_TYPE: - opt.conn_type = _verify_conn_type(val); + opt.conn_type = verify_conn_type(val); break; case OPT_NO_ROTATE: @@ -608,7 +415,7 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_GEOMETRY: - if (_verify_geometry(val, opt.geometry)) { + if (verify_geometry(val, opt.geometry)) { error("\"%s=%s\" -- invalid geometry, ignoring...", e->var, val); } @@ -665,9 +472,9 @@ void set_options(const int argc, char **argv) int opt_char, option_index = 0; char *tmp; static struct option long_options[] = { + {"extra-node-info", required_argument, 0, 'B'}, {"cpus-per-task", required_argument, 0, 'c'}, {"constraint", required_argument, 0, 'C'}, - {"dependency", required_argument, 0, 'd'}, {"chdir", required_argument, 0, 'D'}, {"nodefile", required_argument, 0, 'F'}, {"geometry", required_argument, 0, 'g'}, @@ -677,11 +484,14 @@ void set_options(const int argc, char **argv) {"job-name", required_argument, 0, 'J'}, {"no-kill", no_argument, 0, 'k'}, {"kill-command", optional_argument, 0, 'K'}, + {"licenses", required_argument, 0, 'L'}, + {"distribution", required_argument, 0, 'm'}, {"tasks", required_argument, 0, 'n'}, {"ntasks", required_argument, 0, 'n'}, {"nodes", required_argument, 0, 'N'}, {"overcommit", no_argument, 0, 'O'}, {"partition", required_argument, 0, 'p'}, + {"dependency", required_argument, 0, 'P'}, {"quiet", no_argument, 0, 'q'}, {"no-rotate", no_argument, 0, 'R'}, {"share", no_argument, 0, 's'}, @@ -700,6 +510,16 @@ void set_options(const int argc, char **argv) {"mincores", required_argument, 0, LONG_OPT_MINCORES}, {"minthreads", required_argument, 0, LONG_OPT_MINTHREADS}, {"mem", required_argument, 0, LONG_OPT_MEM}, + {"job-mem", required_argument, 0, LONG_OPT_TASK_MEM}, + {"task-mem", required_argument, 0, LONG_OPT_TASK_MEM}, + {"hint", required_argument, 0, LONG_OPT_HINT}, + {"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE}, + 
{"cores-per-socket", required_argument, 0, LONG_OPT_CORESPERSOCKET}, + {"threads-per-core", required_argument, 0, LONG_OPT_THREADSPERCORE}, + {"ntasks-per-node", required_argument, 0, LONG_OPT_NTASKSPERNODE}, + {"ntasks-per-socket",required_argument, 0, LONG_OPT_NTASKSPERSOCKET}, + {"ntasks-per-core", required_argument, 0, LONG_OPT_NTASKSPERCORE}, + {"tasks-per-node", required_argument, 0, LONG_OPT_NTASKSPERNODE}, {"tmp", required_argument, 0, LONG_OPT_TMP}, {"uid", required_argument, 0, LONG_OPT_UID}, {"gid", required_argument, 0, LONG_OPT_GID}, @@ -717,11 +537,12 @@ void set_options(const int argc, char **argv) {"linux-image", required_argument, 0, LONG_OPT_LINUX_IMAGE}, {"mloader-image", required_argument, 0, LONG_OPT_MLOADER_IMAGE}, {"ramdisk-image", required_argument, 0, LONG_OPT_RAMDISK_IMAGE}, + {"acctg-freq", required_argument, 0, LONG_OPT_ACCTG_FREQ}, {"no-shell", no_argument, 0, LONG_OPT_NOSHELL}, {"get-user-env", optional_argument, 0, LONG_OPT_GET_USER_ENV}, {NULL, 0, 0, 0} }; - char *opt_string = "+a:c:C:d:D:F:g:hHIJ:kK::n:N:Op:qR:st:uU:vVw:W:x:"; + char *opt_string = "+a:B:c:C:d:D:F:g:hHIJ:kK:L:m:n:N:Op:P:qR:st:uU:vVw:W:x:"; opt.progname = xbasename(argv[0]); optind = 0; @@ -734,6 +555,24 @@ void set_options(const int argc, char **argv) "information\n"); exit(1); break; + case 'B': + opt.extra_set = verify_socket_core_thread_count( + optarg, + &opt.min_sockets_per_node, + &opt.max_sockets_per_node, + &opt.min_cores_per_socket, + &opt.max_cores_per_socket, + &opt.min_threads_per_core, + &opt.max_threads_per_core, + &opt.cpu_bind_type); + + + if (opt.extra_set == false) { + error("invalid resource allocation -B `%s'", + optarg); + exit(1); + } + break; case 'c': opt.cpus_set = true; opt.cpus_per_task = @@ -743,9 +582,7 @@ void set_options(const int argc, char **argv) xfree(opt.constraints); opt.constraints = xstrdup(optarg); break; - case 'd': - opt.dependency = _get_int(optarg, "dependency"); - break; +/* case 'd': see 'P' below */ case 'D': xfree(opt.cwd); opt.cwd = xstrdup(optarg); @@ -762,7 +599,7 @@ void set_options(const int argc, char **argv) } break; case 'g': - if (_verify_geometry(optarg, opt.geometry)) + if (verify_geometry(optarg, opt.geometry)) exit(1); break; case 'h': @@ -789,6 +626,19 @@ void set_options(const int argc, char **argv) } opt.kill_command_signal_set = true; break; + case 'L': + xfree(opt.licenses); + opt.licenses = xstrdup(optarg); + break; + case 'm': + opt.distribution = verify_dist_type(optarg, + &opt.plane_size); + if (opt.distribution == SLURM_DIST_UNKNOWN) { + error("distribution type `%s' " + "is not recognized", optarg); + exit(1); + } + break; case 'n': opt.nprocs_set = true; opt.nprocs = @@ -796,9 +646,9 @@ void set_options(const int argc, char **argv) break; case 'N': opt.nodes_set = - _verify_node_count(optarg, - &opt.min_nodes, - &opt.max_nodes); + verify_node_count(optarg, + &opt.min_nodes, + &opt.max_nodes); if (opt.nodes_set == false) { exit(1); } @@ -810,6 +660,11 @@ void set_options(const int argc, char **argv) xfree(opt.partition); opt.partition = xstrdup(optarg); break; + case 'd': + case 'P': + xfree(opt.dependency); + opt.dependency = xstrdup(optarg); + break; case 'q': opt.quiet++; break; @@ -834,7 +689,7 @@ void set_options(const int argc, char **argv) opt.verbose++; break; case 'V': - _print_version(); + print_slurm_version(); exit(0); break; case 'w': @@ -896,15 +751,23 @@ void set_options(const int argc, char **argv) } break; case LONG_OPT_MEM: - opt.realmem = (int) _to_bytes(optarg); + opt.realmem = (int) 
str_to_bytes(optarg); if (opt.realmem < 0) { error("invalid memory constraint %s", optarg); exit(1); } break; + case LONG_OPT_TASK_MEM: + opt.task_mem = (int) str_to_bytes(optarg); + if (opt.task_mem < 0) { + error("invalid memory constraint %s", + optarg); + exit(1); + } + break; case LONG_OPT_TMP: - opt.tmpdisk = _to_bytes(optarg); + opt.tmpdisk = str_to_bytes(optarg); if (opt.tmpdisk < 0) { error("invalid tmp value %s", optarg); exit(1); @@ -921,7 +784,7 @@ void set_options(const int argc, char **argv) fatal ("--gid=\"%s\" invalid", optarg); break; case LONG_OPT_CONNTYPE: - opt.conn_type = _verify_conn_type(optarg); + opt.conn_type = verify_conn_type(optarg); break; case LONG_OPT_BEGIN: opt.begin = parse_time(optarg); @@ -931,7 +794,7 @@ void set_options(const int argc, char **argv) } break; case LONG_OPT_MAIL_TYPE: - opt.mail_type |= _parse_mail_type(optarg); + opt.mail_type |= parse_mail_type(optarg); if (opt.mail_type == 0) fatal("--mail-type=%s invalid", optarg); break; @@ -972,6 +835,48 @@ void set_options(const int argc, char **argv) xfree(opt.comment); opt.comment = xstrdup(optarg); break; + case LONG_OPT_SOCKETSPERNODE: + get_resource_arg_range( optarg, "sockets-per-node", + &opt.min_sockets_per_node, + &opt.max_sockets_per_node, + true ); + break; + case LONG_OPT_CORESPERSOCKET: + get_resource_arg_range( optarg, "cores-per-socket", + &opt.min_cores_per_socket, + &opt.max_cores_per_socket, + true); + break; + case LONG_OPT_THREADSPERCORE: + get_resource_arg_range( optarg, "threads-per-core", + &opt.min_threads_per_core, + &opt.max_threads_per_core, + true ); + break; + case LONG_OPT_HINT: + if (verify_hint(optarg, + &opt.min_sockets_per_node, + &opt.max_sockets_per_node, + &opt.min_cores_per_socket, + &opt.max_cores_per_socket, + &opt.min_threads_per_core, + &opt.max_threads_per_core, + &opt.cpu_bind_type)) { + exit(1); + } + break; + case LONG_OPT_NTASKSPERNODE: + opt.ntasks_per_node = _get_int(optarg, + "ntasks-per-node"); + break; + case LONG_OPT_NTASKSPERSOCKET: + opt.ntasks_per_socket = _get_int(optarg, + "ntasks-per-socket"); + break; + case LONG_OPT_NTASKSPERCORE: + opt.ntasks_per_core = _get_int(optarg, + "ntasks-per-core"); + break; case LONG_OPT_REBOOT: opt.reboot = true; break; @@ -991,6 +896,9 @@ void set_options(const int argc, char **argv) xfree(opt.ramdiskimage); opt.ramdiskimage = xstrdup(optarg); break; + case LONG_OPT_ACCTG_FREQ: + opt.acctg_freq = _get_int(optarg, "acctg-freq"); + break; case LONG_OPT_NOSHELL: opt.no_shell = true; break; @@ -1068,7 +976,7 @@ static bool _opt_verify(void) opt.mincpus = opt.cpus_per_task; if ((opt.job_name == NULL) && (command_argc > 0)) - opt.job_name = _base_name(command_argv[0]); + opt.job_name = base_name(command_argv[0]); if ((opt.no_shell == false) && (command_argc == 0)) { error("A local command is a required parameter!"); @@ -1096,11 +1004,88 @@ static bool _opt_verify(void) verified = false; } + /* When CR with memory as a CR is enabled we need to assign + * adequate value or check the value to opt.mem */ + if ((opt.realmem >= -1) && (opt.task_mem > 0)) { + if (opt.realmem == -1) { + opt.realmem = opt.task_mem; + } else if (opt.realmem < opt.task_mem) { + info("mem < task-mem - resizing mem to be equal " + "to task-mem"); + opt.realmem = opt.task_mem; + } + } + + /* Check to see if user has specified enough resources to + * satisfy the plane distribution with the specified + * plane_size. 
+ * if (n/plane_size < N) and ((N-1) * plane_size >= n) --> + * problem Simple check will not catch all the problem/invalid + * cases. + * The limitations of the plane distribution in the cons_res + * environment are more extensive and are documented in the + * SLURM reference guide. */ + if (opt.distribution == SLURM_DIST_PLANE && opt.plane_size) { + if ((opt.nprocs/opt.plane_size) < opt.min_nodes) { + if (((opt.min_nodes-1)*opt.plane_size) >= opt.nprocs) { +#if(0) + info("Too few processes ((n/plane_size) %d < N %d) " + "and ((N-1)*(plane_size) %d >= n %d)) ", + opt.nprocs/opt.plane_size, opt.min_nodes, + (opt.min_nodes-1)*opt.plane_size, opt.nprocs); +#endif + error("Too few processes for the requested " + "{plane,node} distribution"); + exit(1); + } + } + } + + /* bound max_threads/cores from ntasks_cores/sockets */ + if ((opt.max_threads_per_core <= 0) && + (opt.ntasks_per_core > 0)) { + opt.max_threads_per_core = opt.ntasks_per_core; + /* if cpu_bind_type doesn't already have a auto pref, + * choose the level based on the level of ntasks + */ + if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | + CPU_BIND_TO_CORES | + CPU_BIND_TO_THREADS))) { + opt.cpu_bind_type |= CPU_BIND_TO_CORES; + } + } + if ((opt.max_cores_per_socket <= 0) && + (opt.ntasks_per_socket > 0)) { + opt.max_cores_per_socket = opt.ntasks_per_socket; + /* if cpu_bind_type doesn't already have a auto pref, + * choose the level based on the level of ntasks + */ + if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | + CPU_BIND_TO_CORES | + CPU_BIND_TO_THREADS))) { + opt.cpu_bind_type |= CPU_BIND_TO_SOCKETS; + } + } + /* massage the numbers */ if (opt.nodes_set && !opt.nprocs_set) { /* 1 proc / node default */ opt.nprocs = opt.min_nodes; + /* 1 proc / min_[socket * core * thread] default */ + if (opt.min_sockets_per_node > 0) { + opt.nprocs *= opt.min_sockets_per_node; + opt.nprocs_set = true; + } + if (opt.min_cores_per_socket > 0) { + opt.nprocs *= opt.min_cores_per_socket; + opt.nprocs_set = true; + } + if (opt.min_threads_per_core > 0) { + opt.nprocs *= opt.min_threads_per_core; + opt.nprocs_set = true; + } + } else if (opt.nodes_set && opt.nprocs_set) { /* @@ -1122,7 +1107,7 @@ static bool _opt_verify(void) if (opt.time_limit_str) { opt.time_limit = time_str2mins(opt.time_limit_str); - if (opt.time_limit < 0) { + if ((opt.time_limit < 0) && (opt.time_limit != INFINITE)) { error("Invalid time limit specification"); exit(1); } @@ -1152,40 +1137,6 @@ static bool _opt_verify(void) return verified; } -static uint16_t _parse_mail_type(const char *arg) -{ - uint16_t rc; - - if (strcasecmp(arg, "BEGIN") == 0) - rc = MAIL_JOB_BEGIN; - else if (strcasecmp(arg, "END") == 0) - rc = MAIL_JOB_END; - else if (strcasecmp(arg, "FAIL") == 0) - rc = MAIL_JOB_FAIL; - else if (strcasecmp(arg, "ALL") == 0) - rc = MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL; - else - rc = 0; /* failure */ - - return rc; -} -static char *_print_mail_type(const uint16_t type) -{ - if (type == 0) - return "NONE"; - - if (type == MAIL_JOB_BEGIN) - return "BEGIN"; - if (type == MAIL_JOB_END) - return "END"; - if (type == MAIL_JOB_FAIL) - return "FAIL"; - if (type == (MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL)) - return "ALL"; - - return "MULTIPLE"; -} - /* helper function for printing options * * warning: returns pointer to memory allocated on the stack. 
@@ -1197,9 +1148,21 @@ static char *print_constraints() if (opt.mincpus > 0) xstrfmtcat(buf, "mincpus=%d ", opt.mincpus); + if (opt.minsockets > 0) + xstrfmtcat(buf, "minsockets=%d ", opt.minsockets); + + if (opt.mincores > 0) + xstrfmtcat(buf, "mincores=%d ", opt.mincores); + + if (opt.minthreads > 0) + xstrfmtcat(buf, "minthreads=%d ", opt.minthreads); + if (opt.realmem > 0) xstrfmtcat(buf, "mem=%dM ", opt.realmem); + if (opt.task_mem > 0) + xstrfmtcat(buf, "task-mem=%dM ", opt.task_mem); + if (opt.tmpdisk > 0) xstrfmtcat(buf, "tmp=%ld ", opt.tmpdisk); @@ -1218,39 +1181,6 @@ static char *print_constraints() return buf; } -static char * -print_commandline() -{ - int i; - char buf[256]; - - buf[0] = '\0'; - for (i = 0; i < command_argc; i++) - snprintf(buf, 256, "%s", command_argv[i]); - return xstrdup(buf); -} - -static char * -print_geometry() -{ - int i; - char buf[32], *rc = NULL; - - if ((SYSTEM_DIMENSIONS == 0) - || (opt.geometry[0] == (uint16_t)NO_VAL)) - return NULL; - - for (i=0; i<SYSTEM_DIMENSIONS; i++) { - if (i > 0) - snprintf(buf, sizeof(buf), "x%u", opt.geometry[i]); - else - snprintf(buf, sizeof(buf), "%u", opt.geometry[i]); - xstrcat(rc, buf); - } - - return rc; -} - /* * Takes a string containing the number or name of a signal and returns * the signal number. The signal name is case insensitive, and may be of @@ -1333,6 +1263,9 @@ static void _opt_list() info("job name : `%s'", opt.job_name); if (opt.jobid != NO_VAL) info("jobid : %u", opt.jobid); + info("distribution : %s", format_task_dist_states(opt.distribution)); + if(opt.distribution == SLURM_DIST_PLANE) + info("plane size : %u", opt.plane_size); info("verbose : %d", opt.verbose); info("immediate : %s", tf_(opt.immediate)); info("overcommit : %s", tf_(opt.overcommit)); @@ -1345,16 +1278,13 @@ static void _opt_list() info("nice : %d", opt.nice); info("account : %s", opt.account); info("comment : %s", opt.comment); - if (opt.dependency == NO_VAL) - info("dependency : none"); - else - info("dependency : %u", opt.dependency); + info("dependency : %s", opt.dependency); str = print_constraints(); info("constraints : %s", str); xfree(str); if (opt.conn_type >= 0) info("conn_type : %u", opt.conn_type); - str = print_geometry(); + str = print_geometry(opt.geometry); info("geometry : %s", str); xfree(str); info("reboot : %s", opt.reboot ? 
"no" : "yes"); @@ -1372,9 +1302,19 @@ static void _opt_list() slurm_make_time_str(&opt.begin, time_str, sizeof(time_str)); info("begin : %s", time_str); } - info("mail_type : %s", _print_mail_type(opt.mail_type)); + info("mail_type : %s", print_mail_type(opt.mail_type)); info("mail_user : %s", opt.mail_user); - str = print_commandline(); + info("sockets-per-node : %d - %d", opt.min_sockets_per_node, + opt.max_sockets_per_node); + info("cores-per-socket : %d - %d", opt.min_cores_per_socket, + opt.max_cores_per_socket); + info("threads-per-core : %d - %d", opt.min_threads_per_core, + opt.max_threads_per_core); + info("ntasks-per-node : %d", opt.ntasks_per_node); + info("ntasks-per-socket : %d", opt.ntasks_per_socket); + info("ntasks-per-core : %d", opt.ntasks_per_core); + info("plane_size : %u", opt.plane_size); + str = print_commandline(command_argc, command_argv); info("user command : `%s'", str); xfree(str); @@ -1387,10 +1327,10 @@ static void _usage(void) " [[-c cpus-per-node] [-r n] [-p partition] [--hold] [-t minutes]\n" " [--immediate] [--no-kill] [--overcommit] [-D path]\n" " [--share] [-J jobname] [--jobid=id]\n" -" [--verbose] [--gid=group] [--uid=user]\n" +" [--verbose] [--gid=group] [--uid=user] [--licenses=names]\n" " [-W sec] [--minsockets=n] [--mincores=n] [--minthreads=n]\n" " [--contiguous] [--mincpus=n] [--mem=MB] [--tmp=MB] [-C list]\n" -" [--account=name] [--dependency=jobid] [--comment=name]\n" +" [--account=name] [--dependency=type:jobid] [--comment=name]\n" #ifdef HAVE_BG /* Blue gene specific options */ " [--geometry=XxYxZ] [--conn-type=type] [--no-rotate] [ --reboot]\n" " [--blrts-image=path] [--linux-image=path]\n" @@ -1404,6 +1344,8 @@ static void _usage(void) static void _help(void) { + slurm_ctl_conf_t *conf; + printf ( "Usage: salloc [OPTIONS...] 
executable [args...]\n" "\n" @@ -1411,6 +1353,7 @@ static void _help(void) " -N, --nodes=N number of nodes on which to run (N = min[-max])\n" " -n, --tasks=N number of processors required\n" " -c, --cpus-per-task=ncpus number of cpus required per task\n" +" --ntasks-per-node=n number of tasks to invoke on each node\n" " -p, --partition=partition partition requested\n" " -H, --hold submit job in held state\n" " -t, --time=minutes time limit\n" @@ -1420,17 +1363,20 @@ static void _help(void) " -K, --kill-command[=signal] signal to send terminating job\n" " -O, --overcommit overcommit resources\n" " -s, --share share nodes with other jobs\n" +" -m, --distribution=type distribution method for processes to nodes\n" +" (type = block|cyclic|arbitrary)\n" " -J, --job-name=jobname name of job\n" " --jobid=id specify jobid to use\n" " -W, --wait=sec seconds to wait for allocation if not\n" " immediately available\n" " -v, --verbose verbose mode (multiple -v's increase verbosity)\n" " -q, --quiet quiet mode (suppress informational messages)\n" -" -d, --dependency=jobid defer job until specified jobid completes\n" +" -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n" " --nice[=value] decrease secheduling priority by value\n" " -U, --account=name charge job to specified account\n" " --begin=time defer job until HH:MM DD/MM/YY\n" " --comment=name arbitrary comment\n" +" -L, --licenses=names required license, comma separated\n" " --mail-type=type notify on state change: BEGIN, END, FAIL or ALL\n" " --mail-user=user who to send email notification for job state changes\n" " --bell ring the terminal bell when the job is allocated\n" @@ -1455,8 +1401,32 @@ static void _help(void) "Consumable resources related options:\n" " --exclusive allocate nodes in exclusive mode when\n" " cpu consumable resource is enabled\n" +" --task-mem=MB maximum amount of real memory per task\n" +" required by the job.\n" +" --mem >= --job-mem if --mem is specified.\n" "\n" +"Affinity/Multi-core options: (when the task/affinity plugin is enabled)\n" +" -B --extra-node-info=S[:C[:T]] Expands to:\n" +" --sockets-per-node=S number of sockets per node to allocate\n" +" --cores-per-socket=C number of cores per socket to allocate\n" +" --threads-per-core=T number of threads per core to allocate\n" +" each field can be 'min[-max]' or wildcard '*'\n" +" total cpus requested = (N x S x C x T)\n" +"\n" +" --ntasks-per-socket=n number of tasks to invoke on each socket\n" +" --ntasks-per-core=n number of tasks to invoke on each core\n"); + conf = slurm_conf_lock(); + if (conf->task_plugin != NULL + && strcasecmp(conf->task_plugin, "task/affinity") == 0) { + printf( +" --hint= Bind tasks according to application hints\n" +" (see \"--hint=help\" for options)\n"); + } + slurm_conf_unlock(); + + printf("\n" #ifdef HAVE_BG /* Blue gene specific options */ +"\n" "Blue Gene related options:\n" " -g, --geometry=XxYxZ geometry constraints of the job\n" " -R, --no-rotate disable geometry rotation\n" diff --git a/src/salloc/opt.h b/src/salloc/opt.h index b66a318e4..3f964a937 100644 --- a/src/salloc/opt.h +++ b/src/salloc/opt.h @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, * Christopher J. Morrone <morrone2@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -62,14 +62,28 @@ typedef struct salloc_options { int min_nodes; /* --nodes=n, -N n */ int max_nodes; /* --nodes=x-n, -N x-n */ bool nodes_set; /* true if nodes explicitly set */ + int min_sockets_per_node; /* --sockets-per-node=n */ + int max_sockets_per_node; /* --sockets-per-node=x-n */ + int min_cores_per_socket; /* --cores-per-socket=n */ + int max_cores_per_socket; /* --cores-per-socket=x-n */ + int min_threads_per_core; /* --threads-per-core=n */ + int max_threads_per_core; /* --threads-per-core=x-n */ + int ntasks_per_node; /* --ntasks-per-node=n */ + int ntasks_per_socket; /* --ntasks-per-socket=n */ + int ntasks_per_core; /* --ntasks-per-core=n */ + cpu_bind_type_t cpu_bind_type; /* --cpu_bind= */ + bool extra_set; /* true if extra node info explicitly set */ int time_limit; /* --time, -t (int minutes) */ char *time_limit_str; /* --time, -t (string) */ char *partition; /* --partition=n, -p n */ enum task_dist_states distribution; /* --distribution=, -m dist */ + uint32_t plane_size; /* lllp distribution -> plane_size for + * when -m plane=<# of lllp per + * plane> */ char *job_name; /* --job-name=, -J name */ unsigned int jobid; /* --jobid=jobid */ - unsigned int dependency;/* --dependency, -P jobid */ + char *dependency; /* --dependency, -P type:jobid */ int nice; /* --nice */ char *account; /* --account, -U acct_name */ char *comment; /* --comment */ @@ -78,6 +92,8 @@ typedef struct salloc_options { bool hold; /* --hold, -H */ bool no_kill; /* --no-kill, -k */ + int acctg_freq; /* --acctg-freq=secs */ + char *licenses; /* --licenses, -L */ bool overcommit; /* --overcommit -O */ int kill_command_signal;/* --kill-command, -K */ bool kill_command_signal_set; @@ -91,6 +107,7 @@ typedef struct salloc_options { int minsockets; /* --minsockets=n */ int mincores; /* --mincores=n */ int minthreads; /* --minthreads=n */ + int task_mem; /* --task-mem=n */ int realmem; /* --mem=n */ long tmpdisk; /* --tmp=n */ char *constraints; /* --constraints=, -C constraint*/ diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c index d78f9d900..df7531c47 100644 --- a/src/salloc/salloc.c +++ b/src/salloc/salloc.c @@ -6,7 +6,7 @@ * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -49,7 +49,6 @@ #include "src/salloc/salloc.h" #include "src/salloc/opt.h" -#include "src/salloc/msg.h" #define MAX_RETRIES 3 @@ -70,6 +69,11 @@ static void _pending_callback(uint32_t job_id); static void _ignore_signal(int signo); static void _exit_on_signal(int signo); static void _signal_while_allocating(int signo); +static void _job_complete_handler(srun_job_complete_msg_t *msg); +static void _timeout_handler(srun_timeout_msg_t *msg); +static void _user_msg_handler(srun_user_msg_t *msg); +static void _ping_handler(srun_ping_msg_t *msg); +static void _node_fail_handler(srun_node_fail_msg_t *msg); int main(int argc, char *argv[]) { @@ -77,7 +81,7 @@ int main(int argc, char *argv[]) job_desc_msg_t desc; resource_allocation_response_msg_t *alloc; time_t before, after; - salloc_msg_thread_t *msg_thr; + allocation_msg_thread_t *msg_thr; char **env = NULL; int status = 0; int errnum = 0; @@ -86,6 +90,7 @@ int main(int argc, char *argv[]) pid_t rc_pid = 0; int rc = 0; static char *msg = "Slurm job queue full, sleeping and retrying."; + slurm_allocation_callbacks_t callbacks; log_init(xbasename(argv[0]), logopt, 0, NULL); if (initialize_and_process_args(argc, argv) < 0) { @@ -126,9 +131,13 @@ int main(int argc, char *argv[]) exit(1); } + callbacks.ping = _ping_handler; + callbacks.timeout = _timeout_handler; + callbacks.job_complete = _job_complete_handler; + callbacks.user_msg = _user_msg_handler; + callbacks.node_fail = _node_fail_handler; /* create message thread to handle pings and such from slurmctld */ - msg_thr = msg_thr_create(&desc.other_port); - desc.other_hostname = xshort_hostname(); + msg_thr = slurm_allocation_msg_thr_create(&desc.other_port, &callbacks); xsignal(SIGHUP, _signal_while_allocating); xsignal(SIGINT, _signal_while_allocating); @@ -160,7 +169,7 @@ int main(int argc, char *argv[]) } else { error("Failed to allocate resources: %m"); } - msg_thr_destroy(msg_thr); + slurm_allocation_msg_thr_destroy(msg_thr); exit(1); } after = time(NULL); @@ -204,6 +213,14 @@ int main(int argc, char *argv[]) env_array_append_fmt(&env, "SLURM_OVERCOMMIT", "%d", opt.overcommit); } + if (opt.acctg_freq >= 0) { + env_array_append_fmt(&env, "SLURM_ACCTG_FREQ", "%d", + opt.acctg_freq); + } + if (opt.task_mem >= 0) { + env_array_append_fmt(&env, "SLURM_TASK_MEM", "%d", + opt.task_mem); + } env_array_set_environment(env); env_array_free(env); pthread_mutex_lock(&allocation_state_lock); @@ -240,7 +257,8 @@ relinquish: pthread_mutex_lock(&allocation_state_lock); if (allocation_state != REVOKED) { info("Relinquishing job allocation %d", alloc->job_id); - if (slurm_complete_job(alloc->job_id, 0) != 0) + if (slurm_complete_job(alloc->job_id, status) + != 0) error("Unable to clean up job allocation %d: %m", alloc->job_id); else @@ -249,7 +267,7 @@ relinquish: pthread_mutex_unlock(&allocation_state_lock); slurm_free_resource_allocation_response_msg(alloc); - msg_thr_destroy(msg_thr); + slurm_allocation_msg_thr_destroy(msg_thr); /* * Figure out what return code we should use. 
If the user's command @@ -257,6 +275,7 @@ relinquish: */ rc = 1; if (rc_pid != -1) { + if (WIFEXITED(status)) { rc = WEXITSTATUS(status); } else if (WIFSIGNALED(status)) { @@ -284,7 +303,13 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) desc->max_nodes = opt.max_nodes; desc->user_id = opt.uid; desc->group_id = opt.gid; - desc->dependency = opt.dependency; + if (opt.dependency) + desc->dependency = xstrdup(opt.dependency); + desc->task_dist = opt.distribution; + if (opt.plane_size != NO_VAL) + desc->plane_size = opt.plane_size; + if (opt.licenses) + desc->licenses = xstrdup(opt.licenses); if (opt.nice) desc->nice = NICE_OFFSET + opt.nice; desc->mail_type = opt.mail_type; @@ -320,6 +345,8 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) desc->mloaderimage = xstrdup(opt.mloaderimage); if (opt.ramdiskimage) desc->ramdiskimage = xstrdup(opt.ramdiskimage); + + /* job constraints */ if (opt.mincpus > -1) desc->job_min_procs = opt.mincpus; if (opt.minsockets > -1) @@ -341,6 +368,27 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) desc->num_tasks = opt.nprocs; if (opt.cpus_set) desc->cpus_per_task = opt.cpus_per_task; + if (opt.ntasks_per_node > -1) + desc->ntasks_per_node = opt.ntasks_per_node; + if (opt.ntasks_per_socket > -1) + desc->ntasks_per_socket = opt.ntasks_per_socket; + if (opt.ntasks_per_core > -1) + desc->ntasks_per_core = opt.ntasks_per_core; + + /* node constraints */ + if (opt.min_sockets_per_node > -1) + desc->min_sockets = opt.min_sockets_per_node; + if (opt.max_sockets_per_node > -1) + desc->max_sockets = opt.max_sockets_per_node; + if (opt.min_cores_per_socket > -1) + desc->min_cores = opt.min_cores_per_socket; + if (opt.max_cores_per_socket > -1) + desc->max_cores = opt.max_cores_per_socket; + if (opt.min_threads_per_core > -1) + desc->min_threads = opt.min_threads_per_core; + if (opt.max_threads_per_core > -1) + desc->max_threads = opt.max_threads_per_core; + if (opt.no_kill) desc->kill_on_node_fail = 0; if (opt.time_limit != NO_VAL) @@ -402,3 +450,67 @@ static void _exit_on_signal(int signo) { exit_flag = true; } + +/* This typically signifies the job was cancelled by scancel */ +static void _job_complete_handler(srun_job_complete_msg_t *comp) +{ + if (comp->step_id == NO_VAL) { + pthread_mutex_lock(&allocation_state_lock); + if (allocation_state != REVOKED) { + /* If the allocation_state is already REVOKED, then + * no need to print this message. We probably + * relinquished the allocation ourself. + */ + info("Job allocation %u has been revoked.", + comp->job_id); + } + if (allocation_state == GRANTED + && command_pid > -1 + && opt.kill_command_signal_set) { + verbose("Sending signal %d to command \"%s\", pid %d", + opt.kill_command_signal, + command_argv[0], command_pid); + kill(command_pid, opt.kill_command_signal); + } + allocation_state = REVOKED; + pthread_mutex_unlock(&allocation_state_lock); + } else { + verbose("Job step %u.%u is finished.", + comp->job_id, comp->step_id); + } +} + +/* + * Job has been notified of it's approaching time limit. + * Job will be killed shortly after timeout. + * This RPC can arrive multiple times with the same or updated timeouts. + * FIXME: We may want to signal the job or perform other action for this. + * FIXME: How much lead time do we want for this message? Some jobs may + * require tens of minutes to gracefully terminate. 
+ */ +static void _timeout_handler(srun_timeout_msg_t *msg) +{ + static time_t last_timeout = 0; + + if (msg->timeout != last_timeout) { + last_timeout = msg->timeout; + verbose("Job allocation time limit to be reached at %s", + ctime(&msg->timeout)); + } +} + +static void _user_msg_handler(srun_user_msg_t *msg) +{ + info("%s", msg->msg); +} + +static void _ping_handler(srun_ping_msg_t *msg) +{ + /* the api will respond so there really isn't anything to do + here */ +} + +static void _node_fail_handler(srun_node_fail_msg_t *msg) +{ + error("Node failure on %s", msg->nodelist); +} diff --git a/src/salloc/salloc.h b/src/salloc/salloc.h index 83d41b55e..fbe7c43a7 100644 --- a/src/salloc/salloc.h +++ b/src/salloc/salloc.h @@ -5,7 +5,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sattach/Makefile.am b/src/sattach/Makefile.am index fd9e764f4..e38c68075 100644 --- a/src/sattach/Makefile.am +++ b/src/sattach/Makefile.am @@ -17,7 +17,7 @@ convenience_libs = $(top_builddir)/src/api/libslurmhelper.la sattach_LDADD = \ $(convenience_libs) -sattach_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +sattach_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) force: $(convenience_libs) : force diff --git a/src/sattach/Makefile.in b/src/sattach/Makefile.in index 64e33cbdc..45cc34b83 100644 --- a/src/sattach/Makefile.in +++ b/src/sattach/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -75,7 +77,7 @@ sattach_DEPENDENCIES = $(convenience_libs) sattach_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sattach_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -115,6 +117,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -128,10 +131,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -151,7 +157,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -162,6 +171,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -177,6 +188,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -192,6 +204,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -260,7 +273,7 @@ convenience_libs = $(top_builddir)/src/api/libslurmhelper.la sattach_LDADD = \ $(convenience_libs) -sattach_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +sattach_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) all: all-am .SUFFIXES: @@ -303,8 +316,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -369,8 +382,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -382,8 +395,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 
1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -393,13 +406,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/sattach/attach.c b/src/sattach/attach.c index c19433901..e4ca98076 100644 --- a/src/sattach/attach.c +++ b/src/sattach/attach.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * attach.c - Definitions needed for parallel debugger - * $Id: attach.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: attach.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sattach/opt.c b/src/sattach/opt.c index 76463f276..b9abbf6fa 100644 --- a/src/sattach/opt.c +++ b/src/sattach/opt.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * opt.c - options processing for sattach - * $Id: opt.c 12143 2007-08-27 15:59:41Z jette $ + * $Id: opt.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sattach/opt.h b/src/sattach/opt.h index 2d9f1a0ff..84681975a 100644 --- a/src/sattach/opt.h +++ b/src/sattach/opt.h @@ -1,12 +1,12 @@ /*****************************************************************************\ * opt.h - definitions for sattach option processing - * $Id: opt.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: opt.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, * Christopher J. Morrone <morrone2@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/src/sattach/sattach.c b/src/sattach/sattach.c index 98fe02aaa..988d59704 100644 --- a/src/sattach/sattach.c +++ b/src/sattach/sattach.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -365,6 +365,7 @@ static message_thread_state_t *_msg_thr_create(int num_nodes, int num_tasks) eio_obj_t *obj; int i; message_thread_state_t *mts; + pthread_attr_t attr; debug("Entering _msg_thr_create()"); mts = (message_thread_state_t *)xmalloc(sizeof(message_thread_state_t)); @@ -386,11 +387,14 @@ static message_thread_state_t *_msg_thr_create(int num_nodes, int num_tasks) eio_new_initial_obj(mts->msg_handle, obj); } - if (pthread_create(&mts->msg_thread, NULL, + slurm_attr_init(&attr); + if (pthread_create(&mts->msg_thread, &attr, _msg_thr_internal, (void *)mts) != 0) { error("pthread_create of message thread: %m"); + slurm_attr_destroy(&attr); goto fail; } + slurm_attr_destroy(&attr); return mts; fail: @@ -558,7 +562,7 @@ _exit_handler(message_thread_state_t *mts, slurm_msg_t *exit_msg) static void _handle_msg(message_thread_state_t *mts, slurm_msg_t *msg) { - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); static uid_t slurm_uid; static bool slurm_uid_set = false; uid_t uid = getuid(); diff --git a/src/sbatch/Makefile.am b/src/sbatch/Makefile.am index de93daff0..de4ffb6aa 100644 --- a/src/sbatch/Makefile.am +++ b/src/sbatch/Makefile.am @@ -10,10 +10,9 @@ sbatch_SOURCES = sbatch.c opt.c opt.h convenience_libs = $(top_builddir)/src/api/libslurmhelper.la -sbatch_LDADD = \ - $(convenience_libs) +sbatch_LDADD = $(convenience_libs) -sbatch_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +sbatch_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) force: $(convenience_libs) : force diff --git a/src/sbatch/Makefile.in b/src/sbatch/Makefile.in index f30d9b1a6..7cd63bdf9 100644 --- a/src/sbatch/Makefile.in +++ b/src/sbatch/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -74,7 +76,7 @@ sbatch_DEPENDENCIES = $(convenience_libs) sbatch_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sbatch_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -114,6 +116,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -127,10 +130,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -150,7 +156,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -161,6 +170,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -176,6 +187,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -191,6 +203,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -251,10 +264,8 @@ AUTOMAKE_OPTIONS = foreign INCLUDES = -I$(top_srcdir) sbatch_SOURCES = sbatch.c opt.c opt.h convenience_libs = $(top_builddir)/src/api/libslurmhelper.la -sbatch_LDADD = \ - $(convenience_libs) - -sbatch_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +sbatch_LDADD = $(convenience_libs) +sbatch_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) all: all-am .SUFFIXES: @@ -297,8 +308,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -361,8 +372,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -374,8 +385,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo 
$(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -385,13 +396,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c index f3cb69393..5c3daa39e 100644 --- a/src/sbatch/opt.c +++ b/src/sbatch/opt.c @@ -1,10 +1,11 @@ /*****************************************************************************\ * opt.c - options processing for sbatch ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
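The sattach.c change a few hunks back replaces the bare pthread_create() call with one that goes through slurm_attr_init() and slurm_attr_destroy(), releasing the attributes on both the success and failure paths. A condensed sketch of that pattern, for reference (the helpers are the ones used in the hunk; the header that declares them is an assumption here):

    /* Illustrative only -- mirrors the _msg_thr_create() change above. */
    #include <pthread.h>
    #include "src/common/macros.h"   /* slurm_attr_init/slurm_attr_destroy (assumed) */
    #include "src/common/log.h"      /* error() */

    static int _start_worker(pthread_t *tid, void *(*fn)(void *), void *arg)
    {
        pthread_attr_t attr;

        slurm_attr_init(&attr);
        if (pthread_create(tid, &attr, fn, arg) != 0) {
            error("pthread_create: %m");
            slurm_attr_destroy(&attr);   /* clean up on the error path too */
            return -1;
        }
        slurm_attr_destroy(&attr);
        return 0;
    }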
@@ -60,6 +61,8 @@ #include "src/common/list.h" #include "src/common/log.h" #include "src/common/parse_time.h" +#include "src/common/plugstack.h" +#include "src/common/proc_args.h" #include "src/common/slurm_protocol_api.h" #include "src/common/uid.h" #include "src/common/xmalloc.h" @@ -83,6 +86,10 @@ #define OPT_MULTI 0x0f #define OPT_EXCLUSIVE 0x10 #define OPT_OVERCOMMIT 0x11 +#define OPT_OPEN_MODE 0x12 +#define OPT_ACCTG_FREQ 0x13 +#define OPT_NO_REQUEUE 0x14 +#define OPT_REQUEUE 0x15 /* generic getopt_long flags, integers and *not* valid characters */ #define LONG_OPT_PROPAGATE 0x100 @@ -107,13 +114,22 @@ #define LONG_OPT_COMMENT 0x117 #define LONG_OPT_WRAP 0x118 #define LONG_OPT_REQUEUE 0x119 +#define LONG_OPT_SOCKETSPERNODE 0x130 +#define LONG_OPT_CORESPERSOCKET 0x131 +#define LONG_OPT_THREADSPERCORE 0x132 +#define LONG_OPT_NTASKSPERNODE 0x136 +#define LONG_OPT_NTASKSPERSOCKET 0x137 +#define LONG_OPT_NTASKSPERCORE 0x138 +#define LONG_OPT_TASK_MEM 0x13a +#define LONG_OPT_HINT 0x13b #define LONG_OPT_BLRTS_IMAGE 0x140 #define LONG_OPT_LINUX_IMAGE 0x141 #define LONG_OPT_MLOADER_IMAGE 0x142 #define LONG_OPT_RAMDISK_IMAGE 0x143 #define LONG_OPT_REBOOT 0x144 -#define LONG_OPT_TASKSPERNODE 0x145 #define LONG_OPT_GET_USER_ENV 0x146 +#define LONG_OPT_OPEN_MODE 0x147 +#define LONG_OPT_ACCTG_FREQ 0x148 /*---- global variables, defined in opt.h ----*/ opt_t opt; @@ -122,10 +138,6 @@ opt_t opt; typedef struct env_vars env_vars_t; -/* return command name from its full path name */ -static char * _base_name(char* command); - -static List _create_path_list(void); /* Get a decimal integer from arg */ static int _get_int(const char *arg, const char *what); @@ -151,203 +163,18 @@ static void _opt_list(void); /* verify options sanity */ static bool _opt_verify(void); -static void _print_version(void); - static void _process_env_var(env_vars_t *e, const char *val); -static uint16_t _parse_mail_type(const char *arg); static uint16_t _parse_pbs_mail_type(const char *arg); -static char *_print_mail_type(const uint16_t type); - -/* search PATH for command returns full path */ -static char *_search_path(char *, bool, int); - -static long _to_bytes(const char *arg); static void _usage(void); -static bool _verify_node_count(const char *arg, int *min, int *max); -static int _verify_geometry(const char *arg, uint16_t *geometry); -static int _verify_conn_type(const char *arg); -static char *_fullpath(const char *filename); +static void _fullpath(char **filename, const char *cwd); static void _set_options(int argc, char **argv); static void _set_pbs_options(int argc, char **argv); static void _parse_pbs_resource_list(char *rl); /*---[ end forward declarations of static functions ]---------------------*/ -static void _print_version(void) -{ - printf("%s %s\n", PACKAGE, SLURM_VERSION); -} - -/* - * verify that a connection type in arg is of known form - * returns the connection_type or -1 if not recognized - */ -static int _verify_conn_type(const char *arg) -{ - int len = strlen(arg); - - if (!strncasecmp(arg, "MESH", len)) - return SELECT_MESH; - else if (!strncasecmp(arg, "TORUS", len)) - return SELECT_TORUS; - else if (!strncasecmp(arg, "NAV", len)) - return SELECT_NAV; - - error("invalid --conn-type argument %s ignored.", arg); - return -1; -} - -/* - * verify geometry arguments, must have proper count - * returns -1 on error, 0 otherwise - */ -static int _verify_geometry(const char *arg, uint16_t *geometry) -{ - char* token, *delimiter = ",x", *next_ptr; - int i, rc = 0; - char* geometry_tmp = xstrdup(arg); - 
char* original_ptr = geometry_tmp; - - token = strtok_r(geometry_tmp, delimiter, &next_ptr); - for (i=0; i<SYSTEM_DIMENSIONS; i++) { - if (token == NULL) { - error("insufficient dimensions in --geometry"); - rc = -1; - break; - } - geometry[i] = (uint16_t)atoi(token); - if (geometry[i] == 0 || geometry[i] == (uint16_t)NO_VAL) { - error("invalid --geometry argument"); - rc = -1; - break; - } - geometry_tmp = next_ptr; - token = strtok_r(geometry_tmp, delimiter, &next_ptr); - } - if (token != NULL) { - error("too many dimensions in --geometry"); - rc = -1; - } - - if (original_ptr) - xfree(original_ptr); - - return rc; -} - -/* - * verify that a node count in arg is of a known form (count or min-max) - * OUT min, max specified minimum and maximum node counts - * RET true if valid - */ -static bool -_verify_node_count(const char *arg, int *min_nodes, int *max_nodes) -{ - char *end_ptr; - double val1, val2; - - val1 = strtod(arg, &end_ptr); - if (end_ptr[0] == 'k' || end_ptr[0] == 'K') { - val1 *= 1024; - end_ptr++; - } - - if (end_ptr[0] == '\0') { - *min_nodes = val1; - return true; - } - - if (end_ptr[0] != '-') - return false; - - val2 = strtod(&end_ptr[1], &end_ptr); - if (end_ptr[0] == 'k' || end_ptr[0] == 'K') { - val2 *= 1024; - end_ptr++; - } - - if (end_ptr[0] == '\0') { - *min_nodes = val1; - *max_nodes = val2; - return true; - } else - return false; - -} - -/* return command name from its full path name */ -static char * _base_name(char* command) -{ - char *char_ptr, *name; - int i; - - if (command == NULL) - return NULL; - - char_ptr = strrchr(command, (int)'/'); - if (char_ptr == NULL) - char_ptr = command; - else - char_ptr++; - - i = strlen(char_ptr); - name = xmalloc(i+1); - strcpy(name, char_ptr); - return name; -} - -/* - * _to_bytes(): verify that arg is numeric with optional "G" or "M" at end - * if "G" or "M" is there, multiply by proper power of 2 and return - * number in bytes - */ -static long _to_bytes(const char *arg) -{ - char *buf; - char *endptr; - int end; - int multiplier = 1; - long result; - - buf = xstrdup(arg); - - end = strlen(buf) - 1; - - if (isdigit(buf[end])) { - result = strtol(buf, &endptr, 10); - - if (*endptr != '\0') - result = -result; - - } else { - - switch (toupper(buf[end])) { - - case 'G': - multiplier = 1024; - break; - - case 'M': - /* do nothing */ - break; - - default: - multiplier = -1; - } - - buf[end] = '\0'; - - result = multiplier * strtol(buf, &endptr, 10); - - if (*endptr != '\0') - result = -result; - } - - return result; -} - /* * print error message to stderr with opt.progname prepended */ @@ -399,25 +226,37 @@ static void _opt_default() opt.nprocs_set = false; opt.cpus_per_task = 1; opt.cpus_set = false; - opt.min_nodes = 1; + opt.min_nodes = 0; opt.max_nodes = 0; - opt.tasks_per_node = -1; opt.nodes_set = false; + opt.min_sockets_per_node = NO_VAL; /* requested min/maxsockets */ + opt.max_sockets_per_node = NO_VAL; + opt.min_cores_per_socket = NO_VAL; /* requested min/maxcores */ + opt.max_cores_per_socket = NO_VAL; + opt.min_threads_per_core = NO_VAL; /* requested min/maxthreads */ + opt.max_threads_per_core = NO_VAL; + opt.ntasks_per_node = 0; /* ntask max limits */ + opt.ntasks_per_socket = NO_VAL; + opt.ntasks_per_core = NO_VAL; + opt.cpu_bind_type = 0; /* local dummy variable for now */ opt.time_limit = NO_VAL; opt.partition = NULL; opt.job_name = NULL; opt.jobid = NO_VAL; opt.jobid_set = false; - opt.dependency = NO_VAL; + opt.dependency = NULL; opt.account = NULL; opt.comment = NULL; + opt.distribution = 
SLURM_DIST_UNKNOWN; + opt.plane_size = NO_VAL; + opt.shared = (uint16_t)NO_VAL; opt.no_kill = false; opt.immediate = false; - opt.no_requeue = false; + opt.requeue = NO_VAL; opt.overcommit = false; opt.quiet = 0; @@ -428,6 +267,7 @@ static void _opt_default() opt.minsockets = -1; opt.mincores = -1; opt.minthreads = -1; + opt.task_mem = -1; opt.realmem = -1; opt.tmpdisk = -1; @@ -454,6 +294,7 @@ static void _opt_default() opt.get_user_env_time = -1; opt.get_user_env_mode = -1; + opt.acctg_freq = -1; } /*---[ env var processing ]-----------------------------------------------*/ @@ -486,13 +327,16 @@ env_vars_t env_vars[] = { {"SBATCH_JOB_NAME", OPT_STRING, &opt.job_name, NULL }, {"SBATCH_LINUX_IMAGE", OPT_STRING, &opt.linuximage, NULL }, {"SBATCH_MLOADER_IMAGE", OPT_STRING, &opt.mloaderimage, NULL }, - {"SBATCH_NO_REQUEUE", OPT_BOOL, &opt.no_requeue, NULL }, + {"SBATCH_NO_REQUEUE", OPT_NO_REQUEUE, NULL, NULL }, + {"SBATCH_REQUEUE", OPT_REQUEUE, NULL, NULL }, {"SBATCH_NO_ROTATE", OPT_BOOL, &opt.no_rotate, NULL }, {"SBATCH_OVERCOMMIT", OPT_OVERCOMMIT, NULL, NULL }, {"SBATCH_PARTITION", OPT_STRING, &opt.partition, NULL }, {"SBATCH_RAMDISK_IMAGE", OPT_STRING, &opt.ramdiskimage, NULL }, {"SBATCH_TIMELIMIT", OPT_STRING, &opt.time_limit_str,NULL }, {"SBATCH_EXCLUSIVE", OPT_EXCLUSIVE, NULL, NULL }, + {"SBATCH_OPEN_MODE", OPT_OPEN_MODE, NULL, NULL }, + {"SBATCH_ACCTG_FREQ", OPT_INT, &opt.acctg_freq, NULL }, {NULL, 0, NULL, NULL} }; @@ -564,9 +408,9 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_NODES: - opt.nodes_set = _verify_node_count( val, - &opt.min_nodes, - &opt.max_nodes ); + opt.nodes_set = verify_node_count( val, + &opt.min_nodes, + &opt.max_nodes ); if (opt.nodes_set == false) { error("\"%s=%s\" -- invalid node count. ignoring...", e->var, val); @@ -574,7 +418,7 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_CONN_TYPE: - opt.conn_type = _verify_conn_type(val); + opt.conn_type = verify_conn_type(val); break; case OPT_NO_ROTATE: @@ -582,7 +426,7 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_GEOMETRY: - if (_verify_geometry(val, opt.geometry)) { + if (verify_geometry(val, opt.geometry)) { error("\"%s=%s\" -- invalid geometry, ignoring...", e->var, val); } @@ -596,6 +440,23 @@ _process_env_var(env_vars_t *e, const char *val) opt.overcommit = true; break; + case OPT_OPEN_MODE: + if ((val[0] == 'a') || (val[0] == 'A')) + opt.open_mode = OPEN_MODE_APPEND; + else if ((val[0] == 't') || (val[0] == 'T')) + opt.open_mode = OPEN_MODE_TRUNCATE; + else + error("Invalid SBATCH_OPEN_MODE: %s. 
Ignored", val); + break; + + case OPT_NO_REQUEUE: + opt.requeue = 0; + break; + + case OPT_REQUEUE: + opt.requeue = 1; + break; + default: /* do nothing */ break; @@ -610,9 +471,9 @@ static struct option long_options[] = { is only here for moab tansition doesn't do anything */ + {"extra-node-info", required_argument, 0, 'B'}, {"cpus-per-task", required_argument, 0, 'c'}, {"constraint", required_argument, 0, 'C'}, - {"dependency", required_argument, 0, 'd'}, {"workdir", required_argument, 0, 'D'}, {"error", required_argument, 0, 'e'}, {"nodefile", required_argument, 0, 'F'}, @@ -623,12 +484,15 @@ static struct option long_options[] = { {"immediate", no_argument, 0, 'I'}, {"job-name", required_argument, 0, 'J'}, {"no-kill", no_argument, 0, 'k'}, + {"licenses", required_argument, 0, 'L'}, + {"distribution", required_argument, 0, 'm'}, {"tasks", required_argument, 0, 'n'}, {"ntasks", required_argument, 0, 'n'}, {"nodes", required_argument, 0, 'N'}, {"output", required_argument, 0, 'o'}, {"overcommit", no_argument, 0, 'O'}, {"partition", required_argument, 0, 'p'}, + {"dependency", required_argument, 0, 'P'}, {"quiet", no_argument, 0, 'q'}, {"no-rotate", no_argument, 0, 'R'}, {"share", no_argument, 0, 's'}, @@ -646,6 +510,9 @@ static struct option long_options[] = { {"mincores", required_argument, 0, LONG_OPT_MINCORES}, {"minthreads", required_argument, 0, LONG_OPT_MINTHREADS}, {"mem", required_argument, 0, LONG_OPT_MEM}, + {"job-mem", required_argument, 0, LONG_OPT_TASK_MEM}, + {"task-mem", required_argument, 0, LONG_OPT_TASK_MEM}, + {"hint", required_argument, 0, LONG_OPT_HINT}, {"tmp", required_argument, 0, LONG_OPT_TMP}, {"jobid", required_argument, 0, LONG_OPT_JOBID}, {"uid", required_argument, 0, LONG_OPT_UID}, @@ -655,24 +522,31 @@ static struct option long_options[] = { {"mail-type", required_argument, 0, LONG_OPT_MAIL_TYPE}, {"mail-user", required_argument, 0, LONG_OPT_MAIL_USER}, {"nice", optional_argument, 0, LONG_OPT_NICE}, - {"requeue", no_argument, 0, LONG_OPT_REQUEUE}, {"no-requeue", no_argument, 0, LONG_OPT_NO_REQUEUE}, + {"requeue", no_argument, 0, LONG_OPT_REQUEUE}, {"comment", required_argument, 0, LONG_OPT_COMMENT}, + {"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE}, + {"cores-per-socket", required_argument, 0, LONG_OPT_CORESPERSOCKET}, + {"threads-per-core", required_argument, 0, LONG_OPT_THREADSPERCORE}, + {"ntasks-per-node", required_argument, 0, LONG_OPT_NTASKSPERNODE}, + {"ntasks-per-socket",required_argument, 0, LONG_OPT_NTASKSPERSOCKET}, + {"ntasks-per-core", required_argument, 0, LONG_OPT_NTASKSPERCORE}, {"blrts-image", required_argument, 0, LONG_OPT_BLRTS_IMAGE}, {"linux-image", required_argument, 0, LONG_OPT_LINUX_IMAGE}, {"mloader-image", required_argument, 0, LONG_OPT_MLOADER_IMAGE}, {"ramdisk-image", required_argument, 0, LONG_OPT_RAMDISK_IMAGE}, {"reboot", no_argument, 0, LONG_OPT_REBOOT}, - {"tasks-per-node", required_argument,0,LONG_OPT_TASKSPERNODE}, - {"ntasks-per-node", required_argument,0,LONG_OPT_TASKSPERNODE}, + {"tasks-per-node",required_argument, 0, LONG_OPT_NTASKSPERNODE}, {"wrap", required_argument, 0, LONG_OPT_WRAP}, {"get-user-env", optional_argument, 0, LONG_OPT_GET_USER_ENV}, + {"open-mode", required_argument, 0, LONG_OPT_OPEN_MODE}, + {"acctg-freq", required_argument, 0, LONG_OPT_ACCTG_FREQ}, {"propagate", optional_argument, 0, LONG_OPT_PROPAGATE}, {NULL, 0, 0, 0} }; static char *opt_string = - "+a:bc:C:d:D:e:F:g:hHi:IJ:kn:N:o:Op:qR:st:uU:vVw:x:"; + "+a:bB:c:C:d:D:e:F:g:hHi:IJ:kL:m:n:N:o:Op:P:qR:st:uU:vVw:x:"; /* @@ -694,6 
+568,12 @@ char *process_options_first_pass(int argc, char **argv) { int opt_char, option_index = 0; char *str = NULL; + struct option *optz = spank_option_table_create (long_options); + + if (!optz) { + error ("Unable to create option table"); + exit (1); + } /* initialize option defaults */ _opt_default(); @@ -702,7 +582,7 @@ char *process_options_first_pass(int argc, char **argv) optind = 0; while((opt_char = getopt_long(argc, argv, opt_string, - long_options, &option_index)) != -1) { + optz, &option_index)) != -1) { switch (opt_char) { case '?': fprintf(stderr, "Try \"sbatch --help\" for more " @@ -723,7 +603,7 @@ char *process_options_first_pass(int argc, char **argv) opt.verbose++; break; case 'V': - _print_version(); + print_slurm_version(); exit(0); break; case LONG_OPT_WRAP: @@ -757,7 +637,7 @@ char *process_options_first_pass(int argc, char **argv) char *cmd = opt.script_argv[0]; int mode = R_OK; - if ((fullpath = _search_path(cmd, true, mode))) { + if ((fullpath = search_path(opt.cwd, cmd, true, mode))) { xfree(opt.script_argv[0]); opt.script_argv[0] = fullpath; } @@ -1021,10 +901,16 @@ static void _set_options(int argc, char **argv) { int opt_char, option_index = 0; char *tmp; + struct option *optz = spank_option_table_create (long_options); + + if (!optz) { + error ("Unable to create option table"); + exit (1); + } optind = 0; while((opt_char = getopt_long(argc, argv, opt_string, - long_options, &option_index)) != -1) { + optz, &option_index)) != -1) { switch (opt_char) { case '?': fatal("Try \"sbatch --help\" for more information"); @@ -1033,6 +919,24 @@ static void _set_options(int argc, char **argv) /* Only here for Moab transition not suppose to do anything */ break; + case 'B': + opt.extra_set = verify_socket_core_thread_count( + optarg, + &opt.min_sockets_per_node, + &opt.max_sockets_per_node, + &opt.min_cores_per_socket, + &opt.max_cores_per_socket, + &opt.min_threads_per_core, + &opt.max_threads_per_core, + &opt.cpu_bind_type); + + + if (opt.extra_set == false) { + error("invalid resource allocation -B `%s'", + optarg); + exit(1); + } + break; case 'c': opt.cpus_set = true; opt.cpus_per_task = @@ -1042,9 +946,7 @@ static void _set_options(int argc, char **argv) xfree(opt.constraints); opt.constraints = xstrdup(optarg); break; - case 'd': - opt.dependency = _get_int(optarg, "dependency"); - break; +/* case 'd': See 'P' below */ case 'D': xfree(opt.cwd); opt.cwd = xstrdup(optarg); @@ -1054,7 +956,7 @@ static void _set_options(int argc, char **argv) if (strncasecmp(optarg, "none", (size_t)4) == 0) opt.efname = xstrdup("/dev/null"); else - opt.efname = _fullpath(optarg); + opt.efname = xstrdup(optarg); break; case 'F': xfree(opt.nodelist); @@ -1068,7 +970,7 @@ static void _set_options(int argc, char **argv) } break; case 'g': - if (_verify_geometry(optarg, opt.geometry)) + if (verify_geometry(optarg, opt.geometry)) exit(1); break; case 'h': @@ -1082,7 +984,7 @@ static void _set_options(int argc, char **argv) if (strncasecmp(optarg, "none", (size_t)4) == 0) opt.ifname = xstrdup("/dev/null"); else - opt.ifname = _fullpath(optarg); + opt.ifname = xstrdup(optarg); break; case 'I': opt.immediate = true; @@ -1094,6 +996,19 @@ static void _set_options(int argc, char **argv) case 'k': opt.no_kill = true; break; + case 'L': + xfree(opt.licenses); + opt.licenses = xstrdup(optarg); + break; + case 'm': + opt.distribution = verify_dist_type(optarg, + &opt.plane_size); + if (opt.distribution == SLURM_DIST_UNKNOWN) { + error("distribution type `%s' " + "is not recognized", optarg); + 
exit(1); + } + break; case 'n': opt.nprocs_set = true; opt.nprocs = @@ -1101,9 +1016,9 @@ static void _set_options(int argc, char **argv) break; case 'N': opt.nodes_set = - _verify_node_count(optarg, - &opt.min_nodes, - &opt.max_nodes); + verify_node_count(optarg, + &opt.min_nodes, + &opt.max_nodes); if (opt.nodes_set == false) { error("invalid node count `%s'", optarg); @@ -1115,7 +1030,7 @@ static void _set_options(int argc, char **argv) if (strncasecmp(optarg, "none", (size_t)4) == 0) opt.ofname = xstrdup("/dev/null"); else - opt.ofname = _fullpath(optarg); + opt.ofname = xstrdup(optarg); break; case 'O': opt.overcommit = true; @@ -1124,6 +1039,12 @@ static void _set_options(int argc, char **argv) xfree(opt.partition); opt.partition = xstrdup(optarg); break; + case 'd': + case 'P': + /* use -P instead */ + xfree(opt.dependency); + opt.dependency = xstrdup(optarg); + break; case 'q': opt.quiet++; break; @@ -1148,7 +1069,7 @@ static void _set_options(int argc, char **argv) opt.verbose++; break; case 'V': - _print_version(); + print_slurm_version(); exit(0); break; case 'w': @@ -1207,15 +1128,24 @@ static void _set_options(int argc, char **argv) } break; case LONG_OPT_MEM: - opt.realmem = (int) _to_bytes(optarg); + opt.realmem = (int) str_to_bytes(optarg); if (opt.realmem < 0) { error("invalid memory constraint %s", optarg); exit(1); } break; + case LONG_OPT_TASK_MEM: + opt.task_mem = (int) str_to_bytes(optarg); + if (opt.task_mem < 0) { + error("invalid memory constraint %s", + optarg); + exit(1); + } + setenvf(NULL, "SLURM_TASK_MEM", "%d", opt.task_mem); + break; case LONG_OPT_TMP: - opt.tmpdisk = _to_bytes(optarg); + opt.tmpdisk = str_to_bytes(optarg); if (opt.tmpdisk < 0) { error("invalid tmp value %s", optarg); exit(1); @@ -1236,7 +1166,7 @@ static void _set_options(int argc, char **argv) fatal ("--gid=\"%s\" invalid", optarg); break; case LONG_OPT_CONNTYPE: - opt.conn_type = _verify_conn_type(optarg); + opt.conn_type = verify_conn_type(optarg); break; case LONG_OPT_BEGIN: opt.begin = parse_time(optarg); @@ -1246,7 +1176,7 @@ static void _set_options(int argc, char **argv) } break; case LONG_OPT_MAIL_TYPE: - opt.mail_type |= _parse_mail_type(optarg); + opt.mail_type |= parse_mail_type(optarg); if (opt.mail_type == 0) fatal("--mail-type=%s invalid", optarg); break; @@ -1275,15 +1205,63 @@ static void _set_options(int argc, char **argv) } break; case LONG_OPT_NO_REQUEUE: - opt.no_requeue = true; + opt.requeue = 0; break; case LONG_OPT_REQUEUE: - opt.no_requeue = false; /* the default */ + opt.requeue = 1; break; case LONG_OPT_COMMENT: xfree(opt.comment); opt.comment = xstrdup(optarg); break; + case LONG_OPT_SOCKETSPERNODE: + get_resource_arg_range( optarg, "sockets-per-node", + &opt.min_sockets_per_node, + &opt.max_sockets_per_node, + true ); + break; + case LONG_OPT_CORESPERSOCKET: + get_resource_arg_range( optarg, "cores-per-socket", + &opt.min_cores_per_socket, + &opt.max_cores_per_socket, + true); + break; + case LONG_OPT_THREADSPERCORE: + get_resource_arg_range( optarg, "threads-per-core", + &opt.min_threads_per_core, + &opt.max_threads_per_core, + true ); + break; + case LONG_OPT_HINT: + if (verify_hint(optarg, + &opt.min_sockets_per_node, + &opt.max_sockets_per_node, + &opt.min_cores_per_socket, + &opt.max_cores_per_socket, + &opt.min_threads_per_core, + &opt.max_threads_per_core, + &opt.cpu_bind_type)) { + exit(1); + } + break; + case LONG_OPT_NTASKSPERNODE: + opt.ntasks_per_node = _get_int(optarg, + "ntasks-per-node"); + setenvf(NULL, "SLURM_NTASKS_PER_NODE", "%d", + 
opt.ntasks_per_node); + break; + case LONG_OPT_NTASKSPERSOCKET: + opt.ntasks_per_socket = _get_int(optarg, + "ntasks-per-socket"); + setenvf(NULL, "SLURM_NTASKS_PER_SOCKET", "%d", + opt.ntasks_per_socket); + break; + case LONG_OPT_NTASKSPERCORE: + opt.ntasks_per_core = _get_int(optarg, + "ntasks-per-core"); + setenvf(NULL, "SLURM_NTASKS_PER_CORE", "%d", + opt.ntasks_per_socket); + break; case LONG_OPT_BLRTS_IMAGE: xfree(opt.blrtsimage); opt.blrtsimage = xstrdup(optarg); @@ -1303,11 +1281,6 @@ static void _set_options(int argc, char **argv) case LONG_OPT_REBOOT: opt.reboot = true; break; - case LONG_OPT_TASKSPERNODE: - opt.tasks_per_node = _get_int(optarg, "ntasks-per-node"); - setenvf(NULL, "SLURM_NTASKS_PER_NODE", "%d", - opt.tasks_per_node); - break; case LONG_OPT_WRAP: /* handled in process_options_first_pass() */ break; @@ -1317,6 +1290,19 @@ static void _set_options(int argc, char **argv) else opt.get_user_env_time = 0; break; + case LONG_OPT_OPEN_MODE: + if ((optarg[0] == 'a') || (optarg[0] == 'A')) + opt.open_mode = OPEN_MODE_APPEND; + else if ((optarg[0] == 't') || (optarg[0] == 'T')) + opt.open_mode = OPEN_MODE_TRUNCATE; + else { + error("Invalid --open-mode argument: %s. " + "Ignored", optarg); + } + break; + case LONG_OPT_ACCTG_FREQ: + opt.acctg_freq = _get_int(optarg, "acctg-freq"); + break; case LONG_OPT_PROPAGATE: xfree(opt.propagate); if (optarg) @@ -1325,14 +1311,15 @@ static void _set_options(int argc, char **argv) opt.propagate = xstrdup("ALL"); break; default: - fatal("Unrecognized command line parameter %c", - opt_char); + if (spank_process_option (opt_char, optarg) < 0) + exit (1); } } if (optind < argc) { fatal("Invalid argument: %s", argv[optind]); } + spank_option_table_destroy (optz); } static void _proc_get_user_env(char *optarg) @@ -1627,7 +1614,7 @@ static void _parse_pbs_resource_list(char *rl) */ temp[end] = '\0'; } - opt.tmpdisk = _to_bytes(temp); + opt.tmpdisk = str_to_bytes(temp); if (opt.tmpdisk < 0) { error("invalid tmp value %s", temp); exit(1); @@ -1651,7 +1638,7 @@ static void _parse_pbs_resource_list(char *rl) */ temp[end] = '\0'; } - opt.realmem = (int) _to_bytes(temp); + opt.realmem = (int) str_to_bytes(temp); if (opt.realmem < 0) { error("invalid memory constraint %s", temp); @@ -1732,11 +1719,15 @@ static bool _opt_verify(void) verified = false; } + _fullpath(&opt.efname, opt.cwd); + _fullpath(&opt.ifname, opt.cwd); + _fullpath(&opt.ofname, opt.cwd); + if (opt.mincpus < opt.cpus_per_task) opt.mincpus = opt.cpus_per_task; if ((opt.job_name == NULL) && (opt.script_argc > 0)) - opt.job_name = _base_name(opt.script_argv[0]); + opt.job_name = base_name(opt.script_argv[0]); /* check for realistic arguments */ if (opt.nprocs <= 0) { @@ -1751,17 +1742,95 @@ static bool _opt_verify(void) verified = false; } - if ((opt.min_nodes <= 0) || (opt.max_nodes < 0) || + if ((opt.min_nodes < 0) || (opt.max_nodes < 0) || (opt.max_nodes && (opt.min_nodes > opt.max_nodes))) { error("%s: invalid number of nodes (-N %d-%d)\n", opt.progname, opt.min_nodes, opt.max_nodes); verified = false; } + /* When CR with memory as a CR is enabled we need to assign + * adequate value or check the value to opt.mem */ + if ((opt.realmem >= -1) && (opt.task_mem > 0)) { + if (opt.realmem == -1) { + opt.realmem = opt.task_mem; + } else if (opt.realmem < opt.task_mem) { + info("mem < task-mem - resizing mem to be equal " + "to task-mem"); + opt.realmem = opt.task_mem; + } + } + + /* Check to see if user has specified enough resources to + * satisfy the plane distribution with the specified 
+ * plane_size. + * if (n/plane_size < N) and ((N-1) * plane_size >= n) --> + * problem Simple check will not catch all the problem/invalid + * cases. + * The limitations of the plane distribution in the cons_res + * environment are more extensive and are documented in the + * SLURM reference guide. */ + if (opt.distribution == SLURM_DIST_PLANE && opt.plane_size) { + if ((opt.min_nodes <= 0) || + ((opt.nprocs/opt.plane_size) < opt.min_nodes)) { + if (((opt.min_nodes-1)*opt.plane_size) >= opt.nprocs) { +#if(0) + info("Too few processes ((n/plane_size) %d < N %d) " + "and ((N-1)*(plane_size) %d >= n %d)) ", + opt.nprocs/opt.plane_size, opt.min_nodes, + (opt.min_nodes-1)*opt.plane_size, opt.nprocs); +#endif + error("Too few processes for the requested " + "{plane,node} distribution"); + exit(1); + } + } + } + + /* bound max_threads/cores from ntasks_cores/sockets */ + if ((opt.max_threads_per_core <= 0) && + (opt.ntasks_per_core > 0)) { + opt.max_threads_per_core = opt.ntasks_per_core; + /* if cpu_bind_type doesn't already have a auto pref, + * choose the level based on the level of ntasks + */ + if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | + CPU_BIND_TO_CORES | + CPU_BIND_TO_THREADS))) { + opt.cpu_bind_type |= CPU_BIND_TO_CORES; + } + } + if ((opt.max_cores_per_socket <= 0) && + (opt.ntasks_per_socket > 0)) { + opt.max_cores_per_socket = opt.ntasks_per_socket; + /* if cpu_bind_type doesn't already have a auto pref, + * choose the level based on the level of ntasks + */ + if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | + CPU_BIND_TO_CORES | + CPU_BIND_TO_THREADS))) { + opt.cpu_bind_type |= CPU_BIND_TO_SOCKETS; + } + } + /* massage the numbers */ - if (opt.nodes_set && !opt.nprocs_set) { + if ((opt.nodes_set || opt.extra_set) && !opt.nprocs_set) { /* 1 proc / node default */ - opt.nprocs = opt.min_nodes; + opt.nprocs = MAX(opt.min_nodes, 1); + + /* 1 proc / min_[socket * core * thread] default */ + if (opt.min_sockets_per_node > 0) { + opt.nprocs *= opt.min_sockets_per_node; + opt.nprocs_set = true; + } + if (opt.min_cores_per_socket > 0) { + opt.nprocs *= opt.min_cores_per_socket; + opt.nprocs_set = true; + } + if (opt.min_threads_per_core > 0) { + opt.nprocs *= opt.min_threads_per_core; + opt.nprocs_set = true; + } } else if (opt.nodes_set && opt.nprocs_set) { @@ -1784,7 +1853,7 @@ static bool _opt_verify(void) if (opt.time_limit_str) { opt.time_limit = time_str2mins(opt.time_limit_str); - if (opt.time_limit < 0) { + if ((opt.time_limit < 0) && (opt.time_limit != INFINITE)) { error("Invalid time limit specification"); exit(1); } @@ -1811,30 +1880,22 @@ static bool _opt_verify(void) xfree(sched_name); } + if (opt.open_mode) { + /* Propage mode to spawned job using environment variable */ + if (opt.open_mode == OPEN_MODE_APPEND) + setenvf(NULL, "SLURM_OPEN_MODE", "a"); + else + setenvf(NULL, "SLURM_OPEN_MODE", "t"); + } if (opt.propagate && parse_rlimits( opt.propagate, PROPAGATE_RLIMITS)) { error( "--propagate=%s is not valid.", opt.propagate ); verified = false; } - return verified; -} - -static uint16_t _parse_mail_type(const char *arg) -{ - uint16_t rc; - - if (strcasecmp(arg, "BEGIN") == 0) - rc = MAIL_JOB_BEGIN; - else if (strcasecmp(arg, "END") == 0) - rc = MAIL_JOB_END; - else if (strcasecmp(arg, "FAIL") == 0) - rc = MAIL_JOB_FAIL; - else if (strcasecmp(arg, "ALL") == 0) - rc = MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL; - else - rc = 0; /* failure */ + if (opt.acctg_freq >= 0) + setenvf(NULL, "SLURM_ACCTG_FREQ", "%d", opt.acctg_freq); - return rc; + return verified; } static 
uint16_t _parse_pbs_mail_type(const char *arg) @@ -1867,101 +1928,6 @@ static uint16_t _parse_pbs_mail_type(const char *arg) return rc; } -static char *_print_mail_type(const uint16_t type) -{ - if (type == 0) - return "NONE"; - - if (type == MAIL_JOB_BEGIN) - return "BEGIN"; - if (type == MAIL_JOB_END) - return "END"; - if (type == MAIL_JOB_FAIL) - return "FAIL"; - if (type == (MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL)) - return "ALL"; - - return "MULTIPLE"; -} - -static void -_freeF(void *data) -{ - xfree(data); -} - -static List -_create_path_list(void) -{ - List l = list_create(_freeF); - char *path, *c, *lc; - - c = getenv("PATH"); - if (!c) { - verbose("No PATH environment variable"); - return l; - } - - path = xstrdup(c); - c = lc = path; - while (*c != '\0') { - if (*c == ':') { - /* nullify and push token onto list */ - *c = '\0'; - if (lc != NULL && strlen(lc) > 0) - list_append(l, xstrdup(lc)); - lc = ++c; - } else - c++; - } - - if (strlen(lc) > 0) - list_append(l, xstrdup(lc)); - - xfree(path); - - return l; -} - -static char * -_search_path(char *cmd, bool check_current_dir, int access_mode) -{ - List l = NULL; - ListIterator i = NULL; - char *path, *fullpath = NULL; - - if ( (cmd[0] == '.' || cmd[0] == '/') - && (access(cmd, access_mode) == 0 ) ) { - if (cmd[0] == '.') - xstrfmtcat(fullpath, "%s/", opt.cwd); - xstrcat(fullpath, cmd); - goto done; - } - - l = _create_path_list(); - if (l == NULL) - return NULL; - - if (check_current_dir) - list_prepend(l, xstrdup(opt.cwd)); - - i = list_iterator_create(l); - while ((path = list_next(i))) { - xstrfmtcat(fullpath, "%s/%s", path, cmd); - - if (access(fullpath, access_mode) == 0) - goto done; - - xfree(fullpath); - fullpath = NULL; - } - done: - if (l) - list_destroy(l); - return fullpath; -} - - /* helper function for printing options * * warning: returns pointer to memory allocated on the stack. @@ -1973,9 +1939,21 @@ static char *print_constraints() if (opt.mincpus > 0) xstrfmtcat(buf, "mincpus=%d ", opt.mincpus); + if (opt.minsockets > 0) + xstrfmtcat(buf, "minsockets=%d ", opt.minsockets); + + if (opt.mincores > 0) + xstrfmtcat(buf, "mincores=%d ", opt.mincores); + + if (opt.minthreads > 0) + xstrfmtcat(buf, "minthreads=%d ", opt.minthreads); + if (opt.realmem > 0) xstrfmtcat(buf, "mem=%dM ", opt.realmem); + if (opt.task_mem > 0) + xstrfmtcat(buf, "task-mem=%dM ", opt.task_mem); + if (opt.tmpdisk > 0) xstrfmtcat(buf, "tmp=%ld ", opt.tmpdisk); @@ -1994,40 +1972,6 @@ static char *print_constraints() return buf; } -static char * -print_commandline() -{ - int i; - char buf[256]; - - buf[0] = '\0'; - for (i = 0; i < opt.script_argc; i++) - snprintf(buf, 256, "%s", opt.script_argv[i]); - return xstrdup(buf); -} - -static char * -print_geometry() -{ - int i; - char buf[32], *rc = NULL; - - if ((SYSTEM_DIMENSIONS == 0) - || (opt.geometry[0] == (uint16_t)NO_VAL)) - return NULL; - - for (i=0; i<SYSTEM_DIMENSIONS; i++) { - if (i > 0) - snprintf(buf, sizeof(buf), "x%u", opt.geometry[i]); - else - snprintf(buf, sizeof(buf), "%u", opt.geometry[i]); - xstrcat(rc, buf); - } - - return rc; -} - - /* * Get a decimal integer from arg. * @@ -2057,23 +2001,18 @@ _get_int(const char *arg, const char *what) * Return an absolute path for the "filename". If "filename" is already * an absolute path, it returns a copy. Free the returned with xfree(). 
*/ -static char *_fullpath(const char *filename) +static void _fullpath(char **filename, const char *cwd) { - char cwd[BUFSIZ]; char *ptr = NULL; - if (filename[0] == '/') { - return xstrdup(filename); - } else { - if (getcwd(cwd, BUFSIZ) == NULL) { - error("could not get current working directory"); - return NULL; - } - ptr = xstrdup(cwd); - xstrcat(ptr, "/"); - xstrcat(ptr, filename); - return ptr; - } + if ((*filename == NULL) || (*filename[0] == '/')) + return; + + ptr = xstrdup(cwd); + xstrcat(ptr, "/"); + xstrcat(ptr, *filename); + xfree(*filename); + *filename = ptr; } #define tf_(b) (b == true) ? "true" : "false" @@ -2104,9 +2043,13 @@ static void _opt_list() info("partition : %s", opt.partition == NULL ? "default" : opt.partition); info("job name : `%s'", opt.job_name); + info("distribution : %s", format_task_dist_states(opt.distribution)); + if(opt.distribution == SLURM_DIST_PLANE) + info("plane size : %u", opt.plane_size); info("verbose : %d", opt.verbose); info("immediate : %s", tf_(opt.immediate)); - info("no-requeue : %s", tf_(opt.no_requeue)); + if (opt.requeue != NO_VAL) + info("requeue : %u", opt.requeue); info("overcommit : %s", tf_(opt.overcommit)); if (opt.time_limit == INFINITE) info("time_limit : INFINITE"); @@ -2116,16 +2059,13 @@ static void _opt_list() info("nice : %d", opt.nice); info("account : %s", opt.account); info("comment : %s", opt.comment); - if (opt.dependency == NO_VAL) - info("dependency : none"); - else - info("dependency : %u", opt.dependency); + info("dependency : %s", opt.dependency); str = print_constraints(); info("constraints : %s", str); xfree(str); if (opt.conn_type != (uint16_t) NO_VAL) info("conn_type : %u", opt.conn_type); - str = print_geometry(); + str = print_geometry(opt.geometry); info("geometry : %s", str); xfree(str); info("reboot : %s", opt.reboot ? "no" : "yes"); @@ -2145,12 +2085,21 @@ static void _opt_list() slurm_make_time_str(&opt.begin, time_str, sizeof(time_str)); info("begin : %s", time_str); } - info("mail_type : %s", _print_mail_type(opt.mail_type)); + info("mail_type : %s", print_mail_type(opt.mail_type)); info("mail_user : %s", opt.mail_user); - info("tasks-per-node : %d", opt.tasks_per_node); + info("sockets-per-node : %d - %d", opt.min_sockets_per_node, + opt.max_sockets_per_node); + info("cores-per-socket : %d - %d", opt.min_cores_per_socket, + opt.max_cores_per_socket); + info("threads-per-core : %d - %d", opt.min_threads_per_core, + opt.max_threads_per_core); + info("ntasks-per-node : %d", opt.ntasks_per_node); + info("ntasks-per-socket : %d", opt.ntasks_per_socket); + info("ntasks-per-core : %d", opt.ntasks_per_core); + info("plane_size : %u", opt.plane_size); info("propagate : %s", opt.propagate == NULL ? 
"NONE" : opt.propagate); - str = print_commandline(); + str = print_commandline(opt.script_argc, opt.script_argv); info("remote command : `%s'", str); xfree(str); @@ -2162,25 +2111,27 @@ static void _usage(void) "Usage: sbatch [-N nnodes] [-n ntasks]\n" " [-c ncpus] [-r n] [-p partition] [--hold] [-t minutes]\n" " [-D path] [--immediate] [--no-kill] [--overcommit]\n" -" [--input file] [--output file] [--error file]\n" +" [--input file] [--output file] [--error file] [--licenses=names]\n" " [--workdir=directory] [--share] [-m dist] [-J jobname]\n" " [--jobid=id] [--verbose] [--gid=group] [--uid=user]\n" " [-W sec] [--minsockets=n] [--mincores=n] [--minthreads=n]\n" " [--contiguous] [--mincpus=n] [--mem=MB] [--tmp=MB] [-C list]\n" -" [--account=name] [--dependency=jobid] [--comment=name]\n" +" [--account=name] [--dependency=type:jobid] [--comment=name]\n" #ifdef HAVE_BG /* Blue gene specific options */ " [--geometry=XxYxZ] [--conn-type=type] [--no-rotate] [ --reboot]\n" " [--blrts-image=path] [--linux-image=path]\n" " [--mloader-image=path] [--ramdisk-image=path]\n" #endif " [--mail-type=type] [--mail-user=user][--nice[=value]]\n" -" [--no-requeue] [--ntasks-per-node=n] [--propagate]\n" +" [--requeue] [--no-requeue] [--ntasks-per-node=n] [--propagate]\n" " [--nodefile=file] [--nodelist=hosts] [--exclude=hosts]\n" " executable [args...]\n"); } static void _help(void) { + slurm_ctl_conf_t *conf; + printf ( "Usage: sbatch [OPTIONS...] executable [args...]\n" "\n" @@ -2203,19 +2154,21 @@ static void _help(void) " --jobid=id run under already allocated job\n" " -v, --verbose verbose mode (multiple -v's increase verbosity)\n" " -q, --quiet quiet mode (suppress informational messages)\n" -" -d, --dependency=jobid defer job until specified jobid completes\n" +" -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n" " -D, --workdir=directory set working directory for batch script\n" " --nice[=value] decrease secheduling priority by value\n" " -O, --overcommit overcommit resources\n" " -U, --account=name charge job to specified account\n" " --begin=time defer job until HH:MM DD/MM/YY\n" " --comment=name arbitrary comment\n" +" -L, --licenses=names required license, comma separated\n" " --mail-type=type notify on state change: BEGIN, END, FAIL or ALL\n" " --mail-user=user who to send email notification for job state changes\n" " --gid=group_id group ID to run job as (user root only)\n" " --uid=user_id user ID to run job as (user root only)\n" " --get-user-env used by Moab. 
See srun man page.\n" " --no-requeue if set, do not permit the job to be requeued\n" +" --requeue if set, permit the job to be requeued\n" " --propagate[=rlimits] propagate all [or specific list of] rlimits\n" "\n" "Constraint options:\n" @@ -2234,7 +2187,33 @@ static void _help(void) "Consumable resources related options:\n" " --exclusive allocate nodes in exclusive mode when\n" " cpu consumable resource is enabled\n" +" --task-mem=MB maximum amount of real memory per task\n" +" required by the job.\n" +" --mem >= --job-mem if --mem is specified.\n" +"\n" +"Affinity/Multi-core options: (when the task/affinity plugin is enabled)\n" +" -B --extra-node-info=S[:C[:T]] Expands to:\n" +" --sockets-per-node=S number of sockets per node to allocate\n" +" --cores-per-socket=C number of cores per socket to allocate\n" +" --threads-per-core=T number of threads per core to allocate\n" +" each field can be 'min[-max]' or wildcard '*'\n" +" total cpus requested = (N x S x C x T)\n" "\n" +" --ntasks-per-socket=n number of tasks to invoke on each socket\n" +" --ntasks-per-core=n number of tasks to invoke on each core\n"); + conf = slurm_conf_lock(); + if (conf->task_plugin != NULL + && strcasecmp(conf->task_plugin, "task/affinity") == 0) { + printf( +" --hint= Bind tasks according to application hints\n" +" (see \"--hint=help\" for options)\n"); + } + slurm_conf_unlock(); + + printf("\n"); + spank_print_options (stdout, 6, 30); + + printf("\n" #ifdef HAVE_BG /* Blue gene specific options */ "Blue Gene related options:\n" " -g, --geometry=XxYxZ geometry constraints of the job\n" @@ -2246,8 +2225,8 @@ static void _help(void) " --linux-image=path path to linux image for bluegene block. Default if not set\n" " --mloader-image=path path to mloader image for bluegene block. Default if not set\n" " --ramdisk-image=path path to ramdisk image for bluegene block. Default if not set\n" -"\n" #endif +"\n" "Help options:\n" " -h, --help show this help message\n" " -u, --usage display brief usage message\n" diff --git a/src/sbatch/opt.h b/src/sbatch/opt.h index 52a2feaae..92565efb5 100644 --- a/src/sbatch/opt.h +++ b/src/sbatch/opt.h @@ -1,12 +1,12 @@ /*****************************************************************************\ * opt.h - definitions for srun option processing - * $Id: opt.h 12856 2007-12-19 00:18:44Z jette $ + * $Id: opt.h 13771 2008-04-02 20:03:47Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, * Christopher J. Morrone <morrone2@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
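For reference, the reworked _fullpath() in the hunk above now rewrites the option string in place (taking char **filename and the working directory) instead of returning a freshly allocated copy. A minimal standalone sketch of the same idea, using plain libc allocation rather than SLURM's xstrdup()/xstrcat() helpers; the function and variable names here are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Rewrite *filename as "<cwd>/<filename>" unless it is NULL or already
 * absolute, mirroring the in-place style of the patched _fullpath(). */
static void make_absolute(char **filename, const char *cwd)
{
    char *abs_path;
    size_t len;

    if ((*filename == NULL) || ((*filename)[0] == '/'))
        return;

    len = strlen(cwd) + strlen(*filename) + 2;  /* '/' plus '\0' */
    abs_path = malloc(len);
    if (abs_path == NULL)
        return;                     /* keep the relative name on failure */
    snprintf(abs_path, len, "%s/%s", cwd, *filename);
    free(*filename);
    *filename = abs_path;
}

int main(void)
{
    char *name = strdup("job.out");     /* POSIX strdup; example value */
    make_absolute(&name, "/tmp/work");
    printf("%s\n", name);               /* prints /tmp/work/job.out */
    free(name);
    return 0;
}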
@@ -42,6 +42,7 @@ #define MAX_USERNAME 9 + typedef struct sbatch_options { char *progname; /* argv[0] of this program or */ @@ -63,14 +64,30 @@ typedef struct sbatch_options { int min_nodes; /* --nodes=n, -N n */ int max_nodes; /* --nodes=x-n, -N x-n */ bool nodes_set; /* true if nodes explicitly set */ + int min_sockets_per_node; /* --sockets-per-node=n */ + int max_sockets_per_node; /* --sockets-per-node=x-n */ + int min_cores_per_socket; /* --cores-per-socket=n */ + int max_cores_per_socket; /* --cores-per-socket=x-n */ + int min_threads_per_core; /* --threads-per-core=n */ + int max_threads_per_core; /* --threads-per-core=x-n */ + int ntasks_per_node; /* --ntasks-per-node=n */ + int ntasks_per_socket; /* --ntasks-per-socket=n */ + int ntasks_per_core; /* --ntasks-per-core=n */ + cpu_bind_type_t cpu_bind_type; /* --cpu_bind= */ + bool extra_set; /* true if extra node info explicitly set */ int time_limit; /* --time, -t (int minutes) */ char *time_limit_str; /* --time, -t (string) */ char *partition; /* --partition=n, -p n */ + enum task_dist_states + distribution; /* --distribution=, -m dist */ + uint32_t plane_size; /* lllp distribution -> plane_size for + * when -m plane=<# of lllp per + * plane> */ char *job_name; /* --job-name=, -J name */ unsigned int jobid; /* --jobid=jobid */ bool jobid_set; /* true of jobid explicitly set */ char *mpi_type; /* --mpi=type */ - unsigned int dependency;/* --dependency, -P jobid */ + char *dependency; /* --dependency, -P type:jobid */ int nice; /* --nice */ char *account; /* --account, -U acct_name */ char *comment; /* --comment */ @@ -80,9 +97,12 @@ typedef struct sbatch_options { bool hold; /* --hold, -H */ bool no_kill; /* --no-kill, -k */ - bool no_requeue; /* --no-requeue */ + int requeue; /* --requeue and --no-requeue */ + uint8_t open_mode; /* --open-mode */ + int acctg_freq; /* --acctg-freq=secs */ bool overcommit; /* --overcommit -O */ uint16_t shared; /* --share, -s */ + char *licenses; /* --licenses, -L */ int quiet; int verbose; char *wrap; @@ -92,9 +112,9 @@ typedef struct sbatch_options { int minsockets; /* --minsockets=n */ int mincores; /* --mincores=n */ int minthreads; /* --minthreads=n */ + int task_mem; /* --task-mem=n */ int realmem; /* --mem=n */ long tmpdisk; /* --tmp=n */ - int tasks_per_node; /* --tasks-per-node=n */ char *constraints; /* --constraints=, -C constraint*/ bool contiguous; /* --contiguous */ char *nodelist; /* --nodelist=node1,node2,... */ diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c index 9ad0b2a77..1b46e78dd 100644 --- a/src/sbatch/sbatch.c +++ b/src/sbatch/sbatch.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * sbatch.c - Submit a SLURM batch script. * - * $Id: sbatch.c 13231 2008-02-08 17:16:47Z jette $ + * $Id: sbatch.c 14068 2008-05-19 15:58:22Z jette $ ***************************************************************************** * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
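The new sockets/cores/threads fields added to sbatch_options above feed the formula quoted in the updated help text, "total cpus requested = (N x S x C x T)". A small standalone illustration of that arithmetic; the request values below are made up:

#include <stdio.h>

int main(void)
{
    /* Hypothetical request: 2 nodes, 2 sockets/node, 4 cores/socket,
     * 1 thread/core, matching the --extra-node-info=S[:C[:T]] form. */
    int nodes = 2, sockets_per_node = 2, cores_per_socket = 4,
        threads_per_core = 1;
    int total_cpus = nodes * sockets_per_node * cores_per_socket *
                     threads_per_core;

    printf("total cpus requested = %d\n", total_cpus);  /* 16 */
    return 0;
}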
@@ -41,6 +41,7 @@ #include <slurm/slurm.h> #include "src/common/env.h" +#include "src/common/plugstack.h" #include "src/common/read_config.h" #include "src/common/slurm_rlimits_info.h" #include "src/common/xstring.h" @@ -50,6 +51,8 @@ #define MAX_RETRIES 3 +static void _call_spank_local_user(job_desc_msg_t desc, + submit_response_msg_t *resp); static int fill_job_desc_from_opts(job_desc_msg_t *desc); static void *get_script_buffer(const char *filename, int *size); static void set_prio_process_env(void); @@ -68,6 +71,9 @@ int main(int argc, char *argv[]) int retries = 0; log_init(xbasename(argv[0]), logopt, 0, NULL); + if (spank_init(NULL) < 0) + fatal("Plug-in initialization failed"); + script_name = process_options_first_pass(argc, argv); /* reinit log with new verbosity (if changed by command line) */ if (opt.verbose || opt.quiet) { @@ -115,14 +121,31 @@ int main(int argc, char *argv[]) error(msg); sleep (++retries); } - + _call_spank_local_user(desc, resp); info("Submitted batch job %d", resp->job_id); xfree(desc.script); slurm_free_submit_response_response_msg(resp); - + spank_fini(NULL); return 0; } +static void _call_spank_local_user(job_desc_msg_t desc, + submit_response_msg_t *resp) +{ + struct spank_launcher_job_info info[1]; + + info->uid = desc.user_id; + info->gid = desc.group_id; + info->jobid = resp->job_id; + info->stepid = SLURM_BATCH_SCRIPT; + info->step_layout = NULL; + info->argc = desc.argc; + info->argv = desc.argv; + + if (spank_local_user(info) < 0) + error("spank_local_user: %m"); +} + /* Returns 0 on success, -1 on failure */ static int fill_job_desc_from_opts(job_desc_msg_t *desc) { @@ -140,12 +163,21 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) desc->req_nodes = opt.nodelist; desc->exc_nodes = opt.exc_nodes; desc->partition = opt.partition; - desc->min_nodes = opt.min_nodes; + if (opt.min_nodes) + desc->min_nodes = opt.min_nodes; + if (opt.licenses) + desc->licenses = xstrdup(opt.licenses); if (opt.max_nodes) desc->max_nodes = opt.max_nodes; + if (opt.ntasks_per_node) + desc->ntasks_per_node = opt.ntasks_per_node; desc->user_id = opt.uid; desc->group_id = opt.gid; - desc->dependency = opt.dependency; + if (opt.dependency) + desc->dependency = xstrdup(opt.dependency); + desc->task_dist = opt.distribution; + if (opt.plane_size != NO_VAL) + desc->plane_size = opt.plane_size; if (opt.nice) desc->nice = NICE_OFFSET + opt.nice; desc->mail_type = opt.mail_type; @@ -182,6 +214,7 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) if (opt.ramdiskimage) desc->ramdiskimage = xstrdup(opt.ramdiskimage); + /* job constraints */ if (opt.mincpus > -1) desc->job_min_procs = opt.mincpus; if (opt.minsockets > -1) @@ -195,16 +228,33 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) if (opt.tmpdisk > -1) desc->job_min_tmp_disk = opt.tmpdisk; if (opt.overcommit) { - desc->num_procs = opt.min_nodes; + desc->num_procs = MAX(opt.min_nodes, 1); desc->overcommit = opt.overcommit; } else desc->num_procs = opt.nprocs * opt.cpus_per_task; - if (opt.tasks_per_node > -1) - desc->ntasks_per_node = opt.tasks_per_node; if (opt.nprocs_set) desc->num_tasks = opt.nprocs; if (opt.cpus_set) desc->cpus_per_task = opt.cpus_per_task; + if (opt.ntasks_per_socket > -1) + desc->ntasks_per_socket = opt.ntasks_per_socket; + if (opt.ntasks_per_core > -1) + desc->ntasks_per_core = opt.ntasks_per_core; + + /* node constraints */ + if (opt.min_sockets_per_node > -1) + desc->min_sockets = opt.min_sockets_per_node; + if (opt.max_sockets_per_node > -1) + desc->max_sockets = 
opt.max_sockets_per_node; + if (opt.min_cores_per_socket > -1) + desc->min_cores = opt.min_cores_per_socket; + if (opt.max_cores_per_socket > -1) + desc->max_cores = opt.max_cores_per_socket; + if (opt.min_threads_per_core > -1) + desc->min_threads = opt.min_threads_per_core; + if (opt.max_threads_per_core > -1) + desc->max_threads = opt.max_threads_per_core; + if (opt.no_kill) desc->kill_on_node_fail = 0; if (opt.time_limit != NO_VAL) @@ -232,7 +282,12 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc) desc->in = opt.ifname; desc->out = opt.ofname; desc->work_dir = opt.cwd; - desc->no_requeue = opt.no_requeue; + if (opt.requeue != NO_VAL) + desc->requeue = opt.requeue; + if (opt.open_mode) + desc->open_mode = opt.open_mode; + if (opt.acctg_freq >= 0) + desc->acctg_freq = opt.acctg_freq; return 0; } diff --git a/src/sbcast/Makefile.in b/src/sbcast/Makefile.in index d365b134e..2dc5fa5ce 100644 --- a/src/sbcast/Makefile.in +++ b/src/sbcast/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -48,6 +48,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -77,7 +79,7 @@ sbcast_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la sbcast_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sbcast_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -118,6 +120,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -131,10 +134,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -154,7 +160,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -165,6 +174,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -180,6 +191,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -195,6 +207,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -364,8 +377,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -377,8 +390,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -388,13 +401,12 @@ ctags: CTAGS CTAGS: 
$(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/sbcast/agent.c b/src/sbcast/agent.c index 00bfad469..7b69d71ef 100644 --- a/src/sbcast/agent.c +++ b/src/sbcast/agent.c @@ -5,7 +5,7 @@ * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sbcast/opts.c b/src/sbcast/opts.c index d23c068b0..d0b452584 100644 --- a/src/sbcast/opts.c +++ b/src/sbcast/opts.c @@ -5,7 +5,7 @@ * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sbcast/sbcast.c b/src/sbcast/sbcast.c index 636641107..07f6e6ac2 100644 --- a/src/sbcast/sbcast.c +++ b/src/sbcast/sbcast.c @@ -5,7 +5,7 @@ * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -177,14 +177,15 @@ static ssize_t _get_block(char *buffer, size_t buf_size) /* read and broadcast the file */ static void _bcast_file(void) { - int buf_size, i; - ssize_t size_block, size_read = 0; + int buf_size; + ssize_t size_read = 0; file_bcast_msg_t bcast_msg; - char *buffer[FILE_BLOCKS]; + char *buffer; - /* NOTE: packmem() uses 16 bits to express a block size, - * buf_size must be no larger than 64k - 1 */ - buf_size = MIN((63 * 1024), f_stat.st_size); + if (params.block_size) + buf_size = MIN(params.block_size, f_stat.st_size); + else + buf_size = MIN((512 * 1024), f_stat.st_size); bcast_msg.fname = params.dst_fname; bcast_msg.block_no = 1; @@ -193,11 +194,9 @@ static void _bcast_file(void) bcast_msg.modes = f_stat.st_mode; bcast_msg.uid = f_stat.st_uid; bcast_msg.gid = f_stat.st_gid; - for (i=0; i<FILE_BLOCKS; i++) { - buffer[i] = xmalloc(buf_size); - bcast_msg.block[i] = buffer[i]; - bcast_msg.block_len[i] = 0; - } + buffer = xmalloc(buf_size); + bcast_msg.block = buffer; + bcast_msg.block_len = 0; if (params.preserve) { bcast_msg.atime = f_stat.st_atime; @@ -208,29 +207,18 @@ static void _bcast_file(void) } while (1) { - size_block = 0; - for (i=0; i<FILE_BLOCKS; i++) { - bcast_msg.block_len[i] = - _get_block(buffer[i], buf_size); - debug("block %d, size %u", (bcast_msg.block_no + i), - bcast_msg.block_len[i]); - size_read += bcast_msg.block_len[i]; - if (size_read >= f_stat.st_size) - bcast_msg.last_block = 1; - size_block += bcast_msg.block_len[i]; - if (params.block_size - && (size_block >= params.block_size)) { - for (i++; i<FILE_BLOCKS; i++) - bcast_msg.block_len[i] = 0; - break; - } - } + bcast_msg.block_len = _get_block(buffer, buf_size); + debug("block %d, size %u", bcast_msg.block_no, + bcast_msg.block_len); + size_read += bcast_msg.block_len; + if (size_read >= f_stat.st_size) + bcast_msg.last_block = 1; + send_rpc(&bcast_msg, alloc_resp); if (bcast_msg.last_block) break; /* end of file */ - bcast_msg.block_no += FILE_BLOCKS; + bcast_msg.block_no++; } - for (i=0; i<FILE_BLOCKS; i++) - xfree(buffer[i]); + xfree(buffer); } diff --git a/src/sbcast/sbcast.h b/src/sbcast/sbcast.h index 9a14292be..3560c13f4 100644 --- a/src/sbcast/sbcast.h +++ b/src/sbcast/sbcast.h @@ -5,7 +5,7 @@ * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/scancel/Makefile.in b/src/scancel/Makefile.in index f1825207c..66ffc8f21 100644 --- a/src/scancel/Makefile.in +++ b/src/scancel/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
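The _bcast_file() rewrite above drops the FILE_BLOCKS array in favour of a single buffer that is filled and sent repeatedly, with the block size taken from params.block_size when the user supplies one, or a 512 KB default, capped at the file size. A standalone sketch of that loop shape, with a stub standing in for sbcast's send_rpc(); the file name and reporting are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define DEFAULT_BLOCK (512 * 1024)

/* Stand-in for send_rpc(): just report each block. */
static void send_block(int block_no, size_t len, int last)
{
    printf("block %d, size %zu%s\n", block_no, len, last ? " (last)" : "");
}

int main(void)
{
    const char *fname = "/etc/hostname";    /* any readable file */
    FILE *fp = fopen(fname, "rb");
    long fsize;
    size_t buf_size, len, total = 0;
    char *buffer;
    int block_no = 1, last;

    if (fp == NULL)
        return 1;
    fseek(fp, 0, SEEK_END);
    fsize = ftell(fp);
    rewind(fp);
    if (fsize < 0) {
        fclose(fp);
        return 1;
    }

    /* One buffer, sized like the patched code: default capped at file size */
    buf_size = (fsize < DEFAULT_BLOCK) ? (size_t) fsize : DEFAULT_BLOCK;
    if (buf_size == 0)
        buf_size = 1;
    buffer = malloc(buf_size);
    if (buffer == NULL) {
        fclose(fp);
        return 1;
    }

    while (1) {
        len = fread(buffer, 1, buf_size, fp);
        total += len;
        last = (total >= (size_t) fsize) || (len == 0);
        send_block(block_no, len, last);
        if (last)
            break;                      /* end of file */
        block_no++;
    }

    free(buffer);
    fclose(fp);
    return 0;
}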
@@ -47,6 +47,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -76,7 +78,7 @@ scancel_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la scancel_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(scancel_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -117,6 +119,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -130,10 +133,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -153,7 +159,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -164,6 +173,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -179,6 +190,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -194,6 +206,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -298,8 +311,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -362,8 +375,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy 
= 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -375,8 +388,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -386,13 +399,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/scancel/opt.c b/src/scancel/opt.c index 93c9409f2..0916bf4d0 100644 --- a/src/scancel/opt.c +++ b/src/scancel/opt.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/scancel/scancel.c b/src/scancel/scancel.c index b13b88166..10fb82484 100644 --- a/src/scancel/scancel.c +++ b/src/scancel/scancel.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -229,17 +229,20 @@ static void _cancel_job_id (uint32_t job_id, uint16_t sig) { int error_code = SLURM_SUCCESS, i; + bool sig_set = true; - if (sig == (uint16_t)-1) + if (sig == (uint16_t)-1) { sig = SIGKILL; + sig_set = false; + } for (i=0; i<MAX_CANCEL_RETRY; i++) { - if (sig == SIGKILL) + if (!sig_set) verbose("Terminating job %u", job_id); else verbose("Signal %u to job %u", sig, job_id); - if ((sig == SIGKILL) || opt.ctld) { + if ((!sig_set) || opt.ctld) { error_code = slurm_kill_job (job_id, sig, (uint16_t)opt.batch); } else { diff --git a/src/scancel/scancel.h b/src/scancel/scancel.h index f3a020c9d..333b78b85 100644 --- a/src/scancel/scancel.h +++ b/src/scancel/scancel.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette<jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/scontrol/Makefile.in b/src/scontrol/Makefile.in index 267cab5a8..a872200ea 100644 --- a/src/scontrol/Makefile.in +++ b/src/scontrol/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. 
+# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -77,7 +79,7 @@ scontrol_DEPENDENCIES = $(convenience_libs) $(am__DEPENDENCIES_1) scontrol_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(scontrol_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -117,6 +119,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -130,10 +133,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -153,7 +159,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -164,6 +173,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -179,6 +190,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -194,6 +206,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -310,8 +323,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' 
'$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -379,8 +392,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -392,8 +405,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -403,13 +416,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/scontrol/info_job.c b/src/scontrol/info_job.c index fc99620c9..488ae8b68 100644 --- a/src/scontrol/info_job.c +++ b/src/scontrol/info_job.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/scontrol/info_node.c b/src/scontrol/info_node.c index f4706c571..78acfbc05 100644 --- a/src/scontrol/info_node.c +++ b/src/scontrol/info_node.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/scontrol/info_part.c b/src/scontrol/info_part.c index baa6c4454..8c9e06069 100644 --- a/src/scontrol/info_part.c +++ b/src/scontrol/info_part.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c index 86fc0095c..5ffd7091c 100644 --- a/src/scontrol/scontrol.c +++ b/src/scontrol/scontrol.c @@ -5,7 +5,7 @@ * Copyright (C) 2002-2007 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -572,7 +572,7 @@ _process_command (int argc, char *argv[]) argv[0]); } else { - error_code =scontrol_checkpoint(argv[1], argv[2]); + error_code = scontrol_checkpoint(argv[1], argv[2]); if (error_code) { exit_code = 1; if (quiet_flag != 1) @@ -594,7 +594,7 @@ _process_command (int argc, char *argv[]) "too few arguments for keyword:%s\n", argv[0]); } else { - error_code =scontrol_requeue(argv[1]); + error_code = scontrol_requeue(argv[1]); if (error_code) { exit_code = 1; if (quiet_flag != 1) @@ -627,6 +627,51 @@ _process_command (int argc, char *argv[]) } } } + else if (strncasecmp (argv[0], "setdebug", 4) == 0) { + if (argc > 2) { + exit_code = 1; + if (quiet_flag != 1) + fprintf(stderr, "too many arguments for keyword:%s\n", + argv[0]); + } else if (argc < 2) { + exit_code = 1; + if (quiet_flag != 1) + fprintf(stderr, "too few arguments for keyword:%s\n", + argv[0]); + } else { + int level = -1; + char *endptr; + char *levels[] = { + "quiet", "fatal", "error", "info", "verbose", + "debug", "debug2", "debug3", "debug4", "debug5", NULL}; + int index = 0; + while (levels[index]) { + if (strcasecmp(argv[1], levels[index]) == 0) { + level = index; + break; + } + index ++; + } + if (level == -1) { + level = (int)strtoul (argv[1], &endptr, 10); /* effective levels: 0 - 9 */ + if (*endptr != '\0' || level > 9) { + level = -1; + exit_code = 1; + if (quiet_flag != 1) + fprintf(stderr, "invalid debug level: %s\n", + argv[1]); + } + } + if (level != -1) { + error_code = slurm_set_debug_level(level); + if (error_code) { + exit_code = 1; + if (quiet_flag != 1) + slurm_perror ("slurm_set_debug_level error"); + } + } + } + } else if (strncasecmp (argv[0], "show", 3) == 0) { if (argc > 3) { exit_code = 1; @@ -773,6 +818,17 @@ _process_command (int argc, char *argv[]) argc <= 2 ? NULL : argv[2]); } } + else if (strncasecmp (argv[0], "notify", 6) == 0) { + if (argc < 3) { + exit_code = 1; + fprintf (stderr, + "too few arguments for keyword:%s\n", + argv[0]); + } else if (scontrol_job_notify(argc-1, &argv[1])) { + exit_code = 1; + slurm_perror("job notify failure"); + } + } else { exit_code = 1; fprintf (stderr, "invalid keyword: %s\n", argv[0]); @@ -996,6 +1052,7 @@ scontrol [<OPTION>] [<COMMAND>] \n\ scontrol is ran on, and only for those \n\ processes spawned by SLURM and their \n\ descendants) \n\ + notify <job_id> msg send message to specified job \n\ oneliner report output one record per line. \n\ pidinfo <pid> return slurm job information for given pid. \n\ ping print status of slurmctld daemons. \n\ @@ -1003,11 +1060,13 @@ scontrol [<OPTION>] [<COMMAND>] \n\ quit terminate this command. \n\ reconfigure re-read configuration files. \n\ requeue <job_id> re-queue a batch job \n\ + setdebug <LEVEL> reset slurmctld debug level \n\ show <ENTITY> [<ID>] display state of identified entity, default \n\ is all records. \n\ shutdown shutdown slurm controller. \n\ suspend <job_id> susend specified job \n\ resume <job_id> resume previously suspended job \n\ + setdebug <level> set slurmctld debug level \n\ update <SPECIFICATIONS> update job, node, partition, or bluegene \n\ block/subbp configuration \n\ verbose enable detailed logging. 
\n\ @@ -1025,6 +1084,10 @@ scontrol [<OPTION>] [<COMMAND>] \n\ absolute pathname of a file (with leading '/' containing host names \n\ either separated by commas or new-lines \n\ \n\ + <LEVEL> may be an integer value like SlurmctldDebug in the slurm.conf \n\ + file or the name of the most detailed errors to report (e.g. \"info\",\n\ + \"verbose\", \"debug\", \"debug2\", etc.). \n\ + \n\ Node names may be specified using simple range expressions, \n\ (e.g. \"lx[10-20]\" corresponsds to lx10, lx11, lx12, ...) \n\ The job step id is the job id followed by a period and the step id. \n\ diff --git a/src/scontrol/scontrol.h b/src/scontrol/scontrol.h index f943382c3..3fd1d2897 100644 --- a/src/scontrol/scontrol.h +++ b/src/scontrol/scontrol.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -99,6 +99,7 @@ extern int quiet_flag; /* quiet=1, verbose=-1, normal=0 */ extern int scontrol_checkpoint(char *op, char *job_step_id_str); extern int scontrol_encode_hostlist(char *hostlist); +extern int scontrol_job_notify(int argc, char *argv[]); extern int scontrol_load_jobs (job_info_msg_t ** job_buffer_pptr); extern int scontrol_load_nodes (node_info_msg_t ** node_buffer_pptr, uint16_t show_flags); diff --git a/src/scontrol/update_job.c b/src/scontrol/update_job.c index 281c27b1a..49ae2df8a 100644 --- a/src/scontrol/update_job.c +++ b/src/scontrol/update_job.c @@ -1,10 +1,11 @@ /*****************************************************************************\ * update_job.c - update job functions for scontrol. ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
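The new "setdebug" handler above accepts either a level name or a bare integer in the range 0 to 9. A minimal standalone version of that parsing step; the name table mirrors the one in the hunk, everything else is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>    /* strcasecmp */

/* Return 0-9 for a recognised level, -1 for invalid input. */
static int parse_debug_level(const char *arg)
{
    static const char *levels[] = {
        "quiet", "fatal", "error", "info", "verbose",
        "debug", "debug2", "debug3", "debug4", "debug5", NULL };
    char *endptr;
    long level;
    int i;

    for (i = 0; levels[i]; i++) {
        if (strcasecmp(arg, levels[i]) == 0)
            return i;
    }
    /* Fall back to a plain number, as the scontrol code does. */
    level = strtol(arg, &endptr, 10);
    if ((*arg == '\0') || (*endptr != '\0') || (level < 0) || (level > 9))
        return -1;
    return (int) level;
}

int main(int argc, char *argv[])
{
    const char *arg = (argc > 1) ? argv[1] : "debug2";
    printf("%s -> %d\n", arg, parse_debug_level(arg));  /* debug2 -> 6 */
    return 0;
}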
@@ -214,13 +215,13 @@ scontrol_update_job (int argc, char *argv[]) update_cnt++; } else if (strncasecmp(argv[i], "TimeLimit=", 10) == 0) { - if ((strcasecmp(&argv[i][10], "UNLIMITED") == 0) || - (strcasecmp(&argv[i][10], "INFINITE") == 0)) - job_msg.time_limit = INFINITE; - else - job_msg.time_limit = - (uint32_t) strtol(&argv[i][10], - (char **) NULL, 10); + int time_limit = time_str2mins(&argv[i][10]); + if ((time_limit < 0) && (time_limit != INFINITE)) { + error("Invalid TimeLimit value"); + exit_code = 1; + return 0; + } + job_msg.time_limit = time_limit; update_cnt++; } else if (strncasecmp(argv[i], "Priority=", 9) == 0) { @@ -251,6 +252,12 @@ scontrol_update_job (int argc, char *argv[]) (char **) NULL, 10); update_cnt++; } + else if (strncasecmp(argv[i], "Requeue=", 8) == 0) { + job_msg.requeue = + (uint16_t) strtol(&argv[i][8], + (char **) NULL, 10); + update_cnt++; + } else if ((strncasecmp(argv[i], "MinNodes=", 9) == 0) || (strncasecmp(argv[i], "ReqNodes=", 9) == 0)) { char *tmp; @@ -376,9 +383,7 @@ scontrol_update_job (int argc, char *argv[]) update_cnt++; } else if (strncasecmp(argv[i], "Dependency=", 11) == 0) { - job_msg.dependency = - (uint32_t) strtol(&argv[i][11], - (char **) NULL, 10); + job_msg.dependency = &argv[i][11]; update_cnt++; } #ifdef HAVE_BG @@ -435,6 +440,10 @@ scontrol_update_job (int argc, char *argv[]) update_cnt++; } #endif + else if (strncasecmp(argv[i], "Licenses=", 9) == 0) { + job_msg.licenses = &argv[i][9]; + update_cnt++; + } else if (strncasecmp(argv[i], "StartTime=", 10) == 0) { job_msg.begin_time = parse_time(&argv[i][10]); update_cnt++; @@ -458,3 +467,35 @@ scontrol_update_job (int argc, char *argv[]) else return 0; } + +/* + * Send message to stdout of specified job + * argv[0] == jobid + * argv[1]++ the message + */ +extern int +scontrol_job_notify(int argc, char *argv[]) +{ + int i; + uint32_t job_id; + char message[256]; + + job_id = atoi(argv[0]); + if (job_id <= 0) { + fprintf(stderr, "Invalid job_id %s", argv[0]); + return 1; + } + + message[0] = '\0'; + for (i=1; i<argc; i++) { + if (i > 1) + strncat(message, " ", sizeof(message)); + strncat(message, argv[i], sizeof(message)); + } + + if (slurm_notify_job(job_id, message)) + return slurm_get_errno (); + else + return 0; +} + diff --git a/src/scontrol/update_node.c b/src/scontrol/update_node.c index fcd347790..2623cf5b3 100644 --- a/src/scontrol/update_node.c +++ b/src/scontrol/update_node.c @@ -1,10 +1,10 @@ /*****************************************************************************\ * update_node.c - node update function for scontrol. ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
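scontrol_job_notify() above joins the remaining argv words into a fixed 256-byte message using strncat(). A standalone sketch of the same join written with an explicit running offset, which makes the truncation bound easier to follow; the buffer size and the sample words are illustrative, not taken from the patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *words[] = { "42", "disk", "quota", "exceeded" }; /* argv-style */
    char message[256];
    size_t off = 0;
    int i;

    message[0] = '\0';
    /* Skip words[0] (the job id); join the rest with single spaces. */
    for (i = 1; i < 4; i++) {
        int n = snprintf(message + off, sizeof(message) - off,
                         "%s%s", (i > 1) ? " " : "", words[i]);
        if (n < 0 || (size_t) n >= sizeof(message) - off)
            break;                  /* message would be truncated */
        off += (size_t) n;
    }
    printf("notify: %s\n", message);    /* notify: disk quota exceeded */
    return 0;
}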
@@ -103,6 +103,10 @@ scontrol_update_node (int argc, char *argv[]) node_msg.node_state = NODE_STATE_DRAIN; update_cnt++; } + else if (strncasecmp(argv[i], "State=FAIL", 10) == 0) { + node_msg.node_state = NODE_STATE_FAIL; + update_cnt++; + } else if (strncasecmp(argv[i], "State=RES", 9) == 0) { node_msg.node_state = NODE_RESUME; update_cnt++; @@ -121,7 +125,7 @@ scontrol_update_node (int argc, char *argv[]) argv[i]); fprintf (stderr, "Request aborted\n"); fprintf (stderr, "Valid states are: "); - fprintf (stderr, "NoResp DRAIN RESUME "); + fprintf (stderr, "NoResp DRAIN FAIL RESUME "); for (k = 0; k < NODE_STATE_END; k++) { fprintf (stderr, "%s ", node_state_string(k)); @@ -141,8 +145,9 @@ scontrol_update_node (int argc, char *argv[]) } } - if ((node_msg.node_state == NODE_STATE_DRAIN) && - (node_msg.reason == NULL)) { + if (((node_msg.node_state == NODE_STATE_DRAIN) + || (node_msg.node_state == NODE_STATE_FAIL)) + && (node_msg.reason == NULL)) { fprintf (stderr, "You must specify a reason when DRAINING a " "node\nRequest aborted\n"); goto done; diff --git a/src/scontrol/update_part.c b/src/scontrol/update_part.c index 4b21a8542..6ec952c92 100644 --- a/src/scontrol/update_part.c +++ b/src/scontrol/update_part.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -35,7 +35,8 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ -#include "scontrol.h" +#include "src/common/proc_args.h" +#include "src/scontrol/scontrol.h" /* @@ -49,7 +50,7 @@ extern int scontrol_update_part (int argc, char *argv[]) { - int i, update_cnt = 0; + int i, min, max, update_cnt = 0; update_part_msg_t part_msg; slurm_init_part_desc_msg ( &part_msg ); @@ -57,29 +58,32 @@ scontrol_update_part (int argc, char *argv[]) if (strncasecmp(argv[i], "PartitionName=", 14) == 0) part_msg.name = &argv[i][14]; else if (strncasecmp(argv[i], "MaxTime=", 8) == 0) { - if ((strcasecmp(&argv[i][8],"UNLIMITED") == 0) || - (strcasecmp(&argv[i][8],"INFINITE") == 0)) - part_msg.max_time = INFINITE; - else - part_msg.max_time = - (uint32_t) strtol(&argv[i][8], - (char **) NULL, 10); + int max_time = time_str2mins(&argv[i][8]); + if ((max_time < 0) && (max_time != INFINITE)) { + exit_code = 1; + error("Invalid input %s", argv[i]); + return 0; + } + part_msg.max_time = max_time; update_cnt++; } else if (strncasecmp(argv[i], "MaxNodes=", 9) == 0) { if ((strcasecmp(&argv[i][9],"UNLIMITED") == 0) || (strcasecmp(&argv[i][8],"INFINITE") == 0)) part_msg.max_nodes = (uint32_t) INFINITE; - else - part_msg.max_nodes = - (uint32_t) strtol(&argv[i][9], - (char **) NULL, 10); + else { + min = 1; + get_resource_arg_range(&argv[i][9], + "MaxNodes", &min, &max, true); + part_msg.max_nodes = min; + } update_cnt++; } else if (strncasecmp(argv[i], "MinNodes=", 9) == 0) { - part_msg.min_nodes = - (uint32_t) strtol(&argv[i][9], - (char **) NULL, 10); + min = 1; + get_resource_arg_range(&argv[i][9], + "MinNodes", &min, &max, true); + part_msg.min_nodes = min; update_cnt++; } else if (strncasecmp(argv[i], "Default=", 8) == 0) { @@ -89,10 +93,9 @@ scontrol_update_part (int argc, char *argv[]) part_msg.default_part = 1; else { exit_code = 1; - fprintf (stderr, "Invalid 
input: %s\n", - argv[i]); - fprintf (stderr, "Acceptable Default values " - "are YES and NO\n"); + error("Invalid input: %s", argv[i]); + error("Acceptable Default values " + "are YES and NO"); return 0; } update_cnt++; @@ -104,10 +107,9 @@ scontrol_update_part (int argc, char *argv[]) part_msg.hidden = 1; else { exit_code = 1; - fprintf (stderr, "Invalid input: %s\n", - argv[i]); - fprintf (stderr, "Acceptable Hidden values " - "are YES and NO\n"); + error("Invalid input: %s", argv[i]); + error("Acceptable Hidden values " + "are YES and NO"); return 0; } update_cnt++; @@ -119,33 +121,43 @@ scontrol_update_part (int argc, char *argv[]) part_msg.root_only = 1; else { exit_code = 1; - fprintf (stderr, "Invalid input: %s\n", - argv[i]); - fprintf (stderr, "Acceptable RootOnly values " - "are YES and NO\n"); + error("Invalid input: %s", argv[i]); + error("Acceptable RootOnly values " + "are YES and NO"); return 0; } update_cnt++; } else if (strncasecmp(argv[i], "Shared=", 7) == 0) { - if (strcasecmp(&argv[i][7], "NO") == 0) - part_msg.shared = SHARED_NO; - else if (strcasecmp(&argv[i][7], "YES") == 0) - part_msg.shared = SHARED_YES; - else if (strcasecmp(&argv[i][7], "EXCLUSIVE") == 0) - part_msg.shared = SHARED_EXCLUSIVE; - else if (strcasecmp(&argv[i][7], "FORCE") == 0) - part_msg.shared = SHARED_FORCE; - else { + if (strncasecmp(&argv[i][7], "NO", 2) == 0) { + part_msg.max_share = 1; + } else if (strncasecmp(&argv[i][7], "EXCLUSIVE", 9) == 0) { + part_msg.max_share = 0; + } else if (strncasecmp(&argv[i][7], "YES:", 4) == 0) { + part_msg.max_share = (uint16_t) strtol(&argv[i][11], + (char **) NULL, 10); + } else if (strncasecmp(&argv[i][7], "YES", 3) == 0) { + part_msg.max_share = (uint16_t) 4; + } else if (strncasecmp(&argv[i][7], "FORCE:", 6) == 0) { + part_msg.max_share = (uint16_t) strtol(&argv[i][13], + (char **) NULL, 10) | SHARED_FORCE; + } else if (strncasecmp(&argv[i][7], "FORCE", 5) == 0) { + part_msg.max_share = (uint16_t) 4 | + SHARED_FORCE; + } else { exit_code = 1; - fprintf (stderr, "Invalid input: %s\n", - argv[i]); - fprintf (stderr, "Acceptable Shared values " - "are YES, NO and FORCE\n"); + error("Invalid input: %s", argv[i]); + error("Acceptable Shared values are " + "NO, EXCLUSIVE, YES:#, and FORCE:#"); return 0; } update_cnt++; } + else if (strncasecmp(argv[i], "Priority=", 9) == 0) { + part_msg.priority = (uint16_t) strtol(&argv[i][9], + (char **) NULL, 10); + update_cnt++; + } else if (strncasecmp(argv[i], "State=", 6) == 0) { if (strcasecmp(&argv[i][6], "DOWN") == 0) part_msg.state_up = 0; @@ -153,10 +165,9 @@ scontrol_update_part (int argc, char *argv[]) part_msg.state_up = 1; else { exit_code = 1; - fprintf (stderr, "Invalid input: %s\n", - argv[i]); - fprintf (stderr, "Acceptable State values " - "are UP and DOWN\n"); + error("Invalid input: %s", argv[i]); + error("Acceptable State values " + "are UP and DOWN"); return 0; } update_cnt++; @@ -171,15 +182,15 @@ scontrol_update_part (int argc, char *argv[]) } else { exit_code = 1; - fprintf (stderr, "Invalid input: %s\n", argv[i]); - fprintf (stderr, "Request aborted\n"); + error("Invalid input: %s", argv[i]); + error("Request aborted"); return 0; } } if (update_cnt == 0) { exit_code = 1; - fprintf (stderr, "No changes specified\n"); + error("No changes specified"); return 0; } diff --git a/src/sinfo/Makefile.in b/src/sinfo/Makefile.in index 6e9edb758..07e64aec7 100644 --- a/src/sinfo/Makefile.in +++ b/src/sinfo/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. 
+# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -48,6 +48,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -78,7 +80,7 @@ sinfo_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la sinfo_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sinfo_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -300,8 +313,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' 
'$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -366,8 +379,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -379,8 +392,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -390,13 +403,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/sinfo/opts.c b/src/sinfo/opts.c index a01a349d4..1313dfbb6 100644 --- a/src/sinfo/opts.c +++ b/src/sinfo/opts.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -422,7 +422,8 @@ _parse_format( char* format ) { int field_size; bool right_justify; - char *prefix, *suffix, *token, *tmp_char, *tmp_format; + char *prefix = NULL, *suffix = NULL, *token = NULL, + *tmp_char = NULL, *tmp_format = NULL; char field[1]; if (format == NULL) { @@ -517,6 +518,12 @@ _parse_format( char* format ) field_size, right_justify, suffix ); + } else if (field[0] == 'p') { + params.match_flags.priority_flag = true; + format_add_priority( params.format_list, + field_size, + right_justify, + suffix ); } else if (field[0] == 'P') { params.match_flags.partition_flag = true; format_add_partition( params.format_list, @@ -692,6 +699,8 @@ void _print_options( void ) "true" : "false"); printf("partition_flag = %s\n", params.match_flags.partition_flag ? "true" : "false"); + printf("priority_flag = %s\n", params.match_flags.priority_flag ? + "true" : "false"); printf("reason_flag = %s\n", params.match_flags.reason_flag ? "true" : "false"); printf("root_flag = %s\n", params.match_flags.root_flag ? 
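Editor's note: the opts.c hunk above introduces a new 'p' format character so partition priority can be printed by sinfo; print.h (below) maps it onto _print_priority through the same format_add_* macro pattern as the existing columns, and selecting it also sets match_flags.priority_flag so grouping (sinfo.c) and the sort list (sort.c) honor the new field. A toy, self-contained model of that character-to-printer dispatch; the names and data below are illustrative, not SLURM internals:

#include <stdio.h>

struct part { const char *name; unsigned priority; };

static void print_partition(const struct part *p) { printf("%-10s ", p->name); }
static void print_priority (const struct part *p) { printf("%6u ",  p->priority); }

typedef void (*col_fn)(const struct part *);

static col_fn lookup(char field)
{
        switch (field) {
        case 'P': return print_partition;
        case 'p': return print_priority;       /* the field added by this patch */
        default:  return NULL;
        }
}

int main(void)
{
        struct part debug = { "debug", 10 };
        const char *fmt = "Pp";                 /* stands in for -o "%P %p" */

        for (const char *c = fmt; *c; c++) {
                col_fn fn = lookup(*c);
                if (fn)
                        fn(&debug);
        }
        printf("\n");
        return 0;
}

With the real code, a format string such as "%P %p" would print each partition name next to its priority.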
diff --git a/src/sinfo/print.c b/src/sinfo/print.c index 6e4704c24..0cd867ebf 100644 --- a/src/sinfo/print.c +++ b/src/sinfo/print.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * print.c - sinfo print job functions ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov> and * Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -170,7 +170,8 @@ static int _print_secs(long time, int width, bool right, bool cut_output) } static int -_build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max, bool range) +_build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max, + bool range) { char tmp_min[8]; char tmp_max[8]; @@ -191,7 +192,8 @@ _build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max, } static int -_build_min_max_32_string(char *buffer, int buf_size, uint32_t min, uint32_t max, bool range) +_build_min_max_32_string(char *buffer, int buf_size, uint32_t min, uint32_t max, + bool range) { char tmp_min[8]; char tmp_max[8]; @@ -631,6 +633,24 @@ int _print_prefix(sinfo_data_t * job, int width, bool right_justify, return SLURM_SUCCESS; } +int _print_priority(sinfo_data_t * sinfo_data, int width, + bool right_justify, char *suffix) +{ + char id[FORMAT_STRING_SIZE]; + + if (sinfo_data) { + _build_min_max_16_string(id, FORMAT_STRING_SIZE, + sinfo_data->part_info->priority, + sinfo_data->part_info->priority, true); + _print_str(id, width, right_justify, true); + } else + _print_str("PRIORITY", width, right_justify, true); + + if (suffix) + printf("%s", suffix); + return SLURM_SUCCESS; +} + int _print_reason(sinfo_data_t * sinfo_data, int width, bool right_justify, char *suffix) { @@ -668,15 +688,20 @@ int _print_root(sinfo_data_t * sinfo_data, int width, int _print_share(sinfo_data_t * sinfo_data, int width, bool right_justify, char *suffix) { + char id[FORMAT_STRING_SIZE]; + if (sinfo_data) { - if (sinfo_data->part_info == NULL) - _print_str("n/a", width, right_justify, true); - else if (sinfo_data->part_info->shared > 1) - _print_str("force", width, right_justify, true); - else if (sinfo_data->part_info->shared) - _print_str("yes", width, right_justify, true); + bool force = sinfo_data->part_info->max_share & SHARED_FORCE; + uint16_t val = sinfo_data->part_info->max_share & (~SHARED_FORCE); + if (val == 0) + snprintf(id, sizeof(id), "EXCLUSIVE"); + else if (force) + snprintf(id, sizeof(id), "FORCE:%u", val); + else if (val == 1) + snprintf(id, sizeof(id), "NO"); else - _print_str("no", width, right_justify, true); + snprintf(id, sizeof(id), "YES:%u", val); + _print_str(id, width, right_justify, true); } else _print_str("SHARE", width, right_justify, true); diff --git a/src/sinfo/print.h b/src/sinfo/print.h index 989f25b79..a7d01c8d5 100644 --- a/src/sinfo/print.h +++ b/src/sinfo/print.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -100,6 +100,8 @@ int print_sinfo_list(List sinfo_list); format_add_function(list,wid,right,suffix,_print_partition) #define format_add_prefix(list,wid,right,suffix) \ format_add_function(list,wid,right,suffix,_print_prefix) +#define format_add_priority(list,wid,right,suffix) \ + format_add_function(list,wid,right,suffix,_print_priority) #define format_add_reason(list,wid,right,suffix) \ format_add_function(list,wid,right,suffix,_print_reason) #define format_add_root(list,wid,right,prefix) \ @@ -155,6 +157,8 @@ int _print_partition(sinfo_data_t * sinfo_data, int width, bool right_justify, char *suffix); int _print_prefix(sinfo_data_t * sinfo_data, int width, bool right_justify, char *suffix); +int _print_priority(sinfo_data_t * sinfo_data, int width, + bool right_justify, char *suffix); int _print_reason(sinfo_data_t * sinfo_data, int width, bool right_justify, char *suffix); int _print_root(sinfo_data_t * sinfo_data, int width, diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c index 02df66be0..42239d1ad 100644 --- a/src/sinfo/sinfo.c +++ b/src/sinfo/sinfo.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * sinfo.c - Report overall state the system * - * $Id: sinfo.c 13835 2008-04-09 18:57:18Z jette $ + * $Id: sinfo.c 13929 2008-04-23 16:11:29Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -552,9 +552,13 @@ static bool _match_node_data(sinfo_data_t *sinfo_ptr, (_strcmp(node_ptr->reason, sinfo_ptr->reason))) return false; - if (params.match_flags.state_flag && - (node_ptr->node_state != sinfo_ptr->node_state)) - return false; + if (params.match_flags.state_flag) { + char *state1, *state2; + state1 = node_state_string(node_ptr->node_state); + state2 = node_state_string(sinfo_ptr->node_state); + if (strcmp(state1, state2)) + return false; + } /* If no need to exactly match sizes, just return here * otherwise check cpus, disk, memory and weigth individually */ @@ -629,7 +633,11 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr, return false; if (params.match_flags.share_flag && - (part_ptr->shared != sinfo_ptr->part_info->shared)) + (part_ptr->max_share != sinfo_ptr->part_info->max_share)) + return false; + + if (params.match_flags.priority_flag && + (part_ptr->priority != sinfo_ptr->part_info->priority)) return false; return true; diff --git a/src/sinfo/sinfo.h b/src/sinfo/sinfo.h index cae8bf086..fbae0bded 100644 --- a/src/sinfo/sinfo.h +++ b/src/sinfo/sinfo.h @@ -1,12 +1,12 @@ /****************************************************************************\ * sinfo.h - definitions used for sinfo data functions * - * $Id: sinfo.h 10732 2007-01-10 18:39:14Z da $ + * $Id: sinfo.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
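Editor's note: in the sinfo.c hunk above, _match_node_data stops comparing raw node_state words and instead compares the strings returned by node_state_string(), so two nodes land on one sinfo line whenever their states render identically, even if the underlying bitmasks differ. A small self-contained illustration of the difference between the two comparisons, with invented state and flag values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define STATE_IDLE   2          /* illustrative base state */
#define FLAG_EXTRA   0x0100     /* illustrative flag bit that does not affect the name */

static const char *state_string(uint16_t state)
{
        switch (state & 0x00ff) {
        case STATE_IDLE: return "idle";
        default:         return "unknown";
        }
}

int main(void)
{
        uint16_t a = STATE_IDLE;
        uint16_t b = STATE_IDLE | FLAG_EXTRA;

        /* Comparing the raw words keeps these nodes on separate lines... */
        printf("numeric match: %s\n", (a == b) ? "yes" : "no");
        /* ...comparing the rendered strings groups them, as the patch now does. */
        printf("string  match: %s\n",
               strcmp(state_string(a), state_string(b)) ? "no" : "yes");
        return 0;
}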
* Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -105,7 +105,7 @@ typedef struct { hostlist_t ionodes; #endif /* part_info contains partition, avail, max_time, job_size, - * root, share, groups */ + * root, share, groups, priority */ partition_info_t* part_info; uint16_t part_inx; } sinfo_data_t; @@ -126,6 +126,7 @@ struct sinfo_match_flags { bool max_time_flag; bool memory_flag; bool partition_flag; + bool priority_flag; bool reason_flag; bool root_flag; bool share_flag; diff --git a/src/sinfo/sort.c b/src/sinfo/sort.c index 08303757f..d49b81a2a 100644 --- a/src/sinfo/sort.c +++ b/src/sinfo/sort.c @@ -5,7 +5,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, * Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -63,6 +63,7 @@ static int _sort_by_node_list(void *void1, void *void2); static int _sort_by_nodes_ai(void *void1, void *void2); static int _sort_by_nodes(void *void1, void *void2); static int _sort_by_partition(void *void1, void *void2); +static int _sort_by_priority(void *void1, void *void2); static int _sort_by_reason(void *void1, void *void2); static int _sort_by_reason_time(void *void1, void *void2); static int _sort_by_root(void *void1, void *void2); @@ -122,6 +123,8 @@ void sort_sinfo_list(List sinfo_list) list_sort(sinfo_list, _sort_by_memory); else if (params.sort[i] == 'N') list_sort(sinfo_list, _sort_by_node_list); + else if (params.sort[i] == 'p') + list_sort(sinfo_list, _sort_by_priority); else if (params.sort[i] == 'P') list_sort(sinfo_list, _sort_by_partition); else if (params.sort[i] == 'r') @@ -527,9 +530,27 @@ static int _sort_by_share(void *void1, void *void2) int val1 = 0, val2 = 0; if (sinfo1->part_info) - val1 = sinfo1->part_info->shared; + val1 = sinfo1->part_info->max_share; if (sinfo2->part_info) - val2 = sinfo2->part_info->shared; + val2 = sinfo2->part_info->max_share; + diff = val1 - val2; + + if (reverse_order) + diff = -diff; + return diff; +} + +static int _sort_by_priority(void *void1, void *void2) +{ + int diff; + sinfo_data_t *sinfo1 = (sinfo_data_t *) void1; + sinfo_data_t *sinfo2 = (sinfo_data_t *) void2; + int val1 = 0, val2 = 0; + + if (sinfo1->part_info) + val1 = sinfo1->part_info->priority; + if (sinfo2->part_info) + val2 = sinfo2->part_info->priority; diff = val1 - val2; if (reverse_order) diff --git a/src/slaunch/Makefile.am b/src/slaunch/Makefile.am deleted file mode 100644 index 309c40008..000000000 --- a/src/slaunch/Makefile.am +++ /dev/null @@ -1,47 +0,0 @@ -# - -AUTOMAKE_OPTIONS = foreign - -INCLUDES = -I$(top_srcdir) - -bin_PROGRAMS = slaunch - -slaunch_SOURCES = \ - slaunch.c slaunch.h \ - opt.c opt.h \ - attach.h \ - attach.c \ - fname.c \ - fname.h \ - sigstr.c \ - sigstr.h \ - core-format.c \ - core-format.h \ - multi_prog.c multi_prog.h \ - slaunch.wrapper.c - -convenience_libs = $(top_builddir)/src/api/libslurmhelper.la - -slaunch_LDADD = \ - $(convenience_libs) - -slaunch_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) - -force: -$(convenience_libs) : force - @cd `dirname $@` && $(MAKE) `basename $@` - -# debugging information is required for symbols in the attach -# module so that a debugger can attach 
to spawned tasks -attach.o : attach.c - $(COMPILE) -c -g -o attach.o $(srcdir)/attach.c - -install-exec-local: - umask 022; \ - if [ -x /usr/lib/rpm/debugedit ]; then \ - srcdir=`cd $(top_srcdir) && pwd`; \ - /usr/lib/rpm/debugedit -b $$srcdir -d $(pkglibdir) \ - $(DESTDIR)$(bindir)/slaunch; fi; \ - mkdir -p -m 755 $(DESTDIR)$(pkglibdir)/src/slaunch - $(INSTALL) -m 644 $(top_srcdir)/src/slaunch/slaunch.wrapper.c \ - $(DESTDIR)$(pkglibdir)/src/slaunch/slaunch.wrapper.c diff --git a/src/slaunch/attach.c b/src/slaunch/attach.c deleted file mode 100644 index 0a602fbd4..000000000 --- a/src/slaunch/attach.c +++ /dev/null @@ -1,57 +0,0 @@ -/*****************************************************************************\ - * attach.c - Definitions needed for parallel debugger - * $Id: attach.c 10574 2006-12-15 23:38:29Z jette $ - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include "src/common/log.h" - -#include "src/slaunch/attach.h" - -/* - * Instantiate extern variables from attach.h - */ -MPIR_PROCDESC *MPIR_proctable; -int MPIR_proctable_size; -VOLATILE int MPIR_debug_state; -VOLATILE int MPIR_debug_gate; -int MPIR_being_debugged; -int MPIR_i_am_starter; -int MPIR_acquired_pre_main; -char *totalview_jobid; - -void MPIR_Breakpoint(void) -{ - /* - * This just notifies parallel debugger that some event of - * interest occurred. - */ - debug("In MPIR_Breakpoint"); -} - - diff --git a/src/slaunch/core-format.c b/src/slaunch/core-format.c deleted file mode 100644 index 6feea50ec..000000000 --- a/src/slaunch/core-format.c +++ /dev/null @@ -1,152 +0,0 @@ -/*****************************************************************************\ - * src/srun/core-format.c - Change corefile characteristics for job - * $Id: core-format.c 10574 2006-12-15 23:38:29Z jette $ - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. 
- * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <stdio.h> -#include <unistd.h> -#include <string.h> -#include <stdlib.h> -#include <sys/types.h> -#include <sys/stat.h> - -//#include "src/common/env.h" -#include "src/slaunch/core-format.h" -#include "src/common/log.h" -#include "src/common/env.h" - -#define CORE_NORMAL 0 -#define CORE_LIGHT 1 /* Default lightweight corefile from liblwcf */ -#define CORE_LCB 2 /* PTOOLS Lightweight Corefile Browser (LCB) compliant*/ -#define CORE_LIST 3 /* List core format types to stdout and exit */ -#define LIB_LIGHT "liblwcf-preload.so" - -struct core_format_info { - core_format_t type; - const char *name; - const char *descr; -}; - -/* - * Supported types for core=%s - */ -struct core_format_info core_types[] = { - { CORE_NORMAL, - "normal", - "Default full corefile (do nothing)" - }, - { CORE_LIGHT, - "light", - "liblwcf default lightweight corefile format" - }, - { CORE_LCB, - "lcb", - "liblwcf Lightweight Corefile Browser compliant" - }, - { CORE_LIST, - "list", - "list valid core format types" - }, - { CORE_INVALID, - NULL, - "Invalid format" - } -}; - -static struct core_format_info * _find_core_format_info (const char *name) -{ - struct core_format_info *ci; - - for (ci = core_types; ci && ci->name != NULL; ci++) { - if ( strncasecmp (ci->name, name, strlen (ci->name)) == 0) - break; - } - - return (ci); -} - -static void _print_valid_core_types (void) -{ - struct core_format_info *ci; - - info ("Valid corefile format types:"); - for (ci = core_types; ci && ci->name != NULL; ci++) { - if ((ci->type == CORE_LIGHT) || - (ci->type == CORE_LCB)) { - struct stat buf; - if ((stat("/lib/" LIB_LIGHT, &buf) < 0) && - (stat("/usr/lib/" LIB_LIGHT, &buf) < 0) && - (stat("/usr/local/lib/" LIB_LIGHT, &buf) < 0)) - continue; - } - if (ci->type != CORE_LIST) - info (" %-8s -- %s", ci->name, ci->descr); - } - return; -} - -core_format_t core_format_type (const char *str) -{ - struct core_format_info *ci = _find_core_format_info (str); - - if (ci->type == CORE_LIST) { - _print_valid_core_types (); - exit (0); - } - - return (ci->type); -} - -const char * core_format_name (core_format_t type) -{ - struct core_format_info *ci; - - for (ci = core_types; ci && ci->name != NULL; ci++) { - if (ci->type == type) - break; - } - - return (ci->name); -} - -int core_format_enable (core_format_t type) -{ - switch (type) { - case CORE_NORMAL: case CORE_INVALID: - break; - case CORE_LCB: - setenvfs ("LWCF_CORE_FORMAT=LCB"); - case CORE_LIGHT: - setenvfs ("LD_PRELOAD=" LIB_LIGHT); - break; - } - - return (0); -} - diff --git a/src/slaunch/core-format.h b/src/slaunch/core-format.h deleted file mode 100644 index 9c98f0687..000000000 --- a/src/slaunch/core-format.h +++ /dev/null @@ -1,42 +0,0 @@ 
-/*****************************************************************************\ - * src/srun/core-format.h - Change corefile characteristics for job - * $Id: core-format.h 10574 2006-12-15 23:38:29Z jette $ - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#ifndef _HAVE_SLAUNCH_CORE_FORMAT_H -#define _HAVE_SLAUNCH_CORE_FORMAT_H - -typedef int core_format_t; - -#define CORE_INVALID -1 -#define CORE_DEFAULT 0 - -core_format_t core_format_type (const char *type); - -const char * core_format_name (core_format_t type); - -int core_format_enable (core_format_t type); - -#endif /* !_HAVE_SLAUNCH_CORE_FORMAT_H */ diff --git a/src/slaunch/fname.c b/src/slaunch/fname.c deleted file mode 100644 index 7ecf5124f..000000000 --- a/src/slaunch/fname.c +++ /dev/null @@ -1,160 +0,0 @@ -/*****************************************************************************\ - * src/slaunch/fname.h - IO filename type implementation (slaunch specific) - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-\*****************************************************************************/ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include <stdio.h> -#include <stdarg.h> -#include <stdlib.h> -#include <string.h> -#include <ctype.h> - -#include "src/slaunch/fname.h" -#include "src/slaunch/opt.h" - -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/common/xassert.h" - -/* - * Max zero-padding width allowed - */ -#define MAX_WIDTH 10 - - -/* - * Fill in as much of filename as possible from slaunch, update - * filename type to one of the io types ALL, NONE, PER_TASK, ONE - */ -fname_t * -fname_create(char *format, int jobid, int stepid) -{ - unsigned long int wid = 0; - unsigned long int taskid = 0; - fname_t *fname = NULL; - char *p, *q, *name; - - fname = xmalloc(sizeof(*fname)); - fname->type = IO_ALL; - fname->name = NULL; - fname->taskid = -1; - - /* Handle special cases - */ - - if ((format == NULL) - || (strncasecmp(format, "all", (size_t) 3) == 0) - || (strncmp(format, "-", (size_t) 1) == 0) ) { - /* "all" explicitly sets IO_ALL and is the default */ - return fname; - } - - if (strncasecmp(format, "none", (size_t) 4) == 0) { - fname->name = xstrdup ("/dev/null"); - return fname; - } - - taskid = strtoul(format, &p, 10); - if ((*p == '\0') && ((int) taskid < opt.num_tasks)) { - fname->type = IO_ONE; - fname->taskid = (uint32_t) taskid; - /* Set the name string to pass to slurmd - * to the taskid requested, so that tasks with - * no IO can open /dev/null. - */ - fname->name = xstrdup (format); - return fname; - } - - name = NULL; - q = p = format; - while (*p != '\0') { - if (*p == '%') { - if (isdigit(*(++p))) { - xmemcat(name, q, p - 1); - if ((wid = strtoul(p, &p, 10)) > MAX_WIDTH) - wid = MAX_WIDTH; - q = p - 1; - if (*p == '\0') - break; - } - - switch (*p) { - case 't': /* '%t' => taskid */ - error("\"%%t\" is ignored for local files"); - p++; - break; - case 'n': /* '%n' => nodeid */ - error("\"%%n\" is ignored for local files"); - p++; - break; - case 'N': /* '%N' => node name */ - error("\"%%N\" is ignored for local files"); - p++; - break; - - case 'J': /* '%J' => "jobid.stepid" */ - case 'j': /* '%j' => jobid */ - - xmemcat(name, q, p - 1); - xstrfmtcat(name, "%0*d", wid, jobid); - - if ((*p == 'J') && (stepid != NO_VAL)) - xstrfmtcat(name, ".%d", stepid); - q = ++p; - break; - - case 's': /* '%s' => stepid */ - xmemcat(name, q, p - 1); - xstrfmtcat(name, "%0*d", wid, stepid); - q = ++p; - break; - - default: - break; - } - wid = 0; - } else - p++; - } - - if (q != p) - xmemcat(name, q, p); - - fname->name = name; - return fname; -} - -void -fname_destroy(fname_t *f) -{ - if (f->name) - xfree(f->name); - xfree(f); -} - diff --git a/src/slaunch/fname.h b/src/slaunch/fname.h deleted file mode 100644 index ed26b9e9c..000000000 --- a/src/slaunch/fname.h +++ /dev/null @@ -1,56 +0,0 @@ -/*****************************************************************************\ - * fname.c - IO filename type implementation (slaunch specific) - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. 
- * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#ifndef _SLAUNCH_FNAME_H -#define _SLAUNCH_FNAME_H - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -enum io_t { - IO_ALL = 0, /* multiplex output from all/bcast stdin to all */ - IO_ONE = 1, /* output from only one task/stdin to one task */ - IO_NONE = 2, /* close output/close stdin */ -}; - -typedef struct fname { - char *name; - enum io_t type; - int taskid; /* taskid for IO if IO_ONE */ -} fname_t; - -/* - * Create an filename from a (probably user supplied) filename format. - * fname_create() will expand the format as much as possible for slaunch, - * leaving node or task specific format specifiers for the remote - * slurmd to handle. - */ -fname_t *fname_create(char *format, int jobid, int stepid); -void fname_destroy(fname_t *fname); - -#endif /* !_SLAUNCH_FNAME_H */ - diff --git a/src/slaunch/multi_prog.c b/src/slaunch/multi_prog.c deleted file mode 100644 index 84125355a..000000000 --- a/src/slaunch/multi_prog.c +++ /dev/null @@ -1,207 +0,0 @@ -/*****************************************************************************\ - * multi_prog.c - executing program according to task rank - * set MPIR_PROCDESC accordingly - * - * NOTE: The logic could be eliminated if slurmstepd kept track of the - * executable name for each task and returned that inforatmion in a new - * launch response message (with multiple executable names). - ***************************************************************************** - * Produced at National University of Defense Technology (China) - * Written by Hongjia Cao <hjcao@nudt.edu.cn> - * and - * Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-\*****************************************************************************/ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#include <stdio.h> -#include <ctype.h> -#include <string.h> -#include <stdlib.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <unistd.h> - -#include "src/common/log.h" -#include "src/common/xassert.h" -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/slaunch/attach.h" - -/* Given a program name, translate it to a fully qualified pathname - * as needed based upon the PATH environment variable */ -static char * -_build_path(char* fname) -{ - int i; - char *path_env = NULL, *dir = NULL, *ptrptr = NULL; - static char file_name[256], file_path[256]; /* return values */ - struct stat buf; - - /* make copy of file name (end at white space) */ - snprintf(file_name, sizeof(file_name), "%s", fname); - for (i=0; i<sizeof(file_name); i++) { - if (file_name[i] == '\0') - break; - if (!isspace(file_name[i])) - continue; - file_name[i] = '\0'; - break; - } - - /* check if already absolute path */ - if (file_name[0] == '/') - return file_name; - - /* search for the file using PATH environment variable */ - dir = getenv("PATH"); - if (!dir) { - error("No PATH environment variable"); - return NULL; - } - path_env = xstrdup(dir); - dir = strtok_r(path_env, ":", &ptrptr); - while (dir) { - snprintf(file_path, sizeof(file_path), "%s/%s", dir, file_name); - if (stat(file_path, &buf) == 0) - break; - dir = strtok_r(NULL, ":", &ptrptr); - } - if (dir == NULL) { /* not found */ - error("Could not find executable %s", file_name); - snprintf(file_path, sizeof(file_path), "%s", file_name); - } - xfree(path_env); - return file_path; -} - -static void -_set_range(int low_num, int high_num, char *exec_name) -{ - int i; - - for (i=low_num; i<=high_num; i++) { - MPIR_PROCDESC *tv; - tv = &MPIR_proctable[i]; - if (tv->executable_name) { - error("duplicate configuration for task %d ignored", - i); - } else - tv->executable_name = xstrdup(exec_name); - } -} - -static void -_set_exec_names(char *ranks, char *exec_name, int ntasks) -{ - char *range = NULL, *p = NULL, *ptrptr = NULL; - char *exec_path = NULL, *upper = NULL; - int low_num, high_num; - - if (ranks[0] == '*' && ranks[1] == '\0') { - low_num = 0; - high_num = ntasks -1; - _set_range(low_num, high_num, exec_name); - return; - } - exec_path = _build_path(exec_name); - - for (range = strtok_r(ranks, ",", &ptrptr); - range != NULL; - range = strtok_r(NULL, ",", &ptrptr)) { - p = range; - while (*p != '\0' && isdigit (*p)) - p ++; - - if (*p == '\0') { /* single rank */ - low_num = MAX(0, atoi(range)); - high_num = MIN((ntasks-1), atoi(range)); - _set_range(low_num, high_num, exec_path); - } else if (*p == '-') { /* lower-upper */ - upper = ++ p; - while (isdigit (*p)) - p ++; - if (*p != '\0') { - error ("Invalid task range specification (%s) " - "ignored.", range); - continue; - } - low_num = MAX(0, atoi (range)); - high_num = MIN((ntasks-1), atoi(upper)); - _set_range(low_num, high_num, exec_path); - } else { - error ("Invalid task range specification (%s) ignored.", range); - } - } -} - -extern int -mpir_set_multi_name(int ntasks, const char *config_fname) -{ - FILE *config_fd; - char line[256]; - char *ranks = NULL, *exec_name = NULL, *p = NULL, *ptrptr = NULL; - int line_num = 0, i; - - for (i=0; i<ntasks; i++) { - MPIR_PROCDESC *tv; - tv = &MPIR_proctable[i]; - tv->executable_name = NULL; - } - - config_fd = fopen(config_fname, "r"); - if (config_fd == NULL) { - error("Unable to open 
configuration file %s", config_fname); - return -1; - } - while (fgets(line, sizeof(line), config_fd)) { - line_num ++; - if (strlen (line) >= (sizeof(line) - 1)) { - error ("Line %d of configuration file too long", - line_num); - fclose(config_fd); - return -1; - } - p = line; - while (*p != '\0' && isspace (*p)) /* remove leading spaces */ - p ++; - - if (*p == '#') /* only whole-line comments handled */ - continue; - - if (*p == '\0') /* blank line ignored */ - continue; - - ranks = strtok_r(p, " \t\n", &ptrptr); - exec_name = strtok_r(NULL, " \t\n", &ptrptr); - if (!ranks || !exec_name) { - error("Line %d is invalid", line_num); - fclose(config_fd); - return -1; - } - _set_exec_names(ranks, exec_name, ntasks); - } - fclose(config_fd); - return 0; -} diff --git a/src/slaunch/multi_prog.h b/src/slaunch/multi_prog.h deleted file mode 100644 index 9153b6cec..000000000 --- a/src/slaunch/multi_prog.h +++ /dev/null @@ -1,37 +0,0 @@ -/*****************************************************************************\ - * multi_prog.h - executing program according to task rank - * set MPIR_PROCDESC accordingly - ***************************************************************************** - * Produced at National University of Defense Technology (China) - * Written by Hongjia Cao <hjcao@nudt.edu.cn> - * and - * Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ -#ifndef _SLAUNCH_MULTI_PROG_H -#define _SLAUNCH_MULTI_PROG_H - -/* set global MPIR_PROCDESC executable names based upon multi-program - * configuration file */ -extern int mpir_set_multi_name(int ntasks, const char *config_fname); - -#endif - diff --git a/src/slaunch/opt.c b/src/slaunch/opt.c deleted file mode 100644 index de1b25d64..000000000 --- a/src/slaunch/opt.c +++ /dev/null @@ -1,2138 +0,0 @@ -/*****************************************************************************\ - * opt.c - options processing for slaunch - ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. 
- * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <string.h> /* strcpy, strncasecmp */ - -#ifdef HAVE_STRINGS_H -# include <strings.h> -#endif - -#ifndef _GNU_SOURCE -# define _GNU_SOURCE -#endif - -#if HAVE_GETOPT_H -# include <getopt.h> -#else -# include "src/common/getopt.h" -#endif - -#include <fcntl.h> -#include <stdarg.h> /* va_start */ -#include <stdio.h> -#include <stdlib.h> /* getenv */ -#include <pwd.h> /* getpwuid */ -#include <ctype.h> /* isdigit */ -#include <sys/param.h> /* MAXPATHLEN */ -#include <sys/stat.h> -#include <unistd.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <sys/utsname.h> -#include <regex.h> - -#include "src/slaunch/opt.h" - -#include "src/common/list.h" -#include "src/common/log.h" -#include "src/common/parse_time.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/uid.h" -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/common/slurm_rlimits_info.h" -#include "src/common/plugstack.h" -#include "src/common/optz.h" -#include "src/common/read_config.h" /* getnodename() */ -#include "src/common/hostlist.h" -#include "src/common/mpi.h" -#include "src/api/pmi_server.h" - -#include "src/slaunch/attach.h" - -/* generic OPT_ definitions -- mainly for use with env vars */ -#define OPT_NONE 0x00 -#define OPT_INT 0x01 -#define OPT_STRING 0x02 -#define OPT_DEBUG 0x03 -#define OPT_DISTRIB 0x04 -#define OPT_BOOL 0x06 -#define OPT_CORE 0x07 -#define OPT_MPI 0x0c -#define OPT_CPU_BIND 0x0d -#define OPT_MEM_BIND 0x0e -#define OPT_MULTI 0x0f - -/* generic getopt_long flags, integers and *not* valid characters */ -#define LONG_OPT_USAGE 0x100 -#define LONG_OPT_LAUNCH 0x103 -#define LONG_OPT_JOBID 0x105 -#define LONG_OPT_UID 0x106 -#define LONG_OPT_GID 0x107 -#define LONG_OPT_MPI 0x108 -#define LONG_OPT_CORE 0x109 -#define LONG_OPT_DEBUG_TS 0x10a -#define LONG_OPT_NETWORK 0x10b -#define LONG_OPT_PROPAGATE 0x10c -#define LONG_OPT_PROLOG 0x10d -#define LONG_OPT_EPILOG 0x10e -#define LONG_OPT_TASK_PROLOG 0x10f -#define LONG_OPT_TASK_EPILOG 0x110 -#define LONG_OPT_CPU_BIND 0x111 -#define LONG_OPT_MEM_BIND 0x112 -#define LONG_OPT_COMM_HOSTNAME 0x113 -#define LONG_OPT_MULTI 0x114 -#define LONG_OPT_PMI_THREADS 0x115 -#define LONG_OPT_LIN_FILTER 0x116 -#define LONG_OPT_LOUT_FILTER 0x117 -#define LONG_OPT_LERR_FILTER 0x118 -#define LONG_OPT_RIN_FILTER 0x119 -#define LONG_OPT_ROUT_FILTER 0x11a -#define LONG_OPT_RERR_FILTER 0x11b - -/*---- forward declarations of static functions ----*/ - -typedef struct env_vars env_vars_t; - -/* return command name from its full path name */ -static char * _base_name(char* command); - -static List _create_path_list(void); - -/* Get a POSITIVE decimal integer from arg */ -static int _get_pos_int(const char 
*arg, const char *what); - -/* Get a decimal integer from arg */ -static int _get_int(const char *arg, const char *what, bool positive); - -static void _help(void); - -/* load a multi-program configuration file */ -static void _load_multi(int *argc, char **argv); - -/* fill in default options */ -static void _opt_default(void); - -/* set options based upon env vars */ -static void _opt_env(void); - -static void _opt_args(int argc, char **argv); - -/* list known options and their settings */ -static void _opt_list(void); - -/* verify options sanity */ -static bool _opt_verify(void); - -static void _print_version(void); - -static void _process_env_var(env_vars_t *e, const char *val); - -/* search PATH for command returns full path */ -static char *_search_path(char *, int); - -static void _usage(void); -static int _verify_cpu_bind(const char *arg, char **cpu_bind, - cpu_bind_type_t *cpu_bind_type); -static int _verify_mem_bind(const char *arg, char **mem_bind, - mem_bind_type_t *mem_bind_type); -static task_dist_states_t _verify_dist_type(const char *arg, uint32_t *psize); - -/*---[ end forward declarations of static functions ]---------------------*/ - -int initialize_and_process_args(int argc, char *argv[]) -{ - /* initialize option defaults */ - _opt_default(); - - /* initialize options with env vars */ - _opt_env(); - - /* initialize options with argv */ - _opt_args(argc, argv); - - if (opt.verbose > 1) - _opt_list(); - - return 1; - -} - -static void _print_version(void) -{ - printf("%s %s\n", PACKAGE, SLURM_VERSION); -} - -/* - * _isvalue - * returns 1 is the argument appears to be a value, 0 otherwise - */ -static int _isvalue(char *arg) { - if (isdigit(*arg)) { /* decimal values and 0x... hex values */ - return 1; - } - - while (isxdigit(*arg)) { /* hex values not preceded by 0x */ - arg++; - } - if (*arg == ',' || *arg == '\0') { /* end of field or string */ - return 1; - } - - return 0; /* not a value */ -} - -/* - * verify cpu_bind arguments - * returns -1 on error, 0 otherwise - */ -static int _verify_cpu_bind(const char *arg, char **cpu_bind, - cpu_bind_type_t *cpu_bind_type) -{ - char *buf, *p, *tok; - if (!arg) { - return 0; - } - /* we support different launch policy names - * we also allow a verbose setting to be specified - * --cpu_bind=threads - * --cpu_bind=cores - * --cpu_bind=sockets - * --cpu_bind=v - * --cpu_bind=rank,v - * --cpu_bind=rank - * --cpu_bind={MAP_CPU|MASK_CPU}:0,1,2,3,4 - */ - buf = xstrdup(arg); - p = buf; - /* change all ',' delimiters not followed by a digit to ';' */ - /* simplifies parsing tokens while keeping map/mask together */ - while (p[0] != '\0') { - if ((p[0] == ',') && (!_isvalue(&(p[1])))) - p[0] = ';'; - p++; - } - - p = buf; - while ((tok = strsep(&p, ";"))) { - if (strcasecmp(tok, "help") == 0) { - printf( -"CPU bind options:\n" -" --cpu_bind= Bind tasks to CPUs\n" -" q[uiet] quietly bind before task runs (default)\n" -" v[erbose] verbosely report binding before task runs\n" -" no[ne] don't bind tasks to CPUs (default)\n" -" rank bind by task rank\n" -" map_cpu:<list> specify a CPU ID binding for each task\n" -" where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" -" mask_cpu:<list> specify a CPU ID binding mask for each task\n" -" where <list> is <mask1>,<mask2>,...<maskN>\n" -" sockets auto-generated masks bind to sockets\n" -" cores auto-generated masks bind to cores\n" -" threads auto-generated masks bind to threads\n" -" help show this help message\n"); - return 1; - } else if ((strcasecmp(tok, "q") == 0) || - (strcasecmp(tok, "quiet") 
== 0)) { - *cpu_bind_type &= ~CPU_BIND_VERBOSE; - } else if ((strcasecmp(tok, "v") == 0) || - (strcasecmp(tok, "verbose") == 0)) { - *cpu_bind_type |= CPU_BIND_VERBOSE; - } else if ((strcasecmp(tok, "no") == 0) || - (strcasecmp(tok, "none") == 0)) { - *cpu_bind_type |= CPU_BIND_NONE; - *cpu_bind_type &= ~CPU_BIND_RANK; - *cpu_bind_type &= ~CPU_BIND_MAP; - *cpu_bind_type &= ~CPU_BIND_MASK; - xfree(*cpu_bind); - } else if (strcasecmp(tok, "rank") == 0) { - *cpu_bind_type &= ~CPU_BIND_NONE; - *cpu_bind_type |= CPU_BIND_RANK; - *cpu_bind_type &= ~CPU_BIND_MAP; - *cpu_bind_type &= ~CPU_BIND_MASK; - xfree(*cpu_bind); - } else if ((strncasecmp(tok, "map_cpu", 7) == 0) || - (strncasecmp(tok, "mapcpu", 6) == 0)) { - char *list; - list = strsep(&tok, ":="); - list = strsep(&tok, ":="); - *cpu_bind_type &= ~CPU_BIND_NONE; - *cpu_bind_type &= ~CPU_BIND_RANK; - *cpu_bind_type |= CPU_BIND_MAP; - *cpu_bind_type &= ~CPU_BIND_MASK; - xfree(*cpu_bind); - if (list && *list) { - *cpu_bind = xstrdup(list); - } else { - error("missing list for \"--cpu_bind=map_cpu:<list>\""); - xfree(buf); - return 1; - } - } else if ((strncasecmp(tok, "mask_cpu", 8) == 0) || - (strncasecmp(tok, "maskcpu", 7) == 0)) { - char *list; - list = strsep(&tok, ":="); - list = strsep(&tok, ":="); - *cpu_bind_type &= ~CPU_BIND_NONE; - *cpu_bind_type &= ~CPU_BIND_RANK; - *cpu_bind_type &= ~CPU_BIND_MAP; - *cpu_bind_type |= CPU_BIND_MASK; - xfree(*cpu_bind); - if (list && *list) { - *cpu_bind = xstrdup(list); - } else { - error("missing list for \"--cpu_bind=mask_cpu:<list>\""); - xfree(buf); - return 1; - } - } else if ((strcasecmp(tok, "socket") == 0) || - (strcasecmp(tok, "sockets") == 0)) { - *cpu_bind_type |= CPU_BIND_TO_SOCKETS; - *cpu_bind_type &= ~CPU_BIND_TO_CORES; - *cpu_bind_type &= ~CPU_BIND_TO_THREADS; - } else if ((strcasecmp(tok, "core") == 0) || - (strcasecmp(tok, "cores") == 0)) { - *cpu_bind_type &= ~CPU_BIND_TO_SOCKETS; - *cpu_bind_type |= CPU_BIND_TO_CORES; - *cpu_bind_type &= ~CPU_BIND_TO_THREADS; - } else if ((strcasecmp(tok, "thread") == 0) || - (strcasecmp(tok, "threads") == 0)) { - *cpu_bind_type &= ~CPU_BIND_TO_SOCKETS; - *cpu_bind_type &= ~CPU_BIND_TO_CORES; - *cpu_bind_type |= CPU_BIND_TO_THREADS; - } else { - error("unrecognized --cpu_bind argument \"%s\"", tok); - xfree(buf); - return 1; - } - } - - xfree(buf); - return 0; -} - -/* - * verify mem_bind arguments - * returns -1 on error, 0 otherwise - */ -static int _verify_mem_bind(const char *arg, char **mem_bind, - mem_bind_type_t *mem_bind_type) -{ - char *buf, *p, *tok; - if (!arg) { - return 0; - } - /* we support different memory binding names - * we also allow a verbose setting to be specified - * --mem_bind=v - * --mem_bind=rank,v - * --mem_bind=rank - * --mem_bind={MAP_MEM|MASK_MEM}:0,1,2,3,4 - */ - buf = xstrdup(arg); - p = buf; - /* change all ',' delimiters not followed by a digit to ';' */ - /* simplifies parsing tokens while keeping map/mask together */ - while (p[0] != '\0') { - if ((p[0] == ',') && (!_isvalue(&(p[1])))) - p[0] = ';'; - p++; - } - - p = buf; - while ((tok = strsep(&p, ";"))) { - if (strcasecmp(tok, "help") == 0) { - printf( -"Memory bind options:\n" -" --mem_bind= Bind memory to locality domains (ldom)\n" -" q[uiet] quietly bind before task runs (default)\n" -" v[erbose] verbosely report binding before task runs\n" -" no[ne] don't bind tasks to memory (default)\n" -" rank bind by task rank\n" -" local bind to memory local to processor\n" -" map_mem:<list> specify a memory binding for each task\n" -" where <list> is 
<cpuid1>,<cpuid2>,...<cpuidN>\n" -" mask_mem:<list> specify a memory binding mask for each tasks\n" -" where <list> is <mask1>,<mask2>,...<maskN>\n" -" help show this help message\n"); - return 1; - - } else if ((strcasecmp(tok, "q") == 0) || - (strcasecmp(tok, "quiet") == 0)) { - *mem_bind_type &= ~MEM_BIND_VERBOSE; - } else if ((strcasecmp(tok, "v") == 0) || - (strcasecmp(tok, "verbose") == 0)) { - *mem_bind_type |= MEM_BIND_VERBOSE; - } else if ((strcasecmp(tok, "no") == 0) || - (strcasecmp(tok, "none") == 0)) { - *mem_bind_type |= MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; - xfree(*mem_bind); - } else if (strcasecmp(tok, "rank") == 0) { - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type |= MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; - xfree(*mem_bind); - } else if (strcasecmp(tok, "local") == 0) { - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type |= MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; - xfree(*mem_bind); - } else if ((strncasecmp(tok, "map_mem", 7) == 0) || - (strncasecmp(tok, "mapmem", 6) == 0)) { - char *list; - list = strsep(&tok, ":="); - list = strsep(&tok, ":="); - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type |= MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; - xfree(*mem_bind); - if (list && *list) { - *mem_bind = xstrdup(list); - } else { - error("missing list for \"--mem_bind=map_mem:<list>\""); - xfree(buf); - return 1; - } - } else if ((strncasecmp(tok, "mask_mem", 8) == 0) || - (strncasecmp(tok, "maskmem", 7) == 0)) { - char *list; - list = strsep(&tok, ":="); - list = strsep(&tok, ":="); - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type |= MEM_BIND_MASK; - xfree(*mem_bind); - if (list && *list) { - *mem_bind = xstrdup(list); - } else { - error("missing list for \"--mem_bind=mask_mem:<list>\""); - xfree(buf); - return 1; - } - } else { - error("unrecognized --mem_bind argument \"%s\"", tok); - xfree(buf); - return 1; - } - } - - xfree(buf); - return 0; -} -/* - * verify that a distribution type in arg is of a known form - * returns the task_dist_states, or -1 if state is unknown - */ -static task_dist_states_t _verify_dist_type(const char *arg, - uint32_t *plane_size) -{ - int len = strlen(arg); - char *dist_str = NULL; - task_dist_states_t result = SLURM_DIST_UNKNOWN; - bool lllp_dist = false, plane_dist = false; - - dist_str = strchr(arg,':'); - if (dist_str != NULL) { - /* -m cyclic|block:cyclic|block */ - lllp_dist = true; - } else { - /* -m plane=<plane_size> */ - dist_str = strchr(arg,'='); - if(dist_str != NULL) { - *plane_size=atoi(dist_str+1); - len = dist_str-arg; - plane_dist = true; - } - } - - if (lllp_dist) { - if (strcasecmp(arg, "cyclic:cyclic") == 0) { - result = SLURM_DIST_CYCLIC_CYCLIC; - } else if (strcasecmp(arg, "cyclic:block") == 0) { - result = SLURM_DIST_CYCLIC_BLOCK; - } else if (strcasecmp(arg, "block:block") == 0) { - result = SLURM_DIST_BLOCK_BLOCK; - } else if (strcasecmp(arg, "block:cyclic") == 0) { - result = SLURM_DIST_BLOCK_CYCLIC; - } - } else if (plane_dist) { - if (strncasecmp(arg, "plane", len) == 0) { - result = SLURM_DIST_PLANE; - } - } else { - if 
(strncasecmp(arg, "cyclic", len) == 0) { - result = SLURM_DIST_CYCLIC; - } else if (strncasecmp(arg, "block", len) == 0) { - result = SLURM_DIST_BLOCK; - } else if ((strncasecmp(arg, "arbitrary", len) == 0) || - (strncasecmp(arg, "hostfile", len) == 0)) { - result = SLURM_DIST_ARBITRARY; - } - } - - return result; -} - -/* - * Parse the next greatest of: - * CPUS(REPS), - * or - * CPUS(REPS) - * or - * CPUS, - * or - * CPUS - * moving "ptr" past the parsed cpu/reps pair - * - * Return 1 after succesfully parsing a new number or pair, and 0 otherwise. - */ -static int _parse_cpu_rep_pair(char **ptr, uint32_t *cpu, uint32_t *rep) -{ - char *endptr; - - *rep = 1; - *cpu = strtol(*ptr, &endptr, 10); - if (*cpu == 0 && endptr == *ptr) { - /* no more numbers */ - return 0; - } - - if (endptr[0] == (char)',') { - *ptr = endptr+1; - return 1; - } else if (endptr[0] == (char)'(' - && endptr[1] == (char)'x') { - *ptr = endptr+2; - *rep = strtol(*ptr, &endptr, 10); - if (*rep == 0 && endptr == *ptr) { - error("was expecting a number at \"%s\"", *ptr); - return 0; - } - if (endptr[0] != (char)')') { - error("was expecting a closing parenthasis at \"%s\"", - endptr); - return 0; - } - endptr = endptr+1; - - /* finally, swallow the next comma, if there is one */ - if (endptr[0] == (char)',') { - *ptr = endptr + 1; - } else { - *ptr = endptr; - } - return 1; - } else { - *ptr = endptr; - return 1; - } -} - - -/* Take a string representing cpus-per-node in compressed representation, - * and set variables in "alloc_info" pertaining to cpus-per-node. - */ -static int _set_cpus_per_node(const char *str, - resource_allocation_response_msg_t *alloc_info) -{ - char *ptr = (char *)str; - uint16_t num_cpus_groups = 0; - uint32_t *cpus = NULL; - uint32_t *cpus_reps = NULL; - uint32_t cpu, rep; - - while (_parse_cpu_rep_pair(&ptr, &cpu, &rep)) { - num_cpus_groups++; - xrealloc(cpus, sizeof(uint32_t)*num_cpus_groups); - xrealloc(cpus_reps, sizeof(uint32_t)*num_cpus_groups); - cpus[num_cpus_groups-1] = cpu; - cpus_reps[num_cpus_groups-1] = rep; - } - if (num_cpus_groups == 0) - return 0; - - alloc_info->num_cpu_groups = num_cpus_groups; - alloc_info->cpus_per_node = cpus; - alloc_info->cpu_count_reps = cpus_reps; - - return 1; -} - -/* return command name from its full path name */ -static char * _base_name(char* command) -{ - char *char_ptr, *name; - int i; - - if (command == NULL) - return NULL; - - char_ptr = strrchr(command, (int)'/'); - if (char_ptr == NULL) - char_ptr = command; - else - char_ptr++; - - i = strlen(char_ptr); - name = xmalloc(i+1); - strcpy(name, char_ptr); - return name; -} - -/* - * print error message to stderr with opt.progname prepended - */ -#undef USE_ARGERROR -#if USE_ARGERROR -static void argerror(const char *msg, ...) -{ - va_list ap; - char buf[256]; - - va_start(ap, msg); - vsnprintf(buf, sizeof(buf), msg, ap); - - fprintf(stderr, "%s: %s\n", - opt.progname ? 
opt.progname : "slaunch", buf); - va_end(ap); -} -#else -# define argerror error -#endif /* USE_ARGERROR */ - -/* - * _opt_default(): used by initialize_and_process_args to set defaults - */ -static void _opt_default() -{ - char buf[MAXPATHLEN + 1]; - struct passwd *pw; - - if ((pw = getpwuid(getuid())) != NULL) { - strncpy(opt.user, pw->pw_name, MAX_USERNAME); - opt.uid = pw->pw_uid; - } else - error("who are you?"); - - opt.gid = getgid(); - - if ((getcwd(buf, MAXPATHLEN)) == NULL) - fatal("getcwd failed: %m"); - opt.cwd = xstrdup(buf); - - opt.progname = NULL; - - opt.num_tasks = 1; - opt.num_tasks_set = false; - opt.cpus_per_task = 1; - opt.cpus_per_task_set = false; - opt.num_nodes = 1; - opt.num_nodes_set = false; - opt.cpu_bind_type = 0; - opt.cpu_bind = NULL; - opt.mem_bind_type = 0; - opt.mem_bind = NULL; - opt.relative = (uint16_t)NO_VAL; - opt.relative_set = false; - - opt.job_name = NULL; - opt.jobid = NO_VAL; - opt.jobid_set = false; - - opt.distribution = SLURM_DIST_UNKNOWN; - opt.plane_size = NO_VAL; - - opt.local_ofname = NULL; - opt.local_ifname = NULL; - opt.local_efname = NULL; - opt.remote_ofname = NULL; - opt.remote_ifname = NULL; - opt.remote_efname = NULL; - opt.local_input_filter = (uint32_t)-1; - opt.local_input_filter_set = false; - opt.local_output_filter = (uint32_t)-1; - opt.local_output_filter_set = false; - opt.local_error_filter = (uint32_t)-1; - opt.local_error_filter_set = false; - opt.remote_input_filter = (uint32_t)-1; - opt.remote_output_filter = (uint32_t)-1; - opt.remote_error_filter = (uint32_t)-1; - - opt.core_type = CORE_DEFAULT; - - opt.labelio = false; - opt.unbuffered = false; - opt.overcommit = false; - opt.no_kill = false; - opt.kill_bad_exit = false; - opt.max_wait = slurm_get_wait_time(); - opt.quiet = 0; - opt.verbose = 0; - opt.slurmd_debug = LOG_LEVEL_QUIET; - opt.nodelist = NULL; - opt.nodelist_byid = NULL; - opt.task_layout = NULL; - opt.task_layout_file_set = false; - - opt.euid = (uid_t) -1; - opt.egid = (gid_t) -1; - - opt.propagate = NULL; /* propagate specific rlimits */ - - opt.prolog = slurm_get_srun_prolog(); - opt.epilog = slurm_get_srun_epilog(); - - opt.task_prolog = NULL; - opt.task_epilog = NULL; - - opt.comm_hostname = xshort_hostname(); -} - -/*---[ env var processing ]-----------------------------------------------*/ - -/* - * try to use a similar scheme as popt. - * - * in order to add a new env var (to be processed like an option): - * - * define a new entry into env_vars[], if the option is a simple int - * or string you may be able to get away with adding a pointer to the - * option to set. Otherwise, process var based on "type" in _opt_env. - */ -struct env_vars { - const char *var; - int type; - void *arg; - void *set_flag; -}; - -env_vars_t env_vars[] = { - /* SLURM_JOBID is handled like SLAUNCH_JOBID as backwards compatibility - with LCRM. 
If we get LCRM to call a slurm API function which - tells LCRM which variables to set for a particular jobid number, - then there would be no need for LCRM's static SLURM_JOBID code or - the handling of SLURM_JOBID below.*/ - {"SLURM_JOBID", OPT_INT, &opt.jobid, &opt.jobid_set }, - {"SLAUNCH_JOBID", OPT_INT, &opt.jobid, &opt.jobid_set }, - {"SLURMD_DEBUG", OPT_INT, &opt.slurmd_debug, NULL }, - {"SLAUNCH_CORE_FORMAT", OPT_CORE, NULL, NULL }, - {"SLAUNCH_CPU_BIND", OPT_CPU_BIND, NULL, NULL }, - {"SLAUNCH_MEM_BIND", OPT_MEM_BIND, NULL, NULL }, - {"SLAUNCH_DEBUG", OPT_DEBUG, NULL, NULL }, - {"SLAUNCH_DISTRIBUTION", OPT_DISTRIB, NULL, NULL }, - {"SLAUNCH_KILL_BAD_EXIT",OPT_BOOL, &opt.kill_bad_exit, NULL }, - {"SLAUNCH_LABELIO", OPT_BOOL, &opt.labelio, NULL }, - {"SLAUNCH_OVERCOMMIT", OPT_BOOL, &opt.overcommit, NULL }, - {"SLAUNCH_WAIT", OPT_INT, &opt.max_wait, NULL }, - {"SLAUNCH_MPI_TYPE", OPT_MPI, NULL, NULL }, - {"SLAUNCH_COMM_HOSTNAME",OPT_STRING, &opt.comm_hostname, NULL }, - {"SLAUNCH_PROLOG", OPT_STRING, &opt.prolog, NULL }, - {"SLAUNCH_EPILOG", OPT_STRING, &opt.epilog, NULL }, - {"SLAUNCH_TASK_PROLOG", OPT_STRING, &opt.task_prolog, NULL }, - {"SLAUNCH_TASK_EPILOG", OPT_STRING, &opt.task_epilog, NULL }, - - {NULL, 0, NULL, NULL} -}; - - -/* - * _opt_env(): used by initialize_and_process_args to set options via - * environment variables. See comments above for how to - * extend slaunch to process different vars - */ -static void _opt_env() -{ - char *val = NULL; - env_vars_t *e = env_vars; - - while (e->var) { - if ((val = getenv(e->var)) != NULL) - _process_env_var(e, val); - e++; - } -} - - -static void -_process_env_var(env_vars_t *e, const char *val) -{ - char *end = NULL; - - debug2("now processing env var %s=%s", e->var, val); - - if (e->set_flag) { - *((bool *) e->set_flag) = true; - } - - switch (e->type) { - case OPT_STRING: - *((char **) e->arg) = xstrdup(val); - break; - case OPT_INT: - if (val != NULL) { - *((int *) e->arg) = (int) strtol(val, &end, 10); - if (!(end && *end == '\0')) - error("%s=%s invalid. ignoring...", e->var, val); - } - break; - - case OPT_BOOL: - /* A boolean env variable is true if: - * - set, but no argument - * - argument is "yes" - * - argument is a non-zero number - */ - if (val == NULL || strcmp(val, "") == 0) { - *((bool *)e->arg) = true; - } else if (strcasecmp(val, "yes") == 0) { - *((bool *)e->arg) = true; - } else if ((strtol(val, &end, 10) != 0) - && end != val) { - *((bool *)e->arg) = true; - } else { - *((bool *)e->arg) = false; - } - break; - - case OPT_DEBUG: - if (val != NULL) { - opt.verbose = (int) strtol(val, &end, 10); - if (!(end && *end == '\0')) - error("%s=%s invalid", e->var, val); - } - break; - - case OPT_DISTRIB: - opt.plane_size = 0; - opt.distribution = _verify_dist_type(val, &opt.plane_size); - if (opt.distribution == SLURM_DIST_UNKNOWN) { - error("\"%s=%s\" -- invalid distribution type. ", - e->var, val); - exit(1); - } - break; - - case OPT_CPU_BIND: - if (_verify_cpu_bind(val, &opt.cpu_bind, - &opt.cpu_bind_type)) - exit(1); - break; - - case OPT_MEM_BIND: - if (_verify_mem_bind(val, &opt.mem_bind, - &opt.mem_bind_type)) - exit(1); - break; - - case OPT_CORE: - opt.core_type = core_format_type (val); - break; - - case OPT_MPI: - if (mpi_hook_client_init((char *)val) == SLURM_ERROR) { - fatal("\"%s=%s\" -- invalid MPI type, " - "--mpi=list for acceptable types.", - e->var, val); - } - break; - - default: - /* do nothing */ - break; - } -} - -/* - * Get a POSITIVE decimal integer from arg. 
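The OPT_BOOL handling above treats an empty value, a case-insensitive "yes", or any non-zero number as true and everything else as false. A minimal standalone sketch of that rule, assuming only the C standard library (the helper name env_value_is_true is illustrative, not part of SLURM):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <strings.h>

    /* Illustrative helper mirroring the removed OPT_BOOL handling:
     * an empty value, "yes" (any case), or a non-zero number is true;
     * everything else is false. */
    static bool env_value_is_true(const char *val)
    {
        char *end = NULL;

        if (val == NULL || val[0] == '\0')
            return true;                 /* variable set with no value */
        if (strcasecmp(val, "yes") == 0)
            return true;                 /* explicit "yes" */
        if (strtol(val, &end, 10) != 0 && end != val)
            return true;                 /* non-zero number, e.g. "1" */
        return false;                    /* "0", "no", anything else */
    }

    int main(void)
    {
        printf("SLAUNCH_LABELIO=yes -> %d\n", env_value_is_true("yes"));
        printf("SLAUNCH_LABELIO=0   -> %d\n", env_value_is_true("0"));
        return 0;
    }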
- * - * Returns the integer on success, exits program on failure. - * - */ -static int -_get_pos_int(const char *arg, const char *what) -{ - char *p; - long int result = strtol(arg, &p, 10); - - if (p == arg || !xstring_is_whitespace(p) || (result < 0L)) { - error ("Invalid numeric value \"%s\" for %s.", arg, what); - exit(1); - } - - if (result > INT_MAX) { - error ("Numeric argument %ld to big for %s.", result, what); - exit(1); - } - - return (int) result; -} - -/* - * Get a decimal integer from arg. - * - * Returns the integer on success, exits program on failure. - * - */ -static int -_get_int(const char *arg, const char *what, bool positive) -{ - char *p; - long int result = strtol(arg, &p, 10); - - if ((p == arg) || (!xstring_is_whitespace(p)) - || (positive && (result <= 0L))) { - error ("Invalid numeric value \"%s\" for %s.", arg, what); - exit(1); - } - - if (result > INT_MAX) { - error ("Numeric argument %ld to big for %s.", result, what); - } else if (result < INT_MIN) { - error ("Numeric argument %ld to small for %s.", result, what); - } - - return (int) result; -} - -void set_options(const int argc, char **argv) -{ - int opt_char, option_index = 0; - char *tmp; - static struct option long_options[] = { - {"cpus-per-task", required_argument, 0, 'c'}, - {"overcommit", no_argument, 0, 'C'}, - {"slurmd-debug", required_argument, 0, 'd'}, - {"workdir", required_argument, 0, 'D'}, - {"slaunch-error", required_argument, 0, 'e'}, - {"task-error", required_argument, 0, 'E'}, - {"task-layout-file",required_argument,0,'F'}, - {"help", no_argument, 0, 'h'}, - {"slaunch-input", required_argument, 0, 'i'}, - {"task-input", required_argument, 0, 'I'}, - {"name", required_argument, 0, 'J'}, - {"kill-on-bad-exit", no_argument, 0, 'K'}, - {"label", no_argument, 0, 'l'}, - {"nodelist-byid", required_argument, 0, 'L'}, - {"distribution", required_argument, 0, 'm'}, - {"tasks", required_argument, 0, 'n'}, - {"nodes", required_argument, 0, 'N'}, - {"slaunch-output",required_argument, 0, 'o'}, - {"task-output", required_argument, 0, 'O'}, - {"quiet", no_argument, 0, 'q'}, - {"relative", required_argument, 0, 'r'}, - {"unbuffered", no_argument, 0, 'u'}, - {"task-layout-byid", required_argument, 0, 'T'}, - {"verbose", no_argument, 0, 'v'}, - {"version", no_argument, 0, 'V'}, - {"nodelist-byname", required_argument, 0, 'w'}, - {"wait", required_argument, 0, 'W'}, - {"task-layout-byname", required_argument, 0, 'Y'}, - {"cpu_bind", required_argument, 0, LONG_OPT_CPU_BIND}, - {"mem_bind", required_argument, 0, LONG_OPT_MEM_BIND}, - {"core", required_argument, 0, LONG_OPT_CORE}, - {"mpi", required_argument, 0, LONG_OPT_MPI}, - {"jobid", required_argument, 0, LONG_OPT_JOBID}, - {"uid", required_argument, 0, LONG_OPT_UID}, - {"gid", required_argument, 0, LONG_OPT_GID}, - /* debugger-test intentionally undocumented in the man page */ - {"debugger-test", no_argument, 0, LONG_OPT_DEBUG_TS}, - {"usage", no_argument, 0, LONG_OPT_USAGE}, - {"network", required_argument, 0, LONG_OPT_NETWORK}, - {"propagate", optional_argument, 0, LONG_OPT_PROPAGATE}, - {"prolog", required_argument, 0, LONG_OPT_PROLOG}, - {"epilog", required_argument, 0, LONG_OPT_EPILOG}, - {"task-prolog", required_argument, 0, LONG_OPT_TASK_PROLOG}, - {"task-epilog", required_argument, 0, LONG_OPT_TASK_EPILOG}, - {"ctrl-comm-ifhn", required_argument, 0, LONG_OPT_COMM_HOSTNAME}, - {"multi-prog", no_argument, 0, LONG_OPT_MULTI}, - /* pmi-threads intentionally undocumented in the man page */ - {"pmi-threads", required_argument, 0, 
LONG_OPT_PMI_THREADS}, - {"slaunch-input-filter",required_argument,0, LONG_OPT_LIN_FILTER}, - {"slaunch-output-filter",required_argument,0,LONG_OPT_LOUT_FILTER}, - {"slaunch-error-filter",required_argument,0, LONG_OPT_LERR_FILTER}, - /* task-*-filter are not yet functional, and intentionally - undocumented in the man page */ - {"task-input-filter", required_argument, 0, LONG_OPT_RIN_FILTER}, - {"task-output-filter",required_argument, 0, LONG_OPT_ROUT_FILTER}, - {"task-error-filter", required_argument, 0, LONG_OPT_RERR_FILTER}, - {NULL, 0, 0, 0} - }; - char *opt_string = - "+c:Cd:D:e:E:F:hi:I:J:KlL:m:n:N:o:O:qr:T:uvVw:W:Y:"; - - struct option *optz = spank_option_table_create (long_options); - - if (!optz) { - error ("Unable to create option table"); - exit (1); - } - - opt.progname = xbasename(argv[0]); - - optind = 0; - while((opt_char = getopt_long(argc, argv, opt_string, - optz, &option_index)) != -1) { - switch (opt_char) { - case '?': - fprintf(stderr, "Try \"slaunch --help\" for more " - "information\n"); - exit(1); - break; - case 'c': - opt.cpus_per_task_set = true; - opt.cpus_per_task = - _get_pos_int(optarg, "cpus-per-task"); - break; - case 'C': - opt.overcommit = true; - break; - case 'd': - opt.slurmd_debug = - _get_pos_int(optarg, "slurmd-debug"); - break; - case 'D': - xfree(opt.cwd); - opt.cwd = xstrdup(optarg); - break; - case 'e': - xfree(opt.local_efname); - if (strncasecmp(optarg, "none", (size_t) 4) == 0) - opt.local_efname = xstrdup("/dev/null"); - else - opt.local_efname = xstrdup(optarg); - break; - case 'F': - xfree(opt.task_layout); - tmp = slurm_read_hostfile(optarg, 0); - if (tmp != NULL) { - opt.task_layout = xstrdup(tmp); - free(tmp); - opt.task_layout_file_set = true; - } else { - error("\"%s\" is not a valid task layout file"); - exit(1); - } - break; - case 'E': - xfree(opt.remote_efname); - if (strncasecmp(optarg, "none", (size_t) 4) == 0) - opt.remote_efname = xstrdup("/dev/null"); - else - opt.remote_efname = xstrdup(optarg); - break; - case 'h': - _help(); - exit(0); - case 'i': - xfree(opt.local_ifname); - opt.local_ifname = xstrdup(optarg); - break; - case 'I': - xfree(opt.remote_ifname); - opt.remote_ifname = xstrdup(optarg); - break; - case 'J': - xfree(opt.job_name); - opt.job_name = xstrdup(optarg); - break; - case 'K': - opt.kill_bad_exit = true; - break; - case 'l': - opt.labelio = true; - break; - case 'L': - xfree(opt.nodelist_byid); - opt.nodelist_byid = xstrdup(optarg); - break; - case 'm': - opt.plane_size = 0; - opt.distribution = _verify_dist_type(optarg, - &opt.plane_size); - if (opt.distribution == SLURM_DIST_UNKNOWN) { - error("distribution type `%s' " - "is not recognized", optarg); - exit(1); - } - break; - case 'n': - opt.num_tasks_set = true; - opt.num_tasks = _get_pos_int(optarg, "number of tasks"); - break; - case 'N': - opt.num_nodes_set = true; - opt.num_nodes = _get_pos_int(optarg, "number of nodes"); - break; - case 'o': - xfree(opt.local_ofname); - if (strncasecmp(optarg, "none", (size_t) 4) == 0) - opt.local_ofname = xstrdup("/dev/null"); - else - opt.local_ofname = xstrdup(optarg); - break; - case 'O': - xfree(opt.remote_ofname); - if (strncasecmp(optarg, "none", (size_t) 4) == 0) - opt.remote_ofname = xstrdup("/dev/null"); - else - opt.remote_ofname = xstrdup(optarg); - break; - case 'q': - opt.quiet++; - break; - case 'r': - opt.relative_set = true; - opt.relative = _get_int(optarg, "relative start node", - false); - break; - case 'T': - xfree(opt.task_layout); - opt.task_layout_byid = xstrdup(optarg); - 
opt.task_layout_byid_set = true; - break; - case 'u': - opt.unbuffered = true; - break; - case 'v': - opt.verbose++; - break; - case 'V': - _print_version(); - exit(0); - break; - case 'w': - xfree(opt.nodelist); - opt.nodelist = xstrdup(optarg); -#ifdef HAVE_BG - info("\tThe nodelist option should only be used if\n" - "\tthe block you are asking for can be created.\n" - "\tIt should also include all the midplanes you\n" - "\twant to use, partial lists may not\n" - "\twork correctly.\n" - "\tPlease consult smap before using this option\n" - "\tor your job may be stuck with no way to run."); -#endif - break; - case 'W': - opt.max_wait = _get_pos_int(optarg, "wait"); - break; - case 'Y': - xfree(opt.task_layout); - opt.task_layout = xstrdup(optarg); - opt.task_layout_byname_set = true; - break; - case LONG_OPT_CPU_BIND: - if (_verify_cpu_bind(optarg, &opt.cpu_bind, - &opt.cpu_bind_type)) - exit(1); - break; - case LONG_OPT_MEM_BIND: - if (_verify_mem_bind(optarg, &opt.mem_bind, - &opt.mem_bind_type)) - exit(1); - break; - case LONG_OPT_CORE: - opt.core_type = core_format_type (optarg); - if (opt.core_type == CORE_INVALID) - error ("--core=\"%s\" Invalid -- ignoring.\n", - optarg); - break; - case LONG_OPT_MPI: - if (mpi_hook_client_init((char *)optarg) - == SLURM_ERROR) { - fatal("\"--mpi=%s\" -- long invalid MPI type, " - "--mpi=list for acceptable types.", - optarg); - } - break; - case LONG_OPT_JOBID: - opt.jobid = _get_pos_int(optarg, "jobid"); - opt.jobid_set = true; - break; - case LONG_OPT_UID: - opt.euid = uid_from_string (optarg); - if (opt.euid == (uid_t) -1) - fatal ("--uid=\"%s\" invalid", optarg); - break; - case LONG_OPT_GID: - opt.egid = gid_from_string (optarg); - if (opt.egid == (gid_t) -1) - fatal ("--gid=\"%s\" invalid", optarg); - break; - case LONG_OPT_DEBUG_TS: - /* simulate running under a parallel debugger */ - opt.debugger_test = true; - MPIR_being_debugged = 1; - break; - case LONG_OPT_USAGE: - _usage(); - exit(0); - case LONG_OPT_NETWORK: - xfree(opt.network); - opt.network = xstrdup(optarg); -#ifdef HAVE_AIX - setenv("SLURM_NETWORK", opt.network, 1); -#endif - break; - case LONG_OPT_PROPAGATE: - xfree(opt.propagate); - if (optarg) opt.propagate = xstrdup(optarg); - else opt.propagate = xstrdup("ALL"); - break; - case LONG_OPT_PROLOG: - xfree(opt.prolog); - opt.prolog = xstrdup(optarg); - break; - case LONG_OPT_EPILOG: - xfree(opt.epilog); - opt.epilog = xstrdup(optarg); - break; - case LONG_OPT_TASK_PROLOG: - xfree(opt.task_prolog); - opt.task_prolog = xstrdup(optarg); - break; - case LONG_OPT_TASK_EPILOG: - xfree(opt.task_epilog); - opt.task_epilog = xstrdup(optarg); - break; - case LONG_OPT_COMM_HOSTNAME: - xfree(opt.comm_hostname); - opt.comm_hostname = xstrdup(optarg); - break; - case LONG_OPT_MULTI: - opt.multi_prog = true; - break; - case LONG_OPT_PMI_THREADS: /* undocumented option */ - pmi_server_max_threads(_get_pos_int(optarg, - "pmi-threads")); - break; - case LONG_OPT_LIN_FILTER: - if (strcmp(optarg, "-") != 0) { - opt.local_input_filter = - _get_pos_int(optarg, - "slaunch-input-filter"); - } - opt.local_input_filter_set = true; - break; - case LONG_OPT_LOUT_FILTER: - if (strcmp(optarg, "-") != 0) { - opt.local_output_filter = - _get_pos_int(optarg, - "slaunch-output-filter"); - } - opt.local_output_filter_set = true; - break; - case LONG_OPT_LERR_FILTER: - if (strcmp(optarg, "-") != 0) { - opt.local_error_filter = - _get_pos_int(optarg, - "slaunch-error-filter"); - } - opt.local_error_filter_set = true; - break; - case LONG_OPT_RIN_FILTER: - 
opt.remote_input_filter = - _get_pos_int(optarg, "task-input-filter"); - error("task-input-filter not yet implemented"); - break; - case LONG_OPT_ROUT_FILTER: - opt.remote_output_filter = - _get_pos_int(optarg, "task-output-filter"); - error("task-output-filter not yet implemented"); - break; - case LONG_OPT_RERR_FILTER: - opt.remote_error_filter = - _get_pos_int(optarg, "task-error-filter"); - error("task-error-filter not yet implemented"); - break; - default: - if (spank_process_option (opt_char, optarg) < 0) { - exit (1); - } - } - } - - spank_option_table_destroy (optz); -} - -/* - * Use the supplied compiled regular expression "re" to convert a string - * into first and last numbers in the range. - * - * If there is only a single number in the "token" string, both - * "first" and "last" will be set to the same value. - * - * Returns 1 on success, 0 on failure - */ -static int _get_range(regex_t *re, char *token, int *first, int *last, - int num_nodes) -{ - size_t nmatch = 8; - regmatch_t pmatch[8]; - long f, l; - bool first_set = false; - char *ptr; - - *first = *last = 0; - memset(pmatch, 0, sizeof(regmatch_t)*nmatch); - if (regexec(re, token, nmatch, pmatch, 0) == REG_NOMATCH) { - error("\"%s\" is not a valid node index range", token); - return 0; - } - - /* convert the second, possibly only, number */ - ptr = (char *)(xstrndup(token + pmatch[3].rm_so, - pmatch[3].rm_eo - pmatch[3].rm_so)); - l = strtol(ptr, NULL, 10); - xfree(ptr); - if ((l >= 0 && l >= num_nodes) - || (l < 0 && l < -num_nodes)) { - error("\"%ld\" is beyond the range of the" - " %d available nodes", l, num_nodes); - return 0; - } - *last = (int)l; - *first = (int)l; - - /* convert the first number, if it exists */ - if (pmatch[2].rm_so != -1) { - first_set = true; - ptr = (char *)(xstrndup(token + pmatch[2].rm_so, - pmatch[2].rm_eo - pmatch[2].rm_so)); - f = strtol(ptr, NULL, 10); - xfree(ptr); - if ((f >= 0 && f >= num_nodes) - || (f < 0 && f < -num_nodes)) { - error("\"%ld\" is beyond the range of the" - " %d available nodes", f, num_nodes); - return 0; - } - *first = (int)f; - } - - return 1; -} - -/* - * Convert a node index string into a nodelist string. - * - * A node index string is a string of single numbers and/or ranges seperated - * by commas. For instance: 2,6,-3,8,-3-2,16,2--4,7-9,0 - * - * If both numbers in a range are of the same sign (both positive, or both - * negative), then the range counts directly from the first number to the - * second number; it will not wrap around the "end" of the node list. - * - * If the numbers in a range differ in sign, the range wraps around the - * end of the list of nodes. - * - * Examples: Given a node allocation of foo[1-16]: - * - * -2-3 (negative 2 to positive 3) becomes foo[15-16,1-4] - * 3--2 (positive 3 to negative 2) becomes foo[4,3,2,1,16,15] - * -3--2 becomes foo[14-15] - * -2--3 becomes foo[15,14] - * 2-3 becomes foo[3-4] - * 3-2 becomes foo[4,3] - */ -static char *_node_indices_to_nodelist(const char *indices_list, - resource_allocation_response_msg_t *alloc_info) -{ - char *list; - int list_len; - char *start, *end; - hostlist_t node_l, alloc_l; - regex_t range_re; - char *range_re_pattern = - "^[[:space:]]*" - "((-?[[:digit:]]+)[[:space:]]*-)?" 
/* optional start */ - "[[:space:]]*" - "(-?[[:digit:]]+)[[:space:]]*$"; - char *nodelist = NULL; - int i, idx; - - /* intialize the regular expression */ - if (regcomp(&range_re, range_re_pattern, REG_EXTENDED) != 0) { - error("Node index range regex compilation failed\n"); - return NULL; - } - - /* Now break the string up into tokens between commas, - feed each token into the regular expression, and make - certain that the range numbers are valid. */ - node_l = hostlist_create(NULL); - alloc_l = hostlist_create(alloc_info->node_list); - list = xstrdup(indices_list); - start = (char *)list; - list_len = strlen(list); - while (start != NULL && start < (list + list_len)) { - int first = 0; - int last = 0; - - /* Find the next index range in the list */ - end = strchr(start,','); - if (end == NULL) { - end = list + list_len; - } - *end = '\0'; - - /* Use the regexp to get the range numbers */ - if (!_get_range(&range_re, start, &first, &last, - hostlist_count(alloc_l))) { - goto cleanup; - } - - /* Now find all nodes in this range, and add them to node_l */ - if (first <= last) { - char *node; - for (i = first; i <= last; i++) { - if (i < 0) - idx = i + hostlist_count(alloc_l); - else - idx = i; - node = hostlist_nth(alloc_l, idx); - hostlist_push(node_l, node); - free(node); - } - } else { /* first > last */ - char *node; - for (i = first; i >= last; i--) { - if (i < 0) - idx = i + hostlist_count(alloc_l); - else - idx = i; - node = hostlist_nth(alloc_l, idx); - hostlist_push(node_l, node); - free(node); - } - } - start = end+1; - } - - i = 2048; - nodelist = NULL; - do { - i *= 2; - xrealloc(nodelist, i); - } while (hostlist_ranged_string(node_l, i, nodelist) == -1); - -cleanup: - xfree(list); - hostlist_destroy(alloc_l); - hostlist_destroy(node_l); - regfree(&range_re); - - return nodelist; -} - - -/* Load the multi_prog config file into argv, pass the entire file contents - * in order to avoid having to read the file on every node. 
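The wrap-around semantics of _node_indices_to_nodelist() above reduce to normalizing each signed index against the allocation size and then walking from first to last in whichever direction is needed. A self-contained sketch of that arithmetic, using a plain loop instead of hostlist_t and assuming the foo[1-16] allocation from the comment (norm_index is an illustrative name, not SLURM code):

    #include <stdio.h>

    /* Map a possibly negative node index onto 0..n-1, the same way the
     * removed code did (idx = i + n when i < 0). */
    static int norm_index(int i, int n)
    {
        return (i < 0) ? i + n : i;
    }

    int main(void)
    {
        const int n = 16;   /* allocation foo[1-16]; foo1 is index 0 */
        int i;

        /* "-2-3": first = -2, last = 3; first <= last so walk upward,
         * giving foo15 foo16 foo1 foo2 foo3 foo4, i.e. foo[15-16,1-4] */
        for (i = -2; i <= 3; i++)
            printf("foo%d ", norm_index(i, n) + 1);
        printf("\n");

        /* "3--2": first = 3, last = -2; first > last so walk downward,
         * giving foo4 foo3 foo2 foo1 foo16 foo15 */
        for (i = 3; i >= -2; i--)
            printf("foo%d ", norm_index(i, n) + 1);
        printf("\n");
        return 0;
    }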
We could parse - * the infomration here too for loading the MPIR records for TotalView */ -static void _load_multi(int *argc, char **argv) -{ - int config_fd, data_read = 0, i; - struct stat stat_buf; - char *data_buf; - - if ((config_fd = open(argv[0], O_RDONLY)) == -1) { - error("Could not open multi_prog config file %s", - argv[0]); - exit(1); - } - if (fstat(config_fd, &stat_buf) == -1) { - error("Could not stat multi_prog config file %s", - argv[0]); - exit(1); - } - if (stat_buf.st_size > 60000) { - error("Multi_prog config file %s is too large", - argv[0]); - exit(1); - } - data_buf = xmalloc(stat_buf.st_size); - while ((i = read(config_fd, &data_buf[data_read], stat_buf.st_size - - data_read)) != 0) { - if (i < 0) { - error("Error reading multi_prog config file %s", - argv[0]); - exit(1); - } else - data_read += i; - } - close(config_fd); - for (i=1; i<*argc; i++) - xfree(argv[i]); - argv[1] = data_buf; - *argc = 2; -} - -/* - * _opt_args() : set options via commandline args and popt - */ -static void _opt_args(int argc, char **argv) -{ - int i; - char **rest = NULL; - - set_options(argc, argv); - -#ifdef HAVE_AIX - if (opt.network == NULL) { - opt.network = "us,sn_all,bulk_xfer"; - setenv("SLURM_NETWORK", opt.network, 1); - } -#endif - - opt.argc = 0; - if (optind < argc) { - rest = argv + optind; - while (rest[opt.argc] != NULL) - opt.argc++; - } - opt.argv = (char **) xmalloc((opt.argc + 1) * sizeof(char *)); - for (i = 0; i < opt.argc; i++) - opt.argv[i] = xstrdup(rest[i]); - opt.argv[i] = NULL; /* End of argv's (for possible execv) */ - - if (opt.multi_prog) { - if (opt.argc < 1) { - error("configuration file not specified"); - exit(1); - } - _load_multi(&opt.argc, opt.argv); - - } - else if (opt.argc > 0) { - char *fullpath; - - if ((fullpath = _search_path(opt.argv[0], R_OK|X_OK))) { - xfree(opt.argv[0]); - opt.argv[0] = fullpath; - } - } - - if (!_opt_verify()) - exit(1); -} - -static bool -_allocation_lookup_env(resource_allocation_response_msg_t **alloc_info) -{ - char *ptr, *val; - resource_allocation_response_msg_t *alloc; - long l; - - alloc = (resource_allocation_response_msg_t *)xmalloc( - sizeof(resource_allocation_response_msg_t)); - - /* get SLURM_JOB_ID */ - val = getenv("SLURM_JOB_ID"); - if (val == NULL) - goto fail1; - l = strtol(val, &ptr, 10); - if (ptr == val || !xstring_is_whitespace(ptr) || l < 0) - goto fail1; - alloc->job_id = (uint32_t)l; - - /* get SLURM_JOB_NUM_NODES */ - val = getenv("SLURM_JOB_NUM_NODES"); - if (val == NULL) - goto fail1; - l = strtol(val, &ptr, 10); - if (ptr == val || !xstring_is_whitespace(ptr) || l < 1) - goto fail1; - alloc->node_cnt = (uint16_t)l; - - /* get SLURM_JOB_NODELIST */ - val = getenv("SLURM_JOB_NODELIST"); - if (val == NULL) - goto fail1; - alloc->node_list = xstrdup(val); - - /* get SLURM_JOB_CPUS_PER_NODE */ - val = getenv("SLURM_JOB_CPUS_PER_NODE"); - if (val == NULL) - goto fail2; - if (!_set_cpus_per_node(val, alloc)) - goto fail2; - - *alloc_info = alloc; - return true; - -fail2: - xfree(alloc->node_list); -fail1: - xfree(alloc); - *alloc_info = NULL; - return false; -} - -static bool -_set_allocation_info(resource_allocation_response_msg_t **alloc_info) -{ - bool env_flag; - - /* First, try to set the allocation info from the environment */ - env_flag = _allocation_lookup_env(alloc_info); - - /* If that fails, we need to try to get the allocation info - * from the slurmctld. We also need to talk to the slurmctld if - * opt.job_id is set and does not match the information from the - * environment variables. 
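For context on the SLURM_JOB_CPUS_PER_NODE value consumed by _allocation_lookup_env() above: it is a compressed list in which "N(xR)" stands for R consecutive nodes with N CPUs each and a bare "N" stands for a single such node. A minimal expansion sketch, independent of the removed _parse_cpu_rep_pair()/_set_cpus_per_node() helpers, assuming a well-formed input (the sample value is hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        /* hypothetical value: two 8-CPU nodes followed by one 4-CPU node */
        const char *str = "8(x2),4";
        const char *p = str;

        while (*p != '\0') {
            char *end;
            long cpus = strtol(p, &end, 10);
            long reps = 1;

            if (end[0] == '(' && end[1] == 'x') {
                reps = strtol(end + 2, &end, 10);
                end++;                     /* skip the closing ')' */
            }
            printf("%ld node(s) with %ld CPUs\n", reps, cpus);

            p = (*end == ',') ? end + 1 : end;
        }
        return 0;
    }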
- */ - if (!env_flag || (env_flag - && opt.jobid_set - && opt.jobid != (*alloc_info)->job_id)) { - verbose("Need to look up allocation info with the controller"); - if (slurm_allocation_lookup_lite(opt.jobid, alloc_info) < 0) { - error("Unable to look up job ID %u: %m", opt.jobid); - return false; - } - } else if (!env_flag && !opt.jobid_set) { - error("A job ID MUST be specified on the command line,"); - error("or through the SLAUNCH_JOBID environment variable."); - return false; - } - - return true; -} - - -/* - * _opt_verify : perform some post option processing verification - * - */ -static bool _opt_verify(void) -{ - bool verified = true; - hostlist_t task_l = NULL; - hostlist_t node_l = NULL; - resource_allocation_response_msg_t *alloc_info; - - if (!_set_allocation_info(&alloc_info)) { - /* error messages printed under _set_allocation_info */ - exit(1); - } - - /* - * Now set default options based on allocation info. - */ - if (!opt.jobid_set) - opt.jobid = alloc_info->job_id; - if (!opt.num_nodes_set) - opt.num_nodes = alloc_info->node_cnt; - - if (opt.task_layout_byid_set && opt.task_layout == NULL) { - opt.task_layout = _node_indices_to_nodelist( - opt.task_layout_byid, alloc_info); - if (opt.task_layout == NULL) - verified = false; - } - if (opt.nodelist_byid != NULL && opt.nodelist == NULL) { - hostlist_t hl; - char *nodenames; - - nodenames = _node_indices_to_nodelist(opt.nodelist_byid, - alloc_info); - if (nodenames == NULL) { - verified = false; - } else { - hl = hostlist_create(nodenames); - hostlist_uniq(hl); - /* assumes that the sorted unique hostlist must be a - shorter string than unsorted (or equal lenght) */ - hostlist_ranged_string(hl, strlen(nodenames)+1, - nodenames); - opt.nodelist = nodenames; - } - } - - /* - * Now, all the rest of the checks and setup. 
- */ - if (opt.task_layout_byid_set && opt.task_layout_file_set) { - error("-T/--task-layout-byid and -F/--task-layout-file" - " are incompatible."); - verified = false; - } - if (opt.task_layout_byname_set && opt.task_layout_file_set) { - error("-Y/--task-layout-byname and -F/--task-layout-file" - " are incompatible."); - verified = false; - } - if (opt.task_layout_byname_set && opt.task_layout_byid_set) { - error("-Y/--task-layout-byname and -T/--task-layout-byid" - " are incompatible."); - verified = false; - } - - if (opt.nodelist && (opt.task_layout_byid_set - || opt.task_layout_byname_set - || opt.task_layout_file_set)) { - error("-w/--nodelist is incompatible with task layout" - " options."); - verified = false; - } - if (opt.nodelist && opt.task_layout_file_set) { - error("Only one of -w/--nodelist or -F/--task-layout-file" - " may be used."); - verified = false; - } - if (opt.num_nodes_set && (opt.task_layout_byid_set - || opt.task_layout_byname_set - || opt.task_layout_file_set)) { - error("-N/--node is incompatible with task layout options."); - verified = false; - } - - if (opt.task_layout != NULL) { - task_l = hostlist_create(opt.task_layout); - if (opt.num_tasks_set) { - if (opt.num_tasks < hostlist_count(task_l)) { - /* shrink the hostlist */ - int i, shrink; - char buf[8192]; - shrink = hostlist_count(task_l) - opt.num_tasks; - for (i = 0; i < shrink; i++) - free(hostlist_pop(task_l)); - xfree(opt.task_layout); - hostlist_ranged_string(task_l, 8192, buf); - opt.task_layout = xstrdup(buf); - } else if (opt.num_tasks > hostlist_count(task_l)) { - error("Asked for more tasks (%d) than listed" - " in the task layout (%d)", - opt.num_tasks, hostlist_count(task_l)); - verified = false; - } else { - /* they are equal, no problemo! */ - } - } else { - opt.num_tasks = hostlist_count(task_l); - opt.num_tasks_set = true; - } - node_l = hostlist_copy(task_l); - hostlist_uniq(node_l); - opt.num_nodes = hostlist_count(node_l); - opt.num_nodes_set = true; - /* task_layout parameters implicitly trigger - arbitrary task layout mode */ - opt.distribution = SLURM_DIST_ARBITRARY; - - } else if (opt.nodelist != NULL) { - hostlist_t tmp; - tmp = hostlist_create(opt.nodelist); - node_l = hostlist_copy(tmp); - hostlist_uniq(node_l); - if (hostlist_count(node_l) != hostlist_count(tmp)) { - error("Node names may only appear once in the" - " nodelist (-w/--nodelist)"); - verified = false; - } - hostlist_destroy(tmp); - - if (opt.num_nodes_set - && (opt.num_nodes != hostlist_count(node_l))) { - error("You asked for %d nodes (-N/--nodes), but there" - " are %d nodes in the nodelist (-w/--nodelist)", - opt.num_nodes, hostlist_count(node_l)); - verified = false; - } else { - opt.num_nodes = hostlist_count(node_l); - opt.num_nodes_set = true; - } - } - - if (opt.overcommit && opt.cpus_per_task_set) { - error("--overcommit/-C and --cpus-per-task/-c are incompatible"); - verified = false; - } - - if (!opt.num_nodes_set && opt.num_tasks_set - && opt.num_tasks < opt.num_nodes) - opt.num_nodes = opt.num_tasks; - - if (!opt.num_tasks_set) { - if (opt.nodelist != NULL) { - opt.num_tasks = hostlist_count(node_l); - } else { - opt.num_tasks = opt.num_nodes; - } - } - - if (opt.quiet && opt.verbose) { - error ("don't specify both --verbose (-v) and --quiet (-q)"); - verified = false; - } - - if (opt.relative_set) { - if (opt.nodelist != NULL) { - error("-r/--relative not allowed with" - " -w/--nodelist."); - verified = false; - } - - if (opt.task_layout_byid_set) { - error("-r/--relative not allowed with" - " 
-T/--task-layout-byid"); - verified = false; - } - - if (opt.task_layout_byname_set) { - error("-r/--relative not allowed with" - " -Y/--task-layout-byname"); - verified = false; - } - - if (opt.task_layout_file_set) { - error("-r/--relative not allowed with" - " -F/--task-layout-file"); - verified = false; - } - - /* convert a negative relative number into a positive number - that the slurmctld will accept */ - if (opt.relative < 0 && opt.relative >= -(alloc_info->node_cnt)) - opt.relative += alloc_info->node_cnt; - } - if ((opt.job_name == NULL) && (opt.argc > 0)) - opt.job_name = _base_name(opt.argv[0]); - - if (opt.argc == 0) { - error("must supply remote command"); - verified = false; - } - - - /* check for realistic arguments */ - if (opt.num_tasks <= 0) { - error("%s: invalid number of tasks (-n %d)", - opt.progname, opt.num_tasks); - verified = false; - } - - if (opt.cpus_per_task <= 0) { - error("%s: invalid number of cpus per task (-c %d)\n", - opt.progname, opt.cpus_per_task); - verified = false; - } - - if (opt.num_nodes <= 0) { - error("%s: invalid number of nodes (-N %d)\n", - opt.progname, opt.num_nodes); - verified = false; - } - - core_format_enable (opt.core_type); - - if (opt.labelio && opt.unbuffered) { - error("Do not specify both -l (--label) and " - "-u (--unbuffered)"); - exit(1); - } - - if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid)) - opt.uid = opt.euid; - - if ((opt.egid != (gid_t) -1) && (opt.egid != opt.gid)) - opt.gid = opt.egid; - - if ((opt.egid != (gid_t) -1) && (opt.egid != opt.gid)) - opt.gid = opt.egid; - - if (opt.propagate && parse_rlimits( opt.propagate, PROPAGATE_RLIMITS)) { - error( "--propagate=%s is not valid.", opt.propagate ); - verified = false; - } - - /* FIXME - figure out the proper way to free alloc_info */ - hostlist_destroy(task_l); - hostlist_destroy(node_l); - return verified; -} - -static void -_freeF(void *data) -{ - xfree(data); -} - -static List -_create_path_list(void) -{ - List l; - char *path = xstrdup(getenv("PATH")); - char *c, *lc; - - if (path == NULL) { - error("No PATH environment variable (or empty PATH)"); - return NULL; - } - - l = list_create(_freeF); - c = lc = path; - - while (*c != '\0') { - if (*c == ':') { - /* nullify and push token onto list */ - *c = '\0'; - if (lc != NULL && strlen(lc) > 0) - list_append(l, xstrdup(lc)); - lc = ++c; - } else - c++; - } - - if (strlen(lc) > 0) - list_append(l, xstrdup(lc)); - - xfree(path); - - return l; -} - -static char * -_search_path(char *cmd, int access_mode) -{ - List l = _create_path_list(); - ListIterator i = NULL; - char *path, *fullpath = NULL; - - if (l == NULL) - return NULL; - - if ((cmd[0] == '.' || cmd[0] == '/') - && (access(cmd, access_mode) == 0 ) ) { - if (cmd[0] == '.') - xstrfmtcat(fullpath, "%s/", opt.cwd); - xstrcat(fullpath, cmd); - goto done; - } - - i = list_iterator_create(l); - while ((path = list_next(i))) { - xstrfmtcat(fullpath, "%s/%s", path, cmd); - - if (access(fullpath, access_mode) == 0) - goto done; - - xfree(fullpath); - fullpath = NULL; - } -done: - list_destroy(l); - return fullpath; -} - - -static char * -print_remote_command() -{ - int i; - char *buf = NULL; - char *space; - - for (i = 0; i < opt.argc; i++) { - if (i == opt.argc-1) { - space = ""; - } else { - space = " "; - } - - xstrfmtcat(buf, "\"%s\"%s", opt.argv[i], space); - } - - return buf; -} - -#define tf_(b) (b == true) ? 
"true" : "false" - -static void _opt_list() -{ - char *str; - - info("defined options for program \"%s\"", opt.progname); - info("--------------- ---------------------"); - - info("user : \"%s\"", opt.user); - info("uid : %ld", (long) opt.uid); - info("gid : %ld", (long) opt.gid); - info("cwd : %s", opt.cwd); - info("num_tasks : %d %s", opt.num_tasks, - opt.num_tasks_set ? "(set)" : "(default)"); - info("cpus_per_task : %d %s", opt.cpus_per_task, - opt.cpus_per_task_set ? "(set)" : "(default)"); - info("nodes : %d %s", - opt.num_nodes, opt.num_nodes_set ? "(set)" : "(default)"); - info("jobid : %u %s", opt.jobid, - opt.jobid_set ? "(set)" : "(default)"); - info("job name : \"%s\"", opt.job_name); - info("distribution : %s", - format_task_dist_states(opt.distribution)); - info("cpu_bind : %s", - opt.cpu_bind == NULL ? "default" : opt.cpu_bind); - info("mem_bind : %s", - opt.mem_bind == NULL ? "default" : opt.mem_bind); - info("core format : %s", core_format_name (opt.core_type)); - info("verbose : %d", opt.verbose); - info("slurmd_debug : %d", opt.slurmd_debug); - info("label output : %s", tf_(opt.labelio)); - info("unbuffered IO : %s", tf_(opt.unbuffered)); - info("overcommit : %s", tf_(opt.overcommit)); - info("wait : %d", opt.max_wait); - info("required nodes : %s", opt.nodelist); - info("network : %s", opt.network); - info("propagate : %s", - opt.propagate == NULL ? "NONE" : opt.propagate); - info("prolog : %s", opt.prolog); - info("epilog : %s", opt.epilog); - info("task_prolog : %s", opt.task_prolog); - info("task_epilog : %s", opt.task_epilog); - info("comm_hostname : %s", opt.comm_hostname); - info("multi_prog : %s", opt.multi_prog ? "yes" : "no"); - info("plane_size : %u", opt.plane_size); - str = print_remote_command(); - info("remote command : %s", str); - xfree(str); - -} - -static void _usage(void) -{ - printf( -"Usage: slaunch [-N nnodes] [-n ntasks] [-i in] [-o out] [-e err]\n" -" [-c ncpus] [-r n] [-t minutes]\n" -" [-D path] [--overcommit] [--no-kill]\n" -" [--label] [--unbuffered] [-m dist] [-J jobname]\n" -" [--jobid=id] [--batch] [--verbose] [--slurmd_debug=#]\n" -" [--core=type] [-W sec]\n" -" [--mpi=type]\n" -" [--kill-on-bad-exit] [--propagate[=rlimits] ]\n" -" [--cpu_bind=...] [--mem_bind=...]\n" -" [--prolog=fname] [--epilog=fname]\n" -" [--task-prolog=fname] [--task-epilog=fname]\n" -" [--comm-hostname=<hostname|address>] [--multi-prog]\n" -" [-w hosts...] [-L hostids...] executable [args...]\n"); -} - -static void _help(void) -{ - slurm_ctl_conf_t *conf; - - printf ( -"Usage: slaunch [OPTIONS...] 
executable [args...]\n" -"\n" -"Parallel run options:\n" -" -n, --ntasks=ntasks number of tasks to run\n" -" -N, --nodes=N number of nodes on which to run\n" -" -c, --cpus-per-task=ncpus number of cpus required per task\n" -" -i, --slaunch-input=file slaunch will read stdin from \"file\"\n" -" -o, --slaunch-output=file slaunch will write stdout to \"file\"\n" -" -e, --slaunch-error=file slaunch will write stderr to \"file\"\n" -" --slaunch-input-filter=taskid send stdin to only the specified task\n" -" --slaunch-output-filter=taskid only print stdout from the specified task\n" -" --slaunch-error-filter=taskid only print stderr from the specified task\n" -" -I, --task-input=file connect task stdin to \"file\"\n" -" -O, --task-output=file connect task stdout to \"file\"\n" -" -E, --task-error=file connect task stderr to \"file\"\n" -" -r, --relative=n run job step relative to node n of allocation\n" -" -t, --time=minutes time limit\n" -" -D, --workdir=path the working directory for the launched tasks\n" -" -C, --overcommit overcommit resources\n" -" -k, --no-kill do not kill job on node failure\n" -" -K, --kill-on-bad-exit kill the job if any task terminates with a\n" -" non-zero exit code\n" -" -l, --label prepend task number to lines of stdout/err\n" -" -u, --unbuffered do not line-buffer stdout/err\n" -" -m, --distribution=type distribution method for processes to nodes\n" -" (type = block|cyclic|hostfile)\n" -" -J, --job-name=jobname name of job\n" -" --jobid=id run under already allocated job\n" -" -W, --wait=sec seconds to wait after first task exits\n" -" before killing job\n" -" -v, --verbose verbose mode (multiple -v's increase verbosity)\n" -" -q, --quiet quiet mode (suppress informational messages)\n" -" -d, --slurmd-debug=level slurmd debug level\n" -" --core=type change default corefile format type\n" -" (type=\"list\" to list of valid formats)\n" -" --propagate[=rlimits] propagate all [or specific list of] rlimits\n" -" --mpi=type specifies version of MPI to use\n" -" --prolog=program run \"program\" before launching job step\n" -" --epilog=program run \"program\" after launching job step\n" -" --task-prolog=program run \"program\" before launching task\n" -" --task-epilog=program run \"program\" after launching task\n" -" --comm-hostname=hostname hostname for PMI communications with slaunch\n" -" --multi-prog if set the program name specified is the\n" -" configuration specificaiton for multiple programs\n" -" -w, --nodelist-byname=hosts... request a specific list of hosts\n" -" -L, --nodelist-byid=hosts... 
request a specific list of hosts\n"); - conf = slurm_conf_lock(); - if (conf->task_plugin != NULL - && strcasecmp(conf->task_plugin, "task/affinity") == 0) { - printf( - " --cpu_bind= Bind tasks to CPUs\n" - " (see \"--cpu_bind=help\" for options)\n" - " --mem_bind= Bind memory to locality domains (ldom)\n" - " (see \"--mem_bind=help\" for options)\n" - ); - } - slurm_conf_unlock(); - spank_print_options (stdout, 6, 30); - printf("\n"); - - printf( -#ifdef HAVE_AIX /* AIX/Federation specific options */ - "AIX related options:\n" - " --network=type communication protocol to be used\n" - "\n" -#endif - - "Help options:\n" - " -h, --help show this help message\n" - " --usage display brief usage message\n" - "\n" - "Other options:\n" - " -V, --version output version information and exit\n" - "\n" - ); - -} diff --git a/src/slaunch/opt.h b/src/slaunch/opt.h deleted file mode 100644 index 1d6c18a07..000000000 --- a/src/slaunch/opt.h +++ /dev/null @@ -1,165 +0,0 @@ -/*****************************************************************************\ - * opt.h - definitions for slaunch option processing - * $Id: opt.h 10574 2006-12-15 23:38:29Z jette $ - ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#ifndef _HAVE_SLAUNCH_OPT_H -#define _HAVE_SLAUNCH_OPT_H - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <time.h> -#include <sys/types.h> -#include <unistd.h> -#include <slurm/slurm.h> - -#include "src/common/macros.h" /* true and false */ -#include "src/common/env.h" -#include "src/slaunch/core-format.h" -//#include "src/common/mpi.h" - -#define MAX_USERNAME 9 - - -/* global variables relating to user options */ - -#define format_task_dist_states(t) (t == SLURM_DIST_BLOCK) ? "block" : \ - (t == SLURM_DIST_CYCLIC) ? "cyclic" : \ - (t == SLURM_DIST_PLANE) ? "plane" : \ - (t == SLURM_DIST_CYCLIC_CYCLIC) ? "cyclic:cyclic" : \ - (t == SLURM_DIST_CYCLIC_BLOCK) ? "cyclic:block" : \ - (t == SLURM_DIST_BLOCK_CYCLIC) ? "block:cyclic" : \ - (t == SLURM_DIST_BLOCK_BLOCK) ? "block:block" : \ - (t == SLURM_DIST_ARBITRARY) ? "arbitrary" : \ - "unknown" - -#define format_io_t(t) (t == IO_ONE) ? "one" : (t == IO_ALL) ? 
\ - "all" : "per task" - -typedef struct slaunch_options { - - char *progname; /* argv[0] of this program or - * configuration file if multi_prog */ - bool multi_prog; /* multiple programs to execute */ - char user[MAX_USERNAME];/* local username */ - uid_t uid; /* local uid */ - gid_t gid; /* local gid */ - uid_t euid; /* effective user --uid=user */ - gid_t egid; /* effective group --gid=group */ - char *cwd; /* current working directory */ - - int num_tasks; /* --ntasks=n, -n n */ - bool num_tasks_set; /* true if ntasks explicitly set */ - uint32_t plane_size; /* lllp distribution -> plane_size for - * when -m plane=<# of lllp per - * plane> */ - int cpus_per_task; /* --cpus-per-task=n, -c n */ - bool cpus_per_task_set; /* true if cpus_per_task explicitly set */ - int num_nodes; /* --nodes=n, -N n */ - bool num_nodes_set; /* true if num_nodes explicitly set */ - cpu_bind_type_t cpu_bind_type; /* --cpu_bind= */ - char *cpu_bind; /* binding map for map/mask_cpu */ - mem_bind_type_t mem_bind_type; /* --mem_bind= */ - char *mem_bind; /* binding map for map/mask_mem */ - enum task_dist_states - distribution; /* --distribution=, -m dist */ - char *job_name; /* --job-name=, -J name */ - unsigned int jobid; /* --jobid=jobid */ - bool jobid_set; /* true of jobid explicitly set */ - char *mpi_type; /* --mpi=type */ - - char *local_ofname; /* --local-output, -o filename */ - char *local_ifname; /* --local-input, -i filename */ - char *local_efname; /* --local-error, -e filename */ - uint32_t local_input_filter; - bool local_input_filter_set; - uint32_t local_output_filter; - bool local_output_filter_set; - uint32_t local_error_filter; - bool local_error_filter_set; - char *remote_ofname; /* --remote-output filename */ - char *remote_ifname; /* --remote-input filename */ - char *remote_efname; /* --remote-error filename */ - uint32_t remote_input_filter; - uint32_t remote_output_filter; - uint32_t remote_error_filter; - - int slurmd_debug; /* --slurmd-debug, -D */ - core_format_t core_type;/* --core= */ - - bool labelio; /* --label-output, -l */ - bool unbuffered; /* --unbuffered, -u */ - bool overcommit; /* --overcommit, -O */ - bool no_kill; /* --no-kill, -k */ - bool kill_bad_exit; /* --kill-on-bad-exit, -K */ - int max_wait; /* --wait, -W */ - int quiet; - int verbose; - bool debugger_test; /* --debugger-test */ - char *propagate; /* --propagate[=RLIMIT_CORE,...]*/ - char *task_epilog; /* --task-epilog= */ - char *task_prolog; /* --task-prolog= */ - char *nodelist; /* -w,--nodelist=node1,node2,...*/ - char *nodelist_byid; - char *task_layout; - char *task_layout_byid; - bool task_layout_byid_set; - bool task_layout_byname_set; - bool task_layout_file_set; - int relative; /* --relative -r N */ - bool relative_set; /* true if --relative set explicitly */ - char *network; /* --network= */ - - char *prolog; /* --prolog */ - char *epilog; /* --epilog */ - char *comm_hostname; /* --comm-hostname */ - int argc; /* length of argv array */ - char **argv; /* left over on command line */ -} opt_t; - -opt_t opt; - -/* return whether any constraints were specified by the user - * (if new constraints are added above, might want to add them to this - * macro or move this to a function if it gets a little complicated) - */ -#define constraints_given() opt.mincpus != -1 || opt.realmem != -1 ||\ - opt.tmpdisk != -1 || opt.contiguous - -/* process options: - * 1. set defaults - * 2. update options with env vars - * 3. update options with commandline args - * 4. 
perform some verification that options are reasonable - */ -int initialize_and_process_args(int argc, char *argv[]); - -/* set options based upon commandline args */ -void set_options(const int argc, char **argv); - - -#endif /* _HAVE_SLAUNCH_OPT_H */ diff --git a/src/slaunch/sigstr.c b/src/slaunch/sigstr.c deleted file mode 100644 index 7e9dec5b2..000000000 --- a/src/slaunch/sigstr.c +++ /dev/null @@ -1,54 +0,0 @@ -/*****************************************************************************\ - * src/slaunch/sigstr.c - Function to convert signal to description - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by AUTHOR <AUTHOR@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <string.h> -#include <sys/wait.h> - -#include "src/common/xassert.h" - -/* - * Get a definition for strsignal : - */ -#if defined (HAVE_DECL_STRSIGNAL) && !HAVE_DECL_STRSIGNAL -# ifndef strsignal - extern char *strsignal(int); -# endif -#endif /* defined HAVE_DECL_STRSIGNAL && !HAVE_DECL_STRSIGNAL */ - -char * -sigstr(int status) -{ - xassert(WIFSIGNALED(status)); - - return strsignal(WTERMSIG(status)); -} - - - diff --git a/src/slaunch/sigstr.h b/src/slaunch/sigstr.h deleted file mode 100644 index 510c588b2..000000000 --- a/src/slaunch/sigstr.h +++ /dev/null @@ -1,39 +0,0 @@ -/*****************************************************************************\ - * src/slaunch/sigstr.h - - * $Id: sigstr.h 10574 2006-12-15 23:38:29Z jette $ - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. 
- * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ - -#ifndef _SLAUNCH_SIGSTR_H -#define _SLAUNCH_SIGSTR_H - -/* - * Returns a descriptive string regarding the signal given in the - * exit status 'status.' - * - * WIFSIGNALED(status) must be true in order to call this function! - */ -char *sigstr(int status); - -#endif /* !_SLAUNCH_SIGSTR_H */ diff --git a/src/slaunch/slaunch.c b/src/slaunch/slaunch.c deleted file mode 100644 index 14000581f..000000000 --- a/src/slaunch/slaunch.c +++ /dev/null @@ -1,906 +0,0 @@ -/*****************************************************************************\ - * slaunch.c - user command for launching parallel jobs - * - * $Id: slaunch.c 11250 2007-03-27 17:29:45Z jette $ - ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Christopher J. Morrone <morrone2@llnl.gov>, - * Mark Grondona <grondona@llnl.gov>, et. al. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-\*****************************************************************************/ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#ifdef WITH_PTHREADS -# include <pthread.h> -#endif - -#ifdef HAVE_AIX -# undef HAVE_UNSETENV -# include <sys/checkpnt.h> -#endif -#ifndef HAVE_UNSETENV -# include "src/common/unsetenv.h" -#endif - -#include <sys/resource.h> -#include <sys/stat.h> -#include <sys/time.h> -#include <sys/types.h> -#include <sys/utsname.h> -#include <sys/wait.h> -#include <ctype.h> -#include <fcntl.h> -#include <pwd.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <signal.h> -#include <unistd.h> -#include <fcntl.h> -#include <grp.h> - -#include <slurm/slurm.h> - -#include "src/common/macros.h" -#include "src/common/fd.h" -#include "src/common/log.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/switch.h" -#include "src/common/xmalloc.h" -#include "src/common/xsignal.h" -#include "src/common/xstring.h" -#include "src/common/net.h" -#include "src/common/mpi.h" -#include "src/common/slurm_rlimits_info.h" -#include "src/common/plugstack.h" -#include "src/common/env.h" - -#include "src/slaunch/opt.h" -#include "src/slaunch/sigstr.h" -#include "src/slaunch/attach.h" -#include "src/slaunch/slaunch.h" -#include "src/slaunch/fname.h" -#include "src/slaunch/multi_prog.h" -#include "src/api/pmi_server.h" - -/* FIXME doesn't belong here, we don't want to expose ctx contents */ -#include "src/api/step_ctx.h" - -extern char **environ; -slurm_step_ctx step_ctx; -int global_rc; -struct { - bitstr_t *start_success; - bitstr_t *start_failure; - bitstr_t *finish_normal; - bitstr_t *finish_abnormal; -} task_state; - -/* - * declaration of static funcs - */ -static void _set_prio_process_env(char ***env); -static int _set_rlimit_env(char ***env); -static int _set_umask_env(char ***env); -static char **_init_task_environment(void); -#if 0 -static int _become_user(uid_t uid, gid_t gid); -#endif -static void _run_slaunch_prolog(char **env); -static void _run_slaunch_epilog(char **env); -static int _run_slaunch_script(char *script, char **env); -static void _setup_local_fds(slurm_step_io_fds_t *cio_fds, slurm_step_ctx ctx); -static void _task_start(launch_tasks_response_msg_t *msg); -static void _task_finish(task_exit_msg_t *msg); -static void _task_state_struct_init(int num_tasks); -static void _task_state_struct_print(void); -static void _task_state_struct_free(void); -static void _mpir_init(int num_tasks); -static void _mpir_cleanup(void); -static void _mpir_set_executable_names(const char *executable_name); -static void _mpir_dump_proctable(void); -static void _ignore_signal(int signo); -static void _exit_on_signal(int signo); -static int _call_spank_local_user(slurm_step_ctx step_ctx, - slurm_step_launch_params_t *step_params); -static void _define_symbols(void); - -int slaunch(int argc, char **argv) -{ - log_options_t logopt = LOG_OPTS_STDERR_ONLY; - slurm_step_ctx_params_t ctx_params[1]; - slurm_step_launch_params_t launch_params[1]; - slurm_step_launch_callbacks_t callbacks[1]; - char **env; - uint32_t job_id, step_id; - int i; - - log_init(xbasename(argv[0]), logopt, 0, NULL); - - xsignal(SIGQUIT, _ignore_signal); - xsignal(SIGPIPE, _ignore_signal); - xsignal(SIGUSR1, _ignore_signal); - xsignal(SIGUSR2, _ignore_signal); - - /* Initialize plugin stack, read options from plugins, etc. */ - if (spank_init(NULL) < 0) { - fatal("Plug-in initialization failed"); - _define_symbols(); - } - - /* Be sure to call spank_fini when slaunch exits. 
*/ - if (atexit((void (*) (void)) spank_fini) < 0) - error("Failed to register atexit handler for plugins: %m"); - - /* set default options, process commandline arguments, and - * verify some basic values - */ - if (initialize_and_process_args(argc, argv) < 0) { - error ("slaunch initialization failed"); - exit (1); - } - - /* reinit log with new verbosity (if changed by command line) */ - if (opt.verbose || opt.quiet) { - logopt.stderr_level += opt.verbose; - logopt.stderr_level -= opt.quiet; - logopt.prefix_level = 1; - log_alter(logopt, 0, NULL); - } - debug("slaunch pid %d", getpid()); - - /* - * Create a job step context. - */ - slurm_step_ctx_params_t_init(ctx_params); - ctx_params->job_id = opt.jobid; - totalview_jobid = NULL; - xstrfmtcat(totalview_jobid, "%u", ctx_params->job_id); - ctx_params->node_count = opt.num_nodes; - ctx_params->task_count = opt.num_tasks; - if (opt.cpus_per_task_set) { - ctx_params->cpu_count = opt.num_tasks * opt.cpus_per_task; - } else if (opt.overcommit) { - ctx_params->cpu_count = 0; - } else { - ctx_params->cpu_count = opt.num_tasks; - } - ctx_params->relative = opt.relative; - switch (opt.distribution) { - case SLURM_DIST_BLOCK: - case SLURM_DIST_ARBITRARY: - case SLURM_DIST_CYCLIC: - case SLURM_DIST_CYCLIC_CYCLIC: - case SLURM_DIST_CYCLIC_BLOCK: - case SLURM_DIST_BLOCK_CYCLIC: - case SLURM_DIST_BLOCK_BLOCK: - ctx_params->task_dist = opt.distribution; - break; - case SLURM_DIST_PLANE: - ctx_params->task_dist = SLURM_DIST_PLANE; - ctx_params->plane_size = opt.plane_size; - break; - default: - ctx_params->task_dist = (ctx_params->task_count <= - ctx_params->node_count) - ? SLURM_DIST_CYCLIC : SLURM_DIST_BLOCK; - break; - - } - ctx_params->overcommit = opt.overcommit; - - /* SLURM overloads the node_list parameter in the - * job_step_create_request_msg_t. It can either be a node list, - * or when distribution type is SLURM_DIST_ARBITRARY, it is a list - * of repeated nodenames which represent to which node each task - * is assigned. - */ - if (opt.task_layout_byid_set - || opt.task_layout_byname_set - || opt.task_layout_file_set) { - ctx_params->node_list = opt.task_layout; - } else if (opt.nodelist != NULL) { - ctx_params->node_list = opt.nodelist; - } else { - ctx_params->node_list = NULL; /* let the controller pick nodes */ - } - - ctx_params->network = opt.network; - ctx_params->name = opt.job_name; - - for (i=0; ;i++) { - step_ctx = slurm_step_ctx_create(ctx_params); - if (step_ctx != NULL) - break; - if (slurm_get_errno() != ESLURM_DISABLED) { - error("Failed creating job step context: %m"); - exit(1); - } - if (i == 0) - info("Job step creation temporarily disabled, retrying"); - sleep(MIN((i*10), 60)); - } - - /* Now we can register a few more signal handlers. It - * is only safe to have _exit_on_signal call - * slurm_step_launch_abort after the the step context - * has been created. - */ - xsignal(SIGHUP, _exit_on_signal); - xsignal(SIGINT, _exit_on_signal); - xsignal(SIGTERM, _exit_on_signal); - - /* set up environment variables */ - env = _init_task_environment(); - - /* - * Use the job step context to launch the tasks. - */ - _task_state_struct_init(opt.num_tasks); - slurm_step_launch_params_t_init(launch_params); - launch_params->gid = opt.gid; - launch_params->argc = opt.argc; - launch_params->argv = opt.argv; - launch_params->multi_prog = opt.multi_prog ? 
true : false; - launch_params->envc = envcount(env); - launch_params->env = env; - launch_params->cwd = opt.cwd; - launch_params->slurmd_debug = opt.slurmd_debug; - launch_params->buffered_stdio = opt.unbuffered ? false : true; - launch_params->labelio = opt.labelio ? true : false; - launch_params->remote_output_filename = opt.remote_ofname; - launch_params->remote_input_filename = opt.remote_ifname; - launch_params->remote_error_filename = opt.remote_efname; - launch_params->task_prolog = opt.task_prolog; - launch_params->task_epilog = opt.task_epilog; - launch_params->cpu_bind = opt.cpu_bind; - launch_params->cpu_bind_type = opt.cpu_bind_type; - launch_params->mem_bind = opt.mem_bind; - launch_params->mem_bind_type = opt.mem_bind_type; - - _setup_local_fds(&launch_params->local_fds, step_ctx); - if (MPIR_being_debugged) { - launch_params->parallel_debug = true; - pmi_server_max_threads(1); - } else { - launch_params->parallel_debug = false; - } - callbacks->task_start = _task_start; - callbacks->task_finish = _task_finish; - - _run_slaunch_prolog(env); - - _mpir_init(ctx_params->task_count); - - slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_JOBID, &job_id); - slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_STEPID, &step_id); - verbose("Launching job step %u.%u", job_id, step_id); - - _call_spank_local_user(step_ctx, launch_params); - - if (slurm_step_launch(step_ctx, launch_params, callbacks) - != SLURM_SUCCESS) { - error("Application launch failed: %m"); - goto cleanup; - } - - if (slurm_step_launch_wait_start(step_ctx) == SLURM_SUCCESS) { - /* Only set up MPIR structures if the step launched - correctly. */ - if (opt.multi_prog) - mpir_set_multi_name(ctx_params->task_count, - launch_params->argv[0]); - else - _mpir_set_executable_names(launch_params->argv[0]); - MPIR_debug_state = MPIR_DEBUG_SPAWNED; - MPIR_Breakpoint(); - if (opt.debugger_test) - _mpir_dump_proctable(); - } else { - info("Job step aborted before step completely launched."); - } - - slurm_step_launch_wait_finish(step_ctx); - -cleanup: - _run_slaunch_epilog(env); - slurm_step_ctx_destroy(step_ctx); - _mpir_cleanup(); - _task_state_struct_free(); - - return global_rc; -} - -/* Set SLURM_UMASK environment variable with current state */ -static int _set_umask_env(char ***env) -{ - char mask_char[5]; - mode_t mask = (int)umask(0); - umask(mask); - - sprintf(mask_char, "0%d%d%d", - ((mask>>6)&07), ((mask>>3)&07), mask&07); - if (!env_array_overwrite_fmt(env, "SLURM_UMASK", "%s", mask_char)) { - error ("unable to set SLURM_UMASK in environment"); - return SLURM_FAILURE; - } - debug ("propagating UMASK=%s", mask_char); - return SLURM_SUCCESS; -} - -/* - * _set_prio_process_env - * - * Set the internal SLURM_PRIO_PROCESS environment variable to support - * the propagation of the users nice value and the "PropagatePrioProcess" - * config keyword. 
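The nice value mentioned here is read with getpriority(), whose -1 return is ambiguous, so errno has to be cleared beforehand and re-checked afterwards, exactly as the removed _set_prio_process_env() does. A minimal standalone illustration (the output text is ours, not SLURM's):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        int prio;

        /* getpriority() can legitimately return -1, so clear errno first
         * and only treat -1 as an error if errno was actually set. */
        errno = 0;
        prio = getpriority(PRIO_PROCESS, 0);
        if (prio == -1 && errno != 0) {
            perror("getpriority(PRIO_PROCESS)");
            return 1;
        }
        printf("SLURM_PRIO_PROCESS would be set to %d\n", prio);
        return 0;
    }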
- */ -static void _set_prio_process_env(char ***env) -{ - int retval; - - errno = 0; /* needed to detect a real failure since prio can be -1 */ - - if ((retval = getpriority (PRIO_PROCESS, 0)) == -1) { - if (errno) { - error ("getpriority(PRIO_PROCESS): %m"); - return; - } - } - - if (!env_array_overwrite_fmt(env, "SLURM_PRIO_PROCESS", - "%d", retval)) { - error ("unable to set SLURM_PRIO_PROCESS in environment"); - return; - } - - debug ("propagating SLURM_PRIO_PROCESS=%d", retval); -} - -/* Set SLURM_RLIMIT_* environment variables with current resource - * limit values, reset RLIMIT_NOFILE to maximum possible value */ -static int _set_rlimit_env(char ***env) -{ - int rc = SLURM_SUCCESS; - struct rlimit rlim[1]; - unsigned long cur; - char name[64], *format; - slurm_rlimits_info_t *rli; - - for (rli = get_slurm_rlimits_info(); rli->name != NULL; rli++ ) { - - if (getrlimit (rli->resource, rlim) < 0) { - error ("getrlimit (RLIMIT_%s): %m", rli->name); - rc = SLURM_FAILURE; - continue; - } - - cur = (unsigned long) rlim->rlim_cur; - snprintf(name, sizeof(name), "SLURM_RLIMIT_%s", rli->name); - if (opt.propagate && rli->propagate_flag == PROPAGATE_RLIMITS) - /* - * Prepend 'U' to indicate user requested propagate - */ - format = "U%lu"; - else - format = "%lu"; - - if (!env_array_overwrite_fmt(env, name, format, cur)) { - error ("unable to set %s in environment", name); - rc = SLURM_FAILURE; - continue; - } - - debug ("propagating RLIMIT_%s=%lu", rli->name, cur); - } - - /* - * Now increase NOFILE to the max available for this slaunch - */ - if (getrlimit (RLIMIT_NOFILE, rlim) < 0) - return (error ("getrlimit (RLIMIT_NOFILE): %m")); - - if (rlim->rlim_cur < rlim->rlim_max) { - rlim->rlim_cur = rlim->rlim_max; - if (setrlimit (RLIMIT_NOFILE, rlim) < 0) - return (error ("Unable to increase max no. 
files: %m")); - } - - return rc; -} - -static char **_init_task_environment(void) -{ - char **env; - - env = env_array_copy((const char **)environ); - - (void)_set_rlimit_env(&env); - _set_prio_process_env(&env); - (void)_set_umask_env(&env); - - env_array_overwrite_fmt(&env, "SLURM_CPUS_PER_TASK", - "%d", opt.cpus_per_task); - - return env; -} - -#if 0 -static int _become_user (uid_t uid, gid_t gid) -{ - struct passwd *pwd = getpwuid (opt.uid); - - if (uid == getuid()) - return (0); - - if ((gid != (gid_t)-1) && (setgid(gid) < 0)) - return (error("setgid: %m")); - - initgroups(pwd->pw_name, pwd->pw_gid); /* Ignore errors */ - - if (setuid(uid) < 0) - return (error("setuid: %m")); - - return (0); -} -#endif - -static void _run_slaunch_prolog (char **env) -{ - int rc; - - if (opt.prolog && strcasecmp(opt.prolog, "none") != 0) { - rc = _run_slaunch_script(opt.prolog, env); - debug("slaunch prolog rc = %d", rc); - } -} - -static void _run_slaunch_epilog (char **env) -{ - int rc; - - if (opt.epilog && strcasecmp(opt.epilog, "none") != 0) { - rc = _run_slaunch_script(opt.epilog, env); - debug("slaunch epilog rc = %d", rc); - } -} - -static int _run_slaunch_script (char *script, char **env) -{ - int status; - pid_t cpid; - int i; - char **args = NULL; - - if (script == NULL || script[0] == '\0') - return 0; - - if (access(script, R_OK | X_OK) < 0) { - info("Access denied for %s: %m", script); - return 0; - } - - if ((cpid = fork()) < 0) { - error ("run_slaunch_script: fork: %m"); - return -1; - } - if (cpid == 0) { - /* set the script's command line arguments to the arguments - * for the application, but shifted one higher - */ - args = xmalloc(sizeof(char *) * (opt.argc+2)); - args[0] = script; - for (i = 0; i < opt.argc; i++) { - args[i+1] = opt.argv[i]; - } - args[i+1] = NULL; - execve(script, args, env); - error("help! 
%m"); - exit(127); - } - - do { - if (waitpid(cpid, &status, 0) < 0) { - if (errno == EINTR) - continue; - error("waidpid: %m"); - return 0; - } else - return status; - } while(1); - - /* NOTREACHED */ -} - -static int -_taskid_to_nodeid(slurm_step_layout_t *layout, int taskid) -{ - int i, nodeid; - - for (nodeid = 0; nodeid < layout->node_cnt; nodeid++) { - for (i = 0; i < layout->tasks[nodeid]; i++) { - if (layout->tids[nodeid][i] == taskid) { - debug3("task %d is on node %d", - taskid, nodeid); - return nodeid; - } - } - } - - return -1; /* node ID not found */ -} - -static void -_setup_local_fds(slurm_step_io_fds_t *cio_fds, slurm_step_ctx ctx) -{ - bool err_shares_out = false; - fname_t *ifname, *ofname, *efname; - uint32_t job_id, step_id; - - slurm_step_ctx_get(ctx, SLURM_STEP_CTX_JOBID, &job_id); - slurm_step_ctx_get(ctx, SLURM_STEP_CTX_STEPID, &step_id); - - ifname = fname_create(opt.local_ifname, (int)job_id, (int)step_id); - ofname = fname_create(opt.local_ofname, (int)job_id, (int)step_id); - efname = fname_create(opt.local_efname, (int)job_id, (int)step_id); - - /* - * create stdin file descriptor - */ - if (ifname->name == NULL) { - cio_fds->in.fd = STDIN_FILENO; - } else { - cio_fds->in.fd = open(ifname->name, O_RDONLY); - if (cio_fds->in.fd == -1) - fatal("Could not open stdin file: %m"); - } - /* - * create stdout file descriptor - */ - if (ofname->name == NULL) { - cio_fds->out.fd = STDOUT_FILENO; - } else { - cio_fds->out.fd = open(ofname->name, - O_CREAT|O_WRONLY|O_TRUNC, 0644); - if (cio_fds->out.fd == -1) - fatal("Could not open stdout file: %m"); - } - /* FIXME - need to change condition for shared output and error */ - if (ofname->name != NULL - && efname->name != NULL - && !strcmp(ofname->name, efname->name)) { - err_shares_out = true; - } - - /* - * create seperate stderr file descriptor only if stderr is not sharing - * the stdout file descriptor - */ - if (err_shares_out) { - debug3("stdout and stderr sharing a file"); - cio_fds->err.fd = cio_fds->out.fd; - cio_fds->err.taskid = cio_fds->out.taskid; - } else { - if (efname->name == NULL) { - cio_fds->err.fd = STDERR_FILENO; - } else { - cio_fds->err.fd = open(efname->name, - O_CREAT|O_WRONLY|O_TRUNC, 0644); - if (cio_fds->err.fd == -1) - fatal("Could not open stderr file: %m"); - } - } - - - /* - * set up local standard IO filters - */ - if (opt.local_input_filter_set) { - cio_fds->in.taskid = opt.local_input_filter; - } - /* FIXME - don't peek into the step context, that's cheating! 
*/ - if (opt.local_input_filter != (uint32_t)-1) { - cio_fds->in.nodeid = - _taskid_to_nodeid(step_ctx->step_resp->step_layout, - opt.local_input_filter); - } - if (opt.local_output_filter_set) { - cio_fds->out.taskid = opt.local_output_filter; - } - if (opt.local_error_filter_set) { - cio_fds->err.taskid = opt.local_error_filter; - } else if (opt.local_output_filter_set) { - cio_fds->err.taskid = opt.local_output_filter; - } -} - -static void -_task_start(launch_tasks_response_msg_t *msg) -{ - MPIR_PROCDESC *table; - int taskid; - int i; - - verbose("Node %s (%d), %d tasks started", - msg->node_name, msg->srun_node_id, msg->count_of_pids); - - for (i = 0; i < msg->count_of_pids; i++) { - taskid = msg->task_ids[i]; - table = &MPIR_proctable[taskid]; - table->host_name = xstrdup(msg->node_name); - /* table->executable_name is set elsewhere */ - table->pid = msg->local_pids[i]; - - if (msg->return_code == 0) { - bit_set(task_state.start_success, taskid); - } else { - bit_set(task_state.start_failure, taskid); - } - } - -} - -static void -_terminate_job_step(slurm_step_ctx ctx) -{ - uint32_t job_id, step_id; - - slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_JOBID, &job_id); - slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_STEPID, &step_id); - info("Terminating job step %u.%u", job_id, step_id); - slurm_kill_job_step(job_id, step_id, SIGKILL); -} - -static void -_handle_max_wait(int signo) -{ - info("First task exited %ds ago", opt.max_wait); - _task_state_struct_print(); - _terminate_job_step(step_ctx); -} - -static void -_task_finish(task_exit_msg_t *msg) -{ - static bool first_done = true; - static bool first_error = true; - int rc = 0; - int i; - - verbose("%d tasks finished (rc=%u)", - msg->num_tasks, msg->return_code); - if (WIFEXITED(msg->return_code)) { - rc = WEXITSTATUS(msg->return_code); - if (rc != 0) { - for (i = 0; i < msg->num_tasks; i++) { - error("task %u exited with exit code %d", - msg->task_id_list[i], rc); - bit_set(task_state.finish_abnormal, - msg->task_id_list[i]); - } - } else { - for (i = 0; i < msg->num_tasks; i++) { - bit_set(task_state.finish_normal, - msg->task_id_list[i]); - } - } - } else if (WIFSIGNALED(msg->return_code)) { - for (i = 0; i < msg->num_tasks; i++) { - verbose("task %u killed by signal %d", - msg->task_id_list[i], - WTERMSIG(msg->return_code)); - bit_set(task_state.finish_abnormal, - msg->task_id_list[i]); - } - rc = 1; - } - global_rc = MAX(global_rc, rc); - - if (first_error && rc > 0 && opt.kill_bad_exit) { - first_error = false; - _terminate_job_step(step_ctx); - } else if (first_done && opt.max_wait > 0) { - /* If these are the first tasks to finish we need to - * start a timer to kill off the job step if the other - * tasks don't finish within opt.max_wait seconds. - */ - first_done = false; - debug2("First task has exited"); - xsignal(SIGALRM, _handle_max_wait); - verbose("starting alarm of %d seconds", opt.max_wait); - alarm(opt.max_wait); - } -} - -static void -_task_state_struct_init(int num_tasks) -{ - task_state.start_success = bit_alloc(num_tasks); - task_state.start_failure = bit_alloc(num_tasks); - task_state.finish_normal = bit_alloc(num_tasks); - task_state.finish_abnormal = bit_alloc(num_tasks); -} - -/* - * Tasks will most likely have bits set in multiple of the task_state - * bit strings (e.g. a task can start normally and then later exit normally) - * so we ensure that a task is only "seen" once. 
- */ -static void -_task_state_struct_print(void) -{ - bitstr_t *tmp, *seen, *not_seen; - char buf[BUFSIZ]; - int len; - - len = bit_size(task_state.finish_abnormal); /* all the same length */ - tmp = bit_alloc(len); - seen = bit_alloc(len); - not_seen = bit_alloc(len); - bit_not(not_seen); - - if (bit_set_count(task_state.finish_abnormal) > 0) { - bit_copybits(tmp, task_state.finish_abnormal); - bit_and(tmp, not_seen); - bit_fmt(buf, BUFSIZ, tmp); - info("task%s: exited abnormally", buf); - bit_or(seen, tmp); - bit_copybits(not_seen, seen); - bit_not(not_seen); - } - - if (bit_set_count(task_state.finish_normal) > 0) { - bit_copybits(tmp, task_state.finish_normal); - bit_and(tmp, not_seen); - bit_fmt(buf, BUFSIZ, tmp); - info("task%s: exited", buf); - bit_or(seen, tmp); - bit_copybits(not_seen, seen); - bit_not(not_seen); - } - - if (bit_set_count(task_state.start_failure) > 0) { - bit_copybits(tmp, task_state.start_failure); - bit_and(tmp, not_seen); - bit_fmt(buf, BUFSIZ, tmp); - info("task%s: failed to start", buf); - bit_or(seen, tmp); - bit_copybits(not_seen, seen); - bit_not(not_seen); - } - - if (bit_set_count(task_state.start_success) > 0) { - bit_copybits(tmp, task_state.start_success); - bit_and(tmp, not_seen); - bit_fmt(buf, BUFSIZ, tmp); - info("task%s: running", buf); - bit_or(seen, tmp); - bit_copybits(not_seen, seen); - bit_not(not_seen); - } -} - -static void -_task_state_struct_free(void) -{ - bit_free(task_state.start_success); - bit_free(task_state.start_failure); - bit_free(task_state.finish_normal); - bit_free(task_state.finish_abnormal); -} - - -/* FIXME - maybe we can push this under the step_launch function? */ -static int _call_spank_local_user (slurm_step_ctx step_ctx, - slurm_step_launch_params_t *step_params) -{ - struct spank_launcher_job_info info[1]; - job_step_create_response_msg_t *step_resp; - - info->uid = getuid(); - info->gid = step_params->gid; - slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_JOBID, &info->jobid); - slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_STEPID, &info->stepid); - slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_RESP, &step_resp); - info->step_layout = step_resp->step_layout; - info->argc = step_params->argc; - info->argv = step_params->argv; - - return spank_local_user(info); -} - - -/********************************************************************** - * Functions for manipulating the MPIR_* global variables which - * are accessed by parallel debuggers which trace slaunch. 
- **********************************************************************/ -static void -_mpir_init(int num_tasks) -{ - MPIR_proctable_size = num_tasks; - MPIR_proctable = xmalloc(sizeof(MPIR_PROCDESC) * num_tasks); - if (MPIR_proctable == NULL) - fatal("Unable to initialize MPIR_proctable: %m"); -} - -static void -_mpir_cleanup() -{ - int i; - - for (i = 0; i < MPIR_proctable_size; i++) { - xfree(MPIR_proctable[i].host_name); - xfree(MPIR_proctable[i].executable_name); - } - xfree(MPIR_proctable); -} - -static void -_mpir_set_executable_names(const char *executable_name) -{ - int i; - - for (i = 0; i < MPIR_proctable_size; i++) { - MPIR_proctable[i].executable_name = xstrdup(executable_name); - if (MPIR_proctable[i].executable_name == NULL) - fatal("Unable to set MPI_proctable executable_name:" - " %m"); - } -} - -static void -_mpir_dump_proctable() -{ - MPIR_PROCDESC *tv; - int i; - - for (i = 0; i < MPIR_proctable_size; i++) { - tv = &MPIR_proctable[i]; - if (!tv) - break; - info("task:%d, host:%s, pid:%d, executable:%s", - i, tv->host_name, tv->pid, tv->executable_name); - } -} - -static void _ignore_signal(int signo) -{ - /* do nothing */ -} - -static void _exit_on_signal(int signo) -{ - slurm_step_launch_abort(step_ctx); -} - -/* Plugins must be able to resolve symbols. - * Since slaunch statically links with src/api/libslurmhelper rather than - * dynamicaly linking with libslurm, we need to reference all needed - * symbols within slaunch. None of the functions below are actually - * used, but we need to load the symbols. */ -static void _define_symbols(void) -{ - slurm_signal_job_step(0,0,0); /* needed by mvapich and mpichgm */ -} - diff --git a/src/slaunch/slaunch.h b/src/slaunch/slaunch.h deleted file mode 100644 index f7fd2ffb7..000000000 --- a/src/slaunch/slaunch.h +++ /dev/null @@ -1,34 +0,0 @@ -/*****************************************************************************\ - * src/slaunch/slaunch.h - header for external functions in slaunch.c - ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-\*****************************************************************************/ - -#ifndef _HAVE_SLAUNCH_H -#define _HAVE_SLAUNCH_H - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#endif /* !_HAVE_SLAUNCH_H */ diff --git a/src/slaunch/slaunch.wrapper.c b/src/slaunch/slaunch.wrapper.c deleted file mode 100644 index 0c95e444a..000000000 --- a/src/slaunch/slaunch.wrapper.c +++ /dev/null @@ -1,17 +0,0 @@ -/* - * slaunch.wrapper.c - slaunch command wrapper for use with debuggers - * slaunch is the SLURM parallel application launcher - * - * For TotalView, a parallel job debugger from Etnus <http://www.etnus.com> - * Type "<ctrl-a>" to specify arguments for slaunch - * Type "g" to start the program - * - * Information for other debuggers may be submitted to slurm-dev@lists.llnl.gov - */ - -extern int slaunch(int argc, char **argv); - -int main(int argc, char **argv) -{ - return slaunch(argc, argv); -} diff --git a/src/slurmctld/Makefile.am b/src/slurmctld/Makefile.am index ce5b031cc..34db68f90 100644 --- a/src/slurmctld/Makefile.am +++ b/src/slurmctld/Makefile.am @@ -4,13 +4,13 @@ AUTOMAKE_OPTIONS = foreign CLEANFILES = core.* -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) sbin_PROGRAMS = slurmctld slurmctld_LDADD = \ - $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/common/libdaemonize.la + $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libcommon.o -ldl slurmctld_SOURCES = \ @@ -20,6 +20,9 @@ slurmctld_SOURCES = \ controller.c \ job_mgr.c \ job_scheduler.c \ + job_scheduler.h \ + licenses.c \ + licenses.h \ locks.c \ locks.h \ node_mgr.c \ @@ -44,8 +47,8 @@ slurmctld_SOURCES = \ trigger_mgr.c \ trigger_mgr.h -slurmctld_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) \ - $(FEDERATION_LDFLAGS) +slurmctld_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) + force: $(slurmctld_LDADD) : force diff --git a/src/slurmctld/Makefile.in b/src/slurmctld/Makefile.in index c438bcabe..c76c5dba4 100644 --- a/src/slurmctld/Makefile.in +++ b/src/slurmctld/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -46,6 +46,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -71,18 +73,19 @@ sbinPROGRAMS_INSTALL = $(INSTALL_PROGRAM) PROGRAMS = $(sbin_PROGRAMS) am_slurmctld_OBJECTS = agent.$(OBJEXT) backup.$(OBJEXT) \ controller.$(OBJEXT) job_mgr.$(OBJEXT) job_scheduler.$(OBJEXT) \ - locks.$(OBJEXT) node_mgr.$(OBJEXT) node_scheduler.$(OBJEXT) \ - partition_mgr.$(OBJEXT) ping_nodes.$(OBJEXT) \ - power_save.$(OBJEXT) proc_req.$(OBJEXT) read_config.$(OBJEXT) \ - sched_plugin.$(OBJEXT) srun_comm.$(OBJEXT) \ - state_save.$(OBJEXT) step_mgr.$(OBJEXT) trigger_mgr.$(OBJEXT) + licenses.$(OBJEXT) locks.$(OBJEXT) node_mgr.$(OBJEXT) \ + node_scheduler.$(OBJEXT) partition_mgr.$(OBJEXT) \ + ping_nodes.$(OBJEXT) power_save.$(OBJEXT) proc_req.$(OBJEXT) \ + read_config.$(OBJEXT) sched_plugin.$(OBJEXT) \ + srun_comm.$(OBJEXT) state_save.$(OBJEXT) step_mgr.$(OBJEXT) \ + trigger_mgr.$(OBJEXT) slurmctld_OBJECTS = $(am_slurmctld_OBJECTS) -slurmctld_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/common/libdaemonize.la +slurmctld_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libcommon.o slurmctld_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(slurmctld_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -122,6 +125,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -135,10 +139,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -158,7 +165,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -169,6 +179,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -184,6 +196,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -199,6 +212,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = 
@abs_srcdir@ @@ -257,10 +271,10 @@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign CLEANFILES = core.* -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) slurmctld_LDADD = \ - $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/common/libdaemonize.la + $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libcommon.o -ldl slurmctld_SOURCES = \ agent.c \ @@ -269,6 +283,9 @@ slurmctld_SOURCES = \ controller.c \ job_mgr.c \ job_scheduler.c \ + job_scheduler.h \ + licenses.c \ + licenses.h \ locks.c \ locks.h \ node_mgr.c \ @@ -293,9 +310,7 @@ slurmctld_SOURCES = \ trigger_mgr.c \ trigger_mgr.h -slurmctld_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) \ - $(FEDERATION_LDFLAGS) - +slurmctld_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) all: all-am .SUFFIXES: @@ -338,8 +353,8 @@ install-sbinPROGRAMS: $(sbin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ else :; fi; \ done @@ -372,6 +387,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/controller.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_mgr.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_scheduler.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/licenses.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/locks.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/node_mgr.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/node_scheduler.Po@am__quote@ @@ -418,8 +434,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -431,8 +447,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -442,13 +458,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) 
$(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c index fff8cc7f2..89f073d9b 100644 --- a/src/slurmctld/agent.c +++ b/src/slurmctld/agent.c @@ -1,14 +1,13 @@ /*****************************************************************************\ * agent.c - parallel background communication functions. This is where * logic could be placed for broadcast communications. - * - * $Id: agent.c 12462 2007-10-08 17:42:47Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, et. al. * Derived from pdsh written by Jim Garlick <garlick1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -89,6 +88,7 @@ #include "src/common/uid.h" #include "src/common/forward.h" #include "src/slurmctld/agent.h" +#include "src/slurmctld/job_scheduler.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/ping_nodes.h" #include "src/slurmctld/slurmctld.h" @@ -214,8 +214,10 @@ void *agent(void *args) task_info_t *task_specific_ptr; time_t begin_time; - /* info("I am here and agent_cnt is %d of %d with type %d", */ -/* agent_cnt, MAX_AGENT_CNT, agent_arg_ptr->msg_type); */ +#if 0 + info("Agent_cnt is %d of %d with msg_type %d", + agent_cnt, MAX_AGENT_CNT, agent_arg_ptr->msg_type); +#endif slurm_mutex_lock(&agent_cnt_mutex); while (slurmctld_config.shutdown_time == 0) { if (agent_cnt < MAX_AGENT_CNT) { @@ -321,13 +323,19 @@ void *agent(void *args) xfree(agent_info_ptr); } slurm_mutex_lock(&agent_cnt_mutex); + if (agent_cnt > 0) agent_cnt--; - else + else { error("agent_cnt underflow"); - if (agent_cnt < MAX_AGENT_CNT) - agent_retry(RPC_RETRY_INTERVAL); + agent_cnt = 0; + } + + if (agent_cnt && agent_cnt < MAX_AGENT_CNT) + agent_retry(RPC_RETRY_INTERVAL, true); + slurm_mutex_unlock(&agent_cnt_mutex); + pthread_cond_broadcast(&agent_cnt_cond); return NULL; @@ -359,7 +367,6 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr) thd_t *thread_ptr = NULL; int *span = NULL; int thr_count = 0; - //forward_t forward; hostlist_t hl = NULL; char buf[8192]; char *name = NULL; @@ -384,9 +391,15 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr) && (agent_arg_ptr->msg_type != SRUN_NODE_FAIL) && (agent_arg_ptr->msg_type != SRUN_USER_MSG) && (agent_arg_ptr->msg_type != SRUN_JOB_COMPLETE)) { + /* Sending message to a possibly large number of slurmd. + * Push all message forwarding to slurmd in order to + * offload as much work from slurmctld as possible. */ agent_info_ptr->get_reply = true; - span = set_span(agent_arg_ptr->node_count, 0); + span = set_span(agent_arg_ptr->node_count, 1); } else { + /* Message is going to one node (for srun) or we want + * it to get processed ASAP (SHUTDOWN or RECONFIGURE). + * Send the message directly to each node. 
*/ span = set_span(agent_arg_ptr->node_count, agent_arg_ptr->node_count); } @@ -411,7 +424,6 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr) name = hostlist_shift(agent_arg_ptr->hostlist); if(!name) break; - /* info("adding %s", name); */ hostlist_push(hl, name); free(name); i++; @@ -420,9 +432,9 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr) hostlist_ranged_string(hl, sizeof(buf), buf); hostlist_destroy(hl); thread_ptr[thr_count].nodelist = xstrdup(buf); - - /* info("sending to nodes %s", */ -/* thread_ptr[thr_count].nodelist); */ +#if 0 + info("sending to nodes %s", thread_ptr[thr_count].nodelist); +#endif thr_count++; } xfree(span); @@ -669,6 +681,9 @@ static void _notify_slurmctld_nodes(agent_info_t *agent_ptr, thread_ptr[i].start_time); break; case DSH_FAILED: +#ifdef HAVE_BG + error("Prolog/epilog failure"); +#else if(!is_ret_list) { set_node_down(thread_ptr[i].nodelist, "Prolog/epilog failure"); @@ -676,6 +691,7 @@ static void _notify_slurmctld_nodes(agent_info_t *agent_ptr, } set_node_down(ret_data_info->node_name, "Prolog/epilog failure"); +#endif break; case DSH_DONE: if(!is_ret_list) { @@ -710,6 +726,7 @@ finished: ; } } if ((agent_ptr->msg_type == REQUEST_PING) || + (agent_ptr->msg_type == REQUEST_HEALTH_CHECK) || (agent_ptr->msg_type == REQUEST_NODE_REGISTRATION_STATUS)) ping_end(); #else @@ -806,17 +823,21 @@ static void *_thread_per_group_rpc(void *args) slurm_msg_t_init(&msg); msg.msg_type = msg_type; msg.data = task_ptr->msg_args_ptr; -/* info("sending message type %u to %s", msg_type, - thread_ptr->nodelist); */ +#if 0 + info("sending message type %u to %s", msg_type, thread_ptr->nodelist); +#endif if (task_ptr->get_reply) { if(thread_ptr->addr) { msg.address = *thread_ptr->addr; + if(!(ret_list = slurm_send_addr_recv_msgs( &msg, thread_ptr->nodelist, 0))) { error("_thread_per_group_rpc: " "no ret_list given"); goto cleanup; } + + } else { if(!(ret_list = slurm_send_recv_msgs( thread_ptr->nodelist, @@ -829,8 +850,10 @@ static void *_thread_per_group_rpc(void *args) } } else { if(thread_ptr->addr) { + //info("got the address"); msg.address = *thread_ptr->addr; } else { + //info("no address given"); if(slurm_conf_get_addr(thread_ptr->nodelist, &msg.address) == SLURM_ERROR) { error("_thread_per_group_rpc: " @@ -839,6 +862,7 @@ static void *_thread_per_group_rpc(void *args) goto cleanup; } } + //info("sending %u to %s", msg_type, thread_ptr->nodelist); if (slurm_send_only_node_msg(&msg) == SLURM_SUCCESS) { thread_state = DSH_DONE; } else { @@ -1087,9 +1111,12 @@ static void _list_delete_retry(void *retry_entry) * agent_retry - Agent for retrying pending RPCs. 
One pending request is * issued if it has been pending for at least min_wait seconds * IN min_wait - Minimum wait time between re-issue of a pending RPC + * IN mai_too - Send pending email too, note this performed using a + * fork/waitpid, so it can take longer than just creating a pthread + * to send RPCs * RET count of queued requests remaining */ -extern int agent_retry (int min_wait) +extern int agent_retry (int min_wait, bool mail_too) { int list_size = 0; time_t now = time(NULL); @@ -1165,11 +1192,11 @@ extern int agent_retry (int min_wait) if (queued_req_ptr) { agent_arg_ptr = queued_req_ptr->agent_arg_ptr; xfree(queued_req_ptr); - if (agent_arg_ptr) + if (agent_arg_ptr) { _spawn_retry_agent(agent_arg_ptr); - else + } else error("agent_retry found record with no agent_args"); - } else { + } else if (mail_too) { mail_info_t *mi = NULL; slurm_mutex_lock(&mail_mutex); if (mail_list) @@ -1178,7 +1205,7 @@ extern int agent_retry (int min_wait) if (mi) _mail_proc(mi); } - + return list_size; } @@ -1191,7 +1218,8 @@ void agent_queue_request(agent_arg_t *agent_arg_ptr) { queued_request_t *queued_req_ptr = NULL; - if (agent_arg_ptr->msg_type == REQUEST_SHUTDOWN) { /* execute now */ + if (agent_arg_ptr->msg_type == REQUEST_SHUTDOWN) { + /* execute now */ pthread_attr_t attr_agent; pthread_t thread_agent; int rc; @@ -1215,6 +1243,7 @@ void agent_queue_request(agent_arg_t *agent_arg_ptr) /* queued_req_ptr->last_attempt = 0; Implicit */ slurm_mutex_lock(&retry_mutex); + if (retry_list == NULL) { retry_list = list_create(_list_delete_retry); if (retry_list == NULL) @@ -1222,6 +1251,10 @@ void agent_queue_request(agent_arg_t *agent_arg_ptr) } list_append(retry_list, (void *)queued_req_ptr); slurm_mutex_unlock(&retry_mutex); + + /* now process the request in a separate pthread + * (if we can create another pthread to do so) */ + agent_retry(999, false); } /* _spawn_retry_agent - pthread_create an agent for the given task */ diff --git a/src/slurmctld/agent.h b/src/slurmctld/agent.h index 11f20f410..73c1de884 100644 --- a/src/slurmctld/agent.h +++ b/src/slurmctld/agent.h @@ -2,13 +2,13 @@ * agent.h - data structures and function definitions for parallel * background communications * - * $Id: agent.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: agent.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov>, et. al. * Derived from dsh written by Jim Garlick <garlick1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -42,7 +42,6 @@ #ifndef _AGENT_H #define _AGENT_H -#include "src/slurmctld/agent.h" #include "src/slurmctld/slurmctld.h" #define AGENT_IS_THREAD 1 /* set if agent itself a thread of @@ -88,9 +87,12 @@ extern void agent_queue_request(agent_arg_t *agent_arg_ptr); * agent_retry - Agent for retrying pending RPCs. 
One pending request is * issued if it has been pending for at least min_wait seconds * IN min_wait - Minimum wait time between re-issue of a pending RPC + * IN mai_too - Send pending email too, note this performed using a + * fork/waitpid, so it can take longer than just creating + * a pthread to send RPCs * RET count of queued requests remaining */ -extern int agent_retry (int min_wait); +extern int agent_retry (int min_wait, bool mail_too); /* agent_purge - purge all pending RPC requests */ extern void agent_purge (void); diff --git a/src/slurmctld/backup.c b/src/slurmctld/backup.c index 795142655..82c1084b4 100644 --- a/src/slurmctld/backup.c +++ b/src/slurmctld/backup.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov>, Kevin Tew <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -344,7 +344,7 @@ static int _background_process_msg(slurm_msg_t * msg) if (msg->msg_type != REQUEST_PING) { bool super_user = false; - uid_t uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); if ((uid == 0) || (uid == getuid())) super_user = true; diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c index 0d6bce56b..0e8f00bad 100644 --- a/src/slurmctld/controller.c +++ b/src/slurmctld/controller.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * controller.c - main control machine daemon for slurm - * $Id: controller.c 13506 2008-03-07 00:13:15Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, Kevin Tew <tew1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -68,7 +68,8 @@ #include "src/common/node_select.h" #include "src/common/pack.h" #include "src/common/read_config.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_jobacct_gather.h" +#include "src/common/slurm_accounting_storage.h" #include "src/common/slurm_auth.h" #include "src/common/slurm_jobcomp.h" #include "src/common/slurm_protocol_api.h" @@ -76,8 +77,11 @@ #include "src/common/uid.h" #include "src/common/xsignal.h" #include "src/common/xstring.h" +#include "src/common/assoc_mgr.h" #include "src/slurmctld/agent.h" +#include "src/slurmctld/job_scheduler.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/ping_nodes.h" #include "src/slurmctld/proc_req.h" @@ -121,6 +125,8 @@ * (640 bytes), it is really not lost. * The _keyvalue_regex_init() function will generate two blocks "definitely * lost", both of size zero. We haven't bothered to address this. + * On some systems dlopen() will generate a small number of "definitely + * lost" blocks that are not cleared by dlclose(). * On some systems, pthread_create() will generated a small number of * "possibly lost" blocks. * Otherwise the report should be free of errors. 
Remember to reset @@ -134,6 +140,9 @@ log_options_t log_opts = LOG_OPTS_INITIALIZER; /* Global variables */ slurmctld_config_t slurmctld_config; int bg_recover = DEFAULT_RECOVER; +char *slurmctld_cluster_name = NULL; /* name of cluster */ +void *acct_db_conn = NULL; +int accounting_enforce = 0; /* Local variables */ static int daemonize = DEFAULT_DAEMONIZE; @@ -158,8 +167,8 @@ static int controller_sigarray[] = { static void _default_sigaction(int sig); inline static void _free_server_thread(void); -static int _gold_cluster_ready(); -static int _gold_mark_all_nodes_down(char *reason, time_t event_time); +static int _accounting_cluster_ready(); +static int _accounting_mark_all_nodes_down(char *reason); static void _init_config(void); static void _init_pidfile(void); static void _kill_old_slurmctld(void); @@ -181,9 +190,12 @@ typedef struct connection_arg { /* main - slurmctld main function, start various threads and process RPCs */ int main(int argc, char *argv[]) { - int error_code; + int cnt, error_code, i; pthread_attr_t thread_attr; struct stat stat_buf; + /* Locks: Write configuration, job, node, and partition */ + slurmctld_lock_t config_write_lock = { + WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK }; /* * Establish initial configuration @@ -222,6 +234,22 @@ int main(int argc, char *argv[]) if (stat(slurmctld_conf.mail_prog, &stat_buf) != 0) error("Configured MailProg is invalid"); + if (!strcmp(slurmctld_conf.accounting_storage_type, + "accounting_storage/none")) { + if (strcmp(slurmctld_conf.job_acct_gather_type, + "jobacct_gather/none")) + error("Job accounting information gathered, " + "but not stored"); + } else { + if (!strcmp(slurmctld_conf.job_acct_gather_type, + "jobacct_gather/none")) + info("Job accounting information stored, " + "but details not gathered"); + } + + if (license_init(slurmctld_conf.licenses) != SLURM_SUCCESS) + fatal("Invalid Licenses value: %s", slurmctld_conf.licenses); + #ifndef NDEBUG # ifdef PR_SET_DUMPABLE if (prctl(PR_SET_DUMPABLE, 1) < 0) @@ -236,11 +264,11 @@ int main(int argc, char *argv[]) fatal("Unable to initialize StateSaveLocation"); if (daemonize) { - error_code = daemon(1, 1); + slurmctld_config.daemonize = 1; + if (daemon(1, 1)) + error("daemon(): %m"); log_alter(log_opts, LOG_DAEMON, slurmctld_conf.slurmctld_logfile); - if (error_code) - error("daemon error %d", error_code); if (slurmctld_conf.slurmctld_logfile && (slurmctld_conf.slurmctld_logfile[0] == '/')) { char *slash_ptr, *work_dir; @@ -259,8 +287,25 @@ int main(int argc, char *argv[]) slurmctld_conf.state_save_location); } } + } else { + slurmctld_config.daemonize = 0; } - info("slurmctld version %s started", SLURM_VERSION); + + /* This needs to be copied for other modules to access the + * memory, it will report 'HashBase' if it is not duped + */ + slurmctld_cluster_name = xstrdup(slurmctld_conf.cluster_name); + accounting_enforce = slurmctld_conf.accounting_storage_enforce; + acct_db_conn = acct_storage_g_get_connection(true, false); + if (assoc_mgr_init(acct_db_conn, accounting_enforce) && + accounting_enforce) { + error("assoc_mgr_init failure"); + fatal("slurmdbd and/or database must be up at " + "slurmctld start time"); + } + + info("slurmctld version %s started on cluster %s", + SLURM_VERSION, slurmctld_cluster_name); if ((error_code = gethostname_short(node_name, MAX_SLURM_NAME))) fatal("getnodename error %s", slurm_strerror(error_code)); @@ -268,8 +313,10 @@ int main(int argc, char *argv[]) /* init job credential stuff */ slurmctld_config.cred_ctx = 
slurm_cred_creator_ctx_create( slurmctld_conf.job_credential_private_key); - if (!slurmctld_config.cred_ctx) - fatal("slurm_cred_creator_ctx_create: %m"); + if (!slurmctld_config.cred_ctx) { + fatal("slurm_cred_creator_ctx_create(%s): %m", + slurmctld_conf.job_credential_private_key); + } /* Not used in creator @@ -290,7 +337,12 @@ int main(int argc, char *argv[]) SLURM_SUCCESS ) fatal( "failed to initialize checkpoint plugin" ); if (slurm_select_init() != SLURM_SUCCESS ) - fatal( "failed to initialize node selection plugin state"); + fatal( "failed to initialize node selection plugin"); + if (slurm_acct_storage_init(NULL) != SLURM_SUCCESS ) + fatal( "failed to initialize accounting_storage plugin"); + + if (slurm_jobacct_gather_init() != SLURM_SUCCESS ) + fatal( "failed to initialize jobacct_gather plugin"); while (1) { /* initialization for each primary<->backup switch */ @@ -311,43 +363,23 @@ int main(int argc, char *argv[]) if (switch_restore(slurmctld_conf.state_save_location, recover ? true : false)) fatal(" failed to initialize switch plugin" ); + lock_slurmctld(config_write_lock); if ((error_code = read_slurm_conf(recover))) { fatal("read_slurm_conf reading %s: %s", slurmctld_conf.slurm_conf, slurm_strerror(error_code)); } + unlock_slurmctld(config_write_lock); - if (recover == 0) - _gold_mark_all_nodes_down("cold-start", - time(NULL)); - else if (!stat("/tmp/slurm_gold_first", &stat_buf)) { - /* this is here for when slurm is - * started with gold for the first - * time to log any downed nodes. - */ - struct node_record *node_ptr = - node_record_table_ptr; - int i=0; - time_t event_time = time(NULL); - debug("found /tmp/slurm_gold_first, " - "setting nodes down"); - for (i = 0; - i < node_record_count; - i++, node_ptr++) { - if (node_ptr->name == '\0' - || !node_ptr->reason) - continue; - - if(jobacct_g_node_down( - node_ptr, - event_time, - node_ptr->reason) - == SLURM_ERROR) - break; - } - if(unlink("/tmp/slurm_gold_first") < 0) - error("Error deleting " - "/tmp/slurm_gold_first"); + if ((recover == 0) || + (!stat("/tmp/slurm_accounting_first", &stat_buf))) { + /* When first starting to write node state + * information to Gold or SlurmDBD, create + * a file called "/tmp/slurm_accounting_first" to + * capture node initialization information */ + + _accounting_mark_all_nodes_down("cold-start"); + unlink("/tmp/slurm_accounting_first"); } } else { error("this host (%s) not valid controller (%s or %s)", @@ -355,8 +387,20 @@ int main(int argc, char *argv[]) slurmctld_conf.backup_controller); exit(0); } + + if(!acct_db_conn) { + acct_db_conn = + acct_storage_g_get_connection(true, false); + if (assoc_mgr_init(acct_db_conn, accounting_enforce) && + accounting_enforce) { + error("assoc_mgr_init failure"); + fatal("slurmdbd and/or database must be up at " + "slurmctld start time"); + } + } + info("Running as primary controller"); - _gold_cluster_ready(); + _accounting_cluster_ready(); if (slurm_sched_init() != SLURM_SUCCESS) fatal("failed to initialize scheduling plugin"); @@ -368,9 +412,12 @@ int main(int argc, char *argv[]) slurm_mutex_unlock(&slurmctld_config.thread_count_lock); slurm_attr_init(&thread_attr); if (pthread_create(&slurmctld_config.thread_id_rpc, - &thread_attr,_slurmctld_rpc_mgr, NULL)) + &thread_attr, _slurmctld_rpc_mgr, NULL)) fatal("pthread_create error %m"); slurm_attr_destroy(&thread_attr); + clusteracct_storage_g_register_ctld( + slurmctld_conf.cluster_name, + slurmctld_conf.slurmctld_port); /* * create attached thread for signal handling @@ -420,6 +467,10 @@ int 
main(int argc, char *argv[]) if (slurmctld_config.resume_backup == false) break; recover = 2; + + /* Save any pending state save RPCs */ + acct_storage_g_close_connection(&acct_db_conn); + assoc_mgr_fini(); } /* Since pidfile is created as user root (its owner is @@ -428,14 +479,10 @@ int main(int argc, char *argv[]) if (unlink(slurmctld_conf.slurmctld_pidfile) < 0) verbose("Unable to remove pidfile '%s': %m", slurmctld_conf.slurmctld_pidfile); - - jobacct_g_fini_slurmctld(); /* Save pending message traffic */ - + #ifdef MEMORY_LEAK_DEBUG -{ /* This should purge all allocated memory, *\ \* Anything left over represents a leak. */ - int i, cnt; /* Give running agents a chance to complete and free memory. * Wait up to 30 seconds (3 seconds * 10) */ @@ -458,21 +505,38 @@ int main(int argc, char *argv[]) /* Plugins are needed to purge job/node data structures, * unplug after other data structures are purged */ g_slurm_jobcomp_fini(); + slurm_acct_storage_fini(); + slurm_jobacct_gather_fini(); slurm_sched_fini(); slurm_select_fini(); checkpoint_fini(); slurm_auth_fini(); switch_fini(); + assoc_mgr_fini(); /* purge remaining data structures */ slurm_cred_ctx_destroy(slurmctld_config.cred_ctx); + slurm_crypto_fini(); /* must be after ctx_destroy */ slurm_conf_destroy(); slurm_api_clear_config(); sleep(2); -} +#else + /* Give REQUEST_SHUTDOWN a chance to get propagated, + * up to 3 seconds. */ + for (i=0; i<3; i++) { + agent_purge(); + cnt = get_agent_count(); + if (cnt == 0) + break; + sleep(1); + } #endif - info("Slurmctld shutdown completing"); + xfree(slurmctld_cluster_name); + if (cnt) { + info("Slurmctld shutdown completing with %d active agent " + "threads\n\n", cnt); + } log_fini(); if (dump_core) @@ -507,6 +571,7 @@ static void _init_config(void) (void) setrlimit(RLIMIT_DATA, &rlim); } + slurmctld_config.boot_time = time(NULL); slurmctld_config.daemonize = DEFAULT_DAEMONIZE; slurmctld_config.resume_backup = false; slurmctld_config.server_thread_count = 0; @@ -525,6 +590,35 @@ static void _init_config(void) #endif } +/* Read configuration file. + * Same name as API function for use in accounting_storage plugin */ +extern int slurm_reconfigure(void) +{ + /* Locks: Write configuration, job, node, and partition */ + slurmctld_lock_t config_write_lock = { + WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK }; + int rc; + + /* + * XXX - need to shut down the scheduler + * plugin, re-read the configuration, and then + * restart the (possibly new) plugin. 
+ */ + lock_slurmctld(config_write_lock); + rc = read_slurm_conf(0); + if (rc) + error("read_slurm_conf: %s", slurm_strerror(rc)); + else { + _update_cred_key(); + set_slurmctld_state_loc(); + } + unlock_slurmctld(config_write_lock); + trigger_reconfig(); + slurm_sched_partition_change(); /* notify sched plugin */ + select_g_reconfigure(); /* notify select plugin too */ + return rc; +} + /* _slurmctld_signal_hand - Process daemon-wide signals */ static void *_slurmctld_signal_hand(void *no_data) { @@ -535,9 +629,6 @@ static void *_slurmctld_signal_hand(void *no_data) /* Locks: Read configuration */ slurmctld_lock_t config_read_lock = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK }; - /* Locks: Write configuration, job, node, and partition */ - slurmctld_lock_t config_write_lock = { - WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK }; (void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); @@ -571,23 +662,7 @@ static void *_slurmctld_signal_hand(void *no_data) break; case SIGHUP: /* kill -1 */ info("Reconfigure signal (SIGHUP) received"); - /* - * XXX - need to shut down the scheduler - * plugin, re-read the configuration, and then - * restart the (possibly new) plugin. - */ - lock_slurmctld(config_write_lock); - rc = read_slurm_conf(0); - if (rc) - error("read_slurm_conf: %s", - slurm_strerror(rc)); - else { - _update_cred_key(); - set_slurmctld_state_loc(); - } - unlock_slurmctld(config_write_lock); - trigger_reconfig(); - slurm_sched_partition_change(); + slurm_reconfigure(); break; case SIGABRT: /* abort */ info("SIGABRT received"); @@ -805,7 +880,7 @@ static void _free_server_thread(void) pthread_cond_broadcast(&server_thread_cond); } -static int _gold_cluster_ready() +static int _accounting_cluster_ready() { uint32_t procs = 0; struct node_record *node_ptr; @@ -827,34 +902,47 @@ static int _gold_cluster_ready() #endif } - rc = jobacct_g_cluster_procs(procs, event_time); + rc = clusteracct_storage_g_cluster_procs(acct_db_conn, + slurmctld_cluster_name, + procs, event_time); return rc; } -static int _gold_mark_all_nodes_down(char *reason, time_t event_time) +static int _accounting_mark_all_nodes_down(char *reason) { char *state_file; struct stat stat_buf; struct node_record *node_ptr; int i; + time_t event_time; int rc = SLURM_ERROR; state_file = xstrdup (slurmctld_conf.state_save_location); xstrcat (state_file, "/node_state"); if (stat(state_file, &stat_buf)) { - debug("_gold_mark_all_nodes_down: could not stat(%s) " + debug("_accounting_mark_all_nodes_down: could not stat(%s) " "to record node down time", state_file); - xfree(state_file); - return rc; + event_time = time(NULL); + } else { + event_time = stat_buf.st_mtime; } xfree(state_file); + if((rc = acct_storage_g_flush_jobs_on_cluster(acct_db_conn, + slurmctld_cluster_name, + event_time)) + == SLURM_ERROR) + return rc; + node_ptr = node_record_table_ptr; for (i = 0; i < node_record_count; i++, node_ptr++) { if (node_ptr->name == '\0') continue; - if((rc = jobacct_g_node_down(node_ptr, event_time, reason)) + if((rc = clusteracct_storage_g_node_down(acct_db_conn, + slurmctld_cluster_name, + node_ptr, event_time, + reason)) == SLURM_ERROR) break; } @@ -947,9 +1035,10 @@ static void *_slurmctld_background(void *no_data) if (difftime(now, last_timelimit_time) >= PERIODIC_TIMEOUT) { last_timelimit_time = now; - debug2("Performing job time limit check"); + debug2("Performing job time limit and checkpoint test"); lock_slurmctld(job_write_lock); job_time_limit(); + 
step_checkpoint(); unlock_slurmctld(job_write_lock); } @@ -978,8 +1067,8 @@ static void *_slurmctld_background(void *no_data) unlock_slurmctld(job_read_lock); } - /* Process pending agent work */ - agent_retry(RPC_RETRY_INTERVAL); + /* Process any pending agent work */ + agent_retry(RPC_RETRY_INTERVAL, true); if (difftime(now, last_group_time) >= PERIODIC_GROUP_CHECK) { last_group_time = now; @@ -1020,7 +1109,7 @@ static void *_slurmctld_background(void *no_data) * or reconfigured nodes */ last_node_acct = now; lock_slurmctld(node_read_lock); - _gold_cluster_ready(); + _accounting_cluster_ready(); unlock_slurmctld(node_read_lock); } /* Reassert this machine as the primary controller. diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c index 64a03b2f0..918c053e2 100644 --- a/src/slurmctld/job_mgr.c +++ b/src/slurmctld/job_mgr.c @@ -3,12 +3,13 @@ * Note: there is a global job list (job_list), time stamp * (last_job_update), and hash table (job_hash) * - * $Id: job_mgr.c 13871 2008-04-15 15:47:33Z jette $ + * $Id: job_mgr.c 14154 2008-05-29 17:51:52Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -51,24 +52,28 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> +#include <strings.h> #include <sys/stat.h> #include <slurm/slurm_errno.h> #include "src/api/job_info.h" #include "src/common/bitstring.h" +#include "src/common/forward.h" #include "src/common/hostlist.h" #include "src/common/node_select.h" #include "src/common/parse_time.h" +#include "src/common/slurm_accounting_storage.h" #include "src/common/slurm_jobcomp.h" +#include "src/common/slurm_protocol_pack.h" #include "src/common/switch.h" #include "src/common/xassert.h" #include "src/common/xstring.h" -#include "src/common/forward.h" -#include "src/common/slurm_jobacct.h" -#include "src/common/slurm_protocol_pack.h" +#include "src/common/assoc_mgr.h" #include "src/slurmctld/agent.h" +#include "src/slurmctld/job_scheduler.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/node_scheduler.h" #include "src/slurmctld/proc_req.h" @@ -79,7 +84,6 @@ #define DETAILS_FLAG 0xdddd #define MAX_RETRIES 10 -#define MAX_STR_LEN 1024 #define SLURM_CREATE_JOB_FLAG_NO_ALLOCATE_0 0 #define STEP_FLAG 0xbbbb #define TOP_PRIORITY 0xffff0000 /* large, but leave headroom for higher */ @@ -87,7 +91,7 @@ #define JOB_HASH_INX(_job_id) (_job_id % hash_table_size) /* Change JOB_STATE_VERSION value when changing the state save format */ -#define JOB_STATE_VERSION "VER005" +#define JOB_STATE_VERSION "VER006" /* Global variables */ List job_list = NULL; /* job_record list */ @@ -99,6 +103,8 @@ static int hash_table_size = 0; static int job_count = 0; /* job's in the system */ static uint32_t job_id_sequence = 0; /* first job_id to assign new job */ static struct job_record **job_hash = NULL; +static bool wiki_sched = false; +static bool wiki_sched_test = false; /* Local functions */ static void _add_job_hash(struct job_record *job_ptr); @@ -134,7 +140,7 @@ static void _pack_pending_job_details(struct job_details *detail_ptr, static int _purge_job_record(uint32_t 
job_id); static void _purge_lost_batch_jobs(int node_inx, time_t now); static void _read_data_array_from_file(char *file_name, char ***data, - uint16_t * size); + uint32_t * size); static void _read_data_from_file(char *file_name, char **data); static void _remove_defunct_batch_dirs(List batch_dirs); static int _reset_detail_bitmaps(struct job_record *job_ptr); @@ -147,13 +153,16 @@ static void _signal_job(struct job_record *job_ptr, int signal); static void _suspend_job(struct job_record *job_ptr, uint16_t op); static int _suspend_job_nodes(struct job_record *job_ptr); static bool _top_priority(struct job_record *job_ptr); +static bool _validate_acct_policy(job_desc_msg_t *job_desc, + struct part_record *part_ptr, + acct_association_rec_t *assoc_ptr); static int _validate_job_create_req(job_desc_msg_t * job_desc); static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate, uid_t submit_uid); static void _validate_job_files(List batch_dirs); static int _write_data_to_file(char *file_name, char *data); static int _write_data_array_to_file(char *file_name, char **data, - uint16_t size); + uint32_t size); static void _xmit_new_end_time(struct job_record *job_ptr); /* @@ -229,6 +238,11 @@ void delete_job_details(struct job_record *job_entry) xfree(job_entry->details->out); xfree(job_entry->details->work_dir); xfree(job_entry->details->mc_ptr); + if (job_entry->details->feature_list) + list_destroy(job_entry->details->feature_list); + xfree(job_entry->details->dependency); + if (job_entry->details->depend_list) + list_destroy(job_entry->details->depend_list); xfree(job_entry->details); } @@ -355,7 +369,7 @@ int dump_all_job_state(void) * checkpoint. Execute this after loading the configuration file data. * RET 0 or error code */ -int load_all_job_state(void) +extern int load_all_job_state(void) { int data_allocated, data_read = 0, error_code = 0; uint32_t data_size = 0; @@ -365,7 +379,7 @@ int load_all_job_state(void) time_t buf_time; uint32_t saved_job_id; char *ver_str = NULL; - uint16_t ver_str_len; + uint32_t ver_str_len; /* read the file */ state_file = xstrdup(slurmctld_conf.state_save_location); @@ -405,30 +419,9 @@ int load_all_job_state(void) buffer = create_buf(data, data_size); - /* - * The old header of the "job_state" file simply contained a - * timestamp, while the new header contains a "VERXXX" at the - * beginning (VER001, VER002, etc), a timestamp, and the last - * job id. To determine if we're looking at an old header or - * new header, we first check if the file begins with "VER". - * - * Each field is preceeded by two bytes which contains the field - * size. Since we are bypassing the "pack" functions in order - * see if the header contains a "VERXXX" string, we need to make - * sure that there is enough data in the buffer to compare against. 
- */ - if (size_buf(buffer) >= sizeof(uint16_t) + strlen(JOB_STATE_VERSION)) - { - char *ptr = get_buf_data(buffer); - - if (memcmp(&ptr[sizeof(uint16_t)], JOB_STATE_VERSION, 3) == 0) - { - safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer); - debug3("Version string in job_state header is %s", - ver_str); - } - } - if (ver_str && (strcmp(ver_str, JOB_STATE_VERSION) != 0)) { + safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer); + debug3("Version string in job_state header is %s", ver_str); + if ((!ver_str) || strcmp(ver_str, JOB_STATE_VERSION) != 0) { error("***********************************************"); error("Can not recover job state, incompatable version"); error("***********************************************"); @@ -476,33 +469,38 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer) struct step_record *step_ptr; /* Dump basic job info */ + pack32(dump_job_ptr->assoc_id, buffer); pack32(dump_job_ptr->job_id, buffer); pack32(dump_job_ptr->user_id, buffer); pack32(dump_job_ptr->group_id, buffer); pack32(dump_job_ptr->time_limit, buffer); pack32(dump_job_ptr->priority, buffer); pack32(dump_job_ptr->alloc_sid, buffer); - pack32(dump_job_ptr->dependency, buffer); pack32(dump_job_ptr->num_procs, buffer); + pack32(dump_job_ptr->total_procs, buffer); pack32(dump_job_ptr->exit_code, buffer); + pack32(dump_job_ptr->db_index, buffer); + pack32(dump_job_ptr->assoc_id, buffer); pack_time(dump_job_ptr->start_time, buffer); pack_time(dump_job_ptr->end_time, buffer); pack_time(dump_job_ptr->suspend_time, buffer); pack_time(dump_job_ptr->pre_sus_time, buffer); + pack_time(dump_job_ptr->tot_sus_time, buffer); pack16(dump_job_ptr->job_state, buffer); pack16(dump_job_ptr->next_step_id, buffer); pack16(dump_job_ptr->kill_on_node_fail, buffer); pack16(dump_job_ptr->kill_on_step_done, buffer); pack16(dump_job_ptr->batch_flag, buffer); - pack16(dump_job_ptr->alloc_resp_port, buffer); - pack16(dump_job_ptr->other_port, buffer); pack16(dump_job_ptr->mail_type, buffer); + pack16(dump_job_ptr->qos, buffer); pack16(dump_job_ptr->state_reason, buffer); - packstr(dump_job_ptr->alloc_resp_host, buffer); - packstr(dump_job_ptr->other_host, buffer); + packstr(dump_job_ptr->resp_host, buffer); + pack16(dump_job_ptr->alloc_resp_port, buffer); + pack16(dump_job_ptr->other_port, buffer); + if (dump_job_ptr->job_state & JOB_COMPLETING) { if (dump_job_ptr->nodes_completing == NULL) { dump_job_ptr->nodes_completing = @@ -518,6 +516,7 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer) packstr(dump_job_ptr->account, buffer); packstr(dump_job_ptr->comment, buffer); packstr(dump_job_ptr->network, buffer); + packstr(dump_job_ptr->licenses, buffer); packstr(dump_job_ptr->mail_user, buffer); select_g_pack_jobinfo(dump_job_ptr->select_jobinfo, @@ -547,47 +546,53 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer) static int _load_job_state(Buf buffer) { uint32_t job_id, user_id, group_id, time_limit, priority, alloc_sid; - uint32_t dependency, exit_code, num_procs; - time_t start_time, end_time, suspend_time, pre_sus_time; + uint32_t exit_code, num_procs, assoc_id, db_index, name_len, + total_procs; + time_t start_time, end_time, suspend_time, pre_sus_time, tot_sus_time; uint16_t job_state, next_step_id, details, batch_flag, step_flag; - uint16_t kill_on_node_fail, kill_on_step_done, name_len; + uint16_t kill_on_node_fail, kill_on_step_done, qos; uint16_t alloc_resp_port, other_port, mail_type, state_reason; - char *nodes = NULL, *partition = NULL, 
*name = NULL; - char *alloc_node = NULL, *alloc_resp_host = NULL, *other_host = NULL; + char *nodes = NULL, *partition = NULL, *name = NULL, *resp_host = NULL; char *account = NULL, *network = NULL, *mail_user = NULL; - char *comment = NULL, *nodes_completing = NULL; + char *comment = NULL, *nodes_completing = NULL, *alloc_node = NULL; + char *licenses = NULL; struct job_record *job_ptr; struct part_record *part_ptr; int error_code; select_jobinfo_t select_jobinfo = NULL; + safe_unpack32(&assoc_id, buffer); safe_unpack32(&job_id, buffer); safe_unpack32(&user_id, buffer); safe_unpack32(&group_id, buffer); safe_unpack32(&time_limit, buffer); safe_unpack32(&priority, buffer); safe_unpack32(&alloc_sid, buffer); - safe_unpack32(&dependency, buffer); safe_unpack32(&num_procs, buffer); + safe_unpack32(&total_procs, buffer); safe_unpack32(&exit_code, buffer); + safe_unpack32(&db_index, buffer); + safe_unpack32(&assoc_id, buffer); safe_unpack_time(&start_time, buffer); safe_unpack_time(&end_time, buffer); safe_unpack_time(&suspend_time, buffer); safe_unpack_time(&pre_sus_time, buffer); + safe_unpack_time(&tot_sus_time, buffer); safe_unpack16(&job_state, buffer); safe_unpack16(&next_step_id, buffer); safe_unpack16(&kill_on_node_fail, buffer); safe_unpack16(&kill_on_step_done, buffer); safe_unpack16(&batch_flag, buffer); - safe_unpack16(&alloc_resp_port, buffer); - safe_unpack16(&other_port, buffer); safe_unpack16(&mail_type, buffer); + safe_unpack16(&qos, buffer); safe_unpack16(&state_reason, buffer); - safe_unpackstr_xmalloc(&alloc_resp_host, &name_len, buffer); - safe_unpackstr_xmalloc(&other_host, &name_len, buffer); + safe_unpackstr_xmalloc(&resp_host, &name_len, buffer); + safe_unpack16(&alloc_resp_port, buffer); + safe_unpack16(&other_port, buffer); + if (job_state & JOB_COMPLETING) { safe_unpackstr_xmalloc(&nodes_completing, &name_len, buffer); @@ -599,6 +604,7 @@ static int _load_job_state(Buf buffer) safe_unpackstr_xmalloc(&account, &name_len, buffer); safe_unpackstr_xmalloc(&comment, &name_len, buffer); safe_unpackstr_xmalloc(&network, &name_len, buffer); + safe_unpackstr_xmalloc(&licenses, &name_len, buffer); safe_unpackstr_xmalloc(&mail_user, &name_len, buffer); if (select_g_alloc_jobinfo(&select_jobinfo) @@ -627,6 +633,10 @@ static int _load_job_state(Buf buffer) job_id, kill_on_node_fail); goto unpack_error; } + if (partition == NULL) { + error("No partition for job %u", job_id); + goto unpack_error; + } part_ptr = find_part_record (partition); if (part_ptr == NULL) { verbose("Invalid partition (%s) for job_id %u", @@ -662,59 +672,69 @@ static int _load_job_state(Buf buffer) goto unpack_error; } - job_ptr->user_id = user_id; - job_ptr->group_id = group_id; - job_ptr->time_limit = time_limit; - job_ptr->priority = priority; + xfree(job_ptr->account); + job_ptr->account = account; + account = NULL; /* reused, nothing left to free */ + xfree(job_ptr->alloc_node); + job_ptr->alloc_node = alloc_node; + alloc_node = NULL; /* reused, nothing left to free */ + job_ptr->alloc_resp_port = alloc_resp_port; job_ptr->alloc_sid = alloc_sid; - job_ptr->start_time = start_time; + job_ptr->assoc_id = assoc_id; + job_ptr->batch_flag = batch_flag; + xfree(job_ptr->comment); + job_ptr->comment = comment; + comment = NULL; /* reused, nothing left to free */ + job_ptr->db_index = db_index; job_ptr->end_time = end_time; - job_ptr->suspend_time = suspend_time; - job_ptr->pre_sus_time = pre_sus_time; + job_ptr->exit_code = exit_code; + job_ptr->group_id = group_id; job_ptr->job_state = job_state; + 
job_ptr->kill_on_node_fail = kill_on_node_fail; + job_ptr->kill_on_step_done = kill_on_step_done; + xfree(job_ptr->licenses); + job_ptr->licenses = licenses; + licenses = NULL; /* reused, nothing left to free */ + job_ptr->mail_type = mail_type; + xfree(job_ptr->mail_user); + job_ptr->mail_user = mail_user; + mail_user = NULL; /* reused, nothing left to free */ + xfree(job_ptr->name); /* in case duplicate record */ + job_ptr->name = name; + name = NULL; /* reused, nothing left to free */ + xfree(job_ptr->network); + job_ptr->network = network; + network = NULL; /* reused, nothing left to free */ job_ptr->next_step_id = next_step_id; - job_ptr->dependency = dependency; - job_ptr->exit_code = exit_code; - job_ptr->state_reason = state_reason; - job_ptr->num_procs = num_procs; - job_ptr->time_last_active = time(NULL); - strncpy(job_ptr->name, name, MAX_JOBNAME_LEN); - xfree(name); - xfree(job_ptr->nodes); - job_ptr->nodes = nodes; - nodes = NULL; /* reused, nothing left to free */ + xfree(job_ptr->nodes); /* in case duplicate record */ + job_ptr->nodes = nodes; + nodes = NULL; /* reused, nothing left to free */ if (nodes_completing) { xfree(job_ptr->nodes_completing); job_ptr->nodes_completing = nodes_completing; nodes_completing = NULL; /* reused, nothing left to free */ } - xfree(job_ptr->alloc_node); - job_ptr->alloc_node = alloc_node; - alloc_node = NULL; /* reused, nothing left to free */ - strncpy(job_ptr->partition, partition, MAX_SLURM_NAME); - xfree(partition); - job_ptr->account = account; - account = NULL; /* reused, nothing left to free */ - job_ptr->comment = comment; - comment = NULL; /* reused, nothing left to free */ - job_ptr->network = network; - network = NULL; /* reused, nothing left to free */ + job_ptr->num_procs = num_procs; + job_ptr->other_port = other_port; + xfree(job_ptr->partition); + job_ptr->partition = partition; + partition = NULL; /* reused, nothing left to free */ job_ptr->part_ptr = part_ptr; - job_ptr->kill_on_node_fail = kill_on_node_fail; - job_ptr->kill_on_step_done = kill_on_step_done; - job_ptr->batch_flag = batch_flag; - job_ptr->alloc_resp_port = alloc_resp_port; - job_ptr->alloc_resp_host = alloc_resp_host; - job_ptr->other_port = other_port; - job_ptr->other_host = other_host; - job_ptr->mail_type = mail_type; - job_ptr->mail_user = mail_user; - mail_user = NULL; /* reused, nothing left to free */ + job_ptr->pre_sus_time = pre_sus_time; + job_ptr->priority = priority; + job_ptr->qos = qos; + xfree(job_ptr->resp_host); + job_ptr->resp_host = resp_host; + resp_host = NULL; /* reused, nothing left to free */ job_ptr->select_jobinfo = select_jobinfo; - - build_node_details(job_ptr); /* set: num_cpu_groups, cpus_per_node, - * cpu_count_reps, node_cnt, and - * node_addr */ + job_ptr->start_time = start_time; + job_ptr->state_reason = state_reason; + job_ptr->suspend_time = suspend_time; + job_ptr->time_last_active = time(NULL); + job_ptr->time_limit = time_limit; + job_ptr->total_procs = total_procs; + job_ptr->tot_sus_time = tot_sus_time; + job_ptr->user_id = user_id; info("recovered job id %u", job_id); safe_unpack16(&step_flag, buffer); @@ -724,12 +744,13 @@ static int _load_job_state(Buf buffer) safe_unpack16(&step_flag, buffer); } + build_node_details(job_ptr); /* set: num_cpu_groups, cpus_per_node, + * cpu_count_reps, node_cnt, + * node_addr, alloc_lps, used_lps */ return SLURM_SUCCESS; unpack_error: error("Incomplete job record"); - xfree(alloc_resp_host); - xfree(other_host); xfree(nodes); xfree(nodes_completing); xfree(partition); @@ -737,6 
+758,8 @@ unpack_error: xfree(alloc_node); xfree(account); xfree(comment); + xfree(resp_host); + xfree(licenses); xfree(mail_user); select_g_free_jobinfo(&select_jobinfo); return SLURM_FAILURE; @@ -752,26 +775,28 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer) { pack32(detail_ptr->min_nodes, buffer); pack32(detail_ptr->max_nodes, buffer); - pack32(detail_ptr->total_procs, buffer); pack32(detail_ptr->num_tasks, buffer); pack16(detail_ptr->shared, buffer); pack16(detail_ptr->contiguous, buffer); pack16(detail_ptr->cpus_per_task, buffer); pack16(detail_ptr->ntasks_per_node, buffer); - pack16(detail_ptr->no_requeue, buffer); - pack16(detail_ptr->overcommit, buffer); + pack16(detail_ptr->requeue, buffer); + pack16(detail_ptr->acctg_freq, buffer); + + pack8(detail_ptr->open_mode, buffer); + pack8(detail_ptr->overcommit, buffer); pack32(detail_ptr->job_min_procs, buffer); pack32(detail_ptr->job_min_memory, buffer); - pack32(detail_ptr->job_max_memory, buffer); pack32(detail_ptr->job_min_tmp_disk, buffer); pack_time(detail_ptr->begin_time, buffer); pack_time(detail_ptr->submit_time, buffer); - packstr(detail_ptr->req_nodes, buffer); - packstr(detail_ptr->exc_nodes, buffer); - packstr(detail_ptr->features, buffer); + packstr(detail_ptr->req_nodes, buffer); + packstr(detail_ptr->exc_nodes, buffer); + packstr(detail_ptr->features, buffer); + packstr(detail_ptr->dependency, buffer); packstr(detail_ptr->err, buffer); packstr(detail_ptr->in, buffer); @@ -786,14 +811,16 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer) static int _load_job_details(struct job_record *job_ptr, Buf buffer) { char *req_nodes = NULL, *exc_nodes = NULL, *features = NULL; + char *dependency = NULL; char *err = NULL, *in = NULL, *out = NULL, *work_dir = NULL; char **argv = (char **) NULL; uint32_t min_nodes, max_nodes; - uint32_t job_min_procs, total_procs; - uint32_t job_min_memory, job_max_memory, job_min_tmp_disk; - uint32_t num_tasks; - uint16_t argc = 0, shared, contiguous, ntasks_per_node; - uint16_t cpus_per_task, name_len, no_requeue, overcommit; + uint32_t job_min_procs; + uint32_t job_min_memory, job_min_tmp_disk; + uint32_t num_tasks, name_len, argc = 0; + uint16_t shared, contiguous, ntasks_per_node; + uint16_t acctg_freq, cpus_per_task, requeue; + uint8_t open_mode, overcommit; time_t begin_time, submit_time; int i; multi_core_data_t *mc_ptr; @@ -801,26 +828,28 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer) /* unpack the job's details from the buffer */ safe_unpack32(&min_nodes, buffer); safe_unpack32(&max_nodes, buffer); - safe_unpack32(&total_procs, buffer); safe_unpack32(&num_tasks, buffer); safe_unpack16(&shared, buffer); safe_unpack16(&contiguous, buffer); safe_unpack16(&cpus_per_task, buffer); safe_unpack16(&ntasks_per_node, buffer); - safe_unpack16(&no_requeue, buffer); - safe_unpack16(&overcommit, buffer); + safe_unpack16(&requeue, buffer); + safe_unpack16(&acctg_freq, buffer); + + safe_unpack8(&open_mode, buffer); + safe_unpack8(&overcommit, buffer); safe_unpack32(&job_min_procs, buffer); safe_unpack32(&job_min_memory, buffer); - safe_unpack32(&job_max_memory, buffer); safe_unpack32(&job_min_tmp_disk, buffer); safe_unpack_time(&begin_time, buffer); safe_unpack_time(&submit_time, buffer); - safe_unpackstr_xmalloc(&req_nodes, &name_len, buffer); - safe_unpackstr_xmalloc(&exc_nodes, &name_len, buffer); - safe_unpackstr_xmalloc(&features, &name_len, buffer); + safe_unpackstr_xmalloc(&req_nodes, &name_len, buffer); + 
safe_unpackstr_xmalloc(&exc_nodes, &name_len, buffer); + safe_unpackstr_xmalloc(&features, &name_len, buffer); + safe_unpackstr_xmalloc(&dependency, &name_len, buffer); safe_unpackstr_xmalloc(&err, &name_len, buffer); safe_unpackstr_xmalloc(&in, &name_len, buffer); @@ -837,9 +866,9 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer) job_ptr->job_id, contiguous); goto unpack_error; } - if ((no_requeue > 1) || (overcommit > 1)) { - error("Invalid data for job %u: no_requeue=%u overcommit=%u", - no_requeue, overcommit); + if ((requeue > 1) || (overcommit > 1)) { + error("Invalid data for job %u: requeue=%u overcommit=%u", + requeue, overcommit); goto unpack_error; } @@ -860,17 +889,17 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer) /* now put the details into the job record */ job_ptr->details->min_nodes = min_nodes; job_ptr->details->max_nodes = max_nodes; - job_ptr->details->total_procs = total_procs; job_ptr->details->num_tasks = num_tasks; job_ptr->details->shared = shared; + job_ptr->details->acctg_freq = acctg_freq; job_ptr->details->contiguous = contiguous; job_ptr->details->cpus_per_task = cpus_per_task; job_ptr->details->ntasks_per_node = ntasks_per_node; job_ptr->details->job_min_procs = job_min_procs; job_ptr->details->job_min_memory = job_min_memory; - job_ptr->details->job_max_memory = job_max_memory; job_ptr->details->job_min_tmp_disk = job_min_tmp_disk; - job_ptr->details->no_requeue = no_requeue; + job_ptr->details->requeue = requeue; + job_ptr->details->open_mode = open_mode; job_ptr->details->overcommit = overcommit; job_ptr->details->begin_time = begin_time; job_ptr->details->submit_time = submit_time; @@ -884,6 +913,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer) job_ptr->details->argc = argc; job_ptr->details->argv = argv; job_ptr->details->mc_ptr = mc_ptr; + job_ptr->details->dependency = dependency; return SLURM_SUCCESS; @@ -891,6 +921,7 @@ unpack_error: xfree(req_nodes); xfree(exc_nodes); xfree(features); + xfree(dependency); xfree(err); xfree(in); xfree(out); @@ -974,12 +1005,14 @@ extern int kill_job_by_part_name(char *part_name) job_ptr->job_state = JOB_NODE_FAIL | JOB_COMPLETING; job_ptr->exit_code = MAX(job_ptr->exit_code, 1); job_ptr->state_reason = FAIL_DOWN_PARTITION; - if (suspended) + if (suspended) { job_ptr->end_time = job_ptr->suspend_time; - else - job_ptr->end_time = time(NULL); - job_completion_logger(job_ptr); + job_ptr->tot_sus_time += + difftime(now, job_ptr->suspend_time); + } else + job_ptr->end_time = now; deallocate_nodes(job_ptr, false, suspended); + job_completion_logger(job_ptr); } else if (job_ptr->job_state == JOB_PENDING) { job_count++; info("Killing job_id %u on defunct partition %s", @@ -1062,15 +1095,24 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test) node_name, job_ptr->job_id); _excise_node_from_job(job_ptr, node_ptr); } else if (job_ptr->batch_flag && job_ptr->details && - (job_ptr->details->no_requeue == 0)) { + (job_ptr->details->requeue > 0)) { + char requeue_msg[128]; + srun_node_fail(job_ptr->job_id, node_name); + info("requeue job %u due to failure of node %s", job_ptr->job_id, node_name); _set_job_prio(job_ptr); + snprintf(requeue_msg, sizeof(requeue_msg), + "Job requeued due to failure of node %s", + node_name); + slurm_sched_requeue(job_ptr, requeue_msg); job_ptr->time_last_active = now; - if (suspended) + if (suspended) { job_ptr->end_time = job_ptr->suspend_time; - else + job_ptr->tot_sus_time += + difftime(now, 
job_ptr->suspend_time); + } else job_ptr->end_time = now; /* We want this job to look like it was cancelled in the @@ -1092,10 +1134,12 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test) job_ptr->exit_code = MAX(job_ptr->exit_code, 1); job_ptr->state_reason = FAIL_DOWN_NODE; - if (suspended) + if (suspended) { job_ptr->end_time = job_ptr->suspend_time; - else + job_ptr->tot_sus_time += + difftime(now, job_ptr->suspend_time); + } else job_ptr->end_time = time(NULL); deallocate_nodes(job_ptr, false, suspended); job_completion_logger(job_ptr); @@ -1134,10 +1178,10 @@ void dump_job_desc(job_desc_msg_t * job_specs) { long job_id; long job_min_procs, job_min_sockets, job_min_cores, job_min_threads; - long job_min_memory, job_max_memory, job_min_tmp_disk, num_procs; - long time_limit, priority, contiguous; - long kill_on_node_fail, shared, immediate, dependency; - long cpus_per_task, no_requeue, num_tasks, overcommit; + long job_min_memory, job_min_tmp_disk, num_procs; + long time_limit, priority, contiguous, acctg_freq; + long kill_on_node_fail, shared, immediate; + long cpus_per_task, requeue, num_tasks, overcommit; long ntasks_per_node, ntasks_per_socket, ntasks_per_core; char buf[100]; @@ -1175,12 +1219,10 @@ void dump_job_desc(job_desc_msg_t * job_specs) job_min_memory = (job_specs->job_min_memory != NO_VAL) ? (long) job_specs->job_min_memory : -1L; - job_max_memory = (job_specs->job_max_memory != NO_VAL) ? - (long) job_specs->job_max_memory : -1L; job_min_tmp_disk = (job_specs->job_min_tmp_disk != NO_VAL) ? (long) job_specs->job_min_tmp_disk : -1L; - debug3(" job_min_memory=%ld job_max_memory=%ld job_min_tmp_disk=%ld", - job_min_memory, job_max_memory, job_min_tmp_disk); + debug3(" job_min_memory=%ld job_min_tmp_disk=%ld", + job_min_memory, job_min_tmp_disk); immediate = (job_specs->immediate == 0) ? 0L : 1L; debug3(" immediate=%ld features=%s", immediate, job_specs->features); @@ -1242,31 +1284,34 @@ void dump_job_desc(job_desc_msg_t * job_specs) job_specs->work_dir, job_specs->alloc_node, job_specs->alloc_sid); - dependency = (job_specs->dependency != NO_VAL) ? - (long) job_specs->dependency : -1L; - debug3(" alloc_resp_hostname=%s alloc_resp_port=%u", - job_specs->alloc_resp_hostname, job_specs->alloc_resp_port); - debug3(" other_hostname=%s other_port=%u", - job_specs->other_hostname, job_specs->other_port); - debug3(" dependency=%ld account=%s comment=%s", - dependency, job_specs->account, job_specs->comment); + debug3(" resp_host=%s alloc_resp_port=%u other_port=%u", + job_specs->resp_host, + job_specs->alloc_resp_port, job_specs->other_port); + debug3(" dependency=%s account=%s comment=%s", + job_specs->dependency, job_specs->account, + job_specs->comment); num_tasks = (job_specs->num_tasks != (uint16_t) NO_VAL) ? (long) job_specs->num_tasks : -1L; - overcommit = (job_specs->overcommit != (uint16_t) NO_VAL) ? + overcommit = (job_specs->overcommit != (uint8_t) NO_VAL) ? (long) job_specs->overcommit : -1L; + acctg_freq = (job_specs->acctg_freq != (uint16_t) NO_VAL) ? + (long) job_specs->acctg_freq : -1L; debug3(" mail_type=%u mail_user=%s nice=%d num_tasks=%d " - "overcommit=%d", + "open_mode=%u overcommit=%d acctg_freq=%d", job_specs->mail_type, job_specs->mail_user, - (int)job_specs->nice - NICE_OFFSET, num_tasks, overcommit); + (int)job_specs->nice - NICE_OFFSET, num_tasks, + job_specs->open_mode, overcommit, acctg_freq); slurm_make_time_str(&job_specs->begin_time, buf, sizeof(buf)); cpus_per_task = (job_specs->cpus_per_task != (uint16_t) NO_VAL) ? 
(long) job_specs->cpus_per_task : -1L; - no_requeue = (job_specs->no_requeue != (uint16_t) NO_VAL) ? - (long) job_specs->no_requeue : -1L; - debug3(" network=%s begin=%s cpus_per_task=%ld no_requeue=%ld", - job_specs->network, buf, cpus_per_task, no_requeue); + requeue = (job_specs->requeue != (uint16_t) NO_VAL) ? + (long) job_specs->requeue : -1L; + debug3(" network=%s begin=%s cpus_per_task=%ld requeue=%ld " + "licenses=%s", + job_specs->network, buf, cpus_per_task, requeue, + job_specs->licenses); ntasks_per_node = (job_specs->ntasks_per_node != (uint16_t) NO_VAL) ? (long) job_specs->ntasks_per_node : -1L; @@ -1336,6 +1381,7 @@ extern void rehash_jobs(void) * IN immediate - if set then either initiate the job immediately or fail * IN will_run - don't initiate the job if set, just test if it could run * now or later + * OUT resp - will run response (includes start location, time, etc.) * IN allocate - resource allocation request if set, not a full job * IN submit_uid -uid of user issuing the request * OUT job_pptr - set to pointer to job record @@ -1351,7 +1397,7 @@ extern void rehash_jobs(void) * NOTE: lock_slurmctld on entry: Read config Write job, Write node, Read part */ extern int job_allocate(job_desc_msg_t * job_specs, int immediate, - int will_run, + int will_run, will_run_response_msg_t **resp, int allocate, uid_t submit_uid, struct job_record **job_pptr) { @@ -1376,6 +1422,8 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate, xassert(job_ptr); independent = job_independent(job_ptr); + if (license_job_test(job_ptr) != SLURM_SUCCESS) + independent = false; /* Avoid resource fragmentation if important */ if (independent && switch_no_frag() && @@ -1410,6 +1458,18 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate, return ESLURM_NOT_TOP_PRIORITY; } + if (will_run && resp) { + job_desc_msg_t job_desc_msg; + int rc; + bzero(&job_desc_msg, sizeof(job_desc_msg_t)); + job_desc_msg.job_id = job_ptr->job_id; + rc = job_start_data(&job_desc_msg, resp); + job_ptr->job_state = JOB_FAILED; + job_ptr->exit_code = 1; + job_ptr->start_time = job_ptr->end_time = now; + return rc; + } + test_only = will_run || (allocate == 0); no_alloc = test_only || too_fragmented || @@ -1419,7 +1479,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate, last_job_update = now; slurm_sched_schedule(); /* work for external scheduler */ } - + if ((error_code == ESLURM_NODES_BUSY) || (error_code == ESLURM_JOB_HELD) || (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) { @@ -1479,9 +1539,11 @@ extern int job_fail(uint32_t job_id) if ((job_ptr->job_state == JOB_RUNNING) || suspended) { /* No need to signal steps, deallocate kills them */ job_ptr->time_last_active = now; - if (suspended) + if (suspended) { job_ptr->end_time = job_ptr->suspend_time; - else + job_ptr->tot_sus_time += + difftime(now, job_ptr->suspend_time); + } else job_ptr->end_time = now; last_job_update = now; job_ptr->job_state = JOB_FAILED | JOB_COMPLETING; @@ -1557,6 +1619,7 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag, && (signal == SIGKILL)) { last_job_update = now; job_ptr->end_time = job_ptr->suspend_time; + job_ptr->tot_sus_time += difftime(now, job_ptr->suspend_time); job_ptr->job_state = JOB_CANCELLED | JOB_COMPLETING; deallocate_nodes(job_ptr, false, true); job_completion_logger(job_ptr); @@ -1700,9 +1763,11 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue, job_ptr->state_reason = FAIL_TIMEOUT; } else job_ptr->job_state = 
JOB_COMPLETE | job_comp_flag; - if (suspended) + if (suspended) { job_ptr->end_time = job_ptr->suspend_time; - else + job_ptr->tot_sus_time += + difftime(now, job_ptr->suspend_time); + } else job_ptr->end_time = now; job_completion_logger(job_ptr); } @@ -1744,6 +1809,10 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, bool super_user = false; struct job_record *job_ptr; uint32_t total_nodes, max_procs; + acct_association_rec_t assoc_rec, *assoc_ptr; + List license_list = NULL; + bool valid; + #if SYSTEM_DIMENSIONS uint16_t geo[SYSTEM_DIMENSIONS]; uint16_t reboot; @@ -1751,21 +1820,6 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, uint16_t conn_type; #endif - debug2("before alteration asking for nodes %u-%u procs %u", - job_desc->min_nodes, job_desc->max_nodes, - job_desc->num_procs); - select_g_alter_node_cnt(SELECT_SET_NODE_CNT, job_desc); - select_g_get_jobinfo(job_desc->select_jobinfo, - SELECT_DATA_MAX_PROCS, &max_procs); - - debug2("after alteration asking for nodes %u-%u procs %u-%u", - job_desc->min_nodes, job_desc->max_nodes, - job_desc->num_procs, max_procs); - - *job_pptr = (struct job_record *) NULL; - if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid))) - return error_code; - /* find selected partition */ if (job_desc->partition) { part_ptr = list_find_first(part_list, &list_find_part, @@ -1784,6 +1838,40 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, } part_ptr = default_part_loc; } + if (job_desc->min_nodes == NO_VAL) + job_desc->min_nodes = part_ptr->min_nodes_orig; + if (job_desc->max_nodes == NO_VAL) { +#ifdef HAVE_BG + job_desc->max_nodes = part_ptr->min_nodes_orig; +#else + ; +#endif + } else if (job_desc->max_nodes < part_ptr->min_nodes_orig) { + info("_job_create: job's max nodes less than partition's " + "min nodes (%u < %u)", + job_desc->max_nodes, part_ptr->min_nodes_orig); + error_code = ESLURM_TOO_MANY_REQUESTED_NODES; + return error_code; + } + + debug3("before alteration asking for nodes %u-%u procs %u", + job_desc->min_nodes, job_desc->max_nodes, + job_desc->num_procs); + select_g_alter_node_cnt(SELECT_SET_NODE_CNT, job_desc); + select_g_get_jobinfo(job_desc->select_jobinfo, + SELECT_DATA_MAX_PROCS, &max_procs); + debug3("after alteration asking for nodes %u-%u procs %u-%u", + job_desc->min_nodes, job_desc->max_nodes, + job_desc->num_procs, max_procs); + + *job_pptr = (struct job_record *) NULL; + if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid))) + return error_code; + + if ((job_desc->user_id == 0) && part_ptr->disable_root_jobs) { + error("Security violation, SUBMIT_JOB for user root disabled"); + return ESLURM_USER_ID_MISSING; + } /* can this user access this partition */ if ((part_ptr->root_only) && (submit_uid != 0)) { @@ -1800,6 +1888,28 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, return error_code; } + bzero(&assoc_rec, sizeof(acct_association_rec_t)); + assoc_rec.uid = job_desc->user_id; + assoc_rec.partition = part_ptr->name; + assoc_rec.acct = job_desc->account; + + if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec, + accounting_enforce, &assoc_ptr)) { + info("_job_create: invalid account or partition for user %u", + job_desc->user_id); + error_code = ESLURM_INVALID_ACCOUNT; + return error_code; + } + if (job_desc->account == NULL) + job_desc->account = xstrdup(assoc_rec.acct); + if (accounting_enforce && + (!_validate_acct_policy(job_desc, part_ptr, &assoc_rec))) { + 
info("_job_create: exceeded association's node or time limit " + "for user %u", job_desc->user_id); + error_code = ESLURM_ACCOUNTING_POLICY; + return error_code; + } + /* check if select partition has sufficient resources to satisfy * the request */ @@ -1814,10 +1924,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, if (job_desc->contiguous) bit_fill_gaps(req_bitmap); if (bit_super_set(req_bitmap, part_ptr->node_bitmap) != 1) { - char *tmp = bitmap2node_name(req_bitmap); info("_job_create: requested nodes %s not in " - "partition %s", tmp, part_ptr->name); - xfree(tmp); + "partition %s", + job_desc->req_nodes, part_ptr->name); error_code = ESLURM_REQUESTED_NODES_NOT_IN_PARTITION; goto cleanup; } @@ -1930,10 +2039,17 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, goto cleanup; } + license_list = license_job_validate(job_desc->licenses, &valid); + if (!valid) { + info("Job's requested licenses are invalid: %s", + job_desc->licenses); + error_code = ESLURM_INVALID_LICENSES; + goto cleanup; + } if ((error_code =_validate_job_create_req(job_desc))) goto cleanup; - + if ((error_code = _copy_job_desc_to_job_record(job_desc, job_pptr, part_ptr, @@ -1942,10 +2058,11 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, error_code = ESLURM_ERROR_ON_DESC_TO_RECORD_COPY; goto cleanup; } - + job_ptr = *job_pptr; - if (job_ptr->dependency == job_ptr->job_id) { - info("User specified self as dependent job"); + job_ptr->assoc_id = assoc_rec.id; + job_ptr->assoc_ptr = (void *) assoc_ptr; + if (update_job_dependency(job_ptr, job_desc->dependency)) { error_code = ESLURM_DEPENDENCY; goto cleanup; } @@ -1965,6 +2082,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, } else job_ptr->batch_flag = 0; + job_ptr->license_list = license_list; + license_list = NULL; + /* Insure that requested partition is valid right now, * otherwise leave job queued and provide warning code */ detail_ptr = job_ptr->details; @@ -1999,6 +2119,8 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run, cleanup: + if (license_list) + list_destroy(license_list); FREE_NULL_BITMAP(req_bitmap); FREE_NULL_BITMAP(exc_bitmap); return error_code; @@ -2009,25 +2131,95 @@ cleanup: * RET 0 or error code */ static int _validate_job_create_req(job_desc_msg_t * job_desc) { + if (job_desc->account && (strlen(job_desc->account) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(account) too big (%d)", + strlen(job_desc->account)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->alloc_node && (strlen(job_desc->alloc_node) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(alloc_node) too big (%d)", + strlen(job_desc->alloc_node)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->blrtsimage && (strlen(job_desc->blrtsimage) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(blrtsimage) too big (%d)", + strlen(job_desc->blrtsimage)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->comment && (strlen(job_desc->comment) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(comment) too big (%d)", + strlen(job_desc->comment)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->dependency && (strlen(job_desc->dependency) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(dependency) too big (%d)", + strlen(job_desc->dependency)); + return ESLURM_PATHNAME_TOO_LONG; + } if (job_desc->err && (strlen(job_desc->err) > MAX_STR_LEN)) { 
info("_validate_job_create_req: strlen(err) too big (%d)", strlen(job_desc->err)); return ESLURM_PATHNAME_TOO_LONG; } + if (job_desc->features && (strlen(job_desc->features) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(features) too big (%d)", + strlen(job_desc->features)); + return ESLURM_PATHNAME_TOO_LONG; + } if (job_desc->in && (strlen(job_desc->in) > MAX_STR_LEN)) { info("_validate_job_create_req: strlen(in) too big (%d)", strlen(job_desc->in)); - return ESLURM_PATHNAME_TOO_LONG; + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->linuximage && (strlen(job_desc->linuximage) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(linuximage) too big (%d)", + strlen(job_desc->linuximage)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->licenses && (strlen(job_desc->licenses) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(licenses) too big (%d)", + strlen(job_desc->licenses)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->mail_user && (strlen(job_desc->mail_user) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(mail_user) too big (%d)", + strlen(job_desc->mail_user)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->mloaderimage && (strlen(job_desc->mloaderimage) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(mloaderimage) too big (%d)", + strlen(job_desc->features)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->name && (strlen(job_desc->name) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(name) too big (%d)", + strlen(job_desc->name)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->network && (strlen(job_desc->network) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(network) too big (%d)", + strlen(job_desc->network)); + return ESLURM_PATHNAME_TOO_LONG; } if (job_desc->out && (strlen(job_desc->out) > MAX_STR_LEN)) { info("_validate_job_create_req: strlen(out) too big (%d)", strlen(job_desc->out)); - return ESLURM_PATHNAME_TOO_LONG; + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->partition && (strlen(job_desc->partition) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(partition) too big (%d)", + strlen(job_desc->partition)); + return ESLURM_PATHNAME_TOO_LONG; + } + if (job_desc->ramdiskimage && (strlen(job_desc->ramdiskimage) > MAX_STR_LEN)) { + info("_validate_job_create_req: strlen(ramdiskimage) too big (%d)", + strlen(job_desc->ramdiskimage)); + return ESLURM_PATHNAME_TOO_LONG; } if (job_desc->work_dir && (strlen(job_desc->work_dir) > MAX_STR_LEN)) { info("_validate_job_create_req: strlen(work_dir) too big (%d)", strlen(job_desc->work_dir)); - return ESLURM_PATHNAME_TOO_LONG; + return ESLURM_PATHNAME_TOO_LONG; } return SLURM_SUCCESS; } @@ -2083,7 +2275,7 @@ _copy_job_desc_to_file(job_desc_msg_t * job_desc, uint32_t job_id) * IN size - number of elements in data */ static int -_write_data_array_to_file(char *file_name, char **data, uint16_t size) +_write_data_array_to_file(char *file_name, char **data, uint32_t size) { int fd, i, pos, nwrite, amount; @@ -2093,8 +2285,8 @@ _write_data_array_to_file(char *file_name, char **data, uint16_t size) return ESLURM_WRITING_TO_FILE; } - amount = write(fd, &size, sizeof(uint16_t)); - if (amount < sizeof(uint16_t)) { + amount = write(fd, &size, sizeof(uint32_t)); + if (amount < sizeof(uint32_t)) { error("Error writing file %s, %m", file_name); close(fd); return ESLURM_WRITING_TO_FILE; @@ -2167,7 +2359,7 @@ static int _write_data_to_file(char *file_name, char *data) * RET point to array of string pointers containing 
environment variables * NOTE: READ lock_slurmctld config before entry */ -char **get_job_env(struct job_record *job_ptr, uint16_t * env_size) +char **get_job_env(struct job_record *job_ptr, uint32_t * env_size) { char job_dir[30], *file_name, **environment = NULL; @@ -2210,11 +2402,11 @@ char *get_job_script(struct job_record *job_ptr) * NOTE: The output format of this must be identical with _xduparray2() */ static void -_read_data_array_from_file(char *file_name, char ***data, uint16_t * size) +_read_data_array_from_file(char *file_name, char ***data, uint32_t * size) { int fd, pos, buf_size, amount, i; char *buffer, **array_ptr; - uint16_t rec_cnt; + uint32_t rec_cnt; xassert(file_name); xassert(data); @@ -2228,8 +2420,8 @@ _read_data_array_from_file(char *file_name, char ***data, uint16_t * size) return; } - amount = read(fd, &rec_cnt, sizeof(uint16_t)); - if (amount < sizeof(uint16_t)) { + amount = read(fd, &rec_cnt, sizeof(uint32_t)); + if (amount < sizeof(uint32_t)) { if (amount != 0) /* incomplete write */ error("Error reading file %s, %m", file_name); else @@ -2421,7 +2613,7 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, if (error_code) return error_code; - strncpy(job_ptr->partition, part_ptr->name, MAX_SLURM_NAME); + job_ptr->partition = xstrdup(part_ptr->name); job_ptr->part_ptr = part_ptr; if (job_desc->job_id != NO_VAL) /* already confirmed unique */ job_ptr->job_id = job_desc->job_id; @@ -2429,9 +2621,8 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, _set_job_id(job_ptr); _add_job_hash(job_ptr); - if (job_desc->name) { - strncpy(job_ptr->name, job_desc->name, MAX_JOBNAME_LEN); - } + if (job_desc->name) + job_ptr->name = xstrdup(job_desc->name); job_ptr->user_id = (uid_t) job_desc->user_id; job_ptr->group_id = (gid_t) job_desc->group_id; job_ptr->job_state = JOB_PENDING; @@ -2441,9 +2632,22 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, job_ptr->account = xstrdup(job_desc->account); job_ptr->network = xstrdup(job_desc->network); job_ptr->comment = xstrdup(job_desc->comment); - if (job_desc->dependency != NO_VAL) /* leave as zero */ - job_ptr->dependency = job_desc->dependency; - + if (!wiki_sched_test) { + char *sched_type = slurm_get_sched_type(); + if (strcmp(sched_type, "sched/wiki") == 0) + wiki_sched = true; + xfree(sched_type); + wiki_sched_test = true; + } + if (wiki_sched && job_ptr->comment && + strstr(job_ptr->comment, "QOS:")) { + if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR")) + job_ptr->qos = QOS_EXPEDITE; + else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE")) + job_ptr->qos = QOS_STANDBY; + else + job_ptr->qos = QOS_NORMAL; + } if (job_desc->priority != NO_VAL) /* already confirmed submit_uid==0 */ job_ptr->priority = job_desc->priority; else { @@ -2454,14 +2658,14 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, if (job_desc->kill_on_node_fail != (uint16_t) NO_VAL) job_ptr->kill_on_node_fail = job_desc->kill_on_node_fail; + job_ptr->resp_host = xstrdup(job_desc->resp_host); job_ptr->alloc_resp_port = job_desc->alloc_resp_port; - job_ptr->alloc_resp_host = xstrdup(job_desc->alloc_resp_hostname); job_ptr->other_port = job_desc->other_port; - job_ptr->other_host = xstrdup(job_desc->other_hostname); job_ptr->time_last_active = time(NULL); job_ptr->num_procs = job_desc->num_procs; job_ptr->cr_enabled = 0; + job_ptr->licenses = xstrdup(job_desc->licenses); job_ptr->mail_type = job_desc->mail_type; job_ptr->mail_user = xstrdup(job_desc->mail_user); @@ -2470,8 +2674,10 @@ _copy_job_desc_to_job_record(job_desc_msg_t * 
job_desc, detail_ptr->argv = job_desc->argv; job_desc->argv = (char **) NULL; /* nothing left */ job_desc->argc = 0; /* nothing left */ - detail_ptr->min_nodes = job_desc->min_nodes; - detail_ptr->max_nodes = job_desc->max_nodes; + detail_ptr->acctg_freq = job_desc->acctg_freq; + detail_ptr->open_mode = job_desc->open_mode; + detail_ptr->min_nodes = job_desc->min_nodes; + detail_ptr->max_nodes = job_desc->max_nodes; if (job_desc->req_nodes) { detail_ptr->req_nodes = _copy_nodelist_no_dup(job_desc->req_nodes); @@ -2493,19 +2699,19 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, if (job_desc->task_dist != (uint16_t) NO_VAL) detail_ptr->task_dist = job_desc->task_dist; if (job_desc->cpus_per_task != (uint16_t) NO_VAL) - detail_ptr->cpus_per_task = job_desc->cpus_per_task; + detail_ptr->cpus_per_task = MIN(job_desc->cpus_per_task, 1); if (job_desc->ntasks_per_node != (uint16_t) NO_VAL) detail_ptr->ntasks_per_node = job_desc->ntasks_per_node; - if (job_desc->no_requeue != (uint16_t) NO_VAL) - detail_ptr->no_requeue = job_desc->no_requeue; + if (job_desc->requeue != (uint16_t) NO_VAL) + detail_ptr->requeue = MIN(job_desc->requeue, 1); + else + detail_ptr->requeue = slurmctld_conf.job_requeue; if (job_desc->job_min_procs != (uint16_t) NO_VAL) detail_ptr->job_min_procs = job_desc->job_min_procs; detail_ptr->job_min_procs = MAX(detail_ptr->job_min_procs, detail_ptr->cpus_per_task); if (job_desc->job_min_memory != NO_VAL) detail_ptr->job_min_memory = job_desc->job_min_memory; - if (job_desc->job_max_memory != NO_VAL) - detail_ptr->job_max_memory = job_desc->job_max_memory; if (job_desc->job_min_tmp_disk != NO_VAL) detail_ptr->job_min_tmp_disk = job_desc->job_min_tmp_disk; if (job_desc->num_tasks != NO_VAL) @@ -2518,14 +2724,13 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc, detail_ptr->out = xstrdup(job_desc->out); if (job_desc->work_dir) detail_ptr->work_dir = xstrdup(job_desc->work_dir); - if (job_desc->overcommit != (uint16_t) NO_VAL) + if (job_desc->overcommit != (uint8_t) NO_VAL) detail_ptr->overcommit = job_desc->overcommit; if (job_desc->begin_time > time(NULL)) detail_ptr->begin_time = job_desc->begin_time; job_ptr->select_jobinfo = select_g_copy_jobinfo(job_desc->select_jobinfo); detail_ptr->mc_ptr = _set_multi_core_data(job_desc); - *job_rec_ptr = job_ptr; return SLURM_SUCCESS; } @@ -2632,25 +2837,18 @@ static void _job_timed_out(struct job_record *job_ptr) static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate, uid_t submit_uid) { - static bool wiki_sched = false; - static bool wiki_sched_test = false; - - /* Permit normal user to specify job id only for sched/wiki - * and sched/wiki2 */ + /* Permit normal user to specify job id only for sched/wiki + * (Maui scheduler). 
This was also required with earlier + * versions of the Moab scheduler (wiki2), but was fixed + * in early 2007 to submit jobs as user root */ if (!wiki_sched_test) { char *sched_type = slurm_get_sched_type(); - if ((strcmp(sched_type, "sched/wiki") == 0) - || (strcmp(sched_type, "sched/wiki2") == 0)) + if (strcmp(sched_type, "sched/wiki") == 0) wiki_sched = true; xfree(sched_type); wiki_sched_test = true; } - if ((job_desc_msg->user_id == 0) && slurmctld_conf.disable_root_jobs) { - error("Security violation, SUBMIT_JOB for user root disabled"); - return ESLURM_USER_ID_MISSING; - } - if ((job_desc_msg->num_procs == NO_VAL) && (job_desc_msg->min_nodes == NO_VAL) && (job_desc_msg->req_nodes == NULL)) { @@ -2670,10 +2868,6 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate, debug("_validate_job_desc: job failed to specify group"); job_desc_msg->group_id = 0; /* uses user default */ } - if ((job_desc_msg->name) && - (strlen(job_desc_msg->name) >= MAX_JOBNAME_LEN)) { - job_desc_msg->name[MAX_JOBNAME_LEN-1] = '\0'; - } if (job_desc_msg->contiguous == (uint16_t) NO_VAL) job_desc_msg->contiguous = 0; @@ -2744,9 +2938,7 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate, if (job_desc_msg->job_min_threads == (uint16_t) NO_VAL) job_desc_msg->job_min_threads = 1; /* default 1 thread per core */ if (job_desc_msg->job_min_memory == NO_VAL) - job_desc_msg->job_min_memory = 1; /* default 1MB mem per node */ - if (job_desc_msg->job_max_memory == NO_VAL) - job_desc_msg->job_max_memory = 1; /* default 1MB mem per node */ + job_desc_msg->job_min_memory = 0; /* default no memory limit */ if (job_desc_msg->job_min_tmp_disk == NO_VAL) job_desc_msg->job_min_tmp_disk = 0;/* default 0MB disk per node */ @@ -2781,18 +2973,23 @@ static void _list_delete_job(void *job_entry) delete_job_details(job_ptr); xfree(job_ptr->alloc_node); + xfree(job_ptr->name); xfree(job_ptr->nodes); xfree(job_ptr->nodes_completing); FREE_NULL_BITMAP(job_ptr->node_bitmap); + xfree(job_ptr->partition); xfree(job_ptr->cpus_per_node); xfree(job_ptr->cpu_count_reps); xfree(job_ptr->node_addr); - xfree(job_ptr->alloc_resp_host); - xfree(job_ptr->other_host); xfree(job_ptr->account); + xfree(job_ptr->resp_host); + xfree(job_ptr->licenses); + if (job_ptr->license_list) + list_destroy(job_ptr->license_list); xfree(job_ptr->mail_user); xfree(job_ptr->network); xfree(job_ptr->alloc_lps); + xfree(job_ptr->used_lps); xfree(job_ptr->comment); select_g_free_jobinfo(&job_ptr->select_jobinfo); if (job_ptr->step_list) { @@ -2893,6 +3090,10 @@ extern void pack_all_jobs(char **buffer_ptr, int *buffer_size, (job_ptr->part_ptr->hidden)) continue; + if (slurmctld_conf.private_data + && (job_ptr->user_id != uid) && !validate_super_user(uid)) + continue; + pack_job(job_ptr, buffer); jobs_packed++; } @@ -2938,18 +3139,13 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer) else pack32(dump_job_ptr->time_limit, buffer); - if (dump_job_ptr->details) { + if (dump_job_ptr->details) pack_time(dump_job_ptr->details->submit_time, buffer); - } else { + else pack_time((time_t) 0, buffer); - } - if (IS_JOB_PENDING(dump_job_ptr)) { - if (dump_job_ptr->details) - pack_time(dump_job_ptr->details->begin_time, - buffer); - else - pack_time((time_t) 0, buffer); - } else + if (IS_JOB_PENDING(dump_job_ptr) && dump_job_ptr->details) + pack_time(dump_job_ptr->details->begin_time, buffer); + else pack_time(dump_job_ptr->start_time, buffer); pack_time(dump_job_ptr->end_time, buffer); pack_time(dump_job_ptr->suspend_time, 
buffer); @@ -2961,7 +3157,8 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer) packstr(dump_job_ptr->account, buffer); packstr(dump_job_ptr->network, buffer); packstr(dump_job_ptr->comment, buffer); - pack32(dump_job_ptr->dependency, buffer); + packstr(dump_job_ptr->licenses, buffer); + pack32(dump_job_ptr->exit_code, buffer); pack16(dump_job_ptr->num_cpu_groups, buffer); @@ -2995,16 +3192,36 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer) static void _pack_default_job_details(struct job_details *detail_ptr, Buf buffer) { + int i; + char *cmd_line = NULL; + if (detail_ptr) { - packstr(detail_ptr->features, buffer); + packstr(detail_ptr->features, buffer); + packstr(detail_ptr->work_dir, buffer); + packstr(detail_ptr->dependency, buffer); + if (detail_ptr->argv) { + for (i=0; detail_ptr->argv[i]; i++) { + if (cmd_line) + xstrcat(cmd_line, " "); + xstrcat(cmd_line, detail_ptr->argv[i]); + } + packstr(cmd_line, buffer); + xfree(cmd_line); + } else + packnull(buffer); pack32(detail_ptr->min_nodes, buffer); pack32(detail_ptr->max_nodes, buffer); + pack16(detail_ptr->requeue, buffer); } else { packnull(buffer); + packnull(buffer); + packnull(buffer); + packnull(buffer); pack32((uint32_t) 0, buffer); pack32((uint32_t) 0, buffer); + pack16((uint16_t) 0, buffer); } } @@ -3019,7 +3236,6 @@ static void _pack_pending_job_details(struct job_details *detail_ptr, pack16(detail_ptr->job_min_procs, buffer); pack32(detail_ptr->job_min_memory, buffer); - pack32(detail_ptr->job_max_memory, buffer); pack32(detail_ptr->job_min_tmp_disk, buffer); packstr(detail_ptr->req_nodes, buffer); @@ -3037,7 +3253,6 @@ static void _pack_pending_job_details(struct job_details *detail_ptr, pack16((uint16_t) 0, buffer); pack16((uint16_t) 0, buffer); - pack32((uint32_t) 0, buffer); pack32((uint32_t) 0, buffer); pack32((uint32_t) 0, buffer); @@ -3094,6 +3309,7 @@ void reset_job_bitmaps(void) struct job_record *job_ptr; struct part_record *part_ptr; bool job_fail = false; + time_t now = time(NULL); xassert(job_list); @@ -3101,12 +3317,19 @@ void reset_job_bitmaps(void) while ((job_ptr = (struct job_record *) list_next(job_iterator))) { xassert (job_ptr->magic == JOB_MAGIC); job_fail = false; - part_ptr = list_find_first(part_list, &list_find_part, - job_ptr->partition); - if (part_ptr == NULL) { - error("Invalid partition (%s) for job_id %u", - job_ptr->partition, job_ptr->job_id); + + if (job_ptr->partition == NULL) { + error("No partition for job_id %u", job_ptr->job_id); + part_ptr = NULL; job_fail = true; + } else { + part_ptr = list_find_first(part_list, &list_find_part, + job_ptr->partition); + if (part_ptr == NULL) { + error("Invalid partition (%s) for job_id %u", + job_ptr->partition, job_ptr->job_id); + job_fail = true; + } } job_ptr->part_ptr = part_ptr; @@ -3128,12 +3351,6 @@ void reset_job_bitmaps(void) build_node_details(job_ptr); /* set: num_cpu_groups, * cpu_count_reps, node_cnt, * cpus_per_node, node_addr */ - if (select_g_update_nodeinfo(job_ptr) != SLURM_SUCCESS) { - error("select_g_update_nodeinfo(%u): %m", - job_ptr->job_id); - /* not critical ... ? 
*/ - /* probably job_fail should be set here */ - } if (_reset_detail_bitmaps(job_ptr)) job_fail = true; @@ -3159,6 +3376,8 @@ void reset_job_bitmaps(void) job_ptr->end_time = job_ptr->suspend_time; job_ptr->job_state = JOB_NODE_FAIL | JOB_COMPLETING; + job_ptr->tot_sus_time += + difftime(now, job_ptr->suspend_time); } job_ptr->exit_code = MAX(job_ptr->exit_code, 1); job_ptr->state_reason = FAIL_DOWN_NODE; @@ -3166,8 +3385,19 @@ void reset_job_bitmaps(void) } } + list_iterator_reset(job_iterator); + /* This will reinitialize the select plugin database, which + * we can only do after ALL job's states and bitmaps are set + * (i.e. it needs to be in this second loop) */ + while ((job_ptr = (struct job_record *) list_next(job_iterator))) { + if (select_g_update_nodeinfo(job_ptr) != SLURM_SUCCESS) { + error("select_g_update_nodeinfo(%u): %m", + job_ptr->job_id); + } + } list_iterator_destroy(job_iterator); - last_job_update = time(NULL); + + last_job_update = now; } static int _reset_detail_bitmaps(struct job_record *job_ptr) @@ -3216,7 +3446,9 @@ static void _reset_step_bitmaps(struct job_record *job_ptr) job_ptr->job_id, step_ptr->step_id); delete_step_record (job_ptr, step_ptr->step_id); } - } + if (step_ptr->step_node_bitmap) + step_alloc_lps(step_ptr); + } list_iterator_destroy (step_iterator); return; @@ -3313,9 +3545,9 @@ void reset_job_priority(void) } /* - * _top_priority - determine if any other job for this partition has a - * higher priority than specified job - * IN job_ptr - pointer to selected partition + * _top_priority - determine if any other job has a higher priority than the + * specified job + * IN job_ptr - pointer to selected job * RET true if selected job has highest priority */ static bool _top_priority(struct job_record *job_ptr) @@ -3345,8 +3577,11 @@ static bool _top_priority(struct job_record *job_ptr) continue; if (!job_independent(job_ptr2)) continue; - if ((job_ptr2->priority > job_ptr->priority) && - (job_ptr2->part_ptr == job_ptr->part_ptr)) { + if ((job_ptr2->part_ptr->priority > + job_ptr ->part_ptr->priority) || + ((job_ptr2->part_ptr->priority == + job_ptr ->part_ptr->priority) && + (job_ptr2->priority > job_ptr->priority))) { top = false; break; } @@ -3694,12 +3929,29 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) job_specs->comment = NULL; /* Nothing left to free */ info("update_job: setting comment to %s for job_id %u", job_ptr->comment, job_specs->job_id); + + if (wiki_sched && strstr(job_ptr->comment, "QOS:")) { + if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR")) + job_ptr->qos = QOS_EXPEDITE; + else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE")) + job_ptr->qos = QOS_STANDBY; + else + job_ptr->qos = QOS_NORMAL; + } } if (job_specs->name) { - strncpy(job_ptr->name, job_specs->name, MAX_JOBNAME_LEN); + xfree(job_ptr->name); + job_ptr->name = job_specs->name; + job_specs->name = NULL; /* Nothing left to free */ info("update_job: setting name to %s for job_id %u", - job_specs->name, job_specs->job_id); + job_ptr->name, job_specs->job_id); + } + + if (job_specs->requeue != (uint16_t) NO_VAL) { + detail_ptr->requeue = job_specs->requeue; + info("update_job: setting requeue to %u for job_id %u", + job_specs->requeue, job_specs->job_id); } if (job_specs->partition) { @@ -3709,8 +3961,25 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) else if (tmp_part_ptr == NULL) error_code = ESLURM_INVALID_PARTITION_NAME; else if (super_user) { - strncpy(job_ptr->partition, job_specs->partition, - MAX_SLURM_NAME); + acct_association_rec_t assoc_rec, 
*assoc_ptr; + bzero(&assoc_rec, sizeof(acct_association_rec_t)); + assoc_rec.uid = job_ptr->user_id; + assoc_rec.partition = job_specs->partition; + assoc_rec.acct = job_ptr->account; + if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec, + accounting_enforce, + &assoc_ptr)) { + info("job_update: invalid account %s for job %u", + job_specs->account, job_ptr->job_id); + error_code = ESLURM_INVALID_ACCOUNT; + /* Let update proceed. Note there is an invalid + * association ID for accounting purposes */ + } else { + job_ptr->assoc_id = assoc_rec.id; + job_ptr->assoc_ptr = (void *) assoc_ptr; + } + xfree(job_ptr->partition); + job_ptr->partition = xstrdup(job_specs->partition); job_ptr->part_ptr = tmp_part_ptr; info("update_job: setting partition to %s for " "job_id %u", job_specs->partition, @@ -3781,15 +4050,39 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) } if (job_specs->account) { - xfree(job_ptr->account); - if (job_specs->account[0] != '\0') { - job_ptr->account = job_specs->account; - job_specs->account = NULL; /* Nothing left to free */ - info("update_job: setting account to %s for job_id %u", - job_ptr->account, job_specs->job_id); + if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL)) { + info("update_job: attempt to modify account for " + "non-pending job_id %u", job_specs->job_id); + error_code = ESLURM_DISABLED; } else { - info("update_job: cleared account for job_id %u", - job_specs->job_id); + acct_association_rec_t assoc_rec, *assoc_ptr; + bzero(&assoc_rec, sizeof(acct_association_rec_t)); + + assoc_rec.uid = job_ptr->user_id; + assoc_rec.partition = job_ptr->partition; + assoc_rec.acct = job_specs->account; + if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec, + accounting_enforce, + &assoc_ptr)) { + info("job_update: invalid account %s for " + "job_id %u", + job_specs->account, job_ptr->job_id); + error_code = ESLURM_INVALID_ACCOUNT; + } else { + xfree(job_ptr->account); + if (assoc_rec.acct[0] != '\0') { + job_ptr->account = xstrdup(assoc_rec.acct); + info("update_job: setting account to " + "%s for job_id %u", + assoc_rec.acct, job_ptr->job_id); + } else { + info("update_job: cleared account for " + "job_id %u", + job_specs->job_id); + } + job_ptr->assoc_id = assoc_rec.id; + job_ptr->assoc_ptr = (void *) assoc_ptr; + } } } @@ -3808,15 +4101,15 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) } } - if (job_specs->dependency != NO_VAL) { - if (!IS_JOB_PENDING(job_ptr)) + if (job_specs->dependency) { + if ((!IS_JOB_PENDING(job_ptr)) || (job_ptr->details == NULL)) error_code = ESLURM_DISABLED; - else if (job_specs->dependency == job_ptr->job_id) + else if (update_job_dependency(job_ptr, job_specs->dependency) + != SLURM_SUCCESS) { error_code = ESLURM_DEPENDENCY; - else { - job_ptr->dependency = job_specs->dependency; - info("update_job: setting dependency to %u for " - "job_id %u", job_ptr->dependency, + } else { + info("update_job: setting dependency to %s for " + "job_id %u", job_ptr->details->dependency, job_ptr->job_id); } } @@ -3828,6 +4121,50 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) error_code = ESLURM_DISABLED; } + if (job_specs->licenses) { + List license_list = NULL; + bool valid; + license_list = license_job_validate(job_specs->licenses, + &valid); + + if (!valid) { + info("update_job: invalid licenses: %s", + job_specs->licenses); + error_code = ESLURM_INVALID_LICENSES; + } else if (IS_JOB_PENDING(job_ptr)) { + if (job_ptr->license_list) + list_destroy(job_ptr->license_list); + job_ptr->license_list = license_list; + 
xfree(job_ptr->licenses); + job_ptr->licenses = job_specs->licenses; + job_specs->licenses = NULL; /* nothing to free */ + info("update_job: setting licenses to %s for job %u", + job_ptr->licenses, job_ptr->job_id); + } else if ((job_ptr->job_state == JOB_RUNNING) && super_user) { + /* NOTE: This can result in oversubscription of + * licenses */ + license_job_return(job_ptr); + if (job_ptr->license_list) + list_destroy(job_ptr->license_list); + job_ptr->license_list = license_list; + info("update_job: changing licenses from %s to %s for " + " running job %u", + job_ptr->licenses, job_specs->licenses, + job_ptr->job_id); + xfree(job_ptr->licenses); + job_ptr->licenses = job_specs->licenses; + job_specs->licenses = NULL; /* nothing to free */ + license_job_get(job_ptr); + } else { + /* licenses are valid, but job state or user not + * allowed to make changes */ + info("update_job: could not change licenses for job %u", + job_ptr->job_id); + error_code = ESLURM_DISABLED; + list_destroy(license_list); + } + } + #ifdef HAVE_BG { uint16_t reboot = (uint16_t) NO_VAL; @@ -3966,50 +4303,49 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid) /* * validate_jobs_on_node - validate that any jobs that should be on the node * are actually running, if not clean up the job records and/or node - * records - * IN node_name - node which should have jobs running - * IN/OUT job_count - number of jobs which should be running on specified node - * IN job_id_ptr - pointer to array of job_ids that should be on this node - * IN step_id_ptr - pointer to array of job step ids that should be on node + * records, call this function after validate_node_specs() sets the node + * state properly + * IN reg_msg - node registration message */ -void -validate_jobs_on_node(char *node_name, uint32_t * job_count, - uint32_t * job_id_ptr, uint16_t * step_id_ptr) +extern void validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg) { int i, node_inx, jobs_on_node; struct node_record *node_ptr; struct job_record *job_ptr; time_t now = time(NULL); - node_ptr = find_node_record(node_name); + node_ptr = find_node_record(reg_msg->node_name); if (node_ptr == NULL) { - error("slurmd registered on unknown node %s", node_name); + error("slurmd registered on unknown node %s", + reg_msg->node_name); return; } node_inx = node_ptr - node_record_table_ptr; /* Check that jobs running are really supposed to be there */ - for (i = 0; i < *job_count; i++) { - if ( (job_id_ptr[i] >= MIN_NOALLOC_JOBID) && - (job_id_ptr[i] <= MAX_NOALLOC_JOBID) ) { + for (i = 0; i < reg_msg->job_count; i++) { + if ( (reg_msg->job_id[i] >= MIN_NOALLOC_JOBID) && + (reg_msg->job_id[i] <= MAX_NOALLOC_JOBID) ) { info("NoAllocate job %u.%u reported on node %s", - job_id_ptr[i], step_id_ptr[i], node_name); + reg_msg->job_id[i], reg_msg->step_id[i], + reg_msg->node_name); continue; } - job_ptr = find_job_record(job_id_ptr[i]); + job_ptr = find_job_record(reg_msg->job_id[i]); if (job_ptr == NULL) { error("Orphan job %u.%u reported on node %s", - job_id_ptr[i], step_id_ptr[i], node_name); - kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + reg_msg->job_id[i], reg_msg->step_id[i], + reg_msg->node_name); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); } else if ((job_ptr->job_state == JOB_RUNNING) || - (job_ptr->job_state == JOB_SUSPENDED)) { + (job_ptr->job_state == JOB_SUSPENDED)) { if (bit_test(job_ptr->node_bitmap, node_inx)) { debug3("Registered job %u.%u on node %s ", - job_id_ptr[i], step_id_ptr[i], - node_name); + reg_msg->job_id[i], 
reg_msg->step_id[i], + reg_msg->node_name); if ((job_ptr->batch_flag) && (node_inx == bit_ffs( job_ptr->node_bitmap))) { @@ -4018,10 +4354,10 @@ validate_jobs_on_node(char *node_name, uint32_t * job_count, job_ptr->time_last_active = now; } } else { - error - ("Registered job %u.%u on wrong node %s ", - job_id_ptr[i], step_id_ptr[i], node_name); - kill_job_on_node(job_id_ptr[i], job_ptr, + error("Registered job %u.%u on wrong node %s ", + reg_msg->job_id[i], reg_msg->step_id[i], + reg_msg->node_name); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); } } @@ -4029,19 +4365,20 @@ validate_jobs_on_node(char *node_name, uint32_t * job_count, else if (job_ptr->job_state & JOB_COMPLETING) { /* Re-send kill request as needed, * not necessarily an error */ - kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); } else if (job_ptr->job_state == JOB_PENDING) { error("Registered PENDING job %u.%u on node %s ", - job_id_ptr[i], step_id_ptr[i], node_name); + reg_msg->job_id[i], reg_msg->step_id[i], + reg_msg->node_name); job_ptr->job_state = JOB_FAILED; job_ptr->exit_code = 1; job_ptr->state_reason = FAIL_SYSTEM; - last_job_update = now; + last_job_update = now; job_ptr->start_time = job_ptr->end_time = now; - kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); job_completion_logger(job_ptr); delete_job_details(job_ptr); } @@ -4049,10 +4386,10 @@ validate_jobs_on_node(char *node_name, uint32_t * job_count, else { /* else job is supposed to be done */ error ("Registered job %u.%u in state %s on node %s ", - job_id_ptr[i], step_id_ptr[i], + reg_msg->job_id[i], reg_msg->step_id[i], job_state_string(job_ptr->job_state), - node_name); - kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + reg_msg->node_name); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); } } @@ -4060,14 +4397,14 @@ validate_jobs_on_node(char *node_name, uint32_t * job_count, if (jobs_on_node) _purge_lost_batch_jobs(node_inx, now); - if (jobs_on_node != *job_count) { + if (jobs_on_node != reg_msg->job_count) { /* slurmd will not know of a job unless the job has * steps active at registration time, so this is not * an error condition, slurmd is also reporting steps * rather than jobs */ debug3("resetting job_count on node %s from %d to %d", - node_name, *job_count, jobs_on_node); - *job_count = jobs_on_node; + reg_msg->node_name, reg_msg->job_count, jobs_on_node); + reg_msg->job_count = jobs_on_node; } return; @@ -4155,6 +4492,9 @@ job_alloc_info(uint32_t uid, uint32_t job_id, struct job_record **job_pptr) if ((job_ptr->user_id != uid) && (uid != 0) && (uid != slurmctld_conf.slurm_user_id)) return ESLURM_ACCESS_DENIED; + if (slurmctld_conf.private_data + && (job_ptr->user_id != uid) && !validate_super_user(uid)) + return ESLURM_ACCESS_DENIED; if (IS_JOB_PENDING(job_ptr)) return ESLURM_JOB_PENDING; if (IS_JOB_FINISHED(job_ptr)) @@ -4415,6 +4755,10 @@ extern void job_completion_logger(struct job_record *job_ptr) int base_state; xassert(job_ptr); + /* make sure all parts of the job are notified */ + srun_job_complete(job_ptr); + + /* mail out notifications of completion */ base_state = job_ptr->job_state & (~JOB_COMPLETING); if ((base_state == JOB_COMPLETE) || (base_state == JOB_CANCELLED)) { if (job_ptr->mail_type & MAIL_JOB_END) @@ -4425,7 +4769,7 @@ extern void job_completion_logger(struct job_record *job_ptr) } g_slurm_jobcomp_write(job_ptr); - srun_job_complete(job_ptr); + 
jobacct_storage_g_job_complete(acct_db_conn, job_ptr); } /* @@ -4436,47 +4780,50 @@ extern void job_completion_logger(struct job_record *job_ptr) */ extern bool job_independent(struct job_record *job_ptr) { - struct job_record *dep_ptr; struct job_details *detail_ptr = job_ptr->details; time_t now = time(NULL); - bool send_acct_rec = false; + int rc; if (detail_ptr && (detail_ptr->begin_time > now)) { job_ptr->state_reason = WAIT_TIME; return false; /* not yet time */ } - if (job_ptr->dependency == 0) - goto indi; - - dep_ptr = find_job_record(job_ptr->dependency); - if (dep_ptr == NULL) - goto indi; - - if (((dep_ptr->job_state & JOB_COMPLETING) == 0) && - (dep_ptr->job_state >= JOB_COMPLETE)) - goto indi; - - job_ptr->state_reason = WAIT_DEPENDENCY; - return false; /* job exists and incomplete */ - - indi: /* job is independent, set begin time as needed */ - if (detail_ptr && (detail_ptr->begin_time == 0)) { - detail_ptr->begin_time = now; - send_acct_rec = true; - } else if (job_ptr->state_reason == WAIT_TIME) { - job_ptr->state_reason = WAIT_NO_REASON; - send_acct_rec = true; - } - if (send_acct_rec) { - /* We want to record when a job becomes eligible in - * order to calculate reserved time (a measure of - * system over-subscription), job really is not - * starting now */ - jobacct_g_job_start_slurmctld(job_ptr); + rc = test_job_dependency(job_ptr); + if (rc == 0) { + bool send_acct_rec = false; + if (job_ptr->state_reason == WAIT_DEPENDENCY) + job_ptr->state_reason = WAIT_NO_REASON; + if (detail_ptr && (detail_ptr->begin_time == 0)) { + detail_ptr->begin_time = now; + send_acct_rec = true; + } else if (job_ptr->state_reason == WAIT_TIME) { + job_ptr->state_reason = WAIT_NO_REASON; + send_acct_rec = true; + } + if (send_acct_rec) { + /* We want to record when a job becomes eligible in + * order to calculate reserved time (a measure of + * system over-subscription), job really is not + * starting now */ + jobacct_storage_g_job_start(acct_db_conn, job_ptr); + } + return true; + } else if (rc == 1) { + job_ptr->state_reason = WAIT_DEPENDENCY; + return false; + } else { /* rc == 2 */ + time_t now = time(NULL); + info("Job dependency can't be satisfied, cancelling job %u", + job_ptr->job_id); + job_ptr->job_state = JOB_CANCELLED; + job_ptr->start_time = now; + job_ptr->end_time = now; + job_completion_logger(job_ptr); + return false; } - return true; } + /* * determine if job is ready to execute per the node select plugin * IN job_id - job to test @@ -4588,11 +4935,21 @@ static void _suspend_job(struct job_record *job_ptr, uint16_t op) /* Specified job is being suspended, release allocated nodes */ static int _suspend_job_nodes(struct job_record *job_ptr) { - int i, rc; + int i, rc = SLURM_SUCCESS; struct node_record *node_ptr = node_record_table_ptr; uint16_t base_state, node_flags; + static bool sched_gang_test = false; + static bool sched_gang = false; - if ((rc = select_g_job_suspend(job_ptr)) != SLURM_SUCCESS) + if (!sched_gang_test) { + char *sched_type = slurm_get_sched_type(); + if (strcmp(sched_type, "sched/gang") == 0) + sched_gang = true; + xfree(sched_type); + sched_gang_test = true; + } + if ((sched_gang == false) && + ((rc = select_g_job_suspend(job_ptr)) != SLURM_SUCCESS)) return rc; for (i=0; i<node_record_count; i++, node_ptr++) { @@ -4640,11 +4997,21 @@ static int _suspend_job_nodes(struct job_record *job_ptr) /* Specified job is being resumed, re-allocate the nodes */ static int _resume_job_nodes(struct job_record *job_ptr) { - int i, rc; + int i, rc = SLURM_SUCCESS; 
struct node_record *node_ptr = node_record_table_ptr; uint16_t base_state, node_flags; + static bool sched_gang_test = false; + static bool sched_gang = false; - if ((rc = select_g_job_resume(job_ptr)) != SLURM_SUCCESS) + if (!sched_gang_test) { + char *sched_type = slurm_get_sched_type(); + if (strcmp(sched_type, "sched/gang") == 0) + sched_gang = true; + xfree(sched_type); + sched_gang_test = true; + } + if ((sched_gang == false) && + ((rc = select_g_job_resume(job_ptr)) != SLURM_SUCCESS)) return rc; for (i=0; i<node_record_count; i++, node_ptr++) { @@ -4761,17 +5128,20 @@ extern int job_suspend(suspend_msg_t *sus_ptr, uid_t uid, goto reply; _suspend_job(job_ptr, sus_ptr->op); job_ptr->job_state = JOB_RUNNING; + job_ptr->tot_sus_time += + difftime(now, job_ptr->suspend_time); if (job_ptr->time_limit != INFINITE) { /* adjust effective time_limit */ job_ptr->end_time = now + (job_ptr->time_limit * 60) - job_ptr->pre_sus_time; } + resume_job_step(job_ptr); } job_ptr->time_last_active = now; job_ptr->suspend_time = now; - jobacct_g_suspend_slurmctld(job_ptr); + jobacct_storage_g_job_suspend(acct_db_conn, job_ptr); reply: if (conn_fd >= 0) { @@ -4818,7 +5188,7 @@ extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd conn_fd) rc = ESLURM_ALREADY_DONE; goto reply; } - if ((job_ptr->details == NULL) || job_ptr->details->no_requeue) { + if ((job_ptr->details == NULL) || (job_ptr->details->requeue == 0)) { rc = ESLURM_DISABLED; goto reply; } @@ -4829,6 +5199,7 @@ extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd conn_fd) /* reset the priority */ _set_job_prio(job_ptr); + slurm_sched_requeue(job_ptr, "Job requeued by user/admin"); last_job_update = now; /* nothing else to do if pending */ @@ -4921,3 +5292,53 @@ extern void update_job_nodes_completing(void) } list_iterator_destroy(job_iterator); } + +static bool _validate_acct_policy(job_desc_msg_t *job_desc, + struct part_record *part_ptr, + acct_association_rec_t *assoc_ptr) +{ + uint32_t time_limit; + + //log_assoc_rec(assoc_ptr); + if ((assoc_ptr->max_wall_duration_per_job != NO_VAL) && + (assoc_ptr->max_wall_duration_per_job != INFINITE)) { + time_limit = assoc_ptr->max_wall_duration_per_job; + if (job_desc->time_limit == NO_VAL) { + if (part_ptr->max_time == INFINITE) + job_desc->time_limit = time_limit; + else + job_desc->time_limit = MIN(time_limit, + part_ptr->max_time); + } else if (job_desc->time_limit > time_limit) { + info("job for user %u: " + "time limit %u exceeds account max %u", + job_desc->user_id, + job_desc->time_limit, time_limit); + return false; + } + } + + if ((assoc_ptr->max_nodes_per_job != NO_VAL) && + (assoc_ptr->max_nodes_per_job != INFINITE)) { + if (job_desc->max_nodes == 0) + job_desc->max_nodes = assoc_ptr->max_nodes_per_job; + else if (job_desc->max_nodes > assoc_ptr->max_nodes_per_job) { + if (job_desc->min_nodes > + assoc_ptr->max_nodes_per_job) { + info("job %u for user %u: " + "node limit %u exceeds account max %u", + job_desc->job_id, job_desc->user_id, + job_desc->min_nodes, + assoc_ptr->max_nodes_per_job); + return false; + } + job_desc->max_nodes = assoc_ptr->max_nodes_per_job; + } + } + + /* NOTE: We can't enforce assoc_ptr->max_cpu_secs_per_job at this + * time because we don't have access to a CPU count for the job + * due to how all of the job's specifications interact */ + + return true; +} diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c index c093a9c37..0d496b23e 100644 --- a/src/slurmctld/job_scheduler.c +++ b/src/slurmctld/job_scheduler.c @@ -2,10 +2,10 
@@ * job_scheduler.c - manage the scheduling of pending jobs in priority order * Note there is a global job list (job_list) ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -46,39 +46,35 @@ #include <string.h> #include <unistd.h> +#include "src/common/assoc_mgr.h" #include "src/common/list.h" #include "src/common/macros.h" #include "src/common/node_select.h" +#include "src/common/slurm_accounting_storage.h" #include "src/common/xassert.h" #include "src/common/xstring.h" #include "src/slurmctld/agent.h" +#include "src/slurmctld/job_scheduler.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/node_scheduler.h" #include "src/slurmctld/slurmctld.h" #include "src/slurmctld/srun_comm.h" +#define _DEBUG 0 #define MAX_RETRIES 10 -struct job_queue { - int priority; - struct job_record *job_ptr; -}; - -static int _build_job_queue(struct job_queue **job_queue); -static void _launch_job(struct job_record *job_ptr); -static void _sort_job_queue(struct job_queue *job_queue, - int job_queue_size); +static void _depend_list_del(void *dep_ptr); static char **_xduparray(uint16_t size, char ** array); /* - * _build_job_queue - build (non-priority ordered) list of pending jobs + * build_job_queue - build (non-priority ordered) list of pending jobs * OUT job_queue - pointer to job queue * RET number of entries in job_queue - * global: job_list - global list of job records * NOTE: the buffer at *job_queue must be xfreed by the caller */ -static int _build_job_queue(struct job_queue **job_queue) +extern int build_job_queue(struct job_queue **job_queue) { ListIterator job_iterator; struct job_record *job_ptr = NULL; @@ -96,15 +92,17 @@ static int _build_job_queue(struct job_queue **job_queue) (job_ptr->job_state & JOB_COMPLETING) || (job_ptr->priority == 0)) /* held */ continue; - if (!job_independent(job_ptr)) /* waiting for other job */ + if (!job_independent(job_ptr)) /* can not run now */ continue; if (job_buffer_size <= job_queue_size) { - job_buffer_size += 50; + job_buffer_size += 200; xrealloc(my_job_queue, job_buffer_size * sizeof(struct job_queue)); } my_job_queue[job_queue_size].job_ptr = job_ptr; - my_job_queue[job_queue_size].priority = job_ptr->priority; + my_job_queue[job_queue_size].job_priority = job_ptr->priority; + my_job_queue[job_queue_size].part_priority = + job_ptr->part_ptr->priority; job_queue_size++; } list_iterator_destroy(job_iterator); @@ -177,24 +175,68 @@ extern void set_job_elig_time(void) unlock_slurmctld(job_write_lock); } +/* Test of part_ptr can still run jobs or if its nodes have + * already been reserved by higher priority jobs (those in + * the failed_parts array) */ +static bool _failed_partition(struct part_record *part_ptr, + struct part_record **failed_parts, + int failed_part_cnt) +{ + int i; + + for (i = 0; i < failed_part_cnt; i++) { + if (failed_parts[i] == part_ptr) + return true; + } + return false; +} + +#ifndef HAVE_BG +/* Add a partition to the failed_parts array, reserving its nodes + * from use by lower priority jobs. 
Also flags all partitions with + * nodes overlapping this partition. */ +static void _add_failed_partition(struct part_record *failed_part_ptr, + struct part_record **failed_parts, + int *failed_part_cnt) +{ + int count = *failed_part_cnt; + ListIterator part_iterator; + struct part_record *part_ptr; + + failed_parts[count++] = failed_part_ptr; + + /* We also need to add partitions that have overlapping nodes */ + part_iterator = list_iterator_create(part_list); + while ((part_ptr = (struct part_record *) list_next(part_iterator))) { + if ((part_ptr == failed_part_ptr) || + (_failed_partition(part_ptr, failed_parts, count)) || + (!bit_super_set(failed_part_ptr->node_bitmap, + part_ptr->node_bitmap))) + continue; + failed_parts[count++] = part_ptr; + } + list_iterator_destroy(part_iterator); + + *failed_part_cnt = count; +} +#endif + /* * schedule - attempt to schedule all pending jobs * pending jobs for each partition will be scheduled in priority * order until a request fails * RET count of jobs scheduled - * global: job_list - global list of job records - * last_job_update - time of last update to job table * Note: We re-build the queue every time. Jobs can not only be added * or removed from the queue, but have their priority or partition * changed with the update_job RPC. In general nodes will be in priority * order (by submit time), so the sorting should be pretty fast. */ -int schedule(void) +extern int schedule(void) { struct job_queue *job_queue; - int i, j, error_code, failed_part_cnt, job_queue_size, job_cnt = 0; + int i, error_code, failed_part_cnt = 0, job_queue_size, job_cnt = 0; struct job_record *job_ptr; - struct part_record **failed_parts; + struct part_record **failed_parts = NULL; /* Locks: Read config, write job, write node, read partition */ slurmctld_lock_t job_write_lock = { READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK }; @@ -227,25 +269,48 @@ int schedule(void) return SLURM_SUCCESS; } debug("Running job scheduler"); - job_queue_size = _build_job_queue(&job_queue); + job_queue_size = build_job_queue(&job_queue); if (job_queue_size == 0) { unlock_slurmctld(job_write_lock); return SLURM_SUCCESS; } - _sort_job_queue(job_queue, job_queue_size); + sort_job_queue(job_queue, job_queue_size); + + failed_parts = xmalloc(sizeof(struct part_record *) * + list_count(part_list)); - failed_part_cnt = 0; - failed_parts = NULL; for (i = 0; i < job_queue_size; i++) { job_ptr = job_queue[i].job_ptr; if (job_ptr->priority == 0) /* held */ continue; - for (j = 0; j < failed_part_cnt; j++) { - if (failed_parts[j] == job_ptr->part_ptr) - break; + if (_failed_partition(job_ptr->part_ptr, failed_parts, + failed_part_cnt)) { + job_ptr->state_reason = WAIT_PRIORITY; + continue; } - if (j < failed_part_cnt) + + if (license_job_test(job_ptr) != SLURM_SUCCESS) { + job_ptr->state_reason = WAIT_LICENSES; continue; + } + + if (assoc_mgr_validate_assoc_id(acct_db_conn, job_ptr->assoc_id, + accounting_enforce)) { + /* NOTE: This only happens if a user's account is + * disabled between when the job was submitted and + * the time we consider running it. It should be + * very rare. 
*/ + info("schedule: JobId=%u has invalid account", + job_ptr->job_id); + last_job_update = time(NULL); + job_ptr->job_state = JOB_FAILED; + job_ptr->exit_code = 1; + job_ptr->state_reason = FAIL_BANK_ACCOUNT; + job_ptr->start_time = job_ptr->end_time = time(NULL); + job_completion_logger(job_ptr); + delete_job_details(job_ptr); + continue; + } error_code = select_nodes(job_ptr, false, NULL); if (error_code == ESLURM_NODES_BUSY) { @@ -263,11 +328,8 @@ int schedule(void) * group all Blue Gene job partitions of type * 2x2x2 coprocessor mesh into a single SLURM * partition, say "co-mesh-222") */ - xrealloc(failed_parts, - (failed_part_cnt + 1) * - sizeof(struct part_record *)); - failed_parts[failed_part_cnt++] = - job_ptr->part_ptr; + _add_failed_partition(job_ptr->part_ptr, failed_parts, + &failed_part_cnt); #endif } else if (error_code == SLURM_SUCCESS) { /* job initiated */ @@ -278,8 +340,7 @@ int schedule(void) &ionodes); if(ionodes) { sprintf(tmp_char,"%s[%s]", - job_ptr->nodes, - ionodes); + job_ptr->nodes, ionodes); } else { sprintf(tmp_char,"%s",job_ptr->nodes); } @@ -291,7 +352,7 @@ int schedule(void) job_ptr->job_id, job_ptr->nodes); #endif if (job_ptr->batch_flag) - _launch_job(job_ptr); + launch_job(job_ptr); else srun_allocate(job_ptr->job_id); job_cnt++; @@ -321,40 +382,57 @@ int schedule(void) /* - * _sort_job_queue - sort job_queue in decending priority order + * sort_job_queue - sort job_queue in decending priority order * IN job_queue_size - count of elements in the job queue * IN/OUT job_queue - pointer to sorted job queue */ -static void _sort_job_queue(struct job_queue *job_queue, int job_queue_size) +extern void sort_job_queue(struct job_queue *job_queue, int job_queue_size) { int i, j, top_prio_inx; - int tmp_prio, top_prio; struct job_record *tmp_job_ptr; + uint32_t top_job_prio, tmp_job_prio; + uint16_t top_part_prio, tmp_part_prio; for (i = 0; i < job_queue_size; i++) { - top_prio = job_queue[i].priority; - top_prio_inx = i; + top_prio_inx = i; + top_job_prio = job_queue[i].job_priority; + top_part_prio = job_queue[i].part_priority; + for (j = (i + 1); j < job_queue_size; j++) { - if (top_prio >= job_queue[j].priority) + if (top_part_prio > job_queue[j].part_priority) continue; - top_prio = job_queue[j].priority; - top_prio_inx = j; + if ((top_part_prio == job_queue[j].part_priority) && + (top_job_prio >= job_queue[j].job_priority)) + continue; + + top_prio_inx = j; + top_job_prio = job_queue[j].job_priority; + top_part_prio = job_queue[j].part_priority; } if (top_prio_inx == i) - continue; - tmp_prio = job_queue[i].priority; - tmp_job_ptr = job_queue[i].job_ptr; - job_queue[i].priority = job_queue[top_prio_inx].priority; - job_queue[i].job_ptr = job_queue[top_prio_inx].job_ptr; - job_queue[top_prio_inx].priority = tmp_prio; - job_queue[top_prio_inx].job_ptr = tmp_job_ptr; + continue; /* in correct order */ + + /* swap records at top_prio_inx and i */ + tmp_job_ptr = job_queue[i].job_ptr; + tmp_job_prio = job_queue[i].job_priority; + tmp_part_prio = job_queue[i].part_priority; + + job_queue[i].job_ptr = job_queue[top_prio_inx].job_ptr; + job_queue[i].job_priority = job_queue[top_prio_inx].job_priority; + job_queue[i].part_priority = job_queue[top_prio_inx].part_priority; + + job_queue[top_prio_inx].job_ptr = tmp_job_ptr; + job_queue[top_prio_inx].job_priority = tmp_job_prio; + job_queue[top_prio_inx].part_priority = tmp_part_prio; + } } -/* _launch_job - send an RPC to a slurmd to initiate a batch job +/* + * launch_job - send an RPC to a slurmd to initiate a batch 
job * IN job_ptr - pointer to job that will be initiated */ -static void _launch_job(struct job_record *job_ptr) +extern void launch_job(struct job_record *job_ptr) { batch_job_launch_msg_t *launch_msg_ptr; agent_arg_t *agent_arg_ptr; @@ -365,9 +443,8 @@ static void _launch_job(struct job_record *job_ptr) return; /* Initialization of data structures */ - launch_msg_ptr = - (batch_job_launch_msg_t *) - xmalloc(sizeof(batch_job_launch_msg_t)); + launch_msg_ptr = (batch_job_launch_msg_t *) + xmalloc(sizeof(batch_job_launch_msg_t)); launch_msg_ptr->job_id = job_ptr->job_id; launch_msg_ptr->step_id = NO_VAL; launch_msg_ptr->uid = job_ptr->user_id; @@ -375,6 +452,8 @@ static void _launch_job(struct job_record *job_ptr) launch_msg_ptr->nprocs = job_ptr->details->num_tasks; launch_msg_ptr->nodes = xstrdup(job_ptr->nodes); launch_msg_ptr->overcommit = job_ptr->details->overcommit; + launch_msg_ptr->open_mode = job_ptr->details->open_mode; + launch_msg_ptr->acctg_freq = job_ptr->details->acctg_freq; if (make_batch_job_cred(launch_msg_ptr)) { error("aborting batch job %u", job_ptr->job_id); @@ -398,7 +477,7 @@ static void _launch_job(struct job_record *job_ptr) launch_msg_ptr->script = get_job_script(job_ptr); launch_msg_ptr->environment = get_job_env(job_ptr, &launch_msg_ptr->envc); - + launch_msg_ptr->job_mem = job_ptr->details->job_min_memory; launch_msg_ptr->num_cpu_groups = job_ptr->num_cpu_groups; launch_msg_ptr->cpus_per_node = xmalloc(sizeof(uint32_t) * job_ptr->num_cpu_groups); @@ -463,3 +542,324 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr) error("slurm_cred_create failure for batch job %u", cred_arg.jobid); return SLURM_ERROR; } + +static void _depend_list_del(void *dep_ptr) +{ + xfree(dep_ptr); +} + +/* Print a job's dependency information based upon job_ptr->depend_list */ +extern void print_job_dependency(struct job_record *job_ptr) +{ + ListIterator depend_iter; + struct depend_spec *dep_ptr; + char *dep_str; + + info("Dependency information for job %u", job_ptr->job_id); + if ((job_ptr->details == NULL) || + (job_ptr->details->depend_list == NULL)) + return; + + depend_iter = list_iterator_create(job_ptr->details->depend_list); + if (!depend_iter) + fatal("list_iterator_create memory allocation failure"); + while ((dep_ptr = list_next(depend_iter))) { + if (dep_ptr->depend_type == SLURM_DEPEND_AFTER) + dep_str = "after"; + else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_ANY) + dep_str = "afterany"; + else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_NOT_OK) + dep_str = "afternotok"; + else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_OK) + dep_str = "afterok"; + else + dep_str = "unknown"; + info(" %s:%u", dep_str, dep_ptr->job_id); + } + list_iterator_destroy(depend_iter); +} + +/* + * Determine if a job's dependencies are met + * RET: 0 = no dependencies + * 1 = dependencies remain + * 2 = failure (job completion code not per dependency), delete the job + */ +extern int test_job_dependency(struct job_record *job_ptr) +{ + ListIterator depend_iter; + struct depend_spec *dep_ptr; + bool failure = false; + + if ((job_ptr->details == NULL) || + (job_ptr->details->depend_list == NULL)) + return 0; + + depend_iter = list_iterator_create(job_ptr->details->depend_list); + if (!depend_iter) + fatal("list_iterator_create memory allocation failure"); + while ((dep_ptr = list_next(depend_iter))) { + if (dep_ptr->job_ptr->job_id != dep_ptr->job_id) { + /* job is gone, dependency lifted */ + list_delete_item(depend_iter); + } else if (dep_ptr->depend_type == 
SLURM_DEPEND_AFTER) { + if (!IS_JOB_PENDING(dep_ptr->job_ptr)) + list_delete_item(depend_iter); + else + break; + } else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_ANY) { + if (IS_JOB_FINISHED(dep_ptr->job_ptr)) + list_delete_item(depend_iter); + else + break; + } else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_NOT_OK) { + if (!IS_JOB_FINISHED(dep_ptr->job_ptr)) + break; + if ((dep_ptr->job_ptr->job_state & (~JOB_COMPLETING)) + != JOB_COMPLETE) + list_delete_item(depend_iter); + else { + failure = true; + break; + } + } else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_OK) { + if (!IS_JOB_FINISHED(dep_ptr->job_ptr)) + break; + if ((dep_ptr->job_ptr->job_state & (~JOB_COMPLETING)) + == JOB_COMPLETE) + list_delete_item(depend_iter); + else { + failure = true; + break; + } + } else + failure = true; + } + list_iterator_destroy(depend_iter); + + if (failure) + return 2; + if (dep_ptr) + return 1; + return 0; +} + +/* + * Parse a job dependency string and use it to establish a "depend_spec" + * list of dependencies. We accept both old format (a single job ID) and + * new format (e.g. "afterok:123:124,after:128"). + * IN job_ptr - job record to have dependency and depend_list updated + * IN new_depend - new dependency description + * RET returns an error code from slurm_errno.h + */ +extern int update_job_dependency(struct job_record *job_ptr, char *new_depend) +{ + int rc = SLURM_SUCCESS; + uint16_t depend_type = 0; + uint32_t job_id = 0; + char *tok = new_depend, *sep_ptr, *sep_ptr2; + List new_depend_list = NULL; + struct depend_spec *dep_ptr; + struct job_record *dep_job_ptr; + char dep_buf[32]; + + if (job_ptr->details == NULL) + return EINVAL; + + /* Clear dependencies on NULL or empty dependency input */ + if ((new_depend == NULL) || (new_depend[0] == '\0')) { + xfree(job_ptr->details->dependency); + if (job_ptr->details->depend_list) + list_destroy(job_ptr->details->depend_list); + return rc; + + } + + new_depend_list = list_create(_depend_list_del); + /* validate new dependency string */ + while (rc == SLURM_SUCCESS) { + sep_ptr = strchr(tok, ':'); + if ((sep_ptr == NULL) && (job_id == 0)) { + job_id = strtol(tok, &sep_ptr, 10); + if ((sep_ptr == NULL) || (sep_ptr[0] != '\0') || + (job_id <= 0) || (job_id == job_ptr->job_id)) { + rc = EINVAL; + break; + } + /* old format, just a single job_id */ + dep_job_ptr = find_job_record(job_id); + if (!dep_job_ptr) /* assume already done */ + break; + snprintf(dep_buf, sizeof(dep_buf), "afterany:%u", job_id); + new_depend = dep_buf; + dep_ptr = xmalloc(sizeof(struct depend_spec)); + dep_ptr->depend_type = SLURM_DEPEND_AFTER_ANY; + dep_ptr->job_id = job_id; + dep_ptr->job_ptr = dep_job_ptr; + if (!list_append(new_depend_list, dep_ptr)) + fatal("list_append memory allocation failure"); + break; + } + + if (strncasecmp(tok, "afternotok", 10) == 0) + depend_type = SLURM_DEPEND_AFTER_NOT_OK; + else if (strncasecmp(tok, "afterany", 8) == 0) + depend_type = SLURM_DEPEND_AFTER_ANY; + else if (strncasecmp(tok, "afterok", 7) == 0) + depend_type = SLURM_DEPEND_AFTER_OK; + else if (strncasecmp(tok, "after", 5) == 0) + depend_type = SLURM_DEPEND_AFTER; + else { + rc = EINVAL; + break; + } + sep_ptr++; /* skip over ":" */ + while (rc == SLURM_SUCCESS) { + job_id = strtol(sep_ptr, &sep_ptr2, 10); + if ((sep_ptr2 == NULL) || + (job_id <= 0) || (job_id == job_ptr->job_id) || + ((sep_ptr2[0] != '\0') && (sep_ptr2[0] != ',') && + (sep_ptr2[0] != ':'))) { + rc = EINVAL; + break; + } + dep_job_ptr = find_job_record(job_id); + if (dep_job_ptr) { /* job still 
active */ + dep_ptr = xmalloc(sizeof(struct depend_spec)); + dep_ptr->depend_type = depend_type; + dep_ptr->job_id = job_id; + dep_ptr->job_ptr = dep_job_ptr; + if (!list_append(new_depend_list, dep_ptr)) { + fatal("list_append memory allocation " + "failure"); + } + } + if (sep_ptr2[0] != ':') + break; + sep_ptr = sep_ptr2 + 1; /* skip over ":" */ + } + if (sep_ptr2[0] == ',') + tok = sep_ptr2 + 1; + else + break; + } + + if (rc == SLURM_SUCCESS) { + xfree(job_ptr->details->dependency); + job_ptr->details->dependency = xstrdup(new_depend); + if (job_ptr->details->depend_list) + list_destroy(job_ptr->details->depend_list); + job_ptr->details->depend_list = new_depend_list; +#if _DEBUG + print_job_dependency(job_ptr); +#endif + } else { + list_destroy(new_depend_list); + } + return rc; +} + +/* Determine if a pending job will run using only the specified nodes + * (in job_desc_msg->req_nodes), build response message and return + * SLURM_SUCCESS on success. Otherwise return an error code. Caller + * must free response message */ +extern int job_start_data(job_desc_msg_t *job_desc_msg, + will_run_response_msg_t **resp) +{ + struct job_record *job_ptr; + struct part_record *part_ptr; + bitstr_t *avail_bitmap = NULL; + uint32_t min_nodes, max_nodes, req_nodes; + int rc = SLURM_SUCCESS; + + job_ptr = find_job_record(job_desc_msg->job_id); + if (job_ptr == NULL) + return ESLURM_INVALID_JOB_ID; + + part_ptr = job_ptr->part_ptr; + if (part_ptr == NULL) + return ESLURM_INVALID_PARTITION_NAME; + + if ((job_ptr->details == NULL) || + (job_ptr->job_state != JOB_PENDING)) + return ESLURM_DISABLED; + + if ((job_desc_msg->req_nodes == NULL) || + (job_desc_msg->req_nodes == '\0')) { + /* assume all nodes available to job for testing */ + avail_bitmap = bit_copy(avail_node_bitmap); + } else if (node_name2bitmap(job_desc_msg->req_nodes, false, + &avail_bitmap) != 0) { + return ESLURM_INVALID_NODE_NAME; + } + + /* Only consider nodes that are not DOWN or DRAINED */ + bit_and(avail_bitmap, avail_node_bitmap); + + /* Consider only nodes in this job's partition */ + if (part_ptr->node_bitmap) + bit_and(avail_bitmap, part_ptr->node_bitmap); + else + rc = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE; + + if (job_req_node_filter(job_ptr, avail_bitmap)) + rc = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE; + if (job_ptr->details->exc_node_bitmap) { + bitstr_t *exc_node_mask = NULL; + exc_node_mask = bit_copy(job_ptr->details->exc_node_bitmap); + if (exc_node_mask == NULL) + fatal("bit_copy malloc failure"); + bit_not(exc_node_mask); + bit_and(avail_bitmap, exc_node_mask); + FREE_NULL_BITMAP(exc_node_mask); + } + if (job_ptr->details->req_node_bitmap) { + if (!bit_super_set(job_ptr->details->req_node_bitmap, + avail_bitmap)) { + rc = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE; + } + } + + if (rc == SLURM_SUCCESS) { + min_nodes = MAX(job_ptr->details->min_nodes, + part_ptr->min_nodes); + if (job_ptr->details->max_nodes == 0) + max_nodes = part_ptr->max_nodes; + else + max_nodes = MIN(job_ptr->details->max_nodes, + part_ptr->max_nodes); + max_nodes = MIN(max_nodes, 500000); /* prevent overflows */ + if (job_ptr->details->max_nodes) + req_nodes = max_nodes; + else + req_nodes = min_nodes; + + rc = select_g_job_test(job_ptr, avail_bitmap, + min_nodes, max_nodes, req_nodes, + SELECT_MODE_WILL_RUN); + } + + if (rc == SLURM_SUCCESS) { + will_run_response_msg_t *resp_data; + resp_data = xmalloc(sizeof(will_run_response_msg_t)); + resp_data->job_id = job_ptr->job_id; +#ifdef HAVE_BG + select_g_get_jobinfo(job_ptr->select_jobinfo, + 
SELECT_DATA_NODE_CNT, + &resp_data->proc_cnt); + +#else + resp_data->proc_cnt = job_ptr->total_procs; +#endif + resp_data->start_time = job_ptr->start_time; + job_ptr->start_time = 0; /* restore pending job start time */ + resp_data->node_list = bitmap2node_name(avail_bitmap); + FREE_NULL_BITMAP(avail_bitmap); + *resp = resp_data; + return SLURM_SUCCESS; + } else { + FREE_NULL_BITMAP(avail_bitmap); + return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + } + +} diff --git a/src/slurmctld/job_scheduler.h b/src/slurmctld/job_scheduler.h new file mode 100644 index 000000000..b40310137 --- /dev/null +++ b/src/slurmctld/job_scheduler.h @@ -0,0 +1,135 @@ +/*****************************************************************************\ + * job_scheduler.h - data structures and function definitions for scheduling + * of pending jobs in priority order + ***************************************************************************** + * Copyright (C) 2002-2006 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette@llnl.gov>, et. al. + * Derived from dsh written by Jim Garlick <garlick1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef _JOB_SCHEDULER_H +#define _JOB_SCHEDULER_H + +#include "src/slurmctld/slurmctld.h" + +struct job_queue { + struct job_record *job_ptr; + uint32_t job_priority; + uint16_t part_priority; +}; + +/* + * build_job_queue - build (non-priority ordered) list of pending jobs + * OUT job_queue - pointer to job queue + * RET number of entries in job_queue + * NOTE: the buffer at *job_queue must be xfreed by the caller + */ +extern int build_job_queue(struct job_queue **job_queue); + +/* + * job_is_completing - Determine if jobs are in the process of completing. 
+ * RET - True of any job is in the process of completing + * NOTE: This function can reduce resource fragmentation, which is a + * critical issue on Elan interconnect based systems. + */ + +extern bool job_is_completing(void); + +/* Determine if a pending job will run using only the specified nodes + * (in job_desc_msg->req_nodes), build response message and return + * SLURM_SUCCESS on success. Otherwise return an error code. Caller + * must free response message */ +extern int job_start_data(job_desc_msg_t *job_desc_msg, + will_run_response_msg_t **resp); + +/* + * launch_job - send an RPC to a slurmd to initiate a batch job + * IN job_ptr - pointer to job that will be initiated + */ +extern void launch_job(struct job_record *job_ptr); + +/* + * make_batch_job_cred - add a job credential to the batch_job_launch_msg + * IN/OUT launch_msg_ptr - batch_job_launch_msg in which job_id, step_id, + * uid and nodes have already been set + * RET 0 or error code + */ +extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr); + +/* Print a job's dependency information based upon job_ptr->depend_list */ +extern void print_job_dependency(struct job_record *job_ptr); + +/* + * schedule - attempt to schedule all pending jobs + * pending jobs for each partition will be scheduled in priority + * order until a request fails + * RET count of jobs scheduled + * Note: We re-build the queue every time. Jobs can not only be added + * or removed from the queue, but have their priority or partition + * changed with the update_job RPC. In general nodes will be in priority + * order (by submit time), so the sorting should be pretty fast. + */ +extern int schedule(void); + +/* + * set_job_elig_time - set the eligible time for pending jobs once their + * dependencies are lifted (in job->details->begin_time) + */ +extern void set_job_elig_time(void); + +/* + * sort_job_queue - sort job_queue in decending priority order + * IN job_queue_size - count of elements in the job queue + * IN/OUT job_queue - pointer to sorted job queue + */ +extern void sort_job_queue(struct job_queue *job_queue, int job_queue_size); + +/* + * Determine if a job's dependencies are met + * RET: 0 = no dependencies + * 1 = dependencies remain + * 2 = failure (job completion code not per dependency), delete the job + */ +extern int test_job_dependency(struct job_record *job_ptr); + +/* + * Parse a job dependency string and use it to establish a "depend_spec" + * list of dependencies. We accept both old format (a single job ID) and + * new format (e.g. "afterok:123:124,after:128"). + * IN job_ptr - job record to have dependency and depend_list updated + * IN new_depend - new dependency description + * RET returns an error code from slurm_errno.h + */ +extern int update_job_dependency(struct job_record *job_ptr, char *new_depend); + +#endif /* !_JOB_SCHEDULER_H */ diff --git a/src/slurmctld/licenses.c b/src/slurmctld/licenses.c new file mode 100644 index 000000000..002c07d50 --- /dev/null +++ b/src/slurmctld/licenses.c @@ -0,0 +1,394 @@ +/*****************************************************************************\ + * licenses.c - Functions for handling cluster-wide consumable resources + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette@llnl.gov>, et. al. + * LLNL-CODE-402394. 
+ * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include <ctype.h> +#include <errno.h> +#include <pthread.h> +#include <slurm/slurm_errno.h> +#include <stdlib.h> +#include <string.h> + +#include "src/common/list.h" +#include "src/common/log.h" +#include "src/common/macros.h" +#include "src/common/xmalloc.h" +#include "src/common/xstring.h" +#include "src/slurmctld/licenses.h" +#include "src/slurmctld/slurmctld.h" + +#define _DEBUG 0 + +List license_list = (List) NULL; +static pthread_mutex_t license_mutex = PTHREAD_MUTEX_INITIALIZER; + +/* Print all licenses on a list */ +static inline void _licenses_print(char *header, List licenses) +{ +#if _DEBUG + ListIterator iter; + licenses_t *license_entry; + + info("licenses: %s", header); + if (licenses == NULL) + return; + + iter = list_iterator_create(licenses); + if (iter == NULL) + fatal("malloc failure from list_iterator_create"); + while ((license_entry = (licenses_t *) list_next(iter))) { + info("name:%s total:%u used:%u", license_entry->name, + license_entry->total, license_entry->used); + } + list_iterator_destroy(iter); +#endif +} + +/* Free a license_t record (for use by list_destroy) */ +static void _license_free_rec(void *x) +{ + licenses_t *license_entry = (licenses_t *) x; + + if (license_entry) { + xfree(license_entry->name); + xfree(license_entry); + } +} + +/* Find a license_t record by license name (for use by list_find_first) */ +static int _license_find_rec(void *x, void *key) +{ + licenses_t *license_entry = (licenses_t *) x; + char *name = (char *) key; + + if ((license_entry->name == NULL) || (name == NULL)) + return 0; + if (strcmp(license_entry->name, name)) + return 0; + return 1; +} + +/* Given a license string, return a list of license_t records */ +static List _build_license_list(char *licenses, bool *valid) +{ + int i; + char *end_num, *tmp_str, *token, *last; + licenses_t *license_entry; + List lic_list; + + *valid = true; + if 
((licenses == NULL) || (licenses[0] == '\0')) + return NULL; + + lic_list = list_create(_license_free_rec); + tmp_str = xstrdup(licenses); + token = strtok_r(tmp_str, ",;", &last); + while (token && *valid) { + uint16_t num = 1; + for (i=0; token[i]; i++) { + if (isspace(token[i])) { + *valid = false; + break; + } + if (token[i] == '*') { + token[i++] = '\0'; + num = (uint16_t)strtol(&token[i], &end_num, 10); + } + } + if (num <= 0) { + *valid = false; + break; + } + license_entry = xmalloc(sizeof(licenses_t)); + license_entry->name = xstrdup(token); + license_entry->total = num; + list_push(lic_list, license_entry); + token = strtok_r(NULL, ",;", &last); + } + xfree(tmp_str); + + if (*valid == false) { + list_destroy(lic_list); + lic_list = NULL; + } + return lic_list; +} + +/* Initialize licenses on this system based upon slurm.conf */ +extern int license_init(char *licenses) +{ + bool valid; + + slurm_mutex_lock(&license_mutex); + if (license_list) + fatal("license_list already defined"); + + license_list = _build_license_list(licenses, &valid); + if (!valid) + fatal("Invalid configured licenses: %s", licenses); + + _licenses_print("licences_init", license_list); + slurm_mutex_unlock(&license_mutex); + return SLURM_SUCCESS; +} + + +/* Update licenses on this system based upon slurm.conf. + * Preserve all previously allocated licenses */ +extern int license_update(char *licenses) +{ + ListIterator iter; + licenses_t *license_entry, *match; + List new_list; + bool valid; + + new_list = _build_license_list(licenses, &valid); + if (!valid) + fatal("Invalid configured licenses: %s", licenses); + + slurm_mutex_lock(&license_mutex); + if (!license_list) { /* no licenses before now */ + license_list = new_list; + slurm_mutex_unlock(&license_mutex); + return SLURM_SUCCESS; + } + + iter = list_iterator_create(license_list); + if (iter == NULL) + fatal("malloc failure from list_iterator_create"); + while ((license_entry = (licenses_t *) list_next(iter))) { + match = list_find_first(new_list, _license_find_rec, + license_entry->name); + if (!match) { + info("license %s removed with %u in use", + license_entry->name, license_entry->used); + } else { + match->used = license_entry->used; + if (match->used > match->total) + info("license %s count decreased", match->name); + } + } + list_iterator_destroy(iter); + + list_destroy(license_list); + license_list = new_list; + _licenses_print("licences_update", license_list); + slurm_mutex_unlock(&license_mutex); + return SLURM_SUCCESS; +} + +/* Free memory associated with licenses on this system */ +extern void license_free(void) +{ + slurm_mutex_lock(&license_mutex); + if (license_list) { + list_destroy(license_list); + license_list = (List) NULL; + } + slurm_mutex_unlock(&license_mutex); +} + +/* + * license_job_validate - Test if the licenses required by a job are valid + * IN licenses - required licenses + * OUT valid - true if required licenses are valid and a sufficient number + * are configured (though not necessarily available now) + * RET license_list, must be destroyed by caller + */ +extern List license_job_validate(char *licenses, bool *valid) +{ + ListIterator iter; + licenses_t *license_entry, *match; + List job_license_list; + + job_license_list = _build_license_list(licenses, valid); + _licenses_print("job_validate", job_license_list); + if (!job_license_list) + return job_license_list; + + slurm_mutex_lock(&license_mutex); + iter = list_iterator_create(job_license_list); + if (iter == NULL) + fatal("malloc failure from 
list_iterator_create"); + while ((license_entry = (licenses_t *) list_next(iter))) { + if (license_list) { + match = list_find_first(license_list, + _license_find_rec, license_entry->name); + } else + match = NULL; + if (!match) { + debug("could not find license %s for job", + license_entry->name); + *valid = false; + break; + } else if (license_entry->total > match->total) { + debug("job wants more %s licenses than configured", + match->name); + *valid = false; + break; + } + } + list_iterator_destroy(iter); + slurm_mutex_unlock(&license_mutex); + + if (!(*valid)) { + list_destroy(job_license_list); + job_license_list = NULL; + } + return job_license_list; +} + +/* + * license_job_test - Test if the licenses required for a job are available + * IN job_ptr - job identification + * RET: SLURM_SUCCESS, EAGAIN (not available now), SLURM_ERROR (never runnable) + */ +extern int license_job_test(struct job_record *job_ptr) +{ + ListIterator iter; + licenses_t *license_entry, *match; + int rc = SLURM_SUCCESS; + + if (!job_ptr->license_list) /* no licenses needed */ + return rc; + + slurm_mutex_lock(&license_mutex); + iter = list_iterator_create(job_ptr->license_list); + if (iter == NULL) + fatal("malloc failure from list_iterator_create"); + while ((license_entry = (licenses_t *) list_next(iter))) { + match = list_find_first(license_list, _license_find_rec, + license_entry->name); + if (!match) { + error("could not find license %s for job %u", + license_entry->name, job_ptr->job_id); + rc = SLURM_ERROR; + break; + } else if (license_entry->total > match->total) { + info("job %u wants more %s licenses than configured", + job_ptr->job_id, match->name); + rc = SLURM_ERROR; + break; + } else if ((license_entry->total + match->used) > + match->total) { + rc = EAGAIN; + break; + } + } + list_iterator_destroy(iter); + slurm_mutex_unlock(&license_mutex); + return rc; +} + +/* + * license_job_get - Get the licenses required for a job + * IN job_ptr - job identification + * RET SLURM_SUCCESS or failure code + */ +extern int license_job_get(struct job_record *job_ptr) +{ + ListIterator iter; + licenses_t *license_entry, *match; + int rc = SLURM_SUCCESS; + + if (!job_ptr->license_list) /* no licenses needed */ + return rc; + + slurm_mutex_lock(&license_mutex); + iter = list_iterator_create(job_ptr->license_list); + if (iter == NULL) + fatal("malloc failure from list_iterator_create"); + while ((license_entry = (licenses_t *) list_next(iter))) { + match = list_find_first(license_list, _license_find_rec, + license_entry->name); + if (match) { + match->used += license_entry->total; + license_entry->used += license_entry->total; + } else { + error("could not find license %s for job %u", + license_entry->name, job_ptr->job_id); + rc = SLURM_ERROR; + } + } + list_iterator_destroy(iter); + _licenses_print("licences_job_get", license_list); + slurm_mutex_unlock(&license_mutex); + return rc; +} + +/* + * license_job_return - Return the licenses allocated to a job + * IN job_ptr - job identification + * RET SLURM_SUCCESS or failure code + */ +extern int license_job_return(struct job_record *job_ptr) +{ + ListIterator iter; + licenses_t *license_entry, *match; + int rc = SLURM_SUCCESS; + + if (!job_ptr->license_list) /* no licenses needed */ + return rc; + + slurm_mutex_lock(&license_mutex); + iter = list_iterator_create(job_ptr->license_list); + if (iter == NULL) + fatal("malloc failure from list_iterator_create"); + while ((license_entry = (licenses_t *) list_next(iter))) { + match = list_find_first(license_list, 
_license_find_rec, + license_entry->name); + if (match) { + if (match->used >= license_entry->total) + match->used -= license_entry->total; + else { + error("license use count underflow for %s", + match->name); + match->used = 0; + rc = SLURM_ERROR; + } + license_entry->used = 0; + } else { + /* This can happen after a reconfiguration */ + error("job returning unknown license %s", + license_entry->name); + } + } + list_iterator_destroy(iter); + _licenses_print("licences_job_return", license_list); + slurm_mutex_unlock(&license_mutex); + return rc; +} + diff --git a/src/slurmctld/licenses.h b/src/slurmctld/licenses.h new file mode 100644 index 000000000..449e1002b --- /dev/null +++ b/src/slurmctld/licenses.h @@ -0,0 +1,94 @@ +/*****************************************************************************\ + * licenses.h - Definitions for handling cluster-wide consumable resources + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette@llnl.gov>, et. al. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef _LICENSES_H +#define _LICENSES_H + +#include "src/common/list.h" +#include "src/slurmctld/slurmctld.h" + +typedef struct licenses { + char * name; /* name associated with a license */ + uint16_t total; /* total license configued */ + uint16_t used; /* used licenses */ +} licenses_t; + +extern List license_list; + + +/* Initialize licenses on this system based upon slurm.conf */ +extern int license_init(char *licenses); + +/* Update licenses on this system based upon slurm.conf. 
+ * Preserve all previously allocated licenses */ +extern int license_update(char *licenses); + +/* Free memory associated with licenses on this system */ +extern void license_free(void); + + +/* + * license_job_get - Get the licenses required for a job + * IN job_ptr - job identification + * RET SLURM_SUCCESS or failure code + */ +extern int license_job_get(struct job_record *job_ptr); + +/* + * license_job_return - Return the licenses allocated to a job + * IN job_ptr - job identification + * RET SLURM_SUCCESS or failure code + */ +extern int license_job_return(struct job_record *job_ptr); + +/* + * license_job_test - Test if the licenses required for a job are available + * IN job_ptr - job identification + * RET SLURM_SUCCESS, EAGAIN (not available now), SLURM_ERROR (never runnable) + */ +extern int license_job_test(struct job_record *job_ptr); + +/* + * license_job_validate - Test if the licenses required by a job are valid + * IN licenses - required licenses + * OUT valid - true if required licenses are valid and a sufficient number + * are configured (though not necessarily available now) + * RET license_list, must be destroyed by caller + */ +extern List license_job_validate(char *licenses, bool *valid); + +#endif /* !_LICENSES_H */ diff --git a/src/slurmctld/locks.c b/src/slurmctld/locks.c index b1e4cb6cd..71eca604b 100644 --- a/src/slurmctld/locks.c +++ b/src/slurmctld/locks.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmctld/locks.h b/src/slurmctld/locks.h index f43f35928..997f35722 100644 --- a/src/slurmctld/locks.h +++ b/src/slurmctld/locks.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c index 44e9992d5..7772adb0a 100644 --- a/src/slurmctld/node_mgr.c +++ b/src/slurmctld/node_mgr.c @@ -4,12 +4,12 @@ * hash table (node_hash_table), time stamp (last_node_update) and * configuration list (config_list) * - * $Id: node_mgr.c 13869 2008-04-15 00:51:16Z jette $ + * $Id: node_mgr.c 14124 2008-05-23 21:12:21Z da $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -62,7 +62,7 @@ #include "src/common/xstring.h" #include "src/common/node_select.h" #include "src/common/read_config.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_accounting_storage.h" #include "src/slurmctld/agent.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/ping_nodes.h" @@ -188,7 +188,6 @@ create_node_record (struct config_record *config_ptr, char *node_name) last_node_update = time (NULL); xassert(config_ptr); xassert(node_name); - xassert(strlen (node_name) < MAX_SLURM_NAME); /* round up the buffer size to reduce overhead of xrealloc */ old_buffer_size = (node_record_count) * sizeof (struct node_record); @@ -204,7 +203,7 @@ create_node_record (struct config_record *config_ptr, char *node_name) else if (old_buffer_size != new_buffer_size) xrealloc (node_record_table_ptr, new_buffer_size); node_ptr = node_record_table_ptr + (node_record_count++); - strcpy (node_ptr->name, node_name); + node_ptr->name = xstrdup(node_name); node_ptr->last_response = (time_t)0; node_ptr->config_ptr = config_ptr; node_ptr->part_cnt = 0; @@ -404,15 +403,14 @@ extern int load_all_node_state ( bool state_only ) { char *node_name, *reason = NULL, *data = NULL, *state_file, *features; int data_allocated, data_read = 0, error_code = 0, node_cnt = 0; - uint16_t node_state, name_len; + uint16_t node_state; uint16_t cpus = 1, sockets = 1, cores = 1, threads = 1; - uint32_t real_memory, tmp_disk, data_size = 0; + uint32_t real_memory, tmp_disk, data_size = 0, name_len; struct node_record *node_ptr; int state_fd; time_t time_stamp, now = time(NULL); Buf buffer; char *ver_str = NULL; - uint16_t ver_str_len; /* read the file */ state_file = xstrdup (slurmctld_conf.state_save_location); @@ -449,26 +447,18 @@ extern int load_all_node_state ( bool state_only ) buffer = create_buf (data, data_size); - /* - * Check the data version so that when the format changes, we - * we don't try to unpack data using the wrong format routines - */ - if (size_buf(buffer) >= sizeof(uint16_t) + strlen(NODE_STATE_VERSION)) { - char *ptr = get_buf_data(buffer); - - if (memcmp( &ptr[sizeof(uint16_t)], NODE_STATE_VERSION, 3) == 0) { - safe_unpackstr_xmalloc( &ver_str, &ver_str_len, buffer); - debug3("Version string in node_state header is %s", - ver_str); - } - } - if (ver_str && (strcmp(ver_str, NODE_STATE_VERSION) != 0)) { + safe_unpackstr_xmalloc( &ver_str, &name_len, buffer); + debug3("Version string in node_state header is %s", ver_str); + if ((!ver_str) || (strcmp(ver_str, NODE_STATE_VERSION) != 0)) { + error("*****************************************************"); error("Can not recover node state, data version incompatable"); + error("*****************************************************"); xfree(ver_str); free_buf(buffer); return EFAULT; } xfree(ver_str); + safe_unpack_time (&time_stamp, buffer); while (remaining_buf (buffer) > 0) { @@ -521,7 +511,11 @@ extern int load_all_node_state ( bool state_only ) | orig_flags; } if (node_state & NODE_STATE_DRAIN) - node_ptr->node_state |= NODE_STATE_DRAIN; + node_ptr->node_state |= + NODE_STATE_DRAIN; + if (node_state & NODE_STATE_FAIL) + node_ptr->node_state |= + NODE_STATE_FAIL; } if (node_ptr->reason == NULL) node_ptr->reason = reason; @@ -596,7 +590,7 @@ find_node_record (char *name) if ((node_record_count == 1) && (strcmp(node_record_table_ptr[0].name, "localhost") == 0)) return (&node_record_table_ptr[0]); - + error ("find_node_record: lookup failure for %s", name); } @@ -655,7 +649,12 @@ int init_node_conf (void) int i; for (i=0; 
i<node_record_count; i++) { + xfree(node_record_table_ptr[i].arch); + xfree(node_record_table_ptr[i].comm_name); xfree(node_record_table_ptr[i].features); + xfree(node_record_table_ptr[i].name); + xfree(node_record_table_ptr[i].os); + xfree(node_record_table_ptr[i].part_pptr); xfree(node_record_table_ptr[i].reason); } @@ -845,6 +844,9 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size, if (((show_flags & SHOW_ALL) == 0) && (_node_is_hidden(node_ptr))) continue; + if ((node_ptr->name == NULL) || + (node_ptr->name[0] == '\0')) + continue; _pack_node(node_ptr, cr_flag, buffer); nodes_packed ++ ; @@ -909,12 +911,17 @@ static void _pack_node (struct node_record *dump_node_ptr, bool cr_flag, pack16(allocated_cpus, buffer); } else if ((dump_node_ptr->node_state & NODE_STATE_COMPLETING) || (dump_node_ptr->node_state == NODE_STATE_ALLOCATED)) { - pack16(dump_node_ptr->config_ptr->cpus, buffer); + if (slurmctld_conf.fast_schedule) + pack16(dump_node_ptr->config_ptr->cpus, buffer); + else + pack16(dump_node_ptr->cpus, buffer); } else { pack16((uint16_t) 0, buffer); } + packstr (dump_node_ptr->arch, buffer); packstr (dump_node_ptr->config_ptr->feature, buffer); + packstr (dump_node_ptr->os, buffer); packstr (dump_node_ptr->reason, buffer); } @@ -928,17 +935,19 @@ static void _pack_node (struct node_record *dump_node_ptr, bool cr_flag, void rehash_node (void) { int i, inx; + struct node_record *node_ptr = node_record_table_ptr; xfree (node_hash_table); node_hash_table = xmalloc (sizeof (struct node_record *) * node_record_count); - for (i = 0; i < node_record_count; i++) { - if (strlen (node_record_table_ptr[i].name) == 0) + for (i = 0; i < node_record_count; i++, node_ptr++) { + if ((node_ptr->name == NULL) || + (node_ptr->name[0] == '\0')) continue; /* vestigial record */ - inx = _hash_index (node_record_table_ptr[i].name); - node_record_table_ptr[i].node_next = node_hash_table[inx]; - node_hash_table[inx] = &node_record_table_ptr[i]; + inx = _hash_index (node_ptr->name); + node_ptr->node_next = node_hash_table[inx]; + node_hash_table[inx] = node_ptr; } #if _DEBUG @@ -961,7 +970,8 @@ void set_slurmd_addr (void) START_TIMER; for (i = 0; i < node_record_count; i++, node_ptr++) { - if (node_ptr->name[0] == '\0') + if ((node_ptr->name == NULL) || + (node_ptr->name[0] == '\0')) continue; if (node_ptr->port == 0) node_ptr->port = slurmctld_conf.slurmd_port; @@ -1044,10 +1054,18 @@ int update_node ( update_node_msg_t * update_node_msg ) if (state_val == NODE_RESUME) { base_state &= NODE_STATE_BASE; if ((base_state == NODE_STATE_IDLE) && - (node_ptr->node_state & NODE_STATE_DRAIN)) { - jobacct_g_node_up(node_ptr, now); + ((node_ptr->node_state & NODE_STATE_DRAIN) + || (node_ptr->node_state & + NODE_STATE_FAIL))) { + clusteracct_storage_g_node_up( + acct_db_conn, + slurmctld_cluster_name, + node_ptr, + now); } node_ptr->node_state &= (~NODE_STATE_DRAIN); + node_ptr->node_state &= (~NODE_STATE_FAIL); + base_state &= NODE_STATE_BASE; if (base_state == NODE_STATE_DOWN) state_val = NODE_STATE_IDLE; else @@ -1061,17 +1079,29 @@ int update_node ( update_node_msg_t * update_node_msg ) false); } else if (state_val == NODE_STATE_IDLE) { + /* assume they want to clear DRAIN and + * FAIL flags too */ base_state &= NODE_STATE_BASE; if (base_state == NODE_STATE_DOWN) { trigger_node_up(node_ptr); - jobacct_g_node_up(node_ptr, now); + clusteracct_storage_g_node_up( + acct_db_conn, + slurmctld_cluster_name, + node_ptr, + now); } else if ((base_state == NODE_STATE_IDLE) && - (node_ptr->node_state & 
NODE_STATE_DRAIN)) { - jobacct_g_node_up(node_ptr, now); + ((node_ptr->node_state & + NODE_STATE_DRAIN) || + (node_ptr->node_state & + NODE_STATE_FAIL))) { + clusteracct_storage_g_node_up( + acct_db_conn, + slurmctld_cluster_name, + node_ptr, + now); } - - /* assume they want to clear DRAIN flag too */ node_ptr->node_state &= (~NODE_STATE_DRAIN); + node_ptr->node_state &= (~NODE_STATE_FAIL); bit_set (avail_node_bitmap, node_inx); bit_set (idle_node_bitmap, node_inx); bit_set (up_node_bitmap, node_inx); @@ -1079,7 +1109,8 @@ int update_node ( update_node_msg_t * update_node_msg ) reset_job_priority(); } else if (state_val == NODE_STATE_ALLOCATED) { - if (!(node_ptr->node_state & NODE_STATE_DRAIN)) + if (!(node_ptr->node_state & (NODE_STATE_DRAIN + | NODE_STATE_FAIL))) bit_set (up_node_bitmap, node_inx); bit_set (avail_node_bitmap, node_inx); bit_clear (idle_node_bitmap, node_inx); @@ -1088,10 +1119,26 @@ int update_node ( update_node_msg_t * update_node_msg ) bit_clear (avail_node_bitmap, node_inx); state_val = node_ptr->node_state | NODE_STATE_DRAIN; + if ((node_ptr->run_job_cnt == 0) && + (node_ptr->comp_job_cnt == 0)) { + trigger_node_drained(node_ptr); + clusteracct_storage_g_node_down( + acct_db_conn, + slurmctld_cluster_name, + node_ptr, now, NULL); + } + } + else if (state_val == NODE_STATE_FAIL) { + bit_clear (avail_node_bitmap, node_inx); + state_val = node_ptr->node_state | + NODE_STATE_FAIL; + trigger_node_failing(node_ptr); if ((node_ptr->run_job_cnt == 0) && (node_ptr->comp_job_cnt == 0)) - jobacct_g_node_down(node_ptr, now, - NULL); + clusteracct_storage_g_node_down( + acct_db_conn, + slurmctld_cluster_name, + node_ptr, now, NULL); } else { info ("Invalid node state specified %d", @@ -1116,7 +1163,8 @@ int update_node ( update_node_msg_t * update_node_msg ) base_state = node_ptr->node_state & NODE_STATE_BASE; if ((base_state != NODE_STATE_DOWN) - && ((node_ptr->node_state & NODE_STATE_DRAIN) == 0)) + && ((node_ptr->node_state & (NODE_STATE_DRAIN | + NODE_STATE_FAIL)) == 0)) xfree(node_ptr->reason); free (this_node_name); @@ -1323,7 +1371,10 @@ extern int drain_nodes ( char *nodes, char *reason ) if ((node_ptr->run_job_cnt == 0) && (node_ptr->comp_job_cnt == 0)) { /* no jobs, node is drained */ - jobacct_g_node_down(node_ptr, now, NULL); + trigger_node_drained(node_ptr); + clusteracct_storage_g_node_down(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now, NULL); } select_g_update_node_state(node_inx, node_ptr->node_state); @@ -1346,6 +1397,7 @@ static bool _valid_node_state_change(uint16_t old, uint16_t new) switch (new) { case NODE_STATE_DOWN: case NODE_STATE_DRAIN: + case NODE_STATE_FAIL: return true; break; @@ -1353,7 +1405,8 @@ static bool _valid_node_state_change(uint16_t old, uint16_t new) if (base_state == NODE_STATE_UNKNOWN) return false; if ((base_state == NODE_STATE_DOWN) - || (node_flags & NODE_STATE_DRAIN)) + || (node_flags & NODE_STATE_DRAIN) + || (node_flags & NODE_STATE_FAIL)) return true; break; @@ -1377,25 +1430,12 @@ static bool _valid_node_state_change(uint16_t old, uint16_t new) /* * validate_node_specs - validate the node's specifications as valid, - * if not set state to down, in any case update last_response - * IN node_name - name of the node - * IN cpus - number of cpus measured - * IN sockets - number of sockets per cpu measured - * IN cores - number of cores per socket measured - * IN threads - number of threads per core measured - * IN real_memory - mega_bytes of real_memory measured - * IN tmp_disk - mega_bytes of tmp_disk measured - * IN 
job_count - number of jobs allocated to this node - * IN status - node status code + * if not set state to down, in any case update last_response + * IN reg_msg - node registration message * RET 0 if no error, ENOENT if no such node, EINVAL if values too low - * global: node_record_table_ptr - pointer to global node table * NOTE: READ lock_slurmctld config before entry */ -extern int -validate_node_specs (char *node_name, uint16_t cpus, - uint16_t sockets, uint16_t cores, uint16_t threads, - uint32_t real_memory, uint32_t tmp_disk, - uint32_t job_count, uint32_t status) +extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg) { int error_code, i; struct config_record *config_ptr; @@ -1404,7 +1444,7 @@ validate_node_specs (char *node_name, uint16_t cpus, uint16_t base_state, node_flags; time_t now = time(NULL); - node_ptr = find_node_record (node_name); + node_ptr = find_node_record (reg_msg->node_name); if (node_ptr == NULL) return ENOENT; node_ptr->last_response = now; @@ -1417,78 +1457,90 @@ validate_node_specs (char *node_name, uint16_t cpus, * an error if the user did not specify the values on slurm.conf * for a multi-core system */ if ((slurmctld_conf.fast_schedule != 2) - && (sockets < config_ptr->sockets)) { - error("Node %s has low socket count %u", node_name, sockets); + && (reg_msg->sockets < config_ptr->sockets)) { + error("Node %s has low socket count %u", + reg_msg->node_name, reg_msg->sockets); error_code = EINVAL; reason_down = "Low socket count"; } - node_ptr->sockets = sockets; + node_ptr->sockets = reg_msg->sockets; if ((slurmctld_conf.fast_schedule != 2) - && (cores < config_ptr->cores)) { - error("Node %s has low core count %u", node_name, cores); + && (reg_msg->cores < config_ptr->cores)) { + error("Node %s has low core count %u", + reg_msg->node_name, reg_msg->cores); error_code = EINVAL; reason_down = "Low core count"; } - node_ptr->cores = cores; + node_ptr->cores = reg_msg->cores; if ((slurmctld_conf.fast_schedule != 2) - && (threads < config_ptr->threads)) { - error("Node %s has low thread count %u", node_name, threads); + && (reg_msg->threads < config_ptr->threads)) { + error("Node %s has low thread count %u", + reg_msg->node_name, reg_msg->threads); error_code = EINVAL; reason_down = "Low thread count"; } - node_ptr->threads = threads; + node_ptr->threads = reg_msg->threads; #else if (slurmctld_conf.fast_schedule != 2) { int tot1, tot2; - tot1 = sockets * cores * threads; + tot1 = reg_msg->sockets * reg_msg->cores * reg_msg->threads; tot2 = config_ptr->sockets * config_ptr->cores * config_ptr->threads; if (tot1 < tot2) { error("Node %s has low socket*core*thread count %u", - node_name, tot1); + reg_msg->node_name, tot1); error_code = EINVAL; reason_down = "Low socket*core*thread count"; } } - node_ptr->sockets = sockets; - node_ptr->cores = cores; - node_ptr->threads = threads; + node_ptr->sockets = reg_msg->sockets; + node_ptr->cores = reg_msg->cores; + node_ptr->threads = reg_msg->threads; #endif if ((slurmctld_conf.fast_schedule != 2) - && (cpus < config_ptr->cpus)) { - error ("Node %s has low cpu count %u", node_name, cpus); + && (reg_msg->cpus < config_ptr->cpus)) { + error ("Node %s has low cpu count %u", + reg_msg->node_name, reg_msg->cpus); error_code = EINVAL; reason_down = "Low CPUs"; } - if ((node_ptr->cpus != cpus) + if ((node_ptr->cpus != reg_msg->cpus) && (slurmctld_conf.fast_schedule == 0)) { for (i=0; i<node_ptr->part_cnt; i++) { node_ptr->part_pptr[i]->total_cpus += - (cpus - node_ptr->cpus); + (reg_msg->cpus - 
node_ptr->cpus); } } - node_ptr->cpus = cpus; + node_ptr->cpus = reg_msg->cpus; if ((slurmctld_conf.fast_schedule != 2) - && (real_memory < config_ptr->real_memory)) { + && (reg_msg->real_memory < config_ptr->real_memory)) { error ("Node %s has low real_memory size %u", - node_name, real_memory); + reg_msg->node_name, reg_msg->real_memory); error_code = EINVAL; reason_down = "Low RealMemory"; } - node_ptr->real_memory = real_memory; + node_ptr->real_memory = reg_msg->real_memory; if ((slurmctld_conf.fast_schedule != 2) - && (tmp_disk < config_ptr->tmp_disk)) { + && (reg_msg->tmp_disk < config_ptr->tmp_disk)) { error ("Node %s has low tmp_disk size %u", - node_name, tmp_disk); + reg_msg->node_name, reg_msg->tmp_disk); error_code = EINVAL; reason_down = "Low TmpDisk"; } - node_ptr->tmp_disk = tmp_disk; + node_ptr->tmp_disk = reg_msg->tmp_disk; + + xfree(node_ptr->arch); + node_ptr->arch = reg_msg->arch; + reg_msg->arch = NULL; /* Nothing left to free */ + + xfree(node_ptr->os); + node_ptr->os = reg_msg->os; + reg_msg->os = NULL; /* Nothing left to free */ if (node_ptr->node_state & NODE_STATE_NO_RESPOND) { last_node_update = time (NULL); @@ -1498,25 +1550,31 @@ validate_node_specs (char *node_name, uint16_t cpus, base_state = node_ptr->node_state & NODE_STATE_BASE; node_flags = node_ptr->node_state & NODE_STATE_FLAGS; if (error_code) { - if (base_state != NODE_STATE_DOWN) - error ("Setting node %s state to DOWN", node_name); + if (base_state != NODE_STATE_DOWN) { + error ("Setting node %s state to DOWN", + reg_msg->node_name); + } last_node_update = time (NULL); - set_node_down(node_name, reason_down); - _sync_bitmaps(node_ptr, job_count); - } else if (status == ESLURMD_PROLOG_FAILED) { - if ((node_flags & NODE_STATE_DRAIN) == 0) { + set_node_down(reg_msg->node_name, reason_down); + _sync_bitmaps(node_ptr, reg_msg->job_count); + } else if (reg_msg->status == ESLURMD_PROLOG_FAILED) { + if ((node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) == 0) { +#ifdef HAVE_BG + info("Prolog failure on node %s", reg_msg->node_name); +#else last_node_update = time (NULL); - error ("Prolog failure on node %s, state to DOWN", - node_name); - set_node_down(node_name, "Prolog failed"); + error("Prolog failure on node %s, state to DOWN", + reg_msg->node_name); + set_node_down(reg_msg->node_name, "Prolog failed"); +#endif } } else { if (base_state == NODE_STATE_UNKNOWN) { last_node_update = time (NULL); reset_job_priority(); debug("validate_node_specs: node %s has registered", - node_name); - if (job_count) { + reg_msg->node_name); + if (reg_msg->job_count) { node_ptr->node_state = NODE_STATE_ALLOCATED | node_flags; } else { @@ -1526,14 +1584,16 @@ validate_node_specs (char *node_name, uint16_t cpus, } if ((node_flags & NODE_STATE_DRAIN) == 0) xfree(node_ptr->reason); - jobacct_g_node_up(node_ptr, now); + clusteracct_storage_g_node_up(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now); } else if ((base_state == NODE_STATE_DOWN) && (slurmctld_conf.ret2service == 1) && (node_ptr->reason != NULL) && (strncmp(node_ptr->reason, "Not responding", 14) == 0)) { last_node_update = time (NULL); - if (job_count) { + if (reg_msg->job_count) { node_ptr->node_state = NODE_STATE_ALLOCATED | node_flags; } else { @@ -1541,24 +1601,26 @@ validate_node_specs (char *node_name, uint16_t cpus, node_flags; node_ptr->last_idle = now; } - info ("node %s returned to service", node_name); + info ("node %s returned to service", reg_msg->node_name); xfree(node_ptr->reason); reset_job_priority(); trigger_node_up(node_ptr); - 
jobacct_g_node_up(node_ptr, now); + clusteracct_storage_g_node_up(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now); } else if ((base_state == NODE_STATE_ALLOCATED) && - (job_count == 0)) { /* job vanished */ + (reg_msg->job_count == 0)) { /* job vanished */ last_node_update = now; node_ptr->node_state = NODE_STATE_IDLE | node_flags; node_ptr->last_idle = now; } else if ((node_flags & NODE_STATE_COMPLETING) && - (job_count == 0)) { /* job already done */ + (reg_msg->job_count == 0)) { /* job already done */ last_node_update = now; node_ptr->node_state &= (~NODE_STATE_COMPLETING); } select_g_update_node_state((node_ptr - node_record_table_ptr), node_ptr->node_state); - _sync_bitmaps(node_ptr, job_count); + _sync_bitmaps(node_ptr, reg_msg->job_count); } return error_code; @@ -1568,20 +1630,18 @@ validate_node_specs (char *node_name, uint16_t cpus, * validate_nodes_via_front_end - validate all nodes on a cluster as having * a valid configuration as soon as the front-end registers. Individual * nodes will not register with this configuration - * IN job_count - number of jobs which should be running on cluster - * IN job_id_ptr - pointer to array of job_ids that should be on cluster - * IN step_id_ptr - pointer to array of job step ids that should be on cluster - * IN status - cluster status code + * IN reg_msg - node registration message * RET 0 if no error, SLURM error code otherwise - * global: node_record_table_ptr - pointer to global node table * NOTE: READ lock_slurmctld config before entry */ -extern int validate_nodes_via_front_end(uint32_t job_count, - uint32_t *job_id_ptr, uint16_t *step_id_ptr, - uint32_t status) +extern int validate_nodes_via_front_end( + slurm_node_registration_status_msg_t *reg_msg) { int error_code = 0, i, jobs_on_node; bool updated_job = false; +#ifdef HAVE_BG + bool failure_logged = false; +#endif struct job_record *job_ptr; struct config_record *config_ptr; struct node_record *node_ptr; @@ -1595,25 +1655,25 @@ extern int validate_nodes_via_front_end(uint32_t job_count, /* First validate the job info */ node_ptr = &node_record_table_ptr[0]; /* All msg send to node zero, * the front-end for the wholel cluster */ - for (i = 0; i < job_count; i++) { - if ( (job_id_ptr[i] >= MIN_NOALLOC_JOBID) && - (job_id_ptr[i] <= MAX_NOALLOC_JOBID) ) { + for (i = 0; i < reg_msg->job_count; i++) { + if ( (reg_msg->job_id[i] >= MIN_NOALLOC_JOBID) && + (reg_msg->job_id[i] <= MAX_NOALLOC_JOBID) ) { info("NoAllocate job %u.%u reported", - job_id_ptr[i], step_id_ptr[i]); + reg_msg->job_id[i], reg_msg->step_id[i]); continue; } - job_ptr = find_job_record(job_id_ptr[i]); + job_ptr = find_job_record(reg_msg->job_id[i]); if (job_ptr == NULL) { error("Orphan job %u.%u reported", - job_id_ptr[i], step_id_ptr[i]); - kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + reg_msg->job_id[i], reg_msg->step_id[i]); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); } - else if ((job_ptr->job_state == JOB_RUNNING) - || (job_ptr->job_state == JOB_SUSPENDED)) { + else if ((job_ptr->job_state == JOB_RUNNING) || + (job_ptr->job_state == JOB_SUSPENDED)) { debug3("Registered job %u.%u", - job_id_ptr[i], step_id_ptr[i]); + reg_msg->job_id[i], reg_msg->step_id[i]); if (job_ptr->batch_flag) { /* NOTE: Used for purging defunct batch jobs */ job_ptr->time_last_active = now; @@ -1623,37 +1683,37 @@ extern int validate_nodes_via_front_end(uint32_t job_count, else if (job_ptr->job_state & JOB_COMPLETING) { /* Re-send kill request as needed, * not necessarily an error */ - 
kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); } else if (job_ptr->job_state == JOB_PENDING) { error("Registered PENDING job %u.%u", - job_id_ptr[i], step_id_ptr[i]); + reg_msg->job_id[i], reg_msg->step_id[i]); /* FIXME: Could possibly recover the job */ job_ptr->job_state = JOB_FAILED; job_ptr->exit_code = 1; job_ptr->state_reason = FAIL_SYSTEM; last_job_update = now; - job_ptr->start_time = job_ptr->end_time = now; - kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + job_ptr->start_time = job_ptr->end_time = now; + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); job_completion_logger(job_ptr); delete_job_details(job_ptr); } else { /* else job is supposed to be done */ error("Registered job %u.%u in state %s", - job_id_ptr[i], step_id_ptr[i], + reg_msg->job_id[i], reg_msg->step_id[i], job_state_string(job_ptr->job_state)); - kill_job_on_node(job_id_ptr[i], job_ptr, node_ptr); + kill_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr); } } /* purge orphan batch jobs */ job_iterator = list_iterator_create(job_list); while ((job_ptr = (struct job_record *) list_next(job_iterator))) { - if ((job_ptr->job_state != JOB_RUNNING) - || (job_ptr->batch_flag == 0)) + if ((job_ptr->job_state != JOB_RUNNING) || + (job_ptr->batch_flag == 0)) continue; #ifdef HAVE_BG /* slurmd does not report job presence until after prolog @@ -1685,8 +1745,15 @@ extern int validate_nodes_via_front_end(uint32_t job_count, (~NODE_STATE_NO_RESPOND); } - if (status == ESLURMD_PROLOG_FAILED) { - if (!(node_ptr->node_state & NODE_STATE_DRAIN)) { + if (reg_msg->status == ESLURMD_PROLOG_FAILED) { + if (!(node_ptr->node_state & (NODE_STATE_DRAIN | + NODE_STATE_FAIL))) { +#ifdef HAVE_BG + if (!failure_logged) { + error("Prolog failure"); + failure_logged = true; + } +#else updated_job = true; if (prolog_hostlist) (void) hostlist_push_host( @@ -1696,6 +1763,7 @@ extern int validate_nodes_via_front_end(uint32_t job_count, prolog_hostlist = hostlist_create( node_ptr->name); set_node_down(node_ptr->name, "Prolog failed"); +#endif } } else { base_state = node_ptr->node_state & NODE_STATE_BASE; @@ -1719,7 +1787,13 @@ extern int validate_nodes_via_front_end(uint32_t job_count, node_ptr->last_idle = now; } xfree(node_ptr->reason); - jobacct_g_node_up(node_ptr, now); + if ((node_flags & + (NODE_STATE_DRAIN | NODE_STATE_FAIL)) == 0) + clusteracct_storage_g_node_up( + acct_db_conn, + slurmctld_cluster_name, + node_ptr, + now); } else if ((base_state == NODE_STATE_DOWN) && (slurmctld_conf.ret2service == 1)) { updated_job = true; @@ -1735,23 +1809,30 @@ extern int validate_nodes_via_front_end(uint32_t job_count, } if (return_hostlist) (void) hostlist_push_host( - return_hostlist, node_ptr->name); + return_hostlist, + node_ptr->name); else return_hostlist = hostlist_create( node_ptr->name); xfree(node_ptr->reason); trigger_node_up(node_ptr); - jobacct_g_node_up(node_ptr, now); + clusteracct_storage_g_node_up( + acct_db_conn, + slurmctld_cluster_name, + node_ptr, now); } else if ((base_state == NODE_STATE_ALLOCATED) && - (jobs_on_node == 0)) { /* job vanished */ + (jobs_on_node == 0)) { + /* job vanished */ updated_job = true; node_ptr->node_state = NODE_STATE_IDLE | node_flags; node_ptr->last_idle = now; } else if ((node_flags & NODE_STATE_COMPLETING) && - (jobs_on_node == 0)) { /* job already done */ + (jobs_on_node == 0)) { + /* job already done */ updated_job = true; - node_ptr->node_state &= (~NODE_STATE_COMPLETING); + node_ptr->node_state &= + 
(~NODE_STATE_COMPLETING); } select_g_update_node_state( (node_ptr - node_record_table_ptr), @@ -1786,7 +1867,7 @@ extern int validate_nodes_via_front_end(uint32_t job_count, last_node_update = time (NULL); reset_job_priority(); } - return error_code;; + return error_code; } /* Sync idle, share, and avail_node_bitmaps for a given node */ @@ -1801,7 +1882,7 @@ static void _sync_bitmaps(struct node_record *node_ptr, int job_count) } base_state = node_ptr->node_state & NODE_STATE_BASE; if ((base_state == NODE_STATE_DOWN) - || (node_ptr->node_state & NODE_STATE_DRAIN)) + || (node_ptr->node_state & (NODE_STATE_DRAIN | NODE_STATE_FAIL))) bit_clear (avail_node_bitmap, node_inx); else bit_set (avail_node_bitmap, node_inx); @@ -1859,7 +1940,10 @@ static void _node_did_resp(struct node_record *node_ptr) last_node_update = now; node_ptr->last_idle = now; node_ptr->node_state = NODE_STATE_IDLE | node_flags; - jobacct_g_node_up(node_ptr, now); + if ((node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) == 0) + clusteracct_storage_g_node_up(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now); } if ((base_state == NODE_STATE_DOWN) && (slurmctld_conf.ret2service == 1) && @@ -1872,7 +1956,10 @@ static void _node_did_resp(struct node_record *node_ptr) node_ptr->name); xfree(node_ptr->reason); trigger_node_up(node_ptr); - jobacct_g_node_up(node_ptr, now); + if ((node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) == 0) + clusteracct_storage_g_node_up(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now); } base_state = node_ptr->node_state & NODE_STATE_BASE; if ((base_state == NODE_STATE_IDLE) @@ -1881,7 +1968,7 @@ static void _node_did_resp(struct node_record *node_ptr) bit_set (share_node_bitmap, node_inx); } if ((base_state == NODE_STATE_DOWN) - || (node_flags & NODE_STATE_DRAIN)) + || (node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL))) bit_clear (avail_node_bitmap, node_inx); else bit_set (avail_node_bitmap, node_inx); @@ -1946,8 +2033,8 @@ static void _node_not_resp (struct node_record *node_ptr, time_t msg_time) } /* - * set_node_down - make the specified node's state DOWN if possible - * (not in a DRAIN state), kill jobs as needed + * set_node_down - make the specified node's state DOWN and + * kill jobs as needed * IN name - name of the node * IN reason - why the node is DOWN */ @@ -2173,8 +2260,13 @@ extern void make_node_comp(struct node_record *node_ptr, if ((node_ptr->run_job_cnt == 0) && (node_ptr->comp_job_cnt == 0)) { bit_set(idle_node_bitmap, inx); - if (node_ptr->node_state & NODE_STATE_DRAIN) - jobacct_g_node_down(node_ptr, now, NULL); + if ((node_ptr->node_state & NODE_STATE_DRAIN) || + (node_ptr->node_state & NODE_STATE_FAIL)) { + trigger_node_drained(node_ptr); + clusteracct_storage_g_node_down(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now, NULL); + } } if (base_state == NODE_STATE_DOWN) { @@ -2205,7 +2297,9 @@ static void _make_node_down(struct node_record *node_ptr, time_t event_time) bit_clear (up_node_bitmap, inx); select_g_update_node_state(inx, node_ptr->node_state); trigger_node_down(node_ptr); - jobacct_g_node_down(node_ptr, event_time, NULL); + clusteracct_storage_g_node_down(acct_db_conn, + slurmctld_cluster_name, + node_ptr, event_time, NULL); } /* @@ -2270,7 +2364,7 @@ void make_node_idle(struct node_record *node_ptr, if (base_state == NODE_STATE_DOWN) { debug3("make_node_idle: Node %s being left DOWN", node_ptr->name); - } else if ((node_ptr->node_state & NODE_STATE_DRAIN) && + } else if ((node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) && 
(node_ptr->run_job_cnt == 0) && (node_ptr->comp_job_cnt == 0)) { node_ptr->node_state = NODE_STATE_IDLE | node_flags; @@ -2279,7 +2373,10 @@ void make_node_idle(struct node_record *node_ptr, debug3("make_node_idle: Node %s is DRAINED", node_ptr->name); node_ptr->last_idle = now; - jobacct_g_node_down(node_ptr, now, NULL); + trigger_node_drained(node_ptr); + clusteracct_storage_g_node_down(acct_db_conn, + slurmctld_cluster_name, + node_ptr, now, NULL); } else if (node_ptr->run_job_cnt) { node_ptr->node_state = NODE_STATE_ALLOCATED | node_flags; } else { @@ -2302,8 +2399,12 @@ void node_fini(void) } for (i=0; i< node_record_count; i++) { - xfree(node_record_table_ptr[i].part_pptr); + xfree(node_record_table_ptr[i].arch); + xfree(node_record_table_ptr[i].comm_name); xfree(node_record_table_ptr[i].features); + xfree(node_record_table_ptr[i].name); + xfree(node_record_table_ptr[i].os); + xfree(node_record_table_ptr[i].part_pptr); xfree(node_record_table_ptr[i].reason); } diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c index b92527b9a..2798ba45e 100644 --- a/src/slurmctld/node_scheduler.c +++ b/src/slurmctld/node_scheduler.c @@ -1,13 +1,12 @@ /*****************************************************************************\ * node_scheduler.c - select and allocated nodes to jobs * Note: there is a global node table (node_record_table_ptr) - * - * $Id: node_scheduler.c 13639 2008-03-18 19:25:32Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -57,19 +56,19 @@ #include <slurm/slurm_errno.h> #include "src/common/hostlist.h" +#include "src/common/list.h" #include "src/common/node_select.h" #include "src/common/xassert.h" #include "src/common/xmalloc.h" #include "src/common/xstring.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_accounting_storage.h" #include "src/slurmctld/agent.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/node_scheduler.h" #include "src/slurmctld/sched_plugin.h" #include "src/slurmctld/slurmctld.h" -#define FEATURE_OP_OR 0 -#define FEATURE_OP_AND 1 #define MAX_FEATURES 32 /* max exclusive features "[fs1|fs2]"=2 */ #define MAX_RETRIES 10 @@ -82,44 +81,37 @@ struct node_set { /* set of nodes with same configuration */ uint32_t real_memory; uint32_t nodes; uint32_t weight; + char *features; bitstr_t *feature_bits; bitstr_t *my_bitmap; }; -static int _add_node_set_info(struct node_set *node_set_ptr, - bitstr_t ** node_bitmap, - int *node_cnt, int *cpu_cnt, - const int mem_cnt, int cr_enabled, - struct job_record *job); +static int _build_feature_list(struct job_record *job_ptr); static int _build_node_list(struct job_record *job_ptr, struct node_set **node_set_pptr, int *node_set_size); +static void _feature_list_delete(void *x); static void _filter_nodes_in_set(struct node_set *node_set_ptr, struct job_details *detail_ptr); -static int _job_count_bitmap(bitstr_t * bitmap, bitstr_t * jobmap, - int job_cnt); static int _match_feature(char *seek, char *available); static int _nodes_in_sets(bitstr_t *req_bitmap, struct node_set * node_set_ptr, int node_set_size); -static int _pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap, - uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes, bool test_only); static int _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, bitstr_t ** select_bitmap, struct job_record *job_ptr, struct part_record *part_ptr, uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes); -static bitstr_t *_valid_features(char *requested, char *available); + uint32_t req_nodes, bool test_only); +static void _print_feature_list(uint32_t job_id, List feature_list); +static bitstr_t *_valid_features(struct job_details *detail_ptr, + char *available); /* * allocate_nodes - change state of specified nodes to NODE_STATE_ALLOCATED + * also claim required licenses * IN job_ptr - job being allocated resources - * globals: node_record_count - number of nodes in the system - * node_record_table_ptr - pointer to global node table - * last_node_update - last update time of node table */ extern void allocate_nodes(struct job_record *job_ptr) { @@ -131,44 +123,21 @@ extern void allocate_nodes(struct job_record *job_ptr) if (bit_test(job_ptr->node_bitmap, i)) make_node_alloc(&node_record_table_ptr[i], job_ptr); } - return; -} - - -/* - * count_cpus - report how many cpus are associated with the identified nodes - * IN bitmap - map of nodes to tally - * RET cpu count - * globals: node_record_count - number of nodes configured - * node_record_table_ptr - pointer to global node table - */ -extern int count_cpus(bitstr_t *bitmap) -{ - int i, sum; - sum = 0; - for (i = 0; i < node_record_count; i++) { - if (bit_test(bitmap, i) != 1) - continue; - if (slurmctld_conf.fast_schedule) - sum += node_record_table_ptr[i].config_ptr->cpus; - else - sum += node_record_table_ptr[i].cpus; - } - return sum; + license_job_get(job_ptr); + return; } /* * deallocate_nodes - for a given job, deallocate its nodes and make * their state NODE_STATE_COMPLETING + * also 
release the job's licenses * IN job_ptr - pointer to terminating job (already in some COMPLETING state) * IN timeout - true if job exhausted time limit, send REQUEST_KILL_TIMELIMIT * RPC instead of REQUEST_TERMINATE_JOB * IN suspended - true if job was already suspended (node's job_run_cnt * already decremented); - * globals: node_record_count - number of nodes in the system - * node_record_table_ptr - pointer to global node table */ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout, bool suspended) @@ -182,6 +151,9 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout, xassert(job_ptr); xassert(job_ptr->details); + license_job_return(job_ptr); + if (slurm_sched_freealloc(job_ptr) != SLURM_SUCCESS) + error("slurm_sched_freealloc(%u): %m", job_ptr->job_id); if (select_g_job_fini(job_ptr) != SLURM_SUCCESS) error("select_g_job_fini(%u): %m", job_ptr->job_id); @@ -193,11 +165,12 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout, agent_args->retry = 0; /* re_kill_job() resends as needed */ agent_args->hostlist = hostlist_create(""); kill_job = xmalloc(sizeof(kill_job_msg_t)); - last_node_update = time(NULL); - kill_job->job_id = job_ptr->job_id; - kill_job->job_uid = job_ptr->user_id; - kill_job->nodes = xstrdup(job_ptr->nodes); - kill_job->time = time(NULL); + last_node_update = time(NULL); + kill_job->job_id = job_ptr->job_id; + kill_job->job_state = job_ptr->job_state; + kill_job->job_uid = job_ptr->user_id; + kill_job->nodes = xstrdup(job_ptr->nodes); + kill_job->time = time(NULL); kill_job->select_jobinfo = select_g_copy_jobinfo( job_ptr->select_jobinfo); @@ -237,10 +210,6 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout, return; } - /* log this in the accounting plugin since it was allocated - * something */ - jobacct_g_job_complete_slurmctld(job_ptr); - agent_args->msg_args = kill_job; agent_queue_request(agent_args); return; @@ -260,7 +229,7 @@ static int _match_feature(char *seek, char *available) if (seek == NULL) return 1; /* nothing to look for */ if (available == NULL) - return SLURM_SUCCESS; /* nothing to find */ + return 0; /* nothing to find */ tmp_available = xstrdup(available); found = 0; @@ -278,135 +247,213 @@ static int _match_feature(char *seek, char *available) } -/* - * _pick_best_load - Given a specification of scheduling requirements, - * identify the nodes which "best" satisfy the request. 
- * "best" is defined as the least loaded nodes - * IN job_ptr - pointer to job being scheduled - * IN/OUT bitmap - usable nodes are set on input, nodes not required to - * satisfy the request are cleared, other left set - * IN min_nodes - minimum count of nodes - * IN max_nodes - maximum count of nodes (0==don't care) - * IN req_nodes - requested (or desired) count of nodes - * RET zero on success, EINVAL otherwise - * globals: node_record_count - count of nodes configured - * node_record_table_ptr - pointer to global node table - * NOTE: bitmap must be a superset of req_nodes at the time that - * _pick_best_load is called - */ -static int -_pick_best_load(struct job_record *job_ptr, bitstr_t * bitmap, - uint32_t min_nodes, uint32_t max_nodes, - uint32_t req_nodes, bool test_only) -{ - bitstr_t *basemap; - int i, max_bit, error_code = EINVAL; - int node_cnt = 0, prev_cnt = 0, set_cnt; - - set_cnt = bit_set_count(bitmap); - if ((set_cnt < min_nodes) || - ((req_nodes > min_nodes) && (set_cnt < req_nodes))) - return error_code; /* not usable */ - - if (job_ptr->details && job_ptr->details->req_node_bitmap && - (!bit_super_set(job_ptr->details->req_node_bitmap, bitmap))) - return error_code; /* required nodes not available */ - - basemap = bit_copy(bitmap); - if (basemap == NULL) - fatal("bit_copy malloc failure"); - - max_bit = bit_size(bitmap) - 1; - for (i=0; node_cnt<set_cnt; i++) { - /* if req_nodes, then start with those as a baseline */ - if (job_ptr->details && job_ptr->details->req_node_bitmap) { - bit_copybits(bitmap, job_ptr->details->req_node_bitmap); - } else { - bit_nclear(bitmap, 0, max_bit); - } - node_cnt = _job_count_bitmap(basemap, bitmap, i); - if ((node_cnt == 0) || (node_cnt == prev_cnt)) - continue; /* nothing new to test */ - if ((node_cnt < min_nodes) || - ((req_nodes > min_nodes) && (node_cnt < req_nodes))) - continue; /* need more nodes */ - error_code = select_g_job_test(job_ptr, bitmap, - min_nodes, max_nodes, - req_nodes, test_only); - if (!error_code) - break; - prev_cnt = node_cnt; - } - - FREE_NULL_BITMAP(basemap); - return error_code; -} - -/* - * Set the bits in 'jobmap' that correspond to bits in the 'bitmap' - * that are running 'job_cnt' jobs or less. 
- */ -static int -_job_count_bitmap(bitstr_t * bitmap, bitstr_t * jobmap, int job_cnt) -{ - int i, count = 0; - bitoff_t size = bit_size(bitmap); - - for (i = 0; i < size; i++) { - if (bit_test(bitmap, i) && - (node_record_table_ptr[i].run_job_cnt <= job_cnt)) { - bit_set(jobmap, i); - count++; - } - } - return count; -} - /* * Decide if a job can share nodes with other jobs based on the * following three input parameters: * * IN user_flag - may be 0 (do not share nodes), 1 (node sharing allowed), * or any other number means "don't care" - * IN part_enum - current partition's node sharing policy + * IN part_max_share - current partition's node sharing policy * IN cons_res_flag - 1 if the consumable resources flag is enable, 0 otherwise * * RET - 1 if nodes can be shared, 0 if nodes cannot be shared + * + * + * The followed table details the node SHARED state for the various scenarios + * + * part= part= part= part= + * cons_res user_request EXCLUS NO YES FORCE + * -------- ------------ ------ ----- ----- ----- + * no default/exclus whole whole whole share/O + * no share=yes whole whole share/O share/O + * yes default whole share share/O share/O + * yes exclusive whole whole whole share/O + * yes share=yes whole share share/O share/O + * + * whole = whole node is allocated exclusively to the user + * share = nodes may be shared but the resources are not overcommitted + * share/O = nodes are shared and the resources can be overcommitted + * + * part->max_share: + * &SHARED_FORCE = FORCE + * 0 = EXCLUSIVE + * 1 = NO + * > 1 = YES + * + * job_ptr->details->shared: + * (uint16_t)NO_VAL = default + * 0 = exclusive + * 1 = share=yes */ static int -_resolve_shared_status(uint16_t user_flag, uint16_t part_enum, +_resolve_shared_status(uint16_t user_flag, uint16_t part_max_share, int cons_res_flag) { - int shared; + /* no sharing if part=EXCLUSIVE */ + if (part_max_share == 0) + return 0; + /* sharing if part=FORCE */ + if (part_max_share & SHARED_FORCE) + return 1; if (cons_res_flag) { - /* - * Consumable resources will always share nodes by default, - * the partition or user has to explicitly disable sharing to - * get exclusive nodes. - */ - if ((part_enum == SHARED_EXCLUSIVE) || (user_flag == 0)) - shared = 0; - else - shared = 1; + /* sharing unless user requested exclusive */ + if (user_flag == 0) + return 0; + return 1; } else { - /* The partition sharing option is only used if - * the consumable resources plugin is NOT in use. - */ - if (part_enum == SHARED_FORCE) /* shared=force */ - shared = 1; - else if (part_enum == SHARED_NO) /* can't share */ - shared = 0; - else - shared = (user_flag == 1) ? 1 : 0; + /* no sharing if part=NO */ + if (part_max_share == 1) + return 0; + /* share if the user requested it */ + if (user_flag == 1) + return 1; } + return 0; +} + +/* + * If the job has required feature counts, then accumulate those + * required resources using multiple calls to _pick_best_nodes() + * and adding those selected nodes to the job's required node list. + * Upon completion, return job's requirements to match the values + * which were in effect upon calling this function. + * Input and output are the same as _pick_best_nodes(). 
+ */ +static int +_get_req_features(struct node_set *node_set_ptr, int node_set_size, + bitstr_t ** select_bitmap, struct job_record *job_ptr, + struct part_record *part_ptr, + uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes, + bool test_only) +{ + uint32_t saved_min_nodes, saved_job_min_nodes; + bitstr_t *saved_req_node_bitmap = NULL; + uint32_t saved_num_procs, saved_req_nodes; + int tmp_node_set_size; + struct node_set *tmp_node_set_ptr; + int error_code = SLURM_SUCCESS, i; + bitstr_t *feature_bitmap, *accumulate_bitmap = NULL; - return shared; + /* save job and request state */ + saved_min_nodes = min_nodes; + saved_req_nodes = req_nodes; + saved_job_min_nodes = job_ptr->details->min_nodes; + if (job_ptr->details->req_node_bitmap) { + saved_req_node_bitmap = job_ptr->details->req_node_bitmap; + job_ptr->details->req_node_bitmap = NULL; + } + saved_num_procs = job_ptr->num_procs; + job_ptr->num_procs = 1; + tmp_node_set_ptr = xmalloc(sizeof(struct node_set) * node_set_size); + + /* Accumulate nodes with required feature counts. + * Ignored if job_ptr->details->req_node_layout is set (by wiki2). + * Selected nodes become part of job's required node list. */ + if (job_ptr->details->feature_list && + (job_ptr->details->req_node_layout == NULL)) { + ListIterator feat_iter; + struct feature_record *feat_ptr; + feat_iter = list_iterator_create(job_ptr->details->feature_list); + while((feat_ptr = (struct feature_record *) + list_next(feat_iter))) { + if (feat_ptr->count == 0) + continue; + tmp_node_set_size = 0; + /* _pick_best_nodes() is destructive of the node_set + * data structure, so we need to make a copy then + * purge it */ + for (i=0; i<node_set_size; i++) { + if (!_match_feature(feat_ptr->name, + node_set_ptr[i].features)) + continue; + tmp_node_set_ptr[tmp_node_set_size].cpus_per_node = + node_set_ptr[i].cpus_per_node; + tmp_node_set_ptr[tmp_node_set_size].real_memory = + node_set_ptr[i].real_memory; + tmp_node_set_ptr[tmp_node_set_size].nodes = + node_set_ptr[i].nodes; + tmp_node_set_ptr[tmp_node_set_size].weight = + node_set_ptr[i].weight; + tmp_node_set_ptr[tmp_node_set_size].features = + xstrdup(node_set_ptr[i].features); + tmp_node_set_ptr[tmp_node_set_size].feature_bits = + bit_copy(node_set_ptr[i].feature_bits); + tmp_node_set_ptr[tmp_node_set_size].my_bitmap = + bit_copy(node_set_ptr[i].my_bitmap); + tmp_node_set_size++; + } + feature_bitmap = NULL; + min_nodes = feat_ptr->count; + req_nodes = feat_ptr->count; + job_ptr->details->min_nodes = feat_ptr->count; + job_ptr->num_procs = feat_ptr->count; + error_code = _pick_best_nodes(tmp_node_set_ptr, + tmp_node_set_size, &feature_bitmap, + job_ptr, part_ptr, min_nodes, + max_nodes, req_nodes, test_only); +#if 0 +{ + char *tmp_str = bitmap2node_name(feature_bitmap); + info("job %u needs %u nodes with feature %s, using %s", + job_ptr->job_id, feat_ptr->count, + feat_ptr->name, tmp_str); + xfree(tmp_str); } +#endif + for (i=0; i<tmp_node_set_size; i++) { + xfree(tmp_node_set_ptr[i].features); + FREE_NULL_BITMAP(tmp_node_set_ptr[i].feature_bits); + FREE_NULL_BITMAP(tmp_node_set_ptr[i].my_bitmap); + } + if (error_code != SLURM_SUCCESS) + break; + if (feature_bitmap) { + if (accumulate_bitmap) { + bit_or(accumulate_bitmap, feature_bitmap); + bit_free(feature_bitmap); + } else + accumulate_bitmap = feature_bitmap; + } + } + list_iterator_destroy(feat_iter); + } + + /* restore most of job state and accumulate remaining resources */ + min_nodes = saved_min_nodes; + req_nodes = saved_req_nodes; + job_ptr->details->min_nodes = 
saved_job_min_nodes; + job_ptr->num_procs = saved_num_procs; + if (saved_req_node_bitmap) { + FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap); + job_ptr->details->req_node_bitmap = + bit_copy(saved_req_node_bitmap); + } + if (accumulate_bitmap) { + if (job_ptr->details->req_node_bitmap) { + bit_or(job_ptr->details->req_node_bitmap, + accumulate_bitmap); + FREE_NULL_BITMAP(accumulate_bitmap); + } else + job_ptr->details->req_node_bitmap = accumulate_bitmap; + } + xfree(tmp_node_set_ptr); + if (error_code == SLURM_SUCCESS) { + error_code = _pick_best_nodes(node_set_ptr, node_set_size, + select_bitmap, job_ptr, part_ptr, min_nodes, + max_nodes, req_nodes, test_only); + } + /* restore job's initial required node bitmap */ + FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap); + job_ptr->details->req_node_bitmap = saved_req_node_bitmap; + + + return error_code; +} /* - * _pick_best_nodes - from a weigh order list of all nodes satisfying a + * _pick_best_nodes - from a weight order list of all nodes satisfying a * job's specifications, select the "best" for use * IN node_set_ptr - pointer to node specification information * IN node_set_size - number of entries in records pointed to by node_set_ptr @@ -416,6 +463,7 @@ _resolve_shared_status(uint16_t user_flag, uint16_t part_enum, * IN min_nodes - minimum count of nodes required by the job * IN max_nodes - maximum count of nodes required by the job (0==no limit) * IN req_nodes - requested (or desired) count of nodes + * IN test_only - do not actually allocate resources * RET SLURM_SUCCESS on success, * ESLURM_NODES_BUSY if request can not be satisfied now, * ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE if request can never @@ -431,9 +479,8 @@ _resolve_shared_status(uint16_t user_flag, uint16_t part_enum, * 3) For each feature: find matching node table entries, identify nodes * that are up and available (idle or shared) and add them to a bit * map - * 4) If nodes _not_ shared then call select_g_job_test() to select the - * "best" of those based upon topology, else call _pick_best_load() - * to pick the "best" nodes in terms of workload + * 4) Select_g_job_test() to select the "best" of those based upon + * topology and/or workload * 5) If request can't be satisfied now, execute select_g_job_test() * against the list of nodes that exist in any state (perhaps DOWN * DRAINED or ALLOCATED) to determine if the request can @@ -443,62 +490,60 @@ static int _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, bitstr_t ** select_bitmap, struct job_record *job_ptr, struct part_record *part_ptr, - uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes) + uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes, + bool test_only) { int error_code = SLURM_SUCCESS, i, j, pick_code; - int total_nodes = 0, total_cpus = 0; - uint32_t total_mem = 0; /* total_: total resources configured in - partition */ - int avail_nodes = 0, avail_cpus = 0; - int avail_mem = 0; /* avail_: resources available for use now */ + int total_nodes = 0, avail_nodes = 0; bitstr_t *avail_bitmap = NULL, *total_bitmap = NULL; bitstr_t *backup_bitmap = NULL; - bitstr_t *partially_idle_node_bitmap = NULL, *possible_bitmap = NULL; + bitstr_t *possible_bitmap = NULL; + bitstr_t *partially_idle_node_bitmap = NULL; int max_feature, min_feature; bool runable_ever = false; /* Job can ever run */ bool runable_avail = false; /* Job can run with available nodes */ - bool pick_light_load = false; - uint32_t cr_enabled = 0; - int shared = 0; + bool tried_sched = false; /* Tried to 
schedule with avail nodes */ + static uint32_t cr_enabled = NO_VAL; select_type_plugin_info_t cr_type = SELECT_TYPE_INFO_NONE; + int shared = 0, select_mode; + + if (test_only) + select_mode = SELECT_MODE_TEST_ONLY; + else + select_mode = SELECT_MODE_RUN_NOW; if (node_set_size == 0) { info("_pick_best_nodes: empty node set for selection"); return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; } - /* Is Consumable Resources enabled? */ - error_code = select_g_get_info_from_plugin (SELECT_CR_PLUGIN, - &cr_enabled); - if (error_code != SLURM_SUCCESS) - return error_code; + /* Are Consumable Resources enabled? Check once. */ + if (cr_enabled == NO_VAL) { + cr_enabled = 0; /* select/linear and bluegene are no-ops */ + error_code = select_g_get_info_from_plugin (SELECT_CR_PLUGIN, + &cr_enabled); + if (error_code != SLURM_SUCCESS) { + cr_enabled = NO_VAL; + return error_code; + } + } shared = _resolve_shared_status(job_ptr->details->shared, - part_ptr->shared, cr_enabled); + part_ptr->max_share, cr_enabled); job_ptr->details->shared = shared; - if (cr_enabled) { - shared = 0; + if (cr_enabled) { + /* Determine which nodes might be used by this job based upon + * its ability to share resources */ job_ptr->cr_enabled = cr_enabled; /* CR enabled for this job */ - cr_type = (select_type_plugin_info_t) slurmctld_conf.select_type_param; - if (cr_type == CR_MEMORY) { - shared = 1; /* Sharing set when only memory as a CR is enabled */ - } else if ((cr_type == CR_SOCKET) - || (cr_type == CR_CORE) - || (cr_type == CR_CPU)) { - job_ptr->details->job_max_memory = 0; - } - - debug3("Job %u in exclusive mode? " - "%d cr_enabled %d CR type %d num_procs %d", - job_ptr->job_id, - job_ptr->details->shared ? 0 : 1, - cr_enabled, - cr_type, + cr_type = (select_type_plugin_info_t) slurmctld_conf. + select_type_param; + debug3("Job %u shared %d cr_enabled %d CR type %d num_procs %d", + job_ptr->job_id, shared, cr_enabled, cr_type, job_ptr->num_procs); - if (job_ptr->details->shared == 0) { + if (shared == 0) { partially_idle_node_bitmap = bit_copy(idle_node_bitmap); } else { /* Update partially_idle_node_bitmap to reflect the @@ -515,104 +560,52 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, } if (job_ptr->details->req_node_bitmap) { /* specific nodes required */ - /* we have already confirmed that all of these nodes have a - * usable configuration and are in the proper partition */ + /* We have already confirmed that all of these nodes have a + * usable configuration and are in the proper partition. + * Check that these nodes can be used by this job. 
*/ if (min_nodes != 0) { total_nodes = bit_set_count( job_ptr->details->req_node_bitmap); } - if (job_ptr->num_procs != 0) { - if (cr_enabled) { - uint16_t tmp16; - if ((cr_type == CR_MEMORY) - || (cr_type == CR_SOCKET_MEMORY) - || (cr_type == CR_CORE_MEMORY) - || (cr_type == CR_CPU_MEMORY)) { - /* Check if the requested amount of - * memory is available */ - error_code = select_g_get_extra_jobinfo ( - NULL, - job_ptr, - SELECT_AVAIL_MEMORY, - &total_mem); - if (error_code != SLURM_SUCCESS) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - return ESLURM_NODES_BUSY; - } - } - error_code = select_g_get_extra_jobinfo ( - NULL, - job_ptr, - SELECT_CPU_COUNT, - &tmp16); - if (error_code != SLURM_SUCCESS) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - return error_code; - } - total_cpus = (int) tmp16; - } else { - total_cpus = count_cpus( - job_ptr->details->req_node_bitmap); - } + if (total_nodes > max_nodes) { /* exceeds node limit */ + FREE_NULL_BITMAP(partially_idle_node_bitmap); + return ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE; } - if (total_nodes > max_nodes) { - /* exceeds node limit */ - if (cr_enabled) - FREE_NULL_BITMAP(partially_idle_node_bitmap); + + /* check the availability of these nodes */ + /* Should we check memory availability on these nodes? */ + if (!bit_super_set(job_ptr->details->req_node_bitmap, + avail_node_bitmap)) { + FREE_NULL_BITMAP(partially_idle_node_bitmap); return ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE; } - if ((min_nodes <= total_nodes) && - (max_nodes <= min_nodes) && - (job_ptr->num_procs <= total_cpus )) { + + if (partially_idle_node_bitmap) { if (!bit_super_set(job_ptr->details->req_node_bitmap, - avail_node_bitmap)) { - if (cr_enabled) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - } - return ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE; + partially_idle_node_bitmap)) { + FREE_NULL_BITMAP(partially_idle_node_bitmap); + return ESLURM_NODES_BUSY; } - - /* shared needs to be checked before cr_enabled - * to make sure that CR_MEMORY works correctly */ - if (shared) { - if (!bit_super_set(job_ptr->details-> - req_node_bitmap, - share_node_bitmap)) { - if (cr_enabled) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - } - return ESLURM_NODES_BUSY; - } - } else if (cr_enabled) { - if (!bit_super_set(job_ptr->details-> - req_node_bitmap, - partially_idle_node_bitmap)) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - return ESLURM_NODES_BUSY; - } - } else { - if (!bit_super_set(job_ptr->details-> - req_node_bitmap, - idle_node_bitmap)) { - return ESLURM_NODES_BUSY; - } + } + if (shared) { + if (!bit_super_set(job_ptr->details->req_node_bitmap, + share_node_bitmap)) { + FREE_NULL_BITMAP(partially_idle_node_bitmap); + return ESLURM_NODES_BUSY; + } + } else { + if (!bit_super_set(job_ptr->details->req_node_bitmap, + idle_node_bitmap)) { + FREE_NULL_BITMAP(partially_idle_node_bitmap); + return ESLURM_NODES_BUSY; } - /* still must go through select_g_job_test() to - * determine validity of request and/or perform - * set-up before job launch */ } - total_nodes = total_cpus = 0; /* reinitialize */ - } -#ifndef HAVE_BG - if (shared) - pick_light_load = true; -#endif + /* still must go through select_g_job_test() to + * determine validity of request and/or perform + * set-up before job launch */ + total_nodes = 0; /* reinitialize */ + } /* identify the min and max feature values for exclusive OR */ max_feature = -1; @@ -625,82 +618,62 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, if ((j >= 0) && (j > max_feature)) 
max_feature = j; } - + + /* Accumulate resources for this job based upon its required + * features (possibly with node counts). */ for (j = min_feature; j <= max_feature; j++) { for (i = 0; i < node_set_size; i++) { if (!bit_test(node_set_ptr[i].feature_bits, j)) continue; - if (!runable_ever) { - int cr_disabled = 0; - total_mem = 0; - error_code = _add_node_set_info( - &node_set_ptr[i], - &total_bitmap, - &total_nodes, - &total_cpus, - total_mem, - cr_disabled, - job_ptr); - if (error_code != SLURM_SUCCESS) { - if (cr_enabled) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - } - FREE_NULL_BITMAP(avail_bitmap); - FREE_NULL_BITMAP(total_bitmap); - FREE_NULL_BITMAP(possible_bitmap); - return error_code; - } + + if (total_bitmap) + bit_or(total_bitmap, node_set_ptr[i].my_bitmap); + else { + total_bitmap = bit_copy( + node_set_ptr[i].my_bitmap); + if (total_bitmap == NULL) + fatal("bit_copy malloc failure"); } - bit_and(node_set_ptr[i].my_bitmap, avail_node_bitmap); - /* shared needs to be checked before cr_enabled - * to make sure that CR_MEMORY works correctly. */ + bit_and(node_set_ptr[i].my_bitmap, avail_node_bitmap); + if (partially_idle_node_bitmap) { + bit_and(node_set_ptr[i].my_bitmap, + partially_idle_node_bitmap); + } if (shared) { bit_and(node_set_ptr[i].my_bitmap, share_node_bitmap); - } else if (cr_enabled) { - bit_and(node_set_ptr[i].my_bitmap, - partially_idle_node_bitmap); } else { bit_and(node_set_ptr[i].my_bitmap, idle_node_bitmap); } - node_set_ptr[i].nodes = - bit_set_count(node_set_ptr[i].my_bitmap); - avail_mem = job_ptr->details->job_max_memory; - error_code = _add_node_set_info(&node_set_ptr[i], - &avail_bitmap, - &avail_nodes, - &avail_cpus, - avail_mem, - cr_enabled, - job_ptr); - if (error_code != SLURM_SUCCESS) { - if (cr_enabled) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - } - FREE_NULL_BITMAP(total_bitmap); - FREE_NULL_BITMAP(avail_bitmap); - FREE_NULL_BITMAP(possible_bitmap); - return error_code; + if (avail_bitmap) + bit_or(avail_bitmap, node_set_ptr[i].my_bitmap); + else { + avail_bitmap = bit_copy( + node_set_ptr[i].my_bitmap); + if (avail_bitmap == NULL) + fatal("bit_copy malloc failure"); + } + avail_nodes = bit_set_count(avail_bitmap); + tried_sched = false; /* need to test these nodes */ + + if (shared) { + /* Keep accumulating so we can pick the + * most lighly loaded nodes */ + continue; } - if (pick_light_load) - continue; /* Keep accumulating */ - if (avail_nodes == 0) - continue; /* Keep accumulating */ + if ((job_ptr->details->req_node_bitmap) && (!bit_super_set(job_ptr->details->req_node_bitmap, avail_bitmap))) continue; + if ((avail_nodes < min_nodes) || - ((req_nodes > min_nodes) && + ((req_nodes > min_nodes) && (avail_nodes < req_nodes))) continue; /* Keep accumulating nodes */ - if (avail_cpus < job_ptr->num_procs) - continue; /* Keep accumulating CPUs */ /* NOTE: select_g_job_test() is destructive of * avail_bitmap, so save a backup copy */ @@ -710,8 +683,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, min_nodes, max_nodes, req_nodes, - false); - + select_mode); if (pick_code == SLURM_SUCCESS) { FREE_NULL_BITMAP(backup_bitmap); if (bit_set_count(avail_bitmap) > max_nodes) { @@ -719,69 +691,33 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, avail_nodes = 0; break; } + FREE_NULL_BITMAP(partially_idle_node_bitmap); FREE_NULL_BITMAP(total_bitmap); FREE_NULL_BITMAP(possible_bitmap); - if (cr_enabled) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - } *select_bitmap = avail_bitmap; 
return SLURM_SUCCESS; } else { + tried_sched = true; /* test failed */ FREE_NULL_BITMAP(avail_bitmap); avail_bitmap = backup_bitmap; } } /* for (i = 0; i < node_set_size; i++) */ - /* try picking the lightest load from all - available nodes with this feature set */ - if (pick_light_load) { - backup_bitmap = bit_copy(avail_bitmap); - pick_code = _pick_best_load(job_ptr, - avail_bitmap, - min_nodes, - max_nodes, - req_nodes, - false); - if (pick_code == SLURM_SUCCESS) { - FREE_NULL_BITMAP(backup_bitmap); - if (bit_set_count(avail_bitmap) > max_nodes) { - avail_nodes = 0; - } else { - FREE_NULL_BITMAP(total_bitmap); - FREE_NULL_BITMAP(possible_bitmap); - if (cr_enabled) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - } - *select_bitmap = avail_bitmap; - return SLURM_SUCCESS; - } - } else { - FREE_NULL_BITMAP(avail_bitmap); - avail_bitmap = backup_bitmap; - } - } - /* try to get req_nodes now for this feature */ - if (avail_bitmap - && (req_nodes > min_nodes) + if (avail_bitmap && (!tried_sched) && (avail_nodes >= min_nodes) - && (avail_nodes < req_nodes) && ((job_ptr->details->req_node_bitmap == NULL) || bit_super_set(job_ptr->details->req_node_bitmap, avail_bitmap))) { pick_code = select_g_job_test(job_ptr, avail_bitmap, min_nodes, max_nodes, - req_nodes, false); + req_nodes, + select_mode); if ((pick_code == SLURM_SUCCESS) && (bit_set_count(avail_bitmap) <= max_nodes)) { + FREE_NULL_BITMAP(partially_idle_node_bitmap); FREE_NULL_BITMAP(total_bitmap); FREE_NULL_BITMAP(possible_bitmap); - if (cr_enabled) { - FREE_NULL_BITMAP( - partially_idle_node_bitmap); - } *select_bitmap = avail_bitmap; return SLURM_SUCCESS; } @@ -789,12 +725,11 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, /* determine if job could possibly run (if all configured * nodes available) */ - + if (total_bitmap) + total_nodes = bit_set_count(total_bitmap); if (total_bitmap && (!runable_ever || !runable_avail) && (total_nodes >= min_nodes) - && ((slurmctld_conf.fast_schedule == 0) || - (total_cpus >= job_ptr->num_procs)) && ((job_ptr->details->req_node_bitmap == NULL) || (bit_super_set(job_ptr->details->req_node_bitmap, total_bitmap)))) { @@ -809,9 +744,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, min_nodes, max_nodes, req_nodes, - true); - if (cr_enabled) - job_ptr->cr_enabled = 1; + SELECT_MODE_TEST_ONLY); if (pick_code == SLURM_SUCCESS) { runable_ever = true; if (bit_set_count(avail_bitmap) <= @@ -828,9 +761,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, min_nodes, max_nodes, req_nodes, - true); - if (cr_enabled) - job_ptr->cr_enabled = 1; + SELECT_MODE_TEST_ONLY); if (pick_code == SLURM_SUCCESS) { FREE_NULL_BITMAP(possible_bitmap); possible_bitmap = total_bitmap; @@ -845,9 +776,6 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, break; } - if (cr_enabled) - FREE_NULL_BITMAP(partially_idle_node_bitmap); - /* The job is not able to start right now, return a * value indicating when the job can start */ if (!runable_avail) @@ -863,134 +791,11 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size, } else { FREE_NULL_BITMAP(possible_bitmap); } + FREE_NULL_BITMAP(partially_idle_node_bitmap); return error_code; } -/* - * _add_node_set_info - add info in node_set_ptr to node_bitmap - * IN node_set_ptr - node set info - * IN/OUT node_bitmap - add nodes in set to this bitmap - * IN/OUT node_cnt - add count of nodes in set to this total - * IN/OUT cpu_cnt - add count of cpus in set to this total - * IN/OUT mem_cnt - add 
count of memory in set to this total - * IN cr_enabled - specify if consumable resources (of processors) is enabled - * IN job_ptr - the job to be updated - */ -static int -_add_node_set_info(struct node_set *node_set_ptr, - bitstr_t ** node_bitmap, - int *node_cnt, int *cpu_cnt, - const int mem_cnt, int cr_enabled, - struct job_record * job_ptr) -{ - int error_code = SLURM_SUCCESS, i; - int this_cpu_cnt, this_mem_cnt; - uint32_t alloc_mem; - uint16_t alloc_cpus; - uint32_t job_id = job_ptr->job_id; - - xassert(node_set_ptr->my_bitmap); - - if (cr_enabled == 0) { - if (*node_bitmap) - bit_or(*node_bitmap, node_set_ptr->my_bitmap); - else { - *node_bitmap = bit_copy(node_set_ptr->my_bitmap); - if (*node_bitmap == NULL) - fatal("bit_copy malloc failure"); - } - *node_cnt += node_set_ptr->nodes; - if (slurmctld_conf.fast_schedule) { - *cpu_cnt += node_set_ptr->nodes * - node_set_ptr->cpus_per_node; - } else { - for (i = 0; i < node_record_count; i++) { - if (bit_test (node_set_ptr->my_bitmap, i) == 0) - continue; - *cpu_cnt += node_record_table_ptr[i].cpus; - } - } - } else { - int ll; /* layout array index */ - uint16_t * layout_ptr = NULL; - if (job_ptr->details) - layout_ptr = job_ptr->details->req_node_layout; - - for (i = 0, ll = -1; i < node_record_count; i++) { - if (layout_ptr && - bit_test(job_ptr->details->req_node_bitmap, i)) { - ll ++; - } - if (bit_test (node_set_ptr->my_bitmap, i) == 0) - continue; - alloc_cpus = 0; - error_code = select_g_get_select_nodeinfo( - &node_record_table_ptr[i], - SELECT_ALLOC_CPUS, - &alloc_cpus); - if (error_code != SLURM_SUCCESS) { - error("cons_res: Invalid Node reference %s", - node_record_table_ptr[i].name); - return error_code; - } - alloc_mem = 0; - error_code = select_g_get_select_nodeinfo( - &node_record_table_ptr[i], - SELECT_ALLOC_MEMORY, - &alloc_mem); - if (error_code != SLURM_SUCCESS) { - error("cons_res: Invalid Node reference %s", - node_record_table_ptr[i]. 
name); - return error_code; - } - - /* Determine processors and memory available for use */ - if (slurmctld_conf.fast_schedule) { - this_cpu_cnt = node_set_ptr->cpus_per_node - - alloc_cpus; - this_mem_cnt = (node_set_ptr->real_memory - - alloc_mem) - mem_cnt; - } else { - this_cpu_cnt = node_record_table_ptr[i].cpus - - alloc_cpus; - this_mem_cnt = (node_record_table_ptr[i].real_memory - - alloc_mem) - mem_cnt; - } - - debug3("_add_node_set_info %u %s this_cpu_cnt %d" - " this_mem_cnt %d", - job_id, node_record_table_ptr[i].name, - this_cpu_cnt, this_mem_cnt); - - if (layout_ptr && - bit_test(job_ptr->details->req_node_bitmap, i)) { - this_cpu_cnt = MIN(this_cpu_cnt, layout_ptr[ll]); - debug3("_add_node_set_info %u %s this_cpu_cnt" - " limited by task layout %d: %u", - job_id, node_record_table_ptr[i].name, - ll, layout_ptr[ll]); - } else if (layout_ptr) { - this_cpu_cnt = 0; - } - - if ((this_cpu_cnt > 0) && (this_mem_cnt > 0)) { - *node_cnt += 1; - *cpu_cnt += this_cpu_cnt; - - if (*node_bitmap) - bit_or(*node_bitmap, node_set_ptr->my_bitmap); - else { - *node_bitmap = bit_copy(node_set_ptr->my_bitmap); - if (*node_bitmap == NULL) - fatal("bit_copy malloc failure"); - } - } - } - } - return error_code; -} - /* * select_nodes - select and allocate nodes to a specific job * IN job_ptr - pointer to the job record @@ -1107,24 +912,28 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only, if (max_nodes < min_nodes) { error_code = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE; } else { - error_code = _pick_best_nodes(node_set_ptr, node_set_size, - &select_bitmap, job_ptr, - part_ptr, min_nodes, max_nodes, - req_nodes); + /* Select resources for the job here */ + error_code = _get_req_features(node_set_ptr, node_set_size, + &select_bitmap, job_ptr, + part_ptr, min_nodes, max_nodes, + req_nodes, test_only); } if (error_code) { - job_ptr->state_reason = WAIT_RESOURCES; if (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) { /* Required nodes are down or * too many nodes requested */ debug3("JobId=%u not runnable with present config", job_ptr->job_id); + job_ptr->state_reason = WAIT_PART_NODE_LIMIT; if (job_ptr->priority != 0) /* Move to end of queue */ job_ptr->priority = 1; last_job_update = now; - } else if (error_code == ESLURM_NODES_BUSY) - slurm_sched_job_is_pending(); + } else { + job_ptr->state_reason = WAIT_RESOURCES; + if (error_code == ESLURM_NODES_BUSY) + slurm_sched_job_is_pending(); + } goto cleanup; } if (test_only) { /* set if job not highest priority */ @@ -1140,10 +949,26 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only, xfree(job_ptr->nodes); job_ptr->node_bitmap = select_bitmap; + + /* we need to have these times set to know when the endtime + * is for the job when we place it + */ + job_ptr->start_time = job_ptr->time_last_active = now; + if (job_ptr->time_limit == NO_VAL) + job_ptr->time_limit = part_ptr->max_time; + if (job_ptr->time_limit == INFINITE) + job_ptr->end_time = job_ptr->start_time + + (365 * 24 * 60 * 60); /* secs in year */ + else + job_ptr->end_time = job_ptr->start_time + + (job_ptr->time_limit * 60); /* secs */ + if (select_g_job_begin(job_ptr) != SLURM_SUCCESS) { /* Leave job queued, something is hosed */ error("select_g_job_begin(%u): %m", job_ptr->job_id); error_code = ESLURM_NODES_BUSY; + job_ptr->start_time = job_ptr->time_last_active + = job_ptr->end_time = 0; goto cleanup; } @@ -1158,19 +983,12 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only, error("select_g_update_nodeinfo(%u): %m", 
job_ptr->job_id); /* not critical ... by now */ } - job_ptr->start_time = job_ptr->time_last_active = now; - if (job_ptr->time_limit == NO_VAL) - job_ptr->time_limit = part_ptr->max_time; - if (job_ptr->time_limit == INFINITE) - job_ptr->end_time = job_ptr->start_time + - (365 * 24 * 60 * 60); /* secs in year */ - else - job_ptr->end_time = job_ptr->start_time + - (job_ptr->time_limit * 60); /* secs */ if (job_ptr->mail_type & MAIL_JOB_BEGIN) mail_job_info(job_ptr, MAIL_JOB_BEGIN); - jobacct_g_job_start_slurmctld(job_ptr); + jobacct_storage_g_job_start(acct_db_conn, job_ptr); + + slurm_sched_newalloc(job_ptr); cleanup: if (select_node_bitmap) @@ -1179,6 +997,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only, FREE_NULL_BITMAP(select_bitmap); if (node_set_ptr) { for (i = 0; i < node_set_size; i++) { + xfree(node_set_ptr[i].features); FREE_NULL_BITMAP(node_set_ptr[i].my_bitmap); FREE_NULL_BITMAP(node_set_ptr[i].feature_bits); } @@ -1187,8 +1006,247 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only, return error_code; } +static void _print_feature_list(uint32_t job_id, List feature_list) +{ + ListIterator feat_iter; + struct feature_record *feat_ptr; + char *buf = NULL, tmp[16]; + int bracket = 0; + + if (feature_list == NULL) { + info("Job %u feature list is empty", job_id); + return; + } + + feat_iter = list_iterator_create(feature_list); + while((feat_ptr = (struct feature_record *)list_next(feat_iter))) { + if (feat_ptr->op_code == FEATURE_OP_XOR) { + if (bracket == 0) + xstrcat(buf, "["); + bracket = 1; + } + xstrcat(buf, feat_ptr->name); + if (feat_ptr->count) { + snprintf(tmp, sizeof(tmp), "*%u", feat_ptr->count); + xstrcat(buf, tmp); + } + if (bracket && (feat_ptr->op_code != FEATURE_OP_XOR)) { + xstrcat(buf, "]"); + bracket = 0; + } + if (feat_ptr->op_code == FEATURE_OP_AND) + xstrcat(buf, "&"); + else if ((feat_ptr->op_code == FEATURE_OP_OR) || + (feat_ptr->op_code == FEATURE_OP_XOR)) + xstrcat(buf, "|"); + } + list_iterator_destroy(feat_iter); + info("Job %u feature list: %s", job_id, buf); + xfree(buf); +} + +static void _feature_list_delete(void *x) +{ + struct feature_record *feature = (struct feature_record *)x; + xfree(feature->name); + xfree(feature); +} + +/* + * _build_feature_list - Translate a job's feature string into a feature_list + * IN details->features + * OUT details->feature_list + * RET error code + */ +static int _build_feature_list(struct job_record *job_ptr) +{ + struct job_details *detail_ptr = job_ptr->details; + char *tmp_requested, *str_ptr1, *str_ptr2, *feature = NULL; + int bracket = 0, count = 0, i; + bool have_count = false, have_or = false; + struct feature_record *feat; + + if (detail_ptr->features == NULL) /* no constraints */ + return SLURM_SUCCESS; + if (detail_ptr->feature_list) /* already processed */ + return SLURM_SUCCESS; + + tmp_requested = xstrdup(detail_ptr->features); + str_ptr1 = tmp_requested; + detail_ptr->feature_list = list_create(_feature_list_delete); + for (i=0; ; i++) { + if (tmp_requested[i] == '*') { + tmp_requested[i] = '\0'; + have_count = true; + count = strtol(&tmp_requested[i+1], &str_ptr2, 10); + if ((feature == NULL) || (count <= 0)) { + info("Job %u invalid constraint %s", + job_ptr->job_id, detail_ptr->features); + xfree(tmp_requested); + return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + } + i = str_ptr2 - tmp_requested - 1; + } else if (tmp_requested[i] == '&') { + tmp_requested[i] = '\0'; + if ((feature == NULL) || (bracket != 0)) { + info("Job %u invalid constraint %s", + 
job_ptr->job_id, detail_ptr->features); + xfree(tmp_requested); + return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + } + feat = xmalloc(sizeof(struct feature_record)); + feat->name = xstrdup(feature); + feat->count = count; + feat->op_code = FEATURE_OP_AND; + list_append(detail_ptr->feature_list, feat); + feature = NULL; + count = 0; + } else if (tmp_requested[i] == '|') { + tmp_requested[i] = '\0'; + have_or = true; + if (feature == NULL) { + info("Job %u invalid constraint %s", + job_ptr->job_id, detail_ptr->features); + xfree(tmp_requested); + return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + } + feat = xmalloc(sizeof(struct feature_record)); + feat->name = xstrdup(feature); + feat->count = count; + if (bracket) + feat->op_code = FEATURE_OP_XOR; + else + feat->op_code = FEATURE_OP_OR; + list_append(detail_ptr->feature_list, feat); + feature = NULL; + count = 0; + } else if (tmp_requested[i] == '[') { + tmp_requested[i] = '\0'; + if ((feature != NULL) || bracket) { + info("Job %u invalid constraint %s", + job_ptr->job_id, detail_ptr->features); + xfree(tmp_requested); + return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + } + bracket++; + } else if (tmp_requested[i] == ']') { + tmp_requested[i] = '\0'; + if ((feature == NULL) || (bracket == 0)) { + info("Job %u invalid constraint %s", + job_ptr->job_id, detail_ptr->features); + xfree(tmp_requested); + return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + } + bracket = 0; + } else if (tmp_requested[i] == '\0') { + if (feature) { + feat = xmalloc(sizeof(struct feature_record)); + feat->name = xstrdup(feature); + feat->count = count; + feat->op_code = FEATURE_OP_END; + list_append(detail_ptr->feature_list, feat); + } + break; + } else if (feature == NULL) { + feature = &tmp_requested[i]; + } + } + xfree(tmp_requested); + if (have_count && have_or) { + info("Job %u invalid constraint (OR with feature count): %s", + job_ptr->job_id, detail_ptr->features); + return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + } + + _print_feature_list(job_ptr->job_id, detail_ptr->feature_list); + return SLURM_SUCCESS; +} + +/* + * job_req_node_filter - job reqeust node filter. + * clear from a bitmap the nodes which can not be used for a job + * test memory size, required features, processor count, etc. + * NOTE: Does not support exclusive OR of features or feature counts. + * It just matches first element of XOR and ignores count. 
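The constraint grammar handled by _build_feature_list() above — names joined by '&' and '|', bracketed "[...]" groups for exclusive OR, and "*count" suffixes — can be illustrated with a minimal standalone sketch. The enum and helper below (feat_op, parse_constraint) are simplified stand-ins rather than the SLURM structures, and the error cases that _build_feature_list() rejects (counts combined with OR, unbalanced brackets) are omitted.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum feat_op { OP_AND, OP_OR, OP_XOR, OP_END };  /* stand-ins for FEATURE_OP_* */

static const char *op_name[] = { "AND", "OR", "XOR", "END" };

/* Tokenize a constraint string into (name, count, operator) triples, the
 * way _build_feature_list() builds its feature_list. */
static void parse_constraint(const char *spec)
{
	char *buf = strdup(spec), *name = NULL;
	int bracket = 0, count = 0;

	printf("constraint \"%s\":\n", spec);
	for (char *p = buf; ; p++) {
		char c = *p;
		if (c == '*') {                 /* feature count, e.g. gpu*2 */
			*p = '\0';
			count = (int) strtol(p + 1, &p, 10);
			p--;
		} else if (c == '[') {
			bracket = 1;
		} else if (c == ']') {
			*p = '\0';
			bracket = 0;
		} else if (c == '&' || c == '|' || c == '\0') {
			if (c != '\0')
				*p = '\0';
			if (name) {
				enum feat_op op = (c == '&') ? OP_AND :
					(c == '|') ? (bracket ? OP_XOR : OP_OR)
						   : OP_END;
				printf("  feature=%-8s count=%d op=%s\n",
				       name, count, op_name[op]);
				name = NULL;
				count = 0;
			}
			if (c == '\0')
				break;
		} else if (name == NULL) {
			name = p;               /* start of a feature name */
		}
	}
	free(buf);
}

int main(void)
{
	parse_constraint("fast&[rack1|rack2]"); /* AND, then XOR alternatives */
	parse_constraint("gpu*2&bigmem");       /* feature count of 2 on gpu  */
	return 0;
}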
+ * IN job_ptr - pointer to node to be scheduled + * IN/OUT bitmap - set of nodes being considered for use + * RET SLURM_SUCCESS or EINVAL if can't filter (exclusive OR of features) + */ +extern int job_req_node_filter(struct job_record *job_ptr, + bitstr_t *avail_bitmap) +{ + int i; + struct job_details *detail_ptr = job_ptr->details; + multi_core_data_t *mc_ptr; + struct node_record *node_ptr; + struct config_record *config_ptr; + bitstr_t *feature_bitmap = NULL; + + if (detail_ptr == NULL) { + error("job_req_node_filter: job %u has no details", + job_ptr->job_id); + return EINVAL; + } + if (_build_feature_list(job_ptr)) + return EINVAL; + + mc_ptr = detail_ptr->mc_ptr; + for (i=0; i< node_record_count; i++) { + if (!bit_test(avail_bitmap, i)) + continue; + node_ptr = node_record_table_ptr + i; + config_ptr = node_ptr->config_ptr; + feature_bitmap = _valid_features(detail_ptr, config_ptr->feature); + if ((feature_bitmap == NULL) || (!bit_test(feature_bitmap, 0))) { + bit_clear(avail_bitmap, i); + continue; + } + FREE_NULL_BITMAP(feature_bitmap); + if (slurmctld_conf.fast_schedule) { + if ((detail_ptr->job_min_procs > config_ptr->cpus ) + || (detail_ptr->job_min_memory > config_ptr->real_memory) + || (detail_ptr->job_min_tmp_disk > config_ptr->tmp_disk)) { + bit_clear(avail_bitmap, i); + continue; + } + if (mc_ptr + && ((mc_ptr->min_sockets > config_ptr->sockets ) + || (mc_ptr->min_cores > config_ptr->cores ) + || (mc_ptr->min_threads > config_ptr->threads ) + || (mc_ptr->job_min_sockets > config_ptr->sockets ) + || (mc_ptr->job_min_cores > config_ptr->cores ) + || (mc_ptr->job_min_threads > config_ptr->threads ))) { + bit_clear(avail_bitmap, i); + continue; + } + } else { + if ((detail_ptr->job_min_procs > node_ptr->cpus ) + || (detail_ptr->job_min_memory > node_ptr->real_memory) + || (detail_ptr->job_min_tmp_disk > node_ptr->tmp_disk)) { + bit_clear(avail_bitmap, i); + continue; + } + if (mc_ptr + && ((mc_ptr->min_sockets > node_ptr->sockets ) + || (mc_ptr->min_cores > node_ptr->cores ) + || (mc_ptr->min_threads > node_ptr->threads ) + || (mc_ptr->job_min_sockets > node_ptr->sockets ) + || (mc_ptr->job_min_cores > node_ptr->cores ) + || (mc_ptr->job_min_threads > node_ptr->threads ))) { + bit_clear(avail_bitmap, i); + continue; + } + } + } + FREE_NULL_BITMAP(feature_bitmap); + return SLURM_SUCCESS; +} + /* * _build_node_list - identify which nodes could be allocated to a job + * based upon node features, memory, processors, etc. Note that a + * bitmap is set to indicate which of the job's features that the + * nodes satisfy. 
* IN job_ptr - pointer to node to be scheduled * OUT node_set_pptr - list of node sets which could be used for the job * OUT node_set_size - number of node_set entries @@ -1209,6 +1267,11 @@ static int _build_node_list(struct job_record *job_ptr, multi_core_data_t *mc_ptr = detail_ptr->mc_ptr; bitstr_t *tmp_feature; + if (detail_ptr->features && (detail_ptr->feature_list == NULL)) { + int error_code = _build_feature_list(job_ptr); + if (error_code) + return error_code; + } node_set_inx = 0; node_set_ptr = (struct node_set *) xmalloc(sizeof(struct node_set) * 2); @@ -1275,7 +1338,7 @@ static int _build_node_list(struct job_record *job_ptr, continue; } - tmp_feature = _valid_features(job_ptr->details->features, + tmp_feature = _valid_features(job_ptr->details, config_ptr->feature); if (tmp_feature == NULL) { FREE_NULL_BITMAP(node_set_ptr[node_set_inx].my_bitmap); @@ -1288,7 +1351,9 @@ static int _build_node_list(struct job_record *job_ptr, node_set_ptr[node_set_inx].real_memory = config_ptr->real_memory; node_set_ptr[node_set_inx].weight = - config_ptr->weight; + config_ptr->weight; + node_set_ptr[node_set_inx].features = + xstrdup(config_ptr->feature); node_set_ptr[node_set_inx].feature_bits = tmp_feature; debug2("found %d usable nodes from config containing %s", node_set_ptr[node_set_inx].nodes, config_ptr->nodes); @@ -1300,6 +1365,7 @@ static int _build_node_list(struct job_record *job_ptr, } list_iterator_destroy(config_iterator); /* eliminate last (incomplete) node_set record */ + xfree(node_set_ptr[node_set_inx].features); FREE_NULL_BITMAP(node_set_ptr[node_set_inx].my_bitmap); FREE_NULL_BITMAP(node_set_ptr[node_set_inx].feature_bits); FREE_NULL_BITMAP(exc_node_mask); @@ -1440,6 +1506,7 @@ extern void build_node_details(struct job_record *job_ptr) job_ptr->node_addr = NULL; job_ptr->alloc_lps_cnt = 0; xfree(job_ptr->alloc_lps); + xfree(job_ptr->used_lps); return; } @@ -1461,6 +1528,8 @@ extern void build_node_details(struct job_record *job_ptr) job_ptr->alloc_lps_cnt = job_ptr->node_cnt; xrealloc(job_ptr->alloc_lps, (sizeof(uint32_t) * job_ptr->node_cnt)); + xrealloc(job_ptr->used_lps, + (sizeof(uint32_t) * job_ptr->node_cnt)); while ((this_node_name = hostlist_shift(host_list))) { node_ptr = find_node_record(this_node_name); @@ -1478,6 +1547,8 @@ extern void build_node_details(struct job_record *job_ptr) job_ptr->num_procs; total_procs += job_ptr->num_procs; job_ptr->cpu_count_reps[cpu_inx] = 1; + job_ptr->alloc_lps[0] = job_ptr->num_procs; + job_ptr->used_lps[0] = 0; goto cleanup; } #endif @@ -1486,14 +1557,19 @@ extern void build_node_details(struct job_record *job_ptr) &usable_lps); if (error_code == SLURM_SUCCESS) { if (job_ptr->alloc_lps) { + job_ptr->used_lps[cr_count] = 0; job_ptr->alloc_lps[cr_count++] = usable_lps; } } else { - xfree(job_ptr->alloc_lps); - job_ptr->alloc_lps_cnt = 0; error("Unable to get extra jobinfo " "from JobId=%u", job_ptr->job_id); + /* Job is likely completed according to + * select plugin */ + if (job_ptr->alloc_lps) { + job_ptr->used_lps[cr_count] = 0; + job_ptr->alloc_lps[cr_count++] = 0; + } } memcpy(&job_ptr->node_addr[node_inx++], @@ -1525,138 +1601,81 @@ extern void build_node_details(struct job_record *job_ptr) job_ptr->job_id, job_ptr->node_cnt, node_inx); } job_ptr->num_cpu_groups = cpu_inx + 1; - if (job_ptr->details) - job_ptr->details->total_procs = total_procs; + job_ptr->total_procs = total_procs; } /* * _valid_features - determine if the requested features are satisfied by * those available - * IN requested - requested features (by a 
job) + * IN details_ptr - job requirement details, includes requested features * IN available - available features (on a node) - * RET 0 if request is not satisfied, otherwise an integer indicating which - * mutually exclusive feature is satisfied. for example - * _valid_features("[fs1|fs2|fs3|fs4]", "fs3") returns 3. see the - * slurm administrator and user guides for details. returns 1 if - * requirements are satisfied without mutually exclusive feature list. + * RET NULL if request is not satisfied, otherwise a bitmap indicating + * which mutually exclusive features are satisfied. For example + * _valid_features("[fs1|fs2|fs3|fs4]", "fs3") returns a bitmap with + * the third bit set. For another example + * _valid_features("[fs1|fs2|fs3|fs4]", "fs1,fs3") returns a bitmap + * with the first and third bits set. The function returns a bitmap + * with the first bit set if requirements are satisfied without a + * mutually exclusive feature list. */ -static bitstr_t *_valid_features(char *requested, char *available) +static bitstr_t *_valid_features(struct job_details *details_ptr, + char *available) { - char *tmp_requested, *str_ptr1; - int bracket, found, i, position, result; - int last_op; /* last operation 0 for or, 1 for and */ - int save_op = 0, save_result = 0; /* for bracket support */ bitstr_t *result_bits = (bitstr_t *) NULL; + ListIterator feat_iter; + struct feature_record *feat_ptr; + int found, last_op, position = 0, result; + int save_op = FEATURE_OP_AND, save_result=1; - if (requested == NULL) { /* no constraints */ + if (details_ptr->feature_list == NULL) {/* no constraints */ result_bits = bit_alloc(MAX_FEATURES); bit_set(result_bits, 0); return result_bits; } - if (available == NULL) /* no features */ - return result_bits; - tmp_requested = xstrdup(requested); - bracket = position = 0; - str_ptr1 = tmp_requested; /* start of feature name */ - result = 1; /* assume good for now */ + result = 1; /* assume good for now */ last_op = FEATURE_OP_AND; - for (i=0; ; i++) { - if (tmp_requested[i] == '\0') { - if (strlen(str_ptr1) == 0) - break; - found = _match_feature(str_ptr1, available); - if (last_op == FEATURE_OP_AND) - result &= found; - else /* FEATURE_OP_OR */ - result |= found; - break; - } - - if (tmp_requested[i] == '&') { - if (bracket != 0) { - debug("_valid_features: parsing failure on %s", - requested); - result = 0; - break; - } - tmp_requested[i] = '\0'; - found = _match_feature(str_ptr1, available); - if (last_op == FEATURE_OP_AND) - result &= found; - else /* FEATURE_OP_OR */ + feat_iter = list_iterator_create(details_ptr->feature_list); + while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) { + if (feat_ptr->count) + found = 1; /* handle feature counts elsewhere */ + else + found = _match_feature(feat_ptr->name, available); + + if ((last_op == FEATURE_OP_XOR) || + (feat_ptr->op_code == FEATURE_OP_XOR)) { + if (position == 0) { + save_op = last_op; + save_result = result; + result = found; + } else result |= found; - str_ptr1 = &tmp_requested[i + 1]; - last_op = FEATURE_OP_AND; - } else if (tmp_requested[i] == '|') { - tmp_requested[i] = '\0'; - found = _match_feature(str_ptr1, available); - if (bracket != 0) { - if (found) { - if (!result_bits) - result_bits = bit_alloc(MAX_FEATURES); - if (position < MAX_FEATURES) - bit_set(result_bits, (position-1)); - else - error("_valid_features: overflow"); - } - position++; - } - if (last_op == FEATURE_OP_AND) - result &= found; - else /* FEATURE_OP_OR */ - result |= found; - str_ptr1 = &tmp_requested[i + 
1]; - last_op = FEATURE_OP_OR; + if (!result_bits) + result_bits = bit_alloc(MAX_FEATURES); - } else if (tmp_requested[i] == '[') { - bracket++; - position = 1; - save_op = last_op; - save_result = result; - last_op = FEATURE_OP_AND; - result = 1; - str_ptr1 = &tmp_requested[i + 1]; + if (!found) + ; + else if (position < MAX_FEATURES) + bit_set(result_bits, position); + else + error("_valid_features: overflow"); + position++; - } else if (tmp_requested[i] == ']') { - tmp_requested[i] = '\0'; - found = _match_feature(str_ptr1, available); - if (found) { - if (!result_bits) - result_bits = bit_alloc(MAX_FEATURES); - if (position < MAX_FEATURES) - bit_set(result_bits, (position-1)); - else - error("_valid_features: overflow"); + if (feat_ptr->op_code != FEATURE_OP_XOR) { + if (save_op == FEATURE_OP_OR) + result |= save_result; + else /* (save_op == FEATURE_OP_AND) */ + result &= save_result; } - position++; + } else if (last_op == FEATURE_OP_OR) { result |= found; - if (save_op == FEATURE_OP_AND) - result &= save_result; - else /* FEATURE_OP_OR */ - result |= save_result; - if ((tmp_requested[i + 1] == '&') - && (bracket == 1)) { - last_op = FEATURE_OP_AND; - str_ptr1 = &tmp_requested[i + 2]; - } else if ((tmp_requested[i + 1] == '|') - && (bracket == 1)) { - last_op = FEATURE_OP_OR; - str_ptr1 = &tmp_requested[i + 2]; - } else if ((tmp_requested[i + 1] == '\0') - && (bracket == 1)) { - break; - } else { - debug("_valid_features: parsing failure on %s", - requested); - result = 0; - break; - } - bracket = 0; + } else if (last_op == FEATURE_OP_AND) { + result &= found; } + last_op = feat_ptr->op_code; } - xfree(tmp_requested); + list_iterator_destroy(feat_iter); if (result) { if (!result_bits) { diff --git a/src/slurmctld/node_scheduler.h b/src/slurmctld/node_scheduler.h index d44fa4eff..42a4bf7df 100644 --- a/src/slurmctld/node_scheduler.h +++ b/src/slurmctld/node_scheduler.h @@ -1,10 +1,11 @@ /*****************************************************************************\ * node_scheduler.h - definitions of functions in node_scheduler.c ***************************************************************************** - * Copyright (C) 2004 The Regents of the University of California. + * Copyright (C) 2004-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
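To make the return value described in the _valid_features() comment above concrete, the sketch below evaluates a set of mutually exclusive alternatives against a node's comma-separated feature string and sets one bit per satisfied alternative. A plain unsigned int stands in for the bitstr_t result and match_token() is a simplified stand-in for _match_feature().

#include <stdio.h>
#include <string.h>

/* Return 1 if name appears as a comma-separated token in avail. */
static int match_token(const char *name, const char *avail)
{
	size_t len = strlen(name);
	for (const char *p = avail; p; ) {
		const char *end = strchr(p, ',');
		size_t tok = end ? (size_t)(end - p) : strlen(p);
		if (tok == len && strncmp(p, name, len) == 0)
			return 1;
		p = end ? end + 1 : NULL;
	}
	return 0;
}

int main(void)
{
	const char *alts[] = { "fs1", "fs2", "fs3", "fs4" }; /* [fs1|fs2|fs3|fs4] */
	const char *avail  = "fs1,fs3";                      /* node feature list */
	unsigned int result = 0;                             /* stand-in bitmap   */

	for (int i = 0; i < 4; i++) {
		if (match_token(alts[i], avail))
			result |= 1u << i;
	}
	/* Prints 0x5: bits 0 and 2 set, i.e. fs1 and fs3 satisfied, matching
	 * the example in the _valid_features() comment. */
	printf("result bitmap = 0x%x\n", result);
	return 0;
}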
@@ -40,10 +41,8 @@ /* * allocate_nodes - change state of specified nodes to NODE_STATE_ALLOCATED + * also claim required licenses * IN job_ptr - job being allocated resources - * globals: node_record_count - number of nodes in the system - * node_record_table_ptr - pointer to global node table - * last_node_update - last update time of node table */ extern void allocate_nodes(struct job_record *job_ptr); @@ -54,25 +53,15 @@ extern void allocate_nodes(struct job_record *job_ptr); */ extern void build_node_details(struct job_record *job_ptr); -/* - * count_cpus - report how many cpus are associated with the identified nodes - * IN bitmap - map of nodes to tally - * RET cpu count - * globals: node_record_count - number of nodes configured - * node_record_table_ptr - pointer to global node table - */ -extern int count_cpus(bitstr_t *bitmap); - /* * deallocate_nodes - for a given job, deallocate its nodes and make * their state NODE_STATE_COMPLETING + * also release the job's licenses * IN job_ptr - pointer to terminating job (already in some COMPLETING state) * IN timeout - true if job exhausted time limit, send REQUEST_KILL_TIMELIMIT * RPC instead of REQUEST_TERMINATE_JOB * IN suspended - true if job was already suspended (node's job_run_cnt * already decremented); - * globals: node_record_count - number of nodes in the system - * node_record_table_ptr - pointer to global node table */ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout, bool suspended); diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c index 4d31c895a..a88b5998b 100644 --- a/src/slurmctld/partition_mgr.c +++ b/src/slurmctld/partition_mgr.c @@ -2,12 +2,12 @@ * partition_mgr.c - manage the partition information of slurm * Note: there is a global partition list (part_list) and * time stamp (last_part_update) - * $Id: partition_mgr.c 13279 2008-02-15 00:14:16Z jette $ + * $Id: partition_mgr.c 14068 2008-05-19 15:58:22Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
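The allocate_nodes()/deallocate_nodes() comments above note that licenses are now claimed at allocation and released at deallocation. A minimal sketch of that claim/release accounting is shown below; the names (struct lic, lic_claim, lic_release) are hypothetical and do not correspond to SLURM's actual license code.

#include <stdio.h>

struct lic { const char *name; int total; int in_use; };

static int lic_claim(struct lic *l, int cnt)
{
	if (l->in_use + cnt > l->total)
		return -1;              /* job must wait for licenses */
	l->in_use += cnt;
	return 0;
}

static void lic_release(struct lic *l, int cnt)
{
	l->in_use -= cnt;
	if (l->in_use < 0)
		l->in_use = 0;
}

int main(void)
{
	struct lic matlab = { "matlab", 4, 0 };

	lic_claim(&matlab, 3);                  /* allocate_nodes() path   */
	printf("%s in use: %d/%d\n", matlab.name, matlab.in_use, matlab.total);
	if (lic_claim(&matlab, 2) != 0)         /* second job would exceed */
		printf("second claim deferred\n");
	lic_release(&matlab, 3);                /* deallocate_nodes() path */
	printf("%s in use: %d/%d\n", matlab.name, matlab.in_use, matlab.total);
	return 0;
}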
@@ -65,10 +65,13 @@ #include "src/slurmctld/sched_plugin.h" #include "src/slurmctld/slurmctld.h" +/* Change PART_STATE_VERSION value when changing the state save format */ +#define PART_STATE_VERSION "VER001" + /* Global variables */ struct part_record default_part; /* default configuration values */ List part_list = NULL; /* partition list */ -char default_part_name[MAX_SLURM_NAME]; /* name of default partition */ +char *default_part_name = NULL; /* name of default partition */ struct part_record *default_part_loc = NULL; /* default partition location */ time_t last_part_update; /* time of last update to partition records */ @@ -214,15 +217,19 @@ struct part_record *create_part_record(void) (struct part_record *) xmalloc(sizeof(struct part_record)); xassert (part_ptr->magic = PART_MAGIC); /* set value */ - strcpy(part_ptr->name, "DEFAULT"); - part_ptr->hidden = default_part.hidden; - part_ptr->max_time = default_part.max_time; - part_ptr->max_nodes = default_part.max_nodes; - part_ptr->min_nodes = default_part.min_nodes; - part_ptr->root_only = default_part.root_only; - part_ptr->state_up = default_part.state_up; - part_ptr->shared = default_part.shared; - part_ptr->node_bitmap = NULL; + part_ptr->name = xstrdup("DEFAULT"); + part_ptr->disable_root_jobs = default_part.disable_root_jobs; + part_ptr->hidden = default_part.hidden; + part_ptr->max_time = default_part.max_time; + part_ptr->max_nodes = default_part.max_nodes; + part_ptr->max_nodes_orig = default_part.max_nodes; + part_ptr->min_nodes = default_part.min_nodes; + part_ptr->min_nodes_orig = default_part.min_nodes; + part_ptr->root_only = default_part.root_only; + part_ptr->state_up = default_part.state_up; + part_ptr->max_share = default_part.max_share; + part_ptr->priority = default_part.priority; + part_ptr->node_bitmap = NULL; if (default_part.allow_groups) part_ptr->allow_groups = xstrdup(default_part.allow_groups); @@ -282,6 +289,7 @@ int dump_all_part_state(void) START_TIMER; /* write header: time */ + packstr(PART_STATE_VERSION, buffer); pack_time(time(NULL), buffer); /* write partition records to buffer */ @@ -361,19 +369,20 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer) else default_part_flag = 0; - packstr(part_ptr->name, buffer); - pack32(part_ptr->max_time, buffer); - pack32(part_ptr->max_nodes, buffer); - pack32(part_ptr->min_nodes, buffer); + packstr(part_ptr->name, buffer); + pack32(part_ptr->max_time, buffer); + pack32(part_ptr->max_nodes_orig, buffer); + pack32(part_ptr->min_nodes_orig, buffer); - pack16(default_part_flag, buffer); - pack16(part_ptr->hidden, buffer); - pack16(part_ptr->root_only, buffer); - pack16(part_ptr->shared, buffer); + pack16(default_part_flag, buffer); + pack16(part_ptr->hidden, buffer); + pack16(part_ptr->root_only, buffer); + pack16(part_ptr->max_share, buffer); + pack16(part_ptr->priority, buffer); - pack16(part_ptr->state_up, buffer); - packstr(part_ptr->allow_groups, buffer); - packstr(part_ptr->nodes, buffer); + pack16(part_ptr->state_up, buffer); + packstr(part_ptr->allow_groups, buffer); + packstr(part_ptr->nodes, buffer); } /* @@ -387,12 +396,14 @@ int load_all_part_state(void) char *part_name, *allow_groups, *nodes, *state_file, *data = NULL; uint32_t max_time, max_nodes, min_nodes; time_t time; - uint16_t name_len, def_part_flag, hidden, root_only, shared, state_up; + uint16_t def_part_flag, hidden, root_only; + uint16_t max_share, priority, state_up; struct part_record *part_ptr; - uint32_t data_size = 0; + uint32_t data_size = 0, name_len; int 
data_allocated, data_read = 0, error_code = 0, part_cnt = 0; int state_fd; Buf buffer; + char *ver_str = NULL; /* read the file */ state_file = xstrdup(slurmctld_conf.state_save_location); @@ -429,6 +440,18 @@ int load_all_part_state(void) unlock_state_files(); buffer = create_buf(data, data_size); + + safe_unpackstr_xmalloc( &ver_str, &name_len, buffer); + debug3("Version string in part_state header is %s", ver_str); + if ((!ver_str) || (strcmp(ver_str, PART_STATE_VERSION) != 0)) { + error("**********************************************************"); + error("Can not recover partition state, data version incompatable"); + error("**********************************************************"); + xfree(ver_str); + free_buf(buffer); + return EFAULT; + } + xfree(ver_str); safe_unpack_time(&time, buffer); while (remaining_buf(buffer) > 0) { @@ -438,9 +461,10 @@ int load_all_part_state(void) safe_unpack32(&min_nodes, buffer); safe_unpack16(&def_part_flag, buffer); - safe_unpack16(&hidden, buffer); + safe_unpack16(&hidden, buffer); safe_unpack16(&root_only, buffer); - safe_unpack16(&shared, buffer); + safe_unpack16(&max_share, buffer); + safe_unpack16(&priority, buffer); safe_unpack16(&state_up, buffer); safe_unpackstr_xmalloc(&allow_groups, &name_len, buffer); @@ -449,10 +473,10 @@ int load_all_part_state(void) /* validity test as possible */ if ((def_part_flag > 1) || (root_only > 1) || (hidden > 1) || - (shared > SHARED_EXCLUSIVE) || (state_up > 1)) { + (state_up > 1)) { error("Invalid data for partition %s: def_part_flag=%u, " - "hidden=%u root_only=%u, shared=%u, state_up=%u", - part_name, def_part_flag, hidden, root_only, shared, + "hidden=%u root_only=%u, state_up=%u", + part_name, def_part_flag, hidden, root_only, state_up); error("No more partition data will be processed from " "the checkpoint file"); @@ -467,19 +491,23 @@ int load_all_part_state(void) if (part_ptr) { part_cnt++; - part_ptr->hidden = hidden; - part_ptr->max_time = max_time; - part_ptr->max_nodes = max_nodes; - part_ptr->min_nodes = min_nodes; + part_ptr->hidden = hidden; + part_ptr->max_time = max_time; + part_ptr->max_nodes = max_nodes; + part_ptr->max_nodes_orig = max_nodes; + part_ptr->min_nodes = min_nodes; + part_ptr->min_nodes_orig = min_nodes; if (def_part_flag) { - strncpy(default_part_name, part_name, MAX_SLURM_NAME); + xfree(default_part_name); + default_part_name = xstrdup(part_name); default_part_loc = part_ptr; } - part_ptr->root_only = root_only; - part_ptr->shared = shared; - part_ptr->state_up = state_up; + part_ptr->root_only = root_only; + part_ptr->max_share = max_share; + part_ptr->priority = priority; + part_ptr->state_up = state_up; xfree(part_ptr->allow_groups); - part_ptr->allow_groups = allow_groups; + part_ptr->allow_groups = allow_groups; xfree(part_ptr->nodes); part_ptr->nodes = nodes; } else { @@ -525,16 +553,21 @@ int init_part_conf(void) { last_part_update = time(NULL); - strcpy(default_part.name, "DEFAULT"); - default_part.hidden = 0; - default_part.max_time = INFINITE; - default_part.max_nodes = INFINITE; - default_part.min_nodes = 1; - default_part.root_only = 0; - default_part.state_up = 1; - default_part.shared = SHARED_NO; - default_part.total_nodes = 0; - default_part.total_cpus = 0; + xfree(default_part.name); /* needed for reconfig */ + default_part.name = xstrdup("DEFAULT"); + default_part.disable_root_jobs = slurmctld_conf.disable_root_jobs; + default_part.hidden = 0; + default_part.max_time = INFINITE; + default_part.max_nodes = INFINITE; + default_part.max_nodes_orig = INFINITE; 
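The PART_STATE_VERSION header added above guards state recovery: the version string is packed at the front of the partition state file, and load_all_part_state() refuses to restore (returning EFAULT) when it does not match. Below is a minimal sketch of the same guard using plain stdio in place of the Buf pack/unpack helpers.

#include <stdio.h>
#include <string.h>

#define STATE_VERSION "VER001"  /* bump when the record layout changes */

static int write_header(FILE *fp)
{
	return fwrite(STATE_VERSION, 1, sizeof(STATE_VERSION), fp) ==
	       sizeof(STATE_VERSION) ? 0 : -1;
}

static int check_header(FILE *fp)
{
	char ver[sizeof(STATE_VERSION)] = "";

	if (fread(ver, 1, sizeof(ver), fp) != sizeof(ver) ||
	    strcmp(ver, STATE_VERSION) != 0) {
		fprintf(stderr, "state file version mismatch, not recovering\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	FILE *fp = tmpfile();

	if (!fp)
		return 1;
	write_header(fp);
	/* ... per-partition records would follow the header ... */
	rewind(fp);
	if (check_header(fp) == 0)
		puts("version ok, restoring partition records");
	fclose(fp);
	return 0;
}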
+ default_part.min_nodes = 1; + default_part.min_nodes_orig = 1; + default_part.root_only = 0; + default_part.state_up = 1; + default_part.max_share = 1; + default_part.priority = 1; + default_part.total_nodes = 0; + default_part.total_cpus = 0; xfree(default_part.nodes); xfree(default_part.allow_groups); xfree(default_part.allow_uids); @@ -548,7 +581,7 @@ int init_part_conf(void) if (part_list == NULL) fatal ("memory allocation failure"); - strcpy(default_part_name, ""); + xfree(default_part_name); default_part_loc = (struct part_record *) NULL; return 0; @@ -580,6 +613,7 @@ static void _list_delete_part(void *part_entry) break; } } + xfree(part_ptr->name); xfree(part_ptr->allow_groups); xfree(part_ptr->allow_uids); xfree(part_ptr->nodes); @@ -597,11 +631,13 @@ static void _list_delete_part(void *part_entry) */ int list_find_part(void *part_entry, void *key) { + if (key == NULL) + return 0; + if (strcmp(key, "universal_key") == 0) return 1; - if (strncmp(((struct part_record *) part_entry)->name, - (char *) key, MAX_SLURM_NAME) == 0) + if (strcmp(((struct part_record *)part_entry)->name, (char *) key) == 0) return 1; return 0; @@ -715,14 +751,8 @@ void pack_part(struct part_record *part_ptr, Buf buffer) packstr(part_ptr->name, buffer); pack32(part_ptr->max_time, buffer); - altered = part_ptr->max_nodes; - select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, - &altered); - pack32(altered, buffer); - altered = part_ptr->min_nodes; - select_g_alter_node_cnt(SELECT_APPLY_NODE_MIN_OFFSET, - &altered); - pack32(altered, buffer); + pack32(part_ptr->max_nodes_orig, buffer); + pack32(part_ptr->min_nodes_orig, buffer); altered = part_ptr->total_nodes; select_g_alter_node_cnt(SELECT_APPLY_NODE_MAX_OFFSET, &altered); @@ -731,10 +761,12 @@ void pack_part(struct part_record *part_ptr, Buf buffer) &node_scaling); pack16(node_scaling, buffer); pack32(part_ptr->total_cpus, buffer); - pack16(default_part_flag, buffer); - pack16(part_ptr->hidden, buffer); - pack16(part_ptr->root_only, buffer); - pack16(part_ptr->shared, buffer); + pack16(default_part_flag, buffer); + pack16(part_ptr->disable_root_jobs, buffer); + pack16(part_ptr->hidden, buffer); + pack16(part_ptr->root_only, buffer); + pack16(part_ptr->max_share, buffer); + pack16(part_ptr->priority, buffer); pack16(part_ptr->state_up, buffer); packstr(part_ptr->allow_groups, buffer); @@ -742,7 +774,7 @@ void pack_part(struct part_record *part_ptr, Buf buffer) if (part_ptr->node_bitmap) { bit_fmt(node_inx_ptr, BUF_SIZE, part_ptr->node_bitmap); - packstr(node_inx_ptr, buffer); + packstr((char *)node_inx_ptr, buffer); } else packstr("", buffer); } @@ -760,10 +792,8 @@ int update_part(update_part_msg_t * part_desc) int error_code; struct part_record *part_ptr; - if ((part_desc->name == NULL) || - (strlen(part_desc->name) >= MAX_SLURM_NAME)) { - error("update_part: invalid partition name %s", - part_desc->name); + if (part_desc->name == NULL) { + error("update_part: invalid partition name, NULL"); return ESLURM_INVALID_PARTITION_NAME; } @@ -775,14 +805,15 @@ int update_part(update_part_msg_t * part_desc) info("update_part: partition %s does not exist, " "being created", part_desc->name); part_ptr = create_part_record(); - strcpy(part_ptr->name, part_desc->name); + xfree(part_ptr->name); + part_ptr->name = xstrdup(part_desc->name); } last_part_update = time(NULL); if (part_desc->hidden != (uint16_t) NO_VAL) { - info("update_part: setting hidden to %u for partition %s", - part_desc->hidden, part_desc->name); + info("update_part: setting hidden to %u for 
partition %s", + part_desc->hidden, part_desc->name); part_ptr->hidden = part_desc->hidden; } @@ -795,13 +826,19 @@ int update_part(update_part_msg_t * part_desc) if (part_desc->max_nodes != NO_VAL) { info("update_part: setting max_nodes to %u for partition %s", part_desc->max_nodes, part_desc->name); - part_ptr->max_nodes = part_desc->max_nodes; + part_ptr->max_nodes = part_desc->max_nodes; + part_ptr->max_nodes_orig = part_desc->max_nodes; + select_g_alter_node_cnt(SELECT_SET_BP_CNT, + &part_ptr->max_nodes); } if (part_desc->min_nodes != NO_VAL) { info("update_part: setting min_nodes to %u for partition %s", part_desc->min_nodes, part_desc->name); - part_ptr->min_nodes = part_desc->min_nodes; + part_ptr->min_nodes = part_desc->min_nodes; + part_ptr->min_nodes_orig = part_desc->min_nodes; + select_g_alter_node_cnt(SELECT_SET_BP_CNT, + &part_ptr->min_nodes); } if (part_desc->root_only != (uint16_t) NO_VAL) { @@ -816,17 +853,39 @@ int update_part(update_part_msg_t * part_desc) part_ptr->state_up = part_desc->state_up; } - if (part_desc->shared != (uint16_t) NO_VAL) { - info("update_part: setting shared to %u for partition %s", - part_desc->shared, part_desc->name); - part_ptr->shared = part_desc->shared; + if (part_desc->max_share != (uint16_t) NO_VAL) { + uint16_t force = part_desc->max_share & SHARED_FORCE; + uint16_t val = part_desc->max_share & (~SHARED_FORCE); + char tmp_str[24]; + if (val == 0) + snprintf(tmp_str, sizeof(tmp_str), "EXCLUSIVE"); + else if (force) + snprintf(tmp_str, sizeof(tmp_str), "FORCE:%u", val); + else if (val == 1) + snprintf(tmp_str, sizeof(tmp_str), "NO"); + else + snprintf(tmp_str, sizeof(tmp_str), "YES:%u", val); + info("update_part: setting share to %s for partition %s", + tmp_str, part_desc->name); + part_ptr->max_share = part_desc->max_share; + } + + if (part_desc->priority != (uint16_t) NO_VAL) { + info("update_part: setting priority to %u for partition %s", + part_desc->priority, part_desc->name); + part_ptr->priority = part_desc->priority; } - if ((part_desc->default_part == 1) && - (strcmp(default_part_name, part_desc->name) != 0)) { - info("update_part: changing default partition from %s to %s", - default_part_name, part_desc->name); - strncpy(default_part_name, part_desc->name, MAX_SLURM_NAME); + if (part_desc->default_part == 1) { + if (default_part_name == NULL) { + info("update_part: setting default partition to %s", + part_desc->name); + } else if (strcmp(default_part_name, part_desc->name) != 0) { + info("update_part: changing default partition from %s to %s", + default_part_name, part_desc->name); + } + xfree(default_part_name); + default_part_name = xstrdup(part_desc->name); default_part_loc = part_ptr; } @@ -876,9 +935,12 @@ int update_part(update_part_msg_t * part_desc) if (error_code == SLURM_SUCCESS) { slurm_sched_partition_change(); /* notify sched plugin */ - reset_job_priority(); /* free jobs */ - if (select_g_block_init(part_list) != SLURM_SUCCESS ) - error("failed to update node selection plugin state"); + select_g_reconfigure(); /* notify select plugin too */ + reset_job_priority(); /* free jobs */ + + /* I am not sure why this was ever there (da) */ +/* if (select_g_block_init(part_list) != SLURM_SUCCESS ) */ +/* error("failed to update node selection plugin state"); */ } return error_code; @@ -1086,6 +1148,8 @@ void part_fini (void) list_destroy(part_list); part_list = NULL; } + xfree(default_part_name); + xfree(default_part.name); default_part_loc = (struct part_record *) NULL; } diff --git a/src/slurmctld/ping_nodes.c 
b/src/slurmctld/ping_nodes.c index 9b9e3fac3..85da41b8d 100644 --- a/src/slurmctld/ping_nodes.c +++ b/src/slurmctld/ping_nodes.c @@ -3,9 +3,10 @@ * Note: there is a global node table (node_record_table_ptr) ***************************************************************************** * Copyright (C) 2003-2006 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -65,6 +66,8 @@ static pthread_mutex_t lock_mutex = PTHREAD_MUTEX_INITIALIZER; static int ping_count = 0; +static void _run_health_check(void); + /* struct timeval start_time, end_time; */ /* @@ -112,15 +115,17 @@ void ping_end (void) else fatal ("ping_count < 0"); slurm_mutex_unlock(&lock_mutex); - - /* gettimeofday(&end_time, NULL); */ -/* start = start_time.tv_sec; */ -/* start *= 1000000; */ -/* start += start_time.tv_usec; */ -/* end = end_time.tv_sec; */ -/* end *= 1000000; */ -/* end += end_time.tv_usec; */ -/* info("done with ping took %ld",(end-start)); */ + +#if 0 + gettimeofday(&end_time, NULL); + start = start_time.tv_sec; + start *= 1000000; + start += start_time.tv_usec; + end = end_time.tv_sec; + end *= 1000000; + end += end_time.tv_usec; + info("done with ping took %ld",(end-start)); +#endif } /* @@ -133,12 +138,22 @@ void ping_nodes (void) int i; time_t now, still_live_time, node_dead_time; static time_t last_ping_time = (time_t) 0; + static time_t last_health_check = (time_t) 0; uint16_t base_state, no_resp_flag; bool restart_flag; hostlist_t down_hostlist = NULL; char host_str[MAX_SLURM_NAME]; agent_arg_t *ping_agent_args = NULL; agent_arg_t *reg_agent_args = NULL; + + now = time (NULL); + if (slurmctld_conf.health_check_interval && + (difftime(now, last_health_check) >= + slurmctld_conf.health_check_interval)) { + last_health_check = now; + _run_health_check(); + return; + } ping_agent_args = xmalloc (sizeof (agent_arg_t)); ping_agent_args->msg_type = REQUEST_PING; @@ -160,7 +175,6 @@ void ping_nodes (void) * Because of this, we extend the SlurmdTimeout by the * time needed to complete a ping of all nodes. 
*/ - now = time (NULL); if ((slurmctld_conf.slurmd_timeout == 0) || (last_ping_time == (time_t) 0)) { node_dead_time = (time_t) 0; @@ -270,3 +284,46 @@ void ping_nodes (void) hostlist_destroy(down_hostlist); } } + +static void _run_health_check(void) +{ + int i; + uint16_t base_state; + char host_str[MAX_SLURM_NAME]; + agent_arg_t *check_agent_args = NULL; + + check_agent_args = xmalloc (sizeof (agent_arg_t)); + check_agent_args->msg_type = REQUEST_HEALTH_CHECK; + check_agent_args->retry = 0; + check_agent_args->hostlist = hostlist_create(""); + + for (i = 0; i < node_record_count; i++) { + struct node_record *node_ptr; + + node_ptr = &node_record_table_ptr[i]; + base_state = node_ptr->node_state & NODE_STATE_BASE; + + if (base_state == NODE_STATE_DOWN) + continue; + +#ifdef HAVE_FRONT_END /* Operate only on front-end */ + if (i > 0) + continue; +#endif + + hostlist_push(check_agent_args->hostlist, node_ptr->name); + check_agent_args->node_count++; + } + + if (check_agent_args->node_count == 0) { + hostlist_destroy(check_agent_args->hostlist); + xfree (check_agent_args); + } else { + hostlist_uniq(check_agent_args->hostlist); + hostlist_ranged_string(check_agent_args->hostlist, + sizeof(host_str), host_str); + verbose("Spawning health check agent for %s", host_str); + ping_begin(); + agent_queue_request(check_agent_args); + } +} diff --git a/src/slurmctld/ping_nodes.h b/src/slurmctld/ping_nodes.h index 23b6cd794..2fae42d4b 100644 --- a/src/slurmctld/ping_nodes.h +++ b/src/slurmctld/ping_nodes.h @@ -4,7 +4,7 @@ * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmctld/power_save.c b/src/slurmctld/power_save.c index 8e894fef9..4e6d3edba 100644 --- a/src/slurmctld/power_save.c +++ b/src/slurmctld/power_save.c @@ -8,7 +8,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -56,40 +56,15 @@ #define _DEBUG 0 -/* NOTE: These paramters will be moved into the slurm.conf file in version 1.3 - * Directly modify the default values here in order to enable this capability - * in SLURM version 1.2. */ - -/* Node becomes elligible for power saving mode after being idle for - * this number of seconds. A negative number disables power saving mode. */ -#define DEFAULT_IDLE_TIME -1 - -/* Maximum number of nodes to be placed into or removed from power saving mode - * per minute. Use this to prevent rapid changes in power requirements. - * A value of zero results in no limits being imposed. */ -#define DEFAULT_SUSPEND_RATE 60 -#define DEFAULT_RESUME_RATE 60 - -/* Programs to be executed to place nodes or out of power saving mode. These - * are run as user SlurmUser. The hostname of the node to be modified will be - * passed as an argument to the program. 
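ping_nodes() above now runs the node health check at most once per HealthCheckInterval, using difftime() against a static timestamp and skipping the regular ping pass on cycles where the check fires. A minimal standalone sketch of that gate follows; the node scan and agent spawn are reduced to printf() calls and the interval is a stand-in for the slurmctld_conf value.

#include <stdio.h>
#include <time.h>

static time_t last_health_check = 0;
static int    health_check_interval = 300;  /* stand-in for slurmctld_conf */

static void run_health_check(void)
{
	printf("spawning health check agent for all responding nodes\n");
}

static void ping_nodes_tick(void)
{
	time_t now = time(NULL);

	if (health_check_interval &&
	    difftime(now, last_health_check) >= health_check_interval) {
		last_health_check = now;
		run_health_check();
		return;         /* skip the regular ping pass this cycle */
	}
	printf("regular ping pass\n");
}

int main(void)
{
	ping_nodes_tick();      /* interval elapsed: health check runs */
	ping_nodes_tick();      /* immediately after: regular ping     */
	return 0;
}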
*/ -#define DEFAULT_SUSPEND_PROGRAM "/home/jette/slurm.mdev/sbin/slurm.node.suspend" -#define DEFAULT_RESUME_PROGRAM "/home/jette/slurm.mdev/sbin/slurm.node.resume" - -/* Individual nodes or all nodes in selected partitions can be excluded from - * being placed into power saving mode. SLURM hostlist expressions can be used. - * Multiple partitions may be listed with a comma separator. */ -#define DEFAULT_EXCLUDE_SUSPEND_NODES NULL -#define DEFAULT_EXCLUDE_SUSPEND_PARTITIONS NULL - int idle_time, suspend_rate, resume_rate; char *suspend_prog = NULL, *resume_prog = NULL; char *exc_nodes = NULL, *exc_parts = NULL; - +time_t last_config = (time_t) 0; bitstr_t *exc_node_bitmap = NULL; int suspend_cnt, resume_cnt; +static void _clear_power_config(void); static void _do_power_work(void); static void _do_resume(char *host); static void _do_suspend(char *host); @@ -292,22 +267,37 @@ static void _kill_zombies(void) ; } +/* Free all allocated memory */ +static void _clear_power_config(void) +{ + xfree(suspend_prog); + xfree(resume_prog); + xfree(exc_nodes); + xfree(exc_parts); + FREE_NULL_BITMAP(exc_node_bitmap); +} + /* Initialize power_save module paramters. * Return 0 on valid configuration to run power saving, * otherwise log the problem and return -1 */ static int _init_power_config(void) { - idle_time = DEFAULT_IDLE_TIME; - suspend_rate = DEFAULT_SUSPEND_RATE; - resume_rate = DEFAULT_RESUME_RATE; - if (DEFAULT_SUSPEND_PROGRAM) - suspend_prog = xstrdup(DEFAULT_SUSPEND_PROGRAM); - if (DEFAULT_RESUME_PROGRAM) - resume_prog = xstrdup(DEFAULT_RESUME_PROGRAM); - if (DEFAULT_EXCLUDE_SUSPEND_NODES) - exc_nodes = xstrdup(DEFAULT_EXCLUDE_SUSPEND_NODES); - if (DEFAULT_EXCLUDE_SUSPEND_PARTITIONS) - exc_parts = xstrdup(DEFAULT_EXCLUDE_SUSPEND_PARTITIONS); + slurm_ctl_conf_t *conf = slurm_conf_lock(); + + last_config = slurmctld_conf.last_update; + idle_time = conf->suspend_time - 1; + suspend_rate = conf->suspend_rate; + resume_rate = conf->resume_rate; + _clear_power_config(); + if (conf->suspend_program) + suspend_prog = xstrdup(conf->suspend_program); + if (conf->resume_program) + resume_prog = xstrdup(conf->resume_program); + if (conf->suspend_exc_nodes) + exc_nodes = xstrdup(conf->suspend_exc_nodes); + if (conf->suspend_exc_parts) + exc_parts = xstrdup(conf->suspend_exc_parts); + slurm_conf_unlock(); if (idle_time < 0) { /* not an error */ debug("power_save module disabled, idle_time < 0"); @@ -411,25 +401,22 @@ static bool _valid_prog(char *file_name) */ extern void *init_power_save(void *arg) { - /* Locks: Read config, node, and partitions */ - slurmctld_lock_t config_read_lock = { - READ_LOCK, NO_LOCK, READ_LOCK, READ_LOCK }; /* Locks: Write node, read jobs and partitions */ slurmctld_lock_t node_write_lock = { NO_LOCK, READ_LOCK, WRITE_LOCK, READ_LOCK }; - int rc; time_t now, last_power_scan = 0; - lock_slurmctld(config_read_lock); - rc = _init_power_config(); - unlock_slurmctld(config_read_lock); - if (rc) + if (_init_power_config()) goto fini; while (slurmctld_config.shutdown_time == 0) { sleep(1); _kill_zombies(); + if ((last_config != slurmctld_conf.last_update) + && (_init_power_config())) + goto fini; + /* Only run every 60 seconds or after * a node state change, whichever * happens first */ @@ -444,11 +431,6 @@ extern void *init_power_save(void *arg) last_power_scan = now; } -fini: /* Free all allocated memory */ - xfree(suspend_prog); - xfree(resume_prog); - xfree(exc_nodes); - xfree(exc_parts); - FREE_NULL_BITMAP(exc_node_bitmap); +fini: _clear_power_config(); return NULL; } diff --git 
a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c index 924a118bd..80aa09c03 100644 --- a/src/slurmctld/proc_req.c +++ b/src/slurmctld/proc_req.c @@ -1,13 +1,11 @@ /*****************************************************************************\ * proc_req.c - process incomming messages to slurmctld - * - * $Id: proc_req.c 13871 2008-04-15 15:47:33Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette@llnl.gov>, Kevin Tew - * <tew1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * Written by Morris Jette <jette@llnl.gov>, et. al. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -69,18 +67,20 @@ #include "src/common/switch.h" #include "src/common/xstring.h" #include "src/common/forward.h" +#include "src/common/assoc_mgr.h" #include "src/slurmctld/agent.h" +#include "src/slurmctld/job_scheduler.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/proc_req.h" #include "src/slurmctld/read_config.h" #include "src/slurmctld/sched_plugin.h" #include "src/slurmctld/slurmctld.h" +#include "src/slurmctld/srun_comm.h" #include "src/slurmctld/state_save.h" #include "src/slurmctld/trigger_mgr.h" static void _fill_ctld_conf(slurm_ctl_conf_t * build_ptr); -static inline bool _is_super_user(uid_t uid); static void _kill_job_on_msg_fail(uint32_t job_id); static int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid, uint32_t *step_id); @@ -89,6 +89,7 @@ static int _make_step_cred(struct step_record *step_rec, inline static void _slurm_rpc_allocate_resources(slurm_msg_t * msg); inline static void _slurm_rpc_checkpoint(slurm_msg_t * msg); inline static void _slurm_rpc_checkpoint_comp(slurm_msg_t * msg); +inline static void _slurm_rpc_checkpoint_task_comp(slurm_msg_t * msg); inline static void _slurm_rpc_delete_partition(slurm_msg_t * msg); inline static void _slurm_rpc_complete_job_allocation(slurm_msg_t * msg); inline static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg); @@ -97,6 +98,7 @@ inline static void _slurm_rpc_dump_jobs(slurm_msg_t * msg); inline static void _slurm_rpc_dump_nodes(slurm_msg_t * msg); inline static void _slurm_rpc_dump_partitions(slurm_msg_t * msg); inline static void _slurm_rpc_epilog_complete(slurm_msg_t * msg); +inline static void _slurm_rpc_job_notify(slurm_msg_t * msg); inline static void _slurm_rpc_job_ready(slurm_msg_t * msg); inline static void _slurm_rpc_job_step_kill(slurm_msg_t * msg); inline static void _slurm_rpc_job_step_create(slurm_msg_t * msg); @@ -124,6 +126,8 @@ inline static void _slurm_rpc_update_node(slurm_msg_t * msg); inline static void _slurm_rpc_update_partition(slurm_msg_t * msg); inline static void _slurm_rpc_end_time(slurm_msg_t * msg); inline static void _update_cred_key(void); +inline static void _slurm_rpc_set_debug_level(slurm_msg_t *msg); +inline static void _slurm_rpc_accounting_update_msg(slurm_msg_t *msg); /* @@ -250,6 +254,10 @@ void slurmctld_req (slurm_msg_t * msg) _slurm_rpc_checkpoint_comp(msg); slurm_free_checkpoint_comp_msg(msg->data); break; + case REQUEST_CHECKPOINT_TASK_COMP: + _slurm_rpc_checkpoint_task_comp(msg); + slurm_free_checkpoint_task_comp_msg(msg->data); + break; case REQUEST_SUSPEND: _slurm_rpc_suspend(msg); slurm_free_suspend_msg(msg->data); @@ 
-286,6 +294,18 @@ void slurmctld_req (slurm_msg_t * msg) _slurm_rpc_trigger_clear(msg); slurm_free_trigger_msg(msg->data); break; + case REQUEST_JOB_NOTIFY: + _slurm_rpc_job_notify(msg); + slurm_free_job_notify_msg(msg->data); + break; + case REQUEST_SET_DEBUG_LEVEL: + _slurm_rpc_set_debug_level(msg); + slurm_free_set_debug_level_msg(msg->data); + break; + case ACCOUNTING_UPDATE_MSG: + _slurm_rpc_accounting_update_msg(msg); + slurm_free_accounting_update_msg(msg->data); + break; default: error("invalid RPC msg_type=%d", msg->msg_type); slurm_send_rc_msg(msg, EINVAL); @@ -303,38 +323,82 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr) slurm_ctl_conf_t *conf = slurm_conf_lock(); conf_ptr->last_update = time(NULL); + conf_ptr->accounting_storage_enforce = conf->accounting_storage_enforce; + conf_ptr->accounting_storage_host = + xstrdup(conf->accounting_storage_host); + conf_ptr->accounting_storage_loc = + xstrdup(conf->accounting_storage_loc); + conf_ptr->accounting_storage_pass = + xstrdup(conf->accounting_storage_pass); + conf_ptr->accounting_storage_port = conf->accounting_storage_port; + conf_ptr->accounting_storage_type = + xstrdup(conf->accounting_storage_type); + conf_ptr->accounting_storage_user = + xstrdup(conf->accounting_storage_user); + conf_ptr->accounting_storage_port = conf->accounting_storage_port; conf_ptr->authtype = xstrdup(conf->authtype); + conf_ptr->backup_addr = xstrdup(conf->backup_addr); conf_ptr->backup_controller = xstrdup(conf->backup_controller); + conf_ptr->boot_time = slurmctld_config.boot_time; + conf_ptr->cache_groups = conf->cache_groups; conf_ptr->checkpoint_type = xstrdup(conf->checkpoint_type); + conf_ptr->cluster_name = xstrdup(conf->cluster_name); conf_ptr->control_addr = xstrdup(conf->control_addr); conf_ptr->control_machine = xstrdup(conf->control_machine); + conf_ptr->crypto_type = xstrdup(conf->crypto_type); + + conf_ptr->def_mem_per_task = conf->def_mem_per_task; conf_ptr->disable_root_jobs = conf->disable_root_jobs; + conf_ptr->epilog = xstrdup(conf->epilog); + conf_ptr->epilog_msg_time = conf->epilog_msg_time; + conf_ptr->fast_schedule = conf->fast_schedule; conf_ptr->first_job_id = conf->first_job_id; + conf_ptr->inactive_limit = conf->inactive_limit; - conf_ptr->job_acct_logfile = xstrdup(conf->job_acct_logfile); - conf_ptr->job_acct_freq = conf->job_acct_freq; - conf_ptr->job_acct_type = xstrdup(conf->job_acct_type); + + conf_ptr->health_check_interval = conf->health_check_interval; + conf_ptr->health_check_program = xstrdup(conf->health_check_program); + + conf_ptr->job_acct_gather_freq = conf->job_acct_gather_freq; + conf_ptr->job_acct_gather_type = xstrdup(conf->job_acct_gather_type); + + conf_ptr->job_comp_host = xstrdup(conf->job_comp_host); conf_ptr->job_comp_loc = xstrdup(conf->job_comp_loc); + conf_ptr->job_comp_pass = xstrdup(conf->job_comp_pass); + conf_ptr->job_comp_port = conf->job_comp_port; conf_ptr->job_comp_type = xstrdup(conf->job_comp_type); + conf_ptr->job_comp_user = xstrdup(conf->job_comp_user); + conf_ptr->job_credential_private_key = xstrdup(conf-> job_credential_private_key); conf_ptr->job_credential_public_certificate = xstrdup(conf-> job_credential_public_certificate); conf_ptr->job_file_append = conf->job_file_append; + conf_ptr->job_requeue = conf->job_requeue; + conf_ptr->get_env_timeout = conf->get_env_timeout; + conf_ptr->kill_wait = conf->kill_wait; + + conf_ptr->licenses = xstrdup(conf->licenses); + conf_ptr->mail_prog = xstrdup(conf->mail_prog); conf_ptr->max_job_cnt = conf->max_job_cnt; + 
conf_ptr->max_mem_per_task = conf->max_mem_per_task; conf_ptr->min_job_age = conf->min_job_age; conf_ptr->mpi_default = xstrdup(conf->mpi_default); conf_ptr->msg_timeout = conf->msg_timeout; + conf_ptr->next_job_id = get_next_job_id(); + conf_ptr->node_prefix = xstrdup(conf->node_prefix); + conf_ptr->plugindir = xstrdup(conf->plugindir); conf_ptr->plugstack = xstrdup(conf->plugstack); + conf_ptr->private_data = conf->private_data; conf_ptr->proctrack_type = xstrdup(conf->proctrack_type); conf_ptr->prolog = xstrdup(conf->prolog); conf_ptr->propagate_prio_process = @@ -342,9 +406,18 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr) conf_ptr->propagate_rlimits = xstrdup(conf->propagate_rlimits); conf_ptr->propagate_rlimits_except = xstrdup(conf-> propagate_rlimits_except); + + conf_ptr->resume_program = xstrdup(conf->resume_program); + conf_ptr->resume_rate = conf->resume_rate; conf_ptr->ret2service = conf->ret2service; + + if (conf->sched_params) + conf_ptr->sched_params = xstrdup(conf->sched_params); + else + conf_ptr->sched_params = slurm_sched_p_get_conf(); conf_ptr->schedport = conf->schedport; conf_ptr->schedrootfltr = conf->schedrootfltr; + conf_ptr->sched_time_slice = conf->sched_time_slice; conf_ptr->schedtype = xstrdup(conf->schedtype); conf_ptr->select_type = xstrdup(conf->select_type); conf_ptr->select_type_param = conf->select_type_param; @@ -362,18 +435,25 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr) conf_ptr->slurmd_spooldir = xstrdup(conf->slurmd_spooldir); conf_ptr->slurmd_timeout = conf->slurmd_timeout; conf_ptr->slurm_conf = xstrdup(conf->slurm_conf); + conf_ptr->srun_prolog = xstrdup(conf->srun_prolog); + conf_ptr->srun_epilog = xstrdup(conf->srun_epilog); conf_ptr->state_save_location = xstrdup(conf->state_save_location); + conf_ptr->suspend_exc_nodes = xstrdup(conf->suspend_exc_nodes); + conf_ptr->suspend_exc_parts = xstrdup(conf->suspend_exc_parts); + conf_ptr->suspend_program = xstrdup(conf->suspend_program); + conf_ptr->suspend_rate = conf->suspend_rate; + conf_ptr->suspend_time = conf->suspend_time; conf_ptr->switch_type = xstrdup(conf->switch_type); + conf_ptr->task_epilog = xstrdup(conf->task_epilog); conf_ptr->task_prolog = xstrdup(conf->task_prolog); conf_ptr->task_plugin = xstrdup(conf->task_plugin); conf_ptr->task_plugin_param = conf->task_plugin_param; conf_ptr->tmp_fs = xstrdup(conf->tmp_fs); - conf_ptr->wait_time = conf->wait_time; - conf_ptr->srun_prolog = xstrdup(conf->srun_prolog); - conf_ptr->srun_epilog = xstrdup(conf->srun_epilog); - conf_ptr->node_prefix = xstrdup(conf->node_prefix); conf_ptr->tree_width = conf->tree_width; + + conf_ptr->wait_time = conf->wait_time; + conf_ptr->use_pam = conf->use_pam; conf_ptr->unkillable_program = xstrdup(conf->unkillable_program); conf_ptr->unkillable_timeout = conf->unkillable_timeout; @@ -382,16 +462,15 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr) return; } -/* return true if supplied uid is a super-user: root, self, or SlurmUser */ -static inline bool _is_super_user(uid_t uid) +/* + * validate_super_user - validate that the uid is authorized to see + * privileged data (either user root or SlurmUser) + * IN uid - user to validate + * RET true if permitted to run, false otherwise + */ +extern bool validate_super_user(uid_t uid) { - /* READ lock_slurmctld config would be ideal here, but - * that value should be identical to getuid() anyway. - * privileged calls should be coming from user root too, - * so we forgo the overhead here. 
*/ - if ( (uid == 0) || - (uid == slurmctld_conf.slurm_user_id) || - (uid == getuid()) ) + if ((uid == 0) || (uid == getuid())) return true; else return false; @@ -422,6 +501,8 @@ static int _make_step_cred(struct step_record *step_rec, cred_arg.jobid = step_rec->job_ptr->job_id; cred_arg.stepid = step_rec->step_id; cred_arg.uid = step_rec->job_ptr->user_id; + cred_arg.job_mem = step_rec->job_ptr->details->job_min_memory; + cred_arg.task_mem = step_rec->mem_per_task; cred_arg.hostlist = step_rec->step_layout->node_list; if(step_rec->job_ptr->details->shared == 0) cred_arg.alloc_lps_cnt = 0; @@ -459,29 +540,39 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg) /* Locks: Read config, write job, write node, read partition */ slurmctld_lock_t job_write_lock = { READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); int immediate = job_desc_msg->immediate; bool do_unlock = false; bool job_waiting = false; struct job_record *job_ptr; + uint16_t port; /* dummy value */ + slurm_addr resp_addr; START_TIMER; - debug2("Processing RPC: REQUEST_RESOURCE_ALLOCATION"); - - /* do RPC call */ - dump_job_desc(job_desc_msg); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if ( (uid != job_desc_msg->user_id) && (!_is_super_user(uid)) ) { + if ((uid != job_desc_msg->user_id) && (!validate_super_user(uid))) { error_code = ESLURM_USER_ID_MISSING; error("Security violation, RESOURCE_ALLOCATE from uid=%u", - (unsigned int) uid); + (unsigned int) uid); } + debug2("Processing RPC: REQUEST_RESOURCE_ALLOCATION from uid=%u", + (unsigned int) uid); + /* do RPC call */ + if ((job_desc_msg->alloc_node == NULL) + || (job_desc_msg->alloc_node[0] == '\0')) { + error_code = ESLURM_INVALID_NODE_NAME; + error("REQUEST_RESOURCE_ALLOCATE lacks alloc_node from uid=%u", + (unsigned int) uid); + } + slurm_get_peer_addr(msg->conn_fd, &resp_addr); + job_desc_msg->resp_host = xmalloc(16); + slurm_get_ip_str(&resp_addr, &port, job_desc_msg->resp_host, 16); + dump_job_desc(job_desc_msg); if (error_code == SLURM_SUCCESS) { do_unlock = true; lock_slurmctld(job_write_lock); - error_code = job_allocate(job_desc_msg, - immediate, false, + error_code = job_allocate(job_desc_msg, immediate, + false, NULL, true, uid, &job_ptr); /* unlock after finished using the job structure data */ END_TIMER2("_slurm_rpc_allocate_resources"); @@ -544,12 +635,14 @@ static void _slurm_rpc_dump_conf(slurm_msg_t * msg) slurm_msg_t response_msg; last_update_msg_t *last_time_msg = (last_update_msg_t *) msg->data; slurm_ctl_conf_info_msg_t config_tbl; - /* Locks: Read config */ + /* Locks: Read config, partition*/ slurmctld_lock_t config_read_lock = { - READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK }; + READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_BUILD_INFO"); + debug2("Processing RPC: REQUEST_BUILD_INFO from uid=%u", + (unsigned int) uid); lock_slurmctld(config_read_lock); /* check to see if configuration data has changed */ @@ -583,12 +676,14 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg) slurm_msg_t response_msg; job_info_request_msg_t *job_info_request_msg = (job_info_request_msg_t *) msg->data; - /* Locks: Read job, write node (for hiding) */ + /* Locks: Read config job, write node (for hiding) */ slurmctld_lock_t job_read_lock = { - NO_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK }; + READ_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); 
START_TIMER; - debug2("Processing RPC: REQUEST_JOB_INFO"); + debug2("Processing RPC: REQUEST_JOB_INFO from uid=%u", + (unsigned int) uid); lock_slurmctld(job_read_lock); if ((job_info_request_msg->last_update - 1) >= last_job_update) { @@ -598,7 +693,7 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg) } else { pack_all_jobs(&dump, &dump_size, job_info_request_msg->show_flags, - g_slurm_auth_get_uid(msg->auth_cred)); + g_slurm_auth_get_uid(msg->auth_cred, NULL)); unlock_slurmctld(job_read_lock); END_TIMER2("_slurm_rpc_dump_jobs"); debug2("_slurm_rpc_dump_jobs, size=%d %s", @@ -629,9 +724,11 @@ static void _slurm_rpc_end_time(slurm_msg_t * msg) /* Locks: Read job */ slurmctld_lock_t job_read_lock = { NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST JOB_END_TIME"); + debug2("Processing RPC: REQUEST JOB_END_TIME from uid=%u", + (unsigned int) uid); lock_slurmctld(job_read_lock); rc = job_end_time(time_req_msg, &timeout_msg); unlock_slurmctld(job_read_lock); @@ -662,18 +759,24 @@ static void _slurm_rpc_dump_nodes(slurm_msg_t * msg) /* Locks: Read config, read node, write node (for hiding) */ slurmctld_lock_t node_read_lock = { READ_LOCK, NO_LOCK, READ_LOCK, WRITE_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_NODE_INFO"); + debug2("Processing RPC: REQUEST_NODE_INFO from uid=%u", + (unsigned int) uid); lock_slurmctld(node_read_lock); - if ((node_req_msg->last_update - 1) >= last_node_update) { + if (slurmctld_conf.private_data && !validate_super_user(uid)) { + unlock_slurmctld(node_read_lock); + error("Security violation, REQUEST_NODE_INFO RPC from uid=%d", uid); + slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); + } else if ((node_req_msg->last_update - 1) >= last_node_update) { unlock_slurmctld(node_read_lock); debug2("_slurm_rpc_dump_nodes, no change"); slurm_send_rc_msg(msg, SLURM_NO_CHANGE_IN_DATA); } else { pack_all_node(&dump, &dump_size, node_req_msg->show_flags, - g_slurm_auth_get_uid(msg->auth_cred)); + uid); unlock_slurmctld(node_read_lock); END_TIMER2("_slurm_rpc_dump_nodes"); debug2("_slurm_rpc_dump_nodes, size=%d %s", @@ -701,22 +804,28 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg) slurm_msg_t response_msg; part_info_request_msg_t *part_req_msg; - /* Locks: Read partition */ + /* Locks: Read configuration and partition */ slurmctld_lock_t part_read_lock = { - NO_LOCK, NO_LOCK, NO_LOCK, READ_LOCK }; + READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_PARTITION_INFO"); + debug2("Processing RPC: REQUEST_PARTITION_INFO uid=%u", + (unsigned int) uid); part_req_msg = (part_info_request_msg_t *) msg->data; lock_slurmctld(part_read_lock); - if ((part_req_msg->last_update - 1) >= last_part_update) { + if (slurmctld_conf.private_data && !validate_super_user(uid)) { + unlock_slurmctld(part_read_lock); + debug2("Security violation, PARTITION_INFO RPC from uid=%d", uid); + slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); + } else if ((part_req_msg->last_update - 1) >= last_part_update) { unlock_slurmctld(part_read_lock); debug2("_slurm_rpc_dump_partitions, no change"); slurm_send_rc_msg(msg, SLURM_NO_CHANGE_IN_DATA); } else { pack_all_part(&dump, &dump_size, part_req_msg->show_flags, - g_slurm_auth_get_uid(msg->auth_cred)); + uid); unlock_slurmctld(part_read_lock); END_TIMER2("_slurm_rpc_dump_partitions"); 
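/*
 * The node and partition dump handlers above share one access-control
 * pattern that recurs through this file: resolve the caller's uid from
 * the auth credential and, when PrivateData is configured and the caller
 * is not a super-user, refuse the request before packing anything.
 * A minimal sketch of that shape, using only names visible in these
 * hunks; the handler name and the packed payload are placeholders.
 */
static void _dump_guarded(slurm_msg_t *msg)
{
	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
	/* Locks: read config (for private_data), read partition */
	slurmctld_lock_t read_lock = {
		READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };

	lock_slurmctld(read_lock);
	if (slurmctld_conf.private_data && !validate_super_user(uid)) {
		unlock_slurmctld(read_lock);
		error("Security violation, info RPC from uid=%u",
		      (unsigned int) uid);
		slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return;
	}
	/* ... pack the requested data here ... */
	unlock_slurmctld(read_lock);
	/* ... then send the packed response ... */
}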
debug2("_slurm_rpc_dump_partitions, size=%d %s", @@ -740,24 +849,25 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg) static void _slurm_rpc_epilog_complete(slurm_msg_t * msg) { DEF_TIMERS; - /* Locks: Write job, write node */ + /* Locks: Read configuration, write job, write node */ slurmctld_lock_t job_write_lock = { - NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); epilog_complete_msg_t *epilog_msg = (epilog_complete_msg_t *) msg->data; bool run_scheduler = false; START_TIMER; - debug2("Processing RPC: MESSAGE_EPILOG_COMPLETE"); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + debug2("Processing RPC: MESSAGE_EPILOG_COMPLETE uid=%u", + (unsigned int) uid); + lock_slurmctld(job_write_lock); + if (slurmctld_conf.private_data && !validate_super_user(uid)) { + unlock_slurmctld(job_write_lock); error("Security violation, EPILOG_COMPLETE RPC from uid=%u", (unsigned int) uid); return; } - lock_slurmctld(job_write_lock); if (job_epilog_complete(epilog_msg->job_id, epilog_msg->node_name, epilog_msg->return_code)) run_scheduler = true; @@ -795,11 +905,11 @@ static void _slurm_rpc_job_step_kill(slurm_msg_t * msg) /* Locks: Read config, write job, write node */ slurmctld_lock_t job_write_lock = { READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_CANCEL_JOB_STEP"); - uid = g_slurm_auth_get_uid(msg->auth_cred); + debug2("Processing RPC: REQUEST_CANCEL_JOB_STEP uid=%u", + (unsigned int) uid); lock_slurmctld(job_write_lock); /* do RPC call */ @@ -864,14 +974,14 @@ static void _slurm_rpc_complete_job_allocation(slurm_msg_t * msg) slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); bool job_requeue = false; /* init */ START_TIMER; - debug2("Processing RPC: REQUEST_COMPLETE_JOB_ALLOCATION %u", - comp_msg->job_id); - uid = g_slurm_auth_get_uid(msg->auth_cred); + debug2("Processing RPC: REQUEST_COMPLETE_JOB_ALLOCATION from " + "uid=%u, JobId=%u rc=%d", + uid, comp_msg->job_id, comp_msg->job_rc); lock_slurmctld(job_write_lock); @@ -908,17 +1018,17 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg) slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); bool job_requeue = false; bool dump_job = false, dump_node = false; /* init */ START_TIMER; - debug2("Processing RPC: REQUEST_COMPLETE_BATCH_SCRIPT %u", - comp_msg->job_id); - uid = g_slurm_auth_get_uid(msg->auth_cred); + debug2("Processing RPC: REQUEST_COMPLETE_BATCH_SCRIPT from " + "uid=%u JobId=%u", + uid, comp_msg->job_id); - if (!_is_super_user(uid)) { + if (!validate_super_user(uid)) { /* Only the slurmstepd can complete a batch script */ END_TIMER2("_slurm_rpc_complete_batch_script"); return; @@ -997,13 +1107,13 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg) /* Locks: Write jobs, read nodes */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_JOB_STEP_CREATE"); + debug2("Processing RPC: REQUEST_JOB_STEP_CREATE from uid=%u", + (unsigned int) uid); dump_step_desc(req_step_msg); - uid = g_slurm_auth_get_uid(msg->auth_cred); if 
(uid != req_step_msg->user_id) { error("Security violation, JOB_STEP_CREATE RPC from uid=%u " "to run as uid %u", @@ -1015,7 +1125,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg) #ifdef HAVE_FRONT_END /* Limited job step support */ /* Non-super users not permitted to run job steps on front-end. * A single slurmd can not handle a heavy load. */ - if (!_is_super_user(uid)) { + if (!validate_super_user(uid)) { info("Attempt to execute job step by uid=%u", (unsigned int) uid); slurm_send_rc_msg(msg, ESLURM_BATCH_ONLY); @@ -1075,12 +1185,14 @@ static void _slurm_rpc_job_step_get_info(slurm_msg_t * msg) int error_code = SLURM_SUCCESS; job_step_info_request_msg_t *request = (job_step_info_request_msg_t *) msg->data; - /* Locks: Read job, write partition (for filtering) */ + /* Locks: Read config, job, write partition (for filtering) */ slurmctld_lock_t job_read_lock = { - NO_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK }; + READ_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_JOB_STEP_INFO"); + debug2("Processing RPC: REQUEST_JOB_STEP_INFO from uid=%u", + (unsigned int) uid); lock_slurmctld(job_read_lock); @@ -1090,7 +1202,6 @@ static void _slurm_rpc_job_step_get_info(slurm_msg_t * msg) error_code = SLURM_NO_CHANGE_IN_DATA; } else { Buf buffer = init_buf(BUF_SIZE); - uid_t uid = g_slurm_auth_get_uid(msg->auth_cred); error_code = pack_ctld_job_step_info_response_msg( request->job_id, request->step_id, uid, request->show_flags, buffer); @@ -1137,24 +1248,40 @@ static void _slurm_rpc_job_will_run(slurm_msg_t * msg) /* Locks: Write job, read node, read partition */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + uint16_t port; /* dummy value */ + slurm_addr resp_addr; + will_run_response_msg_t *resp = NULL; START_TIMER; - debug2("Processing RPC: REQUEST_JOB_WILL_RUN"); + debug2("Processing RPC: REQUEST_JOB_WILL_RUN from uid=%u", + (unsigned int) uid); /* do RPC call */ - dump_job_desc(job_desc_msg); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if ( (uid != job_desc_msg->user_id) && (!_is_super_user(uid)) ) { + if ( (uid != job_desc_msg->user_id) && (!validate_super_user(uid)) ) { error_code = ESLURM_USER_ID_MISSING; error("Security violation, JOB_WILL_RUN RPC from uid=%u", - (unsigned int) uid); + (unsigned int) uid); } - + if ((job_desc_msg->alloc_node == NULL) + || (job_desc_msg->alloc_node[0] == '\0')) { + error_code = ESLURM_INVALID_NODE_NAME; + error("REQUEST_JOB_WILL_RUN lacks alloc_node from uid=%u", + (unsigned int) uid); + } + slurm_get_peer_addr(msg->conn_fd, &resp_addr); + job_desc_msg->resp_host = xmalloc(16); + slurm_get_ip_str(&resp_addr, &port, job_desc_msg->resp_host, 16); + dump_job_desc(job_desc_msg); if (error_code == SLURM_SUCCESS) { lock_slurmctld(job_write_lock); - error_code = job_allocate(job_desc_msg, - true, true, true, uid, &job_ptr); + if (job_desc_msg->job_id == NO_VAL) { + error_code = job_allocate(job_desc_msg, false, + true, &resp, + true, uid, &job_ptr); + } else { /* existing job test */ + error_code = job_start_data(job_desc_msg, &resp); + } unlock_slurmctld(job_write_lock); END_TIMER2("_slurm_rpc_job_will_run"); } @@ -1164,9 +1291,20 @@ static void _slurm_rpc_job_will_run(slurm_msg_t * msg) debug2("_slurm_rpc_job_will_run: %s", slurm_strerror(error_code)); slurm_send_rc_msg(msg, error_code); + } else if (resp) { + slurm_msg_t response_msg; + /* init 
response_msg structure */ + slurm_msg_t_init(&response_msg); + response_msg.address = msg->address; + response_msg.msg_type = RESPONSE_JOB_WILL_RUN; + response_msg.data = resp; + slurm_send_node_msg(msg->conn_fd, &response_msg); + slurm_free_will_run_response_msg(resp); + debug2("_slurm_rpc_job_will_run success %s", TIME_STR); } else { debug2("_slurm_rpc_job_will_run success %s", TIME_STR); - slurm_send_rc_msg(msg, SLURM_SUCCESS); + if (job_desc_msg->job_id == NO_VAL) + slurm_send_rc_msg(msg, SLURM_SUCCESS); } } @@ -1182,12 +1320,12 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg) /* Locks: Read config, write job, write node */ slurmctld_lock_t job_write_lock = { READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: MESSAGE_NODE_REGISTRATION_STATUS"); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + debug2("Processing RPC: MESSAGE_NODE_REGISTRATION_STATUS from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { error_code = ESLURM_USER_ID_MISSING; error("Security violation, NODE_REGISTER RPC from uid=%u", (unsigned int) uid); @@ -1196,26 +1334,10 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg) /* do RPC call */ lock_slurmctld(job_write_lock); #ifdef HAVE_FRONT_END /* Operates only on front-end */ - error_code = validate_nodes_via_front_end( - node_reg_stat_msg->job_count, - node_reg_stat_msg->job_id, - node_reg_stat_msg->step_id, - node_reg_stat_msg->status); + error_code = validate_nodes_via_front_end(node_reg_stat_msg); #else - validate_jobs_on_node(node_reg_stat_msg->node_name, - &node_reg_stat_msg->job_count, - node_reg_stat_msg->job_id, - node_reg_stat_msg->step_id); - error_code = - validate_node_specs(node_reg_stat_msg->node_name, - node_reg_stat_msg->cpus, - node_reg_stat_msg->sockets, - node_reg_stat_msg->cores, - node_reg_stat_msg->threads, - node_reg_stat_msg->real_memory_size, - node_reg_stat_msg->temporary_disk_space, - node_reg_stat_msg->job_count, - node_reg_stat_msg->status); + validate_jobs_on_node(node_reg_stat_msg); + error_code = validate_node_specs(node_reg_stat_msg); #endif unlock_slurmctld(job_write_lock); END_TIMER2("_slurm_rpc_node_registration"); @@ -1244,17 +1366,17 @@ static void _slurm_rpc_job_alloc_info(slurm_msg_t * msg) job_alloc_info_msg_t *job_info_msg = (job_alloc_info_msg_t *) msg->data; job_alloc_info_response_msg_t job_info_resp_msg; - /* Locks: Read job, read node */ + /* Locks: Read config, job, read node */ slurmctld_lock_t job_read_lock = { - NO_LOCK, READ_LOCK, READ_LOCK, NO_LOCK }; - uid_t uid; + READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); bool do_unlock = false; START_TIMER; - debug2("Processing RPC: REQUEST_JOB_ALLOCATION_INFO"); + debug2("Processing RPC: REQUEST_JOB_ALLOCATION_INFO from uid=%u", + (unsigned int) uid); /* do RPC call */ - uid = g_slurm_auth_get_uid(msg->auth_cred); do_unlock = true; lock_slurmctld(job_read_lock); error_code = job_alloc_info(uid, job_info_msg->job_id, &job_ptr); @@ -1319,17 +1441,17 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg) job_alloc_info_msg_t *job_info_msg = (job_alloc_info_msg_t *) msg->data; resource_allocation_response_msg_t job_info_resp_msg; - /* Locks: Read job, read node */ + /* Locks: Read config, job, read node */ slurmctld_lock_t job_read_lock = { - NO_LOCK, READ_LOCK, READ_LOCK, NO_LOCK }; - uid_t uid; + READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK }; 
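/*
 * Both the resource-allocation and the will-run handlers above now
 * capture the peer address of the incoming connection and store a
 * printable IP in the job descriptor's resp_host field, presumably so
 * the controller can reach the waiting client again later.  The
 * recurring fragment, with the call signatures as they appear in these
 * hunks, is roughly:
 */
	uint16_t port;		/* dummy value; only the address is kept */
	slurm_addr resp_addr;

	slurm_get_peer_addr(msg->conn_fd, &resp_addr);
	job_desc_msg->resp_host = xmalloc(16);	/* room for a dotted-quad string */
	slurm_get_ip_str(&resp_addr, &port, job_desc_msg->resp_host, 16);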
+ uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); bool do_unlock = false; START_TIMER; - debug2("Processing RPC: REQUEST_JOB_ALLOCATION_INFO_LITE"); + debug2("Processing RPC: REQUEST_JOB_ALLOCATION_INFO_LITE from uid=%u", + (unsigned int) uid); /* do RPC call */ - uid = g_slurm_auth_get_uid(msg->auth_cred); do_unlock = true; lock_slurmctld(job_read_lock); error_code = job_alloc_info(uid, job_info_msg->job_id, &job_ptr); @@ -1392,20 +1514,18 @@ static void _slurm_rpc_ping(slurm_msg_t * msg) * slurmctld from configuration file */ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg) { - /* init */ int error_code = SLURM_SUCCESS; static bool in_progress = false; - DEF_TIMERS; /* Locks: Write configuration, job, node and partition */ slurmctld_lock_t config_write_lock = { WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - info("Processing RPC: REQUEST_RECONFIGURE"); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + info("Processing RPC: REQUEST_RECONFIGURE from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { error("Security violation, RECONFIGURE RPC from uid=%u", (unsigned int) uid); error_code = ESLURM_USER_ID_MISSING; @@ -1438,8 +1558,9 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg) info("_slurm_rpc_reconfigure_controller: completed %s", TIME_STR); slurm_send_rc_msg(msg, SLURM_SUCCESS); - slurm_sched_partition_change(); - schedule(); /* has its own locks */ + slurm_sched_partition_change(); /* notify sched plugin */ + select_g_reconfigure(); /* notify select plugin too */ + schedule(); /* has its own locks */ save_all_state(); } } @@ -1450,13 +1571,12 @@ static void _slurm_rpc_shutdown_controller(slurm_msg_t * msg) int error_code = SLURM_SUCCESS, i; uint16_t core_arg = 0; shutdown_msg_t *shutdown_msg = (shutdown_msg_t *) msg->data; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); /* Locks: Read node */ slurmctld_lock_t node_read_lock = { NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK }; - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + if (!validate_super_user(uid)) { error("Security violation, SHUTDOWN RPC from uid=%u", (unsigned int) uid); error_code = ESLURM_USER_ID_MISSING; @@ -1520,10 +1640,9 @@ static void _slurm_rpc_shutdown_controller(slurm_msg_t * msg) static void _slurm_rpc_shutdown_controller_immediate(slurm_msg_t * msg) { int error_code = SLURM_SUCCESS; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + if (!validate_super_user(uid)) { error ("Security violation, SHUTDOWN_IMMEDIATE RPC from uid=%u", (unsigned int) uid); @@ -1548,18 +1667,18 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg) /* Locks: Write job, write node */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); bool job_requeue = false; bool dump_job = false, dump_node = false; /* init */ START_TIMER; debug("Processing RPC: REQUEST_STEP_COMPLETE for %u.%u " - "nodes %u-%u rc=%u", + "nodes %u-%u rc=%u uid=%u", req->job_id, req->job_step_id, - req->range_first, req->range_last, req->step_rc); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + req->range_first, req->range_last, + req->step_rc, (unsigned int) uid); + if (!validate_super_user(uid)) { /* Don't trust RPC, it is not from 
slurmstepd */ error("Invalid user %d attempted REQUEST_STEP_COMPLETE", uid); @@ -1635,15 +1754,16 @@ static void _slurm_rpc_step_layout(slurm_msg_t *msg) DEF_TIMERS; job_step_id_msg_t *req = (job_step_id_msg_t *)msg->data; slurm_step_layout_t *step_layout = NULL; - /* Locks: Write job, write node */ + /* Locks: Read config job, write node */ slurmctld_lock_t job_read_lock = { - NO_LOCK, READ_LOCK, READ_LOCK, NO_LOCK }; - uid_t uid = g_slurm_auth_get_uid(msg->auth_cred); + READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); struct job_record *job_ptr = NULL; struct step_record *step_ptr = NULL; START_TIMER; - debug2("Processing RPC: REQUEST_STEP_LAYOUT"); + debug2("Processing RPC: REQUEST_STEP_LAYOUT, from uid=%u", + (unsigned int) uid); lock_slurmctld(job_read_lock); error_code = job_alloc_info(uid, req->job_id, &job_ptr); @@ -1681,7 +1801,6 @@ static void _slurm_rpc_step_layout(slurm_msg_t *msg) /* _slurm_rpc_submit_batch_job - process RPC to submit a batch job */ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg) { - /* init */ int error_code = SLURM_SUCCESS; DEF_TIMERS; uint32_t step_id = 0; @@ -1689,25 +1808,30 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg) slurm_msg_t response_msg; submit_response_msg_t submit_msg; job_desc_msg_t *job_desc_msg = (job_desc_msg_t *) msg->data; - /* Locks: Write job, read node, read partition */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_SUBMIT_BATCH_JOB"); + debug2("Processing RPC: REQUEST_SUBMIT_BATCH_JOB from uid=%u", + (unsigned int) uid); slurm_msg_t_init(&response_msg); /* do RPC call */ - dump_job_desc(job_desc_msg); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if ( (uid != job_desc_msg->user_id) && (!_is_super_user(uid)) ) { + if ( (uid != job_desc_msg->user_id) && (!validate_super_user(uid)) ) { /* NOTE: User root can submit a batch job for any other user */ error_code = ESLURM_USER_ID_MISSING; error("Security violation, SUBMIT_JOB from uid=%u", - (unsigned int) uid); + (unsigned int) uid); + } + if ((job_desc_msg->alloc_node == NULL) + || (job_desc_msg->alloc_node[0] == '\0')) { + error_code = ESLURM_INVALID_NODE_NAME; + error("REQUEST_SUBMIT_BATCH_JOB lacks alloc_node from uid=%u", + (unsigned int) uid); } + dump_job_desc(job_desc_msg); if (error_code == SLURM_SUCCESS) { lock_slurmctld(job_write_lock); if (job_desc_msg->job_id != SLURM_BATCH_SCRIPT) { @@ -1721,7 +1845,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg) #ifdef HAVE_FRONT_END /* Limited job step support */ /* Non-super users not permitted to run job steps on front-end. * A single slurmd can not handle a heavy load. */ - if (!_is_super_user(uid)) { + if (!validate_super_user(uid)) { info("Attempt to execute batch job step by uid=%u", (unsigned int) uid); slurm_send_rc_msg(msg, ESLURM_BATCH_ONLY); @@ -1769,7 +1893,8 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg) /* Create new job allocation */ error_code = job_allocate(job_desc_msg, - job_desc_msg->immediate, false, + job_desc_msg->immediate, + false, NULL, false, uid, &job_ptr); unlock_slurmctld(job_write_lock); END_TIMER2("_slurm_rpc_submit_batch_job"); @@ -1802,21 +1927,20 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg) * job (e.g. 
priority) */ static void _slurm_rpc_update_job(slurm_msg_t * msg) { - /* init */ int error_code; DEF_TIMERS; job_desc_msg_t *job_desc_msg = (job_desc_msg_t *) msg->data; /* Locks: Write job, read node, read partition */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_UPDATE_JOB"); + debug2("Processing RPC: REQUEST_UPDATE_JOB from uid=%u", + (unsigned int) uid); /* do RPC call */ dump_job_desc(job_desc_msg); - uid = g_slurm_auth_get_uid(msg->auth_cred); lock_slurmctld(job_write_lock); error_code = update_job(job_desc_msg, uid); unlock_slurmctld(job_write_lock); @@ -1893,7 +2017,6 @@ extern int slurm_fail_job(uint32_t job_id) * node (e.g. UP/DOWN) */ static void _slurm_rpc_update_node(slurm_msg_t * msg) { - /* init */ int error_code = SLURM_SUCCESS; DEF_TIMERS; update_node_msg_t *update_node_msg_ptr = @@ -1901,12 +2024,12 @@ static void _slurm_rpc_update_node(slurm_msg_t * msg) /* Locks: Write job and write node */ slurmctld_lock_t node_write_lock = { NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_UPDATE_NODE"); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + debug2("Processing RPC: REQUEST_UPDATE_NODE from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { error_code = ESLURM_USER_ID_MISSING; error("Security violation, UPDATE_NODE RPC from uid=%u", (unsigned int) uid); @@ -1943,19 +2066,18 @@ static void _slurm_rpc_update_node(slurm_msg_t * msg) * of a partition (e.g. UP/DOWN) */ static void _slurm_rpc_update_partition(slurm_msg_t * msg) { - /* init */ int error_code = SLURM_SUCCESS; DEF_TIMERS; update_part_msg_t *part_desc_ptr = (update_part_msg_t *) msg->data; /* Locks: Read config, read node, write partition */ slurmctld_lock_t part_write_lock = { READ_LOCK, NO_LOCK, READ_LOCK, WRITE_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_UPDATE_PARTITION"); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + debug2("Processing RPC: REQUEST_UPDATE_PARTITION from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { error_code = ESLURM_USER_ID_MISSING; error ("Security violation, UPDATE_PARTITION RPC from uid=%u", @@ -2005,12 +2127,12 @@ static void _slurm_rpc_delete_partition(slurm_msg_t * msg) /* Locks: write job, read node, write partition */ slurmctld_lock_t part_write_lock = { NO_LOCK, WRITE_LOCK, READ_LOCK, WRITE_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_DELETE_PARTITION"); - uid = g_slurm_auth_get_uid(msg->auth_cred); - if (!_is_super_user(uid)) { + debug2("Processing RPC: REQUEST_DELETE_PARTITION from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { error_code = ESLURM_USER_ID_MISSING; error ("Security violation, DELETE_PARTITION RPC from uid=%u", @@ -2079,12 +2201,27 @@ static void _slurm_rpc_node_select_info(slurm_msg_t * msg) node_info_select_request_msg_t *sel_req_msg = (node_info_select_request_msg_t *) msg->data; slurm_msg_t response_msg; + /* Locks: read config */ + slurmctld_lock_t config_read_lock = { + READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK }; DEF_TIMERS; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: 
REQUEST_NODE_SELECT_INFO"); - error_code = select_g_pack_node_info(sel_req_msg->last_update, - &buffer); + debug2("Processing RPC: REQUEST_NODE_SELECT_INFO from uid=%u", + (unsigned int) uid); + lock_slurmctld(config_read_lock); + if (slurmctld_conf.private_data && !validate_super_user(uid)) { + error_code = ESLURM_USER_ID_MISSING; + error("Security violation, NODE_SELECT_INFO RPC from uid=u", + (unsigned int) uid); + } + unlock_slurmctld(config_read_lock); + if (error_code == SLURM_SUCCESS) { + error_code = select_g_pack_node_info( + sel_req_msg->last_update, + &buffer); + } END_TIMER2("_slurm_rpc_node_select_info"); if (error_code) { @@ -2122,7 +2259,7 @@ inline static void _slurm_rpc_suspend(slurm_msg_t * msg) /* Locks: write job and node */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); char *op; START_TIMER; @@ -2136,8 +2273,8 @@ inline static void _slurm_rpc_suspend(slurm_msg_t * msg) default: op = "unknown"; } - info("Processing RPC: REQUEST_SUSPEND(%s)", op); - uid = g_slurm_auth_get_uid(msg->auth_cred); + info("Processing RPC: REQUEST_SUSPEND(%s) from uid=%u", + op, (unsigned int) uid); lock_slurmctld(job_write_lock); error_code = job_suspend(sus_ptr, uid, msg->conn_fd); @@ -2165,11 +2302,11 @@ inline static void _slurm_rpc_requeue(slurm_msg_t * msg) /* Locks: write job and node */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - info("Processing RPC: REQUEST_REQUEUE"); - uid = g_slurm_auth_get_uid(msg->auth_cred); + info("Processing RPC: REQUEST_REQUEUE from uid=%u", + (unsigned int) uid); lock_slurmctld(job_write_lock); error_code = job_requeue(uid, requeue_ptr->job_id, @@ -2197,7 +2334,7 @@ inline static void _slurm_rpc_checkpoint(slurm_msg_t * msg) /* Locks: write job */ slurmctld_lock_t job_write_lock = { NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); char *op; START_TIMER; @@ -2226,8 +2363,8 @@ inline static void _slurm_rpc_checkpoint(slurm_msg_t * msg) default: op = "unknown"; } - debug2("Processing RPC: REQUEST_CHECKPOINT %s", op); - uid = g_slurm_auth_get_uid(msg->auth_cred); + debug2("Processing RPC: REQUEST_CHECKPOINT(%s) from uid=%u", + op, (unsigned int) uid); /* do RPC call and send reply */ lock_slurmctld(job_write_lock); @@ -2268,11 +2405,11 @@ inline static void _slurm_rpc_checkpoint_comp(slurm_msg_t * msg) /* Locks: read job */ slurmctld_lock_t job_read_lock = { NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK }; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); START_TIMER; - debug2("Processing RPC: REQUEST_CHECKPOINT_COMP"); - uid = g_slurm_auth_get_uid(msg->auth_cred); + debug2("Processing RPC: REQUEST_CHECKPOINT_COMP from uid=%u", + (unsigned int) uid); /* do RPC call and send reply */ lock_slurmctld(job_read_lock); @@ -2290,6 +2427,37 @@ inline static void _slurm_rpc_checkpoint_comp(slurm_msg_t * msg) } } +inline static void _slurm_rpc_checkpoint_task_comp(slurm_msg_t * msg) +{ + int error_code = SLURM_SUCCESS; + DEF_TIMERS; + checkpoint_task_comp_msg_t *ckpt_ptr; + /* Locks: read job */ + slurmctld_lock_t job_read_lock = { + NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + + ckpt_ptr = (checkpoint_task_comp_msg_t *) msg->data; + START_TIMER; + debug2("Processing RPC: REQUEST_CHECKPOINT_TASK_COMP from uid=%u", + 
(unsigned int) uid); + + /* do RPC call and send reply */ + lock_slurmctld(job_read_lock); + error_code = job_step_checkpoint_task_comp(ckpt_ptr, uid, msg->conn_fd); + unlock_slurmctld(job_read_lock); + END_TIMER2("_slurm_rpc_checkpoint_task_comp"); + + if (error_code) { + info("_slurm_rpc_checkpoint_task_comp %u.%u: %s", + ckpt_ptr->job_id, ckpt_ptr->step_id, + slurm_strerror(error_code)); + } else { + info("_slurm_rpc_checkpoint_task_comp %u.%u %s", + ckpt_ptr->job_id, ckpt_ptr->step_id, TIME_STR); + } +} + static char ** _xduparray(uint16_t size, char ** array) { @@ -2377,18 +2545,14 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid, * to the slurmd should contain the proper allocation values * for subsequent srun jobs within the batch script. */ + memset(&req_step_msg, 0, sizeof(job_step_create_request_msg_t)); req_step_msg.job_id = job_desc_msg->job_id; req_step_msg.user_id = uid; req_step_msg.node_count = 1; req_step_msg.cpu_count = 1; req_step_msg.num_tasks = 1; - req_step_msg.relative = 0; req_step_msg.task_dist = SLURM_DIST_CYCLIC; - req_step_msg.port = 0; - req_step_msg.host = NULL; req_step_msg.name = job_desc_msg->name; - req_step_msg.network = NULL; - req_step_msg.node_list = NULL; error_code = step_create(&req_step_msg, &step_rec, false, true); xfree(req_step_msg.node_list); /* may be set by step_create */ @@ -2453,6 +2617,8 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid, launch_msg_ptr->err = xstrdup(job_desc_msg->err); launch_msg_ptr->in = xstrdup(job_desc_msg->in); launch_msg_ptr->out = xstrdup(job_desc_msg->out); + launch_msg_ptr->acctg_freq = job_desc_msg->acctg_freq; + launch_msg_ptr->open_mode = job_desc_msg->open_mode; launch_msg_ptr->work_dir = xstrdup(job_desc_msg->work_dir); launch_msg_ptr->argc = job_desc_msg->argc; launch_msg_ptr->argv = _xduparray(job_desc_msg->argc, @@ -2461,6 +2627,7 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid, launch_msg_ptr->environment = _xduparray2(job_desc_msg->env_size, job_desc_msg->environment); launch_msg_ptr->envc = job_desc_msg->env_size; + launch_msg_ptr->job_mem = job_desc_msg->job_min_memory; /* _max_nprocs() represents the total number of CPUs available * for this step (overcommit not supported yet). 
If job_desc_msg @@ -2508,13 +2675,13 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid, inline static void _slurm_rpc_trigger_clear(slurm_msg_t * msg) { int rc; - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data; DEF_TIMERS; START_TIMER; - debug("Processing RPC: REQUEST_TRIGGER_CLEAR"); - uid = g_slurm_auth_get_uid(msg->auth_cred); + debug("Processing RPC: REQUEST_TRIGGER_CLEAR from uid=%u", + (unsigned int) uid); rc = trigger_clear(uid, trigger_ptr); END_TIMER2("_slurm_rpc_trigger_clear"); @@ -2524,15 +2691,15 @@ inline static void _slurm_rpc_trigger_clear(slurm_msg_t * msg) inline static void _slurm_rpc_trigger_get(slurm_msg_t * msg) { - uid_t uid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); trigger_info_msg_t *resp_data; trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data; slurm_msg_t response_msg; DEF_TIMERS; START_TIMER; - debug("Processing RPC: REQUEST_TRIGGER_GET"); - uid = g_slurm_auth_get_uid(msg->auth_cred); + debug("Processing RPC: REQUEST_TRIGGER_GET from uid=%u", + (unsigned int) uid); resp_data = trigger_get(uid, trigger_ptr); END_TIMER2("_slurm_rpc_trigger_get"); @@ -2548,18 +2715,164 @@ inline static void _slurm_rpc_trigger_get(slurm_msg_t * msg) inline static void _slurm_rpc_trigger_set(slurm_msg_t * msg) { int rc; - uid_t uid; - gid_t gid; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + gid_t gid = g_slurm_auth_get_gid(msg->auth_cred, NULL);; trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data; DEF_TIMERS; START_TIMER; - debug("Processing RPC: REQUEST_TRIGGER_SET"); - uid = g_slurm_auth_get_uid(msg->auth_cred); - gid = g_slurm_auth_get_gid(msg->auth_cred); + debug("Processing RPC: REQUEST_TRIGGER_SET from uid=%u", + (unsigned int) uid); rc = trigger_set(uid, gid, trigger_ptr); END_TIMER2("_slurm_rpc_trigger_set"); slurm_send_rc_msg(msg, rc); } + +inline static void _slurm_rpc_job_notify(slurm_msg_t * msg) +{ + int error_code = SLURM_SUCCESS; + /* Locks: read job */ + slurmctld_lock_t job_read_lock = { + NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK }; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + job_notify_msg_t * notify_msg = (job_notify_msg_t *) msg->data; + DEF_TIMERS; + + START_TIMER; + debug("Processing RPC: REQUEST_JOB_NOTIFY from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { + error_code = ESLURM_USER_ID_MISSING; + error("Security violation, REQUEST_JOB_NOTIFY RPC from uid=%u", + (unsigned int) uid); + } + + if (error_code == SLURM_SUCCESS) { + /* do RPC call */ + struct job_record *job_ptr; + lock_slurmctld(job_read_lock); + job_ptr = find_job_record(notify_msg->job_id); + if (job_ptr) + srun_user_message(job_ptr, notify_msg->message); + else + error_code = ESLURM_INVALID_JOB_ID; + unlock_slurmctld(job_read_lock); + } + + END_TIMER2("_slurm_rpc_job_notify"); + slurm_send_rc_msg(msg, error_code); +} + +/* defined in controller.c */ +inline static void _slurm_rpc_set_debug_level(slurm_msg_t *msg) +{ + int debug_level; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + slurmctld_lock_t config_read_lock = + { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK }; + set_debug_level_msg_t *request_msg = (set_debug_level_msg_t *) msg->data; + log_options_t log_opts = LOG_OPTS_INITIALIZER; + slurm_ctl_conf_t *conf; + + debug2("Processing RPC: REQUEST_SET_DEBUG_LEVEL from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { + error("set debug level request from non-super user 
uid=%d", + uid); + slurm_send_rc_msg(msg, EACCES); + return; + } + + /* NOTE: not offset by LOG_LEVEL_INFO, since it's inconveniet + * to provide negative values for scontrol */ + debug_level = MIN (request_msg->debug_level, (LOG_LEVEL_END - 1)); + debug_level = MAX (debug_level, LOG_LEVEL_QUIET); + + info ("Setting debug level to %d", debug_level); + + lock_slurmctld (config_read_lock); + + if (slurmctld_config.daemonize) { + log_opts.stderr_level = LOG_LEVEL_QUIET; + if (slurmctld_conf.slurmctld_logfile) { + log_opts.logfile_level = debug_level; + log_opts.syslog_level = LOG_LEVEL_QUIET; + } else { + log_opts.syslog_level = debug_level; + log_opts.logfile_level = LOG_LEVEL_QUIET; + } + } else { + log_opts.syslog_level = LOG_LEVEL_QUIET; + log_opts.stderr_level = debug_level; + if (slurmctld_conf.slurmctld_logfile) + log_opts.logfile_level = debug_level; + else + log_opts.logfile_level = LOG_LEVEL_QUIET; + } + + log_alter(log_opts, LOG_DAEMON, slurmctld_conf.slurmctld_logfile); + + unlock_slurmctld (config_read_lock); + + conf = slurm_conf_lock(); + conf->slurmctld_debug = debug_level; + slurm_conf_unlock(); + slurmctld_conf.last_update = time(NULL); + + slurm_send_rc_msg(msg, SLURM_SUCCESS); +} + +inline static void _slurm_rpc_accounting_update_msg(slurm_msg_t *msg) +{ + int rc = SLURM_SUCCESS; + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + ListIterator itr = NULL; + accounting_update_msg_t *update_ptr = + (accounting_update_msg_t *) msg->data; + acct_update_object_t *object = NULL; + + DEF_TIMERS; + + START_TIMER; + debug2("Processing RPC: ACCOUNTING_UPDATE_MSG from uid=%u", + (unsigned int) uid); + if (!validate_super_user(uid)) { + error("Update Association request from non-super user uid=%d", + uid); + slurm_send_rc_msg(msg, EACCES); + return; + } + if(update_ptr->update_list && list_count(update_ptr->update_list)) { + itr = list_iterator_create(update_ptr->update_list); + while((object = list_next(itr))) { + if(!object->objects || !list_count(object->objects)) + continue; + switch(object->type) { + case ACCT_MODIFY_USER: + case ACCT_ADD_USER: + case ACCT_REMOVE_USER: + rc = assoc_mgr_update_local_users(object); + break; + case ACCT_ADD_ASSOC: + case ACCT_MODIFY_ASSOC: + case ACCT_REMOVE_ASSOC: + rc = assoc_mgr_update_local_assocs( + object); + break; + case ACCT_UPDATE_NOTSET: + default: + error("unknown type set in update_object: %d", + object->type); + break; + } + } + list_iterator_destroy(itr); + } + + END_TIMER2("_slurm_rpc_accounting_update_msg"); + + slurm_send_rc_msg(msg, rc); +} + diff --git a/src/slurmctld/proc_req.h b/src/slurmctld/proc_req.h index 3d1fe46ee..e3f3ed89b 100644 --- a/src/slurmctld/proc_req.h +++ b/src/slurmctld/proc_req.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c index 49b8d18fd..bf839eb66 100644 --- a/src/slurmctld/read_config.c +++ b/src/slurmctld/read_config.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -62,12 +62,12 @@ #include "src/common/parse_spec.h" #include "src/common/read_config.h" #include "src/common/slurm_jobcomp.h" +#include "src/common/slurm_rlimits_info.h" #include "src/common/switch.h" #include "src/common/xstring.h" -#include "src/common/node_select.h" -#include "src/common/slurm_jobacct.h" -#include "src/common/slurm_rlimits_info.h" +#include "src/slurmctld/job_scheduler.h" +#include "src/slurmctld/licenses.h" #include "src/slurmctld/locks.h" #include "src/slurmctld/node_scheduler.h" #include "src/slurmctld/proc_req.h" @@ -77,17 +77,19 @@ #include "src/slurmctld/trigger_mgr.h" static int _build_bitmaps(void); +static void _build_bitmaps_pre_select(void); static int _init_all_slurm_conf(void); static void _purge_old_node_state(struct node_record *old_node_table_ptr, int old_node_record_count); -static void _restore_node_state(struct node_record *old_node_table_ptr, +static int _restore_job_dependencies(void); +static int _restore_node_state(struct node_record *old_node_table_ptr, int old_node_record_count); static int _preserve_select_type_param(slurm_ctl_conf_t * ctl_conf_ptr, select_type_plugin_info_t old_select_type_p); static int _preserve_plugins(slurm_ctl_conf_t * ctl_conf_ptr, char *old_auth_type, char *old_checkpoint_type, - char *old_sched_type, char *old_select_type, - char *old_switch_type); + char *old_crypto_type, char *old_sched_type, + char *old_select_type, char *old_switch_type); static int _sync_nodes_to_comp_job(void); static int _sync_nodes_to_jobs(void); static int _sync_nodes_to_active_job(struct job_record *job_ptr); @@ -95,12 +97,60 @@ static int _sync_nodes_to_active_job(struct job_record *job_ptr); static void _validate_node_proc_count(void); #endif -static char highest_node_name[MAX_SLURM_NAME] = ""; +static char *highest_node_name = NULL; int node_record_count = 0; /* FIXME - declarations for temporarily moved functions */ #define MULTIPLE_VALUE_MSG "Multiple values for %s, latest one used" +/* + * _build_bitmaps_pre_select - recover some state for jobs and nodes prior to + * calling the select_* functions + */ +static void _build_bitmaps_pre_select(void) +{ + struct part_record *part_ptr; + struct node_record *node_ptr; + ListIterator part_iterator; + int i; + + + /* scan partition table and identify nodes in each */ + part_iterator = list_iterator_create(part_list); + if (part_iterator == NULL) + fatal ("memory allocation failure"); + + while ((part_ptr = (struct part_record *) list_next(part_iterator))) { + FREE_NULL_BITMAP(part_ptr->node_bitmap); + + if ((part_ptr->nodes == NULL) || (part_ptr->nodes[0] == '\0')) + continue; + + if (node_name2bitmap(part_ptr->nodes, false, + &part_ptr->node_bitmap)) { + fatal("Invalid node names in partition %s", + part_ptr->name); + } + + for (i=0; i<node_record_count; i++) { + if (bit_test(part_ptr->node_bitmap, i) == 0) + continue; + node_ptr = &node_record_table_ptr[i]; + part_ptr->total_nodes++; + if (slurmctld_conf.fast_schedule) + part_ptr->total_cpus += + node_ptr->config_ptr->cpus; + else + part_ptr->total_cpus += node_ptr->cpus; + node_ptr->part_cnt++; + xrealloc(node_ptr->part_pptr, (node_ptr->part_cnt * + sizeof(struct part_record *))); + node_ptr->part_pptr[node_ptr->part_cnt-1] = part_ptr; + } + } + list_iterator_destroy(part_iterator); + return; +} /* * _build_bitmaps - build node bitmaps to define which nodes are in which @@ -114,16 +164,11 @@ int node_record_count = 0; */ 
static int _build_bitmaps(void) { - int i, j, error_code = SLURM_SUCCESS; - char *this_node_name; + int i, error_code = SLURM_SUCCESS; ListIterator config_iterator; - ListIterator part_iterator; struct config_record *config_ptr; - struct part_record *part_ptr; - struct node_record *node_ptr; struct job_record *job_ptr; ListIterator job_iterator; - hostlist_t host_list; last_node_update = time(NULL); last_part_update = time(NULL); @@ -189,7 +234,7 @@ static int _build_bitmaps(void) base_state = node_record_table_ptr[i].node_state & NODE_STATE_BASE; drain_flag = node_record_table_ptr[i].node_state & - NODE_STATE_DRAIN; + (NODE_STATE_DRAIN | NODE_STATE_FAIL); no_resp_flag = node_record_table_ptr[i].node_state & NODE_STATE_NO_RESPOND; job_cnt = node_record_table_ptr[i].run_job_cnt + @@ -208,56 +253,6 @@ static int _build_bitmaps(void) bit_set(node_record_table_ptr[i].config_ptr-> node_bitmap, i); } - - /* scan partition table and identify nodes in each */ - part_iterator = list_iterator_create(part_list); - if (part_iterator == NULL) - fatal ("memory allocation failure"); - - while ((part_ptr = (struct part_record *) list_next(part_iterator))) { - FREE_NULL_BITMAP(part_ptr->node_bitmap); - part_ptr->node_bitmap = - (bitstr_t *) bit_alloc(node_record_count); - if (part_ptr->node_bitmap == NULL) - fatal ("bit_alloc malloc failure"); - - /* check for each node in the partition */ - if ((part_ptr->nodes == NULL) || (part_ptr->nodes[0] == '\0')) - continue; - - if ((host_list = hostlist_create(part_ptr->nodes)) == NULL) { - fatal("hostlist_create error for %s, %m", - part_ptr->nodes); - continue; - } - - while ((this_node_name = hostlist_shift(host_list))) { - node_ptr = find_node_record(this_node_name); - if (node_ptr == NULL) { - fatal("_build_bitmaps: node %s is referenced " - "but not defined in slurm.conf " - "(no NodeName specification)", - this_node_name); - free(this_node_name); - continue; - } - j = node_ptr - node_record_table_ptr; - bit_set(part_ptr->node_bitmap, j); - part_ptr->total_nodes++; - if (slurmctld_conf.fast_schedule) - part_ptr->total_cpus += - node_ptr->config_ptr->cpus; - else - part_ptr->total_cpus += node_ptr->cpus; - node_ptr->part_cnt++; - xrealloc(node_ptr->part_pptr, (node_ptr->part_cnt * - sizeof(struct part_record *))); - node_ptr->part_pptr[node_ptr->part_cnt-1] = part_ptr; - free(this_node_name); - } - hostlist_destroy(host_list); - } - list_iterator_destroy(part_iterator); return error_code; } @@ -286,7 +281,7 @@ static int _init_all_slurm_conf(void) if ((error_code = init_job_conf())) return error_code; - strcpy(highest_node_name, ""); + xfree(highest_node_name); return 0; } @@ -303,9 +298,12 @@ static int _state_str2int(const char *state_str) break; } } - if ((i >= NODE_STATE_END) - && (strncasecmp("DRAIN", state_str, 5) == 0)) - state_val = NODE_STATE_UNKNOWN | NODE_STATE_DRAIN; + if (i >= NODE_STATE_END) { + if (strncasecmp("DRAIN", state_str, 5) == 0) + state_val = NODE_STATE_UNKNOWN | NODE_STATE_DRAIN; + else if (strncasecmp("FAIL", state_str, 4) == 0) + state_val = NODE_STATE_IDLE | NODE_STATE_FAIL; + } if (state_val == NO_VAL) { error("invalid node state %s", state_str); errno = EINVAL; @@ -344,6 +342,7 @@ static void _set_node_prefix(const char *nodenames, slurm_ctl_conf_t *conf) debug3("Prefix is %s %s %d", conf->node_prefix, nodenames, i); } #endif /* HAVE_BG */ + /* * _build_single_nodeline_info - From the slurm.conf reader, build table, * and set values @@ -391,7 +390,8 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr, } #ifdef 
HAVE_BG - _set_node_prefix(node_ptr->nodenames, conf); + if (conf->node_prefix == NULL) + _set_node_prefix(node_ptr->nodenames, conf); #endif /* some sanity checks */ @@ -422,14 +422,16 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr, hostname = hostlist_shift(hostname_list); address = hostlist_shift(address_list); #endif - if (strcmp(alias, highest_node_name) <= 0) { + if (highest_node_name && + (strcmp(alias, highest_node_name) <= 0)) { /* find_node_record locks this to get the alias so we need to unlock */ slurm_conf_unlock(); node_rec = find_node_record(alias); slurm_conf_lock(); } else { - strncpy(highest_node_name, alias, MAX_SLURM_NAME); + xfree(highest_node_name); + highest_node_name = xstrdup(alias); node_rec = NULL; } @@ -439,7 +441,7 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr, (state_val != NODE_STATE_UNKNOWN)) node_rec->node_state = state_val; node_rec->last_response = (time_t) 0; - strncpy(node_rec->comm_name, address, MAX_SLURM_NAME); + node_rec->comm_name = xstrdup(address); node_rec->port = node_ptr->port; node_rec->reason = xstrdup(node_ptr->reason); @@ -567,6 +569,22 @@ static int _build_all_nodeline_info(slurm_ctl_conf_t *conf) _build_single_nodeline_info(node, config_ptr, conf); } + xfree(highest_node_name); +#ifdef HAVE_BG +{ + char *node_000 = NULL; + struct node_record *node_rec = NULL; + if (conf->node_prefix) + node_000 = xstrdup(conf->node_prefix); + xstrcat(node_000, "000"); + slurm_conf_unlock(); + node_rec = find_node_record(node_000); + slurm_conf_lock(); + if (node_rec == NULL) + fatal("No node %s configured", node_000); + xfree(node_000); +} +#endif return SLURM_SUCCESS; } @@ -582,37 +600,44 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part) { struct part_record *part_ptr; - if (strlen(part->name) >= MAX_SLURM_NAME) { - error("_parse_part_spec: partition name %s too long", - part->name); - return EINVAL; - } - part_ptr = list_find_first(part_list, &list_find_part, part->name); if (part_ptr == NULL) { part_ptr = create_part_record(); - strcpy(part_ptr->name, part->name); + xfree(part_ptr->name); + part_ptr->name = xstrdup(part->name); } else { verbose("_parse_part_spec: duplicate entry for partition %s", part->name); } if (part->default_flag) { - if ((strlen(default_part_name) > 0) + if (default_part_name && strcmp(default_part_name, part->name)) info("_parse_part_spec: changing default partition " "from %s to %s", default_part_name, part->name); - strcpy(default_part_name, part->name); + xfree(default_part_name); + default_part_name = xstrdup(part->name); default_part_loc = part_ptr; } - part_ptr->hidden = part->hidden_flag ? 1 : 0; - part_ptr->max_time = part->max_time; - part_ptr->max_nodes = part->max_nodes; - part_ptr->min_nodes = part->min_nodes; - part_ptr->root_only = part->root_only_flag ? 1 : 0; - part_ptr->state_up = part->state_up_flag ? 1 : 0; - part_ptr->shared = part->shared; + if(part->disable_root_jobs == (uint16_t)NO_VAL) + part_ptr->disable_root_jobs = slurmctld_conf.disable_root_jobs; + else + part_ptr->disable_root_jobs = part->disable_root_jobs; + + if(part_ptr->disable_root_jobs) + debug2("partition %s does not allow root jobs", part_ptr->name); + + part_ptr->hidden = part->hidden_flag ? 
1 : 0; + part_ptr->max_time = part->max_time; + part_ptr->max_share = part->max_share; + part_ptr->max_nodes = part->max_nodes; + part_ptr->max_nodes_orig = part->max_nodes; + part_ptr->min_nodes = part->min_nodes; + part_ptr->min_nodes_orig = part->min_nodes; + part_ptr->priority = part->priority; + part_ptr->root_only = part->root_only_flag ? 1 : 0; + part_ptr->state_up = part->state_up_flag ? 1 : 0; if (part->allow_groups) { xfree(part_ptr->allow_groups); part_ptr->allow_groups = xstrdup(part->allow_groups); @@ -677,19 +702,20 @@ static int _build_all_partitionline_info() * state information depending upon value * 0 = use no saved state information * 1 = recover saved job and trigger state, - * node DOWN/DRAIN state and reason information + * node DOWN/DRAIN/FAIL state and reason information * 2 = recover all state saved from last slurmctld shutdown - * RET 0 if no error, otherwise an error code + * RET SLURM_SUCCESS if no error, otherwise an error code * Note: Operates on common variables only */ int read_slurm_conf(int recover) { DEF_TIMERS; - int error_code, i; + int error_code, i, rc; int old_node_record_count; struct node_record *old_node_table_ptr; char *old_auth_type = xstrdup(slurmctld_conf.authtype); char *old_checkpoint_type = xstrdup(slurmctld_conf.checkpoint_type); + char *old_crypto_type = xstrdup(slurmctld_conf.crypto_type); char *old_sched_type = xstrdup(slurmctld_conf.schedtype); char *old_select_type = xstrdup(slurmctld_conf.select_type); char *old_switch_type = xstrdup(slurmctld_conf.switch_type); @@ -711,9 +737,16 @@ int read_slurm_conf(int recover) old_node_record_count = node_record_count; old_node_table_ptr = node_record_table_ptr; for (i=0; i<node_record_count; i++) { + xfree(old_node_table_ptr[i].arch); xfree(old_node_table_ptr[i].features); + xfree(old_node_table_ptr[i].os); old_node_table_ptr[i].features = xstrdup( old_node_table_ptr[i].config_ptr->feature); + /* Store the original configured CPU count somewhere + * (port is reused here for that purpose) so we can + * report changes in its configuration. */ + old_node_table_ptr[i].port = old_node_table_ptr[i]. 
+ config_ptr->cpus; } node_record_table_ptr = NULL; node_record_count = 0; @@ -729,7 +762,6 @@ int read_slurm_conf(int recover) _build_all_partitionline_info(); update_logging(); - jobacct_g_init_slurmctld(slurmctld_conf.job_acct_logfile); g_slurm_jobcomp_init(slurmctld_conf.job_comp_loc); slurm_sched_init(); if (switch_init() < 0) @@ -757,22 +789,24 @@ int read_slurm_conf(int recover) (void) load_all_job_state(); } else { /* Load no info, preserve all state */ if (old_node_table_ptr) { - debug("restoring original state of nodes"); - _restore_node_state(old_node_table_ptr, - old_node_record_count); + info("restoring original state of nodes"); + rc = _restore_node_state(old_node_table_ptr, + old_node_record_count); + error_code = MAX(error_code, rc); /* not fatal */ } reset_first_job_id(); (void) slurm_sched_reconfig(); xfree(state_save_dir); } + _build_bitmaps_pre_select(); if ((select_g_node_init(node_record_table_ptr, node_record_count) - != SLURM_SUCCESS) + != SLURM_SUCCESS) || (select_g_block_init(part_list) != SLURM_SUCCESS) || (select_g_state_restore(state_save_dir) != SLURM_SUCCESS) || (select_g_job_init(job_list) != SLURM_SUCCESS)) { - error("failed to initialize node selection plugin state"); - abort(); + fatal("failed to initialize node selection plugin state, " + "Clean start required."); } xfree(state_save_dir); reset_job_bitmaps(); /* must follow select_g_job_init() */ @@ -781,8 +815,14 @@ int read_slurm_conf(int recover) (void) sync_job_files(); _purge_old_node_state(old_node_table_ptr, old_node_record_count); - if ((error_code = _build_bitmaps())) - return error_code; + if ((rc = _build_bitmaps())) + return rc; /* fatal error */ + + license_free(); + if (license_init(slurmctld_conf.licenses) != SLURM_SUCCESS) + fatal("Invalid Licenses value: %s", slurmctld_conf.licenses); + + _restore_job_dependencies(); restore_node_features(); #ifdef HAVE_ELAN _validate_node_proc_count(); @@ -797,15 +837,16 @@ int read_slurm_conf(int recover) list_sort(config_list, &list_compare_config); /* Update plugins as possible */ - error_code = _preserve_plugins(&slurmctld_conf, - old_auth_type, old_checkpoint_type, - old_sched_type, old_select_type, - old_switch_type); + rc = _preserve_plugins(&slurmctld_conf, + old_auth_type, old_checkpoint_type, + old_crypto_type, old_sched_type, + old_select_type, old_switch_type); + error_code = MAX(error_code, rc); /* not fatal */ /* Update plugin parameters as possible */ - error_code = _preserve_select_type_param( - &slurmctld_conf, - old_select_type_p); + rc = _preserve_select_type_param(&slurmctld_conf, + old_select_type_p); + error_code = MAX(error_code, rc); /* not fatal */ slurmctld_conf.last_update = time(NULL); END_TIMER2("read_slurm_conf"); @@ -815,11 +856,11 @@ int read_slurm_conf(int recover) /* Restore node state and size information from saved records. 
* If a node was re-configured to be down or drained, we set those states */ -static void _restore_node_state(struct node_record *old_node_table_ptr, +static int _restore_node_state(struct node_record *old_node_table_ptr, int old_node_record_count) { struct node_record *node_ptr; - int i; + int i, rc = SLURM_SUCCESS; for (i = 0; i < old_node_record_count; i++) { uint16_t drain_flag = false, down_flag = false; @@ -840,6 +881,12 @@ static void _restore_node_state(struct node_record *old_node_table_ptr, node_ptr->node_state |= NODE_STATE_DRAIN; node_ptr->last_response = old_node_table_ptr[i].last_response; + if (old_node_table_ptr[i].port != node_ptr->config_ptr->cpus) { + rc = ESLURM_NEED_RESTART; + error("Configured cpu count change on %s (%u to %u)", + node_ptr->name, old_node_table_ptr[i].port, + node_ptr->config_ptr->cpus); + } node_ptr->cpus = old_node_table_ptr[i].cpus; node_ptr->sockets = old_node_table_ptr[i].sockets; node_ptr->cores = old_node_table_ptr[i].cores; @@ -856,7 +903,18 @@ static void _restore_node_state(struct node_record *old_node_table_ptr, node_ptr->features = old_node_table_ptr[i].features; old_node_table_ptr[i].features = NULL; } + if (old_node_table_ptr[i].arch) { + xfree(node_ptr->arch); + node_ptr->arch = old_node_table_ptr[i].arch; + old_node_table_ptr[i].arch = NULL; + } + if (old_node_table_ptr[i].os) { + xfree(node_ptr->os); + node_ptr->os = old_node_table_ptr[i].os; + old_node_table_ptr[i].os = NULL; + } } + return rc; } /* Purge old node state information */ @@ -866,8 +924,12 @@ static void _purge_old_node_state(struct node_record *old_node_table_ptr, int i; for (i = 0; i < old_node_record_count; i++) { - xfree(old_node_table_ptr[i].part_pptr); + xfree(old_node_table_ptr[i].arch); + xfree(old_node_table_ptr[i].comm_name); xfree(old_node_table_ptr[i].features); + xfree(old_node_table_ptr[i].name); + xfree(old_node_table_ptr[i].os); + xfree(old_node_table_ptr[i].part_pptr); xfree(old_node_table_ptr[i].reason); } xfree(old_node_table_ptr); @@ -904,8 +966,8 @@ static int _preserve_select_type_param(slurm_ctl_conf_t *ctl_conf_ptr, */ static int _preserve_plugins(slurm_ctl_conf_t * ctl_conf_ptr, char *old_auth_type, char *old_checkpoint_type, - char *old_sched_type, char *old_select_type, - char *old_switch_type) + char *old_crypto_type, char *old_sched_type, + char *old_select_type, char *old_switch_type) { int rc = SLURM_SUCCESS; @@ -928,6 +990,16 @@ static int _preserve_plugins(slurm_ctl_conf_t * ctl_conf_ptr, xfree(old_checkpoint_type); } + if (old_crypto_type) { + if (strcmp(old_crypto_type, + ctl_conf_ptr->crypto_type)) { + xfree(ctl_conf_ptr->crypto_type); + ctl_conf_ptr->crypto_type = old_crypto_type; + rc = ESLURM_INVALID_CRYPTO_TYPE_CHANGE; + } else + xfree(old_crypto_type); + } + if (old_sched_type) { if (strcmp(old_sched_type, ctl_conf_ptr->schedtype)) { xfree(ctl_conf_ptr->schedtype); @@ -1101,3 +1173,41 @@ static void _validate_node_proc_count(void) } #endif +/* + * _restore_job_dependencies - Build depend_list and license_list for every job + */ +static int _restore_job_dependencies(void) +{ + int error_code = SLURM_SUCCESS, rc; + struct job_record *job_ptr; + ListIterator job_iterator; + char *new_depend; + bool valid; + List license_list; + + job_iterator = list_iterator_create(job_list); + while ((job_ptr = (struct job_record *) list_next(job_iterator))) { + license_list = license_job_validate(job_ptr->licenses, &valid); + if (job_ptr->license_list) + list_destroy(job_ptr->license_list); + if (valid) + job_ptr->license_list = license_list; + if 
(job_ptr->job_state == JOB_RUNNING) + license_job_get(job_ptr); + + if ((job_ptr->details == NULL) || + (job_ptr->details->dependency == NULL)) + continue; + new_depend = job_ptr->details->dependency; + job_ptr->details->dependency = NULL; + rc = update_job_dependency(job_ptr, new_depend); + if (rc != SLURM_SUCCESS) { + error("Invalid dependencies discarded for job %u: %s", + job_ptr->job_id, new_depend); + error_code = rc; + } + xfree(new_depend); + } + list_iterator_destroy(job_iterator); + return error_code; +} diff --git a/src/slurmctld/read_config.h b/src/slurmctld/read_config.h index bc1fe154a..2305565ae 100644 --- a/src/slurmctld/read_config.h +++ b/src/slurmctld/read_config.h @@ -1,10 +1,10 @@ /*****************************************************************************\ - * read_config.h - header to manager node ping + * read_config.h - functions for reading slurmctld configuration ***************************************************************************** * Copyright (C) 2003 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -48,7 +48,7 @@ * node DOWN/DRAIN state and reason information * 1 = recover only saved job state information * 2 = recover all state saved from last slurmctld shutdown - * RET 0 if no error, otherwise an error code + * RET SLURM_SUCCESS if no error, otherwise an error code * Note: Operates on common variables only */ extern int read_slurm_conf(int recover); diff --git a/src/slurmctld/sched_plugin.c b/src/slurmctld/sched_plugin.c index 8b3febb53..fea958bde 100644 --- a/src/slurmctld/sched_plugin.c +++ b/src/slurmctld/sched_plugin.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windley <jwindley@lnxi.com>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
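A minimal caller sketch, not part of this patch, assuming only the recover levels documented for read_slurm_conf() above; the error message is illustrative.

    /* recover: 2 = recover all state saved from the last slurmctld shutdown,
     * 1 = recover only saved job state; see the header comment above for
     * the remaining level */
    if (read_slurm_conf(2) != SLURM_SUCCESS)
            fatal("read_slurm_conf: unable to recover saved state");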
@@ -52,6 +52,8 @@ /* ************************************************************************ */ typedef struct slurm_sched_ops { int (*schedule) ( void ); + int (*newalloc) ( struct job_record * ); + int (*freealloc) ( struct job_record * ); uint32_t (*initial_priority) ( uint32_t, struct job_record * ); void (*job_is_pending) ( void ); @@ -59,6 +61,9 @@ typedef struct slurm_sched_ops { void (*partition_change) ( void ); int (*get_errno) ( void ); char * (*strerror) ( int ); + void (*job_requeue) ( struct job_record *, + char *reason ); + char * (*get_conf) ( void ); } slurm_sched_ops_t; @@ -88,12 +93,16 @@ slurm_sched_get_ops( slurm_sched_context_t *c ) */ static const char *syms[] = { "slurm_sched_plugin_schedule", + "slurm_sched_plugin_newalloc", + "slurm_sched_plugin_freealloc", "slurm_sched_plugin_initial_priority", "slurm_sched_plugin_job_is_pending", "slurm_sched_plugin_reconfig", "slurm_sched_plugin_partition_change", "slurm_sched_get_errno", - "slurm_sched_strerror" + "slurm_sched_strerror", + "slurm_sched_plugin_requeue", + "slurm_sched_get_conf" }; int n_syms = sizeof( syms ) / sizeof( char * ); @@ -260,6 +269,30 @@ slurm_sched_schedule( void ) return (*(g_sched_context->ops.schedule))(); } +/* *********************************************************************** */ +/* TAG( slurm_sched_newalloc ) */ +/* *********************************************************************** */ +int +slurm_sched_newalloc( struct job_record *job_ptr ) +{ + if ( slurm_sched_init() < 0 ) + return SLURM_ERROR; + + return (*(g_sched_context->ops.newalloc))( job_ptr ); +} + +/* *********************************************************************** */ +/* TAG( slurm_sched_freealloc ) */ +/* *********************************************************************** */ +int +slurm_sched_freealloc( struct job_record *job_ptr ) +{ + if ( slurm_sched_init() < 0 ) + return SLURM_ERROR; + + return (*(g_sched_context->ops.freealloc))( job_ptr ); +} + /* *********************************************************************** */ /* TAG( slurm_sched_initital_priority ) */ @@ -322,3 +355,29 @@ slurm_sched_p_strerror( int errnum ) return (*(g_sched_context->ops.strerror))( errnum ); } + +/* *********************************************************************** */ +/* TAG( slurm_sched_requeue ) */ +/* *********************************************************************** */ +void +slurm_sched_requeue( struct job_record *job_ptr, char *reason ) +{ + if ( slurm_sched_init() < 0 ) + return; + + (*(g_sched_context->ops.job_requeue))( job_ptr, reason ); +} + +/* *********************************************************************** */ +/* TAG( slurm_sched_p_get_conf ) */ +/* *********************************************************************** */ +char * +slurm_sched_p_get_conf( void ) +{ + if ( slurm_sched_init() < 0 ) + return NULL; + + return (*(g_sched_context->ops.get_conf))( ); +} + + diff --git a/src/slurmctld/sched_plugin.h b/src/slurmctld/sched_plugin.h index e3dc03a98..de16bb451 100644 --- a/src/slurmctld/sched_plugin.h +++ b/src/slurmctld/sched_plugin.h @@ -4,7 +4,7 @@ * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Jay Windley <jwindley@lnxi.com> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
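A minimal sketch of the plugin side of the four entry points added to the ops table above (slurm_sched_plugin_newalloc, slurm_sched_plugin_freealloc, slurm_sched_plugin_requeue and slurm_sched_get_conf), modeled on a passive scheduler that only acknowledges the events; illustrative only, not taken from this patch.

    extern int slurm_sched_plugin_newalloc(struct job_record *job_ptr)
    {
            return SLURM_SUCCESS;   /* nothing to do for a passive scheduler */
    }

    extern int slurm_sched_plugin_freealloc(struct job_record *job_ptr)
    {
            return SLURM_SUCCESS;
    }

    extern void slurm_sched_plugin_requeue(struct job_record *job_ptr, char *reason)
    {
            /* a passive scheduler can ignore requeue notifications */
    }

    extern char *slurm_sched_get_conf(void)
    {
            return NULL;            /* no plugin-specific configuration to report */
    }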
@@ -71,12 +71,27 @@ int slurm_sched_reconfig( void ); */ int slurm_sched_schedule( void ); +/* + * Note the successful allocation of resources to a job. + */ +int slurm_sched_newalloc( struct job_record *job_ptr ); + +/* + * Note the successful release of resources to a job. + */ +int slurm_sched_freealloc( struct job_record *job_ptr ); + /* * Supply the initial SLURM priority for a newly-submitted job. */ uint32_t slurm_sched_initial_priority( uint32_t max_prio, struct job_record *job_ptr ); +/* + * Requeue a job + */ +void slurm_sched_requeue( struct job_record *job_ptr, char *reason ); + /* * Note that some job is pending. */ @@ -97,4 +112,10 @@ int slurm_sched_p_get_errno( void ); */ char *slurm_sched_p_strerror( int errnum ); +/* + * Return any plugin-specific configuration information + * Caller must xfree return value + */ +char *slurm_sched_p_get_conf( void ); + #endif /*__SLURM_CONTROLLER_SCHED_PLUGIN_API_H__*/ diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h index 8c7d733d7..6ff3e1ee4 100644 --- a/src/slurmctld/slurmctld.h +++ b/src/slurmctld/slurmctld.h @@ -1,12 +1,11 @@ /*****************************************************************************\ * slurmctld.h - definitions of functions and structures for slurmcltd use - * - * $Id: slurmctld.h 13061 2008-01-22 21:23:56Z da $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -92,7 +91,11 @@ * GENERAL CONFIGURATION parameters and data structures \*****************************************************************************/ /* Maximum parallel threads to service incoming RPCs */ -#define MAX_SERVER_THREADS 60 +#define MAX_SERVER_THREADS 100 + +/* Maximum size we want to support for user strings (e.g. job comment). + * Try to prevent user from filling slurmctld's memory */ +#define MAX_STR_LEN 64 * 1024 /* Perform full slurmctld's state every PERIODIC_CHECKPOINT seconds */ #define PERIODIC_CHECKPOINT 300 @@ -135,6 +138,7 @@ typedef struct slurmctld_config { int daemonize; bool resume_backup; + time_t boot_time; time_t shutdown_time; int server_thread_count; @@ -158,11 +162,13 @@ typedef struct slurmctld_config { extern slurmctld_config_t slurmctld_config; extern int bg_recover; /* state recovery mode */ +extern char *slurmctld_cluster_name; /* name of cluster */ +extern void *acct_db_conn; +extern int accounting_enforce; /*****************************************************************************\ * NODE parameters and data structures \*****************************************************************************/ -#define MAX_JOBNAME_LEN 256 #define CONFIG_MAGIC 0xc065eded #define NODE_MAGIC 0x0de575ed @@ -185,7 +191,7 @@ extern List config_list; /* list of config_record entries */ struct node_record { uint32_t magic; /* magic cookie for data integrity */ - char name[MAX_SLURM_NAME]; /* name of the node. NULL==defunct */ + char *name; /* name of the node. 
NULL==defunct */ uint16_t node_state; /* enum node_states, ORed with * NODE_STATE_NO_RESPOND if not * responding */ @@ -201,7 +207,7 @@ struct node_record { uint16_t part_cnt; /* number of associated partitions */ struct part_record **part_pptr; /* array of pointers to partitions * associated with this node*/ - char comm_name[MAX_SLURM_NAME]; /* communications path name to node */ + char *comm_name; /* communications path name to node */ uint16_t port; /* TCP port number of the slurmd */ slurm_addr slurm_addr; /* network address */ uint16_t comp_job_cnt; /* count of jobs completing on node */ @@ -212,6 +218,8 @@ struct node_record { char *features; /* associated features, used only * for state save/restore, DO NOT * use for scheduling purposes */ + char *arch; /* computer architecture */ + char *os; /* operating system currently running */ struct node_record *node_next; /* next entry with same hash index */ }; @@ -233,19 +241,23 @@ extern bitstr_t *up_node_bitmap; /* bitmap of up nodes, not DOWN */ #define PART_MAGIC 0xaefe8495 struct part_record { + uint16_t disable_root_jobs; /* if set then user root can't run jobs */ uint32_t magic; /* magic cookie to test data integrity */ - char name[MAX_SLURM_NAME];/* name of the partition */ + char *name; /* name of the partition */ uint16_t hidden; /* 1 if hidden by default */ uint32_t max_time; /* minutes or INFINITE */ uint32_t max_nodes; /* per job or INFINITE */ + uint32_t max_nodes_orig;/* unscaled value (c-nodes on BlueGene) */ uint32_t min_nodes; /* per job */ + uint32_t min_nodes_orig;/* unscaled value (c-nodes on BlueGene) */ uint32_t total_nodes; /* total number of nodes in the partition */ uint32_t total_cpus; /* total number of cpus in the partition */ uint32_t min_offset; /* select plugin min offset */ uint32_t max_offset; /* select plugin max offset */ uint16_t root_only; /* 1 if allocate/submit RPC can only be issued by user root */ - uint16_t shared; /* See part_shared in slurm.h */ + uint16_t max_share; /* number of jobs to gang schedule */ + uint16_t priority; /* scheduling priority for jobs */ uint16_t state_up; /* 1 if state is up, 0 if down */ char *nodes; /* comma delimited list names of nodes */ char *allow_groups; /* comma delimited list of groups, @@ -257,7 +269,7 @@ struct part_record { extern List part_list; /* list of part_record entries */ extern time_t last_part_update; /* time of last part_list update */ extern struct part_record default_part; /* default configuration values */ -extern char default_part_name[MAX_SLURM_NAME]; /* name of default partition */ +extern char *default_part_name; /* name of default partition */ extern struct part_record *default_part_loc; /* default partition ptr */ /*****************************************************************************\ @@ -265,11 +277,29 @@ extern struct part_record *default_part_loc; /* default partition ptr */ \*****************************************************************************/ extern time_t last_job_update; /* time of last update to part records */ +/* Used for Moab + * These QOS values only apply to LLNL's configuration + * Other values may apply at other sites, + * These may be mapped to partition priorities in the future */ +#define QOS_EXPEDITE 300 +#define QOS_NORMAL 200 +#define QOS_STANDBY 100 + #define DETAILS_MAGIC 0xdea84e7 #define JOB_MAGIC 0xf0b7392c #define STEP_MAGIC 0xce593bc1 #define KILL_ON_STEP_DONE 1 +#define FEATURE_OP_OR 0 +#define FEATURE_OP_AND 1 +#define FEATURE_OP_XOR 2 +#define FEATURE_OP_END 3 /* last entry lacks separator */ 
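As an illustration of the FEATURE_OP_ codes above together with the feature_record structure declared next, a constraint string such as "fat*2&fast" could decompose roughly as follows; the exact parsing rules and counts are an assumption, not taken from this patch.

    /* hypothetical feature_list contents for the constraint "fat*2&fast" */
    struct feature_record example[] = {
            { .name = "fat",  .count = 2, .op_code = FEATURE_OP_AND },
            { .name = "fast", .count = 0, .op_code = FEATURE_OP_END },
    };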
+struct feature_record { + char *name; /* name of feature */ + uint16_t count; /* count of nodes with this feature */ + uint8_t op_code; /* separator, see FEATURE_OP_ above */ +}; + /* job_details - specification of a job's constraints, * can be purged after initiation */ struct job_details { @@ -282,28 +312,29 @@ struct job_details { uint16_t *req_node_layout; /* task layout for required nodes */ bitstr_t *exc_node_bitmap; /* bitmap of excluded nodes */ char *features; /* required features */ + List feature_list; /* required features with node counts */ uint16_t shared; /* 1 if job can share nodes, - 0 if job cannot share nodes, - any other value accepts the default - sharing policy. */ + * 0 if job cannot share nodes, + * any other value accepts the default + * sharing policy. */ uint16_t contiguous; /* set if requires contiguous nodes */ - uint16_t task_dist; /* task layout for this job. Only useful - * when Consumable Resources is enabled */ + uint16_t task_dist; /* task layout for this job. Only + * useful when Consumable Resources + * is enabled */ uint32_t num_tasks; /* number of tasks to start */ - uint16_t overcommit; /* processors being over subscribed */ + uint8_t open_mode; /* stdout/err append or trunctate */ + uint8_t overcommit; /* processors being over subscribed */ + uint16_t acctg_freq; /* accounting polling interval */ uint16_t cpus_per_task; /* number of processors required for * each task */ uint16_t ntasks_per_node; /* number of tasks on each node */ /* job constraints: */ uint32_t job_min_procs; /* minimum processors per node */ uint32_t job_min_memory; /* minimum memory per node, MB */ - uint32_t job_max_memory; /* maximum memory per node, MB */ uint32_t job_min_tmp_disk; /* minimum tempdisk per node, MB */ char *err; /* pathname of job's stderr file */ char *in; /* pathname of job's stdin file */ char *out; /* pathname of job's stdout file */ - uint32_t total_procs; /* number of allocated processors, - for accounting */ time_t submit_time; /* time of submission */ time_t begin_time; /* start at this time (srun --being), * resets to time first eligible @@ -313,89 +344,116 @@ struct job_details { char *work_dir; /* pathname of working directory */ char **argv; /* arguments for a batch job script */ uint16_t argc; /* count of argv elements */ - uint16_t no_requeue; /* don't requeue job if set */ + uint16_t requeue; /* controls ability requeue job */ multi_core_data_t *mc_ptr; /* multi-core specific data */ + char *dependency; /* wait for other jobs */ + List depend_list; /* list of job_ptr:state pairs */ }; struct job_record { - uint32_t job_id; /* job ID */ - uint32_t magic; /* magic cookie for data integrity */ - char name[MAX_JOBNAME_LEN]; /* name of the job */ - char partition[MAX_SLURM_NAME]; /* name of the partition */ - struct part_record *part_ptr; /* pointer to the partition record */ + char *account; /* account number to charge */ + char *alloc_node; /* local node making resource alloc */ + uint16_t alloc_resp_port; /* RESPONSE_RESOURCE_ALLOCATION port */ + uint32_t alloc_sid; /* local sid making resource alloc */ + uint32_t assoc_id; /* used for accounting plugins */ + void *assoc_ptr; /* job's association record ptr, it is + * void* because of interdependencies + * in the header files, confirm the + * value before use */ uint16_t batch_flag; /* 1 or 2 if batch job (with script), * 2 indicates retry mode (one retry) */ - uint32_t user_id; /* user the job runs as */ + char *comment; /* arbitrary comment */ + uint16_t cr_enabled; /* specify if if Consumable 
Resources + * is enabled. Needed since CR deals + * with a finer granularity in its + * node/cpu scheduling (available cpus + * instead of available nodes) than the + * bluegene and the linear plugins + * 0 if cr is NOT enabled, + * 1 if cr is enabled */ + uint32_t db_index; /* used only for database + * plugins */ + struct job_details *details; /* job details */ + time_t end_time; /* time of termination, + * actual or expected */ + uint32_t exit_code; /* exit code for job (status from + * wait call) */ uint32_t group_id; /* group submitted under */ + uint32_t job_id; /* job ID */ + struct job_record *job_next; /* next entry with same hash index */ enum job_states job_state; /* state of the job */ uint16_t kill_on_node_fail; /* 1 if job should be killed on * node failure */ uint16_t kill_on_step_done; /* 1 if job should be killed when * the job step completes, 2 if kill * in progress */ - select_jobinfo_t select_jobinfo; /* opaque data */ + char *licenses; /* licenses required by the job */ + List license_list; /* structure with license info */ + uint16_t mail_type; /* see MAIL_JOB_* in slurm.h */ + char *mail_user; /* user to get e-mail notification */ + uint32_t magic; /* magic cookie for data integrity */ + char *name; /* name of the job */ + char *network; /* network/switch requirement spec */ + uint16_t next_step_id; /* next step id to be used */ char *nodes; /* list of nodes allocated to job */ + slurm_addr *node_addr; /* addresses of the nodes allocated to + * job */ bitstr_t *node_bitmap; /* bitmap of nodes allocated to job */ + uint32_t node_cnt; /* count of nodes currently + * allocated to job */ char *nodes_completing; /* nodes still in completing state * for this job, used to insure * epilog is not re-run for job */ uint32_t num_procs; /* count of required processors */ - uint32_t time_limit; /* time_limit minutes or INFINITE, - * NO_VAL implies partition max_time */ + uint16_t other_port; /* port for client communications */ + char *partition; /* name of the partition */ + struct part_record *part_ptr; /* pointer to the partition record */ + time_t pre_sus_time; /* time job ran prior to last suspend */ + uint32_t priority; /* relative priority of the job, + * zero == held (don't initiate) */ + uint16_t qos; /* quality of service, used only by Moab */ + uint32_t requid; /* requester user ID */ + char *resp_host; /* host for srun communications */ + select_jobinfo_t select_jobinfo;/* opaque data */ time_t start_time; /* time execution begins, * actual or expected */ - time_t end_time; /* time of termination, - * actual or expected */ + uint16_t state_reason; /* reason job still pending or failed + * see slurm.h:enum job_wait_reason */ + List step_list; /* list of job's steps */ time_t suspend_time; /* time job last suspended or resumed */ - time_t pre_sus_time; /* time job ran prior to last suspend */ time_t time_last_active; /* time of last job activity */ - uint32_t priority; /* relative priority of the job, - * zero == held (don't initiate) */ - struct job_details *details; /* job details */ + uint32_t time_limit; /* time_limit minutes or INFINITE, + * NO_VAL implies partition max_time */ + time_t tot_sus_time; /* total time in suspend state */ + uint32_t total_procs; /* number of allocated processors, + * for accounting */ + uint32_t user_id; /* user the job runs as */ + + /* Per node allocation details */ uint16_t num_cpu_groups; /* record count in cpus_per_node and * cpu_count_reps */ uint32_t *cpus_per_node; /* array of cpus per node allocated */ uint32_t *cpu_count_reps; 
/* array of consecutive nodes with * same cpu count */ - uint32_t alloc_sid; /* local sid making resource alloc */ - char *alloc_node; /* local node making resource alloc */ - uint16_t next_step_id; /* next step id to be used */ - uint32_t node_cnt; /* count of nodes allocated to job */ - slurm_addr *node_addr; /* addresses of the nodes allocated to - * job */ - List step_list; /* list of job's steps */ - uint16_t alloc_resp_port; /* RESPONSE_RESOURCE_ALLOCATION port */ - char *alloc_resp_host; /* RESPONSE_RESOURCE_ALLOCATION host */ - uint16_t other_port; /* port for client communications */ - char *other_host; /* host for client communications */ - char *account; /* account number to charge */ - char *comment; /* arbitrary comment */ - uint32_t dependency; /* defer until this job completes */ - char *network; /* network/switch requirement spec */ - struct job_record *job_next; /* next entry with same hash index */ - uint16_t cr_enabled; /* specify if if Consumable - * Resources is - * enabled. Needed since CR - * deals with a finer - * granularity in its node/cpu - * scheduling (available cpus - * instead of available nodes) - * than the bluegene and the - * linear plugins - * 0 if cr is NOT enabled, - * 1 if cr is enabled */ + uint32_t alloc_lps_cnt; /* number of hosts in alloc_lps * or 0 if alloc_lps is not needed * for the credentials */ uint32_t *alloc_lps; /* number of logical processors * allocated for this job */ - uint16_t mail_type; /* see MAIL_JOB_* in slurm.h */ - char *mail_user; /* user to get e-mail notification */ - uint32_t requid; /* requester user ID */ - uint32_t exit_code; /* exit code for job (status from - * wait call) */ - uint16_t state_reason; /* reason job still pending or failed - * see slurm.h:enum job_wait_reason */ + uint32_t *used_lps; /* number of logical processors + * already allocated to job steps */ +}; + +/* Job dependency specification, used in "depend_list" within job_record */ +#define SLURM_DEPEND_AFTER 1 +#define SLURM_DEPEND_AFTER_ANY 2 +#define SLURM_DEPEND_AFTER_NOT_OK 3 +#define SLURM_DEPEND_AFTER_OK 4 +struct depend_spec { + uint16_t depend_type; /* SLURM_DEPEND_* type */ + uint32_t job_id; /* SLURM job_id */ + struct job_record *job_ptr; /* pointer to this job */ }; struct step_record { @@ -408,11 +466,17 @@ struct step_record { * implicitly the same as suspend_time * in the job record */ time_t pre_sus_time; /* time step ran prior to last suspend */ + time_t tot_sus_time; /* total time in suspended state */ bitstr_t *step_node_bitmap; /* bitmap of nodes allocated to job step */ uint16_t port; /* port for srun communications */ char *host; /* host for srun communications */ uint16_t batch_step; /* 1 if batch job step, 0 otherwise */ + uint16_t mem_per_task; /* MB memory per task, 0=no limit */ + uint16_t ckpt_interval; /* checkpoint interval in minutes */ + char *ckpt_path; /* path to store checkpoint image files */ + uint16_t exclusive; /* dedicated resources for the step */ + time_t ckpt_time; /* time of last checkpoint */ switch_jobinfo_t switch_job; /* switch context, opaque */ check_jobinfo_t check_job; /* checkpoint context, opaque */ char *name; /* name of job step */ @@ -439,11 +503,9 @@ extern List job_list; /* list of job_record entries */ */ enum select_data_info { SELECT_CR_PLUGIN, /* data-> uint32 1 if CR plugin */ - SELECT_CPU_COUNT, /* data-> uint16 count_cpus (CR support) */ SELECT_BITMAP, /* data-> partially_idle_bitmap (CR support) */ SELECT_ALLOC_CPUS, /* data-> uint16 alloc cpus (CR support) */ SELECT_ALLOC_LPS, /* 
data-> uint32 alloc lps (CR support) */ - SELECT_ALLOC_MEMORY, /* data-> uint32 alloc mem (CR support) */ SELECT_AVAIL_CPUS, /* data-> uint16 avail cpus (CR support) */ SELECT_AVAIL_MEMORY /* data-> uint32 avail mem (CR support) */ } ; @@ -627,7 +689,7 @@ extern struct part_record *find_part_record (char *name); * OUT env_size - number of elements to read * RET point to array of string pointers containing environment variables */ -extern char **get_job_env (struct job_record *job_ptr, uint16_t *env_size); +extern char **get_job_env (struct job_record *job_ptr, uint32_t *env_size); /* * get_job_script - return the script for a given job @@ -713,6 +775,7 @@ extern bool is_node_resp (char *name); * IN immediate - if set then either initiate the job immediately or fail * IN will_run - don't initiate the job if set, just test if it could run * now or later + * OUT resp - will run response (includes start location, time, etc.) * IN allocate - resource allocation request if set, not a full job * IN submit_uid -uid of user issuing the request * OUT job_pptr - set to pointer to job record @@ -727,7 +790,8 @@ extern bool is_node_resp (char *name); * default_part_loc - pointer to default partition * NOTE: lock_slurmctld on entry: Read config Write job, Write node, Read part */ -extern int job_allocate(job_desc_msg_t * job_specs, int immediate, int will_run, +extern int job_allocate(job_desc_msg_t * job_specs, int immediate, + int will_run, will_run_response_msg_t **resp, int allocate, uid_t submit_uid, struct job_record **job_pptr); /* log the completion of the specified job */ @@ -756,14 +820,6 @@ extern int job_end_time(job_alloc_info_msg_t *time_req_msg, /* job_fini - free all memory associated with job records */ extern void job_fini (void); -/* - * job_is_completing - Determine if jobs are in the process of completing. - * RET - True of any job is in the process of completing - * NOTE: This function can reduce resource fragmentation, which is a - * critical issue on Elan interconnect based systems. - */ -extern bool job_is_completing(void); - /* * job_fail - terminate a job due to initiation failure * IN job_id - id of the job to be killed @@ -822,6 +878,15 @@ extern int job_step_checkpoint(checkpoint_msg_t *ckpt_ptr, */ extern int job_step_checkpoint_comp(checkpoint_comp_msg_t *ckpt_ptr, uid_t uid, slurm_fd conn_fd); +/* + * job_step_checkpoint_task_comp - note task checkpoint completion + * IN ckpt_ptr - checkpoint task complete status message + * IN uid - user id of the user issuing the RPC + * IN conn_fd - file descriptor on which to send reply + * RET 0 on success, otherwise ESLURM error code + */ +extern int job_step_checkpoint_task_comp(checkpoint_task_comp_msg_t *ckpt_ptr, + uid_t uid, slurm_fd conn_fd); /* * job_step_suspend - perform some suspend/resume operation @@ -855,6 +920,17 @@ extern int job_complete (uint32_t job_id, uid_t uid, bool requeue, */ extern bool job_independent(struct job_record *job_ptr); +/* + * job_req_node_filter - job reqeust node filter. + * clear from a bitmap the nodes which can not be used for a job + * test memory size, required features, processor count, etc. 
+ * IN job_ptr - pointer to node to be scheduled + * IN/OUT bitmap - set of nodes being considered for use + * RET SLURM_SUCCESS or EINVAL if can't filter (exclusive OR of features) + */ +extern int job_req_node_filter(struct job_record *job_ptr, + bitstr_t *avail_bitmap); + /* * job_requeue - Requeue a running or pending batch job * IN uid - user id of user issuing the RPC @@ -981,14 +1057,6 @@ extern int load_all_part_state ( void ); */ extern int load_step_state(struct job_record *job_ptr, Buf buffer); -/* - * make_batch_job_cred - add a job credential to the batch_job_launch_msg - * IN/OUT launch_msg_ptr - batch_job_launch_msg in which job_id, step_id, - * uid and nodes have already been set - * RET 0 or error code - */ -extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr); - /* make_node_alloc - flag specified node as allocated to a job * IN node_ptr - pointer to node being allocated * IN job_ptr - pointer to job that is starting @@ -1188,6 +1256,9 @@ extern void reset_job_priority(void); */ extern void restore_node_features(void); +/* Update time stamps for job step resume */ +extern void resume_job_step(struct job_record *job_ptr); + /* run_backup - this is the backup controller, it should run in standby * mode, assuming control when the primary controller stops responding */ extern void run_backup(void); @@ -1195,26 +1266,6 @@ extern void run_backup(void); /* save_all_state - save entire slurmctld state for later recovery */ extern void save_all_state(void); -/* - * schedule - attempt to schedule all pending jobs - * pending jobs for each partition will be scheduled in priority - * order until a request fails - * RET count of jobs scheduled - * global: job_list - global list of job records - * last_job_update - time of last update to job table - * Note: We re-build the queue every time. Jobs can not only be added - * or removed from the queue, but have their priority or partition - * changed with the update_job RPC. In general nodes will be in priority - * order (by submit time), so the sorting should be pretty fast. - */ -extern int schedule (void); - -/* - * set_job_elig_time - set the eligible time for pending jobs once their - * dependencies are lifted (in job->details->begin_time) - */ -extern void set_job_elig_time(void); - /* * set_node_down - make the specified node's state DOWN if possible * (not in a DRAIN state), kill jobs as needed @@ -1239,12 +1290,22 @@ extern void set_slurmd_addr (void); */ extern void signal_step_tasks(struct step_record *step_ptr, uint16_t signal); +/* Read configuration file. + * Same name as API function for use in accounting_storage plugin */ +extern int slurm_reconfigure(void); + /* * slurmctld_shutdown - wake up slurm_rpc_mgr thread via signal * RET 0 or error code */ extern int slurmctld_shutdown(void); +/* Perform periodic job step checkpoints (per user request) */ +extern void step_checkpoint(void); + +/* Update a job's record of allocated CPUs when a job step gets scheduled */ +extern void step_alloc_lps(struct step_record *step_ptr); + /* * step_create - creates a step_record in step_specs->job_id, sets up the * according to the step_specs. 
@@ -1367,52 +1428,36 @@ extern int validate_group (struct part_record *part_ptr, uid_t run_uid); * are actually running, if not clean up the job records and/or node * records, call this function after validate_node_specs() sets the node * state properly - * IN node_name - node which should have jobs running - * IN/OUT job_count - number of jobs which should be running on specified node - * IN job_id_ptr - pointer to array of job_ids that should be on this node - * IN step_id_ptr - pointer to array of job step ids that should be on node + * IN reg_msg - node registration message */ -extern void validate_jobs_on_node ( char *node_name, uint32_t *job_count, - uint32_t *job_id_ptr, uint16_t *step_id_ptr); +extern void validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg); /* * validate_node_specs - validate the node's specifications as valid, - * if not set state to down, in any case update last_response - * IN node_name - name of the node - * IN cpus - number of cpus measured - * IN sockets - number of sockets per cpu measured - * IN cores - number of cores per socket measured - * IN threads - number of threads per core measured - * IN real_memory - mega_bytes of real_memory measured - * IN tmp_disk - mega_bytes of tmp_disk measured - * IN job_count - number of jobs allocated to this node - * IN status - node status code + * if not set state to down, in any case update last_response + * IN reg_msg - node registration message * RET 0 if no error, ENOENT if no such node, EINVAL if values too low - * global: node_record_table_ptr - pointer to global node table + * NOTE: READ lock_slurmctld config before entry */ -extern int validate_node_specs (char *node_name, - uint16_t cpus, - uint16_t sockets, - uint16_t cores, - uint16_t threads, - uint32_t real_memory, - uint32_t tmp_disk, uint32_t job_count, - uint32_t status); +extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg); /* * validate_nodes_via_front_end - validate all nodes on a cluster as having * a valid configuration as soon as the front-end registers. Individual * nodes will not register with this configuration - * IN job_count - number of jobs which should be running on cluster - * IN job_id_ptr - pointer to array of job_ids that should be on cluster - * IN step_id_ptr - pointer to array of job step ids that should be on cluster - * IN status - cluster status code + * IN reg_msg - node registration message * RET 0 if no error, SLURM error code otherwise - * global: node_record_table_ptr - pointer to global node table * NOTE: READ lock_slurmctld config before entry */ -extern int validate_nodes_via_front_end(uint32_t job_count, - uint32_t *job_id_ptr, uint16_t *step_id_ptr, - uint32_t status); +extern int validate_nodes_via_front_end( + slurm_node_registration_status_msg_t *reg_msg); + +/* + * validate_super_user - validate that the uid is authorized to see + * privileged data (either user root or SlurmUser) + * IN uid - user to validate + * RET true if permitted to run, false otherwise + */ +extern bool validate_super_user (uid_t uid); #endif /* !_HAVE_SLURMCTLD_H */ diff --git a/src/slurmctld/srun_comm.c b/src/slurmctld/srun_comm.c index 0ecedad7b..322c6dbb9 100644 --- a/src/slurmctld/srun_comm.c +++ b/src/slurmctld/srun_comm.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
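A hedged usage sketch for validate_super_user() declared above, mirroring the PrivateData checks this patch adds to the step packing code in step_mgr.c; the helper name is hypothetical.

    /* true if this record should be hidden from the requesting uid */
    static bool _hide_job(struct job_record *job_ptr, uid_t uid)
    {
            return (slurmctld_conf.private_data &&
                    (job_ptr->user_id != uid) && !validate_super_user(uid));
    }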
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -78,14 +78,14 @@ extern void srun_allocate (uint32_t job_id) struct job_record *job_ptr = find_job_record (job_id); xassert(job_ptr); - if (job_ptr && job_ptr->alloc_resp_port - && job_ptr->alloc_resp_host && job_ptr->alloc_resp_host[0]) { + if (job_ptr && job_ptr->alloc_resp_port && job_ptr->alloc_node + && job_ptr->resp_host) { slurm_addr * addr; resource_allocation_response_msg_t *msg_arg; addr = xmalloc(sizeof(struct sockaddr_in)); - slurm_set_addr(addr, job_ptr->alloc_resp_port, - job_ptr->alloc_resp_host); + slurm_set_addr(addr, job_ptr->alloc_resp_port, + job_ptr->resp_host); msg_arg = xmalloc(sizeof(resource_allocation_response_msg_t)); msg_arg->job_id = job_ptr->job_id; msg_arg->node_list = xstrdup(job_ptr->nodes); @@ -102,7 +102,7 @@ extern void srun_allocate (uint32_t job_id) msg_arg->select_jobinfo = select_g_copy_jobinfo( job_ptr->select_jobinfo); msg_arg->error_code = SLURM_SUCCESS; - _srun_agent_launch(addr, job_ptr->alloc_resp_host, + _srun_agent_launch(addr, job_ptr->alloc_node, RESPONSE_RESOURCE_ALLOCATION, msg_arg); } } @@ -126,23 +126,11 @@ extern void srun_node_fail (uint32_t job_id, char *node_name) xassert(node_name); if (!job_ptr || job_ptr->job_state != JOB_RUNNING) return; + if (!node_name || (node_ptr = find_node_record(node_name)) == NULL) return; bit_position = node_ptr - node_record_table_ptr; - if (job_ptr->other_port - && job_ptr->other_host && job_ptr->other_host[0]) { - addr = xmalloc(sizeof(struct sockaddr_in)); - slurm_set_addr(addr, job_ptr->other_port, job_ptr->other_host); - msg_arg = xmalloc(sizeof(srun_node_fail_msg_t)); - msg_arg->job_id = job_id; - msg_arg->step_id = NO_VAL; - msg_arg->nodelist = xstrdup(node_name); - _srun_agent_launch(addr, job_ptr->other_host, SRUN_NODE_FAIL, - msg_arg); - } - - step_iterator = list_iterator_create(job_ptr->step_list); while ((step_ptr = (struct step_record *) list_next(step_iterator))) { if (!bit_test(step_ptr->step_node_bitmap, bit_position)) @@ -162,6 +150,17 @@ extern void srun_node_fail (uint32_t job_id, char *node_name) msg_arg); } list_iterator_destroy(step_iterator); + + if (job_ptr->other_port && job_ptr->alloc_node && job_ptr->resp_host) { + addr = xmalloc(sizeof(struct sockaddr_in)); + slurm_set_addr(addr, job_ptr->other_port, job_ptr->resp_host); + msg_arg = xmalloc(sizeof(srun_node_fail_msg_t)); + msg_arg->job_id = job_id; + msg_arg->step_id = NO_VAL; + msg_arg->nodelist = xstrdup(node_name); + _srun_agent_launch(addr, job_ptr->alloc_node, SRUN_NODE_FAIL, + msg_arg); + } } /* srun_ping - ping all srun commands that have not been heard from recently */ @@ -183,16 +182,16 @@ extern void srun_ping (void) if (job_ptr->job_state != JOB_RUNNING) continue; - if ( (job_ptr->time_last_active <= old) - && job_ptr->other_port - && job_ptr->other_host && job_ptr->other_host[0] ) { + + if ((job_ptr->time_last_active <= old) && job_ptr->other_port + && job_ptr->alloc_node && job_ptr->resp_host) { addr = xmalloc(sizeof(struct sockaddr_in)); slurm_set_addr(addr, job_ptr->other_port, - job_ptr->other_host); + job_ptr->resp_host); msg_arg = xmalloc(sizeof(srun_ping_msg_t)); msg_arg->job_id = job_ptr->job_id; msg_arg->step_id = NO_VAL; - _srun_agent_launch(addr, job_ptr->other_host, + _srun_agent_launch(addr, job_ptr->alloc_node, SRUN_PING, msg_arg); } } @@ -210,20 +209,19 @@ extern void srun_timeout (struct job_record *job_ptr) srun_timeout_msg_t *msg_arg; ListIterator step_iterator; struct 
step_record *step_ptr; - + xassert(job_ptr); if (job_ptr->job_state != JOB_RUNNING) return; - - if (job_ptr->other_port - && job_ptr->other_host && job_ptr->other_host[0]) { + + if (job_ptr->other_port && job_ptr->alloc_node && job_ptr->resp_host) { addr = xmalloc(sizeof(struct sockaddr_in)); - slurm_set_addr(addr, job_ptr->other_port, job_ptr->other_host); + slurm_set_addr(addr, job_ptr->other_port, job_ptr->resp_host); msg_arg = xmalloc(sizeof(srun_timeout_msg_t)); msg_arg->job_id = job_ptr->job_id; msg_arg->step_id = NO_VAL; msg_arg->timeout = job_ptr->end_time; - _srun_agent_launch(addr, job_ptr->other_host, SRUN_TIMEOUT, + _srun_agent_launch(addr, job_ptr->alloc_node, SRUN_TIMEOUT, msg_arg); } @@ -242,7 +240,7 @@ extern void srun_timeout (struct job_record *job_ptr) msg_arg->step_id = step_ptr->step_id; msg_arg->timeout = job_ptr->end_time; _srun_agent_launch(addr, step_ptr->host, SRUN_TIMEOUT, - msg_arg); + msg_arg); } list_iterator_destroy(step_iterator); } @@ -262,13 +260,13 @@ extern void srun_user_message(struct job_record *job_ptr, char *msg) return; if (job_ptr->other_port - && job_ptr->other_host && job_ptr->other_host[0]) { + && job_ptr->resp_host && job_ptr->resp_host[0]) { addr = xmalloc(sizeof(struct sockaddr_in)); - slurm_set_addr(addr, job_ptr->other_port, job_ptr->other_host); + slurm_set_addr(addr, job_ptr->other_port, job_ptr->resp_host); msg_arg = xmalloc(sizeof(srun_user_msg_t)); msg_arg->job_id = job_ptr->job_id; msg_arg->msg = xstrdup(msg); - _srun_agent_launch(addr, job_ptr->other_host, SRUN_USER_MSG, + _srun_agent_launch(addr, job_ptr->resp_host, SRUN_USER_MSG, msg_arg); } } @@ -285,19 +283,17 @@ extern void srun_job_complete (struct job_record *job_ptr) struct step_record *step_ptr; xassert(job_ptr); - if (job_ptr->other_port - && job_ptr->other_host && job_ptr->other_host[0]) { + + if (job_ptr->other_port && job_ptr->alloc_node && job_ptr->resp_host) { addr = xmalloc(sizeof(struct sockaddr_in)); - slurm_set_addr(addr, job_ptr->other_port, job_ptr->other_host); - msg_arg = xmalloc(sizeof(srun_timeout_msg_t)); + slurm_set_addr(addr, job_ptr->other_port, job_ptr->resp_host); + msg_arg = xmalloc(sizeof(srun_job_complete_msg_t)); msg_arg->job_id = job_ptr->job_id; msg_arg->step_id = NO_VAL; - _srun_agent_launch(addr, job_ptr->other_host, - SRUN_JOB_COMPLETE, - msg_arg); + _srun_agent_launch(addr, job_ptr->alloc_node, + SRUN_JOB_COMPLETE, msg_arg); } - step_iterator = list_iterator_create(job_ptr->step_list); while ((step_ptr = (struct step_record *) list_next(step_iterator))) { if (step_ptr->batch_step) /* batch script itself */ diff --git a/src/slurmctld/srun_comm.h b/src/slurmctld/srun_comm.h index f74cdec7a..6e796100b 100644 --- a/src/slurmctld/srun_comm.h +++ b/src/slurmctld/srun_comm.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmctld/state_save.c b/src/slurmctld/state_save.c index f46d23dc8..105b624bf 100644 --- a/src/slurmctld/state_save.c +++ b/src/slurmctld/state_save.c @@ -4,7 +4,7 @@ * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
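The srun_comm.c changes above converge on one notification pattern: verify other_port, alloc_node and resp_host, resolve the srun address, then hand an xmalloc'd message to the agent. A condensed sketch with a hypothetical helper name, fixed here to SRUN_JOB_COMPLETE for brevity:

    static void _notify_srun_job_complete(struct job_record *job_ptr)
    {
            slurm_addr *addr;
            srun_job_complete_msg_t *msg_arg;

            if (!job_ptr->other_port || !job_ptr->alloc_node ||
                !job_ptr->resp_host)
                    return;
            addr = xmalloc(sizeof(struct sockaddr_in));
            slurm_set_addr(addr, job_ptr->other_port, job_ptr->resp_host);
            msg_arg = xmalloc(sizeof(srun_job_complete_msg_t));
            msg_arg->job_id  = job_ptr->job_id;
            msg_arg->step_id = NO_VAL;
            _srun_agent_launch(addr, job_ptr->alloc_node, SRUN_JOB_COMPLETE,
                               msg_arg);
    }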
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmctld/state_save.h b/src/slurmctld/state_save.h index 270566604..d800752ad 100644 --- a/src/slurmctld/state_save.h +++ b/src/slurmctld/state_save.h @@ -4,7 +4,7 @@ * Copyright (C) 2004-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c index 5b39ef9b8..cef575330 100644 --- a/src/slurmctld/step_mgr.c +++ b/src/slurmctld/step_mgr.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * step_mgr.c - manage the job step information of slurm - * $Id: step_mgr.c 13857 2008-04-11 19:14:36Z jette $ + * $Id: step_mgr.c 13858 2008-04-11 19:29:30Z jette $ ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -59,7 +59,8 @@ #include "src/common/switch.h" #include "src/common/xstring.h" #include "src/common/forward.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/slurmctld/agent.h" #include "src/slurmctld/locks.h" @@ -67,15 +68,20 @@ #include "src/slurmctld/slurmctld.h" #include "src/slurmctld/srun_comm.h" +#define STEP_DEBUG 0 #define MAX_RETRIES 10 +static int _count_cpus(bitstr_t *bitmap); static void _pack_ctld_job_step_info(struct step_record *step, Buf buffer); static bitstr_t * _pick_step_nodes (struct job_record *job_ptr, - job_step_create_request_msg_t *step_spec ); + job_step_create_request_msg_t *step_spec, + bool batch_step, int *return_code); static hostlist_t _step_range_to_hostlist(struct step_record *step_ptr, uint32_t range_first, uint32_t range_last); static int _step_hostname_to_inx(struct step_record *step_ptr, char *node_name); +static void _step_dealloc_lps(struct step_record *step_ptr); + /* * create_step_record - create an empty step_record for the specified job. 
@@ -94,8 +100,9 @@ create_step_record (struct job_record *job_ptr) last_job_update = time(NULL); step_ptr->job_ptr = job_ptr; step_ptr->step_id = (job_ptr->next_step_id)++; - step_ptr->start_time = time ( NULL ) ; - step_ptr->jobacct = jobacct_g_alloc(NULL); + step_ptr->start_time = time(NULL) ; + step_ptr->jobacct = jobacct_gather_g_create(NULL); + step_ptr->ckpt_path = NULL; if (list_append (job_ptr->step_list, step_ptr) == NULL) fatal ("create_step_record: unable to allocate memory"); @@ -135,11 +142,12 @@ delete_step_records (struct job_record *job_ptr, int filter) xfree(step_ptr->host); xfree(step_ptr->name); slurm_step_layout_destroy(step_ptr->step_layout); - jobacct_g_free(step_ptr->jobacct); + jobacct_gather_g_destroy(step_ptr->jobacct); FREE_NULL_BITMAP(step_ptr->step_node_bitmap); FREE_NULL_BITMAP(step_ptr->exit_node_bitmap); if (step_ptr->network) xfree(step_ptr->network); + xfree(step_ptr->ckpt_path); xfree(step_ptr); } @@ -179,14 +187,19 @@ delete_step_record (struct job_record *job_ptr, uint32_t step_id) switch_free_jobinfo (step_ptr->switch_job); } checkpoint_free_jobinfo (step_ptr->check_job); + + if (step_ptr->mem_per_task) + select_g_step_fini(step_ptr); + xfree(step_ptr->host); xfree(step_ptr->name); slurm_step_layout_destroy(step_ptr->step_layout); - jobacct_g_free(step_ptr->jobacct); + jobacct_gather_g_destroy(step_ptr->jobacct); FREE_NULL_BITMAP(step_ptr->step_node_bitmap); FREE_NULL_BITMAP(step_ptr->exit_node_bitmap); if (step_ptr->network) xfree(step_ptr->network); + xfree(step_ptr->ckpt_path); xfree(step_ptr); error_code = 0; break; @@ -214,9 +227,12 @@ dump_step_desc(job_step_create_request_msg_t *step_spec) debug3(" num_tasks=%u relative=%u task_dist=%u node_list=%s", step_spec->num_tasks, step_spec->relative, step_spec->task_dist, step_spec->node_list); - debug3(" host=%s port=%u name=%s network=%s", + debug3(" host=%s port=%u name=%s network=%s checkpoint=%u", step_spec->host, step_spec->port, step_spec->name, - step_spec->network); + step_spec->network, step_spec->ckpt_interval); + debug3(" checkpoint-path=%s exclusive=%u immediate=%u mem_per_task=%u", + step_spec->ckpt_path, step_spec->exclusive, + step_spec->immediate, step_spec->mem_per_task); } @@ -374,23 +390,24 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid, info("job_step_complete: invalid job id %u", job_id); return ESLURM_INVALID_JOB_ID; } - + + if ((job_ptr->user_id != uid) && (uid != 0) && (uid != getuid())) { + error("Security violation, JOB_COMPLETE RPC from uid %d", + uid); + return ESLURM_USER_ID_MISSING; + } + step_ptr = find_step_record(job_ptr, step_id); if (step_ptr == NULL) return ESLURM_INVALID_JOB_ID; - else - jobacct_g_step_complete_slurmctld(step_ptr); - + + jobacct_storage_g_step_complete(acct_db_conn, step_ptr); + _step_dealloc_lps(step_ptr); + if ((job_ptr->kill_on_step_done) && (list_count(job_ptr->step_list) <= 1) && (!IS_JOB_FINISHED(job_ptr))) return job_complete(job_id, uid, requeue, job_return_code); - - if ((job_ptr->user_id != uid) && (uid != 0) && (uid != getuid())) { - error("Security violation, JOB_COMPLETE RPC from uid %d", - uid); - return ESLURM_USER_ID_MISSING; - } last_job_update = time(NULL); error_code = delete_step_record(job_ptr, step_id); @@ -407,36 +424,75 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid, * we satisfy the super-set of constraints. 
* IN job_ptr - pointer to job to have new step started * IN step_spec - job step specification + * IN batch_step - if set then step is a batch script + * OUT return_code - exit code or SLURM_SUCCESS * global: node_record_table_ptr - pointer to global node table * NOTE: returns all of a job's nodes if step_spec->node_count == INFINITE * NOTE: returned bitmap must be freed by the caller using bit_free() */ static bitstr_t * _pick_step_nodes (struct job_record *job_ptr, - job_step_create_request_msg_t *step_spec) + job_step_create_request_msg_t *step_spec, + bool batch_step, int *return_code) { bitstr_t *nodes_avail = NULL, *nodes_idle = NULL; bitstr_t *nodes_picked = NULL, *node_tmp = NULL; - int error_code, nodes_picked_cnt = 0, cpus_picked_cnt, i; -/* char *temp; */ + int error_code, nodes_picked_cnt=0, cpus_picked_cnt = 0, i; ListIterator step_iterator; struct step_record *step_p; +#if STEP_DEBUG + char *temp; +#endif - if (job_ptr->node_bitmap == NULL) + *return_code = SLURM_SUCCESS; + if (job_ptr->node_bitmap == NULL) { + *return_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; return NULL; + } nodes_avail = bit_copy (job_ptr->node_bitmap); if (nodes_avail == NULL) fatal("bit_copy malloc failure"); bit_and (nodes_avail, up_node_bitmap); + /* In exclusive mode, just satisfy the processor count. + * Do not use nodes that have no unused CPUs */ + if (step_spec->exclusive) { + int i, j=0, avail, tot_cpus = 0; + for (i=bit_ffs(job_ptr->node_bitmap); i<node_record_count; + i++) { + if (!bit_test(job_ptr->node_bitmap, i)) + continue; + avail = job_ptr->alloc_lps[j] - job_ptr->used_lps[j]; + tot_cpus += job_ptr->alloc_lps[j]; + if ((avail <= 0) || + (cpus_picked_cnt >= step_spec->cpu_count)) + bit_clear(nodes_avail, i); + else + cpus_picked_cnt += avail; + if (++j >= job_ptr->node_cnt) + break; + } + if (cpus_picked_cnt >= step_spec->cpu_count) + return nodes_avail; + + FREE_NULL_BITMAP(nodes_avail); + if (tot_cpus >= step_spec->cpu_count) + *return_code = ESLURM_NODES_BUSY; + else + *return_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; + return NULL; + } + if ( step_spec->node_count == INFINITE) /* use all nodes */ return nodes_avail; if (step_spec->node_list) { bitstr_t *selected_nodes = NULL; -/* info("selected nodelist is %s", step_spec->node_list); */ +#if STEP_DEBUG + info("selected nodelist is %s", step_spec->node_list); +#endif error_code = node_name2bitmap(step_spec->node_list, false, &selected_nodes); @@ -532,26 +588,30 @@ _pick_step_nodes (struct job_record *job_ptr, while ((step_p = (struct step_record *) list_next(step_iterator))) { bit_or(nodes_idle, step_p->step_node_bitmap); - /* temp = bitmap2node_name(step_p->step_node_bitmap); */ -/* info("step %d has nodes %s", step_p->step_id, temp); */ -/* xfree(temp); */ +#if STEP_DEBUG + temp = bitmap2node_name(step_p->step_node_bitmap); + info("step %d has nodes %s", step_p->step_id, temp); + xfree(temp); +#endif } list_iterator_destroy (step_iterator); bit_not(nodes_idle); bit_and(nodes_idle, nodes_avail); } -/* temp = bitmap2node_name(nodes_avail); */ -/* info("can pick from %s %d", temp, step_spec->node_count); */ -/* xfree(temp); */ -/* temp = bitmap2node_name(nodes_idle); */ -/* info("can pick from %s", temp); */ -/* xfree(temp); */ - +#if STEP_DEBUG + temp = bitmap2node_name(nodes_avail); + info("can pick from %s %d", temp, step_spec->node_count); + xfree(temp); + temp = bitmap2node_name(nodes_idle); + info("can pick from %s", temp); + xfree(temp); +#endif + /* if user specifies step needs a specific processor count and * all 
nodes have the same processor count, just translate this to * a node count */ - if (step_spec->cpu_count && (job_ptr->num_cpu_groups == 1) - && job_ptr->cpus_per_node[0]) { + if (step_spec->cpu_count && (job_ptr->num_cpu_groups == 1) && + job_ptr->cpus_per_node[0]) { i = (step_spec->cpu_count + (job_ptr->cpus_per_node[0] - 1) ) / job_ptr->cpus_per_node[0]; step_spec->node_count = (i > step_spec->node_count) ? @@ -561,7 +621,9 @@ _pick_step_nodes (struct job_record *job_ptr, if (step_spec->node_count) { nodes_picked_cnt = bit_set_count(nodes_picked); -/* info("got %d %d", step_spec->node_count, nodes_picked_cnt); */ +#if STEP_DEBUG + info("got %u %d", step_spec->node_count, nodes_picked_cnt); +#endif if (nodes_idle && (bit_set_count(nodes_idle) >= step_spec->node_count) && (step_spec->node_count > nodes_picked_cnt)) { @@ -594,9 +656,10 @@ _pick_step_nodes (struct job_record *job_ptr, } if (step_spec->cpu_count) { - cpus_picked_cnt = count_cpus(nodes_picked); - /* person is requesting more cpus than we got from the - picked nodes we should return with an error */ + /* make sure the selected nodes have enough cpus */ + cpus_picked_cnt = _count_cpus(nodes_picked); + /* user is requesting more cpus than we got from the + * picked nodes we should return with an error */ if(step_spec->cpu_count > cpus_picked_cnt) { debug2("Have %d nodes with %d cpus which is less " "than what the user is asking for (%d cpus) " @@ -605,60 +668,6 @@ _pick_step_nodes (struct job_record *job_ptr, step_spec->cpu_count); goto cleanup; } - /* Not sure why the rest of this 'if' is here - since this will only - change the number of requested nodes by added nodes - to the picked bitmap which isn't what we want to do - if the user requests a node count. If the user - doesn't specify one then the entire allocation is - already set so we should return an error in either - case */ - -/* if (nodes_idle */ -/* && (step_spec->cpu_count > cpus_picked_cnt)) { */ -/* int first_bit, last_bit; */ -/* first_bit = bit_ffs(nodes_idle); */ -/* if(first_bit == -1) */ -/* goto no_idle_bits; */ -/* last_bit = bit_fls(nodes_idle); */ -/* if(last_bit == -1) */ -/* goto no_idle_bits; */ - -/* for (i = first_bit; i <= last_bit; i++) { */ -/* if (bit_test (nodes_idle, i) != 1) */ -/* continue; */ -/* bit_set (nodes_picked, i); */ -/* bit_clear (nodes_avail, i); */ -/* /\* bit_clear (nodes_idle, i); unused *\/ */ -/* cpus_picked_cnt += */ -/* node_record_table_ptr[i].cpus; */ -/* if (cpus_picked_cnt >= step_spec->cpu_count) */ -/* break; */ -/* } */ -/* if (step_spec->cpu_count > cpus_picked_cnt) */ -/* goto cleanup; */ -/* } */ -/* no_idle_bits: */ -/* if (step_spec->cpu_count > cpus_picked_cnt) { */ -/* int first_bit, last_bit; */ -/* first_bit = bit_ffs(nodes_avail); */ -/* if(first_bit == -1) */ -/* goto cleanup; */ -/* last_bit = bit_fls(nodes_avail); */ -/* if(last_bit == -1) */ -/* goto cleanup; */ -/* for (i = first_bit; i <= last_bit; i++) { */ -/* if (bit_test (nodes_avail, i) != 1) */ -/* continue; */ -/* bit_set (nodes_picked, i); */ -/* cpus_picked_cnt += */ -/* node_record_table_ptr[i].cpus; */ -/* if (cpus_picked_cnt >= step_spec->cpu_count) */ -/* break; */ -/* } */ -/* if (step_spec->cpu_count > cpus_picked_cnt) */ -/* goto cleanup; */ -/* } */ } FREE_NULL_BITMAP(nodes_avail); @@ -669,9 +678,105 @@ cleanup: FREE_NULL_BITMAP(nodes_avail); FREE_NULL_BITMAP(nodes_idle); FREE_NULL_BITMAP(nodes_picked); + *return_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE; return NULL; } +/* + * _count_cpus - report how many cpus are associated 
with the identified nodes + * IN bitmap - map of nodes to tally + * RET cpu count + * globals: node_record_count - number of nodes configured + * node_record_table_ptr - pointer to global node table + */ +static int _count_cpus(bitstr_t *bitmap) +{ + int i, sum; + + sum = 0; + for (i = 0; i < node_record_count; i++) { + if (bit_test(bitmap, i) != 1) + continue; + if (slurmctld_conf.fast_schedule) + sum += node_record_table_ptr[i].config_ptr->cpus; + else + sum += node_record_table_ptr[i].cpus; + } + return sum; +} + +/* Update a job's record of allocated CPUs when a job step gets scheduled */ +extern void step_alloc_lps(struct step_record *step_ptr) +{ + struct job_record *job_ptr = step_ptr->job_ptr; + int i_node, i_first, i_last; + int job_node_inx = -1, step_node_inx = -1; + + i_first = bit_ffs(job_ptr->node_bitmap); + i_last = bit_fls(job_ptr->node_bitmap); + if (i_first == -1) /* empty bitmap */ + return; + for (i_node = i_first; i_node <= i_last; i_node++) { + if (!bit_test(job_ptr->node_bitmap, i_node)) + continue; + job_node_inx++; + if (!bit_test(step_ptr->step_node_bitmap, i_node)) + continue; + step_node_inx++; + job_ptr->used_lps[job_node_inx] += + step_ptr->step_layout->tasks[step_node_inx]; +#if 0 + info("step alloc of %s procs: %u of %u", + node_record_table_ptr[i_node].name, + job_ptr->used_lps[job_node_inx], + job_ptr->alloc_lps[job_node_inx]); +#endif + if (step_node_inx == (step_ptr->step_layout->node_cnt - 1)) + break; + } + +} + +static void _step_dealloc_lps(struct step_record *step_ptr) +{ + struct job_record *job_ptr = step_ptr->job_ptr; + int i_node, i_first, i_last; + int job_node_inx = -1, step_node_inx = -1; + + if (step_ptr->step_layout == NULL) /* batch step */ + return; + + i_first = bit_ffs(job_ptr->node_bitmap); + i_last = bit_fls(job_ptr->node_bitmap); + if (i_first == -1) /* empty bitmap */ + return; + for (i_node = i_first; i_node <= i_last; i_node++) { + if (!bit_test(job_ptr->node_bitmap, i_node)) + continue; + job_node_inx++; + if (!bit_test(step_ptr->step_node_bitmap, i_node)) + continue; + step_node_inx++; + if (job_ptr->used_lps[job_node_inx] >= + step_ptr->step_layout->tasks[step_node_inx]) { + job_ptr->used_lps[job_node_inx] -= + step_ptr->step_layout->tasks[step_node_inx]; + } else { + error("_step_dealloc_lps: underflow for %u.%u", + job_ptr->job_id, step_ptr->step_id); + job_ptr->used_lps[job_node_inx] = 0; + } +#if 0 + info("step dealloc of %s procs: %u of %u", + node_record_table_ptr[i_node].name, + job_ptr->used_lps[job_node_inx], + job_ptr->alloc_lps[job_node_inx]); +#endif + if (step_node_inx == (step_ptr->step_layout->node_cnt - 1)) + break; + } + +} /* * step_create - creates a step_record in step_specs->job_id, sets up the @@ -692,7 +797,7 @@ step_create(job_step_create_request_msg_t *step_specs, struct step_record *step_ptr; struct job_record *job_ptr; bitstr_t *nodeset; - int node_count; + int node_count, ret_code; time_t now = time(NULL); char *step_node_list = NULL; @@ -701,8 +806,10 @@ step_create(job_step_create_request_msg_t *step_specs, if (job_ptr == NULL) return ESLURM_INVALID_JOB_ID ; - if (job_ptr->job_state == JOB_SUSPENDED) + if ((job_ptr->details == NULL) || + (job_ptr->job_state == JOB_SUSPENDED)) return ESLURM_DISABLED; + if (IS_JOB_PENDING(job_ptr)) { /* NOTE: LSF creates a job allocation for batch jobs. 
* After the allocation has been made, LSF submits a @@ -728,6 +835,16 @@ step_create(job_step_create_request_msg_t *step_specs, (job_ptr->end_time <= time(NULL))) return ESLURM_ALREADY_DONE; + if (job_ptr->details->job_min_memory) { + /* use memory reserved by job, no limit on steps */ + step_specs->mem_per_task = 0; + } else if (step_specs->mem_per_task) { + if (slurmctld_conf.max_mem_per_task && + (step_specs->mem_per_task > slurmctld_conf.max_mem_per_task)) + return ESLURM_INVALID_TASK_MEMORY; + } else + step_specs->mem_per_task = slurmctld_conf.def_mem_per_task; + if ((step_specs->task_dist != SLURM_DIST_CYCLIC) && (step_specs->task_dist != SLURM_DIST_BLOCK) && (step_specs->task_dist != SLURM_DIST_CYCLIC_CYCLIC) && @@ -742,7 +859,14 @@ step_create(job_step_create_request_msg_t *step_specs, && (!strcmp(slurmctld_conf.switch_type, "switch/elan"))) { return ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED; } - + + if ((step_specs->host && (strlen(step_specs->host) > MAX_STR_LEN)) || + (step_specs->node_list && (strlen(step_specs->node_list) > MAX_STR_LEN)) || + (step_specs->network && (strlen(step_specs->network) > MAX_STR_LEN)) || + (step_specs->name && (strlen(step_specs->name) > MAX_STR_LEN)) || + (step_specs->ckpt_path && (strlen(step_specs->ckpt_path) > MAX_STR_LEN))) + return ESLURM_PATHNAME_TOO_LONG; + /* if the overcommit flag is checked we 0 out the cpu_count * which makes it so we don't check to see the available cpus */ @@ -755,9 +879,9 @@ step_create(job_step_create_request_msg_t *step_specs, job_ptr->kill_on_step_done = kill_job_when_step_done; job_ptr->time_last_active = now; - nodeset = _pick_step_nodes(job_ptr, step_specs); + nodeset = _pick_step_nodes(job_ptr, step_specs, batch_step, &ret_code); if (nodeset == NULL) - return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE ; + return ret_code; node_count = bit_set_count(nodeset); if (step_specs->num_tasks == NO_VAL) { @@ -792,8 +916,10 @@ step_create(job_step_create_request_msg_t *step_specs, xfree(step_specs->node_list); step_specs->node_list = xstrdup(step_node_list); } -/* info("got %s and %s looking for %d nodes", step_node_list, */ -/* step_specs->node_list, step_specs->node_count); */ +#if STEP_DEBUG + info("got %s and %s looking for %d nodes", step_node_list, + step_specs->node_list, step_specs->node_count); +#endif step_ptr->step_node_bitmap = nodeset; switch(step_specs->task_dist) { @@ -810,7 +936,12 @@ step_create(job_step_create_request_msg_t *step_specs, step_ptr->port = step_specs->port; step_ptr->host = xstrdup(step_specs->host); step_ptr->batch_step = batch_step; + step_ptr->mem_per_task = step_specs->mem_per_task; + step_ptr->ckpt_interval = step_specs->ckpt_interval; + step_ptr->ckpt_time = now; step_ptr->exit_code = NO_VAL; + step_ptr->exclusive = step_specs->exclusive; + step_ptr->ckpt_path = xstrdup(step_specs->ckpt_path); /* step's name and network default to job's values if not * specified in the step specification */ @@ -822,7 +953,7 @@ step_create(job_step_create_request_msg_t *step_specs, step_ptr->network = xstrdup(step_specs->network); else step_ptr->network = xstrdup(job_ptr->network); - + /* a batch script does not need switch info */ if (!batch_step) { step_ptr->step_layout = @@ -846,12 +977,20 @@ step_create(job_step_create_request_msg_t *step_specs, delete_step_record (job_ptr, step_ptr->step_id); return ESLURM_INTERCONNECT_FAILURE; } + step_alloc_lps(step_ptr); } if (checkpoint_alloc_jobinfo (&step_ptr->check_job) < 0) fatal ("step_create: checkpoint_alloc_jobinfo error"); xfree(step_node_list); + if 
(step_ptr->mem_per_task && + (select_g_step_begin(step_ptr) != SLURM_SUCCESS)) { + error("No memory to allocate step for job %u", job_ptr->job_id); + step_ptr->mem_per_task = 0; /* no memory to be freed */ + delete_step_record (job_ptr, step_ptr->step_id); + return ESLURM_INVALID_TASK_MEMORY; + } *new_step_record = step_ptr; - jobacct_g_step_start_slurmctld(step_ptr); + jobacct_storage_g_step_start(acct_db_conn, step_ptr); return SLURM_SUCCESS; } @@ -866,14 +1005,10 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr, uint32_t cpu_count_reps[node_count]; int cpu_inx = -1; int usable_cpus = 0, i; - int set_nodes = 0; - int inx = 0; + int set_nodes = 0, set_cpus = 0; int pos = -1; struct job_record *job_ptr = step_ptr->job_ptr; - /* node_pos is the position in the node in the job */ - uint32_t node_pos = job_ptr->cpu_count_reps[inx]; - /* build the cpus-per-node arrays for the subset of nodes used by this job step */ for (i = 0; i < node_record_count; i++) { @@ -882,15 +1017,19 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr, pos = bit_get_pos_num(job_ptr->node_bitmap, i); if (pos == -1) return NULL; - /* need to get the correct num of cpus on the - node */ - while(pos >= node_pos) { - node_pos += - job_ptr->cpu_count_reps[++inx]; - } - debug2("%d got inx of %d cpus = %d pos = %d", - i, inx, job_ptr->cpus_per_node[inx], pos); - usable_cpus = job_ptr->cpus_per_node[inx]; + if (step_ptr->exclusive) { + usable_cpus = job_ptr->alloc_lps[pos] - + job_ptr->used_lps[pos]; + if (usable_cpus < 0) { + error("step_layout_create exclusive"); + return NULL; + } + usable_cpus = MAX(usable_cpus, + (num_tasks - set_cpus)); + } else + usable_cpus = job_ptr->alloc_lps[pos]; + debug2("step_layout cpus = %d pos = %d", + usable_cpus, pos); if ((cpu_inx == -1) || (cpus_per_node[cpu_inx] != usable_cpus)) { @@ -901,10 +1040,12 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr, } else cpu_count_reps[cpu_inx]++; set_nodes++; - if(set_nodes == node_count) + set_cpus += usable_cpus; + if (set_nodes == node_count) break; } } + /* layout the tasks on the nodes */ return slurm_step_layout_create(step_node_list, cpus_per_node, cpu_count_reps, @@ -931,6 +1072,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer) } pack32(step_ptr->job_ptr->job_id, buffer); pack16(step_ptr->step_id, buffer); + pack16(step_ptr->ckpt_interval, buffer); pack32(step_ptr->job_ptr->user_id, buffer); pack32(task_cnt, buffer); @@ -949,6 +1091,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer) packstr(step_ptr->name, buffer); packstr(step_ptr->network, buffer); pack_bit_fmt(step_ptr->step_node_bitmap, buffer); + packstr(step_ptr->ckpt_path, buffer); } @@ -988,6 +1131,12 @@ extern int pack_ctld_job_step_info_response_msg(uint32_t job_id, (job_ptr->part_ptr) && (job_ptr->part_ptr->hidden)) continue; + + if (slurmctld_conf.private_data + && (job_ptr->user_id != uid) + && !validate_super_user(uid)) + continue; + step_iterator = list_iterator_create(job_ptr->step_list); while ((step_ptr = @@ -1007,6 +1156,10 @@ extern int pack_ctld_job_step_info_response_msg(uint32_t job_id, (job_ptr->part_ptr) && (job_ptr->part_ptr->hidden)) job_ptr = NULL; + else if (slurmctld_conf.private_data + && (job_ptr->user_id != uid) && !validate_super_user(uid)) + job_ptr = NULL; + if (job_ptr) { step_iterator = list_iterator_create(job_ptr->step_list); @@ -1027,6 +1180,10 @@ extern int 
pack_ctld_job_step_info_response_msg(uint32_t job_id, && (job_ptr->part_ptr) && (job_ptr->part_ptr->hidden)) job_ptr = NULL; + else if (slurmctld_conf.private_data + && (job_ptr->user_id != uid) && !validate_super_user(uid)) + job_ptr = NULL; + step_ptr = find_step_record(job_ptr, step_id); if (step_ptr == NULL) error_code = ESLURM_INVALID_JOB_ID; @@ -1225,6 +1382,62 @@ extern int job_step_checkpoint_comp(checkpoint_comp_msg_t *ckpt_ptr, return rc; } +/* + * job_step_checkpoint_task_comp - note task checkpoint completion + * IN ckpt_ptr - checkpoint task complete status message + * IN uid - user id of the user issuing the RPC + * IN conn_fd - file descriptor on which to send reply + * RET 0 on success, otherwise ESLURM error code + */ +extern int job_step_checkpoint_task_comp(checkpoint_task_comp_msg_t *ckpt_ptr, + uid_t uid, slurm_fd conn_fd) +{ + int rc = SLURM_SUCCESS; + struct job_record *job_ptr; + struct step_record *step_ptr; + slurm_msg_t resp_msg; + return_code_msg_t rc_msg; + + slurm_msg_t_init(&resp_msg); + + /* find the job */ + job_ptr = find_job_record (ckpt_ptr->job_id); + if (job_ptr == NULL) { + rc = ESLURM_INVALID_JOB_ID; + goto reply; + } + if ((uid != job_ptr->user_id) && (uid != 0)) { + rc = ESLURM_ACCESS_DENIED; + goto reply; + } + if (job_ptr->job_state == JOB_PENDING) { + rc = ESLURM_JOB_PENDING; + goto reply; + } else if ((job_ptr->job_state != JOB_RUNNING) + && (job_ptr->job_state != JOB_SUSPENDED)) { + rc = ESLURM_ALREADY_DONE; + goto reply; + } + + step_ptr = find_step_record(job_ptr, ckpt_ptr->step_id); + if (step_ptr == NULL) { + rc = ESLURM_INVALID_JOB_ID; + goto reply; + } else { + rc = checkpoint_task_comp((void *)step_ptr, + ckpt_ptr->task_id, ckpt_ptr->begin_time, + ckpt_ptr->error_code, ckpt_ptr->error_msg); + last_job_update = time(NULL); + } + + reply: + rc_msg.return_code = rc; + resp_msg.msg_type = RESPONSE_SLURM_RC; + resp_msg.data = &rc_msg; + (void) slurm_send_node_msg(conn_fd, &resp_msg); + return rc; +} + /* * step_partial_comp - Note the completion of a job step on at least * some of its nodes @@ -1255,7 +1468,7 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem, step_ptr->exit_code = req->step_rc; if (max_rc) *max_rc = step_ptr->exit_code; - jobacct_g_aggregate(step_ptr->jobacct, req->jobacct); + jobacct_gather_g_aggregate(step_ptr->jobacct, req->jobacct); /* we don't want to delete the step record here since right after we delete this step again if we delete it here we won't find it when we try the second @@ -1269,7 +1482,7 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem, return EINVAL; } - jobacct_g_aggregate(step_ptr->jobacct, req->jobacct); + jobacct_gather_g_aggregate(step_ptr->jobacct, req->jobacct); if (step_ptr->exit_code == NO_VAL) { /* initialize the node bitmap for exited nodes */ @@ -1435,12 +1648,10 @@ _suspend_job_step(struct job_record *job_ptr, if ((job_ptr->suspend_time) && (job_ptr->suspend_time > step_ptr->start_time)) { step_ptr->pre_sus_time += - difftime(now, - job_ptr->suspend_time); + difftime(now, job_ptr->suspend_time); } else { step_ptr->pre_sus_time += - difftime(now, - step_ptr->start_time); + difftime(now, step_ptr->start_time); } } @@ -1460,6 +1671,34 @@ suspend_job_step(struct job_record *job_ptr) list_iterator_destroy (step_iterator); } +static void +_resume_job_step(struct job_record *job_ptr, + struct step_record *step_ptr, time_t now) +{ + if ((job_ptr->suspend_time) && + (job_ptr->suspend_time < step_ptr->start_time)) { + step_ptr->tot_sus_time += + difftime(now, 
step_ptr->start_time); + } else { + step_ptr->tot_sus_time += + difftime(now, job_ptr->suspend_time); + } +} + +/* Update time stamps for job step resume */ +extern void +resume_job_step(struct job_record *job_ptr) +{ + time_t now = time(NULL); + ListIterator step_iterator; + struct step_record *step_ptr; + + step_iterator = list_iterator_create (job_ptr->step_list); + while ((step_ptr = (struct step_record *) list_next (step_iterator))) { + _resume_job_step(job_ptr, step_ptr, now); + } + list_iterator_destroy (step_iterator); +} /* @@ -1473,6 +1712,9 @@ extern void dump_job_step_state(struct step_record *step_ptr, Buf buffer) pack16(step_ptr->step_id, buffer); pack16(step_ptr->cyclic_alloc, buffer); pack16(step_ptr->port, buffer); + pack16(step_ptr->ckpt_interval, buffer); + pack16(step_ptr->mem_per_task, buffer); + pack32(step_ptr->exit_code, buffer); if (step_ptr->exit_code != NO_VAL) { pack_bit_fmt(step_ptr->exit_node_bitmap, buffer); @@ -1482,9 +1724,13 @@ extern void dump_job_step_state(struct step_record *step_ptr, Buf buffer) pack_time(step_ptr->start_time, buffer); pack_time(step_ptr->pre_sus_time, buffer); + pack_time(step_ptr->tot_sus_time, buffer); + pack_time(step_ptr->ckpt_time, buffer); + packstr(step_ptr->host, buffer); packstr(step_ptr->name, buffer); packstr(step_ptr->network, buffer); + packstr(step_ptr->ckpt_path, buffer); pack16(step_ptr->batch_step, buffer); if (!step_ptr->batch_step) { pack_slurm_step_layout(step_ptr->step_layout, buffer); @@ -1501,10 +1747,11 @@ extern void dump_job_step_state(struct step_record *step_ptr, Buf buffer) extern int load_step_state(struct job_record *job_ptr, Buf buffer) { struct step_record *step_ptr = NULL; - uint16_t step_id, cyclic_alloc, name_len, port, batch_step, bit_cnt; - uint32_t exit_code; - time_t start_time, pre_sus_time; - char *host = NULL; + uint16_t step_id, cyclic_alloc, port, batch_step, bit_cnt; + uint16_t ckpt_interval, mem_per_task; + uint32_t exit_code, name_len; + time_t start_time, pre_sus_time, tot_sus_time, ckpt_time; + char *host = NULL, *ckpt_path = NULL; char *name = NULL, *network = NULL, *bit_fmt = NULL; switch_jobinfo_t switch_tmp = NULL; check_jobinfo_t check_tmp = NULL; @@ -1513,6 +1760,9 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer) safe_unpack16(&step_id, buffer); safe_unpack16(&cyclic_alloc, buffer); safe_unpack16(&port, buffer); + safe_unpack16(&ckpt_interval, buffer); + safe_unpack16(&mem_per_task, buffer); + safe_unpack32(&exit_code, buffer); if (exit_code != NO_VAL) { safe_unpackstr_xmalloc(&bit_fmt, &name_len, buffer); @@ -1521,9 +1771,13 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer) safe_unpack_time(&start_time, buffer); safe_unpack_time(&pre_sus_time, buffer); + safe_unpack_time(&tot_sus_time, buffer); + safe_unpack_time(&ckpt_time, buffer); + safe_unpackstr_xmalloc(&host, &name_len, buffer); safe_unpackstr_xmalloc(&name, &name_len, buffer); safe_unpackstr_xmalloc(&network, &name_len, buffer); + safe_unpackstr_xmalloc(&ckpt_path, &name_len, buffer); safe_unpack16(&batch_step, buffer); if (!batch_step) { if (unpack_slurm_step_layout(&step_layout, buffer)) @@ -1554,15 +1808,20 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer) step_ptr->cyclic_alloc = cyclic_alloc; step_ptr->name = name; step_ptr->network = network; + step_ptr->ckpt_path = ckpt_path; step_ptr->port = port; + step_ptr->ckpt_interval= ckpt_interval; + step_ptr->mem_per_task = mem_per_task; step_ptr->host = host; step_ptr->batch_step = batch_step; host = NULL; /* 
re-used, nothing left to free */ step_ptr->start_time = start_time; step_ptr->pre_sus_time = pre_sus_time; + step_ptr->tot_sus_time = tot_sus_time; + step_ptr->ckpt_time = ckpt_time; slurm_step_layout_destroy(step_ptr->step_layout); - step_ptr->step_layout = step_layout; + step_ptr->step_layout = step_layout; step_ptr->switch_job = switch_tmp; step_ptr->check_job = check_tmp; @@ -1595,9 +1854,59 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer) xfree(host); xfree(name); xfree(network); + xfree(ckpt_path); xfree(bit_fmt); if (switch_tmp) switch_free_jobinfo(switch_tmp); slurm_step_layout_destroy(step_layout); return SLURM_FAILURE; } + +/* Perform periodic job step checkpoints (per user request) */ +extern void step_checkpoint(void) +{ + static int ckpt_run = -1; + time_t now = time(NULL), ckpt_due; + ListIterator job_iterator; + struct job_record *job_ptr; + ListIterator step_iterator; + struct step_record *step_ptr; + time_t event_time; + uint32_t error_code; + char *error_msg; + + /* Exit if "checkpoint/none" is configured */ + if (ckpt_run == -1) { + char *ckpt_type = slurm_get_checkpoint_type(); + if (strcasecmp(ckpt_type, "checkpoint/none")) + ckpt_run = 1; + else + ckpt_run = 0; + xfree(ckpt_type); + } + if (ckpt_run == 0) + return; + + job_iterator = list_iterator_create(job_list); + while ((job_ptr = (struct job_record *) list_next(job_iterator))) { + if (job_ptr->job_state != JOB_RUNNING) + continue; + step_iterator = list_iterator_create (job_ptr->step_list); + while ((step_ptr = (struct step_record *) + list_next (step_iterator))) { + if (step_ptr->ckpt_interval == 0) + continue; + ckpt_due = step_ptr->ckpt_time + + (step_ptr->ckpt_interval * 60); + if (ckpt_due > now) + continue; + step_ptr->ckpt_time = now; + last_job_update = now; + (void) checkpoint_op(CHECK_CREATE, 0, + (void *)step_ptr, &event_time, + &error_code, &error_msg); + } + list_iterator_destroy (step_iterator); + } + list_iterator_destroy(job_iterator); +} diff --git a/src/slurmctld/trigger_mgr.c b/src/slurmctld/trigger_mgr.c index 6b4871e49..c1686129a 100644 --- a/src/slurmctld/trigger_mgr.c +++ b/src/slurmctld/trigger_mgr.c @@ -2,9 +2,10 @@ * trigger_mgr.c - Event trigger management ***************************************************************************** * Copyright (C) 2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
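For readers tracing the new step_mgr.c accounting code above: each job now carries per-node alloc_lps[]/used_lps[] counters that step_alloc_lps() increments and _step_dealloc_lps() decrements (with underflow protection), and step_checkpoint() fires a checkpoint whenever ckpt_interval minutes have elapsed since ckpt_time. A standalone sketch of both patterns, using plain C and a hypothetical fixed node count in place of SLURM's job and step records:

#include <stdio.h>
#include <time.h>

#define NODES 4                        /* hypothetical node count */

/* Per-node CPU bookkeeping, mirroring alloc_lps[]/used_lps[] in the patch. */
static unsigned alloc_lps[NODES] = { 8, 8, 8, 8 };
static unsigned used_lps[NODES];

/* Charge a step's per-node task counts against the job allocation. */
static void step_alloc(const unsigned tasks[NODES])
{
	for (int i = 0; i < NODES; i++)
		used_lps[i] += tasks[i];
}

/* Release the step's CPUs, guarding against underflow as the patch does. */
static void step_dealloc(const unsigned tasks[NODES])
{
	for (int i = 0; i < NODES; i++) {
		if (used_lps[i] >= tasks[i])
			used_lps[i] -= tasks[i];
		else {
			fprintf(stderr, "dealloc underflow on node %d\n", i);
			used_lps[i] = 0;
		}
	}
}

/* Return 1 if a periodic checkpoint is due, updating *ckpt_time if so. */
static int checkpoint_due(time_t *ckpt_time, unsigned interval_min, time_t now)
{
	if (interval_min == 0)
		return 0;                  /* periodic checkpoints disabled */
	if (*ckpt_time + (time_t) interval_min * 60 > now)
		return 0;                  /* not due yet */
	*ckpt_time = now;                  /* record this checkpoint */
	return 1;
}

int main(void)
{
	unsigned tasks[NODES] = { 2, 2, 2, 2 };
	time_t last = time(NULL) - 3600;   /* pretend last checkpoint was an hour ago */

	step_alloc(tasks);
	printf("node0 in use: %u of %u\n", used_lps[0], alloc_lps[0]);
	printf("checkpoint due: %d\n", checkpoint_due(&last, 30, time(NULL)));
	step_dealloc(tasks);
	return 0;
}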
@@ -64,7 +65,7 @@ #define MAX_PROG_TIME 300 /* maximum run time for program */ /* Change TRIGGER_STATE_VERSION value when changing the state save format */ -#define TRIGGER_STATE_VERSION "VER001" +#define TRIGGER_STATE_VERSION "VER002" /* TRIG_IS_JOB_FINI differs from IS_JOB_FINISHED by considering * completing jobs as not really finished */ @@ -75,13 +76,15 @@ List trigger_list; uint32_t next_trigger_id = 1; static pthread_mutex_t trigger_mutex = PTHREAD_MUTEX_INITIALIZER; bitstr_t *trigger_down_nodes_bitmap = NULL; +bitstr_t *trigger_drained_nodes_bitmap = NULL; +bitstr_t *trigger_fail_nodes_bitmap = NULL; bitstr_t *trigger_up_nodes_bitmap = NULL; static bool trigger_block_err = false; static bool trigger_node_reconfig = false; typedef struct trig_mgr_info { uint32_t trig_id; /* trigger ID */ - uint8_t res_type; /* TRIGGER_RES_TYPE_* */ + uint16_t res_type; /* TRIGGER_RES_TYPE_* */ char * res_id; /* node name or job_id (string) */ bitstr_t *nodes_bitmap; /* bitmap of requested nodes (if applicable) */ uint32_t job_id; /* job ID (if applicable) */ @@ -103,8 +106,7 @@ void _trig_del(void *x) { xfree(tmp); } -#if _DEBUG -static char *_res_type(uint8_t res_type) +static char *_res_type(uint16_t res_type) { if (res_type == TRIGGER_RES_TYPE_JOB) return "job"; @@ -120,6 +122,10 @@ static char *_trig_type(uint16_t trig_type) return "up"; else if (trig_type == TRIGGER_TYPE_DOWN) return "down"; + else if (trig_type == TRIGGER_TYPE_DRAINED) + return "drained"; + else if (trig_type == TRIGGER_TYPE_FAIL) + return "fail"; else if (trig_type == TRIGGER_TYPE_IDLE) return "idle"; else if (trig_type == TRIGGER_TYPE_TIME) @@ -134,6 +140,7 @@ static char *_trig_type(uint16_t trig_type) return "unknown"; } +#if _DEBUG static int _trig_offset(uint16_t offset) { static int rc; @@ -170,6 +177,33 @@ static void _dump_trigger_msg(char *header, trigger_info_msg_t *msg) } #endif +/* Validate trigger program */ +static bool _validate_trigger(trig_mgr_info_t *trig_in) +{ + struct stat buf; + int modes; + + if (stat(trig_in->program, &buf) != 0) { + info("trigger program %s not found", trig_in->program); + return false; + } + if (!S_ISREG(buf.st_mode)) { + info("trigger program %s not a regular file", trig_in->program); + return false; + } + if (buf.st_uid == trig_in->user_id) + modes = (buf.st_mode >> 6) & 07; + else if (buf.st_gid == trig_in->group_id) + modes = (buf.st_mode >> 3) & 07; + else + modes = buf.st_mode & 07; + if (modes & 01) + return true; + + info("trigger program %s not executable", trig_in->program); + return false; +} + extern int trigger_clear(uid_t uid, trigger_info_msg_t *msg) { int rc = ESRCH; @@ -215,7 +249,7 @@ extern int trigger_clear(uid_t uid, trigger_info_msg_t *msg) continue; if (trig_test->state == 2) /* wait for proc termination */ continue; - list_delete(trig_iter); + list_delete_item(trig_iter); rc = SLURM_SUCCESS; } list_iterator_destroy(trig_iter); @@ -288,8 +322,8 @@ extern int trigger_set(uid_t uid, gid_t gid, trigger_info_msg_t *msg) * launched. To prevent the launched program for an arbitrary * user being executed as user SlurmUser, disable all other * users from setting triggers. 
*/ - info("Attemt to set trigger by uid %u", uid); - rc = EPERM; + info("Attempt to set trigger by uid %u != SlurmUser", uid); + rc = ESLURM_ACCESS_DENIED; goto fini; } @@ -345,6 +379,13 @@ extern int trigger_set(uid_t uid, gid_t gid, trigger_info_msg_t *msg) /* move don't copy "program" */ trig_add->program = msg->trigger_array[i].program; msg->trigger_array[i].program = NULL; + if (!_validate_trigger(trig_add)) { + rc = ESLURM_ACCESS_DENIED; + xfree(trig_add->program); + xfree(trig_add->res_id); + xfree(trig_add); + continue; + } list_append(trigger_list, trig_add); schedule_trigger_save(); } @@ -365,6 +406,28 @@ extern void trigger_node_down(struct node_record *node_ptr) slurm_mutex_unlock(&trigger_mutex); } +extern void trigger_node_drained(struct node_record *node_ptr) +{ + int inx = node_ptr - node_record_table_ptr; + + slurm_mutex_lock(&trigger_mutex); + if (trigger_drained_nodes_bitmap == NULL) + trigger_drained_nodes_bitmap = bit_alloc(node_record_count); + bit_set(trigger_drained_nodes_bitmap, inx); + slurm_mutex_unlock(&trigger_mutex); +} + +extern void trigger_node_failing(struct node_record *node_ptr) +{ + int inx = node_ptr - node_record_table_ptr; + + slurm_mutex_lock(&trigger_mutex); + if (trigger_fail_nodes_bitmap == NULL) + trigger_fail_nodes_bitmap = bit_alloc(node_record_count); + bit_set(trigger_fail_nodes_bitmap, inx); + slurm_mutex_unlock(&trigger_mutex); +} + extern void trigger_node_up(struct node_record *node_ptr) { @@ -394,7 +457,7 @@ extern void trigger_block_error(void) static void _dump_trigger_state(trig_mgr_info_t *trig_ptr, Buf buffer) { pack32 (trig_ptr->trig_id, buffer); - pack8 (trig_ptr->res_type, buffer); + pack16 (trig_ptr->res_type, buffer); packstr (trig_ptr->res_id, buffer); /* rebuild nodes_bitmap as needed from res_id */ /* rebuild job_id as needed from res_id */ @@ -410,11 +473,11 @@ static void _dump_trigger_state(trig_mgr_info_t *trig_ptr, Buf buffer) static int _load_trigger_state(Buf buffer) { trig_mgr_info_t *trig_ptr; - uint16_t str_len; + uint32_t str_len; trig_ptr = xmalloc(sizeof(trig_mgr_info_t)); safe_unpack32 (&trig_ptr->trig_id, buffer); - safe_unpack8 (&trig_ptr->res_type, buffer); + safe_unpack16 (&trig_ptr->res_type, buffer); safe_unpackstr_xmalloc(&trig_ptr->res_id, &str_len, buffer); /* rebuild nodes_bitmap as needed from res_id */ /* rebuild job_id as needed from res_id */ @@ -550,7 +613,7 @@ extern int trigger_state_restore(void) Buf buffer; time_t buf_time; char *ver_str = NULL; - uint16_t ver_str_len; + uint32_t ver_str_len; /* read the file */ state_file = xstrdup(slurmctld_conf.state_save_location); @@ -586,17 +649,8 @@ extern int trigger_state_restore(void) unlock_state_files(); buffer = create_buf(data, data_size); - if (size_buf(buffer) >= sizeof(uint16_t) + - strlen(TRIGGER_STATE_VERSION)) { - char *ptr = get_buf_data(buffer); - - if (!memcmp(&ptr[sizeof(uint16_t)], TRIGGER_STATE_VERSION, 3)) { - safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer); - debug3("Version string in trigger_state header is %s", - ver_str); - } - } - if (ver_str && (strcmp(ver_str, TRIGGER_STATE_VERSION) != 0)) { + safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer); + if (strcmp(ver_str, TRIGGER_STATE_VERSION) != 0) { error("Can't recover trigger state, data version incompatable"); xfree(ver_str); free_buf(buffer); @@ -678,6 +732,21 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now) } } + if (trig_in->trig_type & TRIGGER_TYPE_FAIL) { + if (trigger_fail_nodes_bitmap + && bit_overlap(trig_in->job_ptr->node_bitmap, + 
trigger_fail_nodes_bitmap)) { +#if _DEBUG + info("trigger[%u] for job %u node fail", + trig_in->trig_id, trig_in->job_id); +#endif + trig_in->state = 1; + trig_in->trig_time = now + + (trig_in->trig_time - 0x8000); + return; + } + } + if (trig_in->trig_type & TRIGGER_TYPE_UP) { if (trigger_up_nodes_bitmap && bit_overlap(trig_in->job_ptr->node_bitmap, @@ -734,6 +803,62 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now) } } + if ((trig_in->trig_type & TRIGGER_TYPE_DRAINED) + && trigger_drained_nodes_bitmap + && (bit_ffs(trigger_drained_nodes_bitmap) != -1)) { + if (trig_in->nodes_bitmap == NULL) { /* all nodes */ + xfree(trig_in->res_id); + trig_in->res_id = bitmap2node_name( + trigger_drained_nodes_bitmap); + trig_in->state = 1; + } else if (bit_overlap(trig_in->nodes_bitmap, + trigger_drained_nodes_bitmap)) { + bit_and(trig_in->nodes_bitmap, + trigger_drained_nodes_bitmap); + xfree(trig_in->res_id); + trig_in->res_id = bitmap2node_name( + trig_in->nodes_bitmap); + trig_in->state = 1; + } + if (trig_in->state == 1) { + trig_in->trig_time = now + + (trig_in->trig_time - 0x8000); +#if _DEBUG + info("trigger[%u] for node %s drained", + trig_in->trig_id, trig_in->res_id); +#endif + return; + } + } + + if ((trig_in->trig_type & TRIGGER_TYPE_FAIL) + && trigger_fail_nodes_bitmap + && (bit_ffs(trigger_fail_nodes_bitmap) != -1)) { + if (trig_in->nodes_bitmap == NULL) { /* all nodes */ + xfree(trig_in->res_id); + trig_in->res_id = bitmap2node_name( + trigger_fail_nodes_bitmap); + trig_in->state = 1; + } else if (bit_overlap(trig_in->nodes_bitmap, + trigger_fail_nodes_bitmap)) { + bit_and(trig_in->nodes_bitmap, + trigger_fail_nodes_bitmap); + xfree(trig_in->res_id); + trig_in->res_id = bitmap2node_name( + trig_in->nodes_bitmap); + trig_in->state = 1; + } + if (trig_in->state == 1) { + trig_in->trig_time = now + + (trig_in->trig_time - 0x8000); +#if _DEBUG + info("trigger[%u] for node %s fail", + trig_in->trig_id, trig_in->res_id); +#endif + return; + } + } + if (trig_in->trig_type & TRIGGER_TYPE_IDLE) { /* We need to determine which (if any) of these * nodes have been idle for at least the offset time */ @@ -830,6 +955,8 @@ static void _trigger_run_program(trig_mgr_info_t *trig_in) gid_t gid; pid_t child; + if (!_validate_trigger(trig_in)) + return; strncpy(program, trig_in->program, sizeof(program)); pname = strrchr(program, '/'); if (pname == NULL) @@ -849,7 +976,11 @@ static void _trigger_run_program(trig_mgr_info_t *trig_in) int i; for (i=0; i<128; i++) close(i); +#ifdef SETPGRP_TWO_ARGS + setpgrp(0, 0); +#else setpgrp(); +#endif setsid(); setuid(uid); setgid(gid); @@ -864,6 +995,8 @@ static void _clear_event_triggers(void) { if (trigger_down_nodes_bitmap) bit_nclear(trigger_down_nodes_bitmap, 0, (node_record_count-1)); + if (trigger_drained_nodes_bitmap) + bit_nclear(trigger_drained_nodes_bitmap, 0, (node_record_count-1)); if (trigger_up_nodes_bitmap) bit_nclear(trigger_up_nodes_bitmap, 0, (node_record_count-1)); trigger_node_reconfig = false; @@ -878,6 +1011,8 @@ extern void trigger_process(void) slurmctld_lock_t job_node_read_lock = { NO_LOCK, READ_LOCK, READ_LOCK, NO_LOCK }; bool state_change = false; + pid_t rc; + int prog_stat; lock_slurmctld(job_node_read_lock); slurm_mutex_lock(&trigger_mutex); @@ -908,32 +1043,45 @@ extern void trigger_process(void) } else if ((trig_in->state == 2) && (difftime(now, trig_in->trig_time) > MAX_PROG_TIME)) { - bool purge; - if (trig_in->group_id != 0) { - pid_t rc; - killpg(trig_in->group_id, SIGKILL); - rc = waitpid(trig_in->group_id, 
NULL, WNOHANG); - if ((rc == trig_in->group_id) - || ((rc == -1) && (errno == ECHILD))) - purge = true; - else - purge = false; - } else /* No PID to wait for */ - purge = true; - - if (purge) { + rc = waitpid(trig_in->group_id, &prog_stat, + WNOHANG); + if ((rc > 0) && prog_stat) { + info("trigger uid=%u type=%s:%s " + "exit=%u:%u", + trig_in->user_id, + _res_type(trig_in->res_type), + _trig_type(trig_in->trig_type), + WIFEXITED(prog_stat), + WTERMSIG(prog_stat)); + } + if ((rc == trig_in->group_id) || + ((rc == -1) && (errno == ECHILD))) + trig_in->group_id = 0; + } + + if (trig_in->group_id == 0) { #if _DEBUG info("purging trigger[%u]", trig_in->trig_id); #endif - list_delete(trig_iter); + list_delete_item(trig_iter); state_change = true; } } else if (trig_in->state == 2) { /* Elimiate zombie processes right away. * Purge trigger entry above MAX_PROG_TIME later */ - waitpid(trig_in->group_id, NULL, WNOHANG); + rc = waitpid(trig_in->group_id, &prog_stat, WNOHANG); + if ((rc > 0) && prog_stat) { + info("trigger uid=%u type=%s:%s exit=%u:%u", + trig_in->user_id, + _res_type(trig_in->res_type), + _trig_type(trig_in->trig_type), + WIFEXITED(prog_stat), WTERMSIG(prog_stat)); + } + if ((rc == trig_in->group_id) || + ((rc == -1) && (errno == ECHILD))) + trig_in->group_id = 0; } } list_iterator_destroy(trig_iter); @@ -952,5 +1100,7 @@ extern void trigger_fini(void) trigger_list = NULL; } FREE_NULL_BITMAP(trigger_down_nodes_bitmap); + FREE_NULL_BITMAP(trigger_drained_nodes_bitmap); + FREE_NULL_BITMAP(trigger_fail_nodes_bitmap); FREE_NULL_BITMAP(trigger_up_nodes_bitmap); } diff --git a/src/slurmctld/trigger_mgr.h b/src/slurmctld/trigger_mgr.h index 80d922bde..69986d143 100644 --- a/src/slurmctld/trigger_mgr.h +++ b/src/slurmctld/trigger_mgr.h @@ -2,9 +2,10 @@ * trigger_mgr.h - header to manager event triggers ***************************************************************************** * Copyright (C) 2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -51,6 +52,8 @@ extern int trigger_set(uid_t uid, gid_t gid, trigger_info_msg_t *msg); /* Note the some event has occured and flag triggers as needed */ extern void trigger_block_error(void); extern void trigger_node_down(struct node_record *node_ptr); +extern void trigger_node_drained(struct node_record *node_ptr); +extern void trigger_node_failing(struct node_record *node_ptr); extern void trigger_node_up(struct node_record *node_ptr); extern void trigger_reconfig(void); diff --git a/src/slurmd/Makefile.in b/src/slurmd/Makefile.in index 4a9b859e7..408c46fd0 100644 --- a/src/slurmd/Makefile.in +++ b/src/slurmd/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
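The _validate_trigger() routine added above rejects a trigger whose program is missing, not a regular file, or not executable by the requesting user, picking the owner, group, or other permission bits according to the file's ownership. A standalone approximation using stat(2), with error reporting to stderr instead of SLURM's info():

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Check that 'path' exists, is a regular file, and is executable by the
 * given user/group, using the same owner/group/other bit selection as the
 * _validate_trigger() added above.  Standalone sketch, not SLURM code. */
static bool program_ok(const char *path, uid_t uid, gid_t gid)
{
	struct stat st;
	int modes;

	if (stat(path, &st) != 0) {
		fprintf(stderr, "%s: not found\n", path);
		return false;
	}
	if (!S_ISREG(st.st_mode)) {
		fprintf(stderr, "%s: not a regular file\n", path);
		return false;
	}
	if (st.st_uid == uid)
		modes = (st.st_mode >> 6) & 07;    /* owner bits */
	else if (st.st_gid == gid)
		modes = (st.st_mode >> 3) & 07;    /* group bits */
	else
		modes = st.st_mode & 07;           /* other bits */
	if (modes & 01)
		return true;                       /* execute bit set */
	fprintf(stderr, "%s: not executable\n", path);
	return false;
}

int main(void)
{
	/* /bin/true is only an example path; any executable will do. */
	printf("ok: %d\n", program_ok("/bin/true", 0, 0));
	return 0;
}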
@@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -99,6 +101,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -112,10 +115,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -135,7 +141,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -146,6 +155,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -161,6 +172,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -176,6 +188,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -347,8 +360,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -373,8 +386,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -384,13 +397,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/src/slurmd/common/proctrack.c b/src/slurmd/common/proctrack.c index 43b317a42..651af3b85 100644 --- a/src/slurmd/common/proctrack.c +++ b/src/slurmd/common/proctrack.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/common/proctrack.h b/src/slurmd/common/proctrack.h index 604b4e1c4..35325770e 100644 --- a/src/slurmd/common/proctrack.h +++ b/src/slurmd/common/proctrack.h @@ -4,7 +4,7 @@ * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/common/reverse_tree.h b/src/slurmd/common/reverse_tree.h index 5020cd14a..a406db9a3 100644 --- a/src/slurmd/common/reverse_tree.h +++ b/src/slurmd/common/reverse_tree.h @@ -5,7 +5,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/common/run_script.c b/src/slurmd/common/run_script.c index e03e1d3db..278d6964e 100644 --- a/src/slurmd/common/run_script.c +++ b/src/slurmd/common/run_script.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -72,7 +72,11 @@ run_script(const char *name, const char *path, uint32_t jobid, if (path == NULL || path[0] == '\0') return 0; - debug("[job %u] attempting to run %s [%s]", jobid, name, path); + if (jobid) { + debug("[job %u] attempting to run %s [%s]", + jobid, name, path); + } else + debug("attempting to run %s [%s]", name, path); if (access(path, R_OK | X_OK) < 0) { debug("Not running %s [%s]: %m", name, path); diff --git a/src/slurmd/common/run_script.h b/src/slurmd/common/run_script.h index a83b40898..837d4dce6 100644 --- a/src/slurmd/common/run_script.h +++ b/src/slurmd/common/run_script.h @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
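The run_script() change above only splits the debug message for the jobid==0 case, but the surrounding pattern is the usual one for optional admin scripts: skip when unconfigured, check access(R_OK | X_OK), then fork/exec and reap. A standalone sketch under the assumption of an argument-less script (SLURM's version also builds an environment and enforces a timeout); the /etc/slurm/prolog path below is purely illustrative:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Run an optional admin script and return its exit status, 0 if no path is
 * configured or the file is not runnable, or -1 on error.  Standalone sketch
 * of the run_script() pattern only. */
static int run_script_sketch(const char *name, const char *path)
{
	pid_t pid;
	int status;

	if (path == NULL || path[0] == '\0')
		return 0;                          /* nothing configured */
	if (access(path, R_OK | X_OK) < 0) {
		fprintf(stderr, "not running %s [%s]\n", name, path);
		return 0;
	}
	pid = fork();
	if (pid < 0)
		return -1;
	if (pid == 0) {                            /* child */
		char *argv[] = { (char *) path, NULL };
		execv(path, argv);
		_exit(127);                        /* exec failed */
	}
	if (waitpid(pid, &status, 0) < 0)
		return -1;
	return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}

int main(void)
{
	printf("status: %d\n", run_script_sketch("prolog", "/etc/slurm/prolog"));
	return 0;
}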
diff --git a/src/slurmd/common/setproctitle.c b/src/slurmd/common/setproctitle.c index b10e2bd67..646e9e558 100644 --- a/src/slurmd/common/setproctitle.c +++ b/src/slurmd/common/setproctitle.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/common/setproctitle.c - argv manipulation - * $Id: setproctitle.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: setproctitle.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/common/setproctitle.h b/src/slurmd/common/setproctitle.h index 625ecc6a9..30caef886 100644 --- a/src/slurmd/common/setproctitle.h +++ b/src/slurmd/common/setproctitle.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/common/setproctitle.h - Emulation of BSD setproctitle() - * $Id: setproctitle.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: setproctitle.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/common/slurmstepd_init.c b/src/slurmd/common/slurmstepd_init.c index 903520a8d..81cfd21be 100644 --- a/src/slurmd/common/slurmstepd_init.c +++ b/src/slurmd/common/slurmstepd_init.c @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -49,7 +49,7 @@ extern void pack_slurmd_conf_lite(slurmd_conf_t *conf, Buf buffer) packstr(conf->logfile, buffer); packstr(conf->task_prolog, buffer); packstr(conf->task_epilog, buffer); - pack16(conf->job_acct_freq, buffer); + pack16(conf->job_acct_gather_freq, buffer); pack16(conf->propagate_prio, buffer); pack32(conf->debug_level, buffer); pack32(conf->daemonize, buffer); @@ -60,18 +60,18 @@ extern void pack_slurmd_conf_lite(slurmd_conf_t *conf, Buf buffer) extern int unpack_slurmd_conf_lite_no_alloc(slurmd_conf_t *conf, Buf buffer) { - uint16_t uint16_tmp; uint32_t uint32_tmp; - safe_unpackstr_xmalloc(&conf->hostname, &uint16_tmp, buffer); + + safe_unpackstr_xmalloc(&conf->hostname, &uint32_tmp, buffer); safe_unpack16(&conf->sockets, buffer); safe_unpack16(&conf->cores, buffer); safe_unpack16(&conf->threads, buffer); - safe_unpackstr_xmalloc(&conf->spooldir, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&conf->node_name, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&conf->logfile, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&conf->task_prolog, &uint16_tmp, buffer); - safe_unpackstr_xmalloc(&conf->task_epilog, &uint16_tmp, buffer); - safe_unpack16(&conf->job_acct_freq, buffer); + safe_unpackstr_xmalloc(&conf->spooldir, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&conf->node_name, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&conf->logfile, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&conf->task_prolog, &uint32_tmp, buffer); + safe_unpackstr_xmalloc(&conf->task_epilog, &uint32_tmp, buffer); + safe_unpack16(&conf->job_acct_gather_freq, buffer); safe_unpack16(&conf->propagate_prio, buffer); safe_unpack32(&uint32_tmp, buffer); conf->debug_level = uint32_tmp; diff --git a/src/slurmd/common/slurmstepd_init.h b/src/slurmd/common/slurmstepd_init.h index 47914187c..0499d17a0 100644 --- a/src/slurmd/common/slurmstepd_init.h +++ b/src/slurmd/common/slurmstepd_init.h @@ -4,7 +4,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/common/task_plugin.c b/src/slurmd/common/task_plugin.c index e148fdfc4..1882a9b05 100644 --- a/src/slurmd/common/task_plugin.c +++ b/src/slurmd/common/task_plugin.c @@ -1,10 +1,11 @@ /*****************************************************************************\ * task_plugin.h - task launch plugin stub. ***************************************************************************** - * Copyright (C) 2005 The Regents of the University of California. + * Copyright (C) 2005-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
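The pack_slurmd_conf_lite()/unpack_slurmd_conf_lite_no_alloc() edits above rename job_acct_freq and widen the unpacked string-length variables to 32 bits; the underlying rule is that packer and unpacker must agree field-for-field on order and width, so a width change has to land on both sides at once. A standalone illustration with a plain byte buffer in network byte order (SLURM's Buf/packstr API is not used here):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal pack/unpack pair: both sides must agree on field order and width,
 * which is why changing a field's size (as the string-length fields do in
 * the hunk above) must be applied to the packer and unpacker together. */
static size_t pack_conf(uint8_t *buf, uint16_t acct_freq, uint32_t debug_level)
{
	uint16_t f = htons(acct_freq);
	uint32_t d = htonl(debug_level);

	memcpy(buf, &f, sizeof(f));
	memcpy(buf + sizeof(f), &d, sizeof(d));
	return sizeof(f) + sizeof(d);
}

static void unpack_conf(const uint8_t *buf, uint16_t *acct_freq,
			uint32_t *debug_level)
{
	uint16_t f;
	uint32_t d;

	memcpy(&f, buf, sizeof(f));
	memcpy(&d, buf + sizeof(f), sizeof(d));
	*acct_freq = ntohs(f);
	*debug_level = ntohl(d);
}

int main(void)
{
	uint8_t buf[16];
	uint16_t freq;
	uint32_t dbg;

	pack_conf(buf, 30, 5);
	unpack_conf(buf, &freq, &dbg);
	printf("acct_freq=%u debug_level=%u\n", (unsigned) freq, (unsigned) dbg);
	return 0;
}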
@@ -46,13 +47,19 @@ #include "src/slurmd/slurmstepd/slurmstepd_job.h" typedef struct slurmd_task_ops { - int (*slurmd_launch_request) ( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id); - int (*slurmd_reserve_resources) ( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id ); - int (*slurmd_release_resources) ( uint32_t job_id); - - int (*pre_setuid) ( slurmd_job_t *job ); - int (*pre_launch) ( slurmd_job_t *job ); - int (*post_term) ( slurmd_job_t *job ); + int (*slurmd_launch_request) (uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id); + int (*slurmd_reserve_resources) (uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id); + int (*slurmd_suspend_job) (uint32_t job_id); + int (*slurmd_resume_job) (uint32_t job_id); + int (*slurmd_release_resources) (uint32_t job_id); + + int (*pre_setuid) (slurmd_job_t *job); + int (*pre_launch) (slurmd_job_t *job); + int (*post_term) (slurmd_job_t *job); } slurmd_task_ops_t; @@ -68,7 +75,7 @@ static pthread_mutex_t g_task_context_lock = PTHREAD_MUTEX_INITIALIZER; static slurmd_task_ops_t * -_slurmd_task_get_ops( slurmd_task_context_t *c ) +_slurmd_task_get_ops(slurmd_task_context_t *c) { /* * Must be synchronized with slurmd_task_ops_t above. @@ -76,6 +83,8 @@ _slurmd_task_get_ops( slurmd_task_context_t *c ) static const char *syms[] = { "task_slurmd_launch_request", "task_slurmd_reserve_resources", + "task_slurmd_suspend_job", + "task_slurmd_resume_job", "task_slurmd_release_resources", "task_pre_setuid", "task_pre_launch", @@ -118,7 +127,7 @@ _slurmd_task_get_ops( slurmd_task_context_t *c ) static slurmd_task_context_t * -_slurmd_task_context_create( const char *task_plugin_type ) +_slurmd_task_context_create(const char *task_plugin_type) { slurmd_task_context_t *c; @@ -137,7 +146,7 @@ _slurmd_task_context_create( const char *task_plugin_type ) static int -_slurmd_task_context_destroy( slurmd_task_context_t *c ) +_slurmd_task_context_destroy(slurmd_task_context_t *c) { /* * Must check return code here because plugins might still @@ -161,7 +170,7 @@ _slurmd_task_context_destroy( slurmd_task_context_t *c ) * * RET - slurm error code */ -extern int slurmd_task_init( void ) +extern int slurmd_task_init(void) { int retval = SLURM_SUCCESS; char *task_plugin_type = NULL; @@ -198,7 +207,7 @@ extern int slurmd_task_init( void ) * * RET - slurm error code */ -extern int slurmd_task_fini( void ) +extern int slurmd_task_fini(void) { int rc; @@ -215,9 +224,11 @@ extern int slurmd_task_fini( void ) * * RET - slurm error code */ -extern int slurmd_launch_request( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id) +extern int slurmd_launch_request(uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id) { - if ( slurmd_task_init() ) + if (slurmd_task_init()) return SLURM_ERROR; return (*(g_task_context->ops.slurmd_launch_request))(job_id, req, node_id); @@ -228,22 +239,50 @@ extern int slurmd_launch_request( uint32_t job_id, launch_tasks_request_msg_t *r * * RET - slurm error code */ -extern int slurmd_reserve_resources( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id ) +extern int slurmd_reserve_resources(uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id ) { - if ( slurmd_task_init() ) + if (slurmd_task_init()) return SLURM_ERROR; return (*(g_task_context->ops.slurmd_reserve_resources))(job_id, req, node_id); } +/* + * Slurmd is suspending a job. 
+ * + * RET - slurm error code + */ +extern int slurmd_suspend_job(uint32_t job_id) +{ + if (slurmd_task_init()) + return SLURM_ERROR; + + return (*(g_task_context->ops.slurmd_suspend_job))(job_id); +} + +/* + * Slurmd is resuming a previously suspended job. + * + * RET - slurm error code + */ +extern int slurmd_resume_job(uint32_t job_id) +{ + if (slurmd_task_init()) + return SLURM_ERROR; + + return (*(g_task_context->ops.slurmd_resume_job))(job_id); +} + /* * Slurmd is releasing resources for the task. * * RET - slurm error code */ -extern int slurmd_release_resources( uint32_t job_id ) +extern int slurmd_release_resources(uint32_t job_id) { - if ( slurmd_task_init() ) + if (slurmd_task_init()) return SLURM_ERROR; return (*(g_task_context->ops.slurmd_release_resources))(job_id); @@ -255,9 +294,9 @@ extern int slurmd_release_resources( uint32_t job_id ) * * RET - slurm error code */ -extern int pre_setuid( slurmd_job_t *job ) +extern int pre_setuid(slurmd_job_t *job) { - if ( slurmd_task_init() ) + if (slurmd_task_init()) return SLURM_ERROR; return (*(g_task_context->ops.pre_setuid))(job); @@ -268,9 +307,9 @@ extern int pre_setuid( slurmd_job_t *job ) * * RET - slurm error code */ -extern int pre_launch( slurmd_job_t *job ) +extern int pre_launch(slurmd_job_t *job) { - if ( slurmd_task_init() ) + if (slurmd_task_init()) return SLURM_ERROR; return (*(g_task_context->ops.pre_launch))(job); @@ -281,9 +320,9 @@ extern int pre_launch( slurmd_job_t *job ) * * RET - slurm error code */ -extern int post_term( slurmd_job_t *job ) +extern int post_term(slurmd_job_t *job) { - if ( slurmd_task_init() ) + if (slurmd_task_init()) return SLURM_ERROR; return (*(g_task_context->ops.post_term))(job); diff --git a/src/slurmd/common/task_plugin.h b/src/slurmd/common/task_plugin.h index 6e566a1c2..121876569 100644 --- a/src/slurmd/common/task_plugin.h +++ b/src/slurmd/common/task_plugin.h @@ -1,10 +1,11 @@ /*****************************************************************************\ * task_plugin.h - Define plugin functions for task pre_launch and post_term. ***************************************************************************** - * Copyright (C) 2005 The Regents of the University of California. + * Copyright (C) 2005-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -65,21 +66,39 @@ extern int slurmd_task_fini(void); * * RET - slurm error code */ -extern int slurmd_launch_request( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id ); +extern int slurmd_launch_request(uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id ); /* * Slurmd is reserving resources for the task. * * RET - slurm error code */ -extern int slurmd_reserve_resources( uint32_t job_id, launch_tasks_request_msg_t *req, uint32_t node_id ); +extern int slurmd_reserve_resources(uint32_t job_id, + launch_tasks_request_msg_t *req, + uint32_t node_id ); + +/* + * Slurmd is suspending a job. + * + * RET - slurm error code + */ +extern int slurmd_suspend_job(uint32_t job_id); + +/* + * Slurmd is resuming a previously suspended job. + * + * RET - slurm error code + */ +extern int slurmd_resume_job(uint32_t job_id); /* * Slurmd is releasing resources for the task. 
* * RET - slurm error code */ -extern int slurmd_release_resources( uint32_t job_id ); +extern int slurmd_release_resources(uint32_t job_id); /* * Note that a task launch is about to occur. diff --git a/src/slurmd/slurmd/Makefile.am b/src/slurmd/slurmd/Makefile.am index 6b0e7ba4d..ec4cebd46 100644 --- a/src/slurmd/slurmd/Makefile.am +++ b/src/slurmd/slurmd/Makefile.am @@ -7,13 +7,13 @@ sbin_PROGRAMS = slurmd noinst_PROGRAMS = slurmd.test CPPFLAGS = -DLIBSLURM_SO=\"$(libdir)/libslurm.so\" -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) slurmd_LDADD = \ - $(top_builddir)/src/common/libcommon.la \ $(top_builddir)/src/common/libdaemonize.la \ $(top_builddir)/src/common/libeio.la \ - $(PLPA_LIBS) $(SSL_LIBS) + $(top_builddir)/src/common/libcommon.o -ldl \ + $(PLPA_LIBS) slurmd_test_LDADD = $(slurmd_LDADD) @@ -47,9 +47,9 @@ slurmd_test_SOURCES = \ if HAVE_AIX # We need to set maxdata back to 0 because this effects the "max memory size" # ulimit, and the ulimit is inherited by child processes. -slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) -Wl,-bmaxdata:0x0 +slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) -Wl,-bmaxdata:0x0 else -slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) endif slurmd_test_LDFLAGS = $(slurmd_LDFLAGS) diff --git a/src/slurmd/slurmd/Makefile.in b/src/slurmd/slurmd/Makefile.in index 034679356..d92d8a394 100644 --- a/src/slurmd/slurmd/Makefile.in +++ b/src/slurmd/slurmd/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
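The new suspend/resume hooks are added in two places that the patch's own comment says must stay synchronized: the slurmd_task_ops_t function-pointer struct and the syms[] array of exported symbol names, which are resolved positionally when the plugin loads. A standalone sketch of that pairing using dlopen()/dlsym() directly rather than SLURM's plugin layer (link with -ldl; the plugin path is illustrative, the symbol names are the ones added in the hunk above):

#include <dlfcn.h>
#include <stddef.h>
#include <stdio.h>

/* Function-pointer table; syms[] below must list the exported symbol names
 * in exactly the same order, mirroring the slurmd_task_ops_t / syms[]
 * pairing in the patch. */
typedef struct {
	int (*suspend_job)(unsigned job_id);
	int (*resume_job)(unsigned job_id);
} task_ops_t;

static const char *syms[] = {
	"task_slurmd_suspend_job",
	"task_slurmd_resume_job",
};

/* Resolve every symbol in syms[] into the ops table, positionally.  Relies
 * on the POSIX convention that dlsym()'s void * can hold a function pointer,
 * as the real plugin loader does.  Returns 0 on success. */
static int load_ops(void *handle, task_ops_t *ops)
{
	void **slots = (void **) ops;
	size_t n = sizeof(syms) / sizeof(syms[0]);

	for (size_t i = 0; i < n; i++) {
		slots[i] = dlsym(handle, syms[i]);
		if (slots[i] == NULL) {
			fprintf(stderr, "missing symbol %s\n", syms[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	/* "./task_plugin.so" is a placeholder; a real plugin would export the
	 * two task_slurmd_* symbols named above. */
	void *handle = dlopen("./task_plugin.so", RTLD_LAZY);
	task_ops_t ops;

	if (handle == NULL || load_ops(handle, &ops) != 0) {
		fprintf(stderr, "plugin not loaded (expected in this sketch)\n");
		return 1;
	}
	printf("suspend returned %d\n", ops.suspend_job(1234));
	dlclose(handle);
	return 0;
}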
@@ -47,6 +47,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -78,24 +80,22 @@ am__objects_1 = slurmd.$(OBJEXT) req.$(OBJEXT) get_mach_stat.$(OBJEXT) \ am_slurmd_OBJECTS = $(am__objects_1) config.$(OBJEXT) slurmd_OBJECTS = $(am_slurmd_OBJECTS) am__DEPENDENCIES_1 = -slurmd_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/common/libdaemonize.la \ - $(top_builddir)/src/common/libeio.la $(am__DEPENDENCIES_1) \ - $(am__DEPENDENCIES_1) +slurmd_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libeio.la \ + $(top_builddir)/src/common/libcommon.o $(am__DEPENDENCIES_1) slurmd_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(slurmd_LDFLAGS) \ $(LDFLAGS) -o $@ am_slurmd_test_OBJECTS = $(am__objects_1) testconfig.$(OBJEXT) slurmd_test_OBJECTS = $(am_slurmd_test_OBJECTS) -am__DEPENDENCIES_2 = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/common/libdaemonize.la \ - $(top_builddir)/src/common/libeio.la $(am__DEPENDENCIES_1) \ - $(am__DEPENDENCIES_1) +am__DEPENDENCIES_2 = $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libeio.la \ + $(top_builddir)/src/common/libcommon.o $(am__DEPENDENCIES_1) slurmd_test_DEPENDENCIES = $(am__DEPENDENCIES_2) slurmd_test_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(slurmd_test_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -135,6 +135,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -148,10 +149,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -171,7 +175,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -182,6 +189,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -197,6 +206,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -212,6 +222,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -269,12 +280,12 @@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) slurmd_LDADD = \ - $(top_builddir)/src/common/libcommon.la \ $(top_builddir)/src/common/libdaemonize.la \ $(top_builddir)/src/common/libeio.la \ - $(PLPA_LIBS) $(SSL_LIBS) + $(top_builddir)/src/common/libcommon.o -ldl \ + $(PLPA_LIBS) slurmd_test_LDADD = $(slurmd_LDADD) SLURMD_SOURCES = \ @@ -304,11 +315,11 @@ slurmd_test_SOURCES = \ $(SLURMD_SOURCES) \ testconfig.c -@HAVE_AIX_FALSE@slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +@HAVE_AIX_FALSE@slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) # We need to set maxdata back to 0 because this effects the "max memory size" # ulimit, and the ulimit is inherited by child processes. 
-@HAVE_AIX_TRUE@slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) -Wl,-bmaxdata:0x0 +@HAVE_AIX_TRUE@slurmd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) -Wl,-bmaxdata:0x0 slurmd_test_LDFLAGS = $(slurmd_LDFLAGS) all: all-am @@ -359,8 +370,8 @@ install-sbinPROGRAMS: $(sbin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ else :; fi; \ done @@ -507,8 +518,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -520,8 +531,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -531,13 +542,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/slurmd/slurmd/get_mach_stat.c b/src/slurmd/slurmd/get_mach_stat.c index 526b02400..a082be7a2 100644 --- a/src/slurmd/slurmd/get_mach_stat.c +++ b/src/slurmd/slurmd/get_mach_stat.c @@ -10,7 +10,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmd/get_mach_stat.h b/src/slurmd/slurmd/get_mach_stat.h index 46482e345..586c7e534 100644 --- a/src/slurmd/slurmd/get_mach_stat.h +++ b/src/slurmd/slurmd/get_mach_stat.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. 
* For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmd/read_proc.c b/src/slurmd/slurmd/read_proc.c index 5ed4ca0a1..a0169fef5 100644 --- a/src/slurmd/slurmd/read_proc.c +++ b/src/slurmd/slurmd/read_proc.c @@ -6,7 +6,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -77,7 +77,7 @@ struct sess_record *session_ptr; int dump_proc(int uid, int sid); void init_proc(void); int parse_proc_stat(char* proc_stat, int *session, - long unsigned *time, long *resident_set_size); + unsigned long *time, long *resident_set_size); int read_proc(); #if DEBUG_MODULE @@ -178,7 +178,7 @@ init_proc (void) * RET - zero or errno code */ int -parse_proc_stat(char* proc_stat, int *session, long unsigned *time, +parse_proc_stat(char* proc_stat, int *session, unsigned long *time, long *resident_set_size) { int pid, ppid, pgrp, tty, tpgid; char cmd[16], state[1]; @@ -239,7 +239,7 @@ read_proc() struct stat buffer; int uid, session; long resident_set_size; - long unsigned time; + unsigned long time; struct sess_record *s_ptr, *sess_free; /* Initialization */ diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c index 84242acf6..bab35a4a6 100644 --- a/src/slurmd/slurmd/req.c +++ b/src/slurmd/slurmd/req.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmd/req.c - slurmd request handling - * $Id: req.c 13326 2008-02-21 20:37:56Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
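parse_proc_stat() in read_proc.c, whose prototype is touched above, digs the session id, CPU time, and resident set size out of /proc/<pid>/stat. A standalone reader of just those fields, assuming the conventional Linux field layout and ignoring the corner case of executable names that contain spaces or parentheses:

#include <stdio.h>

/* Pull session id, combined CPU time (utime+stime, in clock ticks) and
 * resident set size (in pages) out of a /proc/<pid>/stat line.  Standalone
 * sketch of what parse_proc_stat() does. */
static int parse_stat_line(const char *line, int *session,
			   unsigned long *cpu_time, long *rss)
{
	int pid, ppid, pgrp;
	char comm[64], state;
	unsigned long utime, stime;

	/* Fields 1-6, then skip to 14-15 (utime, stime) and 24 (rss). */
	int n = sscanf(line,
		       "%d %63s %c %d %d %d %*d %*d %*u %*u %*u %*u %*u "
		       "%lu %lu %*d %*d %*d %*d %*d %*d %*u %*u %ld",
		       &pid, comm, &state, &ppid, &pgrp, session,
		       &utime, &stime, rss);
	if (n != 9)
		return -1;
	*cpu_time = utime + stime;
	return 0;
}

int main(void)
{
	char line[4096];
	FILE *fp = fopen("/proc/self/stat", "r");
	int session;
	unsigned long cpu_time;
	long rss;

	if (fp == NULL || fgets(line, sizeof(line), fp) == NULL)
		return 1;
	fclose(fp);
	if (parse_stat_line(line, &session, &cpu_time, &rss) == 0)
		printf("session=%d ticks=%lu rss=%ld pages\n",
		       session, cpu_time, rss);
	return 0;
}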
@@ -56,12 +56,13 @@ #include <grp.h> #include "src/common/hostlist.h" +#include "src/common/jobacct_common.h" #include "src/common/log.h" #include "src/common/macros.h" #include "src/common/node_select.h" #include "src/common/slurm_auth.h" #include "src/common/slurm_cred.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/common/slurm_protocol_defs.h" #include "src/common/slurm_protocol_api.h" #include "src/common/slurm_protocol_interface.h" @@ -93,18 +94,26 @@ typedef struct { gid_t *gids; } gids_t; +typedef struct { + uint32_t job_id; + uint32_t job_mem; +} job_mem_limits_t; + static int _abort_job(uint32_t job_id); static int _abort_step(uint32_t job_id, uint32_t step_id); static char ** _build_env(uint32_t jobid, uid_t uid, char *bg_part_id); static void _delay_rpc(int host_inx, int host_cnt, int usec_per_rpc); static void _destroy_env(char **env); static bool _slurm_authorized_user(uid_t uid); +static void _job_limits_free(void *x); +static int _job_limits_match(void *x, void *key); static bool _job_still_running(uint32_t job_id); static int _kill_all_active_steps(uint32_t jobid, int sig, bool batch); static int _terminate_all_steps(uint32_t jobid, bool batch); static void _rpc_launch_tasks(slurm_msg_t *); static void _rpc_batch_job(slurm_msg_t *); static void _rpc_signal_tasks(slurm_msg_t *); +static void _rpc_checkpoint_tasks(slurm_msg_t *); static void _rpc_terminate_tasks(slurm_msg_t *); static void _rpc_timelimit(slurm_msg_t *); static void _rpc_reattach_tasks(slurm_msg_t *); @@ -117,6 +126,7 @@ static void _rpc_reconfig(slurm_msg_t *msg); static void _rpc_pid2jid(slurm_msg_t *msg); static int _rpc_file_bcast(slurm_msg_t *msg); static int _rpc_ping(slurm_msg_t *); +static int _rpc_health_check(slurm_msg_t *); static int _rpc_step_complete(slurm_msg_t *msg); static int _rpc_stat_jobacct(slurm_msg_t *msg); static int _rpc_daemon_status(slurm_msg_t *msg); @@ -144,6 +154,18 @@ static pthread_mutex_t launch_mutex = PTHREAD_MUTEX_INITIALIZER; static time_t booted = 0; static time_t last_slurmctld_msg = 0; +static pthread_mutex_t job_limits_mutex = PTHREAD_MUTEX_INITIALIZER; +static List job_limits_list = NULL; +static bool job_limits_loaded = false; + +/* NUM_PARALLEL_SUSPEND controls the number of jobs suspended/resumed + * at one time as well as the number of jobsteps per job that can be + * suspended at one time */ +#define NUM_PARALLEL_SUSPEND 8 +static pthread_mutex_t suspend_mutex = PTHREAD_MUTEX_INITIALIZER; +static uint32_t job_suspend_array[NUM_PARALLEL_SUSPEND]; +static int job_suspend_size = 0; + void slurmd_req(slurm_msg_t *msg) { @@ -156,6 +178,13 @@ slurmd_req(slurm_msg_t *msg) list_destroy(waiters); waiters = NULL; } + slurm_mutex_lock(&job_limits_mutex); + if (job_limits_list) { + list_destroy(job_limits_list); + job_limits_list = NULL; + job_limits_loaded = false; + } + slurm_mutex_unlock(&job_limits_mutex); return; } @@ -180,6 +209,11 @@ slurmd_req(slurm_msg_t *msg) _rpc_signal_tasks(msg); slurm_free_kill_tasks_msg(msg->data); break; + case REQUEST_CHECKPOINT_TASKS: + debug2("Processing RPC: REQUEST_CHECKPOINT_TASKS"); + _rpc_checkpoint_tasks(msg); + slurm_free_checkpoint_tasks_msg(msg->data); + break; case REQUEST_TERMINATE_TASKS: debug2("Processing RPC: REQUEST_TERMINATE_TASKS"); _rpc_terminate_tasks(msg); @@ -240,6 +274,11 @@ slurmd_req(slurm_msg_t *msg) last_slurmctld_msg = time(NULL); /* No body to free */ break; + case REQUEST_HEALTH_CHECK: + _rpc_health_check(msg); + last_slurmctld_msg = time(NULL); + /* No body 
to free */ + break; case REQUEST_JOB_ID: _rpc_pid2jid(msg); slurm_free_job_id_request_msg(msg->data); @@ -561,22 +600,21 @@ _forkexec_slurmstepd(slurmd_step_type_t type, void *req, /* * The job(step) credential is the only place to get a definitive * list of the nodes allocated to a job step. We need to return - * a hostset_t of the nodes. - * - * FIXME - Rewrite this to only take a slurm_cred_t and only return a - * slurm_cred_arg_t. The other parameters, jobid, stepid, etc. - * should be checked one caller layer higher. + * a hostset_t of the nodes. Validate the incoming RPC, updating + * job_mem and task_mem as needed. */ static int -_check_job_credential(slurm_cred_t cred, uint32_t jobid, - uint32_t stepid, uid_t uid, int tasks_to_launch, - hostset_t *step_hset) +_check_job_credential(launch_tasks_request_msg_t *req, uid_t uid, + int tasks_to_launch, hostset_t *step_hset) { slurm_cred_arg_t arg; hostset_t hset = NULL; bool user_ok = _slurm_authorized_user(uid); int host_index = -1; int rc; + slurm_cred_t cred = req->cred; + uint32_t jobid = req->job_id; + uint32_t stepid = req->job_step_id; /* * First call slurm_cred_verify() so that all valid @@ -591,6 +629,11 @@ _check_job_credential(slurm_cred_t cred, uint32_t jobid, } } + /* Overwrite any memory limits in the RPC with + * contents of the credential */ + req->job_mem = arg.job_mem; + req->task_mem = arg.task_mem; + /* * If uid is the slurm user id or root, do not bother * performing validity check of the credential @@ -689,20 +732,19 @@ _rpc_launch_tasks(slurm_msg_t *msg) char host[MAXHOSTNAMELEN]; uid_t req_uid; launch_tasks_request_msg_t *req = msg->data; - uint32_t jobid = req->job_id; - uint32_t stepid = req->job_step_id; bool super_user = false; bool first_job_run; slurm_addr self; slurm_addr *cli = &msg->orig_addr; socklen_t adlen; hostset_t step_hset = NULL; + job_mem_limits_t *job_limits_ptr; int nodeid = nodelist_find(req->complete_nodelist, conf->node_name); - req_uid = g_slurm_auth_get_uid(msg->auth_cred); + req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); memcpy(&req->orig_addr, &msg->orig_addr, sizeof(slurm_addr)); - slurmd_launch_request(jobid, req, nodeid); + slurmd_launch_request(req->job_id, req, nodeid); super_user = _slurm_authorized_user(req_uid); @@ -718,24 +760,52 @@ _rpc_launch_tasks(slurm_msg_t *msg) req->job_step_id, req->uid, req->gid, host, port); first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id); - if (_check_job_credential(req->cred, jobid, stepid, req_uid, - req->tasks_to_launch[nodeid], + if (_check_job_credential(req, req_uid, req->tasks_to_launch[nodeid], &step_hset) < 0) { errnum = errno; error("Invalid job credential from %ld@%s: %m", (long) req_uid, host); goto done; } - + #ifndef HAVE_FRONT_END if (first_job_run) { - if (_run_prolog(req->job_id, req->uid, NULL) != 0) { - error("[job %u] prolog failed", req->job_id); + int rc; + rc = _run_prolog(req->job_id, req->uid, NULL); + if (rc) { + int term_sig, exit_status; + if (WIFSIGNALED(rc)) { + exit_status = 0; + term_sig = WTERMSIG(rc); + } else { + exit_status = WEXITSTATUS(rc); + term_sig = 0; + } + error("[job %u] prolog failed status=%d:%d", + req->job_id, exit_status, term_sig); errnum = ESLURMD_PROLOG_FAILED; goto done; } } #endif + + if (req->job_mem) { + slurm_mutex_lock(&job_limits_mutex); + if (!job_limits_list) + job_limits_list = list_create(_job_limits_free); + job_limits_ptr = list_find_first (job_limits_list, + _job_limits_match, + &req->job_id); + if (!job_limits_ptr) { + //info("AddLim job:%u 
mem:%u",req->job_id,req->job_mem); + job_limits_ptr = xmalloc(sizeof(job_mem_limits_t)); + job_limits_ptr->job_id = req->job_id; + list_append(job_limits_list, job_limits_ptr); + } + job_limits_ptr->job_mem = req->job_mem; /* reset limit */ + slurm_mutex_unlock(&job_limits_mutex); + } + adlen = sizeof(self); _slurm_getsockname(msg->conn_fd, (struct sockaddr *)&self, &adlen); @@ -750,7 +820,7 @@ _rpc_launch_tasks(slurm_msg_t *msg) if (slurm_send_rc_msg(msg, errnum) < 0) { - error("launch_tasks: unable to send return code: %m"); + error("_rpc_launch_tasks: unable to send return code: %m"); /* * Rewind credential so that srun may perform retry @@ -806,7 +876,7 @@ _rpc_batch_job(slurm_msg_t *msg) batch_job_launch_msg_t *req = (batch_job_launch_msg_t *)msg->data; bool first_job_run = true; int rc = SLURM_SUCCESS; - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); char *bg_part_id = NULL; bool replied = false; slurm_addr *cli = &msg->orig_addr; @@ -851,8 +921,17 @@ _rpc_batch_job(slurm_msg_t *msg) rc = _run_prolog(req->job_id, req->uid, bg_part_id); xfree(bg_part_id); - if (rc != 0) { - error("[job %u] prolog failed", req->job_id); + if (rc) { + int term_sig, exit_status; + if (WIFSIGNALED(rc)) { + exit_status = 0; + term_sig = WTERMSIG(rc); + } else { + exit_status = WEXITSTATUS(rc); + term_sig = 0; + } + error("[job %u] prolog failed status=%d:%d", + req->job_id, exit_status, term_sig); _prolog_error(req, rc); rc = ESLURMD_PROLOG_FAILED; goto done; @@ -934,7 +1013,7 @@ _abort_step(uint32_t job_id, uint32_t step_id) resp.range_first = 0; resp.range_last = 0; resp.step_rc = 1; - resp.jobacct = jobacct_g_alloc(NULL); + resp.jobacct = jobacct_gather_g_create(NULL); resp_msg.msg_type = REQUEST_STEP_COMPLETE; resp_msg.data = &resp; return slurm_send_recv_controller_rc_msg(&resp_msg, &rc); @@ -943,7 +1022,7 @@ _abort_step(uint32_t job_id, uint32_t step_id) static void _rpc_reconfig(slurm_msg_t *msg) { - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); if (!_slurm_authorized_user(req_uid)) error("Security violation, reconfig RPC from uid %u", @@ -957,7 +1036,7 @@ _rpc_reconfig(slurm_msg_t *msg) static void _rpc_shutdown(slurm_msg_t *msg) { - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); forward_wait(msg); if (!_slurm_authorized_user(req_uid)) @@ -971,11 +1050,183 @@ _rpc_shutdown(slurm_msg_t *msg) /* Never return a message, slurmctld does not expect one */ } +static void _job_limits_free(void *x) +{ + xfree(x); +} + + +static int _job_limits_match(void *x, void *key) +{ + job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x; + uint32_t *job_id = (uint32_t *) key; + if (job_limits_ptr->job_id == *job_id) + return 1; + return 0; +} + +/* Call only with job_limits_mutex locked */ +static void +_load_job_limits(void) +{ + List steps; + ListIterator step_iter; + step_loc_t *stepd; + int fd; + job_mem_limits_t *job_limits_ptr; + slurmstepd_info_t *stepd_info_ptr; + + if (!job_limits_list) + job_limits_list = list_create(_job_limits_free); + job_limits_loaded = true; + + steps = stepd_available(conf->spooldir, conf->node_name); + step_iter = list_iterator_create(steps); + while ((stepd = list_next(step_iter))) { + job_limits_ptr = list_find_first(job_limits_list, + _job_limits_match, + &stepd->jobid); + if (job_limits_ptr) /* already processed */ + continue; + fd = stepd_connect(stepd->directory, 
stepd->nodename, + stepd->jobid, stepd->stepid); + if (fd == -1) + continue; /* step completed */ + stepd_info_ptr = stepd_get_info(fd); + if (stepd_info_ptr && stepd_info_ptr->job_mem_limit) { + /* create entry for this job */ + job_limits_ptr = xmalloc(sizeof(job_mem_limits_t)); + job_limits_ptr->job_id = stepd->jobid; + job_limits_ptr->job_mem = stepd_info_ptr->job_mem_limit; + debug("RecLim job:%u mem:%u", + stepd->jobid, stepd_info_ptr->job_mem_limit); + list_append(job_limits_list, job_limits_ptr); + } + xfree(stepd_info_ptr); + close(fd); + } + list_iterator_destroy(step_iter); + list_destroy(steps); +} + +static void +_enforce_job_mem_limit(void) +{ + List steps; + ListIterator step_iter, job_limits_iter; + job_mem_limits_t *job_limits_ptr; + step_loc_t *stepd; + int fd, i, job_inx, job_cnt = 0; + uint32_t step_rss; + stat_jobacct_msg_t acct_req; + stat_jobacct_msg_t *resp = NULL; + struct job_mem_info { + uint32_t job_id; + uint32_t mem_limit; /* MB */ + uint32_t mem_used; /* KB */ + }; + struct job_mem_info *job_mem_info_ptr = NULL; + slurm_msg_t msg; + job_notify_msg_t notify_req; + job_step_kill_msg_t kill_req; + + slurm_mutex_lock(&job_limits_mutex); + if (!job_limits_loaded) + _load_job_limits(); + if (list_count(job_limits_list) == 0) { + slurm_mutex_unlock(&job_limits_mutex); + return; + } + + job_mem_info_ptr = xmalloc((list_count(job_limits_list) + 1) * + sizeof(struct job_mem_info)); + job_cnt = 0; + job_limits_iter = list_iterator_create(job_limits_list); + while ((job_limits_ptr = list_next(job_limits_iter))) { + job_mem_info_ptr[job_cnt].job_id = job_limits_ptr->job_id; + job_mem_info_ptr[job_cnt].mem_limit = job_limits_ptr->job_mem; + job_cnt++; + } + list_iterator_destroy(job_limits_iter); + slurm_mutex_unlock(&job_limits_mutex); + + steps = stepd_available(conf->spooldir, conf->node_name); + step_iter = list_iterator_create(steps); + while ((stepd = list_next(step_iter))) { + for (job_inx=0; job_inx<job_cnt; job_inx++) { + if (job_mem_info_ptr[job_inx].job_id == stepd->jobid) + break; + } + if (job_inx >= job_cnt) + continue; /* job not being tracked */ + + fd = stepd_connect(stepd->directory, stepd->nodename, + stepd->jobid, stepd->stepid); + if (fd == -1) + continue; /* step completed */ + acct_req.job_id = stepd->jobid; + acct_req.step_id = stepd->stepid; + resp = xmalloc(sizeof(stat_jobacct_msg_t)); + if ((!stepd_stat_jobacct(fd, &acct_req, resp)) && + (resp->jobacct)) { + /* resp->jobacct is NULL if account is disabled */ + jobacct_common_getinfo((struct jobacctinfo *) + resp->jobacct, + JOBACCT_DATA_TOT_RSS, + &step_rss); + //info("job %u.%u rss:%u",stepd->jobid, stepd->stepid, step_rss); + step_rss = MAX(step_rss, 1); + job_mem_info_ptr[job_inx].mem_used += step_rss; + } + slurm_free_stat_jobacct_msg(resp); + close(fd); + } + list_iterator_destroy(step_iter); + list_destroy(steps); + + for (i=0; i<job_cnt; i++) { + if ((job_mem_info_ptr[i].mem_limit == 0) || + (job_mem_info_ptr[i].mem_used == 0)) { + /* no memory limit or no steps found, purge record */ + slurm_mutex_lock(&job_limits_mutex); + list_delete_all(job_limits_list, _job_limits_match, + &job_mem_info_ptr[i].job_id); + slurm_mutex_unlock(&job_limits_mutex); + break; + } + job_mem_info_ptr[i].mem_used /= 1024; /* KB to MB */ + if (job_mem_info_ptr[i].mem_used <= + job_mem_info_ptr[i].mem_limit) + continue; + + info("Job %u exceeded memory limit (%u>%u), cancelling it", + job_mem_info_ptr[i].job_id, job_mem_info_ptr[i].mem_used, + job_mem_info_ptr[i].mem_limit); + /* NOTE: Batch jobs may have no srun 
to get this message */ + slurm_msg_t_init(&msg); + notify_req.job_id = job_mem_info_ptr[i].job_id; + notify_req.job_step_id = NO_VAL; + notify_req.message = "Exceeded job memory limit"; + msg.msg_type = REQUEST_JOB_NOTIFY; + msg.data = ¬ify_req; + slurm_send_only_controller_msg(&msg); + + kill_req.job_id = job_mem_info_ptr[i].job_id; + kill_req.job_step_id = NO_VAL; + kill_req.signal = SIGKILL; + kill_req.batch_flag = (uint16_t) 0; + msg.msg_type = REQUEST_CANCEL_JOB_STEP; + msg.data = &kill_req; + slurm_send_only_controller_msg(&msg); + } + xfree(job_mem_info_ptr); +} + static int _rpc_ping(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); if (!_slurm_authorized_user(req_uid)) { error("Security violation, ping RPC from uid %u", @@ -994,6 +1245,44 @@ _rpc_ping(slurm_msg_t *msg) error("Error responding to ping: %m"); send_registration_msg(SLURM_SUCCESS, false); } + + /* Take this opportunity to enforce any job memory limits */ + _enforce_job_mem_limit(); + return rc; +} + +static int +_rpc_health_check(slurm_msg_t *msg) +{ + int rc = SLURM_SUCCESS; + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + + if (!_slurm_authorized_user(req_uid)) { + error("Security violation, ping RPC from uid %u", + (unsigned int) req_uid); + rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ + } + + /* Return result. If the reply can't be sent this indicates that + * 1. The network is broken OR + * 2. slurmctld has died OR + * 3. slurmd was paged out due to full memory + * If the reply request fails, we send an registration message to + * slurmctld in hopes of avoiding having the node set DOWN due to + * slurmd paging and not being able to respond in a timely fashion. 
*/ + if (slurm_send_rc_msg(msg, rc) < 0) { + error("Error responding to ping: %m"); + send_registration_msg(SLURM_SUCCESS, false); + } + + if ((rc == SLURM_SUCCESS) && (conf->health_check_program)) { + char *env[1] = { NULL }; + rc = run_script("health_check", conf->health_check_program, + 0, 60, env); + } + + /* Take this opportunity to enforce any job memory limits */ + _enforce_job_mem_limit(); return rc; } @@ -1002,7 +1291,7 @@ _rpc_signal_tasks(slurm_msg_t *msg) { int fd; int rc = SLURM_SUCCESS; - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); kill_tasks_msg_t *req = (kill_tasks_msg_t *) msg->data; slurmstepd_info_t *step; @@ -1063,6 +1352,50 @@ done: slurm_send_rc_msg(msg, rc); } +static void +_rpc_checkpoint_tasks(slurm_msg_t *msg) +{ + int fd; + int rc = SLURM_SUCCESS; + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + checkpoint_tasks_msg_t *req = (checkpoint_tasks_msg_t *) msg->data; + slurmstepd_info_t *step; + + fd = stepd_connect(conf->spooldir, conf->node_name, + req->job_id, req->job_step_id); + if (fd == -1) { + debug("checkpoint for nonexistant %u.%u stepd_connect failed: %m", + req->job_id, req->job_step_id); + rc = ESLURM_INVALID_JOB_ID; + goto done; + } + if ((step = stepd_get_info(fd)) == NULL) { + debug("checkpoint for nonexistent job %u.%u requested", + req->job_id, req->job_step_id); + rc = ESLURM_INVALID_JOB_ID; + goto done2; + } + + if ((req_uid != step->uid) && (!_slurm_authorized_user(req_uid))) { + debug("checkpoint req from uid %ld for job %u.%u owned by uid %ld", + (long) req_uid, req->job_id, req->job_step_id, + (long) step->uid); + rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ + goto done3; + } + + rc = stepd_checkpoint(fd, req->signal, req->timestamp); + if (rc == -1) + rc = ESLURMD_JOB_NOTRUNNING; + + done3: + xfree(step); + done2: + close(fd); + done: + slurm_send_rc_msg(msg, rc); +} + static void _rpc_terminate_tasks(slurm_msg_t *msg) { @@ -1088,7 +1421,7 @@ _rpc_terminate_tasks(slurm_msg_t *msg) goto done2; } - req_uid = g_slurm_auth_get_uid(msg->auth_cred); + req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); if ((req_uid != step->uid) && (!_slurm_authorized_user(req_uid))) { debug("kill req from uid %ld for job %u.%u owned by uid %ld", (long) req_uid, req->job_id, req->job_step_id, @@ -1127,9 +1460,9 @@ _rpc_step_complete(slurm_msg_t *msg) goto done; } - /* step completionmessages are only allowed from other slurmstepd, + /* step completion messages are only allowed from other slurmstepd, so only root or SlurmUser is allowed here */ - req_uid = g_slurm_auth_get_uid(msg->auth_cred); + req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); if (!_slurm_authorized_user(req_uid)) { debug("step completion from uid %ld for job %u.%u", (long) req_uid, req->job_id, req->job_step_id); @@ -1238,7 +1571,7 @@ _rpc_stat_jobacct(slurm_msg_t *msg) debug3("Entering _rpc_stat_jobacct"); /* step completion messages are only allowed from other slurmstepd, so only root or SlurmUser is allowed here */ - req_uid = g_slurm_auth_get_uid(msg->auth_cred); + req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); job_uid = _get_job_uid(req->job_id); if (job_uid < 0) { @@ -1279,7 +1612,7 @@ _rpc_stat_jobacct(slurm_msg_t *msg) } if (stepd_stat_jobacct(fd, req, resp) == SLURM_ERROR) { - debug("kill for nonexistent job %u.%u requested", + debug("accounting for nonexistent job %u.%u requested", req->job_id, req->step_id); } close(fd); @@ -1296,12 +1629,11 @@ _rpc_stat_jobacct(slurm_msg_t *msg) 
/* * For the specified job_id: reply to slurmctld, * sleep(configured kill_wait), then send SIGKILL - * FIXME! - Perhaps we should send SIGXCPU first? */ static void _rpc_timelimit(slurm_msg_t *msg) { - uid_t uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); kill_job_msg_t *req = msg->data; int nsteps; @@ -1323,6 +1655,7 @@ _rpc_timelimit(slurm_msg_t *msg) _kill_all_active_steps(req->job_id, SIGTERM, false); verbose( "Job %u: timeout: sent SIGTERM to %d active steps", req->job_id, nsteps ); + _kill_all_active_steps(req->job_id, SIGXCPU, true); /* Revoke credential, send SIGKILL, run epilog, etc. */ _rpc_terminate_job(msg); @@ -1378,9 +1711,9 @@ static int _rpc_file_bcast(slurm_msg_t *msg) { file_bcast_msg_t *req = msg->data; - int i, fd, flags, offset, inx, rc; - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); - uid_t req_gid = g_slurm_auth_get_gid(msg->auth_cred); + int fd, flags, offset, inx, rc; + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); + uid_t req_gid = g_slurm_auth_get_gid(msg->auth_cred, NULL); pid_t child; #if 0 @@ -1433,21 +1766,18 @@ _rpc_file_bcast(slurm_msg_t *msg) exit(errno); } - for (i=0; i<FILE_BLOCKS; i++) { - offset = 0; - while (req->block_len[i] - offset) { - inx = write(fd, &req->block[i][offset], - (req->block_len[i] - offset)); - if (inx == -1) { - if ((errno == EINTR) || (errno == EAGAIN)) - continue; - error("sbcast: uid:%u can't write `%s`: %s", - req_uid, req->fname, strerror(errno)); - close(fd); - exit(errno); - } - offset += inx; + offset = 0; + while (req->block_len - offset) { + inx = write(fd, &req->block[offset], (req->block_len - offset)); + if (inx == -1) { + if ((errno == EINTR) || (errno == EAGAIN)) + continue; + error("sbcast: uid:%u can't write `%s`: %s", + req_uid, req->fname, strerror(errno)); + close(fd); + exit(errno); } + offset += inx; } if (req->last_block && fchmod(fd, (req->modes & 0777))) { error("sbcast: uid:%u can't chmod `%s`: %s", @@ -1508,7 +1838,7 @@ _rpc_reattach_tasks(slurm_msg_t *msg) nodeid = step->nodeid; debug2("_rpc_reattach_tasks: nodeid %d in the job step", nodeid); - req_uid = g_slurm_auth_get_uid(msg->auth_cred); + req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); if ((req_uid != step->uid) && (!_slurm_authorized_user(req_uid))) { error("uid %ld attempt to attach to job %u.%u owned by %ld", (long) req_uid, req->job_id, req->job_step_id, @@ -1852,7 +2182,7 @@ static void _rpc_signal_job(slurm_msg_t *msg) { signal_job_msg_t *req = msg->data; - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); long job_uid; List steps; ListIterator i; @@ -1951,6 +2281,51 @@ _rpc_signal_job(slurm_msg_t *msg) } } +/* if a lock is granted to the job then return 1; else return 0 if + * the lock for the job is already taken or there's no more locks */ +static int +_get_suspend_job_lock(uint32_t jobid) +{ + int i, spot = -1; + pthread_mutex_lock(&suspend_mutex); + + for (i = 0; i < job_suspend_size; i++) { + if (job_suspend_array[i] == -1) { + spot = i; + continue; + } + if (job_suspend_array[i] == jobid) { + /* another thread already has the lock */ + pthread_mutex_unlock(&suspend_mutex); + return 0; + } + } + i = 0; + if (spot != -1) { + /* nobody has the lock and here's an available used lock */ + job_suspend_array[spot] = jobid; + i = 1; + } else if (job_suspend_size < NUM_PARALLEL_SUSPEND) { + /* a new lock is available */ + job_suspend_array[job_suspend_size++] = jobid; + i = 1; + } + 
pthread_mutex_unlock(&suspend_mutex); + return i; +} + +static void +_unlock_suspend_job(uint32_t jobid) +{ + int i; + pthread_mutex_lock(&suspend_mutex); + for (i = 0; i < job_suspend_size; i++) { + if (job_suspend_array[i] == jobid) + job_suspend_array[i] = -1; + } + pthread_mutex_unlock(&suspend_mutex); +} + /* * Send a job suspend/resume request through the appropriate slurmstepds for * each job step belonging to a given job allocation. @@ -1959,90 +2334,134 @@ static void _rpc_suspend_job(slurm_msg_t *msg) { suspend_msg_t *req = msg->data; - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); - long job_uid; + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); List steps; ListIterator i; step_loc_t *stepd; int step_cnt = 0; - int fd, rc = SLURM_SUCCESS; + int first_time, rc = SLURM_SUCCESS; if (req->op != SUSPEND_JOB && req->op != RESUME_JOB) { error("REQUEST_SUSPEND: bad op code %u", req->op); rc = ESLURM_NOT_SUPPORTED; - goto fini; } - debug("_rpc_suspend_job jobid=%u uid=%d", - req->job_id, req_uid); - job_uid = _get_job_uid(req->job_id); - if (job_uid < 0) - goto no_job; + /* * check that requesting user ID is the SLURM UID or root */ if (!_slurm_authorized_user(req_uid)) { - error("Security violation: signal_job(%u) from uid %ld", + error("Security violation: suspend_job(%u) from uid %ld", req->job_id, (long) req_uid); rc = ESLURM_USER_ID_MISSING; - goto fini; - } + } + + /* send a response now, which will include any errors + * detected with the request */ + if (msg->conn_fd >= 0) { + slurm_send_rc_msg(msg, rc); + if (slurm_close_accepted_conn(msg->conn_fd) < 0) + error ("_rpc_suspend_job: close(%d): %m", msg->conn_fd); + msg->conn_fd = -1; + } + if (rc != SLURM_SUCCESS) + return; + + /* now we can focus on performing the requested action, + * which could take a few seconds to complete */ + debug("_rpc_suspend_job jobid=%u uid=%d action=%s", req->job_id, + req_uid, req->op == SUSPEND_JOB ? "suspend" : "resume"); + + /* Try to get a thread lock for this job. If the lock + * is not available then sleep and try again */ + first_time = 1; + while (!_get_suspend_job_lock(req->job_id)) { + first_time = 0; + debug3("suspend lock sleep for %u", req->job_id); + sleep(1); + } + + /* If suspending and you got the lock on the first try then + * sleep for 1 second to give any launch requests a chance + * to get started and avoid a race condition that would + * effectively cause the suspend request to get ignored + * because "there's no job to suspend" */ + if (first_time && req->op == SUSPEND_JOB) { + debug3("suspend first sleep for %u", req->job_id); + sleep(1); + } + + /* Release or reclaim resources bound to these tasks (task affinity) */ + if (req->op == SUSPEND_JOB) + (void) slurmd_suspend_job(req->job_id); + else + (void) slurmd_resume_job(req->job_id); /* * Loop through all job steps and call stepd_suspend or stepd_resume - * as appropriate. + * as appropriate. Since the "suspend" action contains a 'sleep 1', + * suspend multiple jobsteps in parallel. 
*/ steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); - while ((stepd = list_next(i))) { - if (stepd->jobid != req->job_id) { - /* multiple jobs expected on shared nodes */ - debug3("Step from other job: jobid=%u (this jobid=%u)", - stepd->jobid, req->job_id); - continue; - } - step_cnt++; - fd = stepd_connect(stepd->directory, stepd->nodename, - stepd->jobid, stepd->stepid); - if (fd == -1) { - debug3("Unable to connect to step %u.%u", - stepd->jobid, stepd->stepid); - continue; + while (1) { + int x, fdi, fd[NUM_PARALLEL_SUSPEND]; + fdi = 0; + while ((stepd = list_next(i))) { + if (stepd->jobid != req->job_id) { + /* multiple jobs expected on shared nodes */ + debug3("Step from other job: jobid=%u (this jobid=%u)", + stepd->jobid, req->job_id); + continue; + } + step_cnt++; + + fd[fdi] = stepd_connect(stepd->directory, + stepd->nodename, stepd->jobid, + stepd->stepid); + if (fd[fdi] == -1) { + debug3("Unable to connect to step %u.%u", + stepd->jobid, stepd->stepid); + continue; + } + + + fdi++; + if (fdi >= NUM_PARALLEL_SUSPEND) + break; } + /* check for open connections */ + if (fdi == 0) + break; if (req->op == SUSPEND_JOB) { - debug2("Suspending job step %u.%u", - stepd->jobid, stepd->stepid); - if (stepd_suspend(fd) < 0) - debug(" suspend failed: %m", stepd->jobid); + stepd_suspend(fd, fdi, req->job_id); } else { - debug2("Resuming job step %u.%u", - stepd->jobid, stepd->stepid); - if (stepd_resume(fd) < 0) - debug(" resume failed: %m", stepd->jobid); + /* "resume" remains a serial action (for now) */ + for (x = 0; x < fdi; x++) { + debug2("Resuming job %u (cached step count %d)", + req->job_id, x); + if (stepd_resume(fd[x]) < 0) + debug(" resume failed: %m"); + } } + for (x = 0; x < fdi; x++) + /* fd may have been closed by stepd_suspend */ + if (fd[x] != -1) + close(fd[x]); - close(fd); + /* check for no more jobs */ + if (fdi < NUM_PARALLEL_SUSPEND) + break; } list_iterator_destroy(i); list_destroy(steps); + _unlock_suspend_job(req->job_id); - no_job: if (step_cnt == 0) { debug2("No steps in jobid %u to suspend/resume", req->job_id); } - - /* - * At this point, if connection still open, we send controller - * a reply. - */ - fini: if (msg->conn_fd >= 0) { - slurm_send_rc_msg(msg, rc); - if (slurm_close_accepted_conn(msg->conn_fd) < 0) - error ("_rpc_signal_job: close(%d): %m", msg->conn_fd); - msg->conn_fd = -1; - } } static void @@ -2050,10 +2469,11 @@ _rpc_terminate_job(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; kill_job_msg_t *req = msg->data; - uid_t uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); int nsteps = 0; int delay; char *bg_part_id = NULL; + uint16_t base_job_state = req->job_state & (~JOB_COMPLETING); slurm_ctl_conf_t *cf; debug("_rpc_terminate_job, uid = %d", uid); @@ -2095,6 +2515,12 @@ _rpc_terminate_job(slurm_msg_t *msg) debug("credential for job %u revoked", req->job_id); } + if ((base_job_state == JOB_NODE_FAIL) || + (base_job_state == JOB_PENDING)) /* requeued */ + _kill_all_active_steps(req->job_id, SIG_NODE_FAIL, true); + else if (base_job_state == JOB_FAILED) + _kill_all_active_steps(req->job_id, SIG_FAILURE, true); + /* * Tasks might be stopped (possibly by a debugger) * so send SIGCONT first. 
@@ -2178,8 +2604,17 @@ _rpc_terminate_job(slurm_msg_t *msg) rc = _run_epilog(req->job_id, req->job_uid, bg_part_id); xfree(bg_part_id); - if (rc != 0) { - error ("[job %u] epilog failed", req->job_id); + if (rc) { + int term_sig, exit_status; + if (WIFSIGNALED(rc)) { + exit_status = 0; + term_sig = WTERMSIG(rc); + } else { + exit_status = WEXITSTATUS(rc); + term_sig = 0; + } + error("[job %u] epilog failed status=%d:%d", + req->job_id, exit_status, term_sig); rc = ESLURMD_EPILOG_FAILED; } else debug("completed epilog for jobid %u", req->job_id); @@ -2201,10 +2636,11 @@ static void _sync_messages_kill(kill_job_msg_t *req) int host_cnt, host_inx; char *host; hostset_t hosts; + int epilog_msg_time; hosts = hostset_create(req->nodes); host_cnt = hostset_count(hosts); - if (host_cnt <= 32) + if (host_cnt <= 64) goto fini; if (conf->hostname == NULL) goto fini; /* should never happen */ @@ -2219,7 +2655,8 @@ static void _sync_messages_kill(kill_job_msg_t *req) } free(host); } - _delay_rpc(host_inx, host_cnt, 10000); + epilog_msg_time = slurm_get_epilog_msg_time(); + _delay_rpc(host_inx, host_cnt, epilog_msg_time); fini: hostset_destroy(hosts); } @@ -2354,7 +2791,7 @@ static void _rpc_update_time(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); + uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL); if ((req_uid != conf->slurm_user_id) && (req_uid != 0)) { rc = ESLURM_USER_ID_MISSING; diff --git a/src/slurmd/slurmd/req.h b/src/slurmd/slurmd/req.h index 7d13065c1..211c90ac7 100644 --- a/src/slurmd/slurmd/req.h +++ b/src/slurmd/slurmd/req.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmd/req.h - slurmd request handling - * $Id: req.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: req.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmd/reverse_tree_math.c b/src/slurmd/slurmd/reverse_tree_math.c index 134d2a8d0..9cb03b9f6 100644 --- a/src/slurmd/slurmd/reverse_tree_math.c +++ b/src/slurmd/slurmd/reverse_tree_math.c @@ -5,7 +5,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmd/reverse_tree_math.h b/src/slurmd/slurmd/reverse_tree_math.h index d40d75887..6896db8ac 100644 --- a/src/slurmd/slurmd/reverse_tree_math.h +++ b/src/slurmd/slurmd/reverse_tree_math.h @@ -5,7 +5,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
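Note on the req.c hunks above: the bare "prolog failed" / "epilog failed" messages are replaced by a decode of the script's wait status, reported as "status=%d:%d" (exit code, then terminating signal). The standalone C sketch below only illustrates that decoding pattern; it is not part of the patch and the helper name is invented for illustration.

#include <stdio.h>
#include <sys/wait.h>

/* Illustrative helper: split a wait(2)-style return code into an exit
 * status and a terminating signal, matching the new slurmd messages of
 * the form "[job %u] prolog failed status=%d:%d". */
static void report_script_status(const char *script, unsigned int job_id, int rc)
{
	int exit_status, term_sig;

	if (WIFSIGNALED(rc)) {		/* script was killed by a signal */
		exit_status = 0;
		term_sig = WTERMSIG(rc);
	} else {			/* script exited normally */
		exit_status = WEXITSTATUS(rc);
		term_sig = 0;
	}
	fprintf(stderr, "[job %u] %s failed status=%d:%d\n",
		job_id, script, exit_status, term_sig);
}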
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c index d523cd2f1..d46d3fc08 100644 --- a/src/slurmd/slurmd/slurmd.c +++ b/src/slurmd/slurmd/slurmd.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmd/slurmd.c - main slurm node server daemon - * $Id: slurmd.c 13688 2008-03-21 17:27:38Z jette $ + * $Id: slurmd.c 13690 2008-03-21 18:17:38Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -49,6 +49,7 @@ #include <sys/types.h> #include <sys/param.h> #include <sys/resource.h> +#include <sys/utsname.h> #include <unistd.h> #include <stdlib.h> #include <sys/mman.h> @@ -258,6 +259,8 @@ main (int argc, char *argv[]) _slurmd_fini(); _destroy_conf(); + slurm_crypto_fini(); /* must be after _destroy_conf() */ + info("Slurmd shutdown completing"); log_fini(); return 0; @@ -447,20 +450,31 @@ _fill_registration_msg(slurm_node_registration_status_msg_t *msg) List steps; ListIterator i; step_loc_t *stepd; - int n; - - msg->node_name = xstrdup (conf->node_name); - msg->cpus = conf->cpus; - msg->sockets = conf->sockets; - msg->cores = conf->cores; - msg->threads = conf->threads; - - msg->real_memory_size = conf->real_memory_size; - msg->temporary_disk_space = conf->tmp_disk_space; + int n; + char *arch, *os; + struct utsname buf; + + msg->node_name = xstrdup (conf->node_name); + msg->cpus = conf->cpus; + msg->sockets = conf->sockets; + msg->cores = conf->cores; + msg->threads = conf->threads; + msg->real_memory = conf->real_memory_size; + msg->tmp_disk = conf->tmp_disk_space; debug3("Procs=%u Sockets=%u Cores=%u Threads=%u Memory=%u TmpDisk=%u", msg->cpus, msg->sockets, msg->cores, msg->threads, - msg->real_memory_size, msg->temporary_disk_space); + msg->real_memory, msg->tmp_disk); + + uname(&buf); + if ((arch = getenv("SLURM_ARCH"))) + msg->arch = xstrdup(arch); + else + msg->arch = xstrdup(buf.machine); + if ((os = getenv("SLURM_OS"))) + msg->os = xstrdup(os); + else + msg->os = xstrdup(buf.sysname); if (msg->startup) { if (switch_g_alloc_node_info(&msg->switch_nodeinfo)) @@ -607,6 +621,8 @@ _read_config() _free_and_set(&conf->epilog, xstrdup(cf->epilog)); _free_and_set(&conf->prolog, xstrdup(cf->prolog)); _free_and_set(&conf->tmpfs, xstrdup(cf->tmp_fs)); + _free_and_set(&conf->health_check_program, + xstrdup(cf->health_check_program)); _free_and_set(&conf->spooldir, xstrdup(cf->slurmd_spooldir)); _massage_pathname(&conf->spooldir); _free_and_set(&conf->pidfile, xstrdup(cf->slurmd_pidfile)); @@ -616,7 +632,7 @@ _read_config() _free_and_set(&conf->pubkey, path_pubkey); conf->propagate_prio = cf->propagate_prio_process; - conf->job_acct_freq = cf->job_acct_freq; + conf->job_acct_gather_freq = cf->job_acct_gather_freq; if ( (conf->node_name == NULL) || (conf->node_name[0] == '\0') ) @@ -712,6 +728,7 @@ _print_conf() debug3("TmpDisk = %u", conf->tmp_disk_space); debug3("Epilog = `%s'", conf->epilog); debug3("Logfile = `%s'", cf->slurmd_logfile); + debug3("HealthCheck = `%s'", conf->health_check_program); debug3("NodeName = %s", conf->node_name); debug3("Port = %u", conf->port); debug3("Prolog = `%s'", conf->prolog); @@ -747,6 +764,7 @@ 
_init_conf() conf->block_map_inv = NULL; conf->conffile = NULL; conf->epilog = NULL; + conf->health_check_program = NULL; conf->logfile = NULL; conf->pubkey = NULL; conf->prolog = NULL; @@ -775,6 +793,7 @@ _destroy_conf() if(conf) { xfree(conf->block_map); xfree(conf->block_map_inv); + xfree(conf->health_check_program); xfree(conf->hostname); xfree(conf->node_name); xfree(conf->conffile); @@ -892,7 +911,7 @@ _slurmd_init() return SLURM_FAILURE; if (slurmd_task_init() != SLURM_SUCCESS) return SLURM_FAILURE; - if (slurm_auth_init() != SLURM_SUCCESS) + if (slurm_auth_init(NULL) != SLURM_SUCCESS) return SLURM_FAILURE; if (getrlimit(RLIMIT_NOFILE,&rlim) == 0) { diff --git a/src/slurmd/slurmd/slurmd.h b/src/slurmd/slurmd/slurmd.h index 191bf951e..1723f9d4d 100644 --- a/src/slurmd/slurmd/slurmd.h +++ b/src/slurmd/slurmd/slurmd.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmd/slurmd.h - header for slurmd - * $Id: slurmd.h 13688 2008-03-21 17:27:38Z jette $ + * $Id: slurmd.h 13690 2008-03-21 18:17:38Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -99,7 +99,7 @@ typedef struct slurmd_config { char *logfile; /* slurmd logfile, if any */ char *spooldir; /* SlurmdSpoolDir */ char *pidfile; /* PidFile location */ - + char *health_check_program; /* run on RPC request */ char *tmpfs; /* directory of tmp FS */ char *pubkey; /* location of job cred public key */ char *epilog; /* Path to Epilog script */ @@ -119,7 +119,7 @@ typedef struct slurmd_config { uid_t slurm_user_id; /* UID that slurmctld runs as */ pthread_mutex_t config_mutex; /* lock for slurmd_config access */ - uint16_t job_acct_freq; + uint16_t job_acct_gather_freq; uint16_t use_pam; uint16_t use_cpusets; /* Use cpusets, if available */ uint16_t propagate_prio; /* PropagatePrioProcess flag */ diff --git a/src/slurmd/slurmd/xcpu.c b/src/slurmd/slurmd/xcpu.c index 576c49af1..6bc5d330e 100644 --- a/src/slurmd/slurmd/xcpu.c +++ b/src/slurmd/slurmd/xcpu.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmd/xcpu.h b/src/slurmd/slurmd/xcpu.h index e1d2b0552..b98ba58d7 100644 --- a/src/slurmd/slurmd/xcpu.h +++ b/src/slurmd/slurmd/xcpu.h @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
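Note on the slurmd.c hunk above: the node registration message now carries architecture and operating system strings, with uname(2) supplying the defaults and the SLURM_ARCH / SLURM_OS environment variables overriding them. The standalone sketch below shows the same lookup using plain strdup() in place of SLURM's xstrdup(); it is not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname buf;
	const char *env;
	char *arch, *os;

	if (uname(&buf) < 0)
		return 1;
	/* Environment variables take precedence over uname(2) values */
	arch = strdup((env = getenv("SLURM_ARCH")) ? env : buf.machine);
	os   = strdup((env = getenv("SLURM_OS"))   ? env : buf.sysname);

	printf("arch=%s os=%s\n", arch, os);
	free(arch);
	free(os);
	return 0;
}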
diff --git a/src/slurmd/slurmstepd/Makefile.am b/src/slurmd/slurmstepd/Makefile.am index c4a1f0ec2..4e344a188 100644 --- a/src/slurmd/slurmstepd/Makefile.am +++ b/src/slurmd/slurmstepd/Makefile.am @@ -5,14 +5,14 @@ AUTOMAKE_OPTIONS = foreign sbin_PROGRAMS = slurmstepd -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) slurmstepd_LDADD = \ - $(top_builddir)/src/common/libcommon.la \ $(top_builddir)/src/common/libdaemonize.la \ $(top_builddir)/src/common/libeio.la \ $(top_builddir)/src/common/libspank.la \ - $(PLPA_LIBS) $(SSL_LIBS) $(PAM_LIBS) + $(top_builddir)/src/common/libcommon.o -ldl\ + $(PLPA_LIBS) $(PAM_LIBS) $(UTIL_LIBS) slurmstepd_SOURCES = \ slurmstepd.c slurmstepd.h \ @@ -42,9 +42,9 @@ slurmstepd_SOURCES = \ if HAVE_AIX # We need to set maxdata back to 0 because this effects the "max memory size" # ulimit, and the ulimit is inherited by child processes. -slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) -Wl,-bmaxdata:0x0 +slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) -Wl,-bmaxdata:0x0 else -slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) endif force: diff --git a/src/slurmd/slurmstepd/Makefile.in b/src/slurmd/slurmstepd/Makefile.in index 35e2e263b..5bc200c7e 100644 --- a/src/slurmd/slurmstepd/Makefile.in +++ b/src/slurmd/slurmstepd/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -46,6 +46,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -78,15 +80,15 @@ am_slurmstepd_OBJECTS = slurmstepd.$(OBJEXT) mgr.$(OBJEXT) \ run_script.$(OBJEXT) task_plugin.$(OBJEXT) slurmstepd_OBJECTS = $(am_slurmstepd_OBJECTS) am__DEPENDENCIES_1 = -slurmstepd_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ - $(top_builddir)/src/common/libdaemonize.la \ +slurmstepd_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \ $(top_builddir)/src/common/libeio.la \ - $(top_builddir)/src/common/libspank.la $(am__DEPENDENCIES_1) \ + $(top_builddir)/src/common/libspank.la \ + $(top_builddir)/src/common/libcommon.o $(am__DEPENDENCIES_1) \ $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1) slurmstepd_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \ $(slurmstepd_LDFLAGS) $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -126,6 +128,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -139,10 +142,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -162,7 +168,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -173,6 +182,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -188,6 +199,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -203,6 +215,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -260,13 +273,13 @@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign -INCLUDES = -I$(top_srcdir) $(SSL_CPPFLAGS) +INCLUDES = -I$(top_srcdir) slurmstepd_LDADD = \ - $(top_builddir)/src/common/libcommon.la \ $(top_builddir)/src/common/libdaemonize.la \ $(top_builddir)/src/common/libeio.la \ $(top_builddir)/src/common/libspank.la \ - $(PLPA_LIBS) $(SSL_LIBS) $(PAM_LIBS) + $(top_builddir)/src/common/libcommon.o -ldl\ + $(PLPA_LIBS) $(PAM_LIBS) $(UTIL_LIBS) slurmstepd_SOURCES = \ slurmstepd.c slurmstepd.h \ @@ -293,11 +306,11 @@ slurmstepd_SOURCES = \ $(top_builddir)/src/slurmd/common/task_plugin.h \ $(top_builddir)/src/slurmd/common/reverse_tree.h -@HAVE_AIX_FALSE@slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +@HAVE_AIX_FALSE@slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) # We need to set maxdata back to 0 because this effects the "max memory size" # ulimit, and the ulimit is inherited by child processes. 
-@HAVE_AIX_TRUE@slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) -Wl,-bmaxdata:0x0 +@HAVE_AIX_TRUE@slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) -Wl,-bmaxdata:0x0 all: all-am .SUFFIXES: @@ -340,8 +353,8 @@ install-sbinPROGRAMS: $(sbin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ else :; fi; \ done @@ -489,8 +502,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -502,8 +515,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -513,13 +526,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/slurmd/slurmstepd/fname.c b/src/slurmd/slurmstepd/fname.c index ce8295700..6c639a970 100644 --- a/src/slurmd/slurmstepd/fname.c +++ b/src/slurmd/slurmstepd/fname.c @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -140,45 +140,6 @@ fname_create(slurmd_job_t *job, const char *format, int taskid) return name; } -static int -find_fname(void *obj, void *key) -{ - char *str = obj; - char *name = key; - - if (strcmp(str, name) == 0) - return 1; - return 0; -} - -static int -_trunc_file(char *path) -{ - int flags = O_CREAT|O_TRUNC|O_WRONLY; - int fd; - - do { - fd = open(path, flags, 0644); - } while ((fd < 0) && (errno == EINTR)); - - if (fd < 0) { - error("Unable to open `%s': %m", path); - return -1; - } else - debug3("opened and truncated `%s'", path); - - if (close(fd) < 0) - error("Unable to close `%s': %m", path); - - return 0; -} - -static void -fname_free(void *name) -{ - xfree(name); -} - /* * Return >= 0 if fmt specifies "single task only" IO * i.e. if it specifies a single integer only @@ -195,30 +156,3 @@ int fname_single_task_io (const char *fmt) return -1; } - -int -fname_trunc_all(slurmd_job_t *job, const char *fmt) -{ - int i, rc = SLURM_SUCCESS; - char *fname; - ListIterator filei; - List files = NULL; - - if (fname_single_task_io (fmt) >= 0) - return (0); - - files = list_create((ListDelF)fname_free); - for (i = 0; i < job->ntasks; i++) { - fname = fname_create(job, fmt, job->task[i]->gtid); - if (!list_find_first(files, (ListFindF) find_fname, fname)) - list_append(files, (void *)fname); - } - - filei = list_iterator_create(files); - while ((fname = list_next(filei))) { - if ((rc = _trunc_file(fname)) < 0) - break; - } - list_destroy(files); - return rc; -} diff --git a/src/slurmd/slurmstepd/fname.h b/src/slurmd/slurmstepd/fname.h index 124549dbf..156423b57 100644 --- a/src/slurmd/slurmstepd/fname.h +++ b/src/slurmd/slurmstepd/fname.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/io.c b/src/slurmd/slurmstepd/io.c index 3e7120ab3..bc0128130 100644 --- a/src/slurmd/slurmstepd/io.c +++ b/src/slurmd/slurmstepd/io.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/io.c - Standard I/O handling routines for slurmstepd - * $Id: io.c 11873 2007-07-25 21:08:46Z jette $ + * $Id: io.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -52,6 +52,15 @@ # include <stdlib.h> #endif +#ifdef HAVE_PTY_H +# include <pty.h> +#endif + +#ifdef HAVE_UTMP_H +# include <utmp.h> +#endif + +#include <sys/poll.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/stat.h> @@ -69,6 +78,7 @@ #include "src/common/read_config.h" #include "src/common/xmalloc.h" #include "src/common/xsignal.h" +#include "src/common/xstring.h" #include "src/slurmd/slurmd/slurmd.h" #include "src/slurmd/slurmstepd/io.h" @@ -160,6 +170,19 @@ struct task_read_info { bool eof_msg_sent; }; +/********************************************************************** + * Pseudo terminal declarations + **********************************************************************/ +struct window_info { + slurmd_task_info_t *task; + slurmd_job_t *job; + slurm_fd pty_fd; +}; +#ifdef HAVE_PTY_H +static void _spawn_window_manager(slurmd_task_info_t *task, slurmd_job_t *job); +static void *_window_manager(void *arg); +#endif + /********************************************************************** * General declarations **********************************************************************/ @@ -301,7 +324,8 @@ _client_read(eio_obj_t *obj, List objs) if (client->header.length == 0) { /* zero length is an eof message */ debug5(" got stdin eof message!"); } else { - buf = client->in_msg->data + (client->in_msg->length - client->in_remaining); + buf = client->in_msg->data + + (client->in_msg->length - client->in_remaining); again: if ((n = read(obj->fd, buf, client->in_remaining)) < 0) { if (errno == EINTR) @@ -392,7 +416,8 @@ _client_write(eio_obj_t *obj, List objs) debug5("_client_write: nothing in the queue"); return SLURM_SUCCESS; } - debug5(" dequeue successful, client->out_msg->length = %d", client->out_msg->length); + debug5(" dequeue successful, client->out_msg->length = %d", + client->out_msg->length); client->out_remaining = client->out_msg->length; } @@ -401,7 +426,8 @@ _client_write(eio_obj_t *obj, List objs) /* * Write message to socket. 
*/ - buf = client->out_msg->data + (client->out_msg->length - client->out_remaining); + buf = client->out_msg->data + + (client->out_msg->length - client->out_remaining); again: if ((n = write(obj->fd, buf, client->out_remaining)) < 0) { if (errno == EINTR) { @@ -647,6 +673,119 @@ again: return SLURM_SUCCESS; } +/********************************************************************** + * Pseudo terminal functions + **********************************************************************/ +#ifdef HAVE_PTY_H +static void *_window_manager(void *arg) +{ + struct window_info *win_info = (struct window_info *) arg; + pty_winsz_t winsz; + ssize_t len; + struct winsize ws; + struct pollfd ufds; + char buf[4]; + + info("in _window_manager"); + ufds.fd = win_info->pty_fd; + ufds.events = POLLIN; + + while (1) { + if (poll(&ufds, 1, -1) <= 0) { + if (errno == EINTR) + continue; + error("poll(pty): %m"); + break; + } + if (!(ufds.revents & POLLIN)) { + /* ((ufds.revents & POLLHUP) || + * (ufds.revents & POLLERR)) */ + break; + } + len = slurm_read_stream(win_info->pty_fd, buf, 4); + if ((len == -1) && ((errno == EINTR) || (errno == EAGAIN))) + continue; + if (len < 4) { + error("read window size error: %m"); + return NULL; + } + memcpy(&winsz.cols, buf, 2); + memcpy(&winsz.rows, buf+2, 2); + ws.ws_col = ntohs(winsz.cols); + ws.ws_row = ntohs(winsz.rows); + debug("new pty size %u:%u", ws.ws_row, ws.ws_col); + if (ioctl(win_info->task->to_stdin, TIOCSWINSZ, &ws)) + error("ioctl(TIOCSWINSZ): %s"); + if (kill(win_info->task->pid, SIGWINCH)) { + if (errno == ESRCH) + break; + error("kill(%d, SIGWINCH): %m", + (int)win_info->task->pid); + } + } + return NULL; +} + +static void +_spawn_window_manager(slurmd_task_info_t *task, slurmd_job_t *job) +{ + char *host, *port, *rows, *cols; + slurm_fd pty_fd; + slurm_addr pty_addr; + uint16_t port_u; + struct window_info *win_info; + pthread_attr_t attr; + pthread_t win_id; + +#if 0 + /* NOTE: SLURM_LAUNCH_NODE_IPADDR is not available at this point */ + if (!(ip_addr = getenvp(job->env, "SLURM_LAUNCH_NODE_IPADDR"))) { + error("SLURM_LAUNCH_NODE_IPADDR env var not set"); + return; + } +#endif + if (!(host = getenvp(job->env, "SLURM_SRUN_COMM_HOST"))) { + error("SLURM_SRUN_COMM_HOST env var not set"); + return; + } + if (!(port = getenvp(job->env, "SLURM_PTY_PORT"))) { + error("SLURM_PTY_PORT env var not set"); + return; + } + if (!(cols = getenvp(job->env, "SLURM_PTY_WIN_COL"))) + error("SLURM_PTY_WIN_COL env var not set"); + if (!(rows = getenvp(job->env, "SLURM_PTY_WIN_ROW"))) + error("SLURM_PTY_WIN_ROW env var not set"); + + if (rows && cols) { + struct winsize ws; + ws.ws_col = atoi(cols); + ws.ws_row = atoi(rows); + debug("init pty size %u:%u", ws.ws_row, ws.ws_col); + if (ioctl(task->to_stdin, TIOCSWINSZ, &ws)) + error("ioctl(TIOCSWINSZ): %s"); + } + + port_u = atoi(port); + slurm_set_addr(&pty_addr, port_u, host); + pty_fd = slurm_open_msg_conn(&pty_addr); + if (pty_fd < 0) { + error("slurm_open_msg_conn(pty_conn) %s,%u: %m", + host, port_u); + return; + } + + win_info = xmalloc(sizeof(struct window_info)); + win_info->task = task; + win_info->job = job; + win_info->pty_fd = pty_fd; + slurm_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + if (pthread_create(&win_id, &attr, &_window_manager, (void *) win_info)) + error("pthread_create(pty_conn): %m"); +} +#endif + /********************************************************************** * General fuctions **********************************************************************/ @@ 
-663,17 +802,56 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job) int file_flags; /* set files for opening stdout/err */ - conf = slurm_conf_lock(); - if (conf->job_file_append) + if (job->open_mode == OPEN_MODE_APPEND) file_flags = O_CREAT|O_WRONLY|O_APPEND; - else + else if (job->open_mode == OPEN_MODE_TRUNCATE) file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC; - slurm_conf_unlock(); + else { + conf = slurm_conf_lock(); + if (conf->job_file_append) + file_flags = O_CREAT|O_WRONLY|O_APPEND; + else + file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC; + slurm_conf_unlock(); + } /* * Initialize stdin */ +#ifdef HAVE_PTY_H + if (job->pty) { + /* All of the stdin fails unless EVERY + * task gets an eio object for stdin. + * Its not clear why that is. */ + if (task->gtid == 0) { + int amaster, aslave; + debug(" stdin uses a pty object"); + if (openpty(&amaster, &aslave, NULL, NULL, NULL) < 0) { + error("stdin openpty: %m"); + return SLURM_ERROR; + } + task->stdin_fd = aslave; + fd_set_close_on_exec(task->stdin_fd); + task->to_stdin = amaster; + fd_set_close_on_exec(task->to_stdin); + fd_set_nonblocking(task->to_stdin); + _spawn_window_manager(task, job); + task->in = _create_task_in_eio(task->to_stdin, job); + eio_new_initial_obj(job->eio, (void *)task->in); + } else { + xfree(task->ifname); + task->ifname = xstrdup("/dev/null"); + task->stdin_fd = open("/dev/null", O_RDWR); + fd_set_close_on_exec(task->stdin_fd); + task->to_stdin = dup(task->stdin_fd); + fd_set_nonblocking(task->to_stdin); + task->in = _create_task_in_eio(task->to_stdin, job); + eio_new_initial_obj(job->eio, (void *)task->in); + } + } else if (task->ifname != NULL) { +#else if (task->ifname != NULL) { +#endif /* open file on task's stdin */ debug5(" stdin file name = %s", task->ifname); if ((task->stdin_fd = open(task->ifname, O_RDONLY)) == -1) { @@ -702,7 +880,29 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job) /* * Initialize stdout */ +#ifdef HAVE_PTY_H + if (job->pty) { + if (task->gtid == 0) { + task->stdout_fd = dup(task->stdin_fd); + fd_set_close_on_exec(task->stdout_fd); + task->from_stdout = dup(task->to_stdin); + fd_set_close_on_exec(task->from_stdout); + fd_set_nonblocking(task->from_stdout); + task->out = _create_task_out_eio(task->from_stdout, + SLURM_IO_STDOUT, job, task); + list_append(job->stdout_eio_objs, (void *)task->out); + eio_new_initial_obj(job->eio, (void *)task->out); + } else { + xfree(task->ofname); + task->ofname = xstrdup("/dev/null"); + task->stdout_fd = open("/dev/null", O_RDWR); + fd_set_close_on_exec(task->stdout_fd); + task->from_stdout = -1; /* not used */ + } + } else if (task->ofname != NULL) { +#else if (task->ofname != NULL) { +#endif /* open file on task's stdout */ debug5(" stdout file name = %s", task->ofname); task->stdout_fd = open(task->ofname, file_flags, 0666); @@ -710,8 +910,7 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job) error("Could not open stdout file: %m"); xfree(task->ofname); task->ofname = fname_create(job, "slurm-%J.out", 0); - task->stdout_fd = open(task->ofname, - O_CREAT|O_WRONLY|O_TRUNC|O_APPEND, 0666); + task->stdout_fd = open(task->ofname, file_flags, 0666); if (task->stdout_fd == -1) return SLURM_ERROR; } @@ -739,7 +938,29 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job) /* * Initialize stderr */ +#ifdef HAVE_PTY_H + if (job->pty) { + if (task->gtid == 0) { + task->stderr_fd = dup(task->stdin_fd); + fd_set_close_on_exec(task->stderr_fd); + task->from_stderr = dup(task->to_stdin); + 
fd_set_close_on_exec(task->from_stderr); + fd_set_nonblocking(task->from_stderr); + task->err = _create_task_out_eio(task->from_stderr, + SLURM_IO_STDERR, job, task); + list_append(job->stderr_eio_objs, (void *)task->err); + eio_new_initial_obj(job->eio, (void *)task->err); + } else { + xfree(task->efname); + task->efname = xstrdup("/dev/null"); + task->stderr_fd = open("/dev/null", O_RDWR); + fd_set_close_on_exec(task->stderr_fd); + task->from_stderr = -1; /* not used */ + } + } else if (task->efname != NULL) { +#else if (task->efname != NULL) { +#endif /* open file on task's stdout */ debug5(" stderr file name = %s", task->efname); task->stderr_fd = open(task->efname, file_flags, 0666); @@ -747,8 +968,7 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job) error("Could not open stderr file: %m"); xfree(task->efname); task->efname = fname_create(job, "slurm-%J.err", 0); - task->stderr_fd = open(task->efname, - O_CREAT|O_WRONLY|O_TRUNC|O_APPEND, 0666); + task->stderr_fd = open(task->efname, file_flags, 0666); if (task->stderr_fd == -1) return SLURM_ERROR; } diff --git a/src/slurmd/slurmstepd/io.h b/src/slurmd/slurmstepd/io.h index 72d4e5a57..e1adb77f0 100644 --- a/src/slurmd/slurmstepd/io.h +++ b/src/slurmd/slurmstepd/io.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/io.h - slurmstepd standard IO routines - * $Id: io.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: io.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c index 494dc360b..c468c7bf5 100644 --- a/src/slurmd/slurmstepd/mgr.c +++ b/src/slurmd/slurmstepd/mgr.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/mgr.c - job manager functions for slurmstepd - * $Id: mgr.c 13414 2008-02-28 23:22:33Z da $ + * $Id: mgr.c 13971 2008-05-02 20:23:00Z jette $ ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
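The _window_manager() hunk above receives a four-byte resize message (two 16-bit fields, columns then rows, in network byte order), applies it to the pty master with TIOCSWINSZ, and forwards SIGWINCH to the task. Below is a minimal stand-alone sketch of that receive path using only standard POSIX calls; the helper name, fd, and pid are placeholders, not SLURM code. The sketch reports ioctl() failures with perror(); note that the error("ioctl(TIOCSWINSZ): %s") calls in the hunk pass no argument for the %s and presumably meant %m.

#include <arpa/inet.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

/* Apply a resize message in the wire format read above: two 16-bit
 * fields, columns then rows, in network byte order. */
static int apply_winsize(int pty_master_fd, pid_t task_pid, const char buf[4])
{
    uint16_t cols, rows;
    struct winsize ws;

    memcpy(&cols, buf, 2);          /* cols first ... */
    memcpy(&rows, buf + 2, 2);      /* ... then rows */
    ws.ws_col = ntohs(cols);
    ws.ws_row = ntohs(rows);
    ws.ws_xpixel = ws.ws_ypixel = 0;

    if (ioctl(pty_master_fd, TIOCSWINSZ, &ws) < 0) {
        perror("ioctl(TIOCSWINSZ)");
        return -1;
    }
    return kill(task_pid, SIGWINCH);    /* let the task re-read its size */
}

int main(void)
{
    char msg[4];
    uint16_t cols = htons(80), rows = htons(24);

    memcpy(msg, &cols, 2);
    memcpy(msg + 2, &rows, 2);
    /* Demo: resize our own terminal (fd 0) and signal ourselves. */
    return apply_winsize(STDIN_FILENO, getpid(), msg) ? 1 : 0;
}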
@@ -82,7 +82,7 @@ #include "src/common/node_select.h" #include "src/common/fd.h" #include "src/common/safeopen.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/common/switch.h" #include "src/common/xsignal.h" #include "src/common/xstring.h" @@ -158,6 +158,7 @@ typedef struct kill_thread { /* * Job manager related prototypes */ +static int _access(const char *path, int modes, uid_t uid, gid_t gid); static void _send_launch_failure(launch_tasks_request_msg_t *, slurm_addr *, int); static int _fork_all_tasks(slurmd_job_t *job); @@ -342,7 +343,7 @@ _setup_normal_io(slurmd_job_t *job) /* * Temporarily drop permissions, initialize task stdio file - * decriptors (which may be connected to files), then + * descriptors (which may be connected to files), then * reclaim privileges. */ if (_drop_privileges(job, true, &sprivs) < 0) @@ -511,11 +512,11 @@ _one_step_complete_msg(slurmd_job_t *job, int first, int last) msg.range_first = first; msg.range_last = last; msg.step_rc = step_complete.step_rc; - msg.jobacct = jobacct_g_alloc(NULL); + msg.jobacct = jobacct_gather_g_create(NULL); /************* acct stuff ********************/ if(!acct_sent) { - jobacct_g_aggregate(step_complete.jobacct, job->jobacct); - jobacct_g_getinfo(step_complete.jobacct, JOBACCT_DATA_TOTAL, + jobacct_gather_g_aggregate(step_complete.jobacct, job->jobacct); + jobacct_gather_g_getinfo(step_complete.jobacct, JOBACCT_DATA_TOTAL, msg.jobacct); acct_sent = true; } @@ -560,7 +561,7 @@ _one_step_complete_msg(slurmd_job_t *job, int first, int last) error("Rank %d failed sending step completion message" " directly to slurmctld", step_complete.rank); finished: - jobacct_g_free(msg.jobacct); + jobacct_gather_g_destroy(msg.jobacct); } /* Given a starting bit in the step_complete.bits bitstring, "start", @@ -670,7 +671,7 @@ job_manager(slurmd_job_t *job) || slurmd_task_init() != SLURM_SUCCESS || mpi_hook_slurmstepd_init(&job->env) != SLURM_SUCCESS || slurm_proctrack_init() != SLURM_SUCCESS - || jobacct_init() != SLURM_SUCCESS) { + || slurm_jobacct_gather_init() != SLURM_SUCCESS) { rc = SLURM_FAILURE; goto fail1; } @@ -733,7 +734,7 @@ job_manager(slurmd_job_t *job) _send_launch_resp(job, 0); _wait_for_all_tasks(job); - jobacct_g_endpoll(); + jobacct_gather_g_endpoll(); job->state = SLURMSTEPD_STEP_ENDING; @@ -913,7 +914,7 @@ _fork_all_tasks(slurmd_job_t *job) if (j > i) close(readfds[j]); } - /* jobacct_g_endpoll(); + /* jobacct_gather_g_endpoll(); * closing jobacct files here causes deadlock */ if (conf->propagate_prio == 1) @@ -966,19 +967,26 @@ _fork_all_tasks(slurmd_job_t *job) for (i = 0; i < job->ntasks; i++) { /* - * Put this task in the step process group - */ - if (setpgid (job->task[i]->pid, job->pgid) < 0) - error ("Unable to put task %d (pid %ld) into pgrp %ld", - i, job->task[i]->pid, job->pgid); - - if (slurm_container_add(job, job->task[i]->pid) == SLURM_ERROR) { - error("slurm_container_create: %m"); + * Put this task in the step process group + * login_tty() must put task zero in its own + * session, causing setpgid() to fail, setsid() + * has already set its process group as desired + */ + if ((job->pty == 0) + && (setpgid (job->task[i]->pid, job->pgid) < 0)) { + error("Unable to put task %d (pid %ld) into " + "pgrp %ld: %m", + i, job->task[i]->pid, job->pgid); + } + + if (slurm_container_add(job, job->task[i]->pid) + == SLURM_ERROR) { + error("slurm_container_add: %m"); goto fail1; } jobacct_id.nodeid = job->nodeid; jobacct_id.taskid = job->task[i]->gtid; - 
jobacct_g_add_task(job->task[i]->pid, + jobacct_gather_g_add_task(job->task[i]->pid, &jobacct_id); if (spank_task_post_fork (job, i) < 0) { @@ -986,8 +994,8 @@ _fork_all_tasks(slurmd_job_t *job) return SLURM_ERROR; } } - jobacct_g_set_proctrack_container_id(job->cont_id); - + jobacct_gather_g_set_proctrack_container_id(job->cont_id); + /* * Now it's ok to unblock the tasks, so they may call exec. */ @@ -1108,12 +1116,12 @@ _wait_for_any_task(slurmd_job_t *job, bool waitflag) } /************* acct stuff ********************/ - jobacct = jobacct_g_remove_task(pid); + jobacct = jobacct_gather_g_remove_task(pid); if(jobacct) { - jobacct_g_setinfo(jobacct, - JOBACCT_DATA_RUSAGE, &rusage); - jobacct_g_aggregate(job->jobacct, jobacct); - jobacct_g_free(jobacct); + jobacct_gather_g_setinfo(jobacct, + JOBACCT_DATA_RUSAGE, &rusage); + jobacct_gather_g_aggregate(job->jobacct, jobacct); + jobacct_gather_g_destroy(jobacct); } /*********************************************/ @@ -1141,7 +1149,7 @@ _wait_for_any_task(slurmd_job_t *job, bool waitflag) if (job->task_epilog) { _run_script_as_user("user task_epilog", job->task_epilog, - job, 2, job->env); + job, 5, job->env); } if (conf->task_epilog) { char *my_epilog; @@ -1671,6 +1679,34 @@ _initgroups(slurmd_job_t *job) return 0; } +/* + * Check this user's access rights to a file + * path IN: pathname of file to test + * modes IN: desired access + * uid IN: user ID to access the file + * gid IN: group ID to access the file + * RET 0 on success, -1 on failure + */ +static int _access(const char *path, int modes, uid_t uid, gid_t gid) +{ + struct stat buf; + int f_mode; + + if (stat(path, &buf) != 0) + return -1; + + if (buf.st_uid == uid) + f_mode = (buf.st_mode >> 6) & 07; + else if (buf.st_gid == gid) + f_mode = (buf.st_mode >> 3) & 07; + else + f_mode = buf.st_mode & 07; + + if ((f_mode & modes) == modes) + return 0; + return -1; +} + /* * Run a script as a specific user, with the specified uid, gid, and * extended groups. @@ -1697,8 +1733,8 @@ _run_script_as_user(const char *name, const char *path, slurmd_job_t *job, debug("[job %u] attempting to run %s [%s]", job->jobid, name, path); - if (access(path, R_OK | X_OK) < 0) { - error("Could not run %s [%s]: %m", name, path); + if (_access(path, 5, job->pwd->pw_uid, job->pwd->pw_gid) < 0) { + error("Could not run %s [%s]: access denied", name, path); return -1; } @@ -1729,7 +1765,11 @@ _run_script_as_user(const char *name, const char *path, slurmd_job_t *job, } chdir(job->cwd); +#ifdef SETPGRP_TWO_ARGS + setpgrp(0, 0); +#else setpgrp(); +#endif execve(path, argv, env); error("execve(): %m"); exit(127); diff --git a/src/slurmd/slurmstepd/mgr.h b/src/slurmd/slurmstepd/mgr.h index 38d183bb1..2efa3be73 100644 --- a/src/slurmd/slurmstepd/mgr.h +++ b/src/slurmd/slurmstepd/mgr.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/multi_prog.c b/src/slurmd/slurmstepd/multi_prog.c index 6a1c2e8fd..3dfd090f8 100644 --- a/src/slurmd/slurmstepd/multi_prog.c +++ b/src/slurmd/slurmstepd/multi_prog.c @@ -11,7 +11,7 @@ * and * Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, - * UCRL-CODE-226842. + * LLNL-CODE-402394. 
* * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -142,11 +142,11 @@ extern int multi_prog_get_argv(char *file_contents, char **prog_env, int task_rank, int *argc, char ***argv) { - char *line; + char *line = NULL; int line_num = 0; int task_offset; - char *p, *s, *ptrptr; - char *rank_spec, *args_spec; + char *p = NULL, *s = NULL, *ptrptr = NULL; + char *rank_spec = NULL, *args_spec = NULL; int prog_argc = 0; char **prog_argv = NULL; char *local_data = NULL; diff --git a/src/slurmd/slurmstepd/multi_prog.h b/src/slurmd/slurmstepd/multi_prog.h index e262add27..a36256b6f 100644 --- a/src/slurmd/slurmstepd/multi_prog.h +++ b/src/slurmd/slurmstepd/multi_prog.h @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/pam_ses.c b/src/slurmd/slurmstepd/pam_ses.c index 2e25621cf..c754492fe 100644 --- a/src/slurmd/slurmstepd/pam_ses.c +++ b/src/slurmd/slurmstepd/pam_ses.c @@ -5,7 +5,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Donna Mecozzi <dmecozzi@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/pam_ses.h b/src/slurmd/slurmstepd/pam_ses.h index 2c0092a58..f933cc277 100644 --- a/src/slurmd/slurmstepd/pam_ses.h +++ b/src/slurmd/slurmstepd/pam_ses.h @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Donna Mecozzi <dmecozzi@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/pdebug.c b/src/slurmd/slurmstepd/pdebug.c index 4917d689e..59a26f87d 100644 --- a/src/slurmd/slurmstepd/pdebug.c +++ b/src/slurmd/slurmstepd/pdebug.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/pdebug.h b/src/slurmd/slurmstepd/pdebug.h index c6ae9a027..228dcfb02 100644 --- a/src/slurmd/slurmstepd/pdebug.h +++ b/src/slurmd/slurmstepd/pdebug.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
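The _access() helper added to mgr.c above replaces the earlier access(2) call so that the task prolog/epilog check is made against the job owner's uid/gid rather than slurmstepd's real IDs; it picks the owner, group, or "other" permission bits out of st_mode by hand. A small self-contained sketch of the same technique follows (can_access is a hypothetical name, not SLURM code):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

/* Select the owner, group, or "other" rwx bits from st_mode depending on
 * which identity is being checked, then compare with the wanted modes
 * (4 = read, 2 = write, 1 = execute). */
static int can_access(const char *path, int modes, uid_t uid, gid_t gid)
{
    struct stat st;
    int bits;

    if (stat(path, &st) != 0)
        return -1;

    if (st.st_uid == uid)
        bits = (st.st_mode >> 6) & 07;   /* owner bits */
    else if (st.st_gid == gid)
        bits = (st.st_mode >> 3) & 07;   /* group bits */
    else
        bits = st.st_mode & 07;          /* other bits */

    return ((bits & modes) == modes) ? 0 : -1;
}

int main(void)
{
    /* 5 == r-x, the same value the diff passes for task prolog/epilog */
    printf("/bin/sh r-x for my uid/gid: %s\n",
           can_access("/bin/sh", 5, getuid(), getgid()) == 0 ? "yes" : "no");
    return 0;
}

Like the original helper, this deliberately ignores supplementary group membership and ACLs; it is a conservative "may this user run the script" test rather than a full permission model.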
diff --git a/src/slurmd/slurmstepd/req.c b/src/slurmd/slurmstepd/req.c index fc744d250..110083f7e 100644 --- a/src/slurmd/slurmstepd/req.c +++ b/src/slurmd/slurmstepd/req.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/req.c - slurmstepd domain socket request handling - * $Id: req.c 13322 2008-02-21 19:06:27Z da $ + * $Id: req.c 13959 2008-04-30 21:00:47Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -53,7 +53,7 @@ #include "src/common/fd.h" #include "src/common/eio.h" #include "src/common/slurm_auth.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/common/stepd_api.h" #include "src/slurmd/slurmd/slurmd.h" @@ -72,6 +72,7 @@ static int _handle_info(int fd, slurmd_job_t *job); static int _handle_signal_process_group(int fd, slurmd_job_t *job, uid_t uid); static int _handle_signal_task_local(int fd, slurmd_job_t *job, uid_t uid); static int _handle_signal_container(int fd, slurmd_job_t *job, uid_t uid); +static int _handle_checkpoint_tasks(int fd, slurmd_job_t *job, uid_t uid); static int _handle_attach(int fd, slurmd_job_t *job, uid_t uid); static int _handle_pid_in_container(int fd, slurmd_job_t *job); static int _handle_daemon_pid(int fd, slurmd_job_t *job); @@ -384,7 +385,7 @@ _handle_accept(void *arg) free_buf(buffer); goto fail; } - rc = g_slurm_auth_verify(auth_cred, NULL, 2); + rc = g_slurm_auth_verify(auth_cred, NULL, 2, NULL); if (rc != SLURM_SUCCESS) { error("Verifying authentication credential: %s", g_slurm_auth_errstr(g_slurm_auth_errno(auth_cred))); @@ -394,8 +395,8 @@ _handle_accept(void *arg) } /* Get the uid & gid from the credential, then destroy it. 
*/ - uid = g_slurm_auth_get_uid(auth_cred); - gid = g_slurm_auth_get_gid(auth_cred); + uid = g_slurm_auth_get_uid(auth_cred, NULL); + gid = g_slurm_auth_get_gid(auth_cred, NULL); debug3(" Identity: uid=%d, gid=%d", uid, gid); g_slurm_auth_destroy(auth_cred); free_buf(buffer); @@ -464,6 +465,10 @@ _handle_request(int fd, slurmd_job_t *job, uid_t uid, gid_t gid) debug("Handling REQUEST_SIGNAL_CONTAINER"); rc = _handle_signal_container(fd, job, uid); break; + case REQUEST_CHECKPOINT_TASKS: + debug("Handling REQUEST_CHECKPOINT_TASKS"); + rc = _handle_checkpoint_tasks(fd, job, uid); + break; case REQUEST_STATE: debug("Handling REQUEST_STATE"); rc = _handle_state(fd, job); @@ -540,6 +545,7 @@ _handle_info(int fd, slurmd_job_t *job) safe_write(fd, &job->jobid, sizeof(uint32_t)); safe_write(fd, &job->stepid, sizeof(uint32_t)); safe_write(fd, &job->nodeid, sizeof(uint32_t)); + safe_write(fd, &job->job_mem, sizeof(uint32_t)); return SLURM_SUCCESS; rwfail: @@ -714,6 +720,29 @@ _handle_signal_container(int fd, slurmd_job_t *job, uid_t uid) /* * Signal the container */ + if (job->nodeid == 0) { + static int msg_sent = 0; + /* Not really errors, + * but we want messages displayed by default */ + if (msg_sent) + ; + else if (sig == SIGXCPU) { + error("*** JOB CANCELLED DUE TO TIME LIMIT ***"); + msg_sent = 1; + } else if (sig == SIG_NODE_FAIL) { + error("*** JOB CANCELLED DUE TO NODE FAILURE ***"); + msg_sent = 1; + } else if (sig == SIG_FAILURE) { + error("*** JOB CANCELLED DUE TO SYSTEM FAILURE ***"); + msg_sent = 1; + } else if ((sig == SIGTERM) || (sig == SIGKILL)) { + error("*** JOB CANCELLED ***"); + msg_sent = 1; + } + } + if ((sig == SIG_NODE_FAIL) || (sig == SIG_FAILURE)) + goto done; + pthread_mutex_lock(&suspend_mutex); if (suspended) { rc = -1; @@ -742,6 +771,76 @@ rwfail: return SLURM_FAILURE; } +static int +_handle_checkpoint_tasks(int fd, slurmd_job_t *job, uid_t uid) +{ + static time_t last_timestamp = 0; + int rc = SLURM_SUCCESS; + int signal; + time_t timestamp; + + debug3("_handle_checkpoint_tasks for job %u.%u", + job->jobid, job->stepid); + + safe_read(fd, &signal, sizeof(int)); + safe_read(fd, ×tamp, sizeof(time_t)); + + debug3(" uid = %d", uid); + if (uid != job->uid && !_slurm_authorized_user(uid)) { + debug("checkpoint req from uid %ld for job %u.%u owned by uid %ld", + (long)uid, job->jobid, job->stepid, (long)job->uid); + rc = EPERM; + goto done; + } + + if (timestamp == last_timestamp) { + debug("duplicate checkpoint req for job %u.%u, timestamp %ld. discarded.", + job->jobid, job->stepid, (long)timestamp); + rc = ESLURM_ALREADY_DONE; /* EINPROGRESS? 
*/ + goto done; + } + + /* + * Sanity checks + */ + if (job->pgid <= (pid_t)1) { + debug ("step %u.%u invalid [jmgr_pid:%d pgid:%u]", + job->jobid, job->stepid, job->jmgr_pid, job->pgid); + rc = ESLURMD_JOB_NOTRUNNING; + goto done; + } + + /* + * Signal the process group + */ + pthread_mutex_lock(&suspend_mutex); + if (suspended) { + rc = ESLURMD_STEP_SUSPENDED; + pthread_mutex_unlock(&suspend_mutex); + goto done; + } + + /* TODO: send timestamp with signal */ + if (killpg(job->pgid, signal) == -1) { + rc = -1; /* Most probable ESRCH, resulting in ESLURMD_JOB_NOTRUNNING */ + verbose("Error sending signal %d to %u.%u, pgid %d, errno: %d: %s", + signal, job->jobid, job->stepid, job->pgid, + errno, slurm_strerror(rc)); + } else { + last_timestamp = timestamp; + verbose("Sent signal %d to %u.%u, pgid %d", + signal, job->jobid, job->stepid, job->pgid); + } + pthread_mutex_unlock(&suspend_mutex); + +done: + /* Send the return code */ + safe_write(fd, &rc, sizeof(int)); + return SLURM_SUCCESS; +rwfail: + return SLURM_FAILURE; +} + static int _handle_terminate(int fd, slurmd_job_t *job, uid_t uid) { @@ -933,7 +1032,15 @@ _handle_suspend(int fd, slurmd_job_t *job, uid_t uid) goto done; } - jobacct_g_suspend_poll(); + if (cont_id == 0) { + debug ("step %u.%u invalid container [cont_id:%u]", + job->jobid, job->stepid, job->cont_id); + rc = -1; + errnum = ESLURMD_JOB_NOTRUNNING; + goto done; + } + + jobacct_gather_g_suspend_poll(); /* * Signal the container @@ -990,7 +1097,15 @@ _handle_resume(int fd, slurmd_job_t *job, uid_t uid) goto done; } - jobacct_g_resume_poll(); + if (job->cont_id == 0) { + debug ("step %u.%u invalid container [cont_id:%u]", + job->jobid, job->stepid, job->cont_id); + rc = -1; + errnum = ESLURMD_JOB_NOTRUNNING; + goto done; + } + + jobacct_gather_g_resume_poll(); /* * Signal the container */ @@ -1049,8 +1164,8 @@ _handle_completion(int fd, slurmd_job_t *job, uid_t uid) safe_read(fd, &first, sizeof(int)); safe_read(fd, &last, sizeof(int)); safe_read(fd, &step_rc, sizeof(int)); - jobacct = jobacct_g_alloc(NULL); - jobacct_g_getinfo(jobacct, JOBACCT_DATA_PIPE, &fd); + jobacct = jobacct_gather_g_create(NULL); + jobacct_gather_g_getinfo(jobacct, JOBACCT_DATA_PIPE, &fd); /* * Record the completed nodes @@ -1075,9 +1190,9 @@ _handle_completion(int fd, slurmd_job_t *job, uid_t uid) step_complete.step_rc = MAX(step_complete.step_rc, step_rc); /************* acct stuff ********************/ - jobacct_g_aggregate(step_complete.jobacct, jobacct); + jobacct_gather_g_aggregate(step_complete.jobacct, jobacct); timeout: - jobacct_g_free(jobacct); + jobacct_gather_g_destroy(jobacct); /*********************************************/ /* Send the return code and errno, we do this within the locked @@ -1109,24 +1224,24 @@ _handle_stat_jobacct(int fd, slurmd_job_t *job, uid_t uid) "owned by uid %ld", (long)uid, job->jobid, job->stepid, (long)job->uid); /* Send NULL */ - jobacct_g_setinfo(jobacct, JOBACCT_DATA_PIPE, &fd); + jobacct_gather_g_setinfo(jobacct, JOBACCT_DATA_PIPE, &fd); return SLURM_ERROR; } - jobacct = jobacct_g_alloc(NULL); + jobacct = jobacct_gather_g_create(NULL); debug3("num tasks = %d", job->ntasks); for (i = 0; i < job->ntasks; i++) { - temp_jobacct = jobacct_g_stat_task(job->task[i]->pid); + temp_jobacct = jobacct_gather_g_stat_task(job->task[i]->pid); if(temp_jobacct) { - jobacct_g_aggregate(jobacct, temp_jobacct); - jobacct_g_free(temp_jobacct); + jobacct_gather_g_aggregate(jobacct, temp_jobacct); + jobacct_gather_g_destroy(temp_jobacct); num_tasks++; } } - 
jobacct_g_setinfo(jobacct, JOBACCT_DATA_PIPE, &fd); + jobacct_gather_g_setinfo(jobacct, JOBACCT_DATA_PIPE, &fd); safe_write(fd, &num_tasks, sizeof(int)); - jobacct_g_free(jobacct); + jobacct_gather_g_destroy(jobacct); return SLURM_SUCCESS; rwfail: return SLURM_ERROR; diff --git a/src/slurmd/slurmstepd/req.h b/src/slurmd/slurmstepd/req.h index 5a041309b..cc63c6fdb 100644 --- a/src/slurmd/slurmstepd/req.h +++ b/src/slurmd/slurmstepd/req.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/req.h - slurmstepd request handling - * $Id: req.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: req.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/slurmstepd.c b/src/slurmd/slurmstepd/slurmstepd.c index 0a293d339..40d126f71 100644 --- a/src/slurmd/slurmstepd/slurmstepd.c +++ b/src/slurmd/slurmstepd/slurmstepd.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/slurmstepd.c - SLURM job-step manager. - * $Id: slurmstepd.c 11602 2007-06-01 01:01:25Z morrone $ + * $Id: slurmstepd.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * and Christopher Morrone <morrone2@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
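The new _handle_checkpoint_tasks() handler above follows the same fixed-field pattern as the other stepd domain-socket requests: read a signal number and a timestamp, reject duplicates and unauthorized callers, then write back a single int return code. A toy version of that exchange over a socketpair is sketched below; safe_read()/safe_write() in the real code loop over partial transfers and jump to rwfail on error, which this sketch omits, and all names here are illustrative.

#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
    int sv[2];

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
        return 1;

    if (fork() == 0) {                   /* "handler" side */
        static time_t last_timestamp;    /* duplicate-request filter */
        int sig, rc = 0;
        time_t timestamp;

        read(sv[1], &sig, sizeof(sig));
        read(sv[1], &timestamp, sizeof(timestamp));
        if (timestamp == last_timestamp)
            rc = -1;                     /* request already handled */
        else
            last_timestamp = timestamp;
        /* ... authorization and job-state checks would go here ... */
        write(sv[1], &rc, sizeof(rc));
        _exit(0);
    }

    int sig = 10, rc;                    /* "client" side, e.g. SIGUSR1 */
    time_t timestamp = time(NULL);

    write(sv[0], &sig, sizeof(sig));
    write(sv[0], &timestamp, sizeof(timestamp));
    read(sv[0], &rc, sizeof(rc));
    printf("handler returned %d\n", rc);
    wait(NULL);
    return 0;
}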
@@ -47,7 +47,7 @@ #include "src/common/xmalloc.h" #include "src/common/xsignal.h" -#include "src/common/slurm_jobacct.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/common/switch.h" #include "src/common/stepd_api.h" @@ -182,6 +182,8 @@ _init_from_slurmd(int sock, char **argv, slurm_msg_t *msg = NULL; int ngids = 0; gid_t *gids = NULL; + uint16_t port; + char buf[16]; /* receive job type from slurmd */ safe_read(sock, &step_type, sizeof(int)); @@ -196,7 +198,7 @@ _init_from_slurmd(int sock, char **argv, safe_read(sock, &step_complete.max_depth, sizeof(int)); safe_read(sock, &step_complete.parent_addr, sizeof(slurm_addr)); step_complete.bits = bit_alloc(step_complete.children); - step_complete.jobacct = jobacct_g_alloc(NULL); + step_complete.jobacct = jobacct_gather_g_create(NULL); pthread_mutex_unlock(&step_complete.lock); /* receive conf from slurmd */ @@ -230,17 +232,13 @@ _init_from_slurmd(int sock, char **argv, log_init(argv[0], conf->log_opts, LOG_DAEMON, conf->logfile); /* acct info */ - jobacct_g_startpoll(conf->job_acct_freq); + jobacct_gather_g_startpoll(conf->job_acct_gather_freq); switch_g_slurmd_step_init(); - { - uint16_t port; - char buf[16]; - slurm_get_ip_str(&step_complete.parent_addr, &port, buf, 16); - debug3("slurmstepd rank %d, parent address = %s, port = %u", - step_complete.rank, buf, port); - } + slurm_get_ip_str(&step_complete.parent_addr, &port, buf, 16); + debug3("slurmstepd rank %d, parent address = %s, port = %u", + step_complete.rank, buf, port); /* receive cli from slurmd */ safe_read(sock, &len, sizeof(int)); @@ -342,7 +340,7 @@ _step_setup(slurm_addr *cli, slurm_addr *self, slurm_msg_t *msg) fatal("_step_setup: no job returned"); } job->jmgr_pid = getpid(); - job->jobacct = jobacct_g_alloc(NULL); + job->jobacct = jobacct_gather_g_create(NULL); return job; } @@ -350,7 +348,7 @@ _step_setup(slurm_addr *cli, slurm_addr *self, slurm_msg_t *msg) static void _step_cleanup(slurmd_job_t *job, slurm_msg_t *msg, int rc) { - jobacct_g_free(job->jobacct); + jobacct_gather_g_destroy(job->jobacct); if (!job->batch) job_destroy(job); /* @@ -369,7 +367,7 @@ _step_cleanup(slurmd_job_t *job, slurm_msg_t *msg, int rc) fatal("handle_launch_message: Unrecognized launch RPC"); break; } - jobacct_g_free(step_complete.jobacct); + jobacct_gather_g_destroy(step_complete.jobacct); xfree(msg); } diff --git a/src/slurmd/slurmstepd/slurmstepd.h b/src/slurmd/slurmstepd/slurmstepd.h index 2ef0cc40f..d053aeb3c 100644 --- a/src/slurmd/slurmstepd/slurmstepd.h +++ b/src/slurmd/slurmstepd/slurmstepd.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/slurmstepd.h - slurmstepd general header file - * $Id: slurmstepd.h 13322 2008-02-21 19:06:27Z da $ + * $Id: slurmstepd.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
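The _init_from_slurmd() change above only hoists the parent-address logging out of its nested block; slurm_get_ip_str() is a SLURM wrapper, but the underlying operation is the usual conversion of a socket address to a printable host string plus a host-order port, roughly as in this plain-POSIX sketch (the address is made up, since the real one arrives from slurmd):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    struct sockaddr_in parent;
    char buf[INET_ADDRSTRLEN];
    uint16_t port;

    memset(&parent, 0, sizeof(parent));
    parent.sin_family = AF_INET;
    parent.sin_port = htons(6818);                 /* example value */
    inet_pton(AF_INET, "192.0.2.10", &parent.sin_addr);

    inet_ntop(AF_INET, &parent.sin_addr, buf, sizeof(buf));
    port = ntohs(parent.sin_port);
    printf("parent address = %s, port = %u\n", buf, port);
    return 0;
}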
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c index 122ac1549..e00850f62 100644 --- a/src/slurmd/slurmstepd/slurmstepd_job.c +++ b/src/slurmd/slurmstepd/slurmstepd_job.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/slurmstepd_job.c - slurmd_job_t routines - * $Id: slurmstepd_job.c 12580 2007-10-29 20:17:09Z jette $ + * $Id: slurmstepd_job.c 13755 2008-04-01 19:12:53Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -54,6 +54,7 @@ #include "src/common/fd.h" #include "src/common/log.h" #include "src/common/eio.h" +#include "src/common/slurm_jobacct_gather.h" #include "src/common/slurm_protocol_api.h" #include "src/slurmd/slurmd/slurmd.h" @@ -155,7 +156,7 @@ job_create(launch_tasks_request_msg_t *msg) srun_info_t *srun = NULL; slurm_addr resp_addr; slurm_addr io_addr; - int nodeid = NO_VAL; + int nodeid = NO_VAL; xassert(msg != NULL); xassert(msg->complete_nodelist != NULL); @@ -185,23 +186,33 @@ job_create(launch_tasks_request_msg_t *msg) return NULL; } - job->state = SLURMSTEPD_STEP_STARTING; - job->pwd = pwd; - job->ntasks = msg->tasks_to_launch[nodeid]; - job->nprocs = msg->nprocs; - job->jobid = msg->job_id; - job->stepid = msg->job_step_id; + job->state = SLURMSTEPD_STEP_STARTING; + job->pwd = pwd; + job->ntasks = msg->tasks_to_launch[nodeid]; + job->nprocs = msg->nprocs; + job->jobid = msg->job_id; + job->stepid = msg->job_step_id; + + job->job_mem = msg->job_mem; + job->task_mem = msg->task_mem; + if (job->job_mem) + jobacct_common_set_mem_limit(job->jobid, job->job_mem); + else if (job->task_mem && job->ntasks) { + jobacct_common_set_mem_limit(job->jobid, + (job->task_mem * job->ntasks)); + } - job->uid = (uid_t) msg->uid; - job->gid = (gid_t) msg->gid; - job->cwd = xstrdup(msg->cwd); - job->task_dist = msg->task_dist; - job->plane_size = msg->plane_size; + job->uid = (uid_t) msg->uid; + job->gid = (gid_t) msg->gid; + job->cwd = xstrdup(msg->cwd); + job->task_dist = msg->task_dist; + job->plane_size = msg->plane_size; job->cpu_bind_type = msg->cpu_bind_type; job->cpu_bind = xstrdup(msg->cpu_bind); job->mem_bind_type = msg->mem_bind_type; job->mem_bind = xstrdup(msg->mem_bind); + job->ckpt_path = xstrdup(msg->ckpt_path); job->env = _array_copy(msg->envc, msg->env); job->eio = eio_handle_create(); @@ -229,6 +240,7 @@ job_create(launch_tasks_request_msg_t *msg) job->envtp->cpu_bind = NULL; job->envtp->mem_bind_type = 0; job->envtp->mem_bind = NULL; + job->envtp->ckpt_path = NULL; memcpy(&resp_addr, &msg->orig_addr, sizeof(slurm_addr)); slurm_set_addr(&resp_addr, @@ -256,12 +268,15 @@ job_create(launch_tasks_request_msg_t *msg) job->nodeid = nodeid; job->debug = msg->slurmd_debug; job->cpus = msg->cpus_allocated[nodeid]; + if (msg->acctg_freq != (uint16_t) NO_VAL) + jobacct_gather_g_change_poll(msg->acctg_freq); job->multi_prog = msg->multi_prog; job->timelimit = (time_t) -1; job->task_flags = msg->task_flags; - job->switch_job = msg->switch_job; - - job->options = msg->options; + job->switch_job = msg->switch_job; + job->pty = msg->pty; + job->open_mode = msg->open_mode; + 
job->options = msg->options; list_append(job->sruns, (void *) srun); @@ -314,10 +329,18 @@ job_batch_job_create(batch_job_launch_msg_t *msg) job->nprocs = msg->nprocs; job->jobid = msg->job_id; job->stepid = msg->step_id; + + job->job_mem = msg->job_mem; + if (job->job_mem) + jobacct_common_set_mem_limit(job->jobid, job->job_mem); + job->batch = true; + if (msg->acctg_freq != (uint16_t) NO_VAL) + jobacct_gather_g_change_poll(msg->acctg_freq); job->multi_prog = 0; + job->open_mode = msg->open_mode; job->overcommit = (bool) msg->overcommit; - job->node_name = xstrdup(conf->node_name); + job->node_name = xstrdup(conf->node_name); job->uid = (uid_t) msg->uid; job->gid = (gid_t) msg->gid; @@ -340,6 +363,7 @@ job_batch_job_create(batch_job_launch_msg_t *msg) job->envtp->cpu_bind = NULL; job->envtp->mem_bind_type = 0; job->envtp->mem_bind = NULL; + job->envtp->ckpt_path = NULL; srun = srun_info_create(NULL, NULL, NULL); diff --git a/src/slurmd/slurmstepd/slurmstepd_job.h b/src/slurmd/slurmstepd/slurmstepd_job.h index 1da6d9678..5b0a653ef 100644 --- a/src/slurmd/slurmstepd/slurmstepd_job.h +++ b/src/slurmd/slurmstepd/slurmstepd_job.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/slurmstepd_job.h slurmd_job_t definition - * $Id: slurmstepd_job.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: slurmstepd_job.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -117,6 +117,8 @@ typedef struct slurmd_job { uint32_t nodeid; /* relative position of this node in job */ uint32_t ntasks; /* number of tasks on *this* node */ uint32_t debug; /* debug level for job slurmd */ + uint32_t job_mem; /* MB of memory reserved for the job */ + uint32_t task_mem; /* MB of memory reserved for each task */ uint16_t cpus; /* number of cpus to use for this job */ uint16_t argc; /* number of commandline arguments */ char **env; /* job environment */ @@ -188,8 +190,10 @@ typedef struct slurmd_job { char *batchdir; jobacctinfo_t *jobacct; - + uint8_t open_mode; /* stdout/err append or truncate */ + uint8_t pty; /* set if creating pseudo tty */ job_options_t options; + char *ckpt_path; } slurmd_job_t; diff --git a/src/slurmd/slurmstepd/step_terminate_monitor.c b/src/slurmd/slurmstepd/step_terminate_monitor.c index 9b01edd91..3438374f8 100644 --- a/src/slurmd/slurmstepd/step_terminate_monitor.c +++ b/src/slurmd/slurmstepd/step_terminate_monitor.c @@ -5,7 +5,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
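job_create() and job_batch_job_create() above record msg->job_mem and msg->task_mem and register a per-job memory limit with the accounting code: an explicit whole-job figure wins, otherwise the per-task figure is scaled by the local task count. jobacct_common_set_mem_limit() is a SLURM internal, so the sketch below isolates only the selection rule (values in MB, helper name hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Prefer the explicit per-job figure; otherwise derive one from the
 * per-task figure and the local task count; 0 means no limit requested. */
static uint32_t step_mem_limit_mb(uint32_t job_mem, uint32_t task_mem,
                                  uint32_t ntasks)
{
    if (job_mem)
        return job_mem;
    if (task_mem && ntasks)
        return task_mem * ntasks;
    return 0;
}

int main(void)
{
    printf("%u MB\n", step_mem_limit_mb(0, 512, 4));      /* prints 2048 */
    printf("%u MB\n", step_mem_limit_mb(1024, 512, 4));   /* prints 1024 */
    return 0;
}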
@@ -56,6 +56,7 @@ static int call_external_program(void); void step_terminate_monitor_start(uint32_t jobid, uint32_t stepid) { slurm_ctl_conf_t *conf; + pthread_attr_t attr; pthread_mutex_lock(&lock); @@ -75,7 +76,9 @@ void step_terminate_monitor_start(uint32_t jobid, uint32_t stepid) program_name = xstrdup(conf->unkillable_program); slurm_conf_unlock(); - pthread_create(&tid, NULL, monitor, NULL); + slurm_attr_init(&attr); + pthread_create(&tid, &attr, monitor, NULL); + slurm_attr_destroy(&attr); running_flag = 1; recorded_jobid = jobid; recorded_stepid = stepid; diff --git a/src/slurmd/slurmstepd/step_terminate_monitor.h b/src/slurmd/slurmstepd/step_terminate_monitor.h index d2c8ce56d..ee2caf4be 100644 --- a/src/slurmd/slurmstepd/step_terminate_monitor.h +++ b/src/slurmd/slurmstepd/step_terminate_monitor.h @@ -5,7 +5,7 @@ * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Christopher J. Morrone <morrone2@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c index f8909617a..c38df2e2c 100644 --- a/src/slurmd/slurmstepd/task.c +++ b/src/slurmd/slurmstepd/task.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * slurmd/slurmstepd/task.c - task launching functions for slurmstepd - * $Id: task.c 12573 2007-10-26 15:57:01Z jette $ + * $Id: task.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark A. Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
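Both _spawn_window_manager() in io.c and the step_terminate_monitor change above now start their threads through an explicitly initialized attribute object; slurm_attr_init()/slurm_attr_destroy() wrap the pthread attribute calls. Stripped of the SLURM wrappers, the pattern is the standard one below (monitor() is a stub; link with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void *monitor(void *arg)          /* stand-in for the worker thread */
{
    (void) arg;
    /* ... watch the step, relay window sizes, etc. ... */
    return NULL;
}

int main(void)
{
    pthread_attr_t attr;
    pthread_t tid;
    int rc;

    pthread_attr_init(&attr);
    /* Detached, as in _spawn_window_manager(): nothing will join it. */
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    rc = pthread_create(&tid, &attr, monitor, NULL);
    if (rc)
        fprintf(stderr, "pthread_create: %s\n", strerror(rc));
    pthread_attr_destroy(&attr);

    sleep(1);       /* give the detached thread a moment before exiting */
    return rc ? 1 : 0;
}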
@@ -62,6 +62,14 @@ # include <sys/checkpnt.h> #endif +#ifdef HAVE_PTY_H +# include <pty.h> +#endif + +#ifdef HAVE_UTMP_H +# include <utmp.h> +#endif + #include <sys/resource.h> #include <slurm/slurm_errno.h> @@ -69,7 +77,6 @@ #include "src/common/env.h" #include "src/common/fd.h" #include "src/common/log.h" -#include "src/common/slurm_jobacct.h" #include "src/common/switch.h" #include "src/common/xsignal.h" #include "src/common/xstring.h" @@ -291,6 +298,16 @@ exec_task(slurmd_job_t *job, int i, int waitfd) int rc; slurmd_task_info_t *task = job->task[i]; +#ifdef HAVE_PTY_H + /* Execute login_tty() before setpgid() calls */ + if (job->pty && (task->gtid == 0)) { + if (login_tty(task->stdin_fd)) + error("login_tty: %m"); + else + debug3("login_tty good"); + } +#endif + if (set_user_limits(job) < 0) { debug("Unable to set user limits"); log_fini(); @@ -331,6 +348,7 @@ exec_task(slurmd_job_t *job, int i, int waitfd) job->envtp->mem_bind = xstrdup(job->mem_bind); job->envtp->mem_bind_type = job->mem_bind_type; job->envtp->distribution = -1; + job->envtp->ckpt_path = xstrdup(job->ckpt_path); setup_env(job->envtp); setenvf(&job->envtp->env, "SLURMD_NODENAME", "%s", conf->node_name); job->env = job->envtp->env; @@ -361,12 +379,19 @@ exec_task(slurmd_job_t *job, int i, int waitfd) pdebug_stop_current(job); } - io_dup_stdio(task); +#ifdef HAVE_PTY_H + if (job->pty && (task->gtid == 0)) { + /* Need to perform the login_tty() before all tasks + * register and the process groups are reset, otherwise + * login_tty() gets disabled */ + } else +#endif + io_dup_stdio(task); /* task-specific pre-launch activities */ if (spank_user_task (job, i) < 0) { - error ("Failed to invoke task plugin stack\n"); + error ("Failed to invoke task plugin stack"); exit (1); } diff --git a/src/slurmd/slurmstepd/task.h b/src/slurmd/slurmstepd/task.h index b2aeade2d..9daa80dfa 100644 --- a/src/slurmd/slurmstepd/task.h +++ b/src/slurmd/slurmstepd/task.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/task.h - task launching functions for slurmstepd - * $Id: task.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: task.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmd/slurmstepd/ulimits.c b/src/slurmd/slurmstepd/ulimits.c index 9d52bafba..08eda040d 100644 --- a/src/slurmd/slurmstepd/ulimits.c +++ b/src/slurmd/slurmstepd/ulimits.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/slurmd/slurmstepd/ulimits.c - set user limits for job - * $Id: ulimits.c 11693 2007-06-13 16:22:30Z jette $ + * $Id: ulimits.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
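For task zero under --pty, exec_task() above calls login_tty(task->stdin_fd) before the process-group handling and later skips io_dup_stdio(), because login_tty() has already started a new session, made the pty slave the controlling terminal, and placed it on fds 0 through 2; that is also why the setpgid() loop in mgr.c is bypassed when job->pty is set. Outside SLURM the same sequence looks like the sketch below (glibc, link with -lutil; the shell command is just a demo payload):

#include <pty.h>
#include <utmp.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    int master, slave;
    pid_t pid;
    char buf[256];
    ssize_t n;

    if (openpty(&master, &slave, NULL, NULL, NULL) < 0) {
        perror("openpty");
        return 1;
    }

    pid = fork();
    if (pid == 0) {                      /* the "task" */
        close(master);
        if (login_tty(slave) < 0)        /* new session, slave on fds 0-2 */
            _exit(1);
        execlp("/bin/sh", "sh", "-c", "tty; stty size", (char *)NULL);
        _exit(127);
    }

    close(slave);                        /* parent keeps only the master */
    while ((n = read(master, buf, sizeof(buf))) > 0)
        fwrite(buf, 1, (size_t)n, stdout);
    waitpid(pid, NULL, 0);
    return 0;
}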
@@ -76,10 +76,46 @@ static int _set_limit(char **env, slurm_rlimits_info_t *rli); int set_user_limits(slurmd_job_t *job) { slurm_rlimits_info_t *rli; + struct rlimit r; + rlim_t task_mem_bytes; for (rli = get_slurm_rlimits_info(); rli->name; rli++) _set_limit( job->env, rli ); + /* Set soft and hard memory and data size limit for this process, + * try to handle job and task limit (all spawned processes) in slurmd */ + task_mem_bytes = job->task_mem; /* MB */ + task_mem_bytes *= (1024 * 1024); +#ifdef RLIMIT_AS + if ((task_mem_bytes) && (getrlimit(RLIMIT_AS, &r) == 0) && + (r.rlim_max > task_mem_bytes)) { + r.rlim_max = r.rlim_cur = task_mem_bytes; + if (setrlimit(RLIMIT_AS, &r)) { + /* Indicates that limit has already been exceeded */ + fatal("setrlimit(RLIMIT_AS, %u MB): %m", job->task_mem); + } else + info("Set task_mem(%u MB)", job->task_mem); +#if 0 + getrlimit(RLIMIT_AS, &r); + info("task memory limits: %u %u", r.rlim_cur, r.rlim_max); +#endif + } +#endif +#ifdef RLIMIT_DATA + if ((task_mem_bytes) && (getrlimit(RLIMIT_DATA, &r) == 0) && + (r.rlim_max > task_mem_bytes)) { + r.rlim_max = r.rlim_cur = task_mem_bytes; + if (setrlimit(RLIMIT_DATA, &r)) { + /* Indicates that limit has already been exceeded */ + fatal("setrlimit(RLIMIT_DATA, %u MB): %m", job->task_mem); + } else + info("Set task_data(%u MB)", job->task_mem); +#if 0 + getrlimit(RLIMIT_DATA, &r); + info("task DATA limits: %u %u", r.rlim_cur, r.rlim_max); +#endif + } +#endif return SLURM_SUCCESS; } diff --git a/src/slurmd/slurmstepd/ulimits.h b/src/slurmd/slurmstepd/ulimits.h index b51314e80..6a1656b60 100644 --- a/src/slurmd/slurmstepd/ulimits.h +++ b/src/slurmd/slurmstepd/ulimits.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/slurmdbd/Makefile.am b/src/slurmdbd/Makefile.am new file mode 100644 index 000000000..c8c9e1613 --- /dev/null +++ b/src/slurmdbd/Makefile.am @@ -0,0 +1,32 @@ +# +# Makefile for slurmdbd + +AUTOMAKE_OPTIONS = foreign +CLEANFILES = core.* + +INCLUDES = -I$(top_srcdir) + +sbin_PROGRAMS = slurmdbd + +slurmdbd_LDADD = \ + $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libcommon.o -ldl + + +slurmdbd_SOURCES = \ + agent.c \ + agent.h \ + proc_req.c \ + proc_req.h \ + read_config.c \ + read_config.h \ + rpc_mgr.c \ + rpc_mgr.h \ + slurmdbd.c \ + slurmdbd.h + +slurmdbd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) + +force: +$(slurmdbd_LDADD) : force + @cd `dirname $@` && $(MAKE) `basename $@` diff --git a/src/slurmdbd/Makefile.in b/src/slurmdbd/Makefile.in new file mode 100644 index 000000000..450be380c --- /dev/null +++ b/src/slurmdbd/Makefile.in @@ -0,0 +1,576 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
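Backing up to the ulimits.c hunk a few lines above (before the new slurmdbd Makefile): set_user_limits() now converts job->task_mem from MB to bytes and lowers RLIMIT_AS and RLIMIT_DATA whenever the current hard limit is higher, treating 0 as "no cap requested". The conversion-and-clamp step on its own, with a hypothetical helper name, might look like:

#include <stdio.h>
#include <sys/resource.h>

/* Lower (never raise) the given resource limit to task_mem_mb megabytes;
 * a value of 0 means no cap was requested. */
static int cap_limit(int resource, unsigned long task_mem_mb)
{
    rlim_t bytes = (rlim_t) task_mem_mb * 1024 * 1024;
    struct rlimit r;

    if (!bytes || getrlimit(resource, &r) != 0)
        return 0;                        /* nothing to do or can't read it */
    if (r.rlim_max <= bytes)
        return 0;                        /* already at or below the cap */

    r.rlim_cur = r.rlim_max = bytes;
    if (setrlimit(resource, &r) != 0) {
        perror("setrlimit");
        return -1;
    }
    return 0;
}

int main(void)
{
    if (cap_limit(RLIMIT_AS, 2048) == 0) /* cap address space at 2048 MB */
        puts("RLIMIT_AS capped at 2048 MB");
    return 0;
}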
+ +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# +# Makefile for slurmdbd + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +sbin_PROGRAMS = slurmdbd$(EXEEXT) +subdir = src/slurmdbd +DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__installdirs = "$(DESTDIR)$(sbindir)" +sbinPROGRAMS_INSTALL = $(INSTALL_PROGRAM) +PROGRAMS = $(sbin_PROGRAMS) +am_slurmdbd_OBJECTS = agent.$(OBJEXT) proc_req.$(OBJEXT) \ + read_config.$(OBJEXT) rpc_mgr.$(OBJEXT) slurmdbd.$(OBJEXT) +slurmdbd_OBJECTS = $(am_slurmdbd_OBJECTS) +slurmdbd_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libcommon.o +slurmdbd_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(slurmdbd_LDFLAGS) \ + $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(slurmdbd_SOURCES) +DIST_SOURCES = $(slurmdbd_SOURCES) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = 
@ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = 
@host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +CLEANFILES = core.* +INCLUDES = -I$(top_srcdir) +slurmdbd_LDADD = \ + $(top_builddir)/src/common/libdaemonize.la \ + $(top_builddir)/src/common/libcommon.o -ldl + +slurmdbd_SOURCES = \ + agent.c \ + agent.h \ + proc_req.c \ + proc_req.h \ + read_config.c \ + read_config.h \ + rpc_mgr.c \ + rpc_mgr.h \ + slurmdbd.c \ + slurmdbd.h + +slurmdbd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/slurmdbd/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/slurmdbd/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-sbinPROGRAMS: $(sbin_PROGRAMS) + @$(NORMAL_INSTALL) + test -z "$(sbindir)" || $(MKDIR_P) "$(DESTDIR)$(sbindir)" + @list='$(sbin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + || test -f $$p1 \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(sbindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(sbinPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(sbindir)/$$f" || exit 1; \ + else :; fi; \ + done + +uninstall-sbinPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(sbin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " rm -f '$(DESTDIR)$(sbindir)/$$f'"; \ + rm -f "$(DESTDIR)$(sbindir)/$$f"; \ + done + +clean-sbinPROGRAMS: + @list='$(sbin_PROGRAMS)'; for p in $$list; do \ + f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f $$p $$f"; \ + rm -f $$p $$f ; \ + 
done +slurmdbd$(EXEEXT): $(slurmdbd_OBJECTS) $(slurmdbd_DEPENDENCIES) + @rm -f slurmdbd$(EXEEXT) + $(slurmdbd_LINK) $(slurmdbd_OBJECTS) $(slurmdbd_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_req.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rpc_mgr.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmdbd.Po@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + 
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: + for dir in "$(DESTDIR)$(sbindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-am + +clean-am: clean-generic clean-libtool clean-sbinPROGRAMS \ + mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-sbinPROGRAMS + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-sbinPROGRAMS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \ + clean-libtool clean-sbinPROGRAMS ctags distclean \ + distclean-compile distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-sbinPROGRAMS install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-sbinPROGRAMS + + +force: +$(slurmdbd_LDADD) : force + @cd `dirname $@` && $(MAKE) `basename $@` +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/src/slurmdbd/agent.c b/src/slurmdbd/agent.c new file mode 100644 index 000000000..006160d49 --- /dev/null +++ b/src/slurmdbd/agent.c @@ -0,0 +1,37 @@ +/*****************************************************************************\ + * agent.c - functions for queued requests + ***************************************************************************** + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. 
If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ diff --git a/src/srun/reattach.h b/src/slurmdbd/agent.h similarity index 82% rename from src/srun/reattach.h rename to src/slurmdbd/agent.h index 04882a3c0..70c133abf 100644 --- a/src/srun/reattach.h +++ b/src/slurmdbd/agent.h @@ -1,10 +1,10 @@ /*****************************************************************************\ - * src/srun/reattach.h support for re/attach to running jobs in slurm + * agent.h - data structures and function definitions for queued requests ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * Written by Morris Jette <jette@llnl.gov> + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -35,12 +35,8 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ -#ifndef _REATTACH_H -#define _REATTACH_H +#ifndef _AGENT_H +#define _AGENT_H -/* reattach to running job, if possible. - * jobid/stepid to attach to are held in srun options "opt" - */ -int reattach(void); -#endif /* !_REATTACH_H */ +#endif /* !_AGENT_H */ diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c new file mode 100644 index 000000000..a3065e982 --- /dev/null +++ b/src/slurmdbd/proc_req.c @@ -0,0 +1,1800 @@ +/*****************************************************************************\ + * proc_req.c - functions for processing incoming RPCs. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/common/macros.h" +#include "src/common/pack.h" +#include "src/common/slurmdbd_defs.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/jobacct_common.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_protocol_defs.h" +#include "src/slurmdbd/read_config.h" +#include "src/slurmdbd/rpc_mgr.h" +#include "src/slurmctld/slurmctld.h" + +/* Local functions */ +static int _add_accounts(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _add_account_coords(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _add_assocs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _add_clusters(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _add_users(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _cluster_procs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _get_accounts(void *db_conn, Buf in_buffer, Buf *out_buffer); +static int _get_assocs(void *db_conn, Buf in_buffer, Buf *out_buffer); +static int _get_clusters(void *db_conn, Buf in_buffer, Buf *out_buffer); +static int _get_jobs(void *db_conn, Buf in_buffer, Buf *out_buffer); +static int _get_usage(uint16_t type, void *db_conn, + Buf in_buffer, Buf *out_buffer); +static int _get_users(void *db_conn, Buf in_buffer, Buf *out_buffer); +static int _flush_jobs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static void *_init_conn(Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _fini_conn(void **db_conn, Buf in_buffer, Buf *out_buffer); +static int _job_complete(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _job_start(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _job_suspend(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _modify_accounts(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _modify_assocs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _modify_clusters(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _modify_users(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _node_state(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static char *_node_state_string(uint16_t node_state); +static int _register_ctld(void *db_conn, slurm_fd orig_fd, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _remove_accounts(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int 
_remove_account_coords(void *db_conn, + Buf in_buffer, Buf *out_buffer, + uint32_t *uid); +static int _remove_assocs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _remove_clusters(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _remove_users(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _roll_usage(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _step_complete(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _step_start(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); +static int _update_shares_used(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid); + +/* Process an incoming RPC + * orig_fd IN - originating file descriptor of the RPC + * msg IN - incoming message + * msg_size IN - size of msg in bytes + * first IN - set if first message received on the socket + * buffer OUT - outgoing response, must be freed by caller + * uid IN/OUT - user ID who initiated the RPC + * RET SLURM_SUCCESS or error code */ +extern int +proc_req(void **db_conn, slurm_fd orig_fd, + char *msg, uint32_t msg_size, + bool first, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + uint16_t msg_type; + Buf in_buffer; + char *comment = NULL; + + in_buffer = create_buf(msg, msg_size); /* puts msg into buffer struct */ + safe_unpack16(&msg_type, in_buffer); + + if (first && (msg_type != DBD_INIT)) { + comment = "Initial RPC not DBD_INIT"; + error("%s type (%d)", comment, msg_type); + rc = EINVAL; + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_INIT); + } else { + switch (msg_type) { + case DBD_ADD_ACCOUNTS: + rc = _add_accounts(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_ADD_ACCOUNT_COORDS: + rc = _add_account_coords(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_ADD_ASSOCS: + rc = _add_assocs(*db_conn, in_buffer, out_buffer, uid); + break; + case DBD_ADD_CLUSTERS: + rc = _add_clusters(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_ADD_USERS: + rc = _add_users(*db_conn, in_buffer, out_buffer, uid); + break; + case DBD_CLUSTER_PROCS: + rc = _cluster_procs(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_GET_ACCOUNTS: + rc = _get_accounts(*db_conn, in_buffer, out_buffer); + break; + case DBD_GET_ASSOCS: + rc = _get_assocs(*db_conn, in_buffer, out_buffer); + break; + case DBD_GET_ASSOC_USAGE: + case DBD_GET_CLUSTER_USAGE: + rc = _get_usage(msg_type, *db_conn, + in_buffer, out_buffer); + break; + case DBD_GET_CLUSTERS: + rc = _get_clusters(*db_conn, in_buffer, out_buffer); + break; + case DBD_GET_JOBS: + rc = _get_jobs(*db_conn, in_buffer, out_buffer); + break; + case DBD_GET_USERS: + rc = _get_users(*db_conn, in_buffer, out_buffer); + break; + case DBD_FLUSH_JOBS: + rc = _flush_jobs(*db_conn, in_buffer, out_buffer, uid); + break; + case DBD_INIT: + if (first) + (*db_conn) = _init_conn( + in_buffer, out_buffer, uid); + else { + comment = "DBD_INIT sent after connection established"; + error("%s", comment); + rc = EINVAL; + *out_buffer = make_dbd_rc_msg(rc, comment, + DBD_INIT); + } + break; + case DBD_FINI: + rc = _fini_conn(db_conn, in_buffer, out_buffer); + break; + case DBD_JOB_COMPLETE: + rc = _job_complete(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_JOB_START: + rc = _job_start(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_JOB_SUSPEND: + rc = _job_suspend(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_MODIFY_ACCOUNTS: + rc = 
_modify_accounts(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_MODIFY_ASSOCS: + rc = _modify_assocs(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_MODIFY_CLUSTERS: + rc = _modify_clusters(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_MODIFY_USERS: + rc = _modify_users(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_NODE_STATE: + rc = _node_state(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_REGISTER_CTLD: + rc = _register_ctld(*db_conn, orig_fd, in_buffer, + out_buffer, uid); + break; + case DBD_REMOVE_ACCOUNTS: + rc = _remove_accounts(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_REMOVE_ACCOUNT_COORDS: + rc = _remove_account_coords(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_REMOVE_ASSOCS: + rc = _remove_assocs(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_REMOVE_CLUSTERS: + rc = _remove_clusters(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_REMOVE_USERS: + rc = _remove_users(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_ROLL_USAGE: + rc = _roll_usage(*db_conn, in_buffer, out_buffer, uid); + break; + case DBD_STEP_COMPLETE: + rc = _step_complete(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_STEP_START: + rc = _step_start(*db_conn, + in_buffer, out_buffer, uid); + break; + case DBD_UPDATE_SHARES_USED: + rc = _update_shares_used(*db_conn, + in_buffer, out_buffer, uid); + break; + default: + comment = "Invalid RPC"; + error("%s msg_type=%d", comment, msg_type); + rc = EINVAL; + *out_buffer = make_dbd_rc_msg(rc, comment, 0); + break; + } + } + + xfer_buf_data(in_buffer); /* delete in_buffer struct without + * xfree of msg */ + return rc; + +unpack_error: + free_buf(in_buffer); + return SLURM_ERROR; +} + +static int _add_accounts(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_list_msg_t *get_msg = NULL; + char *comment = NULL; + + debug2("DBD_ADD_ACCOUNTS: called"); + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + acct_user_rec_t user; + + memset(&user, 0, sizeof(acct_user_rec_t)); + user.uid = *uid; + if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) { + comment = "Your user has not been added to the accounting system yet."; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + if(!list_count(user.coord_accts)) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + /* If the user is a coord of any acct they can add + * accounts they are only able to make associations to + * these accounts if they are coordinators of the + * parent they are trying to add to + */ + } + + if (slurmdbd_unpack_list_msg(DBD_ADD_ACCOUNTS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_ADD_ACCOUNTS message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + rc = acct_storage_g_add_accounts(db_conn, *uid, get_msg->my_list); +end_it: + slurmdbd_free_list_msg(get_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_ACCOUNTS); + return rc; +} +static int _add_account_coords(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_acct_coord_msg_t *get_msg = NULL; + char *comment = NULL; + + if (slurmdbd_unpack_acct_coord_msg(&get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_ADD_ACCOUNT_COORDS message"; + error("%s", comment); 
+ rc = SLURM_ERROR; + goto end_it; + } + + debug2("DBD_ADD_ACCOUNT_COORDS: called"); + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + ListIterator itr = NULL; + acct_user_rec_t user; + acct_coord_rec_t *coord = NULL; + + memset(&user, 0, sizeof(acct_user_rec_t)); + user.uid = *uid; + if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) { + comment = "Your user has not been added to the accounting system yet."; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + if(!list_count(user.coord_accts)) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + itr = list_iterator_create(user.coord_accts); + while((coord = list_next(itr))) { + if(!strcasecmp(coord->acct_name, get_msg->acct)) + break; + } + list_iterator_destroy(itr); + + if(!coord) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + } + + rc = acct_storage_g_add_coord(db_conn, *uid, get_msg->acct, + get_msg->cond); +end_it: + slurmdbd_free_acct_coord_msg(get_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_ACCOUNT_COORDS); + return rc; +} + +static int _add_assocs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_list_msg_t *get_msg = NULL; + char *comment = NULL; + + debug2("DBD_ADD_ASSOCS: called"); + + if (slurmdbd_unpack_list_msg(DBD_ADD_ASSOCS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_ADD_ASSOCS message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + ListIterator itr = NULL; + ListIterator itr2 = NULL; + acct_user_rec_t user; + acct_coord_rec_t *coord = NULL; + acct_association_rec_t *object = NULL; + + memset(&user, 0, sizeof(acct_user_rec_t)); + user.uid = *uid; + if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) { + comment = "Your user has not been added to the accounting system yet."; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + if(!user.coord_accts || !list_count(user.coord_accts)) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + itr = list_iterator_create(get_msg->my_list); + itr2 = list_iterator_create(user.coord_accts); + while((object = list_next(itr))) { + char *account = "root"; + if(object->user) + account = object->acct; + else if(object->parent_acct) + account = object->parent_acct; + list_iterator_reset(itr2); + while((coord = list_next(itr2))) { + if(!strcasecmp(coord->acct_name, account)) + break; + } + if(!coord) + break; + } + list_iterator_destroy(itr2); + list_iterator_destroy(itr); + if(!coord) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + } + + rc = acct_storage_g_add_associations(db_conn, *uid, get_msg->my_list); +end_it: + slurmdbd_free_list_msg(get_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_ASSOCS); + return rc; +} + +static int _add_clusters(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_list_msg_t *get_msg = NULL; + char *comment = NULL; + + debug2("DBD_ADD_CLUSTERS: called"); + if(*uid != slurmdbd_conf->slurm_user_id + && 
assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_SUPER_USER) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + + if (slurmdbd_unpack_list_msg(DBD_ADD_CLUSTERS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_ADD_CLUSTERS message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + rc = acct_storage_g_add_clusters(db_conn, *uid, get_msg->my_list); + if(rc != SLURM_SUCCESS) + comment = "Failed to add cluster."; + +end_it: + slurmdbd_free_list_msg(get_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_CLUSTERS); + return rc; +} +static int _add_users(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_list_msg_t *get_msg = NULL; + char *comment = NULL; + debug2("DBD_ADD_USERS: called"); + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + acct_user_rec_t user; + + memset(&user, 0, sizeof(acct_user_rec_t)); + user.uid = *uid; + if(!assoc_mgr_fill_in_user(db_conn, &user, 1)) { + comment = "Your user has not been added to the accounting system yet."; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + if(!list_count(user.coord_accts)) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + /* If the user is a coord of any acct they can add + * users they are only able to make associations to + * these users if they are coordinators of the + * account they are trying to add to + */ + } + + if (slurmdbd_unpack_list_msg(DBD_ADD_USERS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_ADD_USERS message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + rc = acct_storage_g_add_users(db_conn, *uid, get_msg->my_list); + +end_it: + slurmdbd_free_list_msg(get_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_ADD_USERS); + return rc; +} + +static int _cluster_procs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_cluster_procs_msg_t *cluster_procs_msg = NULL; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_CLUSTER_PROCS message from invalid uid"; + error("DBD_CLUSTER_PROCS message from invalid uid %u", *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_cluster_procs_msg(&cluster_procs_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_CLUSTER_PROCS message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + debug2("DBD_CLUSTER_PROCS: called for %s(%u)", + cluster_procs_msg->cluster_name, + cluster_procs_msg->proc_count); + + rc = clusteracct_storage_g_cluster_procs( + db_conn, + cluster_procs_msg->cluster_name, + cluster_procs_msg->proc_count, + cluster_procs_msg->event_time); +end_it: + slurmdbd_free_cluster_procs_msg(cluster_procs_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_CLUSTER_PROCS); + return rc; +} + +static int _get_accounts(void *db_conn, Buf in_buffer, Buf *out_buffer) +{ + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_GET_ACCOUNTS: called"); + if (slurmdbd_unpack_cond_msg(DBD_GET_ACCOUNTS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_GET_ACCOUNTS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, + 
DBD_GET_ACCOUNTS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_get_accounts(db_conn, get_msg->cond); + slurmdbd_free_cond_msg(DBD_GET_ACCOUNTS, get_msg); + + + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_ACCOUNTS, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_ACCOUNTS, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return SLURM_SUCCESS; +} + +static int _get_assocs(void *db_conn, Buf in_buffer, Buf *out_buffer) +{ + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_GET_ASSOCS: called"); + if (slurmdbd_unpack_cond_msg(DBD_GET_ASSOCS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_GET_ASSOCS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, + DBD_GET_ASSOCS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_get_associations( + db_conn, get_msg->cond); + slurmdbd_free_cond_msg(DBD_GET_ASSOCS, get_msg); + + + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_ASSOCS, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_ASSOCS, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return SLURM_SUCCESS; +} + +static int _get_clusters(void *db_conn, Buf in_buffer, Buf *out_buffer) +{ + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_GET_CLUSTERS: called"); + if (slurmdbd_unpack_cond_msg(DBD_GET_CLUSTERS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_GET_CLUSTERS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, + DBD_GET_CLUSTERS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_get_clusters( + db_conn, get_msg->cond); + slurmdbd_free_cond_msg(DBD_GET_CLUSTERS, get_msg); + + + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_CLUSTERS, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_CLUSTERS, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return SLURM_SUCCESS; +} + +static int _get_jobs(void *db_conn, Buf in_buffer, Buf *out_buffer) +{ + dbd_get_jobs_msg_t *get_jobs_msg = NULL; + dbd_list_msg_t list_msg; + sacct_parameters_t sacct_params; + char *comment = NULL; + + debug2("DBD_GET_JOBS: called"); + if (slurmdbd_unpack_get_jobs_msg(&get_jobs_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_GET_JOBS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, + DBD_GET_JOBS); + return SLURM_ERROR; + } + + memset(&sacct_params, 0, sizeof(sacct_parameters_t)); + sacct_params.opt_cluster = get_jobs_msg->cluster_name; + + list_msg.my_list = jobacct_storage_g_get_jobs( + db_conn, + get_jobs_msg->selected_steps, get_jobs_msg->selected_parts, + &sacct_params); + slurmdbd_free_get_jobs_msg(get_jobs_msg); + + + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_JOBS, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_JOBS, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return SLURM_SUCCESS; +} + +static int _get_usage(uint16_t type, void *db_conn, + Buf in_buffer, Buf *out_buffer) +{ + dbd_usage_msg_t *get_msg = NULL; + dbd_usage_msg_t got_msg; + uint16_t ret_type = 0; + int (*my_function) (void *db_conn, void *object, + time_t start, time_t end); + int rc = SLURM_SUCCESS; + char *comment = NULL; + + info("DBD_GET_USAGE: called"); + + if (slurmdbd_unpack_usage_msg(type, &get_msg, in_buffer) != + 
SLURM_SUCCESS) { + comment = "Failed to unpack DBD_GET_USAGE message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, type); + return SLURM_ERROR; + } + switch(type) { + case DBD_GET_ASSOC_USAGE: + ret_type = DBD_GOT_ASSOC_USAGE; + my_function = acct_storage_g_get_usage; + break; + case DBD_GET_CLUSTER_USAGE: + ret_type = DBD_GOT_CLUSTER_USAGE; + my_function = clusteracct_storage_g_get_usage; + break; + default: + comment = "Unknown type of usage to get"; + error("%s %u", comment, type); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, type); + return SLURM_ERROR; + } + + rc = (*(my_function))(db_conn, get_msg->rec, + get_msg->start, get_msg->end); + slurmdbd_free_usage_msg(type, get_msg); + + if(rc != SLURM_SUCCESS) { + comment = "Problem getting usage info"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, type); + return SLURM_ERROR; + + } + memset(&got_msg, 0, sizeof(dbd_usage_msg_t)); + got_msg.rec = get_msg->rec; + get_msg->rec = NULL; + *out_buffer = init_buf(1024); + pack16((uint16_t) ret_type, *out_buffer); + slurmdbd_pack_usage_msg(ret_type, &got_msg, *out_buffer); + + return SLURM_SUCCESS; +} + +static int _get_users(void *db_conn, Buf in_buffer, Buf *out_buffer) +{ + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_GET_USERS: called"); + + if (slurmdbd_unpack_cond_msg(DBD_GET_USERS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_GET_USERS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, + DBD_GET_USERS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_get_users(db_conn, get_msg->cond); + slurmdbd_free_cond_msg(DBD_GET_USERS, get_msg); + + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_USERS, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_USERS, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return SLURM_SUCCESS; +} + +static int _flush_jobs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_cluster_procs_msg_t *cluster_procs_msg = NULL; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_FLUSH_JOBS message from invalid uid"; + error("DBD_FLUSH_JOBS message from invalid uid %u", *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_cluster_procs_msg(&cluster_procs_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_FLUSH_JOBS message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + debug2("DBD_FLUSH_JOBS: called for %s", + cluster_procs_msg->cluster_name); + + rc = acct_storage_g_flush_jobs_on_cluster( + db_conn, + cluster_procs_msg->cluster_name, + cluster_procs_msg->event_time); +end_it: + slurmdbd_free_cluster_procs_msg(cluster_procs_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_FLUSH_JOBS); + return rc; +} + +static void *_init_conn(Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_init_msg_t *init_msg = NULL; + char *comment = NULL; + int rc = SLURM_SUCCESS; + void *new_conn = NULL; + + if (slurmdbd_unpack_init_msg(&init_msg, in_buffer, + slurmdbd_conf->auth_info) + != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_INIT message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + if (init_msg->version != SLURMDBD_VERSION) { + comment = "Incompatable RPC version"; + error("Incompatable RPC version (%d != %d)", + init_msg->version, SLURMDBD_VERSION); + 
goto end_it; + } + *uid = init_msg->uid; + + debug("DBD_INIT: VERSION:%u UID:%u", init_msg->version, init_msg->uid); + new_conn = acct_storage_g_get_connection(false, init_msg->rollback); + +end_it: + slurmdbd_free_init_msg(init_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_INIT); + + return new_conn; +} + +static int _fini_conn(void **db_conn, Buf in_buffer, Buf *out_buffer) +{ + dbd_fini_msg_t *fini_msg = NULL; + char *comment = NULL; + int rc = SLURM_SUCCESS; + + if (slurmdbd_unpack_fini_msg(&fini_msg, in_buffer) != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_FINI message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + debug2("DBD_FINI: CLOSE:%u COMMIT:%u", + fini_msg->close_conn, fini_msg->commit); + if(fini_msg->close_conn == 1) + rc = acct_storage_g_close_connection(db_conn); + else + rc = acct_storage_g_commit((*db_conn), fini_msg->commit); +end_it: + slurmdbd_free_fini_msg(fini_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_FINI); + + return rc; + +} + +static int _job_complete(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_job_comp_msg_t *job_comp_msg = NULL; + struct job_record job; + struct job_details details; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_JOB_COMPLETE message from invalid uid"; + error("%s %u", comment, *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_job_complete_msg(&job_comp_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_JOB_COMPLETE message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + debug2("DBD_JOB_COMPLETE: ID:%u ", job_comp_msg->job_id); + + memset(&job, 0, sizeof(struct job_record)); + memset(&details, 0, sizeof(struct job_details)); + + job.assoc_id = job_comp_msg->assoc_id; + job.db_index = job_comp_msg->db_index; + job.end_time = job_comp_msg->end_time; + job.exit_code = job_comp_msg->exit_code; + job.job_id = job_comp_msg->job_id; + job.job_state = job_comp_msg->job_state; + job.nodes = job_comp_msg->nodes; + job.start_time = job_comp_msg->start_time; + details.submit_time = job_comp_msg->submit_time; + + job.details = &details; + rc = jobacct_storage_g_job_complete(db_conn, &job); + + if(rc && errno == 740) /* meaning data is already there */ + rc = SLURM_SUCCESS; +end_it: + slurmdbd_free_job_complete_msg(job_comp_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_JOB_COMPLETE); + return SLURM_SUCCESS; +} + +static int _job_start(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_job_start_msg_t *job_start_msg = NULL; + dbd_job_start_rc_msg_t job_start_rc_msg; + struct job_record job; + struct job_details details; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_JOB_START message from invalid uid"; + error("%s %u", comment, *uid); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, comment, + DBD_JOB_START); + return SLURM_ERROR; + } + if (slurmdbd_unpack_job_start_msg(&job_start_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_JOB_START message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, comment, + DBD_JOB_START); + return SLURM_ERROR; + } + memset(&job, 0, sizeof(struct job_record)); + memset(&details, 0, sizeof(struct job_details)); + memset(&job_start_rc_msg, 0, sizeof(dbd_job_start_rc_msg_t)); + + job.total_procs = job_start_msg->alloc_cpus; + job.account = job_start_msg->account; + job.assoc_id = 
job_start_msg->assoc_id; + job.comment = job_start_msg->block_id; + job.db_index = job_start_msg->db_index; + details.begin_time = job_start_msg->eligible_time; + job.user_id = job_start_msg->uid; + job.group_id = job_start_msg->gid; + job.job_id = job_start_msg->job_id; + job.job_state = job_start_msg->job_state; + job.name = job_start_msg->name; + job.nodes = job_start_msg->nodes; + job.partition = job_start_msg->partition; + job.num_procs = job_start_msg->req_cpus; + job.priority = job_start_msg->priority; + job.start_time = job_start_msg->start_time; + details.submit_time = job_start_msg->submit_time; + + job.details = &details; + + if(job.db_index) { + debug2("DBD_JOB_START: START CALL ID:%u NAME:%s INX:%u", + job_start_msg->job_id, job_start_msg->name, + job.db_index); + } else { + debug2("DBD_JOB_START: ELIGIBLE CALL ID:%u NAME:%s", + job_start_msg->job_id, job_start_msg->name); + } + job_start_rc_msg.return_code = jobacct_storage_g_job_start(db_conn, + &job); + job_start_rc_msg.db_index = job.db_index; + + slurmdbd_free_job_start_msg(job_start_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_JOB_START_RC, *out_buffer); + slurmdbd_pack_job_start_rc_msg(&job_start_rc_msg, *out_buffer); + return SLURM_SUCCESS; +} + +static int _job_suspend(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_job_suspend_msg_t *job_suspend_msg = NULL; + struct job_record job; + struct job_details details; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_JOB_SUSPEND message from invalid uid"; + error("%s %u", comment, *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_job_suspend_msg(&job_suspend_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_JOB_SUSPEND message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + debug2("DBD_JOB_SUSPEND: ID:%u STATE:%s", + job_suspend_msg->job_id, + job_state_string((enum job_states) job_suspend_msg->job_state)); + + memset(&job, 0, sizeof(struct job_record)); + memset(&details, 0, sizeof(struct job_details)); + + job.assoc_id = job_suspend_msg->assoc_id; + job.db_index = job_suspend_msg->db_index; + job.job_id = job_suspend_msg->job_id; + job.job_state = job_suspend_msg->job_state; + details.submit_time = job_suspend_msg->submit_time; + job.suspend_time = job_suspend_msg->suspend_time; + + job.details = &details; + rc = jobacct_storage_g_job_suspend(db_conn, &job); + + if(rc && errno == 740) /* meaning data is already there */ + rc = SLURM_SUCCESS; +end_it: + slurmdbd_free_job_suspend_msg(job_suspend_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_JOB_SUSPEND); + return SLURM_SUCCESS; +} + +static int _modify_accounts(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_modify_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_MODIFY_ACCOUNTS: called"); + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_MODIFY_ACCOUNTS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_modify_msg(DBD_MODIFY_ACCOUNTS, &get_msg, + in_buffer) != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_MODIFY_ACCOUNTS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, 
DBD_MODIFY_ACCOUNTS); + return SLURM_ERROR; + } + + + list_msg.my_list = acct_storage_g_modify_accounts( + db_conn, *uid, get_msg->cond, get_msg->rec); + slurmdbd_free_modify_msg(DBD_MODIFY_ACCOUNTS, get_msg); + + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_MODIFY_ACCOUNTS); + return rc; +} + +static int _modify_assocs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_modify_msg_t *get_msg = NULL; + char *comment = NULL; + dbd_list_msg_t list_msg; + + debug2("DBD_MODIFY_ASSOCS: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_MODIFY_ASSOCS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_modify_msg(DBD_MODIFY_ASSOCS, &get_msg, + in_buffer) != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_MODIFY_ASSOCS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, DBD_MODIFY_ASSOCS); + return SLURM_ERROR; + } + + + list_msg.my_list = acct_storage_g_modify_associations(db_conn, *uid, + get_msg->cond, get_msg->rec); + + slurmdbd_free_modify_msg(DBD_MODIFY_ASSOCS, get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; +} + +static int _modify_clusters(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_list_msg_t list_msg; + int rc = SLURM_SUCCESS; + dbd_modify_msg_t *get_msg = NULL; + char *comment = NULL; + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) + < ACCT_ADMIN_SUPER_USER) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_MODIFY_CLUSTERS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_modify_msg(DBD_MODIFY_CLUSTERS, &get_msg, + in_buffer) != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_MODIFY_CLUSTERS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, DBD_MODIFY_CLUSTERS); + return SLURM_ERROR; + } + + debug2("DBD_MODIFY_CLUSTERS: called"); + + list_msg.my_list = acct_storage_g_modify_clusters(db_conn, *uid, + get_msg->cond, get_msg->rec); + + slurmdbd_free_modify_msg(DBD_MODIFY_CLUSTERS, get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; +} + +static int _modify_users(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_list_msg_t list_msg; + int rc = SLURM_SUCCESS; + dbd_modify_msg_t *get_msg = NULL; + char *comment = NULL; + + debug2("DBD_MODIFY_USERS: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_MODIFY_USERS); + + return 
ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_modify_msg(DBD_MODIFY_USERS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_MODIFY_USERS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, DBD_MODIFY_USERS); + return SLURM_ERROR; + } + + if(((acct_user_rec_t *)get_msg->rec)->admin_level != ACCT_ADMIN_NOTSET + && *uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) + < ((acct_user_rec_t *)get_msg->rec)->admin_level) { + comment = "You have to be the same or higher admin level to change another persons"; + ((acct_user_rec_t *)get_msg->rec)->admin_level = + ACCT_ADMIN_NOTSET; + } + + list_msg.my_list = acct_storage_g_modify_users( + db_conn, *uid, get_msg->cond, get_msg->rec); + + slurmdbd_free_modify_msg(DBD_MODIFY_USERS, get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; +} + +static int _node_state(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_node_state_msg_t *node_state_msg = NULL; + struct node_record node_ptr; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + memset(&node_ptr, 0, sizeof(struct node_record)); + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_NODE_STATE message from invalid uid"; + error("%s %u", comment, *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_node_state_msg(&node_state_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_NODE_STATE message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + debug2("DBD_NODE_STATE: NODE:%s STATE:%s REASON:%s TIME:%u", + node_state_msg->hostlist, + _node_state_string(node_state_msg->new_state), + node_state_msg->reason, + node_state_msg->event_time); + node_ptr.name = node_state_msg->hostlist; + node_ptr.cpus = node_state_msg->cpu_count; + + slurmctld_conf.fast_schedule = 0; + + if(node_state_msg->new_state == DBD_NODE_STATE_DOWN) + rc = clusteracct_storage_g_node_down( + db_conn, + node_state_msg->cluster_name, + &node_ptr, + node_state_msg->event_time, + node_state_msg->reason); + else + rc = clusteracct_storage_g_node_up(db_conn, + node_state_msg->cluster_name, + &node_ptr, + node_state_msg->event_time); + + if(rc && errno == 740) /* meaning data is already there */ + rc = SLURM_SUCCESS; + +end_it: + slurmdbd_free_node_state_msg(node_state_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_NODE_STATE); + return SLURM_SUCCESS; +} + +static char *_node_state_string(uint16_t node_state) +{ + switch(node_state) { + case DBD_NODE_STATE_DOWN: + return "DOWN"; + case DBD_NODE_STATE_UP: + return "UP"; + } + return "UNKNOWN"; +} + +static int _register_ctld(void *db_conn, slurm_fd orig_fd, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_register_ctld_msg_t *register_ctld_msg = NULL; + int rc = SLURM_SUCCESS; + char *comment = NULL, ip[32]; + slurm_addr ctld_address; + uint16_t orig_port; + acct_cluster_cond_t cluster_q; + acct_cluster_rec_t cluster; + dbd_list_msg_t list_msg; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_REGISTER_CTLD message from invalid uid"; + error("%s %u", comment, *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_register_ctld_msg(®ister_ctld_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_REGISTER_CTLD message"; + error("%s", comment); + rc = 
SLURM_ERROR; + goto end_it; + } + debug2("DBD_REGISTER_CTLD: called for %s(%u)", + register_ctld_msg->cluster_name, register_ctld_msg->port); + slurm_get_peer_addr(orig_fd, &ctld_address); + slurm_get_ip_str(&ctld_address, &orig_port, ip, sizeof(ip)); + debug2("slurmctld at ip:%s, port:%d", ip, register_ctld_msg->port); + + memset(&cluster_q, 0, sizeof(acct_cluster_cond_t)); + memset(&cluster, 0, sizeof(acct_cluster_rec_t)); + cluster_q.cluster_list = list_create(NULL); + list_append(cluster_q.cluster_list, register_ctld_msg->cluster_name); + cluster.control_host = ip; + cluster.control_port = register_ctld_msg->port; + list_msg.my_list = acct_storage_g_modify_clusters( + db_conn, *uid, &cluster_q, &cluster); + + if(!list_msg.my_list || !list_count(list_msg.my_list)) { + comment = "This cluster hasn't been added to accounting yet"; + rc = SLURM_ERROR; + } + + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + + list_destroy(cluster_q.cluster_list); + /* + * Outgoing message header must have flag set: + * out_msg.flags = SLURM_GLOBAL_AUTH_KEY; + */ +#if 0 +{ + /* Code to validate communications back to slurmctld */ + slurm_fd fd; + slurm_set_addr_char(&ctld_address, register_ctld_msg->port, ip); + fd = slurm_open_msg_conn(&ctld_address); + if (fd < 0) { + error("can not open socket back to slurmctld"); + } else { + slurm_msg_t out_msg; + slurm_msg_t_init(&out_msg); + out_msg.msg_type = REQUEST_PING; + out_msg.flags = SLURM_GLOBAL_AUTH_KEY; + slurm_send_node_msg(fd, &out_msg); + /* We probably need to add matching recv_msg function + * for an arbitray fd or should these be fire and forget? */ + slurm_close_stream(fd); + } +} +#endif + +end_it: + slurmdbd_free_register_ctld_msg(register_ctld_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_REGISTER_CTLD); + return rc; +} + +static int _remove_accounts(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_REMOVE_ACCOUNTS: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_REMOVE_ACCOUNTS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_cond_msg(DBD_REMOVE_ACCOUNTS, &get_msg, + in_buffer) != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_REMOVE_ACCOUNTS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, DBD_REMOVE_ACCOUNTS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_remove_accounts( + db_conn, *uid, get_msg->cond); +/* this should be done inside the plugin */ +/* if(rc == SLURM_SUCCESS) { */ +/* memset(&assoc_q, 0, sizeof(acct_association_cond_t)); */ +/* assoc_q.acct_list = */ +/* ((acct_account_cond_t *)get_msg->cond)->acct_list; */ +/* list_msg.my_list = acct_storage_g_remove_associations(db_conn, *uid, &assoc_q); */ +/* } */ + + slurmdbd_free_cond_msg(DBD_REMOVE_ACCOUNTS, get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; +} + +static int _remove_account_coords(void *db_conn, + Buf in_buffer, Buf *out_buffer, + uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_acct_coord_msg_t *get_msg = NULL; + 
dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_REMOVE_ACCOUNT_COORDS: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg( + ESLURM_ACCESS_DENIED, comment, + DBD_REMOVE_ACCOUNT_COORDS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_acct_coord_msg(&get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_REMOVE_ACCOUNT_COORDS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg( + SLURM_ERROR, comment, DBD_REMOVE_ACCOUNT_COORDS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_remove_coord( + db_conn, *uid, get_msg->acct, get_msg->cond); + + slurmdbd_free_acct_coord_msg(get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; +} + +static int _remove_assocs(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_REMOVE_ASSOCS: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_REMOVE_ASSOCS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_cond_msg(DBD_REMOVE_ASSOCS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_REMOVE_ASSOCS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, DBD_REMOVE_ASSOCS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_remove_associations( + db_conn, *uid, get_msg->cond); + + slurmdbd_free_cond_msg(DBD_REMOVE_ASSOCS, get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; + +} + +static int _remove_clusters(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_REMOVE_CLUSTERS: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) + < ACCT_ADMIN_SUPER_USER) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_REMOVE_CLUSTERS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_cond_msg(DBD_REMOVE_CLUSTERS, &get_msg, + in_buffer) != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_REMOVE_CLUSTERS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, DBD_REMOVE_CLUSTERS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_remove_clusters( + db_conn, *uid, get_msg->cond); +/* this should be done inside the plugin */ +/* if(rc == SLURM_SUCCESS) { */ +/* memset(&assoc_q, 0, sizeof(acct_association_cond_t)); */ +/* assoc_q.cluster_list = */ +/* ((acct_cluster_cond_t *)get_msg->cond)->cluster_list; */ +/* list_msg.my_list = 
acct_storage_g_remove_associations(db_conn, *uid, &assoc_q); */ +/* } */ + + slurmdbd_free_cond_msg(DBD_REMOVE_CLUSTERS, get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; +} + +static int _remove_users(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_cond_msg_t *get_msg = NULL; + dbd_list_msg_t list_msg; + char *comment = NULL; + + debug2("DBD_REMOVE_USERS: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(ESLURM_ACCESS_DENIED, + comment, DBD_REMOVE_USERS); + + return ESLURM_ACCESS_DENIED; + } + + if (slurmdbd_unpack_cond_msg(DBD_REMOVE_USERS, &get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_REMOVE_USERS message"; + error("%s", comment); + *out_buffer = make_dbd_rc_msg(SLURM_ERROR, + comment, DBD_REMOVE_USERS); + return SLURM_ERROR; + } + + list_msg.my_list = acct_storage_g_remove_users( + db_conn, *uid, get_msg->cond); +/* this should be done inside the plugin */ + /* if(rc == SLURM_SUCCESS) { */ +/* memset(&assoc_q, 0, sizeof(acct_association_cond_t)); */ +/* assoc_q.user_list = */ +/* ((acct_user_cond_t *)get_msg->cond)->user_list; */ +/* list_msg.my_list = acct_storage_g_remove_associations(db_conn, *uid, &assoc_q); */ +/* } */ + + slurmdbd_free_cond_msg(DBD_REMOVE_USERS, get_msg); + *out_buffer = init_buf(1024); + pack16((uint16_t) DBD_GOT_LIST, *out_buffer); + slurmdbd_pack_list_msg(DBD_GOT_LIST, &list_msg, *out_buffer); + if(list_msg.my_list) + list_destroy(list_msg.my_list); + + return rc; +} + +static int _roll_usage(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_roll_usage_msg_t *get_msg = NULL; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + info("DBD_ROLL_USAGE: called"); + + if(*uid != slurmdbd_conf->slurm_user_id + && assoc_mgr_get_admin_level(db_conn, *uid) < ACCT_ADMIN_OPERATOR) { + comment = "Your user doesn't have privilege to preform this action"; + error("%s", comment); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + + if (slurmdbd_unpack_roll_usage_msg(&get_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_ROLL_USAGE message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + rc = acct_storage_g_roll_usage(db_conn, get_msg->start); + +end_it: + slurmdbd_free_roll_usage_msg(get_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_ROLL_USAGE); + return rc; +} + +static int _step_complete(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_step_comp_msg_t *step_comp_msg = NULL; + struct step_record step; + struct job_record job; + struct job_details details; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_STEP_COMPLETE message from invalid uid"; + error("%s %u", comment, *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_step_complete_msg(&step_comp_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_STEP_COMPLETE message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + debug2("DBD_STEP_COMPLETE: ID:%u.%u SUBMIT:%u", + step_comp_msg->job_id, step_comp_msg->step_id, + step_comp_msg->job_submit_time); 
+ + memset(&step, 0, sizeof(struct step_record)); + memset(&job, 0, sizeof(struct job_record)); + memset(&details, 0, sizeof(struct job_details)); + + job.assoc_id = step_comp_msg->assoc_id; + job.db_index = step_comp_msg->db_index; + job.end_time = step_comp_msg->end_time; + step.jobacct = step_comp_msg->jobacct; + job.job_id = step_comp_msg->job_id; + job.requid = step_comp_msg->req_uid; + job.start_time = step_comp_msg->start_time; + details.submit_time = step_comp_msg->job_submit_time; + step.step_id = step_comp_msg->step_id; + job.total_procs = step_comp_msg->total_procs; + + job.details = &details; + step.job_ptr = &job; + + rc = jobacct_storage_g_step_complete(db_conn, &step); + + if(rc && errno == 740) /* meaning data is already there */ + rc = SLURM_SUCCESS; + +end_it: + slurmdbd_free_step_complete_msg(step_comp_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_STEP_COMPLETE); + return rc; +} + +static int _step_start(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + dbd_step_start_msg_t *step_start_msg = NULL; + struct step_record step; + struct job_record job; + struct job_details details; + int rc = SLURM_SUCCESS; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_STEP_START message from invalid uid"; + error("%s %u", comment, *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + if (slurmdbd_unpack_step_start_msg(&step_start_msg, in_buffer) != + SLURM_SUCCESS) { + comment = "Failed to unpack DBD_STEP_START message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } + + debug2("DBD_STEP_START: ID:%u.%u NAME:%s SUBMIT:%d", + step_start_msg->job_id, step_start_msg->step_id, + step_start_msg->name, step_start_msg->job_submit_time); + + memset(&step, 0, sizeof(struct step_record)); + memset(&job, 0, sizeof(struct job_record)); + memset(&details, 0, sizeof(struct job_details)); + + job.assoc_id = step_start_msg->assoc_id; + job.db_index = step_start_msg->db_index; + job.job_id = step_start_msg->job_id; + step.name = step_start_msg->name; + job.nodes = step_start_msg->nodes; + step.start_time = step_start_msg->start_time; + details.submit_time = step_start_msg->job_submit_time; + step.step_id = step_start_msg->step_id; + job.total_procs = step_start_msg->total_procs; + + job.details = &details; + step.job_ptr = &job; + + rc = jobacct_storage_g_step_start(db_conn, &step); + + if(rc && errno == 740) /* meaning data is already there */ + rc = SLURM_SUCCESS; +end_it: + slurmdbd_free_step_start_msg(step_start_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_STEP_START); + return rc; +} + +static int _update_shares_used(void *db_conn, + Buf in_buffer, Buf *out_buffer, uint32_t *uid) +{ + int rc = SLURM_SUCCESS; + dbd_list_msg_t *used_shares_msg = NULL; + char *comment = NULL; + + if (*uid != slurmdbd_conf->slurm_user_id) { + comment = "DBD_UPDATE_SHARES_USED message from invalid uid"; + error("%s %u", comment, *uid); + rc = ESLURM_ACCESS_DENIED; + goto end_it; + } + debug2("DBD_UPDATE_SHARES_USED"); + if (slurmdbd_unpack_list_msg(DBD_UPDATE_SHARES_USED, &used_shares_msg, + in_buffer) != SLURM_SUCCESS) { + comment = "Failed to unpack DBD_UPDATE_SHARES_USED message"; + error("%s", comment); + rc = SLURM_ERROR; + goto end_it; + } else { +#if 0 + /* This was only added to verify the logic. 
+ * It is not useful for production use */ + ListIterator itr = NULL; + shares_used_object_t *usage; + itr = list_iterator_create(used_shares_msg->my_list); + while((usage = list_next(itr))) { + debug2("assoc_id:%u shares_used:%u", + usage->assoc_id, usage->shares_used); + } + list_iterator_destroy(itr); +#endif + } + + rc = acct_storage_g_update_shares_used(db_conn, + used_shares_msg->my_list); + +end_it: + slurmdbd_free_list_msg(used_shares_msg); + *out_buffer = make_dbd_rc_msg(rc, comment, DBD_UPDATE_SHARES_USED); + return rc; +} diff --git a/src/sacct/sacct_stat.h b/src/slurmdbd/proc_req.h similarity index 68% rename from src/sacct/sacct_stat.h rename to src/slurmdbd/proc_req.h index af3d6794f..6ed7cc191 100644 --- a/src/sacct/sacct_stat.h +++ b/src/slurmdbd/proc_req.h @@ -1,12 +1,10 @@ /*****************************************************************************\ - * sacct_stat.h - header file for sacct - * - * $Id: sacct.h 7541 2006-03-18 01:44:58Z da $ + * proc_req.h - functions and definitions for processing incoming RPCs. ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Danny Auble <da@llnl.gov>. - * UCRL-CODE-226842. + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -17,7 +15,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -36,31 +34,24 @@ * with SLURM; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
\*****************************************************************************/ -#ifndef _SACCT_STAT_H -#define _SACCT_STAT_H - -#include "src/common/slurm_protocol_api.h" -typedef struct { - uint16_t taskid; /* contains which task number it was on */ - uint32_t nodeid; /* contains which node number it was on */ -} jobacct_id_t; +#ifndef _PROC_REQ_H +#define _PROC_REQ_H -typedef struct sacct_struct { - uint32_t max_vsize; - jobacct_id_t max_vsize_id; - float ave_vsize; - uint32_t max_rss; - jobacct_id_t max_rss_id; - float ave_rss; - uint32_t max_pages; - jobacct_id_t max_pages_id; - float ave_pages; - float min_cpu; - jobacct_id_t min_cpu_id; - float ave_cpu; -} sacct_t; +#include "src/common/macros.h" +#include "src/common/pack.h" +#include "src/common/slurm_protocol_defs.h" -extern int sacct_stat(uint32_t jobid, uint32_t stepid); +/* Process an incoming RPC + * orig_fd IN - originating file descriptor of the RPC + * msg IN - incoming message + * msg_size IN - size of msg in bytes + * first IN - set if first message received on the socket + * buffer OUT - outgoing response, must be freed by caller + * uid IN/OUT - user ID who initiated the RPC + * RET SLURM_SUCCESS or error code */ +extern int proc_req(void **db_conn, slurm_fd orig_fd, char *msg, + uint32_t msg_size, bool first, Buf *out_buffer, + uint32_t *uid); -#endif +#endif /* !_PROC_REQ */ diff --git a/src/slurmdbd/read_config.c b/src/slurmdbd/read_config.c new file mode 100644 index 000000000..73bf52519 --- /dev/null +++ b/src/slurmdbd/read_config.c @@ -0,0 +1,283 @@ +/*****************************************************************************\ + * read_config.c - functions for reading slurmdbd.conf + ***************************************************************************** + * Copyright (C) 2003-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. 
+ * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include <pwd.h> +#include <stdlib.h> +#include <string.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <unistd.h> +#include <slurm/slurm_errno.h> + +#include "src/common/macros.h" +#include "src/common/log.h" +#include "src/common/parse_config.h" +#include "src/common/read_config.h" +#include "src/common/xmalloc.h" +#include "src/common/xstring.h" +#include "src/slurmdbd/read_config.h" + +/* Global variables */ +pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER; +//slurm_dbd_conf_t *slurmdbd_conf = NULL; + +/* Local functions */ +static void _clear_slurmdbd_conf(void); +static char * _get_conf_path(void); + +/* + * free_slurmdbd_conf - free storage associated with the global variable + * slurmdbd_conf + */ +extern void free_slurmdbd_conf(void) +{ + slurm_mutex_lock(&conf_mutex); + _clear_slurmdbd_conf(); + xfree(slurmdbd_conf); + slurm_mutex_unlock(&conf_mutex); +} + +static void _clear_slurmdbd_conf(void) +{ + if (slurmdbd_conf) { + xfree(slurmdbd_conf->auth_info); + xfree(slurmdbd_conf->auth_type); + xfree(slurmdbd_conf->dbd_addr); + xfree(slurmdbd_conf->dbd_host); + slurmdbd_conf->dbd_port = 0; + xfree(slurmdbd_conf->log_file); + xfree(slurmdbd_conf->pid_file); + xfree(slurmdbd_conf->plugindir); + xfree(slurmdbd_conf->slurm_user_name); + xfree(slurmdbd_conf->storage_host); + xfree(slurmdbd_conf->storage_loc); + xfree(slurmdbd_conf->storage_pass); + slurmdbd_conf->storage_port = 0; + xfree(slurmdbd_conf->storage_type); + xfree(slurmdbd_conf->storage_user); + } +} + +/* + * read_slurmdbd_conf - load the SlurmDBD configuration from the slurmdbd.conf + * file. Store result into global variable slurmdbd_conf. + * This function can be called more than once. 
+ * RET SLURM_SUCCESS if no error, otherwise an error code + */ +extern int read_slurmdbd_conf(void) +{ + s_p_options_t options[] = { + {"AuthInfo", S_P_STRING}, + {"AuthType", S_P_STRING}, + {"DbdAddr", S_P_STRING}, + {"DbdHost", S_P_STRING}, + {"DbdPort", S_P_UINT16}, + {"DebugLevel", S_P_UINT16}, + {"LogFile", S_P_STRING}, + {"MessageTimeout", S_P_UINT16}, + {"PidFile", S_P_STRING}, + {"PluginDir", S_P_STRING}, + {"SlurmUser", S_P_STRING}, + {"StorageHost", S_P_STRING}, + {"StorageLoc", S_P_STRING}, + {"StoragePass", S_P_STRING}, + {"StoragePort", S_P_UINT16}, + {"StorageType", S_P_STRING}, + {"StorageUser", S_P_STRING}, + {NULL} }; + s_p_hashtbl_t *tbl; + char *conf_path; + struct stat buf; + + /* Set initial values */ + slurm_mutex_lock(&conf_mutex); + if (slurmdbd_conf == NULL) + slurmdbd_conf = xmalloc(sizeof(slurm_dbd_conf_t)); + slurmdbd_conf->debug_level = LOG_LEVEL_INFO; + _clear_slurmdbd_conf(); + + /* Get the slurmdbd.conf path and validate the file */ + conf_path = _get_conf_path(); + if ((conf_path == NULL) || (stat(conf_path, &buf) == -1)) { + info("No slurmdbd.conf file (%s)", conf_path); + } else { + debug("Reading slurmdbd.conf file %s", conf_path); + + tbl = s_p_hashtbl_create(options); + if (s_p_parse_file(tbl, conf_path) == SLURM_ERROR) { + fatal("Could not open/read/parse slurmdbd.conf file %s", + conf_path); + } + + s_p_get_string(&slurmdbd_conf->auth_info, "AuthInfo", tbl); + s_p_get_string(&slurmdbd_conf->auth_type, "AuthType", tbl); + s_p_get_string(&slurmdbd_conf->dbd_host, "DbdHost", tbl); + s_p_get_string(&slurmdbd_conf->dbd_addr, "DbdAddr", tbl); + s_p_get_uint16(&slurmdbd_conf->dbd_port, "DbdPort", tbl); + s_p_get_uint16(&slurmdbd_conf->debug_level, "DebugLevel", tbl); + s_p_get_string(&slurmdbd_conf->log_file, "LogFile", tbl); + if (!s_p_get_uint16(&slurmdbd_conf->msg_timeout, + "MessageTimeout", tbl)) + slurmdbd_conf->msg_timeout = DEFAULT_MSG_TIMEOUT; + else if (slurmdbd_conf->msg_timeout > 100) { + info("WARNING: MessageTimeout is too high for " + "effective fault-tolerance"); + } + s_p_get_string(&slurmdbd_conf->pid_file, "PidFile", tbl); + s_p_get_string(&slurmdbd_conf->plugindir, "PluginDir", tbl); + s_p_get_string(&slurmdbd_conf->slurm_user_name, "SlurmUser", + tbl); + s_p_get_string(&slurmdbd_conf->storage_host, + "StorageHost", tbl); + s_p_get_string(&slurmdbd_conf->storage_loc, + "StorageLoc", tbl); + s_p_get_string(&slurmdbd_conf->storage_pass, + "StoragePass", tbl); + s_p_get_uint16(&slurmdbd_conf->storage_port, + "StoragePort", tbl); + s_p_get_string(&slurmdbd_conf->storage_type, + "StorageType", tbl); + s_p_get_string(&slurmdbd_conf->storage_user, + "StorageUser", tbl); + + s_p_hashtbl_destroy(tbl); + } + + xfree(conf_path); + if (slurmdbd_conf->auth_type == NULL) + slurmdbd_conf->auth_type = xstrdup(DEFAULT_SLURMDBD_AUTHTYPE); + if (slurmdbd_conf->dbd_host == NULL) { + error("slurmdbd.conf lacks DbdHost parameter, using 'localhost'"); + slurmdbd_conf->dbd_host = xstrdup("localhost"); + } + if (slurmdbd_conf->dbd_addr == NULL) + slurmdbd_conf->dbd_addr = xstrdup(slurmdbd_conf->dbd_host); + if (slurmdbd_conf->pid_file == NULL) + slurmdbd_conf->pid_file = xstrdup(DEFAULT_SLURMDBD_PIDFILE); + if (slurmdbd_conf->dbd_port == 0) + slurmdbd_conf->dbd_port = SLURMDBD_PORT; + if(slurmdbd_conf->plugindir == NULL) + slurmdbd_conf->plugindir = xstrdup(default_plugin_path); + if (slurmdbd_conf->slurm_user_name) { + struct passwd *slurm_passwd; + slurm_passwd = getpwnam(slurmdbd_conf->slurm_user_name); + if (slurm_passwd == NULL) { + fatal("Invalid user for 
SlurmUser %s, ignored", + slurmdbd_conf->slurm_user_name); + } else + slurmdbd_conf->slurm_user_id = slurm_passwd->pw_uid; + } else { + slurmdbd_conf->slurm_user_name = xstrdup("root"); + slurmdbd_conf->slurm_user_id = 0; + } + if (slurmdbd_conf->storage_type == NULL) + fatal("StorageType must be specified"); + + slurm_mutex_unlock(&conf_mutex); + return SLURM_SUCCESS; +} + +/* Log the current configuration using verbose() */ +extern void log_config(void) +{ + debug2("AuthInfo = %s", slurmdbd_conf->auth_info); + debug2("AuthType = %s", slurmdbd_conf->auth_type); + debug2("DbdAddr = %s", slurmdbd_conf->dbd_addr); + debug2("DbdHost = %s", slurmdbd_conf->dbd_host); + debug2("DbdPort = %u", slurmdbd_conf->dbd_port); + debug2("DebugLevel = %u", slurmdbd_conf->debug_level); + debug2("LogFile = %s", slurmdbd_conf->log_file); + debug2("MessageTimeout = %u", slurmdbd_conf->msg_timeout); + debug2("PidFile = %s", slurmdbd_conf->pid_file); + debug2("PluginDir = %s", slurmdbd_conf->plugindir); + debug2("SlurmUser = %s(%u)", + slurmdbd_conf->slurm_user_name, slurmdbd_conf->slurm_user_id); + debug2("StorageHost = %s", slurmdbd_conf->storage_host); + debug2("StorageLoc = %s", slurmdbd_conf->storage_loc); + debug2("StoragePass = %s", slurmdbd_conf->storage_pass); + debug2("StoragePort = %u", slurmdbd_conf->storage_port); + debug2("StorageType = %s", slurmdbd_conf->storage_type); + debug2("StorageUser = %s", slurmdbd_conf->storage_user); +} + +/* Return the DbdPort value */ +extern uint16_t get_dbd_port(void) +{ + uint16_t port; + + slurm_mutex_lock(&conf_mutex); + port = slurmdbd_conf->dbd_port; + slurm_mutex_unlock(&conf_mutex); + return port; +} + +extern void slurmdbd_conf_lock(void) +{ + slurm_mutex_lock(&conf_mutex); +} + +extern void slurmdbd_conf_unlock(void) +{ + slurm_mutex_unlock(&conf_mutex); +} + + +/* Return the pathname of the slurmdbd.conf file. + * xfree() the value returned */ +static char * _get_conf_path(void) +{ + char *val = getenv("SLURM_CONF"); + char *path = NULL; + int i; + + if (!val) + val = default_slurm_config_file; + + /* Replace file name on end of path */ + i = strlen(val) + 15; + path = xmalloc(i); + strcpy(path, val); + val = strrchr(path, (int)'/'); + if (val) /* absolute path */ + val++; + else /* not absolute path */ + val = path; + strcpy(val, "slurmdbd.conf"); + + return path; +} diff --git a/src/slurmdbd/read_config.h b/src/slurmdbd/read_config.h new file mode 100644 index 000000000..79d09c514 --- /dev/null +++ b/src/slurmdbd/read_config.h @@ -0,0 +1,110 @@ +/*****************************************************************************\ + * read_config.h - functions and declarations for reading slurmdbd.conf + ***************************************************************************** + * Copyright (C) 2003-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef _DBD_READ_CONFIG_H +#define _DBD_READ_CONFIG_H + +#if HAVE_CONFIG_H +# include "config.h" +#if HAVE_INTTYPES_H +# include <inttypes.h> +#else /* !HAVE_INTTYPES_H */ +# if HAVE_STDINT_H +# include <stdint.h> +# endif +#endif /* HAVE_INTTYPES_H */ +#else /* !HAVE_CONFIG_H */ +#include <stdint.h> +#endif /* HAVE_CONFIG_H */ + +#include <time.h> + +#define DEFAULT_SLURMDBD_AUTHTYPE "auth/none" +#define DEFAULT_SLURMDBD_PIDFILE "/var/run/slurmdbd.pid" + +/* SlurmDBD configuration parameters */ +typedef struct slurm_dbd_conf { + time_t last_update; /* time slurmdbd.conf read */ + char * auth_info; /* authentication info */ + char * auth_type; /* authentication mechanism */ + char * dbd_addr; /* network address of Slurm DBD */ + char * dbd_host; /* hostname of Slurm DBD */ + uint16_t dbd_port; /* port number for RPCs to DBD */ + uint16_t debug_level; /* Debug level, default=3 */ + char * log_file; /* Log file */ + uint16_t msg_timeout; /* message timeout */ + char * pid_file; /* where to store current PID */ + char * plugindir; /* dir to look for plugins */ + uint32_t slurm_user_id; /* uid of slurm_user_name */ + char * slurm_user_name;/* user that slurmcdtld runs as */ + char * storage_host; /* host where DB is running */ + char * storage_loc; /* database name */ + char * storage_pass; /* password for DB write */ + uint16_t storage_port; /* port DB is listening to */ + char * storage_type; /* DB to be used for storage */ + char * storage_user; /* user authorized to write DB */ +} slurm_dbd_conf_t; + +extern pthread_mutex_t conf_mutex; +extern slurm_dbd_conf_t *slurmdbd_conf; + + +/* + * free_slurmdbd_conf - free storage associated with the global variable + * slurmdbd_conf + */ +extern void free_slurmdbd_conf(void); + +/* Return the DbdPort value */ +extern uint16_t get_dbd_port(void); + +/* lock and unlock the dbd_conf */ +extern void slurmdbd_conf_lock(void); +extern void slurmdbd_conf_unlock(void); + +/* Log the current configuration using verbose() */ +extern void log_config(void); + +/* + * read_slurmdbd_conf - load the SlurmDBD configuration from the slurmdbd.conf + * file. This function can be called more than once if so desired. 
+ * RET SLURM_SUCCESS if no error, otherwise an error code + */ +extern int read_slurmdbd_conf(void); + +#endif /* !_DBD_READ_CONFIG_H */ diff --git a/src/slurmdbd/rpc_mgr.c b/src/slurmdbd/rpc_mgr.c new file mode 100644 index 000000000..e58f503cc --- /dev/null +++ b/src/slurmdbd/rpc_mgr.c @@ -0,0 +1,523 @@ +/*****************************************************************************\ + * rpc_mgr.h - functions for processing RPCs. + ***************************************************************************** + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif +#include <arpa/inet.h> +#include <pthread.h> +#include <signal.h> +#include <sys/poll.h> +#include <sys/time.h> + +#include "src/common/fd.h" +#include "src/common/log.h" +#include "src/common/macros.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurmdbd_defs.h" +#include "src/common/xmalloc.h" +#include "src/common/xsignal.h" +#include "src/slurmdbd/proc_req.h" +#include "src/slurmdbd/read_config.h" +#include "src/slurmdbd/rpc_mgr.h" +#include "src/slurmdbd/slurmdbd.h" + +#define MAX_THREAD_COUNT 50 + +/* Local functions */ +static bool _fd_readable(slurm_fd fd); +static bool _fd_writeable(slurm_fd fd); +static void _free_server_thread(pthread_t my_tid); +static int _send_resp(slurm_fd fd, Buf buffer); +static void * _service_connection(void *arg); +static void _sig_handler(int signal); +static int _tot_wait (struct timeval *start_time); +static int _wait_for_server_thread(void); +static void _wait_for_thread_fini(void); + +/* Local variables */ +static pthread_t master_thread_id = 0, slave_thread_id[MAX_THREAD_COUNT]; +static int thread_count = 0; +static pthread_mutex_t thread_count_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t thread_count_cond = PTHREAD_COND_INITIALIZER; + +typedef struct connection_arg { + slurm_fd newsockfd; +} connection_arg_t; + + +/* Process incoming RPCs. Meant to execute as a pthread */ +extern void *rpc_mgr(void *no_data) +{ + pthread_attr_t thread_attr_rpc_req; + slurm_fd sockfd, newsockfd; + int i, retry_cnt, sigarray[] = {SIGUSR1, 0}; + slurm_addr cli_addr; + connection_arg_t *conn_arg = NULL; + + slurm_mutex_lock(&thread_count_lock); + master_thread_id = pthread_self(); + slurm_mutex_unlock(&thread_count_lock); + + (void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); + (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + /* threads to process individual RPC's are detached */ + slurm_attr_init(&thread_attr_rpc_req); + if (pthread_attr_setdetachstate + (&thread_attr_rpc_req, PTHREAD_CREATE_DETACHED)) + fatal("pthread_attr_setdetachstate %m"); + + /* initialize port for RPCs */ + if ((sockfd = slurm_init_msg_engine_port(get_dbd_port())) + == SLURM_SOCKET_ERROR) + fatal("slurm_init_msg_engine_port error %m"); + + /* Prepare to catch SIGUSR1 to interrupt accept(). + * This signal is generated by the slurmdbd signal + * handler thread upon receipt of SIGABRT, SIGINT, + * or SIGTERM. That thread does all processing of + * all signals. 
*/ + xsignal(SIGUSR1, _sig_handler); + xsignal_unblock(sigarray); + + /* + * Process incoming RPCs until told to shutdown + */ + while ((i = _wait_for_server_thread()) >= 0) { + /* + * accept needed for stream implementation is a no-op in + * message implementation that just passes sockfd to newsockfd + */ + if ((newsockfd = slurm_accept_msg_conn(sockfd, + &cli_addr)) == + SLURM_SOCKET_ERROR) { + _free_server_thread((pthread_t) 0); + if (errno != EINTR) + error("slurm_accept_msg_conn: %m"); + continue; + } + fd_set_nonblocking(newsockfd); + conn_arg = xmalloc(sizeof(connection_arg_t)); + conn_arg->newsockfd = newsockfd; + retry_cnt = 0; + while (pthread_create(&slave_thread_id[i], + &thread_attr_rpc_req, + _service_connection, + (void *) conn_arg)) { + if (retry_cnt > 0) { + error("pthread_create failure, " + "aborting RPC: %m"); + close(newsockfd); + break; + } + error("pthread_create failure: %m"); + retry_cnt++; + usleep(1000); /* retry in 1 msec */ + } + } + + debug3("rpc_mgr shutting down"); + slurm_attr_destroy(&thread_attr_rpc_req); + (void) slurm_shutdown_msg_engine(sockfd); + _wait_for_thread_fini(); + pthread_exit((void *) 0); + return NULL; +} + +/* Wake up the RPC manager and all spawned threads so they can exit */ +extern void rpc_mgr_wake(void) +{ + int i; + + slurm_mutex_lock(&thread_count_lock); + if (master_thread_id) + pthread_kill(master_thread_id, SIGUSR1); + for (i=0; i<MAX_THREAD_COUNT; i++) { + if (slave_thread_id[i]) + pthread_kill(slave_thread_id[i], SIGUSR1); + } + slurm_mutex_unlock(&thread_count_lock); +} + +static void * _service_connection(void *arg) +{ + connection_arg_t *conn = (connection_arg_t *) arg; + uint32_t nw_size, msg_size, uid; + char *msg = NULL; + ssize_t msg_read, offset; + bool fini = false, first = true; + Buf buffer = NULL; + int rc; + void *db_conn = NULL; + + debug2("Opened connection %d", conn->newsockfd); + while (!fini) { + if (!_fd_readable(conn->newsockfd)) + break; /* problem with this socket */ + msg_read = read(conn->newsockfd, &nw_size, sizeof(nw_size)); + if (msg_read == 0) /* EOF */ + break; + if (msg_read != sizeof(nw_size)) { + error("Could not read msg_size from connection %d", + conn->newsockfd); + break; + } + msg_size = ntohl(nw_size); + if ((msg_size < 2) || (msg_size > 1000000)) { + error("Invalid msg_size (%u) from connection %d", + conn->newsockfd); + break; + } + + msg = xmalloc(msg_size); + offset = 0; + while (msg_size > offset) { + if (!_fd_readable(conn->newsockfd)) + break; /* problem with this socket */ + msg_read = read(conn->newsockfd, (msg + offset), + (msg_size - offset)); + if (msg_read <= 0) { + error("read(%d): %m", conn->newsockfd); + break; + } + offset += msg_read; + } + if (msg_size == offset) { + rc = proc_req(&db_conn, conn->newsockfd, + msg, msg_size, first, &buffer, &uid); + first = false; + if (rc != SLURM_SUCCESS) { + error("Processing message from connection %d", + conn->newsockfd); + //fini = true; + } + } else { + buffer = make_dbd_rc_msg(SLURM_ERROR, "Bad offset", 0); + fini = true; + } + + rc = _send_resp(conn->newsockfd, buffer); + xfree(msg); + } + + acct_storage_g_close_connection(&db_conn); + if (slurm_close_accepted_conn(conn->newsockfd) < 0) + error("close(%d): %m", conn->newsockfd); + else + debug2("Closed connection %d uid(%d)", conn->newsockfd, uid); + xfree(arg); + _free_server_thread(pthread_self()); + return NULL; +} + +/* Return a buffer containing a DBD_RC (return code) message + * caller must free returned buffer */ +extern Buf make_dbd_rc_msg(int rc, char *comment, uint16_t 
sent_type) +{ + Buf buffer; + + dbd_rc_msg_t msg; + buffer = init_buf(1024); + pack16((uint16_t) DBD_RC, buffer); + msg.return_code = rc; + msg.comment = comment; + msg.sent_type = sent_type; + slurmdbd_pack_rc_msg(&msg, buffer); + return buffer; +} + +static int _send_resp(slurm_fd fd, Buf buffer) +{ + uint32_t msg_size, nw_size; + ssize_t msg_wrote; + char *out_buf; + + if ((fd < 0) || (!_fd_writeable(fd))) + goto io_err; + + msg_size = get_buf_offset(buffer); + nw_size = htonl(msg_size); + if (!_fd_writeable(fd)) + goto io_err; + msg_wrote = write(fd, &nw_size, sizeof(nw_size)); + if (msg_wrote != sizeof(nw_size)) + goto io_err; + + out_buf = get_buf_data(buffer); + while (msg_size > 0) { + if (!_fd_writeable(fd)) + goto io_err; + msg_wrote = write(fd, out_buf, msg_size); + if (msg_wrote <= 0) + goto io_err; + out_buf += msg_wrote; + msg_size -= msg_wrote; + } + free_buf(buffer); + return SLURM_SUCCESS; + +io_err: + free_buf(buffer); + return SLURM_ERROR; +} + +/* Return time in msec since "start time" */ +static int _tot_wait (struct timeval *start_time) +{ + struct timeval end_time; + int msec_delay; + + gettimeofday(&end_time, NULL); + msec_delay = (end_time.tv_sec - start_time->tv_sec ) * 1000; + msec_delay += ((end_time.tv_usec - start_time->tv_usec + 500) / 1000); + return msec_delay; +} + +/* Wait until a file is readable, return false if can not be read */ +static bool _fd_readable(slurm_fd fd) +{ + struct pollfd ufds; + int rc; + + ufds.fd = fd; + ufds.events = POLLIN; + while (1) { + rc = poll(&ufds, 1, -1); + if (shutdown_time) + return false; + if (rc == -1) { + if ((errno == EINTR) || (errno == EAGAIN)) + continue; + error("poll: %m"); + return false; + } + if (ufds.revents & POLLHUP) { + debug3("Read connection %d closed", fd); + return false; + } + if (ufds.revents & POLLNVAL) { + error("Connection %d is invalid", fd); + return false; + } + if (ufds.revents & POLLERR) { + error("Connection %d experienced an error", fd); + return false; + } + if ((ufds.revents & POLLIN) == 0) { + error("Connection %d events %d", fd, ufds.revents); + return false; + } + break; + } + return true; +} + +/* Wait until a file is writeable, + * RET false if can not be written to within 5 seconds */ +static bool _fd_writeable(slurm_fd fd) +{ + struct pollfd ufds; + int msg_timeout = 5000; + int rc, time_left; + struct timeval tstart; + + ufds.fd = fd; + ufds.events = POLLOUT; + gettimeofday(&tstart, NULL); + while (1) { + time_left = msg_timeout - _tot_wait(&tstart); + rc = poll(&ufds, 1, time_left); + if (shutdown_time) + return false; + if (rc == -1) { + if ((errno == EINTR) || (errno == EAGAIN)) + continue; + error("poll: %m"); + return false; + } + if (rc == 0) { + error("write timeout"); + return false; + } + if (ufds.revents & POLLHUP) { + debug3("Write connection %d closed", fd); + return false; + } + if (ufds.revents & POLLNVAL) { + error("Connection %d is invalid", fd); + return false; + } + if (ufds.revents & POLLERR) { + error("Connection %d experienced an error", fd); + return false; + } + if ((ufds.revents & POLLOUT) == 0) { + error("Connection %d events %d", fd, ufds.revents); + return false; + } + break; + } + return true; +} + +/* Increment thread_count and don't return until its value is no larger + * than MAX_THREAD_COUNT, + * RET index of free index in slave_pthread_id or -1 to exit */ +static int _wait_for_server_thread(void) +{ + bool print_it = true; + int i, rc = -1; + + slurm_mutex_lock(&thread_count_lock); + while (1) { + if (shutdown_time) + break; + + if (thread_count < 
MAX_THREAD_COUNT) { + thread_count++; + for (i=0; i<MAX_THREAD_COUNT; i++) { + if (slave_thread_id[i]) + continue; + rc = i; + break; + } + if (rc == -1) { + /* thread_count and slave_thread_id + * out of sync */ + fatal("No free slave_thread_id"); + } + break; + } else { + /* wait for state change and retry, + * just a delay and not an error. + * This can happen when the epilog completes + * on a bunch of nodes at the same time, which + * can easily happen for highly parallel jobs. */ + if (print_it) { + static time_t last_print_time = 0; + time_t now = time(NULL); + if (difftime(now, last_print_time) > 2) { + verbose("thread_count over " + "limit (%d), waiting", + thread_count); + last_print_time = now; + } + print_it = false; + } + pthread_cond_wait(&thread_count_cond, + &thread_count_lock); + } + } + slurm_mutex_unlock(&thread_count_lock); + return rc; +} + +/* my_tid IN - Thread ID of spawned thread, 0 if no thread spawned */ +static void _free_server_thread(pthread_t my_tid) +{ + int i; + + slurm_mutex_lock(&thread_count_lock); + if (thread_count > 0) + thread_count--; + else + error("thread_count underflow"); + + if (my_tid) { + for (i=0; i<MAX_THREAD_COUNT; i++) { + if (slave_thread_id[i] != my_tid) + continue; + slave_thread_id[i] = (pthread_t) 0; + break; + } + if (i >= MAX_THREAD_COUNT) + error("Could not find slave_thread_id"); + } + + slurm_mutex_unlock(&thread_count_lock); + pthread_cond_broadcast(&thread_count_cond); +} + +/* Wait for all RPC handler threads to exit. + * After one second, start sending SIGKILL to the threads. */ +static void _wait_for_thread_fini(void) +{ + int i, j; + + if (thread_count == 0) + return; + usleep(500000); /* Give the threads 500 msec to clean up */ + + /* Interupt any hung I/O */ + slurm_mutex_lock(&thread_count_lock); + for (j=0; j<MAX_THREAD_COUNT; j++) { + if (slave_thread_id[j] == 0) + continue; + pthread_kill(slave_thread_id[j], SIGUSR1); + } + slurm_mutex_unlock(&thread_count_lock); + usleep(100000); /* Give the threads 100 msec to clean up */ + + for (i=0; ; i++) { + if (thread_count == 0) + return; + + slurm_mutex_lock(&thread_count_lock); + for (j=0; j<MAX_THREAD_COUNT; j++) { + if (slave_thread_id[j] == 0) + continue; + info("rpc_mgr sending SIGKILL to thread %u", + slave_thread_id[j]); + if (pthread_kill(slave_thread_id[j], SIGKILL)) { + slave_thread_id[j] = 0; + if (thread_count > 0) + thread_count--; + else + error("thread_count underflow"); + } + } + slurm_mutex_unlock(&thread_count_lock); + sleep(1); + } +} + +static void _sig_handler(int signal) +{ +} diff --git a/src/srun/msg.h b/src/slurmdbd/rpc_mgr.h similarity index 73% rename from src/srun/msg.h rename to src/slurmdbd/rpc_mgr.h index 0bd2078c8..2f5c881cb 100644 --- a/src/srun/msg.h +++ b/src/slurmdbd/rpc_mgr.h @@ -1,11 +1,10 @@ /*****************************************************************************\ - * src/srun/msg.h - message traffic between srun and slurmd routines - * $Id: msg.h 10574 2006-12-15 23:38:29Z jette $ + * rpc_mgr.h - functions and definitions for processing RPCs. ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. 
* For details, see <http://www.llnl.gov/linux/slurm/>. @@ -16,7 +15,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -36,21 +35,20 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ -#include "src/srun/srun_job.h" +#ifndef _RPC_MGR_H +#define _RPC_MGR_H -#ifndef _HAVE_MSG_H -#define _HAVE_MSG_H +#include "src/common/pack.h" +#include "src/common/assoc_mgr.h" -void *msg_thr(void *arg); -int msg_thr_create(srun_job_t *job); -slurm_fd slurmctld_msg_init(void); -void timeout_handler(time_t timeout); +/* Return a buffer containing a DBD_RC (return code) message + * caller must free returned buffer */ +extern Buf make_dbd_rc_msg(int rc, char *comment, uint16_t sent_type); -typedef struct slurmctld_communication_addr { - char *hostname; - uint16_t port; -} slurmctld_comm_addr_t; +/* Process incoming RPCs. Meant to execute as a pthread */ +extern void *rpc_mgr(void *no_data); -slurmctld_comm_addr_t slurmctld_comm_addr; +/* Wake up the RPC manager so that it can exit */ +extern void rpc_mgr_wake(void); -#endif /* !_HAVE_MSG_H */ +#endif /* !_RPC_MGR_H */ diff --git a/src/slurmdbd/slurmdbd.c b/src/slurmdbd/slurmdbd.c new file mode 100644 index 000000000..6738308ef --- /dev/null +++ b/src/slurmdbd/slurmdbd.c @@ -0,0 +1,465 @@ +/*****************************************************************************\ + * slurmdbd.c - functions for SlurmDBD + ***************************************************************************** + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif +#include <pthread.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/resource.h> +#include <sys/types.h> +#include <unistd.h> + +#include "src/common/daemonize.h" +#include "src/common/fd.h" +#include "src/common/log.h" +#include "src/common/read_config.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurm_auth.h" +#include "src/common/xmalloc.h" +#include "src/common/xsignal.h" +#include "src/common/xstring.h" +#include "src/slurmdbd/read_config.h" +#include "src/slurmdbd/rpc_mgr.h" + +/* Global variables */ +time_t shutdown_time = 0; /* when shutdown request arrived */ + +/* Local variables */ +static int dbd_sigarray[] = { /* blocked signals for this process */ + SIGINT, SIGTERM, SIGCHLD, SIGUSR1, + SIGUSR2, SIGTSTP, SIGXCPU, SIGQUIT, + SIGPIPE, SIGALRM, SIGABRT, SIGHUP, 0 }; +static int debug_level = 0; /* incremented for -v on command line */ +static int foreground = 0; /* run process as a daemon */ +static log_options_t log_opts = /* Log to stderr & syslog */ + LOG_OPTS_INITIALIZER; +static pthread_t rpc_handler_thread; /* thread ID for RPC hander */ +static pthread_t signal_handler_thread; /* thread ID for signal hander */ +static pthread_t rollup_handler_thread; /* thread ID for rollup hander */ +static pthread_mutex_t rollup_lock = PTHREAD_MUTEX_INITIALIZER; +static bool running_rollup = 0; + +/* Local functions */ +static void _daemonize(void); +static void _default_sigaction(int sig); +static void _init_config(void); +static void _init_pidfile(void); +static void _kill_old_slurmdbd(void); +static void _parse_commandline(int argc, char *argv[]); +static void _rollup_handler_cancel(); +static void *_rollup_handler(void *no_data); +static void *_signal_handler(void *no_data); +static void _update_logging(void); +static void _usage(char *prog_name); + +/* main - slurmctld main function, start various threads and process RPCs */ +int main(int argc, char *argv[]) +{ + pthread_attr_t thread_attr; + char node_name[128]; + void *db_conn = NULL; + + _init_config(); + log_init(argv[0], log_opts, LOG_DAEMON, NULL); + if (read_slurmdbd_conf()) + exit(1); + _parse_commandline(argc, argv); + _update_logging(); + + if (gethostname_short(node_name, sizeof(node_name))) + fatal("getnodename: %m"); + if (slurmdbd_conf->dbd_host && + strcmp(slurmdbd_conf->dbd_host, node_name) && + strcmp(slurmdbd_conf->dbd_host, "localhost")) { + fatal("This host not configured to run SlurmDBD (%s != %s)", + node_name, slurmdbd_conf->dbd_host); + } + if (slurm_auth_init(NULL) != SLURM_SUCCESS) { + fatal("Unable to initialize %s authentication plugin", + slurmdbd_conf->auth_type); + } + if (slurm_acct_storage_init(NULL) != SLURM_SUCCESS) { + fatal("Unable to initialize %s accounting storage plugin", + slurmdbd_conf->storage_type); + } + _kill_old_slurmdbd(); + if (foreground == 0) + _daemonize(); + _init_pidfile(); + log_config(); + info("slurmdbd version %s started", SLURM_VERSION); + if (xsignal_block(dbd_sigarray) < 0) + error("Unable to block signals"); + + /* Create attached thread for signal handling */ + slurm_attr_init(&thread_attr); + 
if (pthread_create(&signal_handler_thread, &thread_attr, + _signal_handler, NULL)) + fatal("pthread_create %m"); + slurm_attr_destroy(&thread_attr); + + db_conn = acct_storage_g_get_connection(false, false); + + if(assoc_mgr_init(db_conn, 0) == SLURM_ERROR) { + error("Problem getting cache of data"); + acct_storage_g_close_connection(&db_conn); + goto end_it; + } + + /* Create attached thread to process incoming RPCs */ + slurm_attr_init(&thread_attr); + if (pthread_create(&rpc_handler_thread, &thread_attr, rpc_mgr, NULL)) + fatal("pthread_create error %m"); + slurm_attr_destroy(&thread_attr); + + /* Create attached thread to do usage rollup */ + slurm_attr_init(&thread_attr); + if (pthread_create(&rollup_handler_thread, &thread_attr, + _rollup_handler, db_conn)) + fatal("pthread_create error %m"); + slurm_attr_destroy(&thread_attr); + + /* Daemon is fully operational here */ + + /* Daemon termination handled here */ + pthread_join(rollup_handler_thread, NULL); + + pthread_join(rpc_handler_thread, NULL); + + pthread_join(signal_handler_thread, NULL); + +end_it: + acct_storage_g_close_connection(&db_conn); + + if (slurmdbd_conf->pid_file && + (unlink(slurmdbd_conf->pid_file) < 0)) { + verbose("Unable to remove pidfile '%s': %m", + slurmdbd_conf->pid_file); + } + + assoc_mgr_fini(); + slurm_acct_storage_fini(); + slurm_auth_fini(); + log_fini(); + free_slurmdbd_conf(); + exit(0); +} + +/* Reset some of the processes resource limits to the hard limits */ +static void _init_config(void) +{ + struct rlimit rlim; + + if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) { + rlim.rlim_cur = rlim.rlim_max; + (void) setrlimit(RLIMIT_NOFILE, &rlim); + } + if (getrlimit(RLIMIT_CORE, &rlim) == 0) { + rlim.rlim_cur = rlim.rlim_max; + (void) setrlimit(RLIMIT_CORE, &rlim); + } + if (getrlimit(RLIMIT_STACK, &rlim) == 0) { + /* slurmctld can spawn lots of pthreads. + * Set the (per thread) stack size to a + * more "reasonable" value to avoid running + * out of virtual memory and dying */ + rlim.rlim_cur = rlim.rlim_max; + (void) setrlimit(RLIMIT_STACK, &rlim); + } + if (getrlimit(RLIMIT_DATA, &rlim) == 0) { + rlim.rlim_cur = rlim.rlim_max; + (void) setrlimit(RLIMIT_DATA, &rlim); + } +} + +/* + * _parse_commandline - parse and process any command line arguments + * IN argc - number of command line arguments + * IN argv - the command line arguments + * IN/OUT conf_ptr - pointer to current configuration, update as needed + */ +static void _parse_commandline(int argc, char *argv[]) +{ + int c = 0; + + opterr = 0; + while ((c = getopt(argc, argv, "DhvV")) != -1) + switch (c) { + case 'D': + foreground = 1; + break; + case 'h': + _usage(argv[0]); + exit(0); + break; + case 'v': + debug_level++; + break; + case 'V': + printf("%s %s\n", PACKAGE, SLURM_VERSION); + exit(0); + break; + default: + _usage(argv[0]); + exit(1); + } +} + +/* _usage - print a message describing the command line arguments of + * slurmctld */ +static void _usage(char *prog_name) +{ + fprintf(stderr, "Usage: %s [OPTIONS]\n", prog_name); + fprintf(stderr, " -D \t" + "Run daemon in foreground.\n"); + fprintf(stderr, " -h \t" + "Print this help message.\n"); + fprintf(stderr, " -v \t" + "Verbose mode. 
Multiple -v's increase verbosity.\n"); + fprintf(stderr, " -V \t" + "Print version information and exit.\n"); +} + +/* Reset slurmctld logging based upon configuration parameters */ +static void _update_logging(void) +{ + /* Preserve execute line arguments (if any) */ + if (debug_level) { + slurmdbd_conf->debug_level = MIN( + (LOG_LEVEL_INFO + debug_level), + (LOG_LEVEL_END - 1)); + } + + log_opts.stderr_level = slurmdbd_conf->debug_level; + log_opts.logfile_level = slurmdbd_conf->debug_level; + log_opts.syslog_level = slurmdbd_conf->debug_level; + + if (foreground) + log_opts.syslog_level = LOG_LEVEL_QUIET; + else { + log_opts.stderr_level = LOG_LEVEL_QUIET; + if (slurmdbd_conf->log_file) + log_opts.syslog_level = LOG_LEVEL_QUIET; + } + + log_alter(log_opts, SYSLOG_FACILITY_DAEMON, slurmdbd_conf->log_file); +} + +/* Kill the currently running slurmdbd */ +static void _kill_old_slurmdbd(void) +{ + int fd; + pid_t oldpid; + + if (slurmdbd_conf->pid_file == NULL) { + error("No PidFile configured"); + return; + } + + oldpid = read_pidfile(slurmdbd_conf->pid_file, &fd); + if (oldpid != (pid_t) 0) { + info("Killing old slurmdbd[%ld]", (long) oldpid); + kill(oldpid, SIGTERM); + + /* + * Wait for previous daemon to terminate + */ + if (fd_get_readw_lock(fd) < 0) + fatal("Unable to wait for readw lock: %m"); + (void) close(fd); /* Ignore errors */ + } +} + +/* Create the PidFile if one is configured */ +static void _init_pidfile(void) +{ + int fd; + + if (slurmdbd_conf->pid_file == NULL) { + error("No PidFile configured"); + return; + } + + if ((fd = create_pidfile(slurmdbd_conf->pid_file)) < 0) + return; +} + +/* Become a daemon (child of init) and + * "cd" to the LogFile directory (if one is configured) */ +static void _daemonize(void) +{ + if (daemon(1, 1)) + error("daemon(): %m"); + log_alter(log_opts, LOG_DAEMON, slurmdbd_conf->log_file); + + if (slurmdbd_conf->log_file && + (slurmdbd_conf->log_file[0] == '/')) { + char *slash_ptr, *work_dir; + work_dir = xstrdup(slurmdbd_conf->log_file); + slash_ptr = strrchr(work_dir, '/'); + if (slash_ptr == work_dir) + work_dir[1] = '\0'; + else + slash_ptr[0] = '\0'; + if (chdir(work_dir) < 0) + fatal("chdir(%s): %m", work_dir); + xfree(work_dir); + } +} + +static void _rollup_handler_cancel() +{ + if(running_rollup) + debug("Waiting for rollup thread to finish."); + slurm_mutex_lock(&rollup_lock); + pthread_cancel(rollup_handler_thread); + slurm_mutex_unlock(&rollup_lock); +} + +/* _rollup_handler - Process rollup duties */ +static void *_rollup_handler(void *db_conn) +{ + time_t start_time = time(NULL); + time_t next_time; +/* int sigarray[] = {SIGUSR1, 0}; */ + struct tm tm; + + (void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); + (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + if(!localtime_r(&start_time, &tm)) { + fatal("Couldn't get localtime for rollup handler %d", + start_time); + return NULL; + } + + while (1) { + if(!db_conn) + break; + /* run the roll up */ + slurm_mutex_lock(&rollup_lock); + running_rollup = 1; + debug2("running rollup at %s", ctime(&start_time)); + acct_storage_g_roll_usage(db_conn, 0); + running_rollup = 0; + slurm_mutex_unlock(&rollup_lock); + + /* sleep for an hour */ + tm.tm_sec = 0; + tm.tm_min = 0; + tm.tm_hour++; + tm.tm_isdst = -1; + next_time = mktime(&tm); + sleep((next_time-start_time)); + start_time = next_time; + /* repeat ;) */ + } + + return NULL; +} + +/* _signal_handler - Process daemon-wide signals */ +static void *_signal_handler(void *no_data) +{ + int rc, sig; + int 
sig_array[] = {SIGINT, SIGTERM, SIGHUP, SIGABRT, 0}; + sigset_t set; + + (void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); + (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); + + /* Make sure no required signals are ignored (possibly inherited) */ + _default_sigaction(SIGINT); + _default_sigaction(SIGTERM); + _default_sigaction(SIGHUP); + _default_sigaction(SIGABRT); + + while (1) { + xsignal_sigset_create(sig_array, &set); + rc = sigwait(&set, &sig); + if (rc == EINTR) + continue; + switch (sig) { + case SIGHUP: /* kill -1 */ + info("Reconfigure signal (SIGHUP) received"); + read_slurmdbd_conf(); + _update_logging(); + break; + case SIGINT: /* kill -2 or <CTRL-C> */ + case SIGTERM: /* kill -15 */ + info("Terminate signal (SIGINT or SIGTERM) received"); + shutdown_time = time(NULL); + rpc_mgr_wake(); + _rollup_handler_cancel(); + + return NULL; /* Normal termination */ + case SIGABRT: /* abort */ + info("SIGABRT received"); + abort(); /* Should terminate here */ + shutdown_time = time(NULL); + rpc_mgr_wake(); + _rollup_handler_cancel(); + return NULL; + default: + error("Invalid signal (%d) received", sig); + } + } + +} + +/* Reset some signals to their default state to clear any + * inherited signal states */ +static void _default_sigaction(int sig) +{ + struct sigaction act; + + if (sigaction(sig, NULL, &act)) { + error("sigaction(%d): %m", sig); + return; + } + if (act.sa_handler != SIG_IGN) + return; + + act.sa_handler = SIG_DFL; + if (sigaction(sig, &act, NULL)) + error("sigaction(%d): %m", sig); +} diff --git a/src/srun/sigstr.h b/src/slurmdbd/slurmdbd.h similarity index 80% rename from src/srun/sigstr.h rename to src/slurmdbd/slurmdbd.h index d69a6d577..a5fb94502 100644 --- a/src/srun/sigstr.h +++ b/src/slurmdbd/slurmdbd.h @@ -1,11 +1,11 @@ /*****************************************************************************\ - * src/srun/sigstr.h - - * $Id: sigstr.h 10574 2006-12-15 23:38:29Z jette $ + * slurmdbd.h - data structures and function definitions for SlurmDBD ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * Written by Morris Jette <jette@llnl.gov> + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -36,15 +36,9 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ -#ifndef _SIGSTR_H -#define _SIGSTR_H +#ifndef _SLURM_DBD_H +#define _SLURM_DBD_H -/* - * Returns a descriptive string regarding the signal given in the - * exit status 'status.' - * - * WIFSIGNALED(status) must be true in order to call this function! 
- */ -char *sigstr(int status); +extern time_t shutdown_time; /* when shutdown request arrived */ -#endif /* !_SIGSTR_H */ +#endif /* !_SLURM_DBD_H */ diff --git a/src/smap/Makefile.am b/src/smap/Makefile.am index 32f7aff2b..2ec5094dc 100644 --- a/src/smap/Makefile.am +++ b/src/smap/Makefile.am @@ -14,7 +14,8 @@ if HAVE_SOME_CURSES bin_PROGRAMS = smap smap_LDADD = \ - $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la + $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \ + $(top_builddir)/src/api/libslurmhelper.la noinst_HEADERS = smap.h smap_SOURCES = smap.c \ diff --git a/src/smap/Makefile.in b/src/smap/Makefile.in index a4bcfd7d4..06f6ec162 100644 --- a/src/smap/Makefile.in +++ b/src/smap/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -51,6 +51,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -85,11 +87,12 @@ am__EXTRA_smap_SOURCES_DIST = smap.h smap.c job_functions.c \ partition_functions.c configure_functions.c grid_functions.c \ opts.c smap_OBJECTS = $(am_smap_OBJECTS) -@HAVE_SOME_CURSES_TRUE@smap_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la +@HAVE_SOME_CURSES_TRUE@smap_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \ +@HAVE_SOME_CURSES_TRUE@ $(top_builddir)/src/api/libslurmhelper.la smap_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(smap_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -131,6 +134,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -144,10 +148,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -167,7 +174,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -178,6 +188,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -193,6 +205,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -208,6 +221,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -267,7 +281,8 @@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign INCLUDES = -I$(top_srcdir) $(BG_INCLUDES) @HAVE_SOME_CURSES_TRUE@smap_LDADD = \ -@HAVE_SOME_CURSES_TRUE@ $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la +@HAVE_SOME_CURSES_TRUE@ $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \ +@HAVE_SOME_CURSES_TRUE@ $(top_builddir)/src/api/libslurmhelper.la @HAVE_SOME_CURSES_TRUE@noinst_HEADERS = smap.h @HAVE_SOME_CURSES_TRUE@smap_SOURCES = smap.c \ @@ -321,8 +336,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -389,8 +404,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ 
-402,8 +417,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -413,13 +428,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c index aa5294f71..0f15f3628 100644 --- a/src/smap/configure_functions.c +++ b/src/smap/configure_functions.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * configure_functions.c - Functions related to configure mode of smap. - * $Id: configure_functions.c 13270 2008-02-14 19:40:44Z da $ + * $Id: configure_functions.c 13783 2008-04-03 00:07:07Z da $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -585,11 +585,14 @@ static int _change_state_bps(char *com, int state) #ifdef HAVE_BG if ((com[i+3] == 'x') || (com[i+3] == '-')) { - for(j=0; j<3; j++) - if((i+j)>len - || (com[i+j] < '0' || com[i+j] > 'Z' - || (com[i+j] > '9' && com[i+j] < 'A'))) - goto error_message2; + for(j=0; j<3; j++) { + if (((i+j) <= len) && + (((com[i+j] >= '0') && (com[i+j] <= '9')) || + ((com[i+j] >= 'A') && (com[i+j] <= 'Z')))) + continue; + goto error_message2; + + } number = xstrntol(com + i, NULL, BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE); start[X] = number / (HOSTLIST_BASE * HOSTLIST_BASE); @@ -598,11 +601,13 @@ static int _change_state_bps(char *com, int state) start[Z] = (number % HOSTLIST_BASE); i += 4; - for(j=0; j<3; j++) - if((i+j)>len - || (com[i+j] < '0' || com[i+j] > 'Z' - || (com[i+j] > '9' && com[i+j] < 'A'))) - goto error_message2; + for(j=0; j<3; j++) { + if (((i+j) <= len) && + (((com[i+j] >= '0') && (com[i+j] <= '9')) || + ((com[i+j] >= 'A') && (com[i+j] <= 'Z')))) + continue; + goto error_message2; + } number = xstrntol(com + i, NULL, BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE); end[X] = number / (HOSTLIST_BASE * HOSTLIST_BASE); @@ -610,11 +615,13 @@ static int _change_state_bps(char *com, int state) / HOSTLIST_BASE; end[Z] = (number % HOSTLIST_BASE); } else { - for(j=0; j<3; j++) - if((i+j)>len - || (com[i+j] < '0' || com[i+j] > 'Z' - || (com[i+j] > '9' && com[i+j] < 'A'))) - goto error_message2; + for(j=0; j<3; j++) { + if (((i+j) <= len) && + (((com[i+j] >= '0') && (com[i+j] <= '9')) || + ((com[i+j] >= 'A') && (com[i+j] <= 'Z')))) + continue; + goto error_message2; + } number = xstrntol(com + i, NULL, BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE); start[X] = end[X] = number / (HOSTLIST_BASE * HOSTLIST_BASE); @@ -649,7 +656,7 @@ static int _change_state_bps(char *com, int state) if ((com[i+3] == 'x') || (com[i+3] == '-')) { start[X] = xstrntol(com + i, NULL, - BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE);; + BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE); i += 4; end[X] = xstrntol(com + i, NULL, BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE); diff --git a/src/smap/grid_functions.c b/src/smap/grid_functions.c index b97c73be6..86cc09957 100644 --- a/src/smap/grid_functions.c +++ b/src/smap/grid_functions.c @@ -5,7 +5,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/smap/job_functions.c b/src/smap/job_functions.c index 21eedfc16..9a49b3729 100644 --- a/src/smap/job_functions.c +++ b/src/smap/job_functions.c @@ -5,7 +5,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/smap/opts.c b/src/smap/opts.c index 2abc184ed..40a73dbc5 100644 --- a/src/smap/opts.c +++ b/src/smap/opts.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
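The configure_functions.c hunks above rewrite the three compound range tests in _change_state_bps() as small loops that accept a character only if it is a base-36 hostlist digit, '0'-'9' or 'A'-'Z', and jump to error_message2 on anything else; the accepted set is the same as before, the change is readability. A minimal standalone sketch of that predicate follows; the helper name valid_bp_coord is mine and not part of the patch, and the (i + j) <= len bound simply mirrors the patch as written.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper (illustration only, not in SLURM): return true if the
 * three characters at com[i..i+2] stay within the bound used by the patch
 * (index <= len) and each one is a base-36 hostlist digit, i.e. '0'-'9' or
 * 'A'-'Z'; otherwise return false, which corresponds to the goto
 * error_message2 path in _change_state_bps(). */
static bool valid_bp_coord(const char *com, int i, int len)
{
	for (int j = 0; j < 3; j++) {
		if ((i + j) > len)
			return false;
		if ((com[i + j] >= '0') && (com[i + j] <= '9'))
			continue;
		if ((com[i + j] >= 'A') && (com[i + j] <= 'Z'))
			continue;
		return false;
	}
	return true;
}

int main(void)
{
	/* A "000x1AZ" style range: both three-character coordinates pass. */
	const char *com = "000x1AZ";
	printf("%d %d\n", valid_bp_coord(com, 0, 7), valid_bp_coord(com, 4, 7));
	return 0;
}

Once the three characters pass this check, the smap code hands them to xstrntol() with HOSTLIST_BASE to turn the base-36 coordinate into X/Y/Z indices, which is why only digits and uppercase letters are admitted here.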
diff --git a/src/smap/partition_functions.c b/src/smap/partition_functions.c index 9e8c6743b..89e24e1b6 100644 --- a/src/smap/partition_functions.c +++ b/src/smap/partition_functions.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/smap/smap.c b/src/smap/smap.c index 9e704f32f..f6aa3f8f9 100644 --- a/src/smap/smap.c +++ b/src/smap/smap.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * smap.c - Report overall state the system - * $Id: smap.c 13794 2008-04-04 15:56:01Z da $ + * $Id: smap.c 13795 2008-04-04 15:59:14Z da $ ***************************************************************************** * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/smap/smap.h b/src/smap/smap.h index b6f3413f6..28e17a6dd 100644 --- a/src/smap/smap.h +++ b/src/smap/smap.h @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/squeue/Makefile.in b/src/squeue/Makefile.in index 7c71dab52..1f178ae7d 100644 --- a/src/squeue/Makefile.in +++ b/src/squeue/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -48,6 +48,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -78,7 +80,7 @@ squeue_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la squeue_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(squeue_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -300,8 +313,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -366,8 +379,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -379,8 +392,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -390,13 +403,12 @@ ctags: CTAGS CTAGS: 
$(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/squeue/opts.c b/src/squeue/opts.c index 488a3b947..d295de2d7 100644 --- a/src/squeue/opts.c +++ b/src/squeue/opts.c @@ -1,12 +1,12 @@ /****************************************************************************\ * opts.c - srun command line option parsing * - * $Id: opts.c 12590 2007-10-31 16:08:11Z jette $ + * $Id: opts.c 14150 2008-05-29 00:14:29Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -558,6 +558,11 @@ extern int parse_format( char* format ) field_size, right_justify, suffix ); + else if (field[0] == 'Q') + job_format_add_priority_long( params.format_list, + field_size, + right_justify, + suffix ); else if (field[0] == 'r') job_format_add_reason( params.format_list, field_size, diff --git a/src/squeue/print.c b/src/squeue/print.c index 7aba3a523..0225f2ca3 100644 --- a/src/squeue/print.c +++ b/src/squeue/print.c @@ -1,12 +1,12 @@ /*****************************************************************************\ * print.c - squeue print job functions - * $Id: print.c 12951 2008-01-04 00:29:45Z jette $ ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, * Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -525,6 +525,20 @@ int _print_job_priority(job_info_t * job, int width, bool right, char* suffix) return SLURM_SUCCESS; } +int _print_job_priority_long(job_info_t * job, int width, bool right, char* suffix) +{ + char temp[FORMAT_STRING_SIZE]; + if (job == NULL) /* Print the Header instead */ + _print_str("PRIORITY", width, right, true); + else { + sprintf(temp, "%u", job->priority); + _print_str(temp, width, right, true); + } + if (suffix) + printf("%s", suffix); + return SLURM_SUCCESS; +} + int _print_job_nodes(job_info_t * job, int width, bool right, char* suffix) { if (job == NULL) { /* Print the Header instead */ @@ -870,19 +884,12 @@ int _print_job_min_memory(job_info_t * job, int width, bool right_justify, char* suffix) { char min_mem[10]; - char max_mem[10]; char tmp_char[21]; if (job == NULL) /* Print the Header instead */ _print_str("MIN_MEMORY", width, right_justify, true); else { tmp_char[0] = '\0'; - if (job->job_max_memory < job->job_min_memory) { - convert_num_unit((float)job->job_max_memory, max_mem, - sizeof(max_mem), UNIT_NONE); - strcat(tmp_char, max_mem); - strcat(tmp_char, "-"); - } convert_num_unit((float)job->job_min_memory, min_mem, sizeof(min_mem), UNIT_NONE); strcat(tmp_char, min_mem); @@ -1024,11 +1031,10 @@ int _print_job_dependency(job_info_t * job, int width, bool right_justify, { if (job == NULL) /* Print the Header instead */ _print_str("DEPENDENCY", width, right_justify, true); - else { - char id[FORMAT_STRING_SIZE]; - snprintf(id, FORMAT_STRING_SIZE, "%u", job->dependency); - _print_str(id, width, right_justify, true); - } + else if (job->dependency) + _print_str(job->dependency, width, right_justify, true); + else + _print_str("", width, right_justify, true); if (suffix) printf("%s", suffix); return SLURM_SUCCESS; diff --git a/src/squeue/print.h b/src/squeue/print.h index f596f0a14..a8181aae6 100644 --- a/src/squeue/print.h +++ b/src/squeue/print.h @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -115,6 +115,8 @@ int job_format_add_function(List list, int width, bool right_justify, job_format_add_function(list,wid,right,suffix,_print_job_time_end) #define job_format_add_priority(list,wid,right,suffix) \ job_format_add_function(list,wid,right,suffix,_print_job_priority) +#define job_format_add_priority_long(list,wid,right,suffix) \ + job_format_add_function(list,wid,right,suffix,_print_job_priority_long) #define job_format_add_nodes(list,wid,right,suffix) \ job_format_add_function(list,wid,right,suffix,_print_job_nodes) #define job_format_add_node_inx(list,wid,right,suffix) \ @@ -203,6 +205,8 @@ int _print_job_time_end(job_info_t * job, int width, bool right_justify, char* suffix); int _print_job_priority(job_info_t * job, int width, bool right_justify, char* suffix); +int _print_job_priority_long(job_info_t * job, int width, bool right_justify, + char* suffix); int _print_job_nodes(job_info_t * job, int width, bool right_justify, char* suffix); int _print_job_node_inx(job_info_t * job, int width, bool right_justify, diff --git a/src/squeue/sort.c b/src/squeue/sort.c index 3783871f9..05c42f4f7 100644 --- a/src/squeue/sort.c +++ b/src/squeue/sort.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/squeue/squeue.c b/src/squeue/squeue.c index 488d9e1e7..7a0a5f3a7 100644 --- a/src/squeue/squeue.c +++ b/src/squeue/squeue.c @@ -1,13 +1,13 @@ /*****************************************************************************\ * squeue.c - Report jobs in the slurm system * - * $Id: squeue.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: squeue.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov>, * Morris Jette <jette1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/squeue/squeue.h b/src/squeue/squeue.h index c5ff3e097..0227512ac 100644 --- a/src/squeue/squeue.h +++ b/src/squeue/squeue.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Joey Ekstrom <ekstrom1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sreport/Makefile.am b/src/sreport/Makefile.am new file mode 100644 index 000000000..a4bf7fccb --- /dev/null +++ b/src/sreport/Makefile.am @@ -0,0 +1,21 @@ +# Makefile for sreport + +AUTOMAKE_OPTIONS = foreign + +INCLUDES = -I$(top_srcdir) + +bin_PROGRAMS = sreport + +sreport_SOURCES = \ + sreport.c sreport.h + +sreport_LDADD = \ + $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la \ + $(READLINE_LIBS) + +sreport_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) + +force: +$(convenience_libs) : force + @cd `dirname $@` && $(MAKE) `basename $@` diff --git a/src/slaunch/Makefile.in b/src/sreport/Makefile.in similarity index 80% rename from src/slaunch/Makefile.in rename to src/sreport/Makefile.in index 6e537e164..e15b13e50 100644 --- a/src/slaunch/Makefile.in +++ b/src/sreport/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -14,7 +14,7 @@ @SET_MAKE@ -# +# Makefile for sreport VPATH = @srcdir@ pkgdatadir = $(datadir)/@PACKAGE@ @@ -35,8 +35,8 @@ POST_UNINSTALL = : build_triplet = @build@ host_triplet = @host@ target_triplet = @target@ -bin_PROGRAMS = slaunch$(EXEEXT) -subdir = src/slaunch +bin_PROGRAMS = sreport$(EXEEXT) +subdir = src/sreport DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ @@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -68,15 +70,16 @@ CONFIG_CLEAN_FILES = am__installdirs = "$(DESTDIR)$(bindir)" binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) PROGRAMS = $(bin_PROGRAMS) -am_slaunch_OBJECTS = slaunch.$(OBJEXT) opt.$(OBJEXT) attach.$(OBJEXT) \ - fname.$(OBJEXT) sigstr.$(OBJEXT) core-format.$(OBJEXT) \ - multi_prog.$(OBJEXT) slaunch.wrapper.$(OBJEXT) -slaunch_OBJECTS = $(am_slaunch_OBJECTS) -slaunch_DEPENDENCIES = $(convenience_libs) -slaunch_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ - --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(slaunch_LDFLAGS) \ +am_sreport_OBJECTS = sreport.$(OBJEXT) +sreport_OBJECTS = $(am_sreport_OBJECTS) +am__DEPENDENCIES_1 = +sreport_DEPENDENCIES = $(top_builddir)/src/common/libcommon.o \ + $(top_builddir)/src/api/libslurmhelper.la \ + $(am__DEPENDENCIES_1) +sreport_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sreport_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -88,8 +91,8 @@ CCLD = $(CC) LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ $(LDFLAGS) -o $@ -SOURCES = $(slaunch_SOURCES) -DIST_SOURCES = $(slaunch_SOURCES) +SOURCES = $(sreport_SOURCES) +DIST_SOURCES = $(sreport_SOURCES) ETAGS = etags CTAGS = ctags DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) @@ -116,6 +119,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -129,10 +133,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -152,7 +159,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -163,6 +173,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -178,6 +190,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -193,6 +206,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -250,26 +264,16 @@ target_vendor = @target_vendor@ top_builddir = @top_builddir@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign -INCLUDES = -I$(top_srcdir) -slaunch_SOURCES = \ - slaunch.c slaunch.h \ - opt.c opt.h \ - attach.h \ - attach.c \ - fname.c \ - fname.h \ - sigstr.c \ - sigstr.h \ - core-format.c \ - core-format.h \ - multi_prog.c multi_prog.h \ - slaunch.wrapper.c - -convenience_libs = $(top_builddir)/src/api/libslurmhelper.la -slaunch_LDADD = \ - $(convenience_libs) - -slaunch_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +INCLUDES = -I$(top_srcdir) +sreport_SOURCES = \ + sreport.c sreport.h + +sreport_LDADD = \ + $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la \ + $(READLINE_LIBS) + +sreport_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) all: all-am .SUFFIXES: @@ -283,9 +287,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi exit 1;; \ esac; \ done; \ - echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/slaunch/Makefile'; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/sreport/Makefile'; \ cd $(top_srcdir) && \ - $(AUTOMAKE) --foreign src/slaunch/Makefile + $(AUTOMAKE) --foreign src/sreport/Makefile 
.PRECIOUS: Makefile Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status @case '$?' in \ @@ -312,8 +316,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -331,9 +335,9 @@ clean-binPROGRAMS: echo " rm -f $$p $$f"; \ rm -f $$p $$f ; \ done -slaunch$(EXEEXT): $(slaunch_OBJECTS) $(slaunch_DEPENDENCIES) - @rm -f slaunch$(EXEEXT) - $(slaunch_LINK) $(slaunch_OBJECTS) $(slaunch_LDADD) $(LIBS) +sreport$(EXEEXT): $(sreport_OBJECTS) $(sreport_DEPENDENCIES) + @rm -f sreport$(EXEEXT) + $(sreport_LINK) $(sreport_OBJECTS) $(sreport_LDADD) $(LIBS) mostlyclean-compile: -rm -f *.$(OBJEXT) @@ -341,14 +345,7 @@ mostlyclean-compile: distclean-compile: -rm -f *.tab.c -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/attach.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/core-format.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fname.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/multi_prog.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opt.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sigstr.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slaunch.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slaunch.wrapper.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sreport.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @@ -382,8 +379,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -395,8 +392,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -406,13 +403,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique @@ -506,7 +502,7 @@ 
install-data-am: install-dvi: install-dvi-am -install-exec-am: install-binPROGRAMS install-exec-local +install-exec-am: install-binPROGRAMS install-html: install-html-am @@ -547,34 +543,19 @@ uninstall-am: uninstall-binPROGRAMS distclean-generic distclean-libtool distclean-tags distdir dvi \ dvi-am html html-am info info-am install install-am \ install-binPROGRAMS install-data install-data-am install-dvi \ - install-dvi-am install-exec install-exec-am install-exec-local \ - install-html install-html-am install-info install-info-am \ - install-man install-pdf install-pdf-am install-ps \ - install-ps-am install-strip installcheck installcheck-am \ - installdirs maintainer-clean maintainer-clean-generic \ - mostlyclean mostlyclean-compile mostlyclean-generic \ - mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \ - uninstall-am uninstall-binPROGRAMS + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am \ + uninstall-binPROGRAMS force: $(convenience_libs) : force @cd `dirname $@` && $(MAKE) `basename $@` - -# debugging information is required for symbols in the attach -# module so that a debugger can attach to spawned tasks -attach.o : attach.c - $(COMPILE) -c -g -o attach.o $(srcdir)/attach.c - -install-exec-local: - umask 022; \ - if [ -x /usr/lib/rpm/debugedit ]; then \ - srcdir=`cd $(top_srcdir) && pwd`; \ - /usr/lib/rpm/debugedit -b $$srcdir -d $(pkglibdir) \ - $(DESTDIR)$(bindir)/slaunch; fi; \ - mkdir -p -m 755 $(DESTDIR)$(pkglibdir)/src/slaunch - $(INSTALL) -m 644 $(top_srcdir)/src/slaunch/slaunch.wrapper.c \ - $(DESTDIR)$(pkglibdir)/src/slaunch/slaunch.wrapper.c # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. .NOEXPORT: diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c new file mode 100644 index 000000000..c3dfb8223 --- /dev/null +++ b/src/sreport/sreport.c @@ -0,0 +1,379 @@ +/*****************************************************************************\ + * sreport.c - report generating tool for slurm accounting. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/sreport/sreport.h" +#include "src/common/xsignal.h" + +#define OPT_LONG_HIDE 0x102 +#define BUFFER_SIZE 4096 + +char *command_name; +int exit_code; /* sreport's exit code, =1 on any error at any time */ +int exit_flag; /* program to terminate if =1 */ +int input_words; /* number of words of input permitted */ +int one_liner; /* one record per line if =1 */ +int quiet_flag; /* quiet=1, verbose=-1, normal=0 */ +int rollback_flag; /* immediate execute=1, else = 0 */ +int with_assoc_flag = 0; +void *db_conn = NULL; +uint32_t my_uid = 0; + +static int _get_command (int *argc, char *argv[]); +static void _print_version( void ); +static int _process_command (int argc, char *argv[]); +static void _usage (); + +int +main (int argc, char *argv[]) +{ + int error_code = SLURM_SUCCESS, i, opt_char, input_field_count; + char **input_fields; + log_options_t opts = LOG_OPTS_STDERR_ONLY ; + + int option_index; + static struct option long_options[] = { + {"help", 0, 0, 'h'}, + {"immediate",0, 0, 'i'}, + {"oneliner", 0, 0, 'o'}, + {"no_header", 0, 0, 'n'}, + {"parsable", 0, 0, 'p'}, + {"quiet", 0, 0, 'q'}, + {"usage", 0, 0, 'h'}, + {"verbose", 0, 0, 'v'}, + {"version", 0, 0, 'V'}, + {NULL, 0, 0, 0} + }; + + command_name = argv[0]; + rollback_flag = 1; + exit_code = 0; + exit_flag = 0; + input_field_count = 0; + quiet_flag = 0; + log_init("sacctmgr", opts, SYSLOG_FACILITY_DAEMON, NULL); + + while((opt_char = getopt_long(argc, argv, "hionpqsvV", + long_options, &option_index)) != -1) { + switch (opt_char) { + case (int)'?': + fprintf(stderr, "Try \"sacctmgr --help\" " + "for more information\n"); + exit(1); + break; + case (int)'h': + _usage (); + exit(exit_code); + break; + case (int)'i': + rollback_flag = 0; + break; + case (int)'o': + one_liner = 1; + break; +/* case (int)'n': */ +/* have_header = 0; */ +/* break; */ +/* case (int)'p': */ +/* parsable_print = 1; */ +/* break; */ + case (int)'q': + quiet_flag = 1; + break; + case (int)'s': + with_assoc_flag = 1; + break; + case (int)'v': + quiet_flag = -1; + break; + case (int)'V': + _print_version(); + exit(exit_code); + break; + default: + exit_code = 1; + fprintf(stderr, "getopt error, returned %c\n", + opt_char); + exit(exit_code); + } + } + + if (argc > MAX_INPUT_FIELDS) /* bogus input, but continue anyway */ + input_words = argc; + else + input_words = 128; + input_fields = (char **) xmalloc (sizeof (char *) * input_words); + if (optind < argc) { + for (i = optind; i < argc; i++) { + input_fields[input_field_count++] = argv[i]; + } + } + + db_conn = acct_storage_g_get_connection(false, rollback_flag); + my_uid = getuid(); + + if (input_field_count) + exit_flag = 1; + else 
+ error_code = _get_command (&input_field_count, input_fields); + while (error_code == SLURM_SUCCESS) { + error_code = _process_command (input_field_count, + input_fields); + if (error_code || exit_flag) + break; + error_code = _get_command (&input_field_count, input_fields); + } + + acct_storage_g_close_connection(&db_conn); + slurm_acct_storage_fini(); + printf("\n"); + exit(exit_code); +} + +#if !HAVE_READLINE +/* + * Alternative to readline if readline is not available + */ +static char * +getline(const char *prompt) +{ + char buf[4096]; + char *line; + int len; + printf("%s", prompt); + + fgets(buf, 4096, stdin); + len = strlen(buf); + if ((len > 0) && (buf[len-1] == '\n')) + buf[len-1] = '\0'; + else + len++; + line = malloc (len * sizeof(char)); + return strncpy(line, buf, len); +} +#endif + +/* + * _get_command - get a command from the user + * OUT argc - location to store count of arguments + * OUT argv - location to store the argument list + */ +static int +_get_command (int *argc, char **argv) +{ + char *in_line; + static char *last_in_line = NULL; + int i, in_line_size; + static int last_in_line_size = 0; + + *argc = 0; + +#if HAVE_READLINE + in_line = readline ("sreport: "); +#else + in_line = getline("sreport: "); +#endif + if (in_line == NULL) + return 0; + else if (strcmp (in_line, "!!") == 0) { + free (in_line); + in_line = last_in_line; + in_line_size = last_in_line_size; + } else { + if (last_in_line) + free (last_in_line); + last_in_line = in_line; + last_in_line_size = in_line_size = strlen (in_line); + } + +#if HAVE_READLINE + add_history(in_line); +#endif + + /* break in_line into tokens */ + for (i = 0; i < in_line_size; i++) { + bool double_quote = false, single_quote = false; + if (in_line[i] == '\0') + break; + if (isspace ((int) in_line[i])) + continue; + if (((*argc) + 1) > MAX_INPUT_FIELDS) { /* bogus input line */ + exit_code = 1; + fprintf (stderr, + "%s: can not process over %d words\n", + command_name, input_words); + return E2BIG; + } + argv[(*argc)++] = &in_line[i]; + for (i++; i < in_line_size; i++) { + if (in_line[i] == '\042') { + double_quote = !double_quote; + continue; + } + if (in_line[i] == '\047') { + single_quote = !single_quote; + continue; + } + if (in_line[i] == '\0') + break; + if (double_quote || single_quote) + continue; + if (isspace ((int) in_line[i])) { + in_line[i] = '\0'; + break; + } + } + } + return 0; +} + + +static void _print_version(void) +{ + printf("%s %s\n", PACKAGE, SLURM_VERSION); + if (quiet_flag == -1) { + long version = slurm_api_version(); + printf("slurm_api_version: %ld, %ld.%ld.%ld\n", version, + SLURM_VERSION_MAJOR(version), + SLURM_VERSION_MINOR(version), + SLURM_VERSION_MICRO(version)); + } +} + +/* + * _process_command - process the user's command + * IN argc - count of arguments + * IN argv - the arguments + * RET 0 or errno (only for errors fatal to sreport) + */ +static int +_process_command (int argc, char *argv[]) +{ + if (argc < 1) { + exit_code = 1; + if (quiet_flag == -1) + fprintf(stderr, "no input"); + } else if (strncasecmp (argv[0], "help", 2) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for keyword:%s\n", + argv[0]); + } + _usage (); + } else if (strncasecmp (argv[0], "oneliner", 1) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for keyword:%s\n", + argv[0]); + } + one_liner = 1; + } else if (strncasecmp (argv[0], "quiet", 4) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, "too many arguments for keyword:%s\n", 
+ argv[0]); + } + quiet_flag = 1; + } else if ((strncasecmp (argv[0], "exit", 4) == 0) || + (strncasecmp (argv[0], "quit", 4) == 0)) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for keyword:%s\n", + argv[0]); + } + exit_flag = 1; + } else if (strncasecmp (argv[0], "verbose", 4) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for %s keyword\n", + argv[0]); + } + quiet_flag = -1; + } else if (strncasecmp (argv[0], "version", 4) == 0) { + if (argc > 1) { + exit_code = 1; + fprintf (stderr, + "too many arguments for %s keyword\n", + argv[0]); + } + _print_version(); + } else { + exit_code = 1; + fprintf (stderr, "invalid keyword: %s\n", argv[0]); + } + + return 0; +} + +/* _usage - show the valid sreport commands */ +void _usage () { + printf ("\ +sreport [<OPTION>] [<COMMAND>] \n\ + Valid <OPTION> values are: \n\ + -h or --help: equivalent to \"help\" command \n\ + -o or --oneliner: equivalent to \"oneliner\" command \n\ + -q or --quiet: equivalent to \"quiet\" command \n\ + -s or --associations: equivalent to \"associations\" command \n\ + -v or --verbose: equivalent to \"verbose\" command \n\ + -V or --version: equivalent to \"version\" command \n\ + \n\ + <keyword> may be omitted from the execute line and sreport will execute \n\ + in interactive mode. It will process commands as entered until explicitly\n\ + terminated. \n\ + \n\ + Valid <COMMAND> values are: \n\ + exit terminate sreport \n\ + help print this description of use. \n\ + no_header no header will be added to the beginning of \n\ + output. \n\ + oneliner report output one record per line. \n\ + quiet print no messages other than error messages. \n\ + quit terminate this command. \n\ + parsable output will be | delimited \n\ + verbose enable detailed logging. \n\ + version display tool version number. \n\ + !! Repeat the last command entered. \n\ + \n\ + \n\ + All commands entitys, and options are case-insensitive. \n\n"); + +} + diff --git a/src/sreport/sreport.h b/src/sreport/sreport.h new file mode 100644 index 000000000..034f03e5c --- /dev/null +++ b/src/sreport/sreport.h @@ -0,0 +1,88 @@ +/*****************************************************************************\ + * sreport.h - report generating tool for slurm accounting header file. + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Copyright (C) 2002-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. 
If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#ifndef __SREPORT_H__ +#define __SREPORT_H__ + +#if HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_GETOPT_H +# include <getopt.h> +#else +# include "src/common/getopt.h" +#endif + +#include <ctype.h> +#include <errno.h> +#include <stdio.h> +#include <stdlib.h> +#ifdef HAVE_STRING_H +# include <string.h> +#endif +#ifdef HAVE_STRINGS_H +# include <strings.h> +#endif +#include <time.h> +#include <unistd.h> + +#if HAVE_READLINE +# include <readline/readline.h> +# include <readline/history.h> +#endif + +#if HAVE_INTTYPES_H +# include <inttypes.h> +#else /* !HAVE_INTTYPES_H */ +# if HAVE_STDINT_H +# include <stdint.h> +# endif +#endif /* HAVE_INTTYPES_H */ + +#include <slurm/slurm.h> + +#include "src/common/jobacct_common.h" +#include "src/common/parse_time.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/xstring.h" + +#define CKPT_WAIT 10 +#define MAX_INPUT_FIELDS 128 + +#endif /* HAVE_SREPORT_H */ diff --git a/src/srun/Makefile.am b/src/srun/Makefile.am index 386123778..80a846909 100644 --- a/src/srun/Makefile.am +++ b/src/srun/Makefile.am @@ -10,18 +10,11 @@ srun_SOURCES = \ srun.c srun.h \ opt.c opt.h \ srun_job.c srun_job.h \ - msg.c msg.h \ - signals.c signals.h \ - launch.c \ - launch.h \ - attach.h \ - attach.c \ - reattach.c \ - reattach.h \ + srun_pty.c srun_pty.h \ + debugger.h \ + debugger.c \ fname.c \ fname.h \ - sigstr.c \ - sigstr.h \ allocate.c \ allocate.h \ core-format.c \ @@ -34,7 +27,7 @@ convenience_libs = $(top_builddir)/src/api/libslurmhelper.la srun_LDADD = \ $(convenience_libs) -srun_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +srun_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) force: $(convenience_libs) : force diff --git a/src/srun/Makefile.in b/src/srun/Makefile.in index 98aa2874a..4485b7d17 100644 --- a/src/srun/Makefile.in +++ b/src/srun/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -69,16 +71,15 @@ am__installdirs = "$(DESTDIR)$(bindir)" binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) PROGRAMS = $(bin_PROGRAMS) am_srun_OBJECTS = srun.$(OBJEXT) opt.$(OBJEXT) srun_job.$(OBJEXT) \ - msg.$(OBJEXT) signals.$(OBJEXT) launch.$(OBJEXT) \ - attach.$(OBJEXT) reattach.$(OBJEXT) fname.$(OBJEXT) \ - sigstr.$(OBJEXT) allocate.$(OBJEXT) core-format.$(OBJEXT) \ - multi_prog.$(OBJEXT) srun.wrapper.$(OBJEXT) + srun_pty.$(OBJEXT) debugger.$(OBJEXT) fname.$(OBJEXT) \ + allocate.$(OBJEXT) core-format.$(OBJEXT) multi_prog.$(OBJEXT) \ + srun.wrapper.$(OBJEXT) srun_OBJECTS = $(am_srun_OBJECTS) srun_DEPENDENCIES = $(convenience_libs) srun_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(srun_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -118,6 +119,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -131,10 +133,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -154,7 +159,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -165,6 +173,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -180,6 +190,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -195,6 +206,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -257,18 +269,11 @@ srun_SOURCES = \ srun.c srun.h \ opt.c opt.h \ srun_job.c srun_job.h \ - msg.c msg.h \ - signals.c signals.h \ - launch.c \ - launch.h \ - attach.h \ - attach.c \ - reattach.c \ - reattach.h \ + srun_pty.c srun_pty.h \ + debugger.h \ + debugger.c \ fname.c \ fname.h \ - sigstr.c \ - sigstr.h \ allocate.c \ allocate.h \ core-format.c \ @@ -280,7 +285,7 @@ convenience_libs = $(top_builddir)/src/api/libslurmhelper.la 
srun_LDADD = \ $(convenience_libs) -srun_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(FEDERATION_LDFLAGS) +srun_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) all: all-am .SUFFIXES: @@ -323,8 +328,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -353,19 +358,15 @@ distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/allocate.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/attach.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/core-format.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/debugger.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fname.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/launch.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msg.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/multi_prog.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opt.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reattach.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signals.Po@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sigstr.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun.wrapper.Po@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun_job.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun_pty.Po@am__quote@ .c.o: @am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< @@ -399,8 +400,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -412,8 +413,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -423,13 +424,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z 
"$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/srun/allocate.c b/src/srun/allocate.c index b1abd672a..0ae018334 100644 --- a/src/srun/allocate.c +++ b/src/srun/allocate.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/srun/allocate.c - srun functions for managing node allocations - * $Id: allocate.c 13231 2008-02-08 17:16:47Z jette $ + * $Id: allocate.c 13771 2008-04-02 20:03:47Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -55,41 +55,141 @@ #include "src/common/xstring.h" #include "src/common/forward.h" #include "src/common/env.h" +#include "src/common/fd.h" #include "src/srun/allocate.h" -#include "src/srun/msg.h" #include "src/srun/opt.h" -#include "src/srun/attach.h" +#include "src/srun/debugger.h" #define MAX_ALLOC_WAIT 60 /* seconds */ #define MIN_ALLOC_WAIT 5 /* seconds */ #define MAX_RETRIES 10 +pthread_mutex_t msg_lock = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t msg_cond = PTHREAD_COND_INITIALIZER; +allocation_msg_thread_t *msg_thr = NULL; +resource_allocation_response_msg_t *global_resp = NULL; +struct pollfd global_fds[1]; + extern char **environ; +static bool exit_flag = false; +static uint32_t pending_job_id = 0; + /* * Static Prototypes */ -static int _accept_msg_connection(slurm_fd slurmctld_fd, - resource_allocation_response_msg_t **resp); -static int _handle_msg(slurm_msg_t *msg, \ - resource_allocation_response_msg_t **resp); -static int _wait_for_alloc_rpc(int sleep_time, - resource_allocation_response_msg_t **resp); -static void _wait_for_resources(resource_allocation_response_msg_t **resp); -static bool _retry(); +static void _set_pending_job_id(uint32_t job_id); +static void _ignore_signal(int signo); +static void _exit_on_signal(int signo); +static void _signal_while_allocating(int signo); static void _intr_handler(int signo); -static job_step_create_request_msg_t * _step_req_create(srun_job_t *j); - static sig_atomic_t destroy_job = 0; -static srun_job_t *allocate_job = NULL; + +static void _set_pending_job_id(uint32_t job_id) +{ + info("Pending job allocation %u", job_id); + pending_job_id = job_id; +} + +static void _signal_while_allocating(int signo) +{ + destroy_job = 1; + if (pending_job_id != 0) { + slurm_complete_job(pending_job_id, 0); + } +} + +static void _ignore_signal(int signo) +{ + /* do nothing */ +} + +static void _exit_on_signal(int signo) +{ + exit_flag = true; +} + +/* This typically signifies the job was cancelled by scancel */ +static void _job_complete_handler(srun_job_complete_msg_t *msg) +{ + info("Force Terminated job"); +} + +/* + * Job has been notified of it's approaching time limit. + * Job will be killed shortly after timeout. + * This RPC can arrive multiple times with the same or updated timeouts. + * FIXME: We may want to signal the job or perform other action for this. + * FIXME: How much lead time do we want for this message? Some jobs may + * require tens of minutes to gracefully terminate. 
+ */ +static void _timeout_handler(srun_timeout_msg_t *msg) +{ + static time_t last_timeout = 0; + + if (msg->timeout != last_timeout) { + last_timeout = msg->timeout; + verbose("job time limit to be reached at %s", + ctime(&msg->timeout)); + } +} + +static void _user_msg_handler(srun_user_msg_t *msg) +{ + info("%s", msg->msg); +} + +static void _ping_handler(srun_ping_msg_t *msg) +{ + /* the api will respond so there really isn't anything to do + here */ +} + +static void _node_fail_handler(srun_node_fail_msg_t *msg) +{ + error("Node failure on %s", msg->nodelist); +} + + + +static bool _retry() +{ + static int retries = 0; + static char *msg = "Slurm controller not responding, " + "sleeping and retrying."; + + if (errno == ESLURM_ERROR_ON_DESC_TO_RECORD_COPY) { + if (retries == 0) + error (msg); + else if (retries < MAX_RETRIES) + debug (msg); + else + return false; + sleep (++retries); + } else { + error("Unable to allocate resources: %m"); + return false; + } + + return true; +} + +/* + * SIGINT handler while waiting for resources to become available. + */ +static void +_intr_handler(int signo) +{ + destroy_job = 1; +} int allocate_test(void) { int rc; - job_desc_msg_t *j = job_desc_msg_create_from_opts (NULL); + job_desc_msg_t *j = job_desc_msg_create_from_opts(); if(!j) return SLURM_ERROR; @@ -101,23 +201,13 @@ allocate_test(void) resource_allocation_response_msg_t * allocate_nodes(void) { - int rc = 0; - static int sigarray[] = { SIGQUIT, SIGINT, SIGTERM, 0 }; - SigFunc *oquitf, *ointf, *otermf; - sigset_t oset; resource_allocation_response_msg_t *resp = NULL; - job_desc_msg_t *j = job_desc_msg_create_from_opts (NULL); + job_desc_msg_t *j = job_desc_msg_create_from_opts(); + slurm_allocation_callbacks_t callbacks; if(!j) return NULL; - oquitf = xsignal(SIGQUIT, _intr_handler); - ointf = xsignal(SIGINT, _intr_handler); - otermf = xsignal(SIGTERM, _intr_handler); - - xsignal_save_mask(&oset); - xsignal_unblock(sigarray); - /* Do not re-use existing job id when submitting new job * from within a running job */ if ((j->job_id != NO_VAL) && !opt.jobid_set) { @@ -127,33 +217,54 @@ allocate_nodes(void) if (!opt.jobid_set) /* Let slurmctld set jobid */ j->job_id = NO_VAL; } - - while ((rc = slurm_allocate_resources(j, &resp) < 0) && _retry()) { - if (destroy_job) - goto done; - } - - if(!resp) - goto done; - - if ((rc == 0) && (resp->node_list == NULL)) { - if (resp->error_code) - verbose("Warning: %s", - slurm_strerror(resp->error_code)); - _wait_for_resources(&resp); + callbacks.ping = _ping_handler; + callbacks.timeout = _timeout_handler; + callbacks.job_complete = _job_complete_handler; + callbacks.user_msg = _user_msg_handler; + callbacks.node_fail = _node_fail_handler; + + /* create message thread to handle pings and such from slurmctld */ + msg_thr = slurm_allocation_msg_thr_create(&j->other_port, &callbacks); + + xsignal(SIGHUP, _signal_while_allocating); + xsignal(SIGINT, _signal_while_allocating); + xsignal(SIGQUIT, _signal_while_allocating); + xsignal(SIGPIPE, _signal_while_allocating); + xsignal(SIGTERM, _signal_while_allocating); + xsignal(SIGUSR1, _signal_while_allocating); + xsignal(SIGUSR2, _signal_while_allocating); + + while (!resp) { + resp = slurm_allocate_resources_blocking(j, opt.max_wait, + _set_pending_job_id); + if (destroy_job) { + /* cancelled by signal */ + break; + } else if(!resp && !_retry()) { + break; + } } - - done: - xsignal_set_mask(&oset); - xsignal(SIGINT, ointf); - xsignal(SIGTERM, otermf); - xsignal(SIGQUIT, oquitf); + + xsignal(SIGHUP, 
_exit_on_signal); + xsignal(SIGINT, _ignore_signal); + xsignal(SIGQUIT, _ignore_signal); + xsignal(SIGPIPE, _ignore_signal); + xsignal(SIGTERM, _ignore_signal); + xsignal(SIGUSR1, _ignore_signal); + xsignal(SIGUSR2, _ignore_signal); job_desc_msg_destroy(j); return resp; } +int +cleanup_allocation() +{ + slurm_allocation_msg_thr_destroy(msg_thr); + return SLURM_SUCCESS; +} + resource_allocation_response_msg_t * existing_allocation(void) { @@ -181,222 +292,44 @@ existing_allocation(void) return resp; } -static void -_wait_for_resources(resource_allocation_response_msg_t **resp) -{ - resource_allocation_response_msg_t *r = *resp; - int sleep_time = MIN_ALLOC_WAIT; - int job_id = r->job_id; - - if (!opt.quiet) - info ("job %u queued and waiting for resources", r->job_id); - - slurm_free_resource_allocation_response_msg(r); - - /* Keep polling until the job is allocated resources */ - while (_wait_for_alloc_rpc(sleep_time, resp) <= 0) { - - if (slurm_allocation_lookup_lite(job_id, resp) >= 0) - break; - - if (slurm_get_errno() == ESLURM_JOB_PENDING) - debug3 ("Still waiting for allocation"); - else - fatal ("Unable to confirm allocation for job %u: %m", - job_id); - - if (destroy_job) { - verbose("cancelling job %u", job_id); - slurm_complete_job(job_id, 0); - debugger_launch_failure(allocate_job); - exit(0); - } - - if (sleep_time < MAX_ALLOC_WAIT) - sleep_time++; - } - if (!opt.quiet) - info ("job %u has been allocated resources", (*resp)->job_id); -} - -/* Wait up to sleep_time for RPC from slurmctld indicating resource allocation - * has occured. - * IN sleep_time: delay in seconds - * OUT resp: resource allocation response message - * RET 1 if resp is filled in, 0 otherwise */ -static int -_wait_for_alloc_rpc(int sleep_time, resource_allocation_response_msg_t **resp) -{ - struct pollfd fds[1]; - slurm_fd slurmctld_fd; - - if ((slurmctld_fd = slurmctld_msg_init()) < 0) { - sleep (sleep_time); - return (0); - } - - fds[0].fd = slurmctld_fd; - fds[0].events = POLLIN; - - while (poll (fds, 1, (sleep_time * 1000)) < 0) { - switch (errno) { - case EAGAIN: - case EINTR: - return (-1); - case ENOMEM: - case EINVAL: - case EFAULT: - fatal("poll: %m"); - default: - error("poll: %m. Continuing..."); - } - } - - if (fds[0].revents & POLLIN) - return (_accept_msg_connection(slurmctld_fd, resp)); - - return (0); -} - -/* Accept RPC from slurmctld and process it. 
- * IN slurmctld_fd: file descriptor for slurmctld communications - * OUT resp: resource allocation response message - * RET 1 if resp is filled in, 0 otherwise */ -static int -_accept_msg_connection(slurm_fd slurmctld_fd, - resource_allocation_response_msg_t **resp) -{ - slurm_fd fd; - slurm_msg_t *msg = NULL; - slurm_addr cli_addr; - char host[256]; - uint16_t port; - int rc = 0; - - fd = slurm_accept_msg_conn(slurmctld_fd, &cli_addr); - if (fd < 0) { - error("Unable to accept connection: %m"); - return rc; - } - - slurm_get_addr(&cli_addr, &port, host, sizeof(host)); - debug2("got message connection from %s:%hu", host, port); - - msg = xmalloc(sizeof(slurm_msg_t)); - slurm_msg_t_init(msg); - - again: - if(slurm_receive_msg(fd, msg, 0) != 0) { - if (errno == EINTR) { - goto again; - } - error("_accept_msg_connection[%s]: %m", host); - rc = SLURM_ERROR; - goto cleanup; - - } - - rc = _handle_msg(msg, resp); /* handle_msg frees msg->data */ -cleanup: - slurm_free_msg(msg); - - slurm_close_accepted_conn(fd); - return rc; -} - -/* process RPC from slurmctld - * IN msg: message recieved - * OUT resp: resource allocation response message - * RET 1 if resp is filled in, 0 otherwise */ -static int -_handle_msg(slurm_msg_t *msg, resource_allocation_response_msg_t **resp) +/* Set up port to handle messages from slurmctld */ +slurm_fd +slurmctld_msg_init(void) { - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); - uid_t uid = getuid(); - uid_t slurm_uid = (uid_t) slurm_get_slurm_user_id(); - int rc = 0; - srun_timeout_msg_t *to; - srun_user_msg_t *um; - - if ((req_uid != slurm_uid) && (req_uid != 0) && (req_uid != uid)) { - error ("Security violation, slurm message from uid %u", - (unsigned int) req_uid); - return 0; - } - - switch (msg->msg_type) { - case SRUN_PING: - debug3("slurmctld ping received"); - slurm_send_rc_msg(msg, SLURM_SUCCESS); - slurm_free_srun_ping_msg(msg->data); - break; - case SRUN_JOB_COMPLETE: - debug3("job complete received"); - /* FIXME: do something here */ - slurm_free_srun_job_complete_msg(msg->data); - break; - case RESPONSE_RESOURCE_ALLOCATION: - debug2("resource allocation response received"); - slurm_send_rc_msg(msg, SLURM_SUCCESS); - *resp = msg->data; - rc = 1; - break; - case SRUN_TIMEOUT: - debug2("timeout received"); - to = msg->data; - timeout_handler(to->timeout); - slurm_free_srun_timeout_msg(msg->data); - break; - case SRUN_USER_MSG: - um = msg->data; - info("%s", um->msg); - slurm_free_srun_user_msg(msg->data); - break; - default: - error("received spurious message type: %d\n", - msg->msg_type); - } - return rc; -} - -static bool -_retry() -{ - static int retries = 0; - static char *msg = "Slurm job queue full, sleeping and retrying."; - - if (errno == ESLURM_ERROR_ON_DESC_TO_RECORD_COPY) { - if (retries == 0) - error (msg); - else if (retries < MAX_RETRIES) - debug (msg); - else - return false; - sleep (++retries); - } else { - error("Unable to allocate resources: %m"); - return false; - } - - return true; + slurm_addr slurm_address; + uint16_t port; + static slurm_fd slurmctld_fd = (slurm_fd) NULL; + + if (slurmctld_fd) /* May set early for queued job allocation */ + return slurmctld_fd; + + slurmctld_fd = -1; + slurmctld_comm_addr.hostname = NULL; + slurmctld_comm_addr.port = 0; + + if ((slurmctld_fd = slurm_init_msg_engine_port(0)) < 0) + fatal("slurm_init_msg_engine_port error %m"); + if (slurm_get_stream_addr(slurmctld_fd, &slurm_address) < 0) + fatal("slurm_get_stream_addr error %m"); + fd_set_nonblocking(slurmctld_fd); + /* hostname is not set, 
so slurm_get_addr fails + slurm_get_addr(&slurm_address, &port, hostname, sizeof(hostname)); */ + port = ntohs(slurm_address.sin_port); + slurmctld_comm_addr.hostname = xstrdup(opt.ctrl_comm_ifhn); + slurmctld_comm_addr.port = port; + debug2("slurmctld messages to host=%s,port=%u", + slurmctld_comm_addr.hostname, + slurmctld_comm_addr.port); + + return slurmctld_fd; } -/* - * SIGINT handler while waiting for resources to become available. - */ -static void -_intr_handler(int signo) -{ - destroy_job = 1; -} - - /* * Create job description structure based off srun options * (see opt.h) */ job_desc_msg_t * -job_desc_msg_create_from_opts (char *script) +job_desc_msg_create_from_opts () { job_desc_msg_t *j = xmalloc(sizeof(*j)); char buf[8192]; @@ -461,6 +394,8 @@ job_desc_msg_create_from_opts (char *script) j->mail_user = xstrdup(opt.mail_user); if (opt.begin) j->begin_time = opt.begin; + if (opt.licenses) + j->licenses = xstrdup(opt.licenses); if (opt.network) j->network = xstrdup(opt.network); if (opt.account) @@ -516,8 +451,6 @@ job_desc_msg_create_from_opts (char *script) j->job_min_threads = opt.job_min_threads; if (opt.job_min_memory != NO_VAL) j->job_min_memory = opt.job_min_memory; - if (opt.job_max_memory != NO_VAL) - j->job_max_memory = opt.job_max_memory; if (opt.job_min_tmp_disk != NO_VAL) j->job_min_tmp_disk = opt.job_min_tmp_disk; if (opt.overcommit) { @@ -541,45 +474,6 @@ job_desc_msg_create_from_opts (char *script) * message as all other messages */ j->alloc_resp_port = slurmctld_comm_addr.port; j->other_port = slurmctld_comm_addr.port; - if (slurmctld_comm_addr.hostname) { - j->alloc_resp_hostname = xstrdup(slurmctld_comm_addr.hostname); - j->other_hostname = xstrdup(slurmctld_comm_addr.hostname); - } else { - j->alloc_resp_hostname = NULL; - j->other_hostname = NULL; - } - - if (script) { - /* - * If script is set then we are building a request for - * a batch job - */ - xassert (opt.batch); - - j->environment = NULL; - if (opt.get_user_env_time >= 0) { - struct passwd *pw = NULL; - pw = getpwuid(opt.uid); - if (pw != NULL) { - j->environment = env_array_user_default( - pw->pw_name, - opt.get_user_env_time, - opt.get_user_env_mode); - if (j->environment == NULL) - exit(1); /* error already logged */ - } - } - env_array_merge(&j->environment, (const char **)environ); - j->env_size = envcount (j->environment); - j->script = script; - j->argv = remote_argv; - j->argc = remote_argc; - j->err = opt.efname; - j->in = opt.ifname; - j->out = opt.ofname; - j->work_dir = opt.cwd; - j->no_requeue = opt.no_requeue; - } return (j); } @@ -590,136 +484,126 @@ job_desc_msg_destroy(job_desc_msg_t *j) if (j) { xfree(j->account); xfree(j->comment); - xfree(j->alloc_resp_hostname); - xfree(j->other_hostname); xfree(j); } } -static job_step_create_request_msg_t * -_step_req_create(srun_job_t *j) +int +create_job_step(srun_job_t *job) { - job_step_create_request_msg_t *r = xmalloc(sizeof(*r)); - r->job_id = j->jobid; - r->user_id = opt.uid; - - r->node_count = j->nhosts; - /* info("send %d or %d? sending %d", opt.max_nodes, */ -/* j->nhosts, r->node_count); */ - if(r->node_count > j->nhosts) { - error("Asking for more nodes that allocated"); - return NULL; - } - r->cpu_count = opt.overcommit ? r->node_count - : (opt.nprocs*opt.cpus_per_task); - r->num_tasks = opt.nprocs; - r->node_list = xstrdup(opt.nodelist); - r->network = xstrdup(opt.network); - r->name = xstrdup(opt.job_name); - r->relative = (uint16_t)opt.relative; - r->overcommit = opt.overcommit ? 
1 : 0; - debug("requesting job %d, user %d, nodes %d including (%s)", - r->job_id, r->user_id, r->node_count, r->node_list); - debug("cpus %d, tasks %d, name %s, relative %d", - r->cpu_count, r->num_tasks, r->name, r->relative); + int i, rc; + SigFunc *oquitf = NULL, *ointf = NULL, *otermf = NULL; + + slurm_step_ctx_params_t_init(&job->ctx_params); + job->ctx_params.job_id = job->jobid; + job->ctx_params.uid = opt.uid; + + /* set the jobid for totalview */ + totalview_jobid = NULL; + xstrfmtcat(totalview_jobid, "%u", job->ctx_params.job_id); + + job->ctx_params.node_count = job->nhosts; + job->ctx_params.task_count = opt.nprocs; + job->ctx_params.cpu_count = opt.overcommit ? job->ctx_params.node_count + : (opt.nprocs*opt.cpus_per_task); + + job->ctx_params.relative = (uint16_t)opt.relative; + if (opt.task_mem != NO_VAL) + job->ctx_params.mem_per_task = (uint16_t)opt.task_mem; + job->ctx_params.ckpt_interval = (uint16_t)opt.ckpt_interval; + job->ctx_params.ckpt_path = opt.ckpt_path; + job->ctx_params.exclusive = (uint16_t)opt.exclusive; + job->ctx_params.immediate = (uint16_t)opt.immediate; + job->ctx_params.verbose_level = (uint16_t)_verbose; switch (opt.distribution) { case SLURM_DIST_BLOCK: - r->task_dist = SLURM_DIST_BLOCK; - break; case SLURM_DIST_ARBITRARY: - r->task_dist = SLURM_DIST_ARBITRARY; - break; case SLURM_DIST_CYCLIC: - r->task_dist = SLURM_DIST_CYCLIC; - break; case SLURM_DIST_CYCLIC_CYCLIC: - r->task_dist = SLURM_DIST_CYCLIC_CYCLIC; - break; case SLURM_DIST_CYCLIC_BLOCK: - r->task_dist = SLURM_DIST_CYCLIC_BLOCK; - break; case SLURM_DIST_BLOCK_CYCLIC: - r->task_dist = SLURM_DIST_BLOCK_CYCLIC; - break; case SLURM_DIST_BLOCK_BLOCK: - r->task_dist = SLURM_DIST_BLOCK_BLOCK; + job->ctx_params.task_dist = opt.distribution; break; case SLURM_DIST_PLANE: - r->task_dist = SLURM_DIST_PLANE; - r->plane_size = opt.plane_size; + job->ctx_params.task_dist = SLURM_DIST_PLANE; + job->ctx_params.plane_size = opt.plane_size; break; default: - r->task_dist = (r->num_tasks <= r->node_count) + job->ctx_params.task_dist = (job->ctx_params.task_count <= + job->ctx_params.node_count) ? SLURM_DIST_CYCLIC : SLURM_DIST_BLOCK; break; } - opt.distribution = r->task_dist; - - if (slurmctld_comm_addr.port) { - r->host = xstrdup(slurmctld_comm_addr.hostname); - r->port = slurmctld_comm_addr.port; - } + job->ctx_params.overcommit = opt.overcommit ? 
1 : 0; - return(r); -} - -int -create_job_step(srun_job_t *job) -{ - job_step_create_request_msg_t *req = NULL; - job_step_create_response_msg_t *resp = NULL; - int i; + job->ctx_params.node_list = opt.nodelist; - if (!(req = _step_req_create(job))) { - error ("Unable to allocate step request message"); - return -1; - } - - for (i=0; ;i++) { - if ((slurm_job_step_create(req, &resp) == SLURM_SUCCESS) - && (resp != NULL)) + job->ctx_params.network = opt.network; + job->ctx_params.name = opt.job_name; + + debug("requesting job %d, user %d, nodes %d including (%s)", + job->ctx_params.job_id, job->ctx_params.uid, + job->ctx_params.node_count, job->ctx_params.node_list); + debug("cpus %d, tasks %d, name %s, relative %d", + job->ctx_params.cpu_count, job->ctx_params.task_count, + job->ctx_params.name, job->ctx_params.relative); + + for (i=0; (!destroy_job); i++) { + if(opt.no_alloc) { + job->step_ctx = slurm_step_ctx_create_no_alloc( + &job->ctx_params, job->stepid); + } else + job->step_ctx = slurm_step_ctx_create( + &job->ctx_params); + if (job->step_ctx != NULL) { + if (i > 0) + info("Job step created"); + break; - if (slurm_get_errno() != ESLURM_DISABLED) { + } + rc = slurm_get_errno(); + + if (opt.immediate || + ((rc != ESLURM_NODES_BUSY) && (rc != ESLURM_DISABLED))) { error ("Unable to create job step: %m"); return -1; } - if (i == 0) - info("Job step creation temporarily disabled, retrying"); + + if (i == 0) { + info("Job step creation temporarily disabled, retrying"); + ointf = xsignal(SIGINT, _intr_handler); + otermf = xsignal(SIGTERM, _intr_handler); + oquitf = xsignal(SIGQUIT, _intr_handler); + } else + info("Job step creation still disabled, retrying"); sleep(MIN((i*10), 60)); } - - job->stepid = resp->job_step_id; - job->step_layout = resp->step_layout; - job->cred = resp->cred; - job->switch_job = resp->switch_job; - + if (i > 0) { + xsignal(SIGINT, ointf); + xsignal(SIGQUIT, oquitf); + xsignal(SIGTERM, otermf); + if (destroy_job) { + info("Cancelled pending job step"); + return -1; + } + } + slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_STEPID, &job->stepid); /* Number of hosts in job may not have been initialized yet if * --jobid was used or only SLURM_JOBID was set in user env. * Reset the value here just in case. */ - job->nhosts = job->step_layout->node_cnt; - - if(!job->step_layout) { - error("step_layout not returned"); - return -1; - } + slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_NUM_HOSTS, + &job->nhosts); /* * Recreate filenames which may depend upon step id */ job_update_io_fnames(job); - slurm_free_job_step_create_request_msg(req); - return 0; } -void -set_allocate_job(srun_job_t *job) -{ - allocate_job = job; - return; -} diff --git a/src/srun/allocate.h b/src/srun/allocate.h index 0be8ff65e..91474c653 100644 --- a/src/srun/allocate.h +++ b/src/srun/allocate.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/srun/allocate.h - node allocation functions for srun - * $Id: allocate.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: allocate.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -43,6 +43,13 @@ #include "src/srun/srun_job.h" +typedef struct slurmctld_communication_addr { + char *hostname; + uint16_t port; +} slurmctld_comm_addr_t; + +slurmctld_comm_addr_t slurmctld_comm_addr; + /* * Allocate nodes from the slurm controller -- retrying the attempt * if the controller appears to be down, and optionally waiting for @@ -53,18 +60,24 @@ */ resource_allocation_response_msg_t * allocate_nodes(void); +/* clean up the msg thread polling for information from the controller */ +int cleanup_allocation(); + /* * Test if an allocation would occur now given the job request. * Do not actually allocate resources */ int allocate_test(void); +/* Set up port to handle messages from slurmctld */ +slurm_fd slurmctld_msg_init(void); + /* * Create a job_desc_msg_t object, filled in from the current srun options - * (see opt.h), if script != NULL then this is a batch job. + * (see opt.h) * The resulting memory must be freed with job_desc_msg_destroy() */ -job_desc_msg_t * job_desc_msg_create_from_opts (char *script); +job_desc_msg_t * job_desc_msg_create_from_opts (); /* * Destroy (free memory from) a job_desc_msg_t object allocated with diff --git a/src/srun/attach.h b/src/srun/attach.h deleted file mode 100644 index 4aacc0b48..000000000 --- a/src/srun/attach.h +++ /dev/null @@ -1,84 +0,0 @@ -/****************************************************************************\ - * attach.h - definitions needed for TotalView interactions - ***************************************************************************** - * This file was supplied by James Cownie <jcownie@etnus.com> and provides - * information required to interface Slurm to the TotalView debugger from - * the Etnus Corporation. For more information about TotalView, see - * http://www.etnus.com/ -\*****************************************************************************/ - -/* $Id: attach.h 5227 2005-08-08 18:12:07Z da $ - */ - -/* This file contains support for bringing processes up stopped, so that - * a debugger can attach to them (done for TotalView) - */ - -/* Update log - * - * Nov 27 1996 jcownie@dolphinics.com: Added the executable_name to MPIR_PROCDESC - */ - -#ifndef _ATTACH_INCLUDE -#define _ATTACH_INCLUDE - -#ifndef VOLATILE -#if defined(__STDC__) || defined(__cplusplus) -#define VOLATILE volatile -#else -#define VOLATILE -#endif -#endif -#include "src/srun/srun_job.h" -/***************************************************************************** -* DEBUGGING SUPPORT * -*****************************************************************************/ - - -/* A little struct to hold the target processor name and pid for - * each process which forms part of the MPI program. - * We may need to think more about this once we have dynamic processes... - * - * DO NOT change the name of this structure or its fields. The debugger knows - * them, and will be confused if you change them. - */ -typedef struct { - char * host_name; /* Something we can pass to inet_addr */ - char * executable_name; /* The name of the image */ - int pid; /* The pid of the process */ -} MPIR_PROCDESC; - -/* Array of procdescs for debugging purposes */ -extern MPIR_PROCDESC *MPIR_proctable; -extern int MPIR_proctable_size; - -/* Various global variables which a debugger can use for - * 1) finding out what the state of the program is at - * the time the magic breakpoint is hit. - * 2) inform the process that it has been attached to and is - * now free to run. 
- */ -extern VOLATILE int MPIR_debug_state; -extern VOLATILE int MPIR_debug_gate; -extern int MPIR_being_debugged; /* Cause extra info on internal state - * to be maintained - */ - -/* Values for the debug_state, this seems to be all we need at the moment - * but that may change... - */ -#define MPIR_DEBUG_SPAWNED 1 -#define MPIR_DEBUG_ABORTING 2 - -/* SLURM specific declarations */ -extern int MPIR_i_am_starter; -extern int MPIR_acquired_pre_main; - -extern void MPIR_Breakpoint(void); -extern void debugger_launch_failure(srun_job_t *job); - -/* Value for totalview %J expansion in bulk launch string - */ -extern char *totalview_jobid; - -#endif diff --git a/src/srun/core-format.c b/src/srun/core-format.c index 0efa68112..1edf3fe6a 100644 --- a/src/srun/core-format.c +++ b/src/srun/core-format.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/srun/core-format.c - Change corefile characteristics for job - * $Id: core-format.c 10574 2006-12-15 23:38:29Z jette $ + * $Id: core-format.c 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/srun/core-format.h b/src/srun/core-format.h index 3255ae3e7..696e522f3 100644 --- a/src/srun/core-format.h +++ b/src/srun/core-format.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * src/srun/core-format.h - Change corefile characteristics for job - * $Id: core-format.h 10574 2006-12-15 23:38:29Z jette $ + * $Id: core-format.h 13672 2008-03-19 23:10:58Z jette $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/srun/attach.c b/src/srun/debugger.c similarity index 91% rename from src/srun/attach.c rename to src/srun/debugger.c index 7934007a3..e27b37e3c 100644 --- a/src/srun/attach.c +++ b/src/srun/debugger.c @@ -1,11 +1,11 @@ /*****************************************************************************\ - * attach.c - Definitions needed for parallel debugger - * $Id: attach.c 10574 2006-12-15 23:38:29Z jette $ + * debugger.c - Definitions needed for parallel debugger + * $Id: debugger.c 11149 2007-03-14 20:53:19Z morrone $ ***************************************************************************** * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -16,7 +16,7 @@ * any later version. 
* * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than @@ -42,10 +42,10 @@ #include "src/common/log.h" -#include "src/srun/attach.h" +#include "src/srun/debugger.h" /* - * Instantiate extern variables from attach.h + * Instantiate extern variables from debugger.h */ MPIR_PROCDESC *MPIR_proctable; int MPIR_proctable_size; diff --git a/src/slaunch/attach.h b/src/srun/debugger.h similarity index 92% rename from src/slaunch/attach.h rename to src/srun/debugger.h index c066ca40e..6c34800df 100644 --- a/src/slaunch/attach.h +++ b/src/srun/debugger.h @@ -1,5 +1,5 @@ /****************************************************************************\ - * attach.h - definitions needed for TotalView interactions + * debugger.h - definitions needed for TotalView interactions ***************************************************************************** * This file was supplied by James Cownie <jcownie@etnus.com> and provides * information required to interface Slurm to the TotalView debugger from @@ -7,7 +7,7 @@ * http://www.etnus.com/ \*****************************************************************************/ -/* $Id: attach.h 8570 2006-07-13 21:12:58Z morrone $ +/* $Id: debugger.h 11149 2007-03-14 20:53:19Z morrone $ */ /* This file contains support for bringing processes up stopped, so that @@ -19,8 +19,8 @@ * Nov 27 1996 jcownie@dolphinics.com: Added the executable_name to MPIR_PROCDESC */ -#ifndef _SLAUNCH_ATTACH_INCLUDE -#define _SLAUNCH_ATTACH_INCLUDE +#ifndef _DEBUGGER_INCLUDE +#define _DEBUGGER_INCLUDE #ifndef VOLATILE #if defined(__STDC__) || defined(__cplusplus) @@ -29,7 +29,7 @@ #define VOLATILE #endif #endif - +#include "src/srun/srun_job.h" /***************************************************************************** * DEBUGGING SUPPORT * *****************************************************************************/ diff --git a/src/srun/fname.c b/src/srun/fname.c index 25f7a3357..2b4093234 100644 --- a/src/srun/fname.c +++ b/src/srun/fname.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -64,12 +64,12 @@ * Fill in as much of filename as possible from srun, update * filename type to one of the io types ALL, NONE, PER_TASK, ONE */ -io_filename_t * +fname_t * fname_create(srun_job_t *job, char *format) { unsigned long int wid = 0; unsigned long int taskid = 0; - io_filename_t *fname = NULL; + fname_t *fname = NULL; char *p, *q, *name; fname = xmalloc(sizeof(*fname)); @@ -166,7 +166,7 @@ fname_create(srun_job_t *job, char *format) } void -fname_destroy(io_filename_t *f) +fname_destroy(fname_t *f) { if (f->name) xfree(f->name); @@ -174,7 +174,7 @@ fname_destroy(io_filename_t *f) } char * -fname_remote_string (io_filename_t *f) +fname_remote_string (fname_t *f) { if ((f->type == IO_PER_TASK) || (f->type == IO_ONE)) return (xstrdup (f->name)); diff --git a/src/srun/fname.h b/src/srun/fname.h index 09576a1ed..ff5114553 100644 --- a/src/srun/fname.h +++ b/src/srun/fname.h @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -44,34 +44,17 @@ #include "src/srun/srun_job.h" -enum io_t { - IO_ALL = 0, /* multiplex output from all/bcast stdin to all */ - IO_ONE = 1, /* output from only one task/stdin to one task */ - IO_PER_TASK = 2, /* separate output/input file per task */ - IO_NONE = 3, /* close output/close stdin */ -}; - -#define format_io_t(t) (t == IO_ONE) ? "one" : (t == IO_ALL) ? \ - "all" : "per task" - -struct io_filename { - char *name; - enum io_t type; - int taskid; /* taskid for IO if IO_ONE */ -}; - /* * Create an filename from a (probably user supplied) filename format. * fname_create() will expand the format as much as possible for srun, * leaving node or task specific format specifiers for the remote * slurmd to handle. */ -typedef struct srun_job fname_job_t; -io_filename_t *fname_create(fname_job_t *job, char *format); -void fname_destroy(io_filename_t *fname); +fname_t *fname_create(srun_job_t *job, char *format); +void fname_destroy(fname_t *fname); -char * fname_remote_string (io_filename_t *fname); +char * fname_remote_string (fname_t *fname); #endif /* !_FNAME_H */ diff --git a/src/srun/launch.c b/src/srun/launch.c deleted file mode 100644 index c072aafe7..000000000 --- a/src/srun/launch.c +++ /dev/null @@ -1,377 +0,0 @@ -/****************************************************************************\ - * launch.c - initiate the user job's tasks. - * $Id: launch.c 11920 2007-08-01 22:13:12Z jette $ - ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <grondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <errno.h> -#include <signal.h> -#include <string.h> -#include <unistd.h> -#include <stdlib.h> -#include <sys/param.h> - -#include "src/common/log.h" -#include "src/common/macros.h" -#include "src/common/hostlist.h" -#include "src/common/plugstack.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/slurm_protocol_interface.h" -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/common/xsignal.h" -#include "src/common/forward.h" -#include "src/common/mpi.h" -#include "src/api/step_io.h" - -#include "src/srun/srun_job.h" -#include "src/srun/launch.h" -#include "src/srun/opt.h" - -#define MAX_RETRIES 3 - -extern char **environ; - -/* number of active threads */ -static int fail_launch_cnt = 0; - -static void _print_launch_msg(launch_tasks_request_msg_t *msg, - char * hostname); -static void _update_failed_node(srun_job_t *j, int id); -static void _update_contacted_node(srun_job_t *j, int id); - -int -launch_thr_create(srun_job_t *job) -{ - int e, retries = 0; - pthread_attr_t attr; - - slurm_attr_init(&attr); - while ((e = pthread_create(&job->lid, &attr, &launch, (void *) job))) { - if (++retries > MAX_RETRIES) { - error ("pthread_create error %m"); - slurm_attr_destroy(&attr); - slurm_seterrno_ret(e); - } - sleep(1); /* sleep and try again */ - } - slurm_attr_destroy(&attr); - - debug("Started launch thread (%lu)", (unsigned long) job->lid); - - return SLURM_SUCCESS; -} - -void * -launch(void *arg) -{ - launch_tasks_request_msg_t r; - srun_job_t *job = (srun_job_t *) arg; - int i, my_envc; - slurm_msg_t msg; - ret_data_info_t *ret_data = NULL; - List ret_list = NULL; - ListIterator ret_itr; - int rc = SLURM_SUCCESS; - int nodeid = NO_VAL; - - update_job_state(job, SRUN_JOB_LAUNCHING); - - debug("going to launch %d tasks on %d hosts", - opt.nprocs, job->step_layout->node_cnt); - - my_envc = envcount(environ); - /* convert timeout from sec to milliseconds */ - opt.msg_timeout *= 1000; - memset(&r, 0, sizeof(r)); - - /* Common message contents */ - r.job_id = job->jobid; - r.uid = opt.uid; - r.gid = opt.gid; - r.argc = remote_argc; - r.argv = remote_argv; - r.cred = job->cred; - r.job_step_id = job->stepid; - r.envc = my_envc; - 
r.env = environ; - r.cwd = opt.cwd; - r.nnodes = job->step_layout->node_cnt; - r.nprocs = opt.nprocs; - r.slurmd_debug = opt.slurmd_debug; - r.switch_job = job->switch_job; - r.task_prolog = opt.task_prolog; - r.task_epilog = opt.task_epilog; - r.task_dist = opt.distribution; - r.plane_size = opt.plane_size; - r.cpu_bind_type = opt.cpu_bind_type; - r.cpu_bind = opt.cpu_bind; - r.mem_bind_type = opt.mem_bind_type; - r.mem_bind = opt.mem_bind; - r.multi_prog = opt.multi_prog; - r.options = job_options_create(); - r.complete_nodelist = xstrdup(job->step_layout->node_list); - spank_set_remote_options (r.options); - - r.ofname = fname_remote_string (job->ofname); - r.efname = fname_remote_string (job->efname); - r.ifname = fname_remote_string (job->ifname); - r.buffered_stdio = !opt.unbuffered; - - r.task_flags = 0; - if (opt.parallel_debug) - r.task_flags |= TASK_PARALLEL_DEBUG; - - /* Node specific message contents */ - if (mpi_hook_client_single_task_per_node ()) { - for (i = 0; i < job->step_layout->node_cnt; i++) - job->step_layout->tasks[i] = 1; - } - r.tasks_to_launch = job->step_layout->tasks; - - r.global_task_ids = job->step_layout->tids; - r.cpus_allocated = job->step_layout->tasks; - r.max_sockets = opt.max_sockets_per_node; - r.max_cores = opt.max_cores_per_socket; - r.max_threads = opt.max_threads_per_core; - r.cpus_per_task = opt.cpus_per_task; - - r.ntasks_per_node = opt.ntasks_per_node; - r.ntasks_per_socket = opt.ntasks_per_socket; - r.ntasks_per_core = opt.ntasks_per_core; - - r.num_resp_port = job->njfds; - r.resp_port = xmalloc(sizeof(uint16_t) * r.num_resp_port); - for (i = 0; i < r.num_resp_port; i++) { - r.resp_port[i] = ntohs(job->jaddr[i].sin_port); - } - - r.num_io_port = job->client_io->num_listen; - r.io_port = xmalloc(sizeof(uint16_t) * r.num_io_port); - for (i = 0; i < r.num_io_port; i++) { - r.io_port[i] = job->client_io->listenport[i]; - } - - - - //hostlist = hostlist_create(job->nodelist); - debug("sending to list %s", job->step_layout->node_list); - - slurm_msg_t_init(&msg); - msg.msg_type = REQUEST_LAUNCH_TASKS; - msg.data = &r; - - if (_verbose) { - for(i=0; i<job->step_layout->node_cnt; i++) { - char *name = nodelist_nth_host( - job->step_layout->node_list, i); - _print_launch_msg(&r, name); - free(name); - } - } - if(!(ret_list = slurm_send_recv_msgs( - job->step_layout->node_list, - &msg, opt.msg_timeout))) { - error("slurm_send_recv_msgs failed miserably: %m"); - return NULL; - } - - ret_itr = list_iterator_create(ret_list); - while ((ret_data = list_next(ret_itr))) { - rc = slurm_get_return_code(ret_data->type, - ret_data->data); - debug("launch returned msg_rc=%d err=%d type=%d", - rc, ret_data->err, ret_data->type); - nodeid = nodelist_find(job->step_layout->node_list, - ret_data->node_name); - - if(nodeid >= job->step_layout->node_cnt) { - /* Make sure we aren't trying to mark - * something we haven't requested but was - * included in the nodelist. 
This should never - * happen */ - error("Job step allocation has more nodes than " - "expected, ignoring node %s(%d): %s", - ret_data->node_name, nodeid, - slurm_strerror(rc)); - } else if (rc != SLURM_SUCCESS) { - slurm_seterrno(rc); - error("Task launch failed on node %s(%d): %s", - ret_data->node_name, nodeid, - slurm_strerror(rc)); - _update_failed_node(job, nodeid); - fail_launch_cnt++; - } else { -#if 0 /* only for debugging, might want to make this a callback */ - slurm_seterrno(rc); - info("Launch success on node %s(%d)", - ret_data->node_name, nodeid); -#endif - _update_contacted_node(job, nodeid); - } - } - list_iterator_destroy(ret_itr); - list_destroy(ret_list); - - if (fail_launch_cnt) { - srun_job_state_t jstate; - - slurm_mutex_lock(&job->state_mutex); - jstate = job->state; - slurm_mutex_unlock(&job->state_mutex); - - if (jstate < SRUN_JOB_TERMINATED) { - error("%d launch request%s failed", - fail_launch_cnt, fail_launch_cnt > 1 ? "s" : ""); - job->rc = 124; - srun_job_kill(job); - } - - } else { - debug("All task launch requests sent"); - update_job_state(job, SRUN_JOB_STARTING); - } - xfree(r.io_port); - xfree(r.resp_port); - xfree(r.complete_nodelist); - - return(void *)(0); -} - -static void -_update_failed_node(srun_job_t *j, int id) -{ - int i; - pipe_enum_t pipe_enum = PIPE_HOST_STATE; - - pthread_mutex_lock(&j->task_mutex); - if (j->host_state[id] == SRUN_HOST_INIT) { - j->host_state[id] = SRUN_HOST_UNREACHABLE; - - if(message_thread) { - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &pipe_enum,sizeof(int)); - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &id,sizeof(int)); - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &j->host_state[id],sizeof(int)); - } - } - - pipe_enum = PIPE_TASK_STATE; - for (i = 0; i < j->step_layout->tasks[id]; i++) { - j->task_state[j->step_layout->tids[id][i]] = SRUN_TASK_FAILED; - - if(message_thread) { - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &j->step_layout->tids[id][i], sizeof(int)); - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &j->task_state[j->step_layout->tids[id][i]], - sizeof(int)); - } - } - pthread_mutex_unlock(&j->task_mutex); - - /* update_failed_tasks(j, id); */ - return; -rwfail: - pthread_mutex_unlock(&j->task_mutex); - error("_update_failed_node: " - "write from srun message-handler process failed"); -} - -static void -_update_contacted_node(srun_job_t *j, int id) -{ - pipe_enum_t pipe_enum = PIPE_HOST_STATE; - pthread_mutex_lock(&j->task_mutex); - if (j->host_state[id] == SRUN_HOST_INIT) { - j->host_state[id] = SRUN_HOST_CONTACTED; - if(message_thread) { - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &id, sizeof(int)); - safe_write(j->forked_msg->par_msg->msg_pipe[1], - &j->host_state[id], sizeof(int)); - } - } - pthread_mutex_unlock(&j->task_mutex); - return; -rwfail: - pthread_mutex_unlock(&j->task_mutex); - error("_update_contacted_node: " - "write from srun message-handler process failed"); -} - -static void -_print_launch_msg(launch_tasks_request_msg_t *msg, char * hostname) -{ - int i; - char tmp_str[10], task_list[4096]; - int nodeid = nodelist_find(msg->complete_nodelist, hostname); - - if (opt.distribution == SLURM_DIST_BLOCK) { - sprintf(task_list, "%u-%u", - msg->global_task_ids[nodeid][0], - msg->global_task_ids[nodeid] - [(msg->tasks_to_launch[nodeid]-1)]); - } else { - for (i=0; i<msg->tasks_to_launch[nodeid]; i++) { - 
sprintf(tmp_str, ",%u", - msg->global_task_ids[nodeid][i]); - if (i == 0) - strcpy(task_list, &tmp_str[1]); - else if ((strlen(tmp_str) + strlen(task_list)) < - sizeof(task_list)) - strcat(task_list, tmp_str); - else - break; - } - } - - info("launching %u.%u on host %s, %u tasks: %s", - msg->job_id, msg->job_step_id, hostname, - msg->tasks_to_launch[nodeid], task_list); - - debug3("uid:%ld gid:%ld cwd:%s %d", (long) msg->uid, - (long) msg->gid, msg->cwd, nodeid); -} diff --git a/src/srun/msg.c b/src/srun/msg.c deleted file mode 100644 index 21e2d12a1..000000000 --- a/src/srun/msg.c +++ /dev/null @@ -1,1565 +0,0 @@ -/****************************************************************************\ - * msg.c - process message traffic between srun and slurm daemons - * $Id: msg.c 12809 2007-12-11 18:41:21Z jette $ - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>, et. al. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#if HAVE_PTHREAD_H -# include <pthread.h> -#endif - -#include <errno.h> -#include <fcntl.h> -#include <signal.h> -#include <string.h> -#include <sys/poll.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <sys/wait.h> -#include <time.h> -#include <stdlib.h> -#include <unistd.h> - -#include <slurm/slurm_errno.h> - -#include "src/common/fd.h" -#include "src/common/hostlist.h" -#include "src/common/log.h" -#include "src/common/macros.h" -#include "src/common/read_config.h" -#include "src/common/slurm_auth.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/slurm_protocol_defs.h" -#include "src/common/xassert.h" -#include "src/common/xmalloc.h" -#include "src/common/mpi.h" -#include "src/common/forward.h" -#include "src/api/pmi_server.h" - -#include "src/srun/srun_job.h" -#include "src/srun/opt.h" -#include "src/srun/msg.h" -#include "src/srun/sigstr.h" -#include "src/srun/attach.h" -#include "src/srun/allocate.h" -#include "src/srun/multi_prog.h" -#include "src/srun/signals.h" -#include "src/srun/srun.h" - -#include "src/common/xstring.h" - -#define LAUNCH_WAIT_SEC 60 /* max wait to confirm launches, sec */ -#define MAX_RETRIES 3 /* pthread_create retries */ - -static int tasks_exited = 0; -static uid_t slurm_uid; -static slurm_fd slurmctld_fd = (slurm_fd) NULL; - -/* - * Static prototypes - */ -static void _accept_msg_connection(srun_job_t *job, int fdnum); -static void _confirm_launch_complete(srun_job_t *job); -static void _dump_proctable(srun_job_t *job); -static void _exec_prog(slurm_msg_t *msg); -static void _exit_handler(srun_job_t *job, slurm_msg_t *exit_msg); -static void _handle_msg(srun_job_t *job, slurm_msg_t *msg); -static inline bool _job_msg_done(srun_job_t *job); -static void _launch_handler(srun_job_t *job, slurm_msg_t *resp); -static void _job_step_complete(srun_job_t *job, slurm_msg_t *msg); -static void _do_poll_timeout(srun_job_t *job); -static int _get_next_timeout(srun_job_t *job); -static void _msg_thr_poll(srun_job_t *job); -static void _set_jfds_nonblocking(srun_job_t *job); -static void _print_pid_list(const char *host, int ntasks, - uint32_t *pid, char *executable_name); -static void _node_fail_handler(int fd, srun_job_t *job); -static void _node_fail_forwarder(char *nodelist, srun_job_t *job); - -#define _poll_set_rd(_pfd, _fd) do { \ - (_pfd).fd = _fd; \ - (_pfd).events = POLLIN; \ - } while (0) - -#define _poll_set_wr(_pfd, _fd) do { \ - (_pfd).fd = _fd; \ - (_pfd).events = POLLOUT; \ - } while (0) - -#define _poll_rd_isset(pfd) ((pfd).revents & POLLIN ) -#define _poll_wr_isset(pfd) ((pfd).revents & POLLOUT) -#define _poll_err(pfd) ((pfd).revents & POLLERR) - -/* fd is job->forked_msg->par_msg->msg_pipe[1] */ -static void _update_mpir_proctable(int fd, srun_job_t *job, - int nodeid, int ntasks, uint32_t *pid, - char *executable) -{ - int msg_type = PIPE_UPDATE_MPIR_PROCTABLE; - int dummy = 0xdeadbeef; - int len; - int i; - - xassert(message_thread); - safe_write(fd, &msg_type, sizeof(int)); /* read by par_thr() */ - safe_write(fd, &dummy, sizeof(int)); /* read by par_thr() */ - - /* the rest are read by _handle_update_mpir_proctable() */ - safe_write(fd, &nodeid, sizeof(int)); - safe_write(fd, &ntasks, sizeof(int)); - len = strlen(executable) + 1; - safe_write(fd, &len, sizeof(int)); - if (len > 0) { - safe_write(fd, executable, len); - } - for (i = 0; i < ntasks; i++) { - int taskid = 
job->step_layout->tids[nodeid][i]; - safe_write(fd, &taskid, sizeof(int)); - safe_write(fd, &pid[i], sizeof(int)); - } - - return; - -rwfail: - error("_update_mpir_proctable: write to srun main process failed"); -} - -static void _handle_update_mpir_proctable(int fd, srun_job_t *job) -{ - static int tasks_recorded = 0; - int nodeid; - int ntasks; - int len; - char *executable = NULL; - int i; - char *name = NULL; - - /* some initialization */ - if (MPIR_proctable_size == 0) { - MPIR_proctable_size = job->step_layout->task_cnt; - MPIR_proctable = xmalloc(sizeof(MPIR_PROCDESC) - * MPIR_proctable_size); - totalview_jobid = NULL; - xstrfmtcat(totalview_jobid, "%u", job->jobid); - } - - safe_read(fd, &nodeid, sizeof(int)); - safe_read(fd, &ntasks, sizeof(int)); - safe_read(fd, &len, sizeof(int)); - if (len > 0) { - executable = xmalloc(len); - safe_read(fd, executable, len); - - /* remote_argv global will be NULL during an srun --attach */ - if (remote_argv == NULL) { - remote_argc = 1; - xrealloc(remote_argv, 2 * sizeof(char *)); - remote_argv[0] = executable; - remote_argv[1] = NULL; - } - } - name = nodelist_nth_host(job->step_layout->node_list, nodeid); - for (i = 0; i < ntasks; i++) { - MPIR_PROCDESC *tv; - int taskid, pid; - - safe_read(fd, &taskid, sizeof(int)); - safe_read(fd, &pid, sizeof(int)); - - tv = &MPIR_proctable[taskid]; - tv->host_name = xstrdup(name); - tv->pid = pid; - tv->executable_name = executable; - tasks_recorded++; - } - free(name); - /* if all tasks are now accounted for, set the debug state and - call the Breakpoint */ - if (tasks_recorded == job->step_layout->task_cnt) { - if (opt.multi_prog) - set_multi_name(tasks_recorded); - MPIR_debug_state = MPIR_DEBUG_SPAWNED; - MPIR_Breakpoint(); - if (opt.debugger_test) - _dump_proctable(job); - } - - return; - -rwfail: - error("_handle_update_mpir_proctable: " - "read from srun message-handler process failed"); -} - -static void _update_step_layout(int fd, slurm_step_layout_t *layout, - int nodeid) -{ - int msg_type = PIPE_UPDATE_STEP_LAYOUT; - int dummy = 0xdeadbeef; - - safe_write(fd, &msg_type, sizeof(int)); /* read by par_thr() */ - safe_write(fd, &dummy, sizeof(int)); /* read by par_thr() */ - - /* the rest are read by _handle_update_step_layout() */ - safe_write(fd, &nodeid, sizeof(int)); - safe_write(fd, &layout->node_cnt, sizeof(uint32_t)); - safe_write(fd, &layout->task_cnt, sizeof(uint32_t)); - safe_write(fd, &layout->tasks[nodeid], sizeof(uint16_t)); - safe_write(fd, layout->tids[nodeid], - layout->tasks[nodeid]*sizeof(uint32_t)); - - return; - -rwfail: - error("_update_step_layout: write to srun main process failed"); -} - -static void _handle_update_step_layout(int fd, slurm_step_layout_t *layout) -{ - int nodeid; - - safe_read(fd, &nodeid, sizeof(int)); - safe_read(fd, &layout->node_cnt, sizeof(uint32_t)); - safe_read(fd, &layout->task_cnt, sizeof(uint32_t)); - xassert(nodeid >= 0 && nodeid <= layout->task_cnt); - - /* If this is the first call to this function, then we probably need - to intialize some of the arrays */ - if (layout->tasks == NULL) - layout->tasks = xmalloc(layout->node_cnt * sizeof(uint16_t *)); - if (layout->tids == NULL) - layout->tids = xmalloc(layout->node_cnt * sizeof(uint32_t *)); - - safe_read(fd, &layout->tasks[nodeid], sizeof(uint16_t)); - xassert(layout->tids[nodeid] == NULL); - layout->tids[nodeid] = xmalloc(layout->tasks[nodeid]*sizeof(uint32_t)); - safe_read(fd, layout->tids[nodeid], - layout->tasks[nodeid]*sizeof(uint32_t)); - return; - -rwfail: - 
error("_handle_update_step_layout: " - "read from srun message-handler process failed"); -} - -static void _dump_proctable(srun_job_t *job) -{ - int node_inx, task_inx, taskid; - int task_cnt; - MPIR_PROCDESC *tv; - - for (node_inx=0; node_inx<job->nhosts; node_inx++) { - task_cnt = job->step_layout->tasks[node_inx]; - for (task_inx = 0; task_inx < task_cnt; task_inx++) { - taskid = job->step_layout->tids[node_inx][task_inx]; - tv = &MPIR_proctable[taskid]; - if (!tv) - break; - info("task:%d, host:%s, pid:%d, executable:%s", - taskid, tv->host_name, tv->pid, - tv->executable_name); - } - } -} - -void debugger_launch_failure(srun_job_t *job) -{ - int i; - pipe_enum_t pipe_enum = PIPE_MPIR_DEBUG_STATE; - - if (opt.parallel_debug) { - if(message_thread && job) { - i = MPIR_DEBUG_ABORTING; - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &i, sizeof(int)); - } - } - return; -rwfail: - error("debugger_launch_failure: " - "write from srun message-handler process failed"); - -} - -/* - * Job has been notified of it's approaching time limit. - * Job will be killed shortly after timeout. - * This RPC can arrive multiple times with the same or updated timeouts. - * FIXME: We may want to signal the job or perform other action for this. - * FIXME: How much lead time do we want for this message? Some jobs may - * require tens of minutes to gracefully terminate. - */ -void timeout_handler(time_t timeout) -{ - static time_t last_timeout = 0; - - if (timeout != last_timeout) { - last_timeout = timeout; - verbose("job time limit to be reached at %s", - ctime(&timeout)); - } -} - -/* - * Job has been notified of a node's failure (at least the node's slurmd - * has stopped responding to slurmctld). It is possible that the user's - * job is continuing to execute on the specified nodes, but quite possibly - * not. The job will continue to execute given the --no-kill option. - * Otherwise all of the job's tasks and the job itself are killed.. - */ -static void _node_fail_handler(int fd, srun_job_t *job) -{ - char *nodelist = NULL; - int len = 0; - hostset_t fail_nodes, all_nodes; - hostlist_iterator_t fail_itr; - char *node; - int num_node_ids; - int *node_ids; - int i, j; - int node_id, num_tasks; - - /* get the hostlist string of failed nodes from the message thread */ - safe_read(fd, &len, sizeof(int)); - nodelist = (char *)xmalloc(len+1); - safe_read(fd, nodelist, len); - nodelist[len] = '\0'; - - /* now process the down nodes and tell the IO client about them */ - fail_nodes = hostset_create(nodelist); - fail_itr = hostset_iterator_create(fail_nodes); - num_node_ids = hostset_count(fail_nodes); - node_ids = xmalloc(sizeof(int) * num_node_ids); - - all_nodes = hostset_create(job->step_layout->node_list); - /* find the index number of each down node */ - slurm_mutex_lock(&job->task_mutex); - for (i = 0; i < num_node_ids; i++) { - node = hostlist_next(fail_itr); - node_id = node_ids[i] = hostset_find(all_nodes, node); - if (job->host_state[node_id] != SRUN_HOST_UNREACHABLE) { - error("Node failure: %s.", node); - job->host_state[node_id] = SRUN_HOST_UNREACHABLE; - } - free(node); - - /* find all of the tasks that should run on this failed node - * and mark them as having failed. 
- */ - num_tasks = job->step_layout->tasks[node_id]; - for (j = 0; j < num_tasks; j++) { - int gtaskid; - debug2("marking task %d done on failed node %d", - job->step_layout->tids[node_id][j], node_id); - gtaskid = job->step_layout->tids[node_id][j]; - job->task_state[gtaskid] = SRUN_TASK_FAILED; - } - } - slurm_mutex_unlock(&job->task_mutex); - - if (!opt.allocate) { - client_io_handler_downnodes(job->client_io, node_ids, - num_node_ids); - } - - if (!opt.no_kill) { - update_job_state(job, SRUN_JOB_FORCETERM); - info("sending SIGINT to remaining tasks"); - fwd_signal(job, SIGINT, opt.max_threads); - } - - xfree(nodelist); - return; -rwfail: - error("Failure reading node failure message from message process: %m"); - if (nodelist != NULL) - xfree(nodelist); - return; -} - -/* - * Forward the node failure message to the main srun process. - * - * NOTE: this is called from the forked message handling process - */ -static void _node_fail_forwarder(char *nodelist, srun_job_t *job) -{ - pipe_enum_t pipe_enum = PIPE_NODE_FAIL; - int dummy = 0xdeadbeef; - int pipe_fd = job->forked_msg->par_msg->msg_pipe[1]; - int len; - - len = strlen(nodelist); - if (message_thread) { - safe_write(pipe_fd, &pipe_enum, sizeof(int)); - safe_write(pipe_fd, &dummy, sizeof(int)); - - /* the following writes are handled by _node_fail_handler */ - safe_write(pipe_fd, &len, sizeof(int)); - safe_write(pipe_fd, nodelist, len); - } - return; -rwfail: - error("Failure sending node failure message to main process: %m"); - return; -} - -static bool _job_msg_done(srun_job_t *job) -{ - return (job->state >= SRUN_JOB_TERMINATED); -} - -static void -_process_launch_resp(srun_job_t *job, launch_tasks_response_msg_t *msg) -{ - pipe_enum_t pipe_enum = PIPE_HOST_STATE; - int nodeid = nodelist_find(job->step_layout->node_list, - msg->node_name); - - if ((nodeid < 0) || (nodeid >= job->nhosts)) { - error ("Bad launch response from %s", msg->node_name); - return; - } - pthread_mutex_lock(&job->task_mutex); - job->host_state[nodeid] = SRUN_HOST_REPLIED; - pthread_mutex_unlock(&job->task_mutex); - - if(message_thread) { - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &nodeid, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->host_state[nodeid], sizeof(int)); - - } - _update_mpir_proctable(job->forked_msg->par_msg->msg_pipe[1], job, - nodeid, msg->count_of_pids, - msg->local_pids, remote_argv[0]); - _print_pid_list( msg->node_name, msg->count_of_pids, - msg->local_pids, remote_argv[0] ); - return; -rwfail: - error("_process_launch_resp: " - "write from srun message-handler process failed"); - -} - -/* This is used to initiate an OpenMPI checkpoint program, - * but is written to be general purpose */ -static void -_exec_prog(slurm_msg_t *msg) -{ - pid_t child; - int pfd[2], status, exit_code = 0, i; - ssize_t len; - char *argv[4], buf[256] = ""; - time_t now = time(NULL); - bool checkpoint = false; - srun_exec_msg_t *exec_msg = msg->data; - - if (exec_msg->argc > 2) { - verbose("Exec '%s %s' for %u.%u", - exec_msg->argv[0], exec_msg->argv[1], - exec_msg->job_id, exec_msg->step_id); - } else { - verbose("Exec '%s' for %u.%u", - exec_msg->argv[0], - exec_msg->job_id, exec_msg->step_id); - } - - if (strcmp(exec_msg->argv[0], "ompi-checkpoint") == 0) - checkpoint = true; - if (checkpoint) { - /* OpenMPI specific checkpoint support */ - info("Checkpoint started at %s", ctime(&now)); - for (i=0; (exec_msg->argv[i] && (i<2)); i++) { - 
argv[i] = exec_msg->argv[i]; - } - snprintf(buf, sizeof(buf), "%ld", (long) srun_ppid); - argv[i] = buf; - argv[i+1] = NULL; - } - - if (pipe(pfd) == -1) { - snprintf(buf, sizeof(buf), "pipe: %s", strerror(errno)); - error("%s", buf); - exit_code = errno; - goto fini; - } - - child = fork(); - if (child == 0) { - int fd = open("/dev/null", O_RDONLY); - dup2(fd, 0); /* stdin from /dev/null */ - dup2(pfd[1], 1); /* stdout to pipe */ - dup2(pfd[1], 2); /* stderr to pipe */ - close(pfd[0]); - close(pfd[1]); - if (checkpoint) - execvp(exec_msg->argv[0], argv); - else - execvp(exec_msg->argv[0], exec_msg->argv); - error("execvp(%s): %m", exec_msg->argv[0]); - } else if (child < 0) { - snprintf(buf, sizeof(buf), "fork: %s", strerror(errno)); - error("%s", buf); - exit_code = errno; - goto fini; - } else { - close(pfd[1]); - len = read(pfd[0], buf, sizeof(buf)); - close(pfd[0]); - waitpid(child, &status, 0); - exit_code = WEXITSTATUS(status); - } - -fini: if (checkpoint) { - now = time(NULL); - if (exit_code) { - info("Checkpoint completion code %d at %s", - exit_code, ctime(&now)); - } else { - info("Checkpoint completed successfully at %s", - ctime(&now)); - } - if (buf[0]) - info("Checkpoint location: %s", buf); - slurm_checkpoint_complete(exec_msg->job_id, exec_msg->step_id, - time(NULL), (uint32_t) exit_code, buf); - } -} - -/* This typically signifies the job was cancelled by scancel */ -static void -_job_step_complete(srun_job_t *job, slurm_msg_t *msg) -{ - srun_job_complete_msg_t *step_msg = msg->data; - - if (step_msg->step_id == NO_VAL) { - verbose("Complete job %u received", - step_msg->job_id); - } else { - verbose("Complete job step %u.%u received", - step_msg->job_id, step_msg->step_id); - } - update_job_state(job, SRUN_JOB_FORCETERM); - job->removed = true; -} - -static void -update_tasks_state(srun_job_t *job, uint32_t nodeid) -{ - int i; - pipe_enum_t pipe_enum = PIPE_TASK_STATE; - slurm_mutex_lock(&job->task_mutex); - debug2("updating %u tasks state for node %u", - job->step_layout->tasks[nodeid], nodeid); - for (i = 0; i < job->step_layout->tasks[nodeid]; i++) { - uint32_t tid = job->step_layout->tids[nodeid][i]; - - if(message_thread) { - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum,sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &tid,sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->task_state[tid],sizeof(int)); - } - } - slurm_mutex_unlock(&job->task_mutex); - return; -rwfail: - slurm_mutex_unlock(&job->task_mutex); - error("update_tasks_state: " - "write from srun message-handler process failed"); - -} - -static void -update_running_tasks(srun_job_t *job, uint32_t nodeid) -{ - int i; - pipe_enum_t pipe_enum = PIPE_TASK_STATE; - debug2("updating %u running tasks for node %u", - job->step_layout->tasks[nodeid], nodeid); - slurm_mutex_lock(&job->task_mutex); - for (i = 0; i < job->step_layout->tasks[nodeid]; i++) { - uint32_t tid = job->step_layout->tids[nodeid][i]; - job->task_state[tid] = SRUN_TASK_RUNNING; - - if(message_thread) { - safe_write(job->forked_msg-> - par_msg->msg_pipe[1], - &pipe_enum,sizeof(int)); - safe_write(job->forked_msg-> - par_msg->msg_pipe[1],&tid, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->task_state[tid], sizeof(int)); - } - } - slurm_mutex_unlock(&job->task_mutex); - return; -rwfail: - slurm_mutex_unlock(&job->task_mutex); - error("update_running_tasks: " - "write from srun message-handler process failed"); -} - -static void -update_failed_tasks(srun_job_t 
*job, uint32_t nodeid) -{ - int i; - pipe_enum_t pipe_enum = PIPE_TASK_STATE; - - slurm_mutex_lock(&job->task_mutex); - for (i = 0; i < job->step_layout->tasks[nodeid]; i++) { - uint32_t tid = job->step_layout->tids[nodeid][i]; - job->task_state[tid] = SRUN_TASK_FAILED; - - if(message_thread) { - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &tid, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->task_state[tid], sizeof(int)); - } - tasks_exited++; - } - slurm_mutex_unlock(&job->task_mutex); - - if (tasks_exited == opt.nprocs) { - debug2("all tasks exited"); - update_job_state(job, SRUN_JOB_TERMINATED); - } -rwfail: - slurm_mutex_unlock(&job->task_mutex); - error("update_failed_tasks: " - "write from srun message-handler process failed"); - -} - -static void -_launch_handler(srun_job_t *job, slurm_msg_t *resp) -{ - launch_tasks_response_msg_t *msg = resp->data; - pipe_enum_t pipe_enum = PIPE_HOST_STATE; - int nodeid = nodelist_find(job->step_layout->node_list, - msg->node_name); - - debug3("received launch resp from %s nodeid=%d", - msg->node_name, - nodeid); - - if (msg->return_code != 0) { - - error("%s: launch failed: %s", - msg->node_name, slurm_strerror(msg->return_code)); - - slurm_mutex_lock(&job->task_mutex); - job->host_state[nodeid] = SRUN_HOST_REPLIED; - slurm_mutex_unlock(&job->task_mutex); - - if(message_thread) { - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &nodeid, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->host_state[nodeid], - sizeof(int)); - } - update_failed_tasks(job, nodeid); - - /* - if (!opt.no_kill) { - job->rc = 124; - update_job_state(job, SRUN_JOB_WAITING_ON_IO); - } else - update_failed_tasks(job, nodeid); - */ - debugger_launch_failure(job); - return; - } else { - _process_launch_resp(job, msg); - update_running_tasks(job, nodeid); - } - return; -rwfail: - error("_launch_handler: " - "write from srun message-handler process failed"); - -} - -/* _confirm_launch_complete - * confirm that all tasks registers a sucessful launch - * pthread_exit with job kill on failure */ -static void -_confirm_launch_complete(srun_job_t *job) -{ - int i; - char *name = NULL; - - printf("job->nhosts %d\n",job->nhosts); - - for (i=0; i<job->nhosts; i++) { - printf("job->nhosts %d\n",job->nhosts); - if (job->host_state[i] != SRUN_HOST_REPLIED) { - name = nodelist_nth_host(job->step_layout->node_list, - i); - error ("Node %s not responding, terminating job step", - name); - free(name); - info("sending Ctrl-C to remaining tasks"); - fwd_signal(job, SIGINT, opt.max_threads); - job->rc = 124; - update_job_state(job, SRUN_JOB_FAILED); - pthread_exit(0); - } - } - - /* - * Reset launch timeout so timer will no longer go off - */ - job->ltimeout = 0; -} - -static void -_reattach_handler(srun_job_t *job, slurm_msg_t *msg) -{ - int i; - reattach_tasks_response_msg_t *resp = msg->data; - int nodeid = nodelist_find(job->step_layout->node_list, - resp->node_name); - - if ((nodeid < 0) || (nodeid >= job->nhosts)) { - error ("Invalid reattach response received"); - return; - } - - slurm_mutex_lock(&job->task_mutex); - job->host_state[nodeid] = SRUN_HOST_REPLIED; - slurm_mutex_unlock(&job->task_mutex); - - if(message_thread) { - pipe_enum_t pipe_enum = PIPE_HOST_STATE; - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - 
safe_write(job->forked_msg->par_msg->msg_pipe[1], - &nodeid, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->host_state[nodeid], sizeof(int)); - } - - if (resp->return_code != 0) { - if (job->stepid == NO_VAL) { - error ("Unable to attach to job %d: %s", - job->jobid, slurm_strerror(resp->return_code)); - } else { - error ("Unable to attach to step %d.%d on node %d: %s", - job->jobid, job->stepid, nodeid, - slurm_strerror(resp->return_code)); - } - job->rc = 1; - - update_job_state(job, SRUN_JOB_FAILED); - return; - } - - /* - * store global task id information as returned from slurmd - */ - job->step_layout->tids[nodeid] = - xmalloc( resp->ntasks * sizeof(uint32_t) ); - - job->step_layout->tasks[nodeid] = resp->ntasks; - - info ("ntasks = %d\n"); - - for (i = 0; i < resp->ntasks; i++) { - job->step_layout->tids[nodeid][i] = resp->gtids[i]; - info ("setting task%d on hostid %d\n", - resp->gtids[i], nodeid); - } - _update_step_layout(job->forked_msg->par_msg->msg_pipe[1], - job->step_layout, nodeid); - - /* Build process table for any parallel debugger - */ - if ((remote_argc == 0) && (resp->executable_names)) { - remote_argc = 1; - xrealloc(remote_argv, 2 * sizeof(char *)); - remote_argv[0] = resp->executable_names[0]; - resp->executable_names = NULL; /* nothing left to free */ - remote_argv[1] = NULL; - } - _update_mpir_proctable(job->forked_msg->par_msg->msg_pipe[1], job, - nodeid, resp->ntasks, - resp->local_pids, remote_argv[0]); - - _print_pid_list(resp->node_name, resp->ntasks, resp->local_pids, - remote_argv[0]); - - update_running_tasks(job, nodeid); - return; -rwfail: - error("_reattach_handler: " - "write from srun message-handler process failed"); -} - - -static void -_print_exit_status(srun_job_t *job, hostlist_t hl, char *host, int status) -{ - char buf[MAXHOSTRANGELEN]; - char *corestr = ""; - bool signaled = false; - void (*print) (const char *, ...) = (void *) &error; - - xassert(hl != NULL); - - slurm_mutex_lock(&job->state_mutex); - signaled = job->signaled; - slurm_mutex_unlock(&job->state_mutex); - - /* - * Print message that task was signaled as verbose message - * not error message if the user generated the signal. 
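The status reporting here distinguishes tasks that exited normally from tasks killed by a signal, and _die_if_signaled below maps a fatal signal into the conventional shell-style return code of 128 plus the signal number. A small self-contained sketch of that wait(2) status decoding follows; the helper name is illustrative only.

/* Sketch: decode a status from waitpid(2) the way the handlers here do,
 * mapping a normal exit to its exit code and a fatal signal to the
 * shell convention of 128 + signal number. */
#include <stdio.h>
#include <sys/wait.h>

static int decode_wait_status(int status)
{
	if (WIFEXITED(status))
		return WEXITSTATUS(status);
	if (WIFSIGNALED(status)) {
#ifdef WCOREDUMP
		if (WCOREDUMP(status))
			fprintf(stderr, "task dumped core\n");
#endif
		return 128 + WTERMSIG(status);
	}
	return -1;	/* stopped/continued: not a final status */
}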
- */ - if (signaled) - print = &verbose; - - hostlist_ranged_string(hl, sizeof(buf), buf); - - if (status == 0) { - verbose("%s: %s: Done", host, buf); - return; - } - -#ifdef WCOREDUMP - if (WCOREDUMP(status)) - corestr = " (core dumped)"; -#endif - - if (WIFSIGNALED(status)) { - (*print) ("%s: task%s: %s%s", host, buf, - sigstr(status), corestr); - } else { - error ("%s: task%s: Exited with exit code %d", - host, buf, WEXITSTATUS(status)); - } - - return; -} - -static void -_die_if_signaled(srun_job_t *job, int status) -{ - bool signaled = false; - - slurm_mutex_lock(&job->state_mutex); - signaled = job->signaled; - slurm_mutex_unlock(&job->state_mutex); - - if (WIFSIGNALED(status) && !signaled) { - job->rc = 128 + WTERMSIG(status); - update_job_state(job, SRUN_JOB_FAILED); - } -} - -static void -_update_task_exitcode(srun_job_t *job, int taskid) -{ - pipe_enum_t pipe_enum = PIPE_TASK_EXITCODE; - - if(message_thread) { - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &taskid, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->tstatus[taskid], sizeof(int)); - } - return; -rwfail: - error("_update_task_exitcode: " - "write from srun message-handler process failed"); -} - -static void -_exit_handler(srun_job_t *job, slurm_msg_t *exit_msg) -{ - task_exit_msg_t *msg = (task_exit_msg_t *) exit_msg->data; - hostlist_t hl = hostlist_create(NULL); - int task0 = msg->task_id_list[0]; - char *host = NULL; - int status = msg->return_code; - int i; - char buf[1024]; - - if (!(host = slurm_step_layout_host_name(job->step_layout, task0))) - host = "Unknown host"; - debug2("exited host %s", host); - if (!job->etimeout && !tasks_exited) - job->etimeout = time(NULL) + opt.max_exit_timeout; - - for (i = 0; i < msg->num_tasks; i++) { - uint32_t taskid = msg->task_id_list[i]; - - if ((taskid < 0) || (taskid >= opt.nprocs)) { - error("task exit resp has bad task id %d", taskid); - continue; - } - - snprintf(buf, sizeof(buf), "%d", taskid); - hostlist_push(hl, buf); - - slurm_mutex_lock(&job->task_mutex); - job->tstatus[taskid] = status; - _update_task_exitcode(job, taskid); - if (status) - job->task_state[taskid] = SRUN_TASK_ABNORMAL_EXIT; - else { - job->task_state[taskid] = SRUN_TASK_EXITED; - } - - slurm_mutex_unlock(&job->task_mutex); - - tasks_exited++; - debug2("looking for %d got %d", opt.nprocs, tasks_exited); - if ((tasks_exited == opt.nprocs) - || (mpi_hook_client_single_task_per_node () - && (tasks_exited == job->nhosts))) { - debug2("All tasks exited"); - update_job_state(job, SRUN_JOB_TERMINATED); - } - } - - update_tasks_state(job, slurm_step_layout_host_id(job->step_layout, - task0)); - - _print_exit_status(job, hl, host, status); - - hostlist_destroy(hl); - - _die_if_signaled(job, status); - - /* - * When a task terminates with a non-zero exit code and the - * "--kill-on-bad-exit" option is set, terminate the entire job. - */ - if (status != 0 && opt.kill_bad_exit) - { - static int first_time = 1; - - /* Only kill the job once. 
*/ - if (first_time) - { - debug("Terminating job due to a non-zero exit code"); - - first_time = 0; - - srun_job_kill(job); - } - } -} - -static void -_handle_msg(srun_job_t *job, slurm_msg_t *msg) -{ - uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred); - uid_t uid = getuid(); - int rc; - srun_timeout_msg_t *to; - srun_node_fail_msg_t *nf; - srun_user_msg_t *um; - - if ((req_uid != slurm_uid) && (req_uid != 0) && (req_uid != uid)) { - error ("Security violation, slurm message from uid %u", - (unsigned int) req_uid); - return; - } - - switch (msg->msg_type) - { - case RESPONSE_LAUNCH_TASKS: - debug("received task launch response"); - _launch_handler(job, msg); - slurm_free_launch_tasks_response_msg(msg->data); - break; - case MESSAGE_TASK_EXIT: - debug2("task_exit received"); - _exit_handler(job, msg); - slurm_free_task_exit_msg(msg->data); - break; - case RESPONSE_REATTACH_TASKS: - debug2("received reattach response"); - _reattach_handler(job, msg); - slurm_free_reattach_tasks_response_msg(msg->data); - break; - case SRUN_PING: - debug3("slurmctld ping received"); - slurm_send_rc_msg(msg, SLURM_SUCCESS); - slurm_free_srun_ping_msg(msg->data); - break; - case SRUN_EXEC: - _exec_prog(msg); - slurm_free_srun_exec_msg(msg->data); - break; - case SRUN_JOB_COMPLETE: - _job_step_complete(job, msg); - slurm_free_srun_job_complete_msg(msg->data); - break; - case SRUN_TIMEOUT: - verbose("timeout received"); - to = msg->data; - timeout_handler(to->timeout); - slurm_free_srun_timeout_msg(msg->data); - break; - case SRUN_USER_MSG: - um = msg->data; - info("%s", um->msg); - slurm_free_srun_user_msg(msg->data); - break; - case SRUN_NODE_FAIL: - verbose("node_fail received"); - nf = msg->data; - _node_fail_forwarder(nf->nodelist, job); - slurm_free_srun_node_fail_msg(msg->data); - break; - case RESPONSE_RESOURCE_ALLOCATION: - debug3("resource allocation response received"); - slurm_send_rc_msg(msg, SLURM_SUCCESS); - slurm_free_resource_allocation_response_msg(msg->data); - break; - case PMI_KVS_PUT_REQ: - debug3("PMI_KVS_PUT_REQ received"); - rc = pmi_kvs_put((struct kvs_comm_set *) msg->data); - slurm_send_rc_msg(msg, rc); - break; - case PMI_KVS_GET_REQ: - debug3("PMI_KVS_GET_REQ received"); - rc = pmi_kvs_get((kvs_get_msg_t *) msg->data); - slurm_send_rc_msg(msg, rc); - slurm_free_get_kvs_msg((kvs_get_msg_t *) msg->data); - break; - default: - error("received spurious message type: %d\n", - msg->msg_type); - break; - } - return; -} - -/* NOTE: One extra FD for incoming slurmctld messages */ -static void -_accept_msg_connection(srun_job_t *job, int fdnum) -{ - slurm_fd fd = (slurm_fd) NULL; - slurm_msg_t *msg = NULL; - slurm_addr cli_addr; - unsigned char *uc; - short port; - int timeout = 0; /* slurm default value */ - - if (fdnum < job->njfds) - fd = slurm_accept_msg_conn(job->jfd[fdnum], &cli_addr); - else - fd = slurm_accept_msg_conn(slurmctld_fd, &cli_addr); - - if (fd < 0) { - error("Unable to accept connection: %m"); - return; - } - - /* Should not call slurm_get_addr() because the IP may not be - in /etc/hosts. */ - uc = (unsigned char *)&cli_addr.sin_addr.s_addr; - port = cli_addr.sin_port; - debug2("got message connection from %u.%u.%u.%u:%hu", - uc[0], uc[1], uc[2], uc[3], ntohs(port)); - - msg = xmalloc(sizeof(slurm_msg_t)); - slurm_msg_t_init(msg); - - /* multiple jobs (easily induced via no_alloc) and highly - * parallel jobs using PMI sometimes result in slow message - * responses and timeouts. Raise the default timeout for srun. 
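The receive path just below retries slurm_receive_msg() with a goto whenever the call is interrupted by a signal (EINTR). The same retry can be expressed as a loop; the sketch below uses plain read(2) as a stand-in for the SLURM receive call, so the function name and signature are assumptions rather than part of this patch.

/* Sketch: retry a blocking read when it is interrupted by a signal,
 * the same EINTR handling the receive loop below performs with a goto.
 * read(2) stands in for slurm_receive_msg(). */
#include <errno.h>
#include <unistd.h>

static ssize_t read_retry(int fd, void *buf, size_t len)
{
	ssize_t n;

	do {
		n = read(fd, buf, len);
	} while ((n < 0) && (errno == EINTR));

	return n;	/* 0 on EOF, -1 on a real error */
}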
*/ - timeout = slurm_get_msg_timeout() * 8000; -again: - if(slurm_receive_msg(fd, msg, timeout) != 0) { - if (errno == EINTR) { - goto again; - } - error("slurm_receive_msg[%u.%u.%u.%u]: %m", - uc[0],uc[1],uc[2],uc[3]); - goto cleanup; - } - - _handle_msg(job, msg); /* handle_msg frees msg->data */ -cleanup: - if ((msg->conn_fd >= 0) && slurm_close_accepted_conn(msg->conn_fd) < 0) - error ("close(%d): %m", msg->conn_fd); - slurm_free_msg(msg); - - - return; -} - - -static void -_set_jfds_nonblocking(srun_job_t *job) -{ - int i; - for (i = 0; i < job->njfds; i++) - fd_set_nonblocking(job->jfd[i]); -} - -/* - * Call poll() with a timeout. (timeout argument is in seconds) - * NOTE: One extra FD for incoming slurmctld messages - */ -static int -_do_poll(srun_job_t *job, struct pollfd *fds, int timeout) -{ - nfds_t nfds = (job->njfds + 1); - int rc, to; - - if (timeout > 0) - to = timeout * 1000; - else - to = timeout; - - while ((rc = poll(fds, nfds, to)) < 0) { - switch (errno) { - case EAGAIN: - case EINTR: continue; - case ENOMEM: - case EINVAL: - case EFAULT: fatal("poll: %m"); - default: error("poll: %m. Continuing..."); - continue; - } - } - - return rc; -} - - -/* - * Get the next timeout in seconds from now. - */ -static int -_get_next_timeout(srun_job_t *job) -{ - int timeout = -1; - - if (!job->ltimeout && !job->etimeout) - return -1; - - if (!job->ltimeout) - timeout = job->etimeout - time(NULL); - else if (!job->etimeout) - timeout = job->ltimeout - time(NULL); - else - timeout = job->ltimeout < job->etimeout ? - job->ltimeout - time(NULL) : - job->etimeout - time(NULL); - - return timeout; -} - -/* - * Handle the two poll timeout cases: - * 1. Job launch timed out - * 2. Exit timeout has expired (either print a message or kill job) - */ -static void -_do_poll_timeout(srun_job_t *job) -{ - time_t now = time(NULL); - - if ((job->ltimeout > 0) && (job->ltimeout <= now)) - _confirm_launch_complete(job); - - if ((job->etimeout > 0) && (job->etimeout <= now)) { - if (!opt.max_wait) - info("Warning: first task terminated %ds ago", - opt.max_exit_timeout); - else { - error("First task exited %ds ago", opt.max_wait); - report_task_status(job); - update_job_state(job, SRUN_JOB_FAILED); - } - job->etimeout = 0; - } -} - -/* NOTE: One extra FD for incoming slurmctld messages */ -static void -_msg_thr_poll(srun_job_t *job) -{ - struct pollfd *fds; - int i; - - fds = xmalloc((job->njfds + 1) * sizeof(*fds)); - - _set_jfds_nonblocking(job); - - for (i = 0; i < job->njfds; i++) - _poll_set_rd(fds[i], job->jfd[i]); - _poll_set_rd(fds[i], slurmctld_fd); - - while (!_job_msg_done(job)) { - if (_do_poll(job, fds, _get_next_timeout(job)) == 0) { - _do_poll_timeout(job); - continue; - } - - for (i = 0; i < (job->njfds + 1) ; i++) { - unsigned short revents = fds[i].revents; - if ((revents & POLLERR) || - (revents & POLLHUP) || - (revents & POLLNVAL)) - error("poll error on jfd %d: %m", fds[i].fd); - else if (revents & POLLIN) - _accept_msg_connection(job, i); - } - - } - - xfree(fds); /* if we were to break out of while loop */ -} - -void * -msg_thr(void *arg) -{ - srun_job_t *job = (srun_job_t *) arg; - forked_msg_pipe_t *par_msg = job->forked_msg->par_msg; - debug3("msg thread pid = %lu", (unsigned long) getpid()); - - slurm_uid = (uid_t) slurm_get_slurm_user_id(); - - _msg_thr_poll(job); - - close(par_msg->msg_pipe[1]); // close excess fildes - debug3("msg thread done"); - return (void *)1; -} - - -/* - * This function runs in a pthread of the parent srun process and - * handles messages from the 
srun message-handler process. - */ -void * -par_thr(void *arg) -{ - srun_job_t *job = (srun_job_t *) arg; - forked_msg_pipe_t *par_msg = job->forked_msg->par_msg; - forked_msg_pipe_t *msg_par = job->forked_msg->msg_par; - int c; - pipe_enum_t type=0; - int tid=-1; - int status; - debug3("par thread pid = %lu", (unsigned long) getpid()); - - //slurm_uid = (uid_t) slurm_get_slurm_user_id(); - close(msg_par->msg_pipe[0]); // close read end of pipe - close(par_msg->msg_pipe[1]); // close write end of pipe - /* Note: On some message types, we read a task ID as the - * first number then read status or exit code as the second */ - while(read(par_msg->msg_pipe[0], &c, sizeof(int)) - == sizeof(int)) { - // getting info from msg thread - if(type == PIPE_NONE) { - debug2("got type %d\n",c); - type = c; - continue; - } - - switch(type) { - case PIPE_JOB_STATE: - debug("PIPE_JOB_STATE, c = %d", c); - update_job_state(job, c); - break; - case PIPE_TASK_STATE: - if(tid == -1) { - tid = c; - continue; - } - debug("PIPE_TASK_STATE tid=%d, state=%d", tid, c); - slurm_mutex_lock(&job->task_mutex); - job->task_state[tid] = c; - if(c == SRUN_TASK_FAILED) - tasks_exited++; - slurm_mutex_unlock(&job->task_mutex); - if (tasks_exited == opt.nprocs) { - debug2("all tasks exited"); - update_job_state(job, SRUN_JOB_TERMINATED); - } - tid = -1; - break; - case PIPE_TASK_EXITCODE: - if(tid == -1) { - tid = c; - continue; - } - debug("PIPE_TASK_EXITCODE tid=%d code=%d", tid, c); - slurm_mutex_lock(&job->task_mutex); - job->tstatus[tid] = c; - slurm_mutex_unlock(&job->task_mutex); - tid = -1; - break; - case PIPE_HOST_STATE: - if(tid == -1) { - tid = c; - continue; - } - slurm_mutex_lock(&job->task_mutex); - job->host_state[tid] = c; - slurm_mutex_unlock(&job->task_mutex); - tid = -1; - break; - case PIPE_SIGNALED: - slurm_mutex_lock(&job->state_mutex); - job->signaled = c; - slurm_mutex_unlock(&job->state_mutex); - break; - case PIPE_MPIR_DEBUG_STATE: - MPIR_debug_state = c; - MPIR_Breakpoint(); - if (opt.debugger_test) - _dump_proctable(job); - break; - case PIPE_UPDATE_MPIR_PROCTABLE: - _handle_update_mpir_proctable(par_msg->msg_pipe[0], - job); - break; - case PIPE_UPDATE_STEP_LAYOUT: - _handle_update_step_layout(par_msg->msg_pipe[0], - job->step_layout); - break; - case PIPE_NODE_FAIL: - _node_fail_handler(par_msg->msg_pipe[0], job); - break; - default: - error("Unrecognized message from message thread %d", - type); - } - type = PIPE_NONE; - } - close(par_msg->msg_pipe[0]); // close excess fildes - close(msg_par->msg_pipe[1]); // close excess fildes - if(waitpid(par_msg->pid,&status,0)<0) // wait for pid to finish - return NULL;// there was an error - debug3("par thread done"); - return (void *)1; -} - -/* - * Forks the srun process that handles messages even if the main srun - * process is stopped (for instance, by totalview). Also creates - * the various pthreads used in the original and monitor process. - * - * NOTE: call this before creating any pthreads to avoid having forked process - * hang on localtime_t() mutex locked in parent processes pthread. 
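par_thr() above drains a compact tagged protocol from the pipe: one int naming the message type (pipe_enum_t), followed by one or two int payloads depending on that type, with a task id sent first where needed. The sketch below shows the shape of such a read-side dispatcher; the tag names, payload layout, and drain_pipe helper are hypothetical, and SLURM's real loop additionally handles variable-length messages such as PIPE_NODE_FAIL and PIPE_UPDATE_MPIR_PROCTABLE.

/* Sketch of the read side of a tagged int protocol like the one par_thr
 * drains: a type word, then a fixed number of int payloads per type. */
#include <stdio.h>
#include <unistd.h>

enum { TAG_JOB_STATE = 1, TAG_TASK_STATE = 2 };	/* illustrative tags */

static void drain_pipe(int fd)
{
	int tag, a, b;

	while (read(fd, &tag, sizeof(int)) == sizeof(int)) {
		switch (tag) {
		case TAG_JOB_STATE:		/* one payload word */
			if (read(fd, &a, sizeof(int)) != sizeof(int))
				return;
			printf("job state -> %d\n", a);
			break;
		case TAG_TASK_STATE:		/* task id, then state */
			if (read(fd, &a, sizeof(int)) != sizeof(int))
				return;
			if (read(fd, &b, sizeof(int)) != sizeof(int))
				return;
			printf("task %d state -> %d\n", a, b);
			break;
		default:
			fprintf(stderr, "unknown tag %d\n", tag);
			return;
		}
	}
}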
- */ -extern int -msg_thr_create(srun_job_t *job) -{ - int i, retries = 0; - pthread_attr_t attr; - int c; - - job->forked_msg = xmalloc(sizeof(forked_msg_t)); - job->forked_msg->par_msg = xmalloc(sizeof(forked_msg_pipe_t)); - job->forked_msg->msg_par = xmalloc(sizeof(forked_msg_pipe_t)); - - set_allocate_job(job); - - for (i = 0; i < job->njfds; i++) { - if ((job->jfd[i] = slurm_init_msg_engine_port(0)) < 0) - fatal("init_msg_engine_port: %m"); - if (slurm_get_stream_addr(job->jfd[i], - &job->jaddr[i]) - < 0) - fatal("slurm_get_stream_addr: %m"); - debug("initialized job control port %d\n", - ntohs(((struct sockaddr_in) - job->jaddr[i]).sin_port)); - } - - if (pipe(job->forked_msg->par_msg->msg_pipe) == -1) { - error("pipe(): %m"); - return SLURM_ERROR; - } - if (pipe(job->forked_msg->msg_par->msg_pipe) == -1) { - error("pipe(): %m"); - return SLURM_ERROR; - } - debug2("created the pipes for communication"); - - /* retry fork for super-heavily loaded systems */ - for (i = 0; ; i++) { - if((job->forked_msg->par_msg->pid = fork()) != -1) - break; - if (i < 3) - usleep(1000); - else { - error("fork(): %m"); - return SLURM_ERROR; - } - } - - if (job->forked_msg->par_msg->pid == 0) { - /* child */ - setsid(); - message_thread = 1; - close(job->forked_msg-> - par_msg->msg_pipe[0]); // close read end of pipe - close(job->forked_msg-> - msg_par->msg_pipe[1]); // close write end of pipe - slurm_attr_init(&attr); - pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); - while ((errno = pthread_create(&job->jtid, &attr, &msg_thr, - (void *)job))) { - if (++retries > MAX_RETRIES) - fatal("Can't create pthread"); - sleep(1); - } - slurm_attr_destroy(&attr); - debug("Started msg to parent server thread (%lu)", - (unsigned long) job->jtid); - - /* - * Wait for the main srun process to exit. When it - * does, the other end of the msg_par->msg_pipe will - * close. - */ - while(read(job->forked_msg->msg_par->msg_pipe[0], - &c, sizeof(int)) > 0) - ; /* do nothing */ - - close(job->forked_msg->msg_par->msg_pipe[0]); - /* - * These xfree aren't really necessary if we are just going - * to exit, and they can cause the message thread to - * segfault. 
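In the child branch above, the forked message handler notices that the main srun process has gone away by reading msg_par->msg_pipe[0] until the read returns end-of-file, which happens once the parent's copy of the write end is closed. A minimal sketch of that EOF-as-exit-notification idiom follows; peer_fd is a hypothetical descriptor for the read end of such a pipe.

/* Sketch: block until the peer process goes away, detected as EOF on a
 * pipe whose write end only the peer holds. */
#include <unistd.h>

static void wait_for_peer_exit(int peer_fd)
{
	char c;

	/* read() returns 0 once every copy of the write end is closed */
	while (read(peer_fd, &c, sizeof(c)) > 0)
		;	/* discard anything the peer writes */

	close(peer_fd);
}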
- */ - /* xfree(job->forked_msg->par_msg); */ - /* xfree(job->forked_msg->msg_par); */ - /* xfree(job->forked_msg); */ - _exit(0); - } else { - /* parent */ - - slurm_attr_init(&attr); - while ((errno = pthread_create(&job->jtid, &attr, &par_thr, - (void *)job))) { - if (++retries > MAX_RETRIES) - fatal("Can't create pthread"); - sleep(1); /* sleep and try again */ - } - slurm_attr_destroy(&attr); - - debug("Started parent to msg server thread (%lu)", - (unsigned long) job->jtid); - } - - - return SLURM_SUCCESS; -} - -static void -_print_pid_list(const char *host, int ntasks, uint32_t *pid, - char *executable_name) -{ - if (_verbose) { - int i; - hostlist_t pids = hostlist_create(NULL); - char buf[MAXHOSTRANGELEN]; - - for (i = 0; i < ntasks; i++) { - snprintf(buf, sizeof(buf), "pids:%d", pid[i]); - hostlist_push(pids, buf); - } - - hostlist_ranged_string(pids, sizeof(buf), buf); - verbose("%s: %s %s", host, executable_name, buf); - } -} - -/* Set up port to handle messages from slurmctld */ -extern slurm_fd slurmctld_msg_init(void) -{ - slurm_addr slurm_address; - uint16_t port; - - if (slurmctld_fd) /* May set early for queued job allocation */ - return slurmctld_fd; - - if (opt.allocate && opt.noshell) - return -1; - - slurmctld_fd = -1; - slurmctld_comm_addr.hostname = NULL; - slurmctld_comm_addr.port = 0; - - if ((slurmctld_fd = slurm_init_msg_engine_port(0)) < 0) - fatal("slurm_init_msg_engine_port error %m"); - if (slurm_get_stream_addr(slurmctld_fd, &slurm_address) < 0) - fatal("slurm_get_stream_addr error %m"); - fd_set_nonblocking(slurmctld_fd); - /* hostname is not set, so slurm_get_addr fails - slurm_get_addr(&slurm_address, &port, hostname, sizeof(hostname)); */ - port = ntohs(slurm_address.sin_port); - slurmctld_comm_addr.hostname = xstrdup(opt.ctrl_comm_ifhn); - slurmctld_comm_addr.port = port; - debug2("slurmctld messages to host=%s,port=%u", - slurmctld_comm_addr.hostname, - slurmctld_comm_addr.port); - - return slurmctld_fd; -} - - diff --git a/src/srun/multi_prog.c b/src/srun/multi_prog.c index f2d49dcbd..91d09ce0e 100644 --- a/src/srun/multi_prog.c +++ b/src/srun/multi_prog.c @@ -11,7 +11,7 @@ * and * Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -22,7 +22,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -59,7 +59,7 @@ #include "src/common/xassert.h" #include "src/common/xmalloc.h" #include "src/common/xstring.h" -#include "src/srun/attach.h" +#include "src/srun/debugger.h" /* Given a program name, translate it to a fully qualified pathname * as needed based upon the PATH environment variable */ @@ -170,18 +170,16 @@ _set_exec_names(char *ranks, char *exec_name, int ntasks) } extern int -set_multi_name(int ntasks) +mpir_set_multi_name(int ntasks, const char *config_fname) { FILE *config_fd; char line[256]; - char *config_fname = NULL, *ranks, *exec_name, *p, *ptrptr; + char *ranks, *exec_name, *p, *ptrptr; int line_num = 0, i; for (i=0; i<ntasks; i++) { MPIR_PROCDESC *tv; tv = &MPIR_proctable[i]; - if (i == 0) - config_fname = tv->executable_name; tv->executable_name = NULL; } @@ -222,6 +220,55 @@ set_multi_name(int ntasks) return 0; } +extern void +mpir_init(int num_tasks) +{ + MPIR_proctable_size = num_tasks; + MPIR_proctable = xmalloc(sizeof(MPIR_PROCDESC) * num_tasks); + if (MPIR_proctable == NULL) + fatal("Unable to initialize MPIR_proctable: %m"); +} + +extern void +mpir_cleanup() +{ + int i; + + for (i = 0; i < MPIR_proctable_size; i++) { + xfree(MPIR_proctable[i].host_name); + xfree(MPIR_proctable[i].executable_name); + } + xfree(MPIR_proctable); +} + +extern void +mpir_set_executable_names(const char *executable_name) +{ + int i; + + for (i = 0; i < MPIR_proctable_size; i++) { + MPIR_proctable[i].executable_name = xstrdup(executable_name); + if (MPIR_proctable[i].executable_name == NULL) + fatal("Unable to set MPI_proctable executable_name:" + " %m"); + } +} + +extern void +mpir_dump_proctable() +{ + MPIR_PROCDESC *tv; + int i; + + for (i = 0; i < MPIR_proctable_size; i++) { + tv = &MPIR_proctable[i]; + if (!tv) + break; + info("task:%d, host:%s, pid:%d, executable:%s", + i, tv->host_name, tv->pid, tv->executable_name); + } +} + static int _update_task_mask(int low_num, int high_num, int ntasks, bitstr_t *task_mask) { diff --git a/src/srun/multi_prog.h b/src/srun/multi_prog.h index 28c2cc37c..e32d98efb 100644 --- a/src/srun/multi_prog.h +++ b/src/srun/multi_prog.h @@ -7,7 +7,7 @@ * and * Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -18,7 +18,7 @@ * any later version. * * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under + * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. 
You must obey the GNU * General Public License in all respects for all of the code used other than @@ -42,7 +42,11 @@ /* set global MPIR_PROCDESC executable names based upon multi-program * configuration file */ -extern int set_multi_name(int ntasks); +extern int mpir_set_multi_name(int ntasks, const char *config_fname); +extern void mpir_init(int num_tasks); +extern void mpir_cleanup(void); +extern void mpir_set_executable_names(const char *executable_name); +extern void mpir_dump_proctable(void); /* * Verify that we have a valid executable program specified for each task diff --git a/src/srun/opt.c b/src/srun/opt.c index 06afe966a..59d46290e 100644 --- a/src/srun/opt.c +++ b/src/srun/opt.c @@ -1,11 +1,11 @@ /*****************************************************************************\ * opt.c - options processing for srun - * $Id: opt.c 13727 2008-03-27 23:49:35Z jette $ + * $Id: opt.c 14110 2008-05-22 16:34:50Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -72,6 +72,7 @@ #include "src/common/list.h" #include "src/common/log.h" #include "src/common/parse_time.h" +#include "src/common/proc_args.h" #include "src/common/slurm_protocol_api.h" #include "src/common/slurm_protocol_interface.h" #include "src/common/uid.h" @@ -82,9 +83,9 @@ #include "src/common/optz.h" #include "src/api/pmi_server.h" -#include "src/srun/attach.h" #include "src/srun/multi_prog.h" #include "src/srun/opt.h" +#include "src/srun/debugger.h" #include "src/common/mpi.h" /* generic OPT_ definitions -- mainly for use with env vars */ @@ -106,6 +107,8 @@ #define OPT_NCORES 0x11 #define OPT_NTHREADS 0x12 #define OPT_EXCLUSIVE 0x13 +#define OPT_OPEN_MODE 0x14 +#define OPT_ACCTG_FREQ 0x15 /* generic getopt_long flags, integers and *not* valid characters */ #define LONG_OPT_HELP 0x100 @@ -122,7 +125,6 @@ #define LONG_OPT_GID 0x10b #define LONG_OPT_MPI 0x10c #define LONG_OPT_CORE 0x10e -#define LONG_OPT_NOSHELL 0x10f #define LONG_OPT_DEBUG_TS 0x110 #define LONG_OPT_CONNTYPE 0x111 #define LONG_OPT_TEST_ONLY 0x113 @@ -141,9 +143,7 @@ #define LONG_OPT_MEM_BIND 0x120 #define LONG_OPT_CTRL_COMM_IFHN 0x121 #define LONG_OPT_MULTI 0x122 -#define LONG_OPT_NO_REQUEUE 0x123 #define LONG_OPT_COMMENT 0x124 -#define LONG_OPT_REQUEUE 0x125 #define LONG_OPT_SOCKETSPERNODE 0x130 #define LONG_OPT_CORESPERSOCKET 0x131 #define LONG_OPT_THREADSPERCORE 0x132 @@ -153,7 +153,7 @@ #define LONG_OPT_NTASKSPERNODE 0x136 #define LONG_OPT_NTASKSPERSOCKET 0x137 #define LONG_OPT_NTASKSPERCORE 0x138 -#define LONG_OPT_JOBMEM 0x13a +#define LONG_OPT_TASK_MEM 0x13a #define LONG_OPT_HINT 0x13b #define LONG_OPT_BLRTS_IMAGE 0x140 #define LONG_OPT_LINUX_IMAGE 0x141 @@ -161,27 +161,23 @@ #define LONG_OPT_RAMDISK_IMAGE 0x143 #define LONG_OPT_REBOOT 0x144 #define LONG_OPT_GET_USER_ENV 0x145 +#define LONG_OPT_PTY 0x146 +#define LONG_OPT_CHECKPOINT 0x147 +#define LONG_OPT_CHECKPOINT_PATH 0x148 +#define LONG_OPT_OPEN_MODE 0x149 +#define LONG_OPT_ACCTG_FREQ 0x14a /*---- global variables, defined in opt.h ----*/ -char **remote_argv; -int remote_argc; int _verbose; -enum modes mode; opt_t opt; /*---- forward declarations of static functions ----*/ typedef struct env_vars 
env_vars_t; -/* return command name from its full path name */ -static char * _base_name(char* command); - -static List _create_path_list(void); /* Get a decimal integer from arg */ static int _get_int(const char *arg, const char *what, bool positive); -static bool _get_resource_range(const char *arg, const char *what, - int *min, int *max, bool isFatal); static void _help(void); @@ -193,8 +189,8 @@ static void _opt_default(void); /* set options based upon env vars */ static void _opt_env(void); + static void _opt_args(int argc, char **argv); -static void _proc_get_user_env(char *optarg); /* list known options and their settings */ static void _opt_list(void); @@ -202,39 +198,16 @@ static void _opt_list(void); /* verify options sanity */ static bool _opt_verify(void); -static void _print_version(void); - static void _process_env_var(env_vars_t *e, const char *val); -static uint16_t _parse_mail_type(const char *arg); -static char *_print_mail_type(const uint16_t type); - -/* search PATH for command returns full path */ -static char *_search_path(char *, bool, int); - -static long _to_bytes(const char *arg); - static bool _under_parallel_debugger(void); static void _usage(void); static bool _valid_node_list(char **node_list_pptr); -static task_dist_states_t _verify_dist_type(const char *arg, uint32_t *psize); -static bool _verify_socket_core_thread_count(const char *arg, - int *min_sockets, int *max_sockets, - int *min_cores, int *max_cores, - int *min_threads, int *max_threads, - cpu_bind_type_t *cpu_bind_type); -static bool _verify_hint(const char *arg, - int *min_sockets, int *max_sockets, - int *min_cores, int *max_cores, - int *min_threads, int *max_threads, - cpu_bind_type_t *cpu_bind_type); static int _verify_cpu_bind(const char *arg, char **cpu_bind, - cpu_bind_type_t *cpu_bind_type); -static int _verify_geometry(const char *arg, uint16_t *geometry); + cpu_bind_type_t *flags); static int _verify_mem_bind(const char *arg, char **mem_bind, - mem_bind_type_t *mem_bind_type); -static int _verify_conn_type(const char *arg); + mem_bind_type_t *flags); /*---[ end forward declarations of static functions ]---------------------*/ @@ -249,6 +222,9 @@ int initialize_and_process_args(int argc, char *argv[]) /* initialize options with argv */ _opt_args(argc, argv); + if (!_opt_verify()) + exit(1); + if (_verbose > 3) _opt_list(); @@ -256,11 +232,6 @@ int initialize_and_process_args(int argc, char *argv[]) } -static void _print_version(void) -{ - printf("%s %s\n", PACKAGE, SLURM_VERSION); -} - /* * If the node list supplied is a file name, translate that into * a list of nodes, we orphan the data pointed to @@ -298,117 +269,6 @@ static bool _valid_node_list(char **node_list_pptr) return true; } -/* - * verify that a distribution type in arg is of a known form - * returns the task_dist_states, or -1 if state is unknown - */ -static task_dist_states_t _verify_dist_type(const char *arg, - uint32_t *plane_size) -{ - int len = strlen(arg); - char *dist_str = NULL; - task_dist_states_t result = SLURM_DIST_UNKNOWN; - bool lllp_dist = false, plane_dist = false; - - dist_str = strchr(arg,':'); - if (dist_str != NULL) { - /* -m cyclic|block:cyclic|block */ - lllp_dist = true; - } else { - /* -m plane=<plane_size> */ - dist_str = strchr(arg,'='); - if(dist_str != NULL) { - *plane_size=atoi(dist_str+1); - len = dist_str-arg; - plane_dist = true; - } - } - - if (lllp_dist) { - if (strcasecmp(arg, "cyclic:cyclic") == 0) { - result = SLURM_DIST_CYCLIC_CYCLIC; - } else if (strcasecmp(arg, "cyclic:block") == 0) { 
- result = SLURM_DIST_CYCLIC_BLOCK; - } else if (strcasecmp(arg, "block:block") == 0) { - result = SLURM_DIST_BLOCK_BLOCK; - } else if (strcasecmp(arg, "block:cyclic") == 0) { - result = SLURM_DIST_BLOCK_CYCLIC; - } - } else if (plane_dist) { - if (strncasecmp(arg, "plane", len) == 0) { - result = SLURM_DIST_PLANE; - } - } else { - if (strncasecmp(arg, "cyclic", len) == 0) { - result = SLURM_DIST_CYCLIC; - } else if (strncasecmp(arg, "block", len) == 0) { - result = SLURM_DIST_BLOCK; - } else if ((strncasecmp(arg, "arbitrary", len) == 0) || - (strncasecmp(arg, "hostfile", len) == 0)) { - result = SLURM_DIST_ARBITRARY; - } - } - - return result; -} - -/* - * verify that a connection type in arg is of known form - * returns the connection_type or -1 if not recognized - */ -static int _verify_conn_type(const char *arg) -{ - int len = strlen(arg); - - if (!strncasecmp(arg, "MESH", len)) - return SELECT_MESH; - else if (!strncasecmp(arg, "TORUS", len)) - return SELECT_TORUS; - else if (!strncasecmp(arg, "NAV", len)) - return SELECT_NAV; - - error("invalid --conn-type argument %s ignored.", arg); - return -1; -} - -/* - * verify geometry arguments, must have proper count - * returns -1 on error, 0 otherwise - */ -static int _verify_geometry(const char *arg, uint16_t *geometry) -{ - char* token, *delimiter = ",x", *next_ptr; - int i, rc = 0; - char* geometry_tmp = xstrdup(arg); - char* original_ptr = geometry_tmp; - - token = strtok_r(geometry_tmp, delimiter, &next_ptr); - for (i=0; i<SYSTEM_DIMENSIONS; i++) { - if (token == NULL) { - error("insufficient dimensions in --geometry"); - rc = -1; - break; - } - geometry[i] = (uint16_t)atoi(token); - if (geometry[i] == 0 || geometry[i] == (uint16_t)NO_VAL) { - error("invalid --geometry argument"); - rc = -1; - break; - } - geometry_tmp = next_ptr; - token = strtok_r(geometry_tmp, delimiter, &next_ptr); - } - if (token != NULL) { - error("too many dimensions in --geometry"); - rc = -1; - } - - if (original_ptr) - xfree(original_ptr); - - return rc; -} - /* * _isvalue * returns 1 is the argument appears to be a value, 0 otherwise @@ -428,27 +288,64 @@ static int _isvalue(char *arg) { return 0; /* not a value */ } +/* + * First clear all of the bits in "*data" which are set in "clear_mask". + * Then set all of the bits in "*data" that are set in "set_mask". 
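clear_then_set(), documented just above and defined immediately below, collapses the repeated clear-every-bind-bit-then-set sequences of _verify_cpu_bind() and _verify_mem_bind() into one call. The following usage sketch is self-contained, so it repeats the helper and substitutes illustrative BIND_* values for the real CPU_BIND_* constants from the SLURM headers.

/* Usage sketch for clear_then_set(): replace one bind policy bit with
 * another in a single call.  Flag values are illustrative only. */
#include <stdio.h>

#define BIND_NONE 0x01
#define BIND_RANK 0x02
#define BIND_MAP  0x04
#define BIND_MASK 0x08

static void clear_then_set(int *data, int clear_mask, int set_mask)
{
	*data &= ~clear_mask;
	*data |= set_mask;
}

int main(void)
{
	int flags = BIND_NONE;
	int bind_bits = BIND_NONE | BIND_RANK | BIND_MAP | BIND_MASK;

	/* like "--cpu_bind=rank": drop any earlier policy, keep other bits */
	clear_then_set(&flags, bind_bits, BIND_RANK);
	printf("flags = 0x%x\n", flags);	/* prints 0x2 */
	return 0;
}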
+ */ +static void clear_then_set(int *data, int clear_mask, int set_mask) +{ + *data &= ~clear_mask; + *data |= set_mask; +} + +static void _print_cpu_bind_help() +{ + printf( +"CPU bind options:\n" +" --cpu_bind= Bind tasks to CPUs\n" +" q[uiet] quietly bind before task runs (default)\n" +" v[erbose] verbosely report binding before task runs\n" +" no[ne] don't bind tasks to CPUs (default)\n" +" rank bind by task rank\n" +" map_cpu:<list> specify a CPU ID binding for each task\n" +" where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" +" mask_cpu:<list> specify a CPU ID binding mask for each task\n" +" where <list> is <mask1>,<mask2>,...<maskN>\n" +" sockets auto-generated masks bind to sockets\n" +" cores auto-generated masks bind to cores\n" +" threads auto-generated masks bind to threads\n" +" help show this help message\n"); +} + /* * verify cpu_bind arguments + * + * we support different launch policy names + * we also allow a verbose setting to be specified + * --cpu_bind=threads + * --cpu_bind=cores + * --cpu_bind=sockets + * --cpu_bind=v + * --cpu_bind=rank,v + * --cpu_bind=rank + * --cpu_bind={MAP_CPU|MASK_CPU}:0,1,2,3,4 + * + * * returns -1 on error, 0 otherwise */ static int _verify_cpu_bind(const char *arg, char **cpu_bind, - cpu_bind_type_t *cpu_bind_type) + cpu_bind_type_t *flags) { char *buf, *p, *tok; - if (!arg) { + int bind_bits = + CPU_BIND_NONE|CPU_BIND_RANK|CPU_BIND_MAP|CPU_BIND_MASK; + int bind_to_bits = + CPU_BIND_TO_SOCKETS|CPU_BIND_TO_CORES|CPU_BIND_TO_THREADS; + + if (arg == NULL) { return 0; } - /* we support different launch policy names - * we also allow a verbose setting to be specified - * --cpu_bind=threads - * --cpu_bind=cores - * --cpu_bind=sockets - * --cpu_bind=v - * --cpu_bind=rank,v - * --cpu_bind=rank - * --cpu_bind={MAP_CPU|MASK_CPU}:0,1,2,3,4 - */ + buf = xstrdup(arg); p = buf; /* change all ',' delimiters not followed by a digit to ';' */ @@ -462,50 +359,27 @@ static int _verify_cpu_bind(const char *arg, char **cpu_bind, p = buf; while ((tok = strsep(&p, ";"))) { if (strcasecmp(tok, "help") == 0) { - printf( -"CPU bind options:\n" -" --cpu_bind= Bind tasks to CPUs\n" -" q[uiet] quietly bind before task runs (default)\n" -" v[erbose] verbosely report binding before task runs\n" -" no[ne] don't bind tasks to CPUs (default)\n" -" rank bind by task rank\n" -" map_cpu:<list> specify a CPU ID binding for each task\n" -" where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" -" mask_cpu:<list> specify a CPU ID binding mask for each task\n" -" where <list> is <mask1>,<mask2>,...<maskN>\n" -" sockets auto-generated masks bind to sockets\n" -" cores auto-generated masks bind to cores\n" -" threads auto-generated masks bind to threads\n" -" help show this help message\n"); + _print_cpu_bind_help(); return 1; } else if ((strcasecmp(tok, "q") == 0) || (strcasecmp(tok, "quiet") == 0)) { - *cpu_bind_type &= ~CPU_BIND_VERBOSE; + *flags &= ~CPU_BIND_VERBOSE; } else if ((strcasecmp(tok, "v") == 0) || (strcasecmp(tok, "verbose") == 0)) { - *cpu_bind_type |= CPU_BIND_VERBOSE; + *flags |= CPU_BIND_VERBOSE; } else if ((strcasecmp(tok, "no") == 0) || (strcasecmp(tok, "none") == 0)) { - *cpu_bind_type |= CPU_BIND_NONE; - *cpu_bind_type &= ~CPU_BIND_RANK; - *cpu_bind_type &= ~CPU_BIND_MAP; - *cpu_bind_type &= ~CPU_BIND_MASK; + clear_then_set((int *)flags, bind_bits, CPU_BIND_NONE); xfree(*cpu_bind); } else if (strcasecmp(tok, "rank") == 0) { - *cpu_bind_type &= ~CPU_BIND_NONE; - *cpu_bind_type |= CPU_BIND_RANK; - *cpu_bind_type &= ~CPU_BIND_MAP; - *cpu_bind_type &= ~CPU_BIND_MASK; + 
clear_then_set((int *)flags, bind_bits, CPU_BIND_RANK); xfree(*cpu_bind); } else if ((strncasecmp(tok, "map_cpu", 7) == 0) || (strncasecmp(tok, "mapcpu", 6) == 0)) { char *list; list = strsep(&tok, ":="); list = strsep(&tok, ":="); - *cpu_bind_type &= ~CPU_BIND_NONE; - *cpu_bind_type &= ~CPU_BIND_RANK; - *cpu_bind_type |= CPU_BIND_MAP; - *cpu_bind_type &= ~CPU_BIND_MASK; + clear_then_set((int *)flags, bind_bits, CPU_BIND_MAP); xfree(*cpu_bind); if (list && *list) { *cpu_bind = xstrdup(list); @@ -519,10 +393,7 @@ static int _verify_cpu_bind(const char *arg, char **cpu_bind, char *list; list = strsep(&tok, ":="); list = strsep(&tok, ":="); - *cpu_bind_type &= ~CPU_BIND_NONE; - *cpu_bind_type &= ~CPU_BIND_RANK; - *cpu_bind_type &= ~CPU_BIND_MAP; - *cpu_bind_type |= CPU_BIND_MASK; + clear_then_set((int *)flags, bind_bits, CPU_BIND_MASK); xfree(*cpu_bind); if (list && *list) { *cpu_bind = xstrdup(list); @@ -533,19 +404,16 @@ static int _verify_cpu_bind(const char *arg, char **cpu_bind, } } else if ((strcasecmp(tok, "socket") == 0) || (strcasecmp(tok, "sockets") == 0)) { - *cpu_bind_type |= CPU_BIND_TO_SOCKETS; - *cpu_bind_type &= ~CPU_BIND_TO_CORES; - *cpu_bind_type &= ~CPU_BIND_TO_THREADS; + clear_then_set((int *)flags, bind_to_bits, + CPU_BIND_TO_SOCKETS); } else if ((strcasecmp(tok, "core") == 0) || (strcasecmp(tok, "cores") == 0)) { - *cpu_bind_type &= ~CPU_BIND_TO_SOCKETS; - *cpu_bind_type |= CPU_BIND_TO_CORES; - *cpu_bind_type &= ~CPU_BIND_TO_THREADS; + clear_then_set((int *)flags, bind_to_bits, + CPU_BIND_TO_CORES); } else if ((strcasecmp(tok, "thread") == 0) || (strcasecmp(tok, "threads") == 0)) { - *cpu_bind_type &= ~CPU_BIND_TO_SOCKETS; - *cpu_bind_type &= ~CPU_BIND_TO_CORES; - *cpu_bind_type |= CPU_BIND_TO_THREADS; + clear_then_set((int *)flags, bind_to_bits, + CPU_BIND_TO_THREADS); } else { error("unrecognized --cpu_bind argument \"%s\"", tok); xfree(buf); @@ -557,24 +425,46 @@ static int _verify_cpu_bind(const char *arg, char **cpu_bind, return 0; } +static void _print_mem_bind_help() +{ + printf( +"Memory bind options:\n" +" --mem_bind= Bind memory to locality domains (ldom)\n" +" q[uiet] quietly bind before task runs (default)\n" +" v[erbose] verbosely report binding before task runs\n" +" no[ne] don't bind tasks to memory (default)\n" +" rank bind by task rank\n" +" local bind to memory local to processor\n" +" map_mem:<list> specify a memory binding for each task\n" +" where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" +" mask_mem:<list> specify a memory binding mask for each tasks\n" +" where <list> is <mask1>,<mask2>,...<maskN>\n" +" help show this help message\n"); +} + /* * verify mem_bind arguments + * + * we support different memory binding names + * we also allow a verbose setting to be specified + * --mem_bind=v + * --mem_bind=rank,v + * --mem_bind=rank + * --mem_bind={MAP_MEM|MASK_MEM}:0,1,2,3,4 + * * returns -1 on error, 0 otherwise */ static int _verify_mem_bind(const char *arg, char **mem_bind, - mem_bind_type_t *mem_bind_type) + mem_bind_type_t *flags) { char *buf, *p, *tok; - if (!arg) { + int bind_bits = MEM_BIND_NONE|MEM_BIND_RANK|MEM_BIND_LOCAL| + MEM_BIND_MAP|MEM_BIND_MASK; + + if (arg == NULL) { return 0; } - /* we support different memory binding names - * we also allow a verbose setting to be specified - * --mem_bind=v - * --mem_bind=rank,v - * --mem_bind=rank - * --mem_bind={MAP_MEM|MASK_MEM}:0,1,2,3,4 - */ + buf = xstrdup(arg); p = buf; /* change all ',' delimiters not followed by a digit to ';' */ @@ -588,59 +478,31 @@ static int _verify_mem_bind(const 
char *arg, char **mem_bind, p = buf; while ((tok = strsep(&p, ";"))) { if (strcasecmp(tok, "help") == 0) { - printf( -"Memory bind options:\n" -" --mem_bind= Bind memory to locality domains (ldom)\n" -" q[uiet] quietly bind before task runs (default)\n" -" v[erbose] verbosely report binding before task runs\n" -" no[ne] don't bind tasks to memory (default)\n" -" rank bind by task rank\n" -" local bind to memory local to processor\n" -" map_mem:<list> specify a memory binding for each task\n" -" where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n" -" mask_mem:<list> specify a memory binding mask for each tasks\n" -" where <list> is <mask1>,<mask2>,...<maskN>\n" -" help show this help message\n"); + _print_mem_bind_help(); return 1; } else if ((strcasecmp(tok, "q") == 0) || (strcasecmp(tok, "quiet") == 0)) { - *mem_bind_type &= ~MEM_BIND_VERBOSE; + *flags &= ~MEM_BIND_VERBOSE; } else if ((strcasecmp(tok, "v") == 0) || (strcasecmp(tok, "verbose") == 0)) { - *mem_bind_type |= MEM_BIND_VERBOSE; + *flags |= MEM_BIND_VERBOSE; } else if ((strcasecmp(tok, "no") == 0) || (strcasecmp(tok, "none") == 0)) { - *mem_bind_type |= MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; + clear_then_set((int *)flags, bind_bits, MEM_BIND_NONE); xfree(*mem_bind); } else if (strcasecmp(tok, "rank") == 0) { - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type |= MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; + clear_then_set((int *)flags, bind_bits, MEM_BIND_RANK); xfree(*mem_bind); } else if (strcasecmp(tok, "local") == 0) { - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type |= MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; + clear_then_set((int *)flags, bind_bits, MEM_BIND_LOCAL); xfree(*mem_bind); } else if ((strncasecmp(tok, "map_mem", 7) == 0) || (strncasecmp(tok, "mapmem", 6) == 0)) { char *list; list = strsep(&tok, ":="); list = strsep(&tok, ":="); - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type |= MEM_BIND_MAP; - *mem_bind_type &= ~MEM_BIND_MASK; + clear_then_set((int *)flags, bind_bits, MEM_BIND_MAP); xfree(*mem_bind); if (list && *list) { *mem_bind = xstrdup(list); @@ -654,11 +516,7 @@ static int _verify_mem_bind(const char *arg, char **mem_bind, char *list; list = strsep(&tok, ":="); list = strsep(&tok, ":="); - *mem_bind_type &= ~MEM_BIND_NONE; - *mem_bind_type &= ~MEM_BIND_RANK; - *mem_bind_type &= ~MEM_BIND_LOCAL; - *mem_bind_type &= ~MEM_BIND_MAP; - *mem_bind_type |= MEM_BIND_MASK; + clear_then_set((int *)flags, bind_bits, MEM_BIND_MASK); xfree(*mem_bind); if (list && *list) { *mem_bind = xstrdup(list); @@ -678,203 +536,6 @@ static int _verify_mem_bind(const char *arg, char **mem_bind, return 0; } -/* - * verify that a resource counts in arg are of a known form X, X:X, X:X:X, or - * X:X:X:X, where X is defined as either (count, min-max, or '*') - * RET true if valid - */ -static bool -_verify_socket_core_thread_count(const char *arg, - int *min_sockets, int *max_sockets, - int *min_cores, int *max_cores, - int *min_threads, int *max_threads, - cpu_bind_type_t *cpu_bind_type) -{ - bool tmp_val,ret_val; - int i,j; - const char *cur_ptr = arg; - char buf[3][48]; /* each can hold INT64_MAX - INT64_MAX */ - buf[0][0] = '\0'; - buf[1][0] = '\0'; - buf[2][0] = '\0'; - - for 
(j=0;j<3;j++) { - for (i=0;i<47;i++) { - if (*cur_ptr == '\0' || *cur_ptr ==':') break; - buf[j][i] = *cur_ptr++; - } - if (*cur_ptr == '\0') break; - xassert(*cur_ptr == ':'); - buf[j][i] = '\0'; - cur_ptr++; - } - /* if cpu_bind_type doesn't already have a auto preference, choose - * the level based on the level of the -E specification - */ - if (!(*cpu_bind_type & (CPU_BIND_TO_SOCKETS | - CPU_BIND_TO_CORES | - CPU_BIND_TO_THREADS))) { - if (j == 0) { - *cpu_bind_type |= CPU_BIND_TO_SOCKETS; - } else if (j == 1) { - *cpu_bind_type |= CPU_BIND_TO_CORES; - } else if (j == 2) { - *cpu_bind_type |= CPU_BIND_TO_THREADS; - } - } - buf[j][i] = '\0'; - - ret_val = true; - tmp_val = _get_resource_range(&buf[0][0], "first arg of -B", - min_sockets, max_sockets, true); - ret_val = ret_val && tmp_val; - tmp_val = _get_resource_range(&buf[1][0], "second arg of -B", - min_cores, max_cores, true); - ret_val = ret_val && tmp_val; - tmp_val = _get_resource_range(&buf[2][0], "third arg of -B", - min_threads, max_threads, true); - ret_val = ret_val && tmp_val; - - return ret_val; -} - -/* - * verify that a hint is valid and convert it into the implied settings - * RET true if valid - */ -static bool -_verify_hint(const char *arg, - int *min_sockets, int *max_sockets, - int *min_cores, int *max_cores, - int *min_threads, int *max_threads, - cpu_bind_type_t *cpu_bind_type) -{ - char *buf, *p, *tok; - if (!arg) { - return true; - } - - buf = xstrdup(arg); - p = buf; - /* change all ',' delimiters not followed by a digit to ';' */ - /* simplifies parsing tokens while keeping map/mask together */ - while (p[0] != '\0') { - if ((p[0] == ',') && (!isdigit(p[1]))) - p[0] = ';'; - p++; - } - - p = buf; - while ((tok = strsep(&p, ";"))) { - if (strcasecmp(tok, "help") == 0) { - printf( -"Application hint options:\n" -" --hint= Bind tasks according to application hints\n" -" compute_bound use all cores in each physical CPU\n" -" memory_bound use only one core in each physical CPU\n" -" [no]multithread [don't] use extra threads with in-core multi-threading\n" -" help show this help message\n"); - return 1; - } else if (strcasecmp(tok, "compute_bound") == 0) { - *min_sockets = 1; - *max_sockets = INT_MAX; - *min_cores = 1; - *max_cores = INT_MAX; - *cpu_bind_type |= CPU_BIND_TO_CORES; - } else if (strcasecmp(tok, "memory_bound") == 0) { - *min_cores = 1; - *max_cores = 1; - *cpu_bind_type |= CPU_BIND_TO_CORES; - } else if (strcasecmp(tok, "multithread") == 0) { - *min_threads = 1; - *max_threads = INT_MAX; - *cpu_bind_type |= CPU_BIND_TO_THREADS; - } else if (strcasecmp(tok, "nomultithread") == 0) { - *min_threads = 1; - *max_threads = 1; - *cpu_bind_type |= CPU_BIND_TO_THREADS; - } else { - error("unrecognized --hint argument \"%s\", see --hint=help", tok); - xfree(buf); - return 1; - } - } - - xfree(buf); - return 0; -} - -/* return command name from its full path name */ -static char * _base_name(char* command) -{ - char *char_ptr, *name; - int i; - - if (command == NULL) - return NULL; - - char_ptr = strrchr(command, (int)'/'); - if (char_ptr == NULL) - char_ptr = command; - else - char_ptr++; - - i = strlen(char_ptr); - name = xmalloc(i+1); - strcpy(name, char_ptr); - return name; -} - -/* - * _to_bytes(): verify that arg is numeric with optional "G" or "M" at end - * if "G" or "M" is there, multiply by proper power of 2 and return - * number in bytes - */ -static long _to_bytes(const char *arg) -{ - char *buf; - char *endptr; - int end; - int multiplier = 1; - long result; - - buf = xstrdup(arg); - - end = 
strlen(buf) - 1; - - if (isdigit(buf[end])) { - result = strtol(buf, &endptr, 10); - - if (*endptr != '\0') - result = -result; - - } else { - - switch (toupper(buf[end])) { - - case 'G': - multiplier = 1024; - break; - - case 'M': - /* do nothing */ - break; - - default: - multiplier = -1; - } - - buf[end] = '\0'; - - result = multiplier * strtol(buf, &endptr, 10); - - if (*endptr != '\0') - result = -result; - } - - return result; -} - /* * print error message to stderr with opt.progname prepended */ @@ -943,6 +604,9 @@ static void _opt_default() opt.mem_bind = NULL; opt.time_limit = NO_VAL; opt.time_limit_str = NULL; + opt.ckpt_interval = 0; + opt.ckpt_interval_str = NULL; + opt.ckpt_path = NULL; opt.partition = NULL; opt.max_threads = MAX_THREADS; pmi_server_max_threads(opt.max_threads); @@ -953,7 +617,7 @@ static void _opt_default() opt.job_name_set = false; opt.jobid = NO_VAL; opt.jobid_set = false; - opt.dependency = NO_VAL; + opt.dependency = NULL; opt.account = NULL; opt.comment = NULL; @@ -969,17 +633,13 @@ static void _opt_default() opt.labelio = false; opt.unbuffered = false; opt.overcommit = false; - opt.batch = false; opt.shared = (uint16_t)NO_VAL; + opt.exclusive = false; opt.no_kill = false; opt.kill_bad_exit = false; opt.immediate = false; - opt.no_requeue = false; - opt.allocate = false; - opt.noshell = false; - opt.attach = NULL; opt.join = false; opt.max_wait = slurm_get_wait_time(); @@ -996,7 +656,7 @@ static void _opt_default() opt.job_min_cores = NO_VAL; opt.job_min_threads = NO_VAL; opt.job_min_memory = NO_VAL; - opt.job_max_memory = NO_VAL; + opt.task_mem = NO_VAL; opt.job_min_tmp_disk= NO_VAL; opt.hold = false; @@ -1030,8 +690,6 @@ static void _opt_default() opt.task_prolog = NULL; opt.task_epilog = NULL; - mode = MODE_NORMAL; - gethostname_short(hostname, sizeof(hostname)); opt.ctrl_comm_ifhn = xstrdup(hostname); @@ -1045,8 +703,9 @@ static void _opt_default() opt.msg_timeout = 15; } - opt.get_user_env_time = -1; - opt.get_user_env_mode = -1; + opt.pty = false; + opt.open_mode = 0; + opt.acctg_freq = -1; } /*---[ env var processing ]-----------------------------------------------*/ @@ -1076,7 +735,7 @@ env_vars_t env_vars[] = { {"SLURM_CORE_FORMAT", OPT_CORE, NULL, NULL }, {"SLURM_CPU_BIND", OPT_CPU_BIND, NULL, NULL }, {"SLURM_MEM_BIND", OPT_MEM_BIND, NULL, NULL }, -{"SLURM_DEPENDENCY", OPT_INT, &opt.dependency, NULL }, +{"SLURM_DEPENDENCY", OPT_STRING, &opt.dependency, NULL }, {"SLURM_DISTRIBUTION", OPT_DISTRIB, NULL, NULL }, {"SLURM_GEOMETRY", OPT_GEOMETRY, NULL, NULL }, {"SLURM_IMMEDIATE", OPT_INT, &opt.immediate, NULL }, @@ -1090,7 +749,6 @@ env_vars_t env_vars[] = { {"SLURM_NSOCKETS_PER_NODE",OPT_NSOCKETS,NULL, NULL }, {"SLURM_NCORES_PER_SOCKET",OPT_NCORES, NULL, NULL }, {"SLURM_NTHREADS_PER_CORE",OPT_NTHREADS,NULL, NULL }, -{"SLURM_NO_REQUEUE", OPT_INT, &opt.no_requeue, NULL }, {"SLURM_NO_ROTATE", OPT_NO_ROTATE, NULL, NULL }, {"SLURM_NPROCS", OPT_INT, &opt.nprocs, &opt.nprocs_set }, {"SLURM_OVERCOMMIT", OPT_OVERCOMMIT, NULL, NULL }, @@ -1102,6 +760,8 @@ env_vars_t env_vars[] = { {"SLURM_STDOUTMODE", OPT_STRING, &opt.ofname, NULL }, {"SLURM_THREADS", OPT_INT, &opt.max_threads, NULL }, {"SLURM_TIMELIMIT", OPT_STRING, &opt.time_limit_str,NULL }, +{"SLURM_CHECKPOINT", OPT_STRING, &opt.ckpt_interval_str, NULL }, +{"SLURM_CHECKPOINT_PATH",OPT_STRING, &opt.ckpt_path, NULL }, {"SLURM_WAIT", OPT_INT, &opt.max_wait, NULL }, {"SLURM_DISABLE_STATUS",OPT_INT, &opt.disable_status,NULL }, {"SLURM_MPI_TYPE", OPT_MPI, NULL, NULL }, @@ -1115,6 +775,9 @@ env_vars_t env_vars[] 
= { {"SLURM_TASK_EPILOG", OPT_STRING, &opt.task_epilog, NULL }, {"SLURM_WORKING_DIR", OPT_STRING, &opt.cwd, &opt.cwd_set }, {"SLURM_EXCLUSIVE", OPT_EXCLUSIVE, NULL, NULL }, +{"SLURM_OPEN_MODE", OPT_OPEN_MODE, NULL, NULL }, +{"SLURM_ACCTG_FREQ", OPT_INT, &opt.acctg_freq, NULL }, +{"SLURM_TASK_MEM", OPT_INT, &opt.task_mem, NULL }, {NULL, 0, NULL, NULL} }; @@ -1164,7 +827,7 @@ _process_env_var(env_vars_t *e, const char *val) case OPT_DISTRIB: if (strcmp(val, "unknown") == 0) break; /* ignore it, passed from salloc */ - dt = _verify_dist_type(val, &opt.plane_size); + dt = verify_dist_type(val, &opt.plane_size); if (dt == SLURM_DIST_UNKNOWN) { error("\"%s=%s\" -- invalid distribution type. " "ignoring...", e->var, val); @@ -1185,9 +848,9 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_NODES: - opt.nodes_set = _get_resource_range( val ,"OPT_NODES", - &opt.min_nodes, - &opt.max_nodes, false); + opt.nodes_set = get_resource_arg_range( val ,"OPT_NODES", + &opt.min_nodes, + &opt.max_nodes, false); if (opt.nodes_set == false) { error("\"%s=%s\" -- invalid node count. ignoring...", e->var, val); @@ -1199,15 +862,25 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_EXCLUSIVE: + opt.exclusive = true; opt.shared = 0; break; + case OPT_OPEN_MODE: + if ((val[0] == 'a') || (val[0] == 'A')) + opt.open_mode = OPEN_MODE_APPEND; + else if ((val[0] == 't') || (val[0] == 'T')) + opt.open_mode = OPEN_MODE_TRUNCATE; + else + error("Invalid SLURM_OPEN_MODE: %s. Ignored", val); + break; + case OPT_CORE: opt.core_type = core_format_type (val); break; case OPT_CONN_TYPE: - opt.conn_type = _verify_conn_type(val); + opt.conn_type = verify_conn_type(val); break; case OPT_NO_ROTATE: @@ -1215,7 +888,7 @@ _process_env_var(env_vars_t *e, const char *val) break; case OPT_GEOMETRY: - if (_verify_geometry(val, opt.geometry)) { + if (verify_geometry(val, opt.geometry)) { error("\"%s=%s\" -- invalid geometry, ignoring...", e->var, val); } @@ -1260,76 +933,12 @@ _get_int(const char *arg, const char *what, bool positive) return (int) result; } -/* - * get either 1 or 2 integers for a resource count in the form of either - * (count, min-max, or '*') - * A partial error message is passed in via the 'what' param. 
- * RET true if valid - */ -static bool -_get_resource_range(const char *arg, const char *what, int* min, int *max, - bool isFatal) -{ - char *p; - long int result; - - if (*arg == '\0') return true; - - /* wildcard meaning every possible value in range */ - if (*arg == '*' ) { - *min = 1; - *max = INT_MAX; - return true; - } - - result = strtol(arg, &p, 10); - if (*p == 'k' || *p == 'K') { - result *= 1024; - p++; - } - - if (((*p != '\0')&&(*p != '-')) || (result <= 0L)) { - error ("Invalid numeric value \"%s\" for %s.", arg, what); - if (isFatal) exit(1); - return false; - } else if (result > INT_MAX) { - error ("Numeric argument (%ld) to big for %s.", result, what); - if (isFatal) exit(1); - return false; - } - - *min = (int) result; - - if (*p == '\0') return true; - if (*p == '-') p++; - - result = strtol(p, &p, 10); - if (*p == 'k' || *p == 'K') { - result *= 1024; - p++; - } - - if (((*p != '\0')&&(*p != '-')) || (result <= 0L)) { - error ("Invalid numeric value \"%s\" for %s.", arg, what); - if (isFatal) exit(1); - return false; - } else if (result > INT_MAX) { - error ("Numeric argument (%ld) to big for %s.", result, what); - if (isFatal) exit(1); - return false; - } - - *max = (int) result; - - return true; -} - -void set_options(const int argc, char **argv, int first) +static void set_options(const int argc, char **argv) { int opt_char, option_index = 0; struct utsname name; static struct option long_options[] = { - {"attach", required_argument, 0, 'a'}, + {"attach", no_argument, 0, 'a'}, {"allocate", no_argument, 0, 'A'}, {"batch", no_argument, 0, 'b'}, {"extra-node-info", required_argument, 0, 'B'}, @@ -1347,6 +956,7 @@ void set_options(const int argc, char **argv, int first) {"no-kill", no_argument, 0, 'k'}, {"kill-on-bad-exit", no_argument, 0, 'K'}, {"label", no_argument, 0, 'l'}, + {"licenses", required_argument, 0, 'L'}, {"distribution", required_argument, 0, 'm'}, {"ntasks", required_argument, 0, 'n'}, {"nodes", required_argument, 0, 'N'}, @@ -1380,10 +990,10 @@ void set_options(const int argc, char **argv, int first) {"mincores", required_argument, 0, LONG_OPT_MINCORES}, {"minthreads", required_argument, 0, LONG_OPT_MINTHREADS}, {"mem", required_argument, 0, LONG_OPT_MEM}, - {"job-mem", required_argument, 0, LONG_OPT_JOBMEM}, + {"job-mem", required_argument, 0, LONG_OPT_TASK_MEM}, + {"task-mem", required_argument, 0, LONG_OPT_TASK_MEM}, {"hint", required_argument, 0, LONG_OPT_HINT}, {"mpi", required_argument, 0, LONG_OPT_MPI}, - {"no-shell", no_argument, 0, LONG_OPT_NOSHELL}, {"tmp", required_argument, 0, LONG_OPT_TMP}, {"jobid", required_argument, 0, LONG_OPT_JOBID}, {"msg-timeout", required_argument, 0, LONG_OPT_TIMEO}, @@ -1408,8 +1018,6 @@ void set_options(const int argc, char **argv, int first) {"nice", optional_argument, 0, LONG_OPT_NICE}, {"ctrl-comm-ifhn", required_argument, 0, LONG_OPT_CTRL_COMM_IFHN}, {"multi-prog", no_argument, 0, LONG_OPT_MULTI}, - {"no-requeue", no_argument, 0, LONG_OPT_NO_REQUEUE}, - {"requeue", no_argument, 0, LONG_OPT_REQUEUE}, {"comment", required_argument, 0, LONG_OPT_COMMENT}, {"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE}, {"cores-per-socket", required_argument, 0, LONG_OPT_CORESPERSOCKET}, @@ -1417,15 +1025,21 @@ void set_options(const int argc, char **argv, int first) {"ntasks-per-node", required_argument, 0, LONG_OPT_NTASKSPERNODE}, {"ntasks-per-socket",required_argument, 0, LONG_OPT_NTASKSPERSOCKET}, {"ntasks-per-core", required_argument, 0, LONG_OPT_NTASKSPERCORE}, + {"tasks-per-node", required_argument, 0, 
LONG_OPT_NTASKSPERNODE}, {"blrts-image", required_argument, 0, LONG_OPT_BLRTS_IMAGE}, {"linux-image", required_argument, 0, LONG_OPT_LINUX_IMAGE}, {"mloader-image", required_argument, 0, LONG_OPT_MLOADER_IMAGE}, {"ramdisk-image", required_argument, 0, LONG_OPT_RAMDISK_IMAGE}, {"reboot", no_argument, 0, LONG_OPT_REBOOT}, {"get-user-env", optional_argument, 0, LONG_OPT_GET_USER_ENV}, + {"pty", no_argument, 0, LONG_OPT_PTY}, + {"checkpoint", required_argument, 0, LONG_OPT_CHECKPOINT}, + {"checkpoint-path", required_argument, 0, LONG_OPT_CHECKPOINT_PATH}, + {"open-mode", required_argument, 0, LONG_OPT_OPEN_MODE}, + {"acctg-freq", required_argument, 0, LONG_OPT_ACCTG_FREQ}, {NULL, 0, 0, 0} }; - char *opt_string = "+a:AbB:c:C:d:D:e:g:Hi:IjJ:kKlm:n:N:" + char *opt_string = "+aAbB:c:C:d:D:e:g:Hi:IjJ:kKlL:m:n:N:" "o:Op:P:qQr:R:st:T:uU:vVw:W:x:XZ"; struct option *optz = spank_option_table_create (long_options); @@ -1437,69 +1051,32 @@ void set_options(const int argc, char **argv, int first) if(opt.progname == NULL) opt.progname = xbasename(argv[0]); - else if(!first) - argv[0] = opt.progname; else - error("opt.progname is set but it is the first time through."); + error("opt.progname is already set."); optind = 0; while((opt_char = getopt_long(argc, argv, opt_string, optz, &option_index)) != -1) { switch (opt_char) { case (int)'?': - if(first) { - fprintf(stderr, "Try \"srun --help\" for more " - "information\n"); - exit(1); - } + fprintf(stderr, + "Try \"srun --help\" for more information\n"); + exit(1); break; case (int)'a': - if(first) { - if (opt.allocate || opt.batch) { - error("can only specify one mode: " - "allocate, attach or batch."); - exit(1); - } - mode = MODE_ATTACH; - opt.attach = strdup(optarg); - } else { - error("Option '%c' can only be set " - "from srun commandline.", opt_char); - } - break; + error("Please use the \"sattach\" command instead of " + "\"srun -a/--attach\"."); + exit(1); case (int)'A': - if(first) { - if (opt.attach || opt.batch) { - error("can only specify one mode: " - "allocate, attach or batch."); - exit(1); - } - mode = MODE_ALLOCATE; - opt.allocate = true; - } else { - error("Option '%c' can only be set " - "from srun commandline.", opt_char); - } - break; + error("Please use the \"salloc\" command instead of " + "\"srun -A/--allocate\"."); + exit(1); case (int)'b': - if(first) { - if (opt.allocate || opt.attach) { - error("can only specify one mode: " - "allocate, attach or batch."); - exit(1); - } - mode = MODE_BATCH; - opt.batch = true; - } else { - error("Option '%c' can only be set " - "from srun commandline.", opt_char); - } - break; + error("Please use the \"sbatch\" command instead of " + "\"srun -b/--batch\"."); + exit(1); case (int)'B': - if(!first && opt.extra_set) - break; - - opt.extra_set = _verify_socket_core_thread_count( + opt.extra_set = verify_socket_core_thread_count( optarg, &opt.min_sockets_per_node, &opt.max_sockets_per_node, @@ -1517,37 +1094,26 @@ void set_options(const int argc, char **argv, int first) } break; case (int)'c': - if(!first && opt.cpus_set) - break; opt.cpus_set = true; opt.cpus_per_task = _get_int(optarg, "cpus-per-task", true); break; case (int)'C': - if(!first && opt.constraints) - break; xfree(opt.constraints); opt.constraints = xstrdup(optarg); break; case (int)'d': - if(!first && opt.slurmd_debug) - break; - opt.slurmd_debug = _get_int(optarg, "slurmd-debug", false); break; case (int)'D': - if(!first && opt.cwd_set) - break; - opt.cwd_set = true; xfree(opt.cwd); opt.cwd = xstrdup(optarg); break; case (int)'e': - 
if(!first && opt.efname) - break; - + if (opt.pty) + fatal("--error incompatible with --pty option"); xfree(opt.efname); if (strncasecmp(optarg, "none", (size_t) 4) == 0) opt.efname = xstrdup("/dev/null"); @@ -1555,18 +1121,15 @@ void set_options(const int argc, char **argv, int first) opt.efname = xstrdup(optarg); break; case (int)'g': - if(!first && opt.geometry) - break; - if (_verify_geometry(optarg, opt.geometry)) + if (verify_geometry(optarg, opt.geometry)) exit(1); break; case (int)'H': opt.hold = true; break; case (int)'i': - if(!first && opt.ifname) - break; - + if (opt.pty) + fatal("--input incompatible with --pty option"); xfree(opt.ifname); if (strncasecmp(optarg, "none", (size_t) 4) == 0) opt.ifname = xstrdup("/dev/null"); @@ -1580,9 +1143,6 @@ void set_options(const int argc, char **argv, int first) opt.join = true; break; case (int)'J': - if(!first && opt.job_name_set) - break; - opt.job_name_set = true; xfree(opt.job_name); opt.job_name = xstrdup(optarg); @@ -1596,10 +1156,12 @@ void set_options(const int argc, char **argv, int first) case (int)'l': opt.labelio = true; break; + case 'L': + xfree(opt.licenses); + opt.licenses = xstrdup(optarg); + break; case (int)'m': - if(!first && opt.distribution) - break; - opt.distribution = _verify_dist_type(optarg, + opt.distribution = verify_dist_type(optarg, &opt.plane_size); if (opt.distribution == SLURM_DIST_UNKNOWN) { error("distribution type `%s' " @@ -1608,22 +1170,16 @@ void set_options(const int argc, char **argv, int first) } break; case (int)'n': - if(!first && opt.nprocs_set) - break; - opt.nprocs_set = true; opt.nprocs = _get_int(optarg, "number of tasks", true); break; case (int)'N': - if(!first && opt.nodes_set) - break; - opt.nodes_set = - _get_resource_range(optarg, - "requested node count", - &opt.min_nodes, - &opt.max_nodes, true); + get_resource_arg_range( optarg, + "requested node count", + &opt.min_nodes, + &opt.max_nodes, true ); if (opt.nodes_set == false) { error("invalid resource allocation -N `%s'", @@ -1632,9 +1188,8 @@ void set_options(const int argc, char **argv, int first) } break; case (int)'o': - if(!first && opt.ofname) - break; - + if (opt.pty) + fatal("--output incompatible with --pty option"); xfree(opt.ofname); if (strncasecmp(optarg, "none", (size_t) 4) == 0) opt.ofname = xstrdup("/dev/null"); @@ -1645,32 +1200,20 @@ void set_options(const int argc, char **argv, int first) opt.overcommit = true; break; case (int)'p': - if(!first && opt.partition) - break; - xfree(opt.partition); opt.partition = xstrdup(optarg); break; case (int)'P': - if(!first && opt.dependency) - break; - - opt.dependency = _get_int(optarg, "dependency", true); + xfree(opt.dependency); + opt.dependency = xstrdup(optarg); break; case (int)'q': opt.quit_on_intr = true; break; case (int) 'Q': - if(!first && opt.quiet) - break; - opt.quiet++; break; case (int)'r': - if(!first && opt.relative) - break; - - //xfree(opt.relative); opt.relative = _get_int(optarg, "relative", false); opt.relative_set = true; break; @@ -1681,16 +1224,10 @@ void set_options(const int argc, char **argv, int first) opt.shared = 1; break; case (int)'t': - if(!first && opt.time_limit_str) - break; - xfree(opt.time_limit_str); opt.time_limit_str = xstrdup(optarg); break; case (int)'T': - if(!first && opt.max_threads) - break; - opt.max_threads = _get_int(optarg, "max_threads", true); pmi_server_max_threads(opt.max_threads); @@ -1699,36 +1236,19 @@ void set_options(const int argc, char **argv, int first) opt.unbuffered = true; break; case (int)'U': - if(!first
&& opt.account) - break; xfree(opt.account); opt.account = xstrdup(optarg); break; case (int)'v': - if(!first && _verbose) - break; - _verbose++; break; case (int)'V': - _print_version(); + print_slurm_version(); exit(0); break; case (int)'w': - if(!first && opt.nodelist) - break; - xfree(opt.nodelist); opt.nodelist = xstrdup(optarg); -#ifdef HAVE_BG - info("\tThe nodelist option should only be used if\n" - "\tthe block you are asking for can be created.\n" - "\tIt should also include all the midplanes you\n" - "\twant to use, partial lists may not\n" - "\twork correctly.\n" - "\tPlease consult smap before using this option\n" - "\tor your job may be stuck with no way to run."); -#endif break; case (int)'W': opt.max_wait = _get_int(optarg, "wait", false); @@ -1752,6 +1272,7 @@ void set_options(const int argc, char **argv, int first) opt.contiguous = true; break; case LONG_OPT_EXCLUSIVE: + opt.exclusive = true; opt.shared = 0; break; case LONG_OPT_CPU_BIND: @@ -1785,16 +1306,16 @@ void set_options(const int argc, char **argv, int first) true); break; case LONG_OPT_MEM: - opt.job_min_memory = (int) _to_bytes(optarg); + opt.job_min_memory = (int) str_to_bytes(optarg); if (opt.job_min_memory < 0) { error("invalid memory constraint %s", optarg); exit(1); } break; - case LONG_OPT_JOBMEM: - opt.job_max_memory = (int) _to_bytes(optarg); - if (opt.job_max_memory < 0) { + case LONG_OPT_TASK_MEM: + opt.task_mem = (int) str_to_bytes(optarg); + if (opt.task_mem < 0) { error("invalid memory constraint %s", optarg); exit(1); @@ -1808,11 +1329,8 @@ void set_options(const int argc, char **argv, int first) optarg); } break; - case LONG_OPT_NOSHELL: - opt.noshell = true; - break; case LONG_OPT_TMP: - opt.job_min_tmp_disk = _to_bytes(optarg); + opt.job_min_tmp_disk = str_to_bytes(optarg); if (opt.job_min_tmp_disk < 0) { error("invalid tmp value %s", optarg); exit(1); @@ -1862,7 +1380,7 @@ void set_options(const int argc, char **argv, int first) _usage(); exit(0); case LONG_OPT_CONNTYPE: - opt.conn_type = _verify_conn_type(optarg); + opt.conn_type = verify_conn_type(optarg); break; case LONG_OPT_TEST_ONLY: opt.test_only = true; @@ -1897,7 +1415,7 @@ void set_options(const int argc, char **argv, int first) } break; case LONG_OPT_MAIL_TYPE: - opt.mail_type |= _parse_mail_type(optarg); + opt.mail_type |= parse_mail_type(optarg); if (opt.mail_type == 0) fatal("--mail-type=%s invalid", optarg); break; @@ -1940,35 +1458,30 @@ void set_options(const int argc, char **argv, int first) case LONG_OPT_MULTI: opt.multi_prog = true; break; - case LONG_OPT_NO_REQUEUE: - opt.no_requeue = true; - break; - case LONG_OPT_REQUEUE: - opt.no_requeue = false; /* the default */ - break; case LONG_OPT_COMMENT: - if(!first && opt.comment) - break; xfree(opt.comment); opt.comment = xstrdup(optarg); break; case LONG_OPT_SOCKETSPERNODE: - _get_resource_range( optarg, "sockets-per-node", - &opt.min_sockets_per_node, - &opt.max_sockets_per_node, true ); + get_resource_arg_range( optarg, "sockets-per-node", + &opt.min_sockets_per_node, + &opt.max_sockets_per_node, + true ); break; case LONG_OPT_CORESPERSOCKET: - _get_resource_range( optarg, "cores-per-socket", - &opt.min_cores_per_socket, - &opt.max_cores_per_socket, true); + get_resource_arg_range( optarg, "cores-per-socket", + &opt.min_cores_per_socket, + &opt.max_cores_per_socket, + true); break; case LONG_OPT_THREADSPERCORE: - _get_resource_range( optarg, "threads-per-core", - &opt.min_threads_per_core, - &opt.max_threads_per_core, true ); + get_resource_arg_range( optarg, 
"threads-per-core", + &opt.min_threads_per_core, + &opt.max_threads_per_core, + true ); break; case LONG_OPT_HINT: - if (_verify_hint(optarg, + if (verify_hint(optarg, &opt.min_sockets_per_node, &opt.max_sockets_per_node, &opt.min_cores_per_socket, @@ -1992,26 +1505,18 @@ void set_options(const int argc, char **argv, int first) true); break; case LONG_OPT_BLRTS_IMAGE: - if(!first && opt.blrtsimage) - break; xfree(opt.blrtsimage); opt.blrtsimage = xstrdup(optarg); break; case LONG_OPT_LINUX_IMAGE: - if(!first && opt.linuximage) - break; xfree(opt.linuximage); opt.linuximage = xstrdup(optarg); break; case LONG_OPT_MLOADER_IMAGE: - if(!first && opt.mloaderimage) - break; xfree(opt.mloaderimage); opt.mloaderimage = xstrdup(optarg); break; case LONG_OPT_RAMDISK_IMAGE: - if(!first && opt.ramdiskimage) - break; xfree(opt.ramdiskimage); opt.ramdiskimage = xstrdup(optarg); break; @@ -2019,10 +1524,43 @@ void set_options(const int argc, char **argv, int first) opt.reboot = true; break; case LONG_OPT_GET_USER_ENV: - if (optarg) - _proc_get_user_env(optarg); - else - opt.get_user_env_time = 0; + error("--get-user-env is no longer supported in srun, use sbatch"); + break; + case LONG_OPT_PTY: +#ifdef HAVE_PTY_H + opt.pty = true; + opt.unbuffered = true; /* implicit */ + if (opt.ifname) + fatal("--input incompatable with --pty option"); + if (opt.ofname) + fatal("--output incompatable with --pty option"); + if (opt.efname) + fatal("--error incompatable with --pty option"); +#else + error("--pty not currently supported on this system type"); +#endif + break; + case LONG_OPT_CHECKPOINT: + xfree(opt.ckpt_interval_str); + opt.ckpt_interval_str = xstrdup(optarg); + break; + case LONG_OPT_OPEN_MODE: + if ((optarg[0] == 'a') || (optarg[0] == 'A')) + opt.open_mode = OPEN_MODE_APPEND; + else if ((optarg[0] == 't') || (optarg[0] == 'T')) + opt.open_mode = OPEN_MODE_TRUNCATE; + else { + error("Invalid --open-mode argument: %s. Ignored", + optarg); + } + break; + case LONG_OPT_ACCTG_FREQ: + opt.acctg_freq = _get_int(optarg, "acctg-freq", + false); + break; + case LONG_OPT_CHECKPOINT_PATH: + xfree(opt.ckpt_path); + opt.ckpt_path = xstrdup(optarg); break; default: if (spank_process_option (opt_char, optarg) < 0) { @@ -2031,35 +1569,9 @@ void set_options(const int argc, char **argv, int first) } } - if (!first) { - if (!_opt_verify()) - exit(1); - if (_verbose > 3) - _opt_list(); - } - spank_option_table_destroy (optz); } -static void _proc_get_user_env(char *optarg) -{ - char *end_ptr; - - if ((optarg[0] >= '0') && (optarg[0] <= '9')) - opt.get_user_env_time = strtol(optarg, &end_ptr, 10); - else { - opt.get_user_env_time = 0; - end_ptr = optarg; - } - - if ((end_ptr == NULL) || (end_ptr[0] == '\0')) - return; - if ((end_ptr[0] == 's') || (end_ptr[0] == 'S')) - opt.get_user_env_mode = 1; - else if ((end_ptr[0] == 'l') || (end_ptr[0] == 'L')) - opt.get_user_env_mode = 2; -} - /* Load the multi_prog config file into argv, pass the entire file contents * in order to avoid having to read the file on every node. 
We could parse * the infomration here too for loading the MPIR records for TotalView */ @@ -2109,16 +1621,17 @@ static void _opt_args(int argc, char **argv) int i; char **rest = NULL; - set_options(argc, argv, 1); + set_options(argc, argv); /* When CR with memory as a CR is enabled we need to assign - adequate value or check the value to opt.mem */ - if ((opt.job_min_memory >= -1) && (opt.job_max_memory > 0)) { + * adequate value or check the value to opt.mem */ + if ((opt.job_min_memory >= -1) && (opt.task_mem > 0)) { if (opt.job_min_memory == -1) { - opt.job_min_memory = opt.job_max_memory; - } else if (opt.job_min_memory < opt.job_max_memory) { - info("mem < job-mem - resizing mem to be equal to job-mem"); - opt.job_min_memory = opt.job_max_memory; + opt.job_min_memory = opt.task_mem; + } else if (opt.job_min_memory < opt.task_mem) { + info("mem < task-mem - resizing mem to be equal " + "to task-mem"); + opt.job_min_memory = opt.task_mem; } } @@ -2154,39 +1667,45 @@ static void _opt_args(int argc, char **argv) } #endif - remote_argc = 0; + if (opt.nodelist && (!opt.test_only)) { +#ifdef HAVE_BG + info("\tThe nodelist option should only be used if\n" + "\tthe block you are asking for can be created.\n" + "\tIt should also include all the midplanes you\n" + "\twant to use, partial lists will not work correctly.\n" + "\tPlease consult smap before using this option\n" + "\tor your job may be stuck with no way to run."); +#endif + } + + opt.argc = 0; if (optind < argc) { rest = argv + optind; - while (rest[remote_argc] != NULL) - remote_argc++; + while (rest[opt.argc] != NULL) + opt.argc++; } - remote_argv = (char **) xmalloc((remote_argc + 1) * sizeof(char *)); - for (i = 0; i < remote_argc; i++) - remote_argv[i] = xstrdup(rest[i]); - remote_argv[i] = NULL; /* End of argv's (for possible execv) */ + opt.argv = (char **) xmalloc((opt.argc + 1) * sizeof(char *)); + for (i = 0; i < opt.argc; i++) + opt.argv[i] = xstrdup(rest[i]); + opt.argv[i] = NULL; /* End of argv's (for possible execv) */ if (opt.multi_prog) { - if (remote_argc < 1) { + if (opt.argc < 1) { error("configuration file not specified"); exit(1); } - _load_multi(&remote_argc, remote_argv); + _load_multi(&opt.argc, opt.argv); } - else if (remote_argc > 0) { + else if (opt.argc > 0) { char *fullpath; - char *cmd = remote_argv[0]; - bool search_cwd = (opt.batch || opt.allocate); - int mode = (search_cwd) ? 
R_OK : X_OK; - if ((fullpath = _search_path(cmd, search_cwd, mode))) { - xfree(remote_argv[0]); - remote_argv[0] = fullpath; + if ((fullpath = search_path(opt.cwd, opt.argv[0], false, X_OK))) { + xfree(opt.argv[0]); + opt.argv[0] = fullpath; } } - if (!_opt_verify()) - exit(1); - if (opt.multi_prog && verify_multi_name(remote_argv[0], opt.nprocs)) + if (opt.multi_prog && verify_multi_name(opt.argv[0], opt.nprocs)) exit(1); } @@ -2239,8 +1758,8 @@ static bool _opt_verify(void) if (opt.job_min_cpus < opt.cpus_per_task) opt.job_min_cpus = opt.cpus_per_task; - if ((opt.job_name == NULL) && (remote_argc > 0)) - opt.job_name = _base_name(remote_argv[0]); + if ((opt.job_name == NULL) && (opt.argc > 0)) + opt.job_name = base_name(opt.argv[0]); if(!opt.nodelist) { if((opt.nodelist = xstrdup(getenv("SLURM_HOSTFILE")))) { @@ -2263,9 +1782,10 @@ static bool _opt_verify(void) opt.nodelist); } } - } else + } else { if (!_valid_node_list(&opt.nodelist)) exit(1); + } /* now if max is set make sure we have <= max_nodes in the * nodelist but only if it isn't arbitrary since the user has @@ -2295,72 +1815,89 @@ static bool _opt_verify(void) } - if (mode == MODE_ATTACH) { /* attach to a running job */ - if (opt.nodes_set || opt.cpus_set || opt.nprocs_set) { - error("do not specific a node allocation " - "with --attach (-a)"); - verified = false; - } - - /* if (constraints_given()) { - * error("do not specify any constraints with " - * "--attach (-a)"); - * verified = false; - *} - */ + if ((opt.argc == 0) && (opt.test_only == false)) { + error("must supply remote command"); + verified = false; + } - } else { /* mode != MODE_ATTACH */ + /* check for realistic arguments */ + if (opt.nprocs <= 0) { + error("%s: invalid number of processes (-n %d)", + opt.progname, opt.nprocs); + verified = false; + } - if ((remote_argc == 0) && (mode != MODE_ALLOCATE)) { - error("must supply remote command"); - verified = false; - } + if (opt.cpus_per_task <= 0) { + error("%s: invalid number of cpus per task (-c %d)\n", + opt.progname, opt.cpus_per_task); + verified = false; + } + if ((opt.min_nodes <= 0) || (opt.max_nodes < 0) || + (opt.max_nodes && (opt.min_nodes > opt.max_nodes))) { + error("%s: invalid number of nodes (-N %d-%d)\n", + opt.progname, opt.min_nodes, opt.max_nodes); + verified = false; + } - /* check for realistic arguments */ - if (opt.nprocs <= 0) { - error("%s: invalid number of processes (-n %d)", - opt.progname, opt.nprocs); - verified = false; + /* bound max_threads/cores from ntasks_cores/sockets */ + if ((opt.max_threads_per_core <= 0) && + (opt.ntasks_per_core > 0)) { + opt.max_threads_per_core = opt.ntasks_per_core; + /* if cpu_bind_type doesn't already have a auto pref, + * choose the level based on the level of ntasks + */ + if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | + CPU_BIND_TO_CORES | + CPU_BIND_TO_THREADS))) { + opt.cpu_bind_type |= CPU_BIND_TO_CORES; } - - if (opt.cpus_per_task <= 0) { - error("%s: invalid number of cpus per task (-c %d)\n", - opt.progname, opt.cpus_per_task); - verified = false; + } + if ((opt.max_cores_per_socket <= 0) && + (opt.ntasks_per_socket > 0)) { + opt.max_cores_per_socket = opt.ntasks_per_socket; + /* if cpu_bind_type doesn't already have a auto pref, + * choose the level based on the level of ntasks + */ + if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | + CPU_BIND_TO_CORES | + CPU_BIND_TO_THREADS))) { + opt.cpu_bind_type |= CPU_BIND_TO_SOCKETS; } + } - if ((opt.min_nodes <= 0) || (opt.max_nodes < 0) || - (opt.max_nodes && (opt.min_nodes > opt.max_nodes))) { - 
error("%s: invalid number of nodes (-N %d-%d)\n", - opt.progname, opt.min_nodes, opt.max_nodes); - verified = false; + core_format_enable (opt.core_type); + + /* massage the numbers */ + if (opt.nodelist) { + hl = hostlist_create(opt.nodelist); + if (!hl) + fatal("memory allocation failure"); + hostlist_uniq(hl); + hl_cnt = hostlist_count(hl); + if (opt.nodes_set) + opt.min_nodes = MAX(hl_cnt, opt.min_nodes); + else { + opt.min_nodes = hl_cnt; + opt.nodes_set = true; } + } + if ((opt.nodes_set || opt.extra_set) && !opt.nprocs_set) { + /* 1 proc / node default */ + opt.nprocs = opt.min_nodes; - /* bound max_threads/cores from ntasks_cores/sockets */ - if ((opt.max_threads_per_core <= 0) && - (opt.ntasks_per_core > 0)) { - opt.max_threads_per_core = opt.ntasks_per_core; - /* if cpu_bind_type doesn't already have a auto pref, - * choose the level based on the level of ntasks - */ - if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | - CPU_BIND_TO_CORES | - CPU_BIND_TO_THREADS))) { - opt.cpu_bind_type |= CPU_BIND_TO_CORES; - } + /* 1 proc / min_[socket * core * thread] default */ + if (opt.min_sockets_per_node > 0) { + opt.nprocs *= opt.min_sockets_per_node; + opt.nprocs_set = true; } - if ((opt.max_cores_per_socket <= 0) && - (opt.ntasks_per_socket > 0)) { - opt.max_cores_per_socket = opt.ntasks_per_socket; - /* if cpu_bind_type doesn't already have a auto pref, - * choose the level based on the level of ntasks - */ - if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS | - CPU_BIND_TO_CORES | - CPU_BIND_TO_THREADS))) { - opt.cpu_bind_type |= CPU_BIND_TO_SOCKETS; - } + if (opt.min_cores_per_socket > 0) { + opt.nprocs *= opt.min_cores_per_socket; + opt.nprocs_set = true; + } + if (opt.min_threads_per_core > 0) { + opt.nprocs *= opt.min_threads_per_core; + opt.nprocs_set = true; } core_format_enable (opt.core_type); @@ -2386,61 +1923,44 @@ static bool _opt_verify(void) used later */ } + } else if (opt.nodes_set && opt.nprocs_set) { - if ((opt.nodes_set || opt.extra_set) && !opt.nprocs_set) { - /* 1 proc / node default */ - opt.nprocs = opt.min_nodes; - - /* 1 proc / min_[socket * core * thread] default */ - if (opt.min_sockets_per_node > 0) { - opt.nprocs *= opt.min_sockets_per_node; - opt.nprocs_set = true; - } - if (opt.min_cores_per_socket > 0) { - opt.nprocs *= opt.min_cores_per_socket; - opt.nprocs_set = true; - } - if (opt.min_threads_per_core > 0) { - opt.nprocs *= opt.min_threads_per_core; - opt.nprocs_set = true; - } - } else if (opt.nodes_set && opt.nprocs_set) { - - /* - * Make sure in a non allocate situation that - * the number of max_nodes is <= number of tasks - */ - if (!opt.allocate && opt.nprocs < opt.max_nodes) - opt.max_nodes = opt.nprocs; - - /* - * make sure # of procs >= min_nodes || max_nodes - */ - if (opt.nprocs < opt.min_nodes) { - - info ("Warning: can't run %d processes on %d " - "nodes, setting nnodes to %d", - opt.nprocs, opt.min_nodes, opt.nprocs); - - opt.min_nodes = opt.nprocs; - if (opt.max_nodes - && (opt.min_nodes > opt.max_nodes) ) - opt.max_nodes = opt.min_nodes; - if (hl_cnt > opt.min_nodes) { - int del_cnt, i; - char *host; - del_cnt = hl_cnt - opt.min_nodes; - for (i=0; i<del_cnt; i++) { - host = hostlist_pop(hl); - free(host); - } - hostlist_ranged_string(hl, strlen(opt.nodelist)+1, - opt.nodelist); + /* + * Make sure that the number of + * max_nodes is <= number of tasks + */ + if (opt.nprocs < opt.max_nodes) + opt.max_nodes = opt.nprocs; + + /* + * make sure # of procs >= min_nodes + */ + if (opt.nprocs < opt.min_nodes) { + + info ("Warning: can't run %d 
processes on %d " + "nodes, setting nnodes to %d", + opt.nprocs, opt.min_nodes, opt.nprocs); + + opt.min_nodes = opt.nprocs; + if (opt.max_nodes + && (opt.min_nodes > opt.max_nodes) ) + opt.max_nodes = opt.min_nodes; + if (hl_cnt > opt.min_nodes) { + int del_cnt, i; + char *host; + del_cnt = hl_cnt - opt.min_nodes; + for (i=0; i<del_cnt; i++) { + host = hostlist_pop(hl); + free(host); } + hostlist_ranged_string(hl, + strlen(opt.nodelist)+1, + opt.nodelist); } + } + + } /* else if (opt.nprocs_set && !opt.nodes_set) */ - } /* else if (opt.nprocs_set && !opt.nodes_set) */ - } if (hl) hostlist_destroy(hl); @@ -2467,7 +1987,7 @@ static bool _opt_verify(void) if (opt.time_limit_str) { opt.time_limit = time_str2mins(opt.time_limit_str); - if (opt.time_limit < 0) { + if ((opt.time_limit < 0) && (opt.time_limit != INFINITE)) { error("Invalid time limit specification"); exit(1); } @@ -2475,6 +1995,17 @@ static bool _opt_verify(void) opt.time_limit = INFINITE; } + if (opt.ckpt_interval_str) { + opt.ckpt_interval = time_str2mins(opt.ckpt_interval_str); + if ((opt.ckpt_interval < 0) && (opt.ckpt_interval != INFINITE)) { + error("Invalid checkpoint interval specification"); + exit(1); + } + } + + if (! opt.ckpt_path) + opt.ckpt_path = xstrdup(opt.cwd); + if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid)) opt.uid = opt.euid; @@ -2484,11 +2015,6 @@ static bool _opt_verify(void) if ((opt.egid != (gid_t) -1) && (opt.egid != opt.gid)) opt.gid = opt.egid; - if (opt.noshell && !opt.allocate) { - error ("--no-shell only valid with -A (--allocate)"); - verified = false; - } - if (opt.propagate && parse_rlimits( opt.propagate, PROPAGATE_RLIMITS)) { error( "--propagate=%s is not valid.", opt.propagate ); verified = false; @@ -2504,123 +2030,22 @@ static bool _opt_verify(void) xfree(sched_name); } - return verified; -} - -static uint16_t _parse_mail_type(const char *arg) -{ - uint16_t rc; - - if (strcasecmp(arg, "BEGIN") == 0) - rc = MAIL_JOB_BEGIN; - else if (strcasecmp(arg, "END") == 0) - rc = MAIL_JOB_END; - else if (strcasecmp(arg, "FAIL") == 0) - rc = MAIL_JOB_FAIL; - else if (strcasecmp(arg, "ALL") == 0) - rc = MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL; - else - rc = 0; /* failure */ - - return rc; -} - -static char *_print_mail_type(const uint16_t type) -{ - if (type == 0) - return "NONE"; - - if (type == MAIL_JOB_BEGIN) - return "BEGIN"; - if (type == MAIL_JOB_END) - return "END"; - if (type == MAIL_JOB_FAIL) - return "FAIL"; - if (type == (MAIL_JOB_BEGIN | MAIL_JOB_END | MAIL_JOB_FAIL)) - return "ALL"; - - return "MULTIPLE"; -} - -static void -_freeF(void *data) -{ - xfree(data); -} - -static List -_create_path_list(void) -{ - List l = list_create(_freeF); - char *path, *c, *lc; - - c = getenv("PATH"); - if (!c) { - verbose("No PATH environment variable"); - return l; - } - - path = xstrdup(c); - c = lc = path; - - while (*c != '\0') { - if (*c == ':') { - /* nullify and push token onto list */ - *c = '\0'; - if (lc != NULL && strlen(lc) > 0) - list_append(l, xstrdup(lc)); - lc = ++c; - } else - c++; - } - - if (strlen(lc) > 0) - list_append(l, xstrdup(lc)); - - xfree(path); - - return l; -} - -static char * -_search_path(char *cmd, bool check_current_dir, int access_mode) -{ - List l = NULL; - ListIterator i = NULL; - char *path, *fullpath = NULL; - - if ( (cmd[0] == '.' 
|| cmd[0] == '/') - && (access(cmd, access_mode) == 0 ) ) { - if (cmd[0] == '.') - xstrfmtcat(fullpath, "%s/", opt.cwd); - xstrcat(fullpath, cmd); - goto done; + if (opt.task_mem > 0) { + uint32_t max_mem = slurm_get_max_mem_per_task(); + if (max_mem && (opt.task_mem > max_mem)) { + info("WARNING: Reducing --task-mem to system maximum " + "of %u MB", max_mem); + opt.task_mem = max_mem; + } + } else { + uint32_t max_mem = slurm_get_def_mem_per_task(); + if (max_mem) + opt.task_mem = max_mem; } - l = _create_path_list(); - if (l == NULL) - return NULL; - - if (check_current_dir) - list_prepend(l, xstrdup(opt.cwd)); - - i = list_iterator_create(l); - while ((path = list_next(i))) { - xstrfmtcat(fullpath, "%s/%s", path, cmd); - - if (access(fullpath, access_mode) == 0) - goto done; - - xfree(fullpath); - fullpath = NULL; - } -done: - if (l) - list_destroy(l); - return fullpath; + return verified; } - /* helper function for printing options * * warning: returns pointer to memory allocated on the stack. @@ -2644,8 +2069,8 @@ static char *print_constraints() if (opt.job_min_memory > 0) xstrfmtcat(buf, "mem=%dM ", opt.job_min_memory); - if (opt.job_max_memory > 0) - xstrfmtcat(buf, "job-mem=%dM ", opt.job_max_memory); + if (opt.task_mem > 0) + xstrfmtcat(buf, "task-mem=%dM ", opt.task_mem); if (opt.job_min_tmp_disk > 0) xstrfmtcat(buf, "tmp=%ld ", opt.job_min_tmp_disk); @@ -2665,39 +2090,6 @@ static char *print_constraints() return buf; } -static char * -print_commandline() -{ - int i; - char buf[256]; - - buf[0] = '\0'; - for (i = 0; i < remote_argc; i++) - snprintf(buf, 256, "%s", remote_argv[i]); - return xstrdup(buf); -} - -static char * -print_geometry() -{ - int i; - char buf[32], *rc = NULL; - - if ((SYSTEM_DIMENSIONS == 0) - || (opt.geometry[0] == (uint16_t)NO_VAL)) - return NULL; - - for (i=0; i<SYSTEM_DIMENSIONS; i++) { - if (i > 0) - snprintf(buf, sizeof(buf), "x%u", opt.geometry[i]); - else - snprintf(buf, sizeof(buf), "%u", opt.geometry[i]); - xstrcat(rc, buf); - } - - return rc; -} - #define tf_(b) (b == true) ? 
"true" : "false" static void _opt_list() @@ -2737,27 +2129,25 @@ static void _opt_list() info("verbose : %d", _verbose); info("slurmd_debug : %d", opt.slurmd_debug); info("immediate : %s", tf_(opt.immediate)); - info("no-requeue : %s", tf_(opt.no_requeue)); info("label output : %s", tf_(opt.labelio)); info("unbuffered IO : %s", tf_(opt.unbuffered)); - info("allocate : %s", tf_(opt.allocate)); - info("attach : `%s'", opt.attach); info("overcommit : %s", tf_(opt.overcommit)); - info("batch : %s", tf_(opt.batch)); info("threads : %d", opt.max_threads); if (opt.time_limit == INFINITE) info("time_limit : INFINITE"); else if (opt.time_limit != NO_VAL) info("time_limit : %d", opt.time_limit); + if (opt.ckpt_interval) + info("checkpoint : %d secs", opt.ckpt_interval); + info("checkpoint_path: %s", opt.ckpt_path); info("wait : %d", opt.max_wait); if (opt.nice) info("nice : %d", opt.nice); info("account : %s", opt.account); info("comment : %s", opt.comment); - if (opt.dependency == NO_VAL) - info("dependency : none"); - else - info("dependency : %u", opt.dependency); + + info("dependency : %s", opt.dependency); + info("exclusive : %s", tf_(opt.exclusive)); if (opt.shared != (uint16_t) NO_VAL) info("shared : %u", opt.shared); str = print_constraints(); @@ -2765,7 +2155,7 @@ static void _opt_list() xfree(str); if (opt.conn_type != (uint16_t) NO_VAL) info("conn_type : %u", opt.conn_type); - str = print_geometry(); + str = print_geometry(opt.geometry); info("geometry : %s", str); xfree(str); info("reboot : %s", opt.reboot ? "no" : "yes"); @@ -2790,17 +2180,23 @@ static void _opt_list() } info("prolog : %s", opt.prolog); info("epilog : %s", opt.epilog); - info("mail_type : %s", _print_mail_type(opt.mail_type)); + info("mail_type : %s", print_mail_type(opt.mail_type)); info("mail_user : %s", opt.mail_user); info("task_prolog : %s", opt.task_prolog); info("task_epilog : %s", opt.task_epilog); info("ctrl_comm_ifhn : %s", opt.ctrl_comm_ifhn); info("multi_prog : %s", opt.multi_prog ? "yes" : "no"); + info("sockets-per-node : %d - %d", opt.min_sockets_per_node, + opt.max_sockets_per_node); + info("cores-per-socket : %d - %d", opt.min_cores_per_socket, + opt.max_cores_per_socket); + info("threads-per-core : %d - %d", opt.min_threads_per_core, + opt.max_threads_per_core); info("ntasks-per-node : %d", opt.ntasks_per_node); info("ntasks-per-socket : %d", opt.ntasks_per_socket); info("ntasks-per-core : %d", opt.ntasks_per_core); info("plane_size : %u", opt.plane_size); - str = print_commandline(); + str = print_commandline(opt.argc, opt.argv); info("remote command : `%s'", str); xfree(str); @@ -2819,10 +2215,11 @@ static void _usage(void) " [-c ncpus] [-r n] [-p partition] [--hold] [-t minutes]\n" " [-D path] [--immediate] [--overcommit] [--no-kill]\n" " [--share] [--label] [--unbuffered] [-m dist] [-J jobname]\n" -" [--jobid=id] [--batch] [--verbose] [--slurmd_debug=#]\n" -" [--core=type] [-T threads] [-W sec] [--attach] [--join] \n" +" [--jobid=id] [--verbose] [--slurmd_debug=#]\n" +" [--core=type] [-T threads] [-W sec] [--checkpoint=time]\n" +" [--checkpoint-path=dir] [--licenses=names]\n" " [--contiguous] [--mincpus=n] [--mem=MB] [--tmp=MB] [-C list]\n" -" [--mpi=type] [--account=name] [--dependency=jobid]\n" +" [--mpi=type] [--account=name] [--dependency=type:jobid]\n" " [--kill-on-bad-exit] [--propagate[=rlimits] [--comment=name]\n" " [--cpu_bind=...] 
[--mem_bind=...]\n" " [--ntasks-per-node=n] [--ntasks-per-socket=n]\n" @@ -2835,7 +2232,7 @@ static void _usage(void) " [--mail-type=type] [--mail-user=user] [--nice[=value]]\n" " [--prolog=fname] [--epilog=fname]\n" " [--task-prolog=fname] [--task-epilog=fname]\n" - " [--ctrl-comm-ifhn=addr] [--multi-prog] [--no-requeue]\n" + " [--ctrl-comm-ifhn=addr] [--multi-prog]\n" " [-w hosts...] [-x hosts...] executable [args...]\n"); } @@ -2883,7 +2280,7 @@ static void _help(void) " -d, --slurmd-debug=level slurmd debug level\n" " --core=type change default corefile format type\n" " (type=\"list\" to list of valid formats)\n" -" -P, --dependency=jobid defer job until specified jobid completes\n" +" -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n" " --nice[=value] decrease secheduling priority by value\n" " -U, --account=name charge job to specified account\n" " --comment=name arbitrary comment\n" @@ -2900,16 +2297,12 @@ static void _help(void) " --multi-prog if set the program name specified is the\n" " configuration specification for multiple programs\n" " --get-user-env used by Moab. See srun man page.\n" -" --no-requeue if set, do not permit the job to be requeued\n" -"\n" -"Allocate only:\n" -" -A, --allocate allocate resources and spawn a shell\n" -" --no-shell don't spawn shell in allocate mode\n" -"\n" -"Attach to running job:\n" -" -a, --attach=jobid attach to running job with specified id\n" -" -j, --join when used with --attach, allow forwarding of\n" -" signals and stdin.\n" +" -L, --licenses=names required license, comma separated\n" +" --checkpoint=time job step checkpoint interval\n" +" --checkpoint-path=dir path to store job step checkpoint image files\n" +#ifdef HAVE_PTY_H +" --pty run task zero in pseudo terminal\n" +#endif "\n" "Constraint options:\n" " --mincpus=n minimum number of cpus per node\n" @@ -2927,7 +2320,8 @@ static void _help(void) "Consumable resources related options:\n" " --exclusive allocate nodes in exclusive mode when\n" " cpu consumable resource is enabled\n" -" --job-mem=MB maximum amount of real memory per node\n" +" or don't share CPUs for job steps\n" +" --task-mem=MB maximum amount of real memory per task\n" " required by the job.\n" " --mem >= --job-mem if --mem is specified.\n" "\n" diff --git a/src/srun/opt.h b/src/srun/opt.h index 8104cb48c..54cdac36c 100644 --- a/src/srun/opt.h +++ b/src/srun/opt.h @@ -1,11 +1,11 @@ /*****************************************************************************\ * opt.h - definitions for srun option processing - * $Id: opt.h 13407 2008-02-28 20:13:43Z jette $ + * $Id: opt.h 13771 2008-04-02 20:03:47Z jette $ ***************************************************************************** * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona1@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -58,20 +58,8 @@ #define INT_UNASSIGNED ((int)-1) /* global variables relating to user options */ -extern char **remote_argv; -extern int remote_argc; extern int _verbose; -/* mutually exclusive modes for srun */ -enum modes { - MODE_UNKNOWN = 0, - MODE_NORMAL = 1, - MODE_IMMEDIATE = 2, - MODE_ATTACH = 3, - MODE_ALLOCATE = 4, - MODE_BATCH = 5 -}; - extern enum modes mode; #define format_task_dist_states(t) (t == SLURM_DIST_BLOCK) ? 
"block" : \ @@ -121,6 +109,10 @@ typedef struct srun_options { bool extra_set; /* true if extra node info explicitly set */ int time_limit; /* --time, -t (int minutes) */ char *time_limit_str; /* --time, -t (string) */ + int ckpt_interval; /* --checkpoint (int minutes) */ + char *ckpt_interval_str;/* --checkpoint (string) */ + char *ckpt_path; /* --checkpoint-path (string) */ + bool exclusive; /* --exclusive */ char *partition; /* --partition=n, -p n */ enum task_dist_states distribution; /* --distribution=, -m dist */ @@ -132,7 +124,7 @@ typedef struct srun_options { unsigned int jobid; /* --jobid=jobid */ bool jobid_set; /* true if jobid explicitly set */ char *mpi_type; /* --mpi=type */ - unsigned int dependency;/* --dependency, -P jobid */ + char *dependency; /* --dependency, -P type:jobid */ int nice; /* --nice */ char *account; /* --account, -U acct_name */ char *comment; /* --comment */ @@ -143,7 +135,6 @@ typedef struct srun_options { int slurmd_debug; /* --slurmd-debug, -D */ core_format_t core_type;/* --core= */ - char *attach; /* --attach=id -a id */ bool join; /* --join, -j */ /* no longer need these, they are set globally : */ @@ -158,10 +149,8 @@ typedef struct srun_options { bool allocate; /* --allocate, -A */ bool noshell; /* --no-shell */ bool overcommit; /* --overcommit, -O */ - bool batch; /* --batch, -b */ bool no_kill; /* --no-kill, -k */ bool kill_bad_exit; /* --kill-on-bad-exit, -K */ - bool no_requeue; /* --no-requeue */ uint16_t shared; /* --share, -s */ int max_wait; /* --wait, -W */ bool quit_on_intr; /* --quit-on-interrupt, -q */ @@ -173,6 +162,7 @@ typedef struct srun_options { char *propagate; /* --propagate[=RLIMIT_CORE,...]*/ char *task_epilog; /* --task-epilog= */ char *task_prolog; /* --task-prolog= */ + char *licenses; /* --licenses, -L */ /* constraint options */ int32_t job_min_cpus; /* --mincpus=n */ @@ -180,7 +170,7 @@ typedef struct srun_options { int32_t job_min_cores; /* --mincores=n */ int32_t job_min_threads;/* --minthreads=n */ int32_t job_min_memory; /* --mem=n */ - int32_t job_max_memory; /* --job-mem=n */ + int32_t task_mem; /* --task-mem=n */ long job_min_tmp_disk; /* --tmp=n */ char *constraints; /* --constraints=, -C constraint*/ bool contiguous; /* --contiguous */ @@ -212,8 +202,11 @@ typedef struct srun_options { uint16_t mail_type; /* --mail-type */ char *mail_user; /* --mail-user */ char *ctrl_comm_ifhn; /* --ctrl-comm-ifhn */ - int get_user_env_time; /* --get-user-env[=secs] */ - int get_user_env_mode; /* --get-user-env=[S|L] */ + uint8_t open_mode; /* --open-mode=append|truncate */ + int acctg_freq; /* --acctg-freq=secs */ + bool pty; /* --pty */ + int argc; /* length of argv array */ + char **argv; /* left over on command line */ } opt_t; extern opt_t opt; @@ -239,8 +232,4 @@ extern opt_t opt; */ int initialize_and_process_args(int argc, char *argv[]); -/* set options based upon commandline args */ -void set_options(const int argc, char **argv, int first); - - #endif /* _HAVE_OPT_H */ diff --git a/src/srun/reattach.c b/src/srun/reattach.c deleted file mode 100644 index a24d20bdb..000000000 --- a/src/srun/reattach.c +++ /dev/null @@ -1,545 +0,0 @@ -/****************************************************************************\ - * src/srun/reattach.c - reattach to a running job - * $Id: reattach.c 10574 2006-12-15 23:38:29Z jette $ - ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. 
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <grondona@llnl.gov>. - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ - -#if HAVE_CONFIG_H -# include "config.h" -#endif - -#include <errno.h> -#include <signal.h> -#include <string.h> -#include <stdlib.h> -#include <unistd.h> -#include <sys/param.h> - -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/common/xsignal.h" -#include "src/common/log.h" -#include "src/common/list.h" -#include "src/common/macros.h" -#include "src/common/hostlist.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/read_config.h" -#include "src/common/forward.h" - -#include "src/srun/srun_job.h" -#include "src/srun/launch.h" -#include "src/srun/opt.h" -#include "src/srun/msg.h" -#include "src/srun/srun.h" -#include "src/srun/signals.h" - - -/* number of active threads */ -static pthread_mutex_t active_mutex = PTHREAD_MUTEX_INITIALIZER; -static pthread_cond_t active_cond = PTHREAD_COND_INITIALIZER; -static int active = 0; - -static bool invalid_user = false; - -typedef enum {THD_NEW, THD_ACTIVE, THD_DONE, THD_FAILED} state_t; - -typedef struct thd { - pthread_t thread; /* thread ID */ - pthread_attr_t attr; /* thread attributes */ - state_t state; /* thread state */ - slurm_msg_t *msg; - srun_job_t *job; - uint32_t nodeid; -} thd_t; - -static void _p_reattach(slurm_msg_t *req, srun_job_t *job); -static void *_p_reattach_task(void *args); - -typedef struct _srun_step { - uint32_t jobid; - uint32_t stepid; - uint32_t ntasks; - char *nodes; - char *name; - bool complete_job; -} srun_step_t; - -static void -_srun_step_destroy(srun_step_t *s) -{ - if (s->name) - xfree(s->name); - if (s->nodes) - xfree(s->nodes); - xfree(s); -} - -static srun_step_t * -_srun_step_create(uint32_t jobid, uint32_t stepid, char *name) -{ - srun_step_t *s = xmalloc(sizeof(*s)); - s->jobid = 
jobid; - s->stepid = stepid; - s->ntasks = 0; - s->nodes = NULL; - s->name = NULL; - - s->complete_job = false; - - if (name == NULL) - return s; - s->name = xstrdup(name); - return s; -} - -static char * -_next_tok(char *sep, char **str) -{ - char *tok; - - /* push str past any leading separators */ - while ((**str != '\0') && (strchr(sep, **str) != '\0')) - (*str)++; - - if (**str == '\0') - return NULL; - - /* assign token ptr */ - tok = *str; - - /* push str past token and leave pointing to first separator */ - while ((**str != '\0') && (strchr(sep, **str) == '\0')) - (*str)++; - - /* nullify consecutive separators and push str beyond them */ - while ((**str != '\0') && (strchr(sep, **str) != '\0')) - *(*str)++ = '\0'; - - return tok; -} - - -static List -_step_list_create(char *steplist) -{ - List l = NULL; - char *str = NULL; - char *orig = NULL; - char *tok = NULL; - uint32_t jobid, stepid; - - if (steplist == NULL) - return NULL; - - orig = str = xstrdup(steplist); - - l = list_create((ListDelF)_srun_step_destroy); - - while ((tok = _next_tok(",", &str))) { - char *cur = tok; - char *p = strchr(tok, '.'); - char *q = NULL; - - if (p) *(p++) = '\0'; - - jobid = strtoul(tok, &q, 10); - - if (q == tok) { - error("Invalid jobid: `%s'", cur); - goto error; - } - - stepid = (p && *p) ? strtoul(p, &q, 10) : NO_VAL; - - if ((q == p) || (*q != '\0')) { - error("Invalid job step id: `%s'", cur); - goto error; - } - - list_append(l, _srun_step_create(jobid, stepid, cur)); - } - - xfree(orig); - return l; - - error: - xfree(orig); - list_destroy(l); - return NULL; - -} - -static int -_get_job_info(srun_step_t *s) -{ - int i, rc = -1; - job_info_msg_t *resp = NULL; - job_info_t *job = NULL; - hostlist_t hl; - - s->nodes = NULL; - - if (slurm_load_jobs((time_t) 0, &resp, 1) < 0) { - error("Unable to load jobs: %m"); - goto done; - } - - for (i = 0; i < resp->record_count; i++) { - job = &resp->job_array[i]; - if (job->job_id == s->jobid) - break; - job = NULL; - } - - if (job == NULL) { - error ("Unable to find job %u", s->jobid); - goto done; - } - - if ((job->job_state != JOB_RUNNING) - && (job->job_state != JOB_SUSPENDED)) { - error ("Cannot attach to job %d in state %s", - job->job_id, job_state_string(job->job_state)); - goto done; - } - - if (!job->batch_flag) { - rc = 0; - goto done; - } - - if (!(hl = hostlist_create(job->nodes))) { - error ("Unable to create hostlist from `%s'", job->nodes); - goto done; - } - s->nodes = hostlist_shift(hl); - hostlist_destroy(hl); - - s->ntasks = 1; - rc = 0; - - done: - if (resp) - slurm_free_job_info_msg(resp); - return rc; -} - -static void -_get_step_info(srun_step_t *s) -{ - uid_t my_uid; - job_step_info_response_msg_t *resp = NULL; - - xassert(s->stepid != NO_VAL); - - if (slurm_get_job_steps((time_t) 0, s->jobid, s->stepid, &resp, 1) - < 0) { - error("Unable to get step information for %u.%u: %m", - s->jobid, s->stepid); - goto done; - } - if (resp->job_step_count == 0) { - error("No nodes in %u.%u", s->jobid, s->stepid); - s->ntasks = 0; - goto done; - } - - invalid_user = false; - if ((my_uid = getuid()) != 0) { /* not user root */ - if (my_uid != resp->job_steps->user_id) { - error("Invalid user id"); - invalid_user = true; - /* We let the request continue and log the - * event in SlurmdLog for security purposes */ - } - } - s->nodes = xstrdup(resp->job_steps->nodes); - s->ntasks = resp->job_steps->num_tasks; - - done: - if (resp) - slurm_free_job_step_info_response_msg(resp); - return; -} - -static void -_get_attach_info(srun_step_t *s) -{ - 
if (s->stepid == NO_VAL) { - if (_get_job_info(s) < 0) - return; - - /* If job was not a batch job, try step 0 - */ - if (s->nodes == NULL) { - s->stepid = 0; - _get_step_info(s); - } - - } else { - _get_step_info(s); - } -} - -static int -_attach_to_job(srun_job_t *job) -{ - int i; - reattach_tasks_request_msg_t *req = NULL; - slurm_msg_t *msg = NULL; - hostlist_t hl = NULL; - char *name = NULL; - - req = xmalloc(job->nhosts * sizeof(reattach_tasks_request_msg_t)); - msg = xmalloc(job->nhosts * sizeof(slurm_msg_t)); - - debug("Going to attach to job %u.%u", job->jobid, job->stepid); - - hl = hostlist_create(job->step_layout->node_list); - for (i = 0; i < job->nhosts; i++) { - reattach_tasks_request_msg_t *r = &req[i]; - slurm_msg_t *m = &msg[i]; - - r->job_id = job->jobid; - r->job_step_id = job->stepid; - r->num_io_port = 1; - r->io_port = (uint16_t *)xmalloc(sizeof(uint16_t)); - r->io_port[0] = job->client_io->listenport[ - i%job->client_io->num_listen]; - r->num_resp_port = 1; - r->resp_port = (uint16_t *)xmalloc(sizeof(uint16_t)); - r->resp_port[0] = ntohs(job->jaddr[i%job->njfds].sin_port); - r->cred = job->cred; - slurm_msg_t_init(m); - m->data = r; - m->msg_type = REQUEST_REATTACH_TASKS; - name = hostlist_shift(hl); - if(!name) { - error("hostlist incomplete for this job request"); - hostlist_destroy(hl); - return SLURM_ERROR; - } - if(slurm_conf_get_addr(name, &m->address) - == SLURM_ERROR) { - error("_init_task_layout: can't get addr for " - "host %s", name); - free(name); - hostlist_destroy(hl); - return SLURM_ERROR; - } - free(name); - /* memcpy(&m->address, &job->step_layout->node_addr[i], */ -/* sizeof(slurm_addr)); */ - } - hostlist_destroy(hl); - _p_reattach(msg, job); - - return SLURM_SUCCESS; -} - -static void -_p_reattach(slurm_msg_t *msg, srun_job_t *job) -{ - int i; - thd_t *thd = xmalloc(job->nhosts * sizeof(thd_t)); - - for (i = 0; i < job->nhosts; i++) { - - slurm_mutex_lock(&active_mutex); - while (active >= opt.max_threads) { - pthread_cond_wait(&active_cond, &active_mutex); - } - active++; - slurm_mutex_unlock(&active_mutex); - - thd[i].msg = &msg[i]; - thd[i].job = job; - thd[i].nodeid = i; - - slurm_attr_init(&thd[i].attr); - if (pthread_attr_setdetachstate(&thd[i].attr, - PTHREAD_CREATE_DETACHED ) < 0) - fatal("pthread_attr_setdetachstate: %m"); - - if (pthread_create( &thd[i].thread, &thd[i].attr, - _p_reattach_task, (void *) &thd[i])) { - error("pthread_create: %m"); - _p_reattach_task((void *) &thd[i]); - } - slurm_attr_destroy(&thd[i].attr); - - } - - slurm_mutex_lock(&active_mutex); - while (active > 0) - pthread_cond_wait(&active_cond, &active_mutex); - slurm_mutex_unlock(&active_mutex); - - xfree(thd); -} - -static void * -_p_reattach_task(void *arg) -{ - thd_t *t = (thd_t *) arg; - int rc = 0; - char *host = nodelist_nth_host(t->job->step_layout->node_list, - t->nodeid); - - t->state = THD_ACTIVE; - debug3("sending reattach request to %s", host); - - rc = slurm_send_only_node_msg(t->msg); - if (rc < 0) { - error("reattach: %s: %m", host); - t->state = THD_FAILED; - t->job->host_state[t->nodeid] = SRUN_HOST_REPLIED; - } else { - t->state = THD_DONE; - t->job->host_state[t->nodeid] = SRUN_HOST_UNREACHABLE; - } - free(host); - slurm_mutex_lock(&active_mutex); - active--; - pthread_cond_signal(&active_cond); - slurm_mutex_unlock(&active_mutex); - - return NULL; -} - - -int reattach() -{ - List steplist = _step_list_create(opt.attach); - srun_step_t *s = NULL; - srun_job_t *job = NULL; - slurm_step_io_fds_t fds = SLURM_STEP_IO_FDS_INITIALIZER; - - if 
((steplist == NULL) || (list_count(steplist) == 0)) { - info("No job/steps in attach"); - exit(1); - } - - if (list_count(steplist) > 1) - info("Warning: attach to multiple jobs/steps not supported"); - s = list_peek(steplist); - - _get_attach_info(s); - - if (!opt.join) - opt.ifname = "none"; - - if ((opt.nodelist = s->nodes) == NULL) - exit(1); - - if ((opt.nprocs = s->ntasks) == 0) - exit(1); - - /* - * Indicate that nprocs has been manually set - */ - opt.nprocs_set = true; - - if (!(job = job_create_noalloc())) - exit(1); - - job->jobid = s->jobid; - job->stepid = s->stepid; - - if (job->stepid == NO_VAL) { - char *new_argv0 = NULL; - xstrfmtcat(new_argv0, "attach[%d]", job->jobid); - log_set_argv0(new_argv0); - } - - /* - * mask and handle certain signals iff we are "joining" with - * the job in question. If opt.join is off, attached srun is in - * "read-only" mode and cannot forward stdin/signals. - */ - if (opt.join) - sig_setup_sigmask(); - - if (msg_thr_create(job) < 0) { - error("Unable to create msg thread: %m"); - exit(1); - } - - srun_set_stdio_fds(job, &fds); - job->client_io = client_io_handler_create(fds, - job->step_layout->task_cnt, - job->step_layout->node_cnt, - job->cred, - opt.labelio); - if (!job->client_io - || (client_io_handler_start(job->client_io) != SLURM_SUCCESS)) - job_fatal(job, "failed to start IO handler"); - - if (opt.join && sig_thr_create(job) < 0) { - error("Unable to create signals thread: %m"); - } - - _attach_to_job(job); - - if (invalid_user) - exit(1); - - slurm_mutex_lock(&job->state_mutex); - while (job->state < SRUN_JOB_TERMINATED) { - pthread_cond_wait(&job->state_cond, &job->state_mutex); - } - slurm_mutex_unlock(&job->state_mutex); - - if (job->state == SRUN_JOB_FAILED) - info("Job terminated abnormally."); - - /* - * Signal the IO thread to shutdown, which will stop - * the listening socket and file read (stdin) event - * IO objects, but allow file write (stdout) objects to - * complete any writing that remains. - */ - debug("Waiting for IO thread"); - if (client_io_handler_finish(job->client_io) != SLURM_SUCCESS) - error ("IO handler did not finish correctly (reattach): %m"); - client_io_handler_destroy(job->client_io); - - /* kill msg server thread */ - pthread_kill(job->jtid, SIGHUP); - - /* _complete_job(job); */ - - exit(0); -} diff --git a/src/srun/signals.c b/src/srun/signals.c deleted file mode 100644 index 3a40abbf7..000000000 --- a/src/srun/signals.c +++ /dev/null @@ -1,214 +0,0 @@ -/*****************************************************************************\ - * src/srun/signals.c - signal handling for srun - ***************************************************************************** - * Copyright (C) 2002-2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grondona <mgrondona@llnl.gov>, and - * Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
- * - * In addition, as a special exception, the copyright holders give permission - * to link the code of portions of this program with the OpenSSL library under - * certain conditions as described in each individual source file, and - * distribute linked combinations including the two. You must obey the GNU - * General Public License in all respects for all of the code used other than - * OpenSSL. If you modify file(s) with this exception, you may extend this - * exception to your version of the file(s), but you are not obligated to do - * so. If you do not wish to do so, delete this exception statement from your - * version. If you delete this exception statement from all source files in - * the program, then also delete it here. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ - -#ifdef HAVE_CONFIG_H -# include "config.h" -#endif - -#if HAVE_PTHREAD -#include <pthread.h> -#endif - -#include <signal.h> -#include <string.h> - -#include <slurm/slurm_errno.h> - -#include "src/common/log.h" -#include "src/common/macros.h" -#include "src/common/slurm_protocol_api.h" -#include "src/common/slurm_protocol_defs.h" -#include "src/common/xmalloc.h" -#include "src/common/xstring.h" -#include "src/common/xsignal.h" - -#include "src/srun/opt.h" -#include "src/srun/srun_job.h" -#include "src/srun/signals.h" - -#define MAX_RETRIES 3 - -/* - * Static list of signals to block in srun: - */ -static int srun_sigarray[] = { - SIGINT, SIGQUIT, /*SIGTSTP,*/ SIGCONT, SIGTERM, - SIGALRM, SIGUSR1, SIGUSR2, SIGPIPE, 0 -}; - -/* - * Static prototypes - */ -static void _sigterm_handler(int); -static void _handle_intr(srun_job_t *, time_t *, time_t *); -static void * _sig_thr(void *); - -static inline bool -_sig_thr_done(srun_job_t *job) -{ - bool retval; - slurm_mutex_lock(&job->state_mutex); - retval = (job->state >= SRUN_JOB_DONE); - slurm_mutex_unlock(&job->state_mutex); - return retval; -} - -int -sig_setup_sigmask(void) -{ - if (xsignal_block(srun_sigarray) < 0) - return SLURM_ERROR; - - xsignal(SIGHUP, &_sigterm_handler); - - return SLURM_SUCCESS; -} - -int -sig_unblock_signals(void) -{ - return xsignal_unblock(srun_sigarray); -} - -int -sig_thr_create(srun_job_t *job) -{ - int e, retries = 0; - pthread_attr_t attr; - - slurm_attr_init(&attr); - - while ((e = pthread_create(&job->sigid, &attr, &_sig_thr, job))) { - if (++retries > MAX_RETRIES) { - slurm_attr_destroy(&attr); - slurm_seterrno_ret(e); - } - sleep(1); /* sleep and try again */ - } - slurm_attr_destroy(&attr); - - debug("Started signals thread (%lu)", (unsigned long) job->sigid); - - return SLURM_SUCCESS; -} - - -static void -_sigterm_handler(int signum) -{ -} - -static void -_handle_intr(srun_job_t *job, time_t *last_intr, time_t *last_intr_sent) -{ - if (opt.quit_on_intr) { - job_force_termination(job); - pthread_exit (0); - } - - if (((time(NULL) - *last_intr) > 1) && !opt.disable_status) { - info("interrupt (one more within 1 sec to abort)"); - if (mode != MODE_ATTACH) - report_task_status(job); - *last_intr = time(NULL); - } else { /* second Ctrl-C in half as 
many seconds */ - update_job_state(job, SRUN_JOB_CANCELLED); - /* terminate job */ - if (job->state < SRUN_JOB_FORCETERM) { - if ((time(NULL) - *last_intr_sent) < 1) { - job_force_termination(job); - pthread_exit(0); - } - - info("sending Ctrl-C to job"); - *last_intr_sent = time(NULL); - fwd_signal(job, SIGINT, opt.max_threads); - - } else { - job_force_termination(job); - } - } -} - -/* simple signal handling thread */ -static void * -_sig_thr(void *arg) -{ - srun_job_t *job = (srun_job_t *)arg; - sigset_t set; - time_t last_intr = 0; - time_t last_intr_sent = 0; - int signo, err; - - while (!_sig_thr_done(job)) { - - xsignal_sigset_create(srun_sigarray, &set); - - if ((err = sigwait(&set, &signo)) != 0) { - if (err != EINTR) - error ("sigwait: %s", slurm_strerror (err)); - continue; - } - - debug2("recvd signal %d", signo); - switch (signo) { - case SIGINT: - _handle_intr(job, &last_intr, &last_intr_sent); - break; - /* case SIGTSTP: */ -/* debug3("got SIGTSTP"); */ -/* break; */ - case SIGCONT: - debug3("got SIGCONT"); - break; - case SIGQUIT: - info("Quit"); - job_force_termination(job); - break; - default: - fwd_signal(job, signo, opt.max_threads); - break; - } - } - - pthread_exit(0); - return NULL; -} - - - diff --git a/src/srun/srun.c b/src/srun/srun.c index 82b01cd39..3a67fc6f7 100644 --- a/src/srun/srun.c +++ b/src/srun/srun.c @@ -5,7 +5,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -65,10 +65,12 @@ #include <stdlib.h> #include <string.h> #include <signal.h> +#include <termios.h> #include <unistd.h> #include <fcntl.h> #include <grp.h> + #include "src/common/fd.h" #include "src/common/log.h" #include "src/common/slurm_protocol_api.h" @@ -80,17 +82,17 @@ #include "src/common/mpi.h" #include "src/common/slurm_rlimits_info.h" #include "src/common/plugstack.h" +#include "src/common/read_config.h" #include "src/srun/allocate.h" #include "src/srun/srun_job.h" -#include "src/srun/launch.h" -#include "src/srun/msg.h" #include "src/srun/opt.h" -#include "src/srun/sigstr.h" -#include "src/srun/reattach.h" -#include "src/srun/attach.h" +#include "src/srun/debugger.h" #include "src/srun/srun.h" -#include "src/srun/signals.h" +#include "src/srun/srun_pty.h" +#include "src/srun/multi_prog.h" +#include "src/api/pmi_server.h" +#include "src/api/step_launch.h" #define MAX_RETRIES 20 #define MAX_ENTRIES 50 @@ -100,57 +102,70 @@ #define TYPE_SCRIPT 2 mpi_plugin_client_info_t mpi_job_info[1]; -pid_t srun_ppid = 0; +static struct termios termdefaults; +int global_rc; +srun_job_t *job = NULL; + +struct { + bitstr_t *start_success; + bitstr_t *start_failure; + bitstr_t *finish_normal; + bitstr_t *finish_abnormal; +} task_state; /* * forward declaration of static funcs */ static void _print_job_information(resource_allocation_response_msg_t *resp); -static char *_build_script (const char *argv0, char *pathname, int file_type); -static char *_get_shell (void); -static void _send_options(const int argc, char **argv); -static void _get_options (const char *buffer); -static char *_get_token(char *buf_ptr); -static int _is_file_text (char *, char**); -static int _run_batch_job (const char *argv0); -static int _run_job_script(srun_job_t *job, env_t *env); static void 
_set_prio_process_env(void); static int _set_rlimit_env(void); static int _set_umask_env(void); static char *_uint16_array_to_str(int count, const uint16_t *array); -static void _switch_standalone(srun_job_t *job); static int _become_user (void); -static int _print_script_exit_status(const char *argv0, int status); static void _run_srun_prolog (srun_job_t *job); static void _run_srun_epilog (srun_job_t *job); static int _run_srun_script (srun_job_t *job, char *script); -static int _change_rlimit_rss(void); static int _slurm_debug_env_val (void); static int _call_spank_local_user (srun_job_t *job); +static void _set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds); static void _define_symbols(void); +static void _pty_restore(void); +static void _step_opt_exclusive(void); +static void _task_start(launch_tasks_response_msg_t *msg); +static void _task_finish(task_exit_msg_t *msg); +static void _task_state_struct_init(int num_tasks); +static void _task_state_struct_print(void); +static void _task_state_struct_free(void); +static void _handle_intr(); +static void _handle_signal(int signo); +static int _setup_signals(); int srun(int ac, char **av) { resource_allocation_response_msg_t *resp; - srun_job_t *job = NULL; - int exitcode = 0; env_t *env = xmalloc(sizeof(env_t)); uint32_t job_id = 0; log_options_t logopt = LOG_OPTS_STDERR_ONLY; - slurm_step_io_fds_t fds = SLURM_STEP_IO_FDS_INITIALIZER; - char **mpi_env = NULL; - mpi_plugin_client_state_t *mpi_state; - + slurm_step_launch_params_t launch_params; + slurm_step_launch_callbacks_t callbacks; + int got_alloc = 0; + env->stepid = -1; env->procid = -1; env->localid = -1; env->nodeid = -1; env->cli = NULL; env->env = NULL; + env->ckpt_path = NULL; logopt.stderr_level += _slurm_debug_env_val(); log_init(xbasename(av[0]), logopt, 0, NULL); +/* xsignal(SIGQUIT, _ignore_signal); */ +/* xsignal(SIGPIPE, _ignore_signal); */ +/* xsignal(SIGUSR1, _ignore_signal); */ +/* xsignal(SIGUSR2, _ignore_signal); */ + /* Initialize plugin stack, read options from plugins, etc. 
*/ if (spank_init(NULL) < 0) { @@ -170,7 +185,7 @@ int srun(int ac, char **av) error ("srun initialization failed"); exit (1); } - srun_ppid = getppid(); + record_ppid(); /* reinit log with new verbosity (if changed by command line) */ @@ -185,14 +200,13 @@ int srun(int ac, char **av) log_alter(logopt, 0, NULL); } - if (!opt.allocate) { - (void) _set_rlimit_env(); - _set_prio_process_env(); - (void) _set_umask_env(); - } + (void) _set_rlimit_env(); + _set_prio_process_env(); + (void) _set_umask_env(); + /* Set up slurmctld message handler */ slurmctld_msg_init(); - + /* now global "opt" should be filled in and available, * create a job from opt */ @@ -202,96 +216,28 @@ int srun(int ac, char **av) slurm_perror("allocation failure"); exit (1); } - info("allocation success"); - exit (0); - - } else if (opt.batch) { - /* allow binding with batch submissions */ - env->distribution = opt.distribution; - env->cpu_bind_type = opt.cpu_bind_type; - env->cpu_bind = opt.cpu_bind; - env->mem_bind_type = opt.mem_bind_type; - env->mem_bind = opt.mem_bind; - setup_env(env); - - if (_run_batch_job(av[0]) < 0) - exit (1); exit (0); } else if (opt.no_alloc) { info("do not allocate resources"); - sig_setup_sigmask(); job = job_create_noalloc(); - _switch_standalone(job); - - } else if (opt.allocate) { - sig_setup_sigmask(); - if ( !(resp = allocate_nodes()) ) + if (create_job_step(job) < 0) { exit(1); - if (opt.noshell) { - fprintf (stdout, "SLURM_JOBID=%u\n", resp->job_id); - exit (0); } - if (_become_user () < 0) - info ("Warning: unable to assume uid=%lu\n", opt.uid); - _print_job_information(resp); - - job = job_create_allocation(resp); - if(!job) - exit(1); - - job->step_layout = - fake_slurm_step_layout_create(resp->node_list, - resp->cpus_per_node, - resp->cpu_count_reps, - resp->node_cnt, 0); - if(!job->step_layout) - exit(1); - if (msg_thr_create(job) < 0) - job_fatal(job, "Unable to create msg thread"); - exitcode = _run_job_script(job, env); - - /* close up the msg thread cleanly */ - close(job->forked_msg->msg_par->msg_pipe[1]); - debug2("Waiting for message thread"); - if (pthread_join(job->jtid, NULL) < 0) - error ("Waiting on message thread: %m"); - debug2("done"); - - srun_job_destroy(job,exitcode); - - debug ("Spawned srun shell terminated"); - xfree(env->task_count); - xfree(env); - exit (exitcode); - } else if ((resp = existing_allocation())) { job_id = resp->job_id; if (opt.alloc_nodelist == NULL) opt.alloc_nodelist = xstrdup(resp->node_list); - - if (opt.allocate) { - error("job %u already has an allocation", - job_id); - slurm_free_resource_allocation_response_msg(resp); - exit(1); - } + if (opt.exclusive) + _step_opt_exclusive(); job = job_step_create_allocation(resp); slurm_free_resource_allocation_response_msg(resp); - if(!job) - exit(1); - - job->old_job = true; - sig_setup_sigmask(); - - if (create_job_step(job) < 0) + if (!job || create_job_step(job) < 0) exit(1); - } else if (mode == MODE_ATTACH) { - reattach(); - exit (0); } else { + got_alloc = 1; /* Combined job allocation and job step launch */ #ifdef HAVE_FRONT_END uid_t my_uid = getuid(); @@ -301,18 +247,14 @@ int srun(int ac, char **av) exit(1); } #endif - if (opt.job_max_memory > 0) { - (void) _change_rlimit_rss(); - } - sig_setup_sigmask(); + if ( !(resp = allocate_nodes()) ) exit(1); _print_job_information(resp); job = job_create_allocation(resp); - if(!job) - exit(1); - if (create_job_step(job) < 0) { - srun_job_destroy(job, 0); + opt.exclusive = false; /* not applicable for this step */ + if (!job || 
create_job_step(job) < 0) { + slurm_complete_job(job->jobid, 1); exit(1); } @@ -325,11 +267,6 @@ int srun(int ac, char **av) if (_become_user () < 0) info ("Warning: Unable to assume uid=%lu\n", opt.uid); - /* job structure should now be filled in */ - - if (_call_spank_local_user (job) < 0) - job_fatal(job, "Failure in local plugin stack"); - /* * Enhance environment for job */ @@ -353,144 +290,152 @@ int srun(int ac, char **av) env->labelio = opt.labelio; env->comm_port = slurmctld_comm_addr.port; env->comm_hostname = slurmctld_comm_addr.hostname; - if(job) { + if (job) { + uint16_t *tasks = NULL; + slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_TASKS, + &tasks); + env->select_jobinfo = job->select_jobinfo; env->nhosts = job->nhosts; env->nodelist = job->nodelist; env->task_count = _uint16_array_to_str( - job->nhosts, job->step_layout->tasks); + job->nhosts, tasks); env->jobid = job->jobid; env->stepid = job->stepid; } + if (opt.pty) { + struct termios term; + int fd = STDIN_FILENO; + + /* Save terminal settings for restore */ + tcgetattr(fd, &termdefaults); + tcgetattr(fd, &term); + /* Set raw mode on local tty */ + cfmakeraw(&term); + tcsetattr(fd, TCSANOW, &term); + atexit(&_pty_restore); + + set_winsize(job); + block_sigwinch(); + pty_thread_create(job); + env->pty_port = job->pty_port; + env->ws_col = job->ws_col; + env->ws_row = job->ws_row; + } setup_env(env); xfree(env->task_count); xfree(env); - _run_srun_prolog(job); + _task_state_struct_init(opt.nprocs); + slurm_step_launch_params_t_init(&launch_params); + launch_params.gid = opt.gid; + launch_params.argc = opt.argc; + launch_params.argv = opt.argv; + launch_params.multi_prog = opt.multi_prog ? true : false; + launch_params.cwd = opt.cwd; + launch_params.slurmd_debug = opt.slurmd_debug; + launch_params.buffered_stdio = !opt.unbuffered; + launch_params.labelio = opt.labelio ? 
true : false; + launch_params.remote_output_filename =fname_remote_string(job->ofname); + launch_params.remote_input_filename = fname_remote_string(job->ifname); + launch_params.remote_error_filename = fname_remote_string(job->efname); + launch_params.task_prolog = opt.task_prolog; + launch_params.task_epilog = opt.task_epilog; + launch_params.cpu_bind = opt.cpu_bind; + launch_params.cpu_bind_type = opt.cpu_bind_type; + launch_params.mem_bind = opt.mem_bind; + launch_params.mem_bind_type = opt.mem_bind_type; + launch_params.open_mode = opt.open_mode; + if (opt.acctg_freq >= 0) + launch_params.acctg_freq = opt.acctg_freq; + launch_params.pty = opt.pty; + launch_params.max_sockets = opt.max_sockets_per_node; + launch_params.max_cores = opt.max_cores_per_socket; + launch_params.max_threads = opt.max_threads_per_core; + launch_params.cpus_per_task = opt.cpus_per_task; + launch_params.ntasks_per_node = opt.ntasks_per_node; + launch_params.ntasks_per_socket = opt.ntasks_per_socket; + launch_params.ntasks_per_core = opt.ntasks_per_core; + launch_params.ckpt_path = xstrdup(opt.ckpt_path); - if (msg_thr_create(job) < 0) - job_fatal(job, "Unable to create msg thread"); - - mpi_job_info->jobid = job->jobid; - mpi_job_info->stepid = job->stepid; - mpi_job_info->step_layout = job->step_layout; - if (!(mpi_state = mpi_hook_client_prelaunch(mpi_job_info, &mpi_env))) - job_fatal (job, "Failed to initialize MPI"); - env_array_set_environment(mpi_env); - env_array_free(mpi_env); - - srun_set_stdio_fds(job, &fds); - job->client_io = client_io_handler_create(fds, - job->step_layout->task_cnt, - job->step_layout->node_cnt, - job->cred, - opt.labelio); - if (!job->client_io - || (client_io_handler_start(job->client_io) != SLURM_SUCCESS)) - job_fatal(job, "failed to start IO handler"); - - if (sig_thr_create(job) < 0) - job_fatal(job, "Unable to create signals thread: %m"); - - if (launch_thr_create(job) < 0) - job_fatal(job, "Unable to create launch thread: %m"); - - /* wait for job to terminate - */ - slurm_mutex_lock(&job->state_mutex); - while (job->state < SRUN_JOB_TERMINATED) { - pthread_cond_wait(&job->state_cond, &job->state_mutex); + /* job structure should now be filled in */ + _setup_signals(); + + _set_stdio_fds(job, &launch_params.local_fds); + + if (MPIR_being_debugged) { + launch_params.parallel_debug = true; + pmi_server_max_threads(1); + } else { + launch_params.parallel_debug = false; } - slurm_mutex_unlock(&job->state_mutex); - - /* job is now overdone, clean up - * - * If job is "forcefully terminated" exit immediately. - * - */ - if (job->state == SRUN_JOB_FORCETERM) { - info("Force Terminated job"); - srun_job_destroy(job, 0); - exit(1); - } else if (job->state == SRUN_JOB_CANCELLED) { - info("Cancelling job"); - srun_job_destroy(job, NO_VAL); - exit(1); - } else if (job->state == SRUN_JOB_FAILED) { - /* This check here is to check if the job failed - because we (srun or slurmd or slurmstepd wasn't - able to fork or make a thread or something we still - need the job failed check below incase the job - failed on it's own. - */ - info("Job Failed"); - srun_job_destroy(job, NO_VAL); + callbacks.task_start = _task_start; + callbacks.task_finish = _task_finish; + + _run_srun_prolog(job); + + mpir_init(job->ctx_params.task_count); + + if (_call_spank_local_user (job) < 0) { + error("Failure in local plugin stack"); + slurm_step_launch_abort(job->step_ctx); exit(1); } - /* - * We want to make sure we get the correct state of the job - * and not finish before all the messages have been sent. 
- */ - if (job->state == SRUN_JOB_FAILED) - close(job->forked_msg->msg_par->msg_pipe[1]); - debug("Waiting for message thread"); - if (pthread_join(job->jtid, NULL) < 0) - error ("Waiting on message thread: %m"); - debug("done"); - - /* have to check if job was cancelled here just to make sure - state didn't change when we were waiting for the message thread */ - exitcode = set_job_rc(job); - if (job->state == SRUN_JOB_CANCELLED) { - info("Cancelling job"); - srun_job_destroy(job, NO_VAL); - } else if (job->state == SRUN_JOB_FAILED) { - info("Terminating job"); - srun_job_destroy(job, job->rc); - } else - srun_job_destroy(job, job->rc); - - /* wait for launch thread */ - if (pthread_join(job->lid, NULL) < 0) - error ("Waiting on launch thread: %m"); + update_job_state(job, SRUN_JOB_LAUNCHING); + if (slurm_step_launch(job->step_ctx, slurmctld_comm_addr.hostname, + &launch_params, &callbacks) != SLURM_SUCCESS) { + error("Application launch failed: %m"); + goto cleanup; + } - /* - * Signal the IO thread to shutdown, which will stop - * the listening socket and file read (stdin) event - * IO objects, but allow file write (stdout) objects to - * complete any writing that remains. - */ - debug("Waiting for IO thread"); - if (client_io_handler_finish(job->client_io) != SLURM_SUCCESS) - error ("IO handler did not finish correctly: %m"); - client_io_handler_destroy(job->client_io); - debug("done"); - - - if (mpi_hook_client_fini (mpi_state) < 0) - ; /* eh, ignore errors here */ + update_job_state(job, SRUN_JOB_STARTING); + if (slurm_step_launch_wait_start(job->step_ctx) == SLURM_SUCCESS) { + update_job_state(job, SRUN_JOB_RUNNING); + /* Only set up MPIR structures if the step launched + correctly. */ + if (opt.multi_prog) + mpir_set_multi_name(job->ctx_params.task_count, + launch_params.argv[0]); + else + mpir_set_executable_names(launch_params.argv[0]); + MPIR_debug_state = MPIR_DEBUG_SPAWNED; + MPIR_Breakpoint(); + if (opt.debugger_test) + mpir_dump_proctable(); + } else { + info("Job step aborted before step completely launched."); + } - _run_srun_epilog(job); + slurm_step_launch_wait_finish(job->step_ctx); - /* - * Let exit() clean up remaining threads. 
- */ +cleanup: + if(got_alloc) { + cleanup_allocation(); + slurm_complete_job(job->jobid, global_rc); + } + _run_srun_epilog(job); + slurm_step_ctx_destroy(job->step_ctx); + mpir_cleanup(); + _task_state_struct_free(); log_fini(); - exit(exitcode); + + return global_rc; } static int _call_spank_local_user (srun_job_t *job) { struct spank_launcher_job_info info[1]; + job_step_create_response_msg_t *step_resp; info->uid = opt.uid; info->gid = opt.gid; info->jobid = job->jobid; info->stepid = job->stepid; - info->step_layout = job->step_layout; - info->argc = remote_argc; - info->argv = remote_argv; + slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_RESP, &step_resp); + info->step_layout = step_resp->step_layout; + info->argc = opt.argc; + info->argv = opt.argv; return spank_local_user(info); } @@ -554,21 +499,6 @@ static char *_uint16_array_to_str(int array_len, const uint16_t *array) return str; } -static void -_switch_standalone(srun_job_t *job) -{ - int cyclic = (opt.distribution == SLURM_DIST_CYCLIC); - - if (switch_alloc_jobinfo(&job->switch_job) < 0) - fatal("switch_alloc_jobinfo: %m"); - if (switch_build_jobinfo(job->switch_job, - job->nodelist, - job->step_layout->tasks, - cyclic, opt.network) < 0) - fatal("switch_build_jobinfo: %m"); -} - - static void _print_job_information(resource_allocation_response_msg_t *resp) { @@ -592,301 +522,6 @@ _print_job_information(resource_allocation_response_msg_t *resp) verbose("%s",job_details); } - -/* submit a batch job and return error code */ -static int -_run_batch_job(const char *argv0) -{ - int file_type, retries = 0; - int rc = SLURM_SUCCESS; - job_desc_msg_t *req; - submit_response_msg_t *resp; - char *script; - static char *msg = "Slurm job queue full, sleeping and retrying."; - - if ((remote_argc == 0) || (remote_argv[0] == NULL)) - return SLURM_ERROR; - - file_type = _is_file_text (remote_argv[0], NULL); - - /* if (file_type == TYPE_NOT_TEXT) { - * error ("file %s is not script", remote_argv[0]); - * return SLURM_ERROR; - * } - */ - - if ((script = _build_script (argv0, remote_argv[0], file_type)) - == NULL) { - error ("unable to build script from file %s", remote_argv[0]); - return SLURM_ERROR; - } - - if (!(req = job_desc_msg_create_from_opts (script))) - fatal ("Unable to create job request"); - - /* Do not re-use existing job id from environment variable - * when submitting new job from within a running job */ - if (!opt.jobid_set) - req->job_id = NO_VAL; - - while ((rc = slurm_submit_batch_job(req, &resp)) < 0) { - if ((errno != ESLURM_ERROR_ON_DESC_TO_RECORD_COPY) || - (retries >= MAX_RETRIES)) - return (error("Unable to submit batch job: %m")); - - if (retries == 0) - error(msg); - else - debug(msg); - sleep (++retries); - } - - - if (rc == SLURM_SUCCESS) { - if (resp->step_id == NO_VAL) - info ("jobid %u submitted",resp->job_id); - else - info ("jobid %u.%u submitted",resp->job_id, - resp->step_id); - if (resp->error_code) { - if (opt.immediate) { - error("Job failed: %s", - slurm_strerror(resp->error_code)); - rc = resp->error_code; - } else { - info("Warning: %s", - slurm_strerror(resp->error_code)); - } - } - slurm_free_submit_response_response_msg (resp); - } - - job_desc_msg_destroy (req); - xfree (script); - - return (rc); -} - -static void _send_options(const int argc, char **argv) -{ - int i; - - set_options(argc, argv, 0); - for(i=1; i<argc; i++) { - debug3("argv[%d] = %s.",i,argv[i]); - xfree(argv[i]); - } -} - -/* _get_shell - return a string containing the default shell for this user - * NOTE: This function is NOT 
reentrant (see getpwuid_r if needed) */ -static char * -_get_shell (void) -{ - struct passwd *pw_ent_ptr; - - pw_ent_ptr = getpwuid (opt.uid); - if ( ! pw_ent_ptr ) { - pw_ent_ptr = getpwnam( "nobody" ); - info( "warning - no user information for user %d", opt.uid ); - } - return pw_ent_ptr->pw_shell; -} - -static char *_get_token(char *buf_ptr) -{ - int i, token_size = 0; - char *token; - - for (i=1; (buf_ptr[i] != '\n') && (buf_ptr[i] != '\0'); - i++) { - if (isspace(buf_ptr[i])) - break; - } - token_size = i; - - token = xmalloc(token_size + 1); - strncpy(token, buf_ptr, token_size); - return token; -} - -/* _get_opts - gather options put in user script. Used for batch scripts. */ -static void -_get_options (const char *buffer) -{ - int argc = 1; - char *argv[MAX_ENTRIES]; - char *buf_loc = (char *) buffer; - - while ((buf_loc = strstr(buf_loc, "#SLURM"))) { - buf_loc += 6; - /* find the tokens and move them to argv */ - for ( ; ((buf_loc[0] != '\n') && (buf_loc[0] != '\0')); - buf_loc++) { - if (isspace(buf_loc[0])) - continue; - argv[argc] = _get_token(buf_loc); - buf_loc += (strlen(argv[argc]) - 1); - argc++; - } - } - if(argc > 1) - _send_options(argc, argv); - return; -} - -#define F 0 /* char never appears in text */ -#define T 1 /* character appears in plain ASCII text */ -#define I 2 /* character appears in ISO-8859 text */ -#define X 3 /* character appears in non-ISO extended ASCII */ -static char text_chars[256] = { - /* BEL BS HT LF FF CR */ - F, F, F, F, F, F, F, T, T, T, T, F, T, T, F, F, /* 0x0X */ - /* ESC */ - F, F, F, F, F, F, F, F, F, F, F, T, F, F, F, F, /* 0x1X */ - T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x2X */ - T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x3X */ - T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x4X */ - T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x5X */ - T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x6X */ - T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, F, /* 0x7X */ - /* NEL */ - X, X, X, X, X, T, X, X, X, X, X, X, X, X, X, X, /* 0x8X */ - X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 0x9X */ - I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xaX */ - I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xbX */ - I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xcX */ - I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xdX */ - I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xeX */ - I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I /* 0xfX */ -}; - - -/* _is_file_text - determine if specified file is a script - * shell_ptr - if not NULL, set to pointer to pathname of specified shell - * (if any, ie. return code of 2) - * return 0 if the specified file can not be read or does not contain text - * returns 2 if file contains text starting with "#!", otherwise - * returns 1 if file contains text, but lacks "#!" 
header - */ -static int -_is_file_text (char *fname, char **shell_ptr) -{ - int buf_size, fd, i; - int rc = 1; /* initially assume the file contains text */ - unsigned char buffer[8192]; - - if (fname[0] != '/') { - info("warning: %s not found in local path", fname); - return 0; - } - - fd = open(fname, O_RDONLY); - if (fd < 0) { - error ("Unable to open file %s: %m", fname); - return 0; - } - - buf_size = read (fd, buffer, sizeof (buffer)); - if (buf_size < 0) { - error ("Unable to read file %s: %m", fname); - rc = 0; - } - (void) close (fd); - - for (i=0; i<buf_size; i++) { - if (((int) text_chars[buffer[i]] != T) - && ((int) text_chars[buffer[i]] != I)) { - rc = 0; - break; - } - } - - if ((rc == 1) && (buf_size > 2)) { - if ((buffer[0] == '#') && (buffer[1] == '!')) - rc = 2; - } - - if ((rc == 2) && shell_ptr) { - shell_ptr[0] = xmalloc (sizeof (buffer)); - for (i=2; i<sizeof(buffer); i++) { - if (iscntrl (buffer[i])) { - shell_ptr[0][i-2] = '\0'; - break; - } else - shell_ptr[0][i-2] = buffer[i]; - } - if (i == sizeof(buffer)) { - error ("shell specified in script too long, not used"); - xfree (shell_ptr[0]); - shell_ptr[0] = NULL; - } - } - - return rc; -} - -/* allocate and build a string containing a script for a batch job */ -static char * -_build_script (const char *argv0, char *fname, int file_type) -{ - cbuf_t cb = cbuf_create(512, 1048576); - int fd = -1; - int i = 0; - char *buffer = NULL; - - if (file_type != 0) { - if ((fd = open(fname, O_RDONLY)) < 0) { - error ("Unable to open file %s: %m", fname); - return NULL; - } - } - - if (file_type != TYPE_SCRIPT) { - xstrfmtcat(buffer, "#!%s\n", _get_shell()); - if (file_type == 0) { - xstrfmtcat(buffer, "%s ", argv0); /* path to srun */ - for (i = 0; i < remote_argc; i++) - xstrfmtcat(buffer, "%s ", remote_argv[i]); - xstrcatchar(buffer, '\n'); - } - } - - if (file_type != 0) { - int len = buffer ? 
strlen(buffer) : 0; - int size; - - while ((size = cbuf_write_from_fd(cb, fd, -1, NULL)) > 0) - ; - - if (size < 0) { - error ("unable to read %s: %m", fname); - cbuf_destroy(cb); - return NULL; - } - - cbuf_write(cb, "\0", 1, NULL); - - xrealloc(buffer, cbuf_used(cb) + len +1); - - cbuf_read(cb, buffer+len, cbuf_used(cb)); - - if (close(fd) < 0) - error("close: %m"); - } - - cbuf_destroy(cb); - - _get_options(buffer); - - if (strlen(buffer) >= 0xffff) { - error("Job script exceeds size supported by slurm"); - xfree(buffer); - } - - return buffer; -} - /* Set SLURM_UMASK environment variable with current state */ static int _set_umask_env(void) { @@ -937,34 +572,6 @@ static void _set_prio_process_env(void) debug ("propagating SLURM_PRIO_PROCESS=%d", retval); } -/* - * Change SLURM_RLIMIT_RSS to the user specified value --job-mem - * or opt.job_max_memory - */ -static int _change_rlimit_rss(void) -{ - struct rlimit rlim[1]; - long new_cur; - int rc = SLURM_SUCCESS; - - if (getrlimit (RLIMIT_RSS, rlim) < 0) - return (error ("getrlimit (RLIMIT_RSS): %m")); - - new_cur = opt.job_max_memory*1024; - if((new_cur > rlim->rlim_max) || (new_cur < 0)) - rlim->rlim_cur = rlim->rlim_max; - else - rlim->rlim_cur = new_cur; - - if (setenvf (NULL, "SLURM_RLIMIT_RSS", "%lu", rlim->rlim_cur) < 0) - error ("unable to set %s in environment", "RSS"); - - if (setrlimit (RLIMIT_RSS, rlim) < 0) - return (error ("Unable to change memoryuse: %m")); - - return rc; -} - /* Set SLURM_RLIMIT_* environment variables with current resource * limit values, reset RLIMIT_NOFILE to maximum possible value */ static int _set_rlimit_env(void) @@ -1017,111 +624,6 @@ static int _set_rlimit_env(void) return rc; } -static int -_print_script_exit_status(const char *argv0, int status) -{ - char *corestr = ""; - int exitcode = 0; - - if (status == 0) { - verbose("%s: Done", argv0); - return exitcode; - } - -#ifdef WCOREDUMP - if (WCOREDUMP(status)) - corestr = " (core dumped)"; -#endif - - if (WIFSIGNALED(status)) { - error("%s: %s%s", argv0, sigstr(status), corestr); - return WTERMSIG(status) + 128; - } - if (WEXITSTATUS(status)) - error("%s: Exit %d", argv0, WEXITSTATUS(status)); - return WEXITSTATUS(status); -} - -/* allocation option specified, spawn a script and wait for it to exit */ -static int _run_job_script (srun_job_t *job, env_t *env) -{ - int status, exitcode; - pid_t cpid; - char **argv = (remote_argv[0] ? remote_argv : NULL); - - if (opt.nprocs_set) - env->nprocs = opt.nprocs; - if (opt.cpus_set) - env->cpus_per_task = opt.cpus_per_task; - if (opt.ntasks_per_node != NO_VAL) - env->ntasks_per_node = opt.ntasks_per_node; - if (opt.ntasks_per_socket != NO_VAL) - env->ntasks_per_socket = opt.ntasks_per_socket; - if (opt.ntasks_per_core != NO_VAL) - env->ntasks_per_core = opt.ntasks_per_core; - env->distribution = opt.distribution; - env->overcommit = opt.overcommit; - env->slurmd_debug = opt.slurmd_debug; - env->labelio = opt.labelio; - env->comm_port = slurmctld_comm_addr.port; - env->comm_hostname = slurmctld_comm_addr.hostname; - if(job) { - env->select_jobinfo = job->select_jobinfo; - env->jobid = job->jobid; - env->nhosts = job->nhosts; - env->nodelist = job->nodelist; - env->task_count = _uint16_array_to_str( - job->nhosts, job->step_layout->tasks); - } - - if (setup_env(env) != SLURM_SUCCESS) - return SLURM_ERROR; - - if (!argv) { - /* - * If no arguments were supplied, spawn a shell - * for the user. 
- */ - argv = xmalloc(2 * sizeof(char *)); - argv[0] = _get_shell(); - argv[1] = NULL; - } - - if ((cpid = fork()) < 0) { - error("fork: %m"); - exit(1); - } - - if (cpid == 0) { - /* - * Child. - */ -#ifdef HAVE_AIX - (void) mkcrid(0); -#endif - log_fini(); - sig_unblock_signals(); - execvp(argv[0], argv); - exit(1); - } - - /* - * Parent continues. - */ - - again: - if (waitpid(cpid, &status, 0) < (pid_t) 0) { - if (errno == EINTR) - goto again; - error("waitpid: %m"); - } - - exitcode = _print_script_exit_status(xbasename(argv[0]), status); - - (void) unsetenv("SLURM_JOBID"); /* no return code on some systems */ - return exitcode; -} - static int _become_user (void) { struct passwd *pwd = getpwuid (opt.uid); @@ -1186,8 +688,8 @@ static int _run_srun_script (srun_job_t *job, char *script) */ args = xmalloc(sizeof(char *) * 1024); args[0] = script; - for (i = 0; i < remote_argc; i++) { - args[i+1] = remote_argv[i]; + for (i = 0; i < opt.argc; i++) { + args[i+1] = opt.argv[i]; } args[i+1] = NULL; execv(script, args); @@ -1209,7 +711,7 @@ static int _run_srun_script (srun_job_t *job, char *script) } static int -_is_local_file (io_filename_t *fname) +_is_local_file (fname_t *fname) { if (fname->name == NULL) return 1; @@ -1220,10 +722,25 @@ _is_local_file (io_filename_t *fname) return ((fname->type != IO_PER_TASK) && (fname->type != IO_ONE)); } -void -srun_set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds) +static void +_set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds) { bool err_shares_out = false; + int file_flags; + + if (opt.open_mode == OPEN_MODE_APPEND) + file_flags = O_CREAT|O_WRONLY|O_APPEND; + else if (opt.open_mode == OPEN_MODE_TRUNCATE) + file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC; + else { + slurm_ctl_conf_t *conf; + conf = slurm_conf_lock(); + if (conf->job_file_append) + file_flags = O_CREAT|O_WRONLY|O_APPEND; + else + file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC; + slurm_conf_unlock(); + } /* * create stdin file descriptor @@ -1237,9 +754,14 @@ srun_set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds) fatal("Could not open stdin file: %m"); } if (job->ifname->type == IO_ONE) { + job_step_create_response_msg_t *step_resp = NULL; + + slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_RESP, + &step_resp); + cio_fds->in.taskid = job->ifname->taskid; cio_fds->in.nodeid = slurm_step_layout_host_id( - job->step_layout, job->ifname->taskid); + step_resp->step_layout, job->ifname->taskid); } } @@ -1251,7 +773,7 @@ srun_set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds) cio_fds->out.fd = STDOUT_FILENO; } else { cio_fds->out.fd = open(job->ofname->name, - O_CREAT|O_WRONLY|O_TRUNC, 0644); + file_flags, 0644); if (cio_fds->out.fd == -1) fatal("Could not open stdout file: %m"); } @@ -1269,12 +791,13 @@ srun_set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds) if (err_shares_out) { debug3("stdout and stderr sharing a file"); cio_fds->err.fd = cio_fds->out.fd; + cio_fds->err.taskid = cio_fds->out.taskid; } else if (_is_local_file(job->efname)) { if ((job->efname->name == NULL) || (job->efname->taskid != -1)) { cio_fds->err.fd = STDERR_FILENO; } else { cio_fds->err.fd = open(job->efname->name, - O_CREAT|O_WRONLY|O_TRUNC, 0644); + file_flags, 0644); if (cio_fds->err.fd == -1) fatal("Could not open stderr file: %m"); } @@ -1290,3 +813,286 @@ static void _define_symbols(void) { slurm_signal_job_step(0,0,0); /* needed by mvapich and mpichgm */ } + +static void _pty_restore(void) +{ + /* STDIN is probably closed by now */ + if 
(tcsetattr(STDOUT_FILENO, TCSANOW, &termdefaults) < 0) + fprintf(stderr, "tcsetattr: %s\n", strerror(errno)); +} + +/* opt.exclusive is set, disable user task layout controls */ +static void _step_opt_exclusive(void) +{ + if (opt.nodes_set) { + verbose("ignoring node count set by --nodes or SLURM_NNODES"); + verbose(" it is incompatible with --exclusive"); + opt.nodes_set = false; + opt.min_nodes = 1; + opt.max_nodes = 0; + } + if (!opt.nprocs_set) + fatal("--nprocs must be set with --exclusive"); + if (opt.relative_set) + fatal("--relative disabled, incompatible with --exclusive"); + if (opt.exc_nodes) + fatal("--exclude is incompatible with --exclusive"); + if (opt.nodelist) + fatal("--nodelist is incompatible with --exclusive"); +} + +static void +_task_start(launch_tasks_response_msg_t *msg) +{ + MPIR_PROCDESC *table; + int taskid; + int i; + + verbose("Node %s (%d), %d tasks started", + msg->node_name, msg->srun_node_id, msg->count_of_pids); + + for (i = 0; i < msg->count_of_pids; i++) { + taskid = msg->task_ids[i]; + table = &MPIR_proctable[taskid]; + table->host_name = xstrdup(msg->node_name); + /* table->executable_name is set elsewhere */ + table->pid = msg->local_pids[i]; + + if (msg->return_code == 0) { + bit_set(task_state.start_success, taskid); + } else { + bit_set(task_state.start_failure, taskid); + } + } + +} + +static void +_terminate_job_step(slurm_step_ctx_t *step_ctx) +{ + uint32_t job_id, step_id; + + slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_JOBID, &job_id); + slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_STEPID, &step_id); + info("Terminating job step %u.%u", job_id, step_id); + slurm_kill_job_step(job_id, step_id, SIGKILL); +} + +static void +_handle_max_wait(int signo) +{ + info("First task exited %ds ago", opt.max_wait); + _task_state_struct_print(); + _terminate_job_step(job->step_ctx); +} + +static void +_task_finish(task_exit_msg_t *msg) +{ + static bool first_done = true; + static bool first_error = true; + int rc = 0; + int i; + + verbose("%d tasks finished (rc=%u)", + msg->num_tasks, msg->return_code); + if (WIFEXITED(msg->return_code)) { + rc = WEXITSTATUS(msg->return_code); + if (rc != 0) { + for (i = 0; i < msg->num_tasks; i++) { + error("task %u exited with exit code %d", + msg->task_id_list[i], rc); + bit_set(task_state.finish_abnormal, + msg->task_id_list[i]); + } + } else { + for (i = 0; i < msg->num_tasks; i++) { + bit_set(task_state.finish_normal, + msg->task_id_list[i]); + } + } + } else if (WIFSIGNALED(msg->return_code)) { + for (i = 0; i < msg->num_tasks; i++) { + verbose("task %u killed by signal %d", + msg->task_id_list[i], + WTERMSIG(msg->return_code)); + bit_set(task_state.finish_abnormal, + msg->task_id_list[i]); + } + rc = 1; + } + global_rc = MAX(global_rc, rc); + + if (first_error && rc > 0 && opt.kill_bad_exit) { + first_error = false; + _terminate_job_step(job->step_ctx); + } else if (first_done && opt.max_wait > 0) { + /* If these are the first tasks to finish we need to + * start a timer to kill off the job step if the other + * tasks don't finish within opt.max_wait seconds. 
+ */ + first_done = false; + debug2("First task has exited"); + xsignal(SIGALRM, _handle_max_wait); + verbose("starting alarm of %d seconds", opt.max_wait); + alarm(opt.max_wait); + } +} + +static void +_task_state_struct_init(int num_tasks) +{ + task_state.start_success = bit_alloc(num_tasks); + task_state.start_failure = bit_alloc(num_tasks); + task_state.finish_normal = bit_alloc(num_tasks); + task_state.finish_abnormal = bit_alloc(num_tasks); +} + +/* + * Tasks will most likely have bits set in multiple of the task_state + * bit strings (e.g. a task can start normally and then later exit normally) + * so we ensure that a task is only "seen" once. + */ +static void +_task_state_struct_print(void) +{ + bitstr_t *tmp, *seen, *not_seen; + char buf[BUFSIZ]; + int len; + + len = bit_size(task_state.finish_abnormal); /* all the same length */ + tmp = bit_alloc(len); + seen = bit_alloc(len); + not_seen = bit_alloc(len); + bit_not(not_seen); + + if (bit_set_count(task_state.finish_abnormal) > 0) { + bit_copybits(tmp, task_state.finish_abnormal); + bit_and(tmp, not_seen); + bit_fmt(buf, BUFSIZ, tmp); + info("task%s: exited abnormally", buf); + bit_or(seen, tmp); + bit_copybits(not_seen, seen); + bit_not(not_seen); + } + + if (bit_set_count(task_state.finish_normal) > 0) { + bit_copybits(tmp, task_state.finish_normal); + bit_and(tmp, not_seen); + bit_fmt(buf, BUFSIZ, tmp); + info("task%s: exited", buf); + bit_or(seen, tmp); + bit_copybits(not_seen, seen); + bit_not(not_seen); + } + + if (bit_set_count(task_state.start_failure) > 0) { + bit_copybits(tmp, task_state.start_failure); + bit_and(tmp, not_seen); + bit_fmt(buf, BUFSIZ, tmp); + info("task%s: failed to start", buf); + bit_or(seen, tmp); + bit_copybits(not_seen, seen); + bit_not(not_seen); + } + + if (bit_set_count(task_state.start_success) > 0) { + bit_copybits(tmp, task_state.start_success); + bit_and(tmp, not_seen); + bit_fmt(buf, BUFSIZ, tmp); + info("task%s: running", buf); + bit_or(seen, tmp); + bit_copybits(not_seen, seen); + bit_not(not_seen); + } +} + +static void +_task_state_struct_free(void) +{ + bit_free(task_state.start_success); + bit_free(task_state.start_failure); + bit_free(task_state.finish_normal); + bit_free(task_state.finish_abnormal); +} + +static void _handle_intr() +{ + static time_t last_intr = 0; + static time_t last_intr_sent = 0; + if (opt.quit_on_intr) { + job_force_termination(job); + slurm_step_launch_abort(job->step_ctx); + return; + } + + if (((time(NULL) - last_intr) > 1) && !opt.disable_status) { + info("interrupt (one more within 1 sec to abort)"); + _task_state_struct_print(); + last_intr = time(NULL); + } else { /* second Ctrl-C in half as many seconds */ + update_job_state(job, SRUN_JOB_CANCELLED); + /* terminate job */ + if (job->state < SRUN_JOB_FORCETERM) { + if ((time(NULL) - last_intr_sent) < 1) { + job_force_termination(job); + slurm_step_launch_abort(job->step_ctx); + return; + } + + info("sending Ctrl-C to job"); + last_intr_sent = time(NULL); + slurm_step_launch_fwd_signal(job->step_ctx, SIGINT); + + } else { + job_force_termination(job); + slurm_step_launch_abort(job->step_ctx); + } + } +} + +static void _handle_signal(int signo) +{ + debug2("got signal %d", signo); + + switch (signo) { + case SIGINT: + _handle_intr(); + break; + case SIGQUIT: + info("Quit"); + /* continue with slurm_step_launch_abort */ + case SIGTERM: + case SIGHUP: + job_force_termination(job); + slurm_step_launch_abort(job->step_ctx); + break; + /* case SIGTSTP: */ +/* debug3("got SIGTSTP"); */ +/* break; */ + case 
SIGCONT: + debug3("got SIGCONT"); + break; + default: + slurm_step_launch_fwd_signal(job->step_ctx, signo); + break; + } +} + +static int _setup_signals() +{ + int sigarray[] = { + SIGINT, SIGQUIT, /*SIGTSTP,*/ SIGCONT, SIGTERM, + SIGALRM, SIGUSR1, SIGUSR2, SIGPIPE, 0 + }; + int rc = SLURM_SUCCESS, i=0, signo; + + xassert(job); + xassert(job->step_ctx); + + while ((signo = sigarray[i++])) + xsignal(signo, _handle_signal); + + return rc; +} + diff --git a/src/srun/srun.h b/src/srun/srun.h index 90f9aaf62..edf3da93b 100644 --- a/src/srun/srun.h +++ b/src/srun/srun.h @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -37,8 +37,6 @@ #include "src/api/step_io.h" #include "src/srun/srun_job.h" -extern pid_t srun_ppid; /* required for OpenMPI checkpoint */ - void srun_set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds); #endif /* !_HAVE_SRUN_H */ diff --git a/src/srun/srun_job.c b/src/srun/srun_job.c index 4c2ad3158..10d9daf4a 100644 --- a/src/srun/srun_job.c +++ b/src/srun/srun_job.c @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <grondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -56,19 +56,16 @@ #include "src/common/log.h" #include "src/common/read_config.h" #include "src/common/slurm_protocol_api.h" -#include "src/common/slurm_cred.h" #include "src/common/xmalloc.h" #include "src/common/xstring.h" #include "src/common/io_hdr.h" #include "src/common/forward.h" +#include "src/common/fd.h" #include "src/srun/srun_job.h" #include "src/srun/opt.h" #include "src/srun/fname.h" -#include "src/srun/attach.h" -#include "src/srun/msg.h" - -typedef enum {DSH_NEW, DSH_ACTIVE, DSH_DONE, DSH_FAILED} state_t; +#include "src/srun/debugger.h" /* * allocation information structure used to store general information @@ -85,13 +82,6 @@ typedef struct allocation_info { select_jobinfo_t select_jobinfo; } allocation_info_t; -typedef struct thd { - pthread_t thread; /* thread ID */ - pthread_attr_t attr; /* thread attributes */ - state_t state; /* thread state */ -} thd_t; - -int message_thread = 0; /* * Prototypes: */ @@ -99,9 +89,6 @@ static inline int _estimate_nports(int nclients, int cli_per_port); static int _compute_task_count(allocation_info_t *info); static void _set_nprocs(allocation_info_t *info); static srun_job_t *_job_create_structure(allocation_info_t *info); -static void _job_fake_cred(srun_job_t *job); -static char * _task_state_name(srun_task_state_t state_inx); -static char * _host_state_name(srun_host_state_t state_inx); static char * _normalize_hostlist(const char *hostlist); @@ -139,13 +126,9 @@ job_create_noalloc(void) * Create job, then fill in host addresses */ job = _job_create_structure(ai); - job->step_layout = fake_slurm_step_layout_create(job->nodelist, - NULL, NULL, - job->nhosts, - job->ntasks); - - _job_fake_cred(job); + job_update_io_fnames(job); + error: xfree(ai); return (job); @@ -378,106 +361,17 @@ job_create_allocation(resource_allocation_response_msg_t *resp) return (job); } -/* - * Create an srun 
job structure from a resource allocation response msg - */ -static srun_job_t * -_job_create_structure(allocation_info_t *ainfo) -{ - srun_job_t *job = xmalloc(sizeof(srun_job_t)); - - _set_nprocs(ainfo); - debug2("creating job with %d tasks", opt.nprocs); - - slurm_mutex_init(&job->state_mutex); - pthread_cond_init(&job->state_cond, NULL); - job->state = SRUN_JOB_INIT; - - job->nodelist = xstrdup(ainfo->nodelist); - job->stepid = ainfo->stepid; - -#ifdef HAVE_FRONT_END /* Limited job step support */ - opt.overcommit = true; - job->nhosts = 1; -#else - job->nhosts = ainfo->nnodes; -#endif - -#ifndef HAVE_FRONT_END - if(opt.min_nodes > job->nhosts) { - error("Only allocated %d nodes asked for %d", - job->nhosts, opt.min_nodes); - if (opt.exc_nodes) { - /* When resources are pre-allocated and some nodes - * are explicitly excluded, this error can occur. */ - error("Are required nodes explicitly excluded?"); - } - return NULL; - } -#endif - job->select_jobinfo = ainfo->select_jobinfo; - job->jobid = ainfo->jobid; - - job->ntasks = opt.nprocs; - job->task_prolog = xstrdup(opt.task_prolog); - job->task_epilog = xstrdup(opt.task_epilog); - /* Compute number of file descriptors / Ports needed for Job - * control info server - */ - job->njfds = _estimate_nports(opt.nprocs, 48); - debug3("njfds = %d", job->njfds); - job->jfd = (slurm_fd *) - xmalloc(job->njfds * sizeof(slurm_fd)); - job->jaddr = (slurm_addr *) - xmalloc(job->njfds * sizeof(slurm_addr)); - - slurm_mutex_init(&job->task_mutex); - - job->old_job = false; - job->removed = false; - job->signaled = false; - job->rc = -1; - - /* - * Initialize Launch and Exit timeout values - */ - job->ltimeout = 0; - job->etimeout = 0; - - job->host_state = xmalloc(job->nhosts * sizeof(srun_host_state_t)); - - /* ntask task states and statii*/ - job->task_state = xmalloc(opt.nprocs * sizeof(srun_task_state_t)); - job->tstatus = xmalloc(opt.nprocs * sizeof(int)); - - job_update_io_fnames(job); - - return (job); -} - void update_job_state(srun_job_t *job, srun_job_state_t state) { - pipe_enum_t pipe_enum = PIPE_JOB_STATE; pthread_mutex_lock(&job->state_mutex); if (job->state < state) { job->state = state; - if(message_thread) { - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum, sizeof(int)); - safe_write(job->forked_msg->par_msg->msg_pipe[1], - &job->state, sizeof(int)); - } pthread_cond_signal(&job->state_cond); } pthread_mutex_unlock(&job->state_mutex); return; -rwfail: - pthread_mutex_unlock(&job->state_mutex); - error("update_job_state: " - "write from srun message-handler process failed"); - } srun_job_state_t @@ -494,245 +388,8 @@ job_state(srun_job_t *job) void job_force_termination(srun_job_t *job) { - if (mode == MODE_ATTACH) { - info ("forcing detach"); - update_job_state(job, SRUN_JOB_DETACHED); - } else { - info ("forcing job termination"); - update_job_state(job, SRUN_JOB_FORCETERM); - } - - client_io_handler_finish(job->client_io); -} - - -int -set_job_rc(srun_job_t *job) -{ - int i, rc = 0, task_failed = 0; - - /* - * return code set to at least one if any tasks failed launch - */ - for (i = 0; i < opt.nprocs; i++) { - if (job->task_state[i] == SRUN_TASK_FAILED) - task_failed = 1; - if (job->rc < job->tstatus[i]) - job->rc = job->tstatus[i]; - } - if (task_failed && (job->rc <= 0)) { - job->rc = 1; - return 1; - } - - if ((rc = WEXITSTATUS(job->rc))) - return rc; - if (WIFSIGNALED(job->rc)) - return (128 + WTERMSIG(job->rc)); - return job->rc; -} - - -void job_fatal(srun_job_t *job, const char *msg) -{ - if (msg) error(msg); - 
- srun_job_destroy(job, errno); - - exit(1); -} - - -void -srun_job_destroy(srun_job_t *job, int error) -{ - if (job->removed) - return; - - if (job->old_job) { - debug("cancelling job step %u.%u", job->jobid, job->stepid); - slurm_kill_job_step(job->jobid, job->stepid, SIGKILL); - } else if (!opt.no_alloc) { - debug("cancelling job %u", job->jobid); - slurm_complete_job(job->jobid, error); - } else { - debug("no allocation to cancel, killing remote tasks"); - fwd_signal(job, SIGKILL, opt.max_threads); - return; - } - - if (error) debugger_launch_failure(job); - - job->removed = true; -} - - -void -srun_job_kill(srun_job_t *job) -{ - if (!opt.no_alloc) { - if (slurm_kill_job_step(job->jobid, job->stepid, SIGKILL) < 0) - error ("slurm_kill_job_step: %m"); - } - update_job_state(job, SRUN_JOB_FAILED); -} - -void -report_job_status(srun_job_t *job) -{ - int i; - hostlist_t hl = hostlist_create(job->nodelist); - char *name = NULL; - - for (i = 0; i < job->nhosts; i++) { - name = hostlist_shift(hl); - info ("host:%s state:%s", name, - _host_state_name(job->host_state[i])); - free(name); - } -} - - -#define NTASK_STATES 6 -void -report_task_status(srun_job_t *job) -{ - int i; - char buf[MAXHOSTRANGELEN+2]; - hostlist_t hl[NTASK_STATES]; - - for (i = 0; i < NTASK_STATES; i++) - hl[i] = hostlist_create(NULL); - - for (i = 0; i < opt.nprocs; i++) { - int state = job->task_state[i]; - debug3(" state of task %d is %d", i, state); - snprintf(buf, 256, "%d", i); - hostlist_push(hl[state], buf); - } - - for (i = 0; i< NTASK_STATES; i++) { - if (hostlist_count(hl[i]) > 0) { - hostlist_ranged_string(hl[i], MAXHOSTRANGELEN, buf); - info("task%s: %s", buf, _task_state_name(i)); - } - hostlist_destroy(hl[i]); - } - -} - -void -fwd_signal(srun_job_t *job, int signo, int max_threads) -{ - int i; - slurm_msg_t req; - kill_tasks_msg_t msg; - static pthread_mutex_t sig_mutex = PTHREAD_MUTEX_INITIALIZER; - pipe_enum_t pipe_enum = PIPE_SIGNALED; - hostlist_t hl; - char *name = NULL; - char buf[8192]; - List ret_list = NULL; - ListIterator itr; - ret_data_info_t *ret_data_info = NULL; - int rc = SLURM_SUCCESS; - - slurm_mutex_lock(&sig_mutex); - - if (signo == SIGKILL || signo == SIGINT || signo == SIGTERM) { - slurm_mutex_lock(&job->state_mutex); - job->signaled = true; - slurm_mutex_unlock(&job->state_mutex); - if(message_thread) { - write(job->forked_msg->par_msg->msg_pipe[1], - &pipe_enum,sizeof(int)); - write(job->forked_msg->par_msg->msg_pipe[1], - &job->signaled,sizeof(int)); - } - } - - debug2("forward signal %d to job", signo); - - /* common to all tasks */ - msg.job_id = job->jobid; - msg.job_step_id = job->stepid; - msg.signal = (uint32_t) signo; - - hl = hostlist_create(""); - for (i = 0; i < job->nhosts; i++) { - if (job->host_state[i] != SRUN_HOST_REPLIED) { - name = nodelist_nth_host( - job->step_layout->node_list, i); - debug2("%s has not yet replied\n", name); - free(name); - continue; - } - if (job_active_tasks_on_host(job, i) == 0) - continue; - name = nodelist_nth_host(job->step_layout->node_list, i); - hostlist_push(hl, name); - free(name); - } - if(!hostlist_count(hl)) { - hostlist_destroy(hl); - goto nothing_left; - } - hostlist_ranged_string(hl, sizeof(buf), buf); - hostlist_destroy(hl); - name = xstrdup(buf); - - slurm_msg_t_init(&req); - req.msg_type = REQUEST_SIGNAL_TASKS; - req.data = &msg; - - debug3("sending signal to host %s", name); - - if (!(ret_list = slurm_send_recv_msgs(name, &req, 0))) { - error("fwd_signal: slurm_send_recv_msgs really failed bad"); - xfree(name); - 
slurm_mutex_unlock(&sig_mutex); - return; - } - xfree(name); - itr = list_iterator_create(ret_list); - while((ret_data_info = list_next(itr))) { - rc = slurm_get_return_code(ret_data_info->type, - ret_data_info->data); - /* - * Report error unless it is "Invalid job id" which - * probably just means the tasks exited in the meanwhile. - */ - if ((rc != 0) && (rc != ESLURM_INVALID_JOB_ID) - && (rc != ESLURMD_JOB_NOTRUNNING) && (rc != ESRCH)) { - error("%s: signal: %s", - ret_data_info->node_name, - slurm_strerror(rc)); - } - } - list_iterator_destroy(itr); - list_destroy(ret_list); -nothing_left: - debug2("All tasks have been signalled"); - - slurm_mutex_unlock(&sig_mutex); -} - -int -job_active_tasks_on_host(srun_job_t *job, int hostid) -{ - int i; - int retval = 0; - - slurm_mutex_lock(&job->task_mutex); - for (i = 0; i < job->step_layout->tasks[hostid]; i++) { - uint32_t *tids = job->step_layout->tids[hostid]; - xassert(tids != NULL); - debug("Task %d state: %d", tids[i], job->task_state[tids[i]]); - if (job->task_state[tids[i]] == SRUN_TASK_RUNNING) - retval++; - } - slurm_mutex_unlock(&job->task_mutex); - return retval; + info ("forcing job termination"); + update_job_state(job, SRUN_JOB_FORCETERM); } static inline int @@ -767,63 +424,61 @@ _set_nprocs(allocation_info_t *info) } } -void -job_update_io_fnames(srun_job_t *job) +/* + * Create an srun job structure from a resource allocation response msg + */ +static srun_job_t * +_job_create_structure(allocation_info_t *ainfo) { - job->ifname = fname_create(job, opt.ifname); - job->ofname = fname_create(job, opt.ofname); - job->efname = opt.efname ? fname_create(job, opt.efname) : job->ofname; -} + srun_job_t *job = xmalloc(sizeof(srun_job_t)); + + _set_nprocs(ainfo); + debug2("creating job with %d tasks", opt.nprocs); -static void -_job_fake_cred(srun_job_t *job) -{ - slurm_cred_arg_t arg; - arg.jobid = job->jobid; - arg.stepid = job->stepid; - arg.uid = opt.uid; - arg.hostlist = job->nodelist; - arg.alloc_lps_cnt = 0; - arg.alloc_lps = NULL; - job->cred = slurm_cred_faker(&arg); -} + slurm_mutex_init(&job->state_mutex); + pthread_cond_init(&job->state_cond, NULL); + job->state = SRUN_JOB_INIT; -static char * -_task_state_name(srun_task_state_t state_inx) -{ - switch (state_inx) { - case SRUN_TASK_INIT: - return "initializing"; - case SRUN_TASK_RUNNING: - return "running"; - case SRUN_TASK_FAILED: - return "failed"; - case SRUN_TASK_EXITED: - return "exited"; - case SRUN_TASK_IO_WAIT: - return "waiting for io"; - case SRUN_TASK_ABNORMAL_EXIT: - return "exited abnormally"; - default: - return "unknown"; - } + job->nodelist = xstrdup(ainfo->nodelist); + job->stepid = ainfo->stepid; + +#ifdef HAVE_FRONT_END /* Limited job step support */ + opt.overcommit = true; + job->nhosts = 1; +#else + job->nhosts = ainfo->nnodes; +#endif + +#ifndef HAVE_FRONT_END + if(opt.min_nodes > job->nhosts) { + error("Only allocated %d nodes asked for %d", + job->nhosts, opt.min_nodes); + if (opt.exc_nodes) { + /* When resources are pre-allocated and some nodes + * are explicitly excluded, this error can occur. 
*/ + error("Are required nodes explicitly excluded?"); + } + return NULL; + } +#endif + job->select_jobinfo = ainfo->select_jobinfo; + job->jobid = ainfo->jobid; + + job->ntasks = opt.nprocs; + + job->rc = -1; + + job_update_io_fnames(job); + + return (job); } -static char * -_host_state_name(srun_host_state_t state_inx) +void +job_update_io_fnames(srun_job_t *job) { - switch (state_inx) { - case SRUN_HOST_INIT: - return "initial"; - case SRUN_HOST_CONTACTED: - return "contacted"; - case SRUN_HOST_UNREACHABLE: - return "unreachable"; - case SRUN_HOST_REPLIED: - return "replied"; - default: - return "unknown"; - } + job->ifname = fname_create(job, opt.ifname); + job->ofname = fname_create(job, opt.ofname); + job->efname = opt.efname ? fname_create(job, opt.efname) : job->ofname; } static char * diff --git a/src/srun/srun_job.h b/src/srun/srun_job.h index 3f099448c..200bf9838 100644 --- a/src/srun/srun_job.h +++ b/src/srun/srun_job.h @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -53,7 +53,6 @@ #include "src/common/slurm_protocol_defs.h" #include "src/api/step_io.h" -//#include "src/srun/fname.h" typedef enum { SRUN_JOB_INIT = 0, /* Job's initial state */ @@ -70,56 +69,25 @@ typedef enum { SRUN_JOB_FORCETERM /* Forced termination of IO thread */ } srun_job_state_t; -typedef enum { - SRUN_HOST_INIT = 0, - SRUN_HOST_CONTACTED, - SRUN_HOST_UNREACHABLE, - SRUN_HOST_REPLIED -} srun_host_state_t; +enum io_t { + IO_ALL = 0, /* multiplex output from all/bcast stdin to all */ + IO_ONE = 1, /* output from only one task/stdin to one task */ + IO_PER_TASK = 2, /* separate output/input file per task */ + IO_NONE = 3, /* close output/close stdin */ +}; -typedef enum { - SRUN_TASK_INIT = 0, - SRUN_TASK_RUNNING, - SRUN_TASK_FAILED, - SRUN_TASK_IO_WAIT,/* this state deprecated with new eio stdio engine */ - SRUN_TASK_EXITED, - SRUN_TASK_ABNORMAL_EXIT -} srun_task_state_t; - -typedef enum { - PIPE_NONE = 0, - PIPE_JOB_STATE, - PIPE_TASK_STATE, - PIPE_TASK_EXITCODE, - PIPE_HOST_STATE, - PIPE_SIGNALED, - PIPE_MPIR_DEBUG_STATE, - PIPE_UPDATE_MPIR_PROCTABLE, - PIPE_UPDATE_STEP_LAYOUT, - PIPE_NODE_FAIL -} pipe_enum_t; - -/* For Message thread */ -typedef struct forked_msg_pipe { - int msg_pipe[2]; - int pid; -} forked_msg_pipe_t; - -typedef struct forked_message { - forked_msg_pipe_t * par_msg; - forked_msg_pipe_t * msg_par; - enum job_states * job_state; -} forked_msg_t; - -typedef struct io_filename io_filename_t; +#define format_io_t(t) (t == IO_ONE) ? "one" : (t == IO_ALL) ? 
\ + "all" : "per task" + +typedef struct fname { + char *name; + enum io_t type; + int taskid; /* taskid for IO if IO_ONE */ +} fname_t; typedef struct srun_job { - slurm_step_layout_t *step_layout; /* holds info about how the task is - laid out */ uint32_t jobid; /* assigned job id */ uint32_t stepid; /* assigned step id */ - bool old_job; /* run job step under previous allocation */ - bool removed; /* job has been removed from SLURM */ uint32_t nhosts; /* node count */ uint32_t ntasks; /* task count */ @@ -127,46 +95,26 @@ typedef struct srun_job { pthread_mutex_t state_mutex; pthread_cond_t state_cond; - bool signaled; /* True if user generated signal to job */ int rc; /* srun return code */ - slurm_cred_t cred; /* Slurm job credential */ char *nodelist; /* nodelist in string form */ - pthread_t sigid; /* signals thread tid */ - - pthread_t jtid; /* job control thread id */ - slurm_fd *jfd; /* job control info fd */ - - pthread_t lid; /* launch thread id */ - - client_io_t *client_io; - time_t ltimeout; /* Time by which all tasks must be running */ - time_t etimeout; /* exit timeout (see opt.max_wait */ - - srun_host_state_t *host_state; /* nhost host states */ - - int *tstatus; /* ntask exit statii */ - srun_task_state_t *task_state; /* ntask task states */ - - switch_jobinfo_t switch_job; - io_filename_t *ifname; - io_filename_t *ofname; - io_filename_t *efname; - forked_msg_t *forked_msg; - char *task_epilog; /* task-epilog */ - char *task_prolog; /* task-prolog */ - pthread_mutex_t task_mutex; - int njfds; /* number of job control info fds */ - slurm_addr *jaddr; /* job control info ports */ - int thr_count; /* count of threads in job launch */ + fname_t *ifname; + fname_t *ofname; + fname_t *efname; /* Output streams and stdin fileno */ select_jobinfo_t select_jobinfo; - -} srun_job_t; -extern int message_thread; + /* Pseudo terminial support */ + pthread_t pty_id; /* pthread to communicate window size changes */ + int pty_fd; /* file to communicate window size changes */ + uint16_t pty_port; /* used to communicate window size changes */ + uint8_t ws_col; /* window size, columns */ + uint8_t ws_row; /* window size, row count */ + slurm_step_ctx_t *step_ctx; + slurm_step_ctx_params_t ctx_params; +} srun_job_t; void update_job_state(srun_job_t *job, srun_job_state_t newstate); void job_force_termination(srun_job_t *job); @@ -186,38 +134,7 @@ extern srun_job_t * job_create_structure( */ void job_update_io_fnames(srun_job_t *j); -/* - * Issue a fatal error message and terminate running job - */ -void job_fatal(srun_job_t *job, const char *msg); - -/* - * Deallocates job and or job step via slurm API - */ -void srun_job_destroy(srun_job_t *job, int error); - -/* - * Send SIGKILL to running job via slurm controller - */ -void srun_job_kill(srun_job_t *job); - -/* - * report current task status - */ -void report_task_status(srun_job_t *job); - -/* - * report current node status - */ -void report_job_status(srun_job_t *job); - -/* - * Sets job->rc to highest task exit value. 
- * Returns job return code (for srun exit status) - */ -int set_job_rc(srun_job_t *job); - -void fwd_signal(srun_job_t *job, int signal, int max_threads); -int job_active_tasks_on_host(srun_job_t *job, int hostid); +/* Set up port to handle messages from slurmctld */ +slurm_fd slurmctld_msg_init(void); #endif /* !_HAVE_JOB_H */ diff --git a/src/srun/srun_pty.c b/src/srun/srun_pty.c new file mode 100644 index 000000000..8de090594 --- /dev/null +++ b/src/srun/srun_pty.c @@ -0,0 +1,171 @@ +/*****************************************************************************\ + * src/srun/srun_pty.c - pty handling for srun + ***************************************************************************** + * Copyright (C) 2002-2006 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#ifdef HAVE_CONFIG_H +# include "config.h" +#endif + +#if HAVE_PTHREAD +#include <pthread.h> +#endif + +#include <signal.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/poll.h> + +#include <slurm/slurm_errno.h> + +#include "src/common/log.h" +#include "src/common/macros.h" +#include "src/common/slurm_protocol_api.h" +#include "src/common/slurm_protocol_defs.h" +#include "src/common/xmalloc.h" +#include "src/common/xstring.h" +#include "src/common/xsignal.h" + +#include "src/srun/opt.h" +#include "src/srun/srun_job.h" + +#define MAX_RETRIES 3 + +/* Processed by pty_thr() */ +static int pty_sigarray[] = { SIGWINCH, 0 }; +static int winch; + +/* + * Static prototypes + */ +static void _handle_sigwinch(int sig); +static void * _pty_thread(void *arg); + +void set_winsize(srun_job_t *job) +{ + struct winsize ws; + + if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws)) + error("ioctl(TIOCGWINSZ): %m"); + else { + job->ws_row = ws.ws_row; + job->ws_col = ws.ws_col; + debug2("winsize %u:%u", job->ws_row, job->ws_col); + } + return; +} + +/* SIGWINCH should already be blocked by srun/signal.c */ +void block_sigwinch(void) +{ + xsignal_block(pty_sigarray); +} + +void pty_thread_create(srun_job_t *job) +{ + slurm_addr pty_addr; + pthread_attr_t attr; + + if ((job->pty_fd = slurm_init_msg_engine_port(0)) < 0) { + error("init_msg_engine_port: %m"); + return; + } + if (slurm_get_stream_addr(job->pty_fd, &pty_addr) < 0) { + error("slurm_get_stream_addr: %m"); + return; + } + job->pty_port = ntohs(((struct sockaddr_in) pty_addr).sin_port); + debug2("initialized job control port %hu\n", job->pty_port); + + slurm_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + if ((pthread_create(&job->pty_id, &attr, &_pty_thread, (void *) job))) + error("pthread_create(pty_thread): %m"); + slurm_attr_destroy(&attr); +} + +static void _handle_sigwinch(int sig) +{ + winch = 1; + xsignal(SIGWINCH, _handle_sigwinch); +} + +static void _notify_winsize_change(int fd, srun_job_t *job) +{ + pty_winsz_t winsz; + int len; + char buf[4]; + + if (fd < 0) { + error("pty: no file to write window size changes to"); + return; + } + + winsz.cols = htons(job->ws_col); + winsz.rows = htons(job->ws_row); + memcpy(buf, &winsz.cols, 2); + memcpy(buf+2, &winsz.rows, 2); + len = slurm_write_stream(fd, buf, 4); + if (len < sizeof(winsz)) + error("pty: window size change notification error: %m"); +} + +static void *_pty_thread(void *arg) +{ + int fd = -1; + srun_job_t *job = (srun_job_t *) arg; + slurm_addr client_addr; + + xsignal_unblock(pty_sigarray); + xsignal(SIGWINCH, _handle_sigwinch); + + if ((fd = slurm_accept_msg_conn(job->pty_fd, &client_addr)) < 0) { + error("pty: accept failure: %m"); + return NULL; + } + + while (job->state <= SRUN_JOB_RUNNING) { + debug2("waiting for SIGWINCH"); + poll(NULL, 0, -1); + if (winch) { + set_winsize(job); + _notify_winsize_change(fd, job); + } + winch = 0; + } + return NULL; +} + + diff --git a/src/srun/signals.h b/src/srun/srun_pty.h similarity index 89% rename from src/srun/signals.h rename to src/srun/srun_pty.h index 0fb53db82..56e8ded1a 100644 --- a/src/srun/signals.h +++ b/src/srun/srun_pty.h @@ -1,10 +1,10 @@ /*****************************************************************************\ - * src/srun/signals.h - srun signal handling + * src/srun/srun_pty.h - srun signal handling ***************************************************************************** * Copyright (C) 2002 
The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Mark Grodnona <mgrondona@llnl.gov>. - * UCRL-CODE-226842. + * Written by Moe Jette <jette@llnl.gov>. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -38,10 +38,12 @@ #ifndef _SIGNALS_H #define _SIGNALS_H +#include "src/srun/srun_job.h" + typedef struct srun_job signal_job_t; -int sig_setup_sigmask(void); -int sig_unblock_signals(void); -int sig_thr_create(signal_job_t *job); +void block_sigwinch(void); +void pty_thread_create(srun_job_t *job); +void set_winsize(srun_job_t *job); #endif /* !_SIGNALS_H */ diff --git a/src/sstat/Makefile.am b/src/sstat/Makefile.am new file mode 100644 index 000000000..cdb0a0c6e --- /dev/null +++ b/src/sstat/Makefile.am @@ -0,0 +1,20 @@ +# Makefile for sstat + +AUTOMAKE_OPTIONS = foreign + +INCLUDES = -I$(top_srcdir) + +bin_PROGRAMS = sstat + +sstat_LDADD = $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la + +noinst_HEADERS = sstat.c +sstat_SOURCES = sstat.c process.c print.c options.c + +force: +$(sstat_LDADD) : force + @cd `dirname $@` && $(MAKE) `basename $@` + +sstat_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) + diff --git a/src/sstat/Makefile.in b/src/sstat/Makefile.in new file mode 100644 index 000000000..d752d24b7 --- /dev/null +++ b/src/sstat/Makefile.in @@ -0,0 +1,563 @@ +# Makefile.in generated by automake 1.10.1 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
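The _notify_winsize_change() routine shown earlier in srun_pty.c serializes the new terminal geometry as a fixed 4-byte message: two 16-bit values, columns then rows, in network byte order, written to the step's stream connection. Below is a minimal standalone sketch of that encode/decode. It is illustrative only: pty_winsz_t is not defined in this hunk, so a struct with two uint16_t fields is assumed, and pack_winsz()/unpack_winsz() are made-up names, not SLURM functions.

/*
 * Standalone sketch of the 4-byte window-size message built by
 * _notify_winsize_change() (assumed layout: two uint16_t fields).
 */
#include <arpa/inet.h>   /* htons, ntohs */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct winsz_wire { uint16_t cols; uint16_t rows; };  /* assumed stand-in */

static void pack_winsz(uint16_t cols, uint16_t rows, char buf[4])
{
	struct winsz_wire w;

	w.cols = htons(cols);           /* network byte order, as in srun_pty.c */
	w.rows = htons(rows);
	memcpy(buf,     &w.cols, 2);
	memcpy(buf + 2, &w.rows, 2);
}

static void unpack_winsz(const char buf[4], uint16_t *cols, uint16_t *rows)
{
	uint16_t tmp;

	memcpy(&tmp, buf, 2);
	*cols = ntohs(tmp);
	memcpy(&tmp, buf + 2, 2);
	*rows = ntohs(tmp);
}

int main(void)
{
	char buf[4];
	uint16_t cols, rows;

	pack_winsz(80, 24, buf);        /* e.g. after a SIGWINCH */
	unpack_winsz(buf, &cols, &rows);
	printf("cols=%u rows=%u\n", (unsigned)cols, (unsigned)rows);
	return 0;
}

The receiving end is not part of this excerpt; it would typically apply the decoded values to the pseudo-terminal (for example via TIOCSWINSZ), but that is an assumption, not something this patch shows.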
+ +@SET_MAKE@ + +# Makefile for sstat + + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +target_triplet = @target@ +bin_PROGRAMS = sstat$(EXEEXT) +subdir = src/sstat +DIST_COMMON = $(noinst_HEADERS) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ + $(top_srcdir)/auxdir/slurm.m4 \ + $(top_srcdir)/auxdir/x_ac__system_configuration.m4 \ + $(top_srcdir)/auxdir/x_ac_affinity.m4 \ + $(top_srcdir)/auxdir/x_ac_aix.m4 \ + $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ + $(top_srcdir)/auxdir/x_ac_debug.m4 \ + $(top_srcdir)/auxdir/x_ac_elan.m4 \ + $(top_srcdir)/auxdir/x_ac_federation.m4 \ + $(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \ + $(top_srcdir)/auxdir/x_ac_gtk.m4 \ + $(top_srcdir)/auxdir/x_ac_munge.m4 \ + $(top_srcdir)/auxdir/x_ac_ncurses.m4 \ + $(top_srcdir)/auxdir/x_ac_pam.m4 \ + $(top_srcdir)/auxdir/x_ac_ptrace.m4 \ + $(top_srcdir)/auxdir/x_ac_readline.m4 \ + $(top_srcdir)/auxdir/x_ac_setpgrp.m4 \ + $(top_srcdir)/auxdir/x_ac_setproctitle.m4 \ + $(top_srcdir)/auxdir/x_ac_sgi_job.m4 \ + $(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \ + $(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h +CONFIG_CLEAN_FILES = +am__installdirs = "$(DESTDIR)$(bindir)" +binPROGRAMS_INSTALL = $(INSTALL_PROGRAM) +PROGRAMS = $(bin_PROGRAMS) +am_sstat_OBJECTS = sstat.$(OBJEXT) process.$(OBJEXT) print.$(OBJEXT) \ + options.$(OBJEXT) +sstat_OBJECTS = $(am_sstat_OBJECTS) +sstat_DEPENDENCIES = $(top_builddir)/src/common/libcommon.o \ + $(top_builddir)/src/api/libslurmhelper.la +sstat_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sstat_LDFLAGS) \ + $(LDFLAGS) -o $@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm +depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp +am__depfiles_maybe = depfiles +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(sstat_SOURCES) +DIST_SOURCES = $(sstat_SOURCES) +HEADERS = $(noinst_HEADERS) +ETAGS = etags +CTAGS = ctags +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTHD_CFLAGS = @AUTHD_CFLAGS@ +AUTHD_LIBS = @AUTHD_LIBS@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +BG_INCLUDES = @BG_INCLUDES@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ 
+CFLAGS = @CFLAGS@ +CMD_LDFLAGS = @CMD_LDFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +ECHO = @ECHO@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ELAN_LIBS = @ELAN_LIBS@ +EXEEXT = @EXEEXT@ +F77 = @F77@ +FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@ +FFLAGS = @FFLAGS@ +GREP = @GREP@ +GTK2_CFLAGS = @GTK2_CFLAGS@ +GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ +HAVEPKGCONFIG = @HAVEPKGCONFIG@ +HAVE_AIX = @HAVE_AIX@ +HAVE_ELAN = @HAVE_ELAN@ +HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ +HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIB_LDFLAGS = @LIB_LDFLAGS@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MKDIR_P = @MKDIR_P@ +MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ +MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ +MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ +NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ +NUMA_LIBS = @NUMA_LIBS@ +OBJEXT = @OBJEXT@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PAM_LIBS = @PAM_LIBS@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ +PLPA_LIBS = @PLPA_LIBS@ +PROCTRACKDIR = @PROCTRACKDIR@ +PROJECT = @PROJECT@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +READLINE_LIBS = @READLINE_LIBS@ +RELEASE = @RELEASE@ +SED = @SED@ +SEMAPHORE_LIBS = @SEMAPHORE_LIBS@ +SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ +SLURMD_PORT = @SLURMD_PORT@ +SLURM_API_AGE = @SLURM_API_AGE@ +SLURM_API_CURRENT = @SLURM_API_CURRENT@ +SLURM_API_MAJOR = @SLURM_API_MAJOR@ +SLURM_API_REVISION = @SLURM_API_REVISION@ +SLURM_API_VERSION = @SLURM_API_VERSION@ +SLURM_MAJOR = @SLURM_MAJOR@ +SLURM_MICRO = @SLURM_MICRO@ +SLURM_MINOR = @SLURM_MINOR@ +SLURM_VERSION = @SLURM_VERSION@ +SO_LDFLAGS = @SO_LDFLAGS@ +SSL_CPPFLAGS = @SSL_CPPFLAGS@ +SSL_LDFLAGS = @SSL_LDFLAGS@ +SSL_LIBS = @SSL_LIBS@ +STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_F77 = @ac_ct_F77@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = 
@libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target = @target@ +target_alias = @target_alias@ +target_cpu = @target_cpu@ +target_os = @target_os@ +target_vendor = @target_vendor@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +AUTOMAKE_OPTIONS = foreign +INCLUDES = -I$(top_srcdir) +sstat_LDADD = $(top_builddir)/src/common/libcommon.o -ldl \ + $(top_builddir)/src/api/libslurmhelper.la + +noinst_HEADERS = sstat.c +sstat_SOURCES = sstat.c process.c print.c options.c +sstat_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) +all: all-am + +.SUFFIXES: +.SUFFIXES: .c .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/sstat/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --foreign src/sstat/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +install-binPROGRAMS: $(bin_PROGRAMS) + @$(NORMAL_INSTALL) + test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)" + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + if test -f $$p \ + || test -f $$p1 \ + ; then \ + f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + else :; fi; \ + done + +uninstall-binPROGRAMS: + @$(NORMAL_UNINSTALL) + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \ + echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \ + rm -f "$(DESTDIR)$(bindir)/$$f"; \ + done + +clean-binPROGRAMS: + @list='$(bin_PROGRAMS)'; for p in $$list; do \ + f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f $$p $$f"; \ + rm -f $$p $$f ; \ + done +sstat$(EXEEXT): $(sstat_OBJECTS) $(sstat_DEPENDENCIES) + @rm -f sstat$(EXEEXT) + $(sstat_LINK) $(sstat_OBJECTS) $(sstat_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/options.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Po@am__quote@ 
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/process.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sstat.Po@am__quote@ + +.c.o: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c $< + +.c.obj: +@am__fastdepCC_TRUE@ $(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'` +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(COMPILE) -c `$(CYGPATH_W) '$<'` + +.c.lo: +@am__fastdepCC_TRUE@ $(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $< +@am__fastdepCC_TRUE@ mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo +@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCC_FALSE@ $(LTCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$tags $$unique; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + tags=; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$tags$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$tags $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && cd $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) $$here + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); 
fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(bindir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || \ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-exec-am: install-binPROGRAMS + +install-html: install-html-am + +install-info: install-info-am + +install-man: + +install-pdf: install-pdf-am + +install-ps: install-ps-am + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -rf ./$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-binPROGRAMS + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \ + clean-generic clean-libtool ctags distclean distclean-compile \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-binPROGRAMS install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-compile mostlyclean-generic mostlyclean-libtool \ + pdf pdf-am ps ps-am tags uninstall uninstall-am \ + uninstall-binPROGRAMS + + +force: +$(sstat_LDADD) : force + @cd `dirname $@` && $(MAKE) `basename $@` +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. 
+.NOEXPORT: diff --git a/src/sstat/options.c b/src/sstat/options.c new file mode 100644 index 000000000..a1aafaa40 --- /dev/null +++ b/src/sstat/options.c @@ -0,0 +1,396 @@ +/*****************************************************************************\ + * options.c - option functions for sstat + * + * $Id: options.c 7541 2006-03-18 01:44:58Z da $ + ***************************************************************************** + * Copyright (C) 2006 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov>. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "src/common/read_config.h" +#include "sstat.h" +#include <time.h> + +void _help_fields_msg(void); +void _help_msg(void); +void _usage(void); +void _init_params(); + +void _help_fields_msg(void) +{ + int i; + + for (i = 0; fields[i].name; i++) { + if (i & 3) + printf(" "); + else + printf("\n"); + printf("%-10s", fields[i].name); + } + printf("\n"); + return; +} + +void _help_msg(void) +{ + printf("\n" + "By default, sstat displays status data for job/step stated\n" + "Options:\n" + "-C, --cluster\n" + " Job is running on this cluster.\n" + "-F <field-list>, --fields=<field-list>\n" + " Display the specified data (use \"--help-fields\" for a\n" + " list of available fields). If no field option is specified,\n" + " we use \"--fields=jobid,vsize,rss,pages,cputime,ntasks,state\".\n" + "-h, --help\n" + " Print a general help message.\n" + "--help-fields\n" + " Print a list of fields that can be specified with the\n" + " \"--fields\" option\n" + "-j <job(.step)>, --jobs=<job(.step)>\n" + " Display information about this job or comma-separated\n" + " list of jobs. The default is all jobs. Adding .step will\n" + " display the specfic job step of that job.\n" + "--noheader\n" + " Print (or don't print) a header. 
The default is to print a\n" + " header; the option has no effect if --dump is specified\n" + "--usage\n" + " Pointer to this message.\n" + "-v, --verbose\n" + " Primarily for debugging purposes, report the state of various\n" + " variables during processing.\n"); + + return; +} + +void _usage(void) +{ + printf("\nUsage: sstat [options]\n\tUse --help for help\n"); +} + + +void _do_help(void) +{ + switch (params.opt_help) { + case 1: + _help_msg(); + break; + case 2: + _help_fields_msg(); + break; + case 3: + _usage(); + break; + default: + fprintf(stderr, "sacct bug: params.opt_help=%d\n", + params.opt_help); + } +} + +void _init_params() +{ + params.opt_cluster = NULL; /* --cluster */ + params.opt_completion = 0; /* --completion */ + params.opt_dump = 0; /* --dump */ + params.opt_dup = -1; /* --duplicates; +1 = explicitly set */ + params.opt_fdump = 0; /* --formattted_dump */ + params.opt_stat = 0; /* --stat */ + params.opt_gid = -1; /* --gid (-1=wildcard, 0=root) */ + params.opt_header = 1; /* can only be cleared */ + params.opt_help = 0; /* --help */ + params.opt_long = 0; /* --long */ + params.opt_lowmem = 0; /* --low_memory */ + params.opt_purge = 0; /* --purge */ + params.opt_total = 0; /* --total */ + params.opt_uid = -1; /* --uid (-1=wildcard, 0=root) */ + params.opt_uid_set = 0; + params.opt_verbose = 0; /* --verbose */ + params.opt_expire_timespec = NULL; /* --expire= */ + params.opt_field_list = NULL; /* --fields= */ + params.opt_filein = NULL; /* --file */ + params.opt_job_list = NULL; /* --jobs */ + params.opt_partition_list = NULL;/* --partitions */ + params.opt_state_list = NULL; /* --states */ +} + +int decode_state_char(char *state) +{ + if (!strcasecmp(state, "p")) + return JOB_PENDING; /* we should never see this */ + else if (!strcasecmp(state, "r")) + return JOB_RUNNING; + else if (!strcasecmp(state, "su")) + return JOB_SUSPENDED; + else if (!strcasecmp(state, "cd")) + return JOB_COMPLETE; + else if (!strcasecmp(state, "ca")) + return JOB_CANCELLED; + else if (!strcasecmp(state, "f")) + return JOB_FAILED; + else if (!strcasecmp(state, "to")) + return JOB_TIMEOUT; + else if (!strcasecmp(state, "nf")) + return JOB_NODE_FAIL; + else + return -1; // unknown +} + +void parse_command_line(int argc, char **argv, List selected_steps) +{ + extern int optind; + int c, i, optionIndex = 0; + char *end = NULL, *start = NULL; + jobacct_selected_step_t *selected_step = NULL; + ListIterator itr = NULL; + char *dot = NULL; + log_options_t logopt = LOG_OPTS_STDERR_ONLY; + + static struct option long_options[] = { + {"cluster", 1, 0, 'C'}, + {"fields", 1, 0, 'F'}, + {"help", 0, ¶ms.opt_help, 1}, + {"help-fields", 0, ¶ms.opt_help, 2}, + {"jobs", 1, 0, 'j'}, + {"noheader", 0, ¶ms.opt_header, 0}, + {"usage", 0, ¶ms.opt_help, 3}, + {"verbose", 0, 0, 'v'}, + {"version", 0, 0, 'V'}, + {0, 0, 0, 0}}; + + log_init(xbasename(argv[0]), logopt, 0, NULL); + + _init_params(); + + if ((i=getuid())) + /* default to current user unless root*/ + params.opt_uid = i; + + opterr = 1; /* Let getopt report problems to the user */ + + while (1) { /* now cycle through the command line */ + c = getopt_long(argc, argv, "C:F:hj:Vv", + long_options, &optionIndex); + if (c == -1) + break; + switch (c) { + case 'C': + params.opt_cluster = xstrdup(optarg); + break; + case 'F': + if(params.opt_field_list) + xfree(params.opt_field_list); + + params.opt_field_list = + xrealloc(params.opt_field_list, + (params.opt_field_list==NULL? 
0 : + strlen(params.opt_field_list)) + + strlen(optarg) + 1); + strcat(params.opt_field_list, optarg); + strcat(params.opt_field_list, ","); + break; + case 'h': + params.opt_help = 1; + break; + case 'j': + if ((strspn(optarg, "0123456789, ") < strlen(optarg)) + && (strspn(optarg, ".0123456789, ") + < strlen(optarg))) { + fprintf(stderr, "Invalid jobs list: %s\n", + optarg); + exit(1); + } + params.opt_job_list = + xrealloc(params.opt_job_list, + (params.opt_job_list==NULL? 0 : + strlen(params.opt_job_list)) + + strlen(optarg) + 1); + strcat(params.opt_job_list, optarg); + strcat(params.opt_job_list, ","); + break; + case 'v': + /* Handle -vvv thusly... + * 0 - report only normal messages and errors + * 1 - report options selected and major operations + * 2 - report data anomalies probably not errors + * 3 - blather on and on + */ + params.opt_verbose++; + break; + + case 'V': + { + char obuf[20]; /* should be long enough */ + char *rev="$Revision: 7267 $"; + char *s; + + s=strstr(rev, " ")+1; + for (i=0; s[i]!=' '; i++) + obuf[i]=s[i]; + obuf[i] = 0; + printf("%s: %s\n", argv[0], obuf); + exit(0); + } + + case ':': + case '?': /* getopt() has explained it */ + exit(1); + } + } + + if(params.opt_help) { + _do_help(); + exit(0); + } + + if (optind < argc) { + optarg = argv[optind]; + if ((strspn(optarg, "0123456789, ") < strlen(optarg)) + && (strspn(optarg, ".0123456789, ") + < strlen(optarg))) { + fprintf(stderr, "Invalid jobs list: %s\n", + optarg); + exit(1); + } + params.opt_job_list = + xrealloc(params.opt_job_list, + (params.opt_job_list==NULL? 0 : + strlen(params.opt_job_list)) + + strlen(optarg) + 1); + strcat(params.opt_job_list, optarg); + strcat(params.opt_job_list, ","); + } + + if(!params.opt_field_list) { + params.opt_field_list = + xmalloc(sizeof(STAT_FIELDS)+1); + strcat(params.opt_field_list, STAT_FIELDS); + strcat(params.opt_field_list, ","); + } + + + if (params.opt_verbose) { + fprintf(stderr, "Options selected:\n" + "\topt_cluster=%s\n" + "\topt_field_list=%s\n" + "\topt_header=%d\n" + "\topt_help=%d\n" + "\topt_job_list=%s\n" + "\topt_verbose=%d\n", + params.opt_cluster, + params.opt_field_list, + params.opt_header, + params.opt_help, + params.opt_job_list, + params.opt_verbose); + logopt.stderr_level += params.opt_verbose; + log_alter(logopt, 0, NULL); + + } + + /* specific jobs requested? 
*/ + if (params.opt_job_list) { + start = params.opt_job_list; + while ((end = strstr(start, ",")) && start) { + *end = 0; + while (isspace(*start)) + start++; /* discard whitespace */ + if(!(int)*start) + continue; + selected_step = + xmalloc(sizeof(jobacct_selected_step_t)); + list_append(selected_steps, selected_step); + + dot = strstr(start, "."); + if (dot == NULL) { + debug2("No jobstep requested"); + selected_step->step = NULL; + selected_step->stepid = (uint32_t)NO_VAL; + } else { + *dot++ = 0; + selected_step->step = xstrdup(dot); + selected_step->stepid = atoi(dot); + } + selected_step->job = xstrdup(start); + selected_step->jobid = atoi(start); + start = end + 1; + } + if (params.opt_verbose) { + fprintf(stderr, "Jobs requested:\n"); + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + if(selected_step->step) + fprintf(stderr, "\t: %s.%s\n", + selected_step->job, + selected_step->step); + else + fprintf(stderr, "\t: %s\n", + selected_step->job); + } + list_iterator_destroy(itr); + } + } + + start = params.opt_field_list; + while ((end = strstr(start, ","))) { + *end = 0; + while (isspace(*start)) + start++; /* discard whitespace */ + if(!(int)*start) + continue; + for (i = 0; fields[i].name; i++) { + if (!strcasecmp(fields[i].name, start)) + goto foundfield; + } + fprintf(stderr, + "Invalid field requested: \"%s\"\n", + start); + exit(1); + foundfield: + printfields[nprintfields++] = i; + start = end + 1; + } + + if (params.opt_verbose) { + fprintf(stderr, "%d field%s selected:\n", + nprintfields, + (nprintfields==1? "" : "s")); + for (i = 0; i < nprintfields; i++) + fprintf(stderr, + "\t%s\n", + fields[printfields[i]].name); + } + + return; +} + + diff --git a/src/sstat/print.c b/src/sstat/print.c new file mode 100644 index 000000000..0ff252c9d --- /dev/null +++ b/src/sstat/print.c @@ -0,0 +1,438 @@ +/*****************************************************************************\ + * print.c - print functions for sacct + * + * $Id: print.c 7541 2006-03-18 01:44:58Z da $ + ***************************************************************************** + * Copyright (C) 2006 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov>. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. 
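parse_command_line() above accepts -j/--jobs as a comma-separated list in which each token is either a job id or job.step: whitespace is trimmed, the token is split on an optional dot, and NO_VAL is recorded when no step was given. The standalone sketch below reproduces just that tokenizing step in plain C. The sel_step struct, parse_job_list() and NO_STEP are illustrative stand-ins, not the jobacct_selected_step_t type or NO_VAL constant used by the real code.

/* Standalone sketch of "-j job[.step],job[.step],..." parsing. */
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NO_STEP 0xfffffffe      /* stand-in for "whole job requested" */

struct sel_step { uint32_t jobid; uint32_t stepid; };

static int parse_job_list(char *list, struct sel_step *out, int max)
{
	int n = 0;
	char *tok, *save = NULL;

	for (tok = strtok_r(list, ",", &save);
	     tok && n < max;
	     tok = strtok_r(NULL, ",", &save)) {
		char *dot;

		while (isspace((unsigned char)*tok))
			tok++;                        /* discard whitespace */
		if (*tok == '\0')
			continue;
		dot = strchr(tok, '.');
		if (dot) {
			*dot++ = '\0';                /* split job from step */
			out[n].stepid = (uint32_t)atoi(dot);
		} else {
			out[n].stepid = NO_STEP;      /* no step requested */
		}
		out[n].jobid = (uint32_t)atoi(tok);
		n++;
	}
	return n;
}

int main(void)
{
	char list[] = "123.0, 456,789.2";
	struct sel_step sel[8];
	int i, n = parse_job_list(list, sel, 8);

	for (i = 0; i < n; i++)
		printf("job %u step %u\n",
		       (unsigned)sel[i].jobid, (unsigned)sel[i].stepid);
	return 0;
}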
+ * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "sstat.h" +#include "src/common/parse_time.h" +#include "slurm.h" +#define FORMAT_STRING_SIZE 34 + +void _elapsed_time(long secs, long usecs, char *str); + +void _elapsed_time(long secs, long usecs, char *str) +{ + long days, hours, minutes, seconds; + long subsec = 0; + + if(secs < 0) { + snprintf(str, FORMAT_STRING_SIZE, "'N/A'"); + return; + } + + while (usecs >= 1E6) { + secs++; + usecs -= 1E6; + } + if(usecs > 0) { + /* give me 3 significant digits to tack onto the sec */ + subsec = (usecs/1000); + } + seconds = secs % 60; + minutes = (secs / 60) % 60; + hours = (secs / 3600) % 24; + days = secs / 86400; + + if (days) + snprintf(str, FORMAT_STRING_SIZE, + "%ld-%2.2ld:%2.2ld:%2.2ld", + days, hours, minutes, seconds); + else if (hours) + snprintf(str, FORMAT_STRING_SIZE, + "%ld:%2.2ld:%2.2ld", + hours, minutes, seconds); + else + snprintf(str, FORMAT_STRING_SIZE, + "%ld:%2.2ld.%3.3ld", + minutes, seconds, subsec); +} + +extern void print_fields(type_t type, void *object) +{ + int f, pf; + for (f=0; f<nprintfields; f++) { + pf = printfields[f]; + if (f) + printf(" "); + (fields[pf].print_routine)(type, object); + } + printf("\n"); +} + +/* Field-specific print routines */ + +extern void print_cputime(type_t type, void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + char outbuf[FORMAT_STRING_SIZE]; + char buf1[FORMAT_STRING_SIZE]; + char buf2[FORMAT_STRING_SIZE]; + char buf3[FORMAT_STRING_SIZE]; + sacct_t sacct; + char *nodes = NULL; + uint32_t pos; + + switch(type) { + case HEADLINE: + printf("%-37s", "MinCPUtime/Node:Task - Ave"); + break; + case UNDERSCORE: + printf("%-37s", "-------------------------------------"); + break; + case JOB: + sacct = job->sacct; + nodes = job->nodes; + pos = sacct.min_cpu_id.nodeid; + _elapsed_time((int)sacct.min_cpu, 0, buf1); + if(job->track_steps) + snprintf(outbuf, FORMAT_STRING_SIZE, + "%s/- - -", buf1); + else { + _elapsed_time((int)sacct.ave_cpu, 0, buf2); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, + "%s/%s:%u - %s", + buf1, + buf3, + sacct.min_cpu_id.taskid, + buf2); + } + printf("%-37s", outbuf); + break; + case JOBSTEP: + sacct = step->sacct; + nodes = step->nodes; + pos = sacct.min_cpu_id.nodeid; + _elapsed_time((int)sacct.min_cpu, 0, buf1); + _elapsed_time((int)sacct.ave_cpu, 0, buf2); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, + "%s/%s:%u - %s", + buf1, + buf3, + sacct.min_cpu_id.taskid, + buf2); + printf("%-37s", outbuf); + break; + default: + printf("%-37s", "n/a"); + break; + } +} + +extern void print_jobid(type_t type, void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + char outbuf[10]; + + switch(type) { + case HEADLINE: + printf("%-10s", "JobID"); + break; + case UNDERSCORE: + printf("%-10s", "----------"); + break; 
+ case JOB: + printf("%-10u", job->jobid); + break; + case JOBCOMP: + printf("%-10u", jobcomp->jobid); + break; + case JOBSTEP: + snprintf(outbuf, sizeof(outbuf), "%u.%u", + step->jobid, + step->stepid); + printf("%-10s", outbuf); + break; + default: + printf("%-10s", "n/a"); + break; + } + +} + +extern void print_ntasks(type_t type, void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-7s", "Ntasks"); + break; + case UNDERSCORE: + printf("%-7s", "-------"); + break; + case JOB: + printf("%-7u", job->alloc_cpus); + break; + case JOBSTEP: + printf("%-7u", step->ncpus); + break; + default: + printf("%-7s", "n/a"); + break; + } +} + +extern void print_pages(type_t type, void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + char outbuf[FORMAT_STRING_SIZE]; + char buf1[FORMAT_STRING_SIZE]; + char buf2[FORMAT_STRING_SIZE]; + char buf3[FORMAT_STRING_SIZE]; + sacct_t sacct; + char *nodes = NULL; + uint32_t pos; + + switch(type) { + case HEADLINE: + printf("%-34s", "MaxPages/Node:Task - Ave"); + break; + case UNDERSCORE: + printf("%-34s", "----------------------------------"); + break; + case JOB: + sacct = job->sacct; + nodes = job->nodes; + pos = sacct.min_cpu_id.nodeid; + convert_num_unit((float)sacct.max_pages, + buf1, sizeof(buf1), UNIT_NONE); + + if(job->track_steps) + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1); + else { + convert_num_unit((float)sacct.ave_pages, + buf2, sizeof(buf2), UNIT_NONE); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", + buf1, + buf3, + sacct.max_pages_id.taskid, + buf2); + } + printf("%-34s", outbuf); + break; + case JOBSTEP: + sacct = step->sacct; + nodes = step->nodes; + pos = sacct.min_cpu_id.nodeid; + convert_num_unit((float)sacct.max_pages, buf1, sizeof(buf1), + UNIT_NONE); + convert_num_unit((float)sacct.ave_pages, buf2, sizeof(buf2), + UNIT_NONE); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", + buf1, + buf3, + sacct.max_pages_id.taskid, + buf2); + printf("%-34s", outbuf); + break; + default: + printf("%-34s", "n/a"); + break; + } +} + +extern void print_rss(type_t type, void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + char outbuf[FORMAT_STRING_SIZE]; + char buf1[FORMAT_STRING_SIZE]; + char buf2[FORMAT_STRING_SIZE]; + char buf3[FORMAT_STRING_SIZE]; + sacct_t sacct; + char *nodes = NULL; + uint32_t pos; + + switch(type) { + case HEADLINE: + printf("%-34s", "MaxRSS/Node:Task - Ave"); + break; + case UNDERSCORE: + printf("%-34s", "----------------------------------"); + break; + case JOB: + sacct = job->sacct; + nodes = job->nodes; + pos = sacct.min_cpu_id.nodeid; + convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1), + UNIT_NONE); + + if(job->track_steps) + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1); + else { + convert_num_unit((float)sacct.ave_rss, + buf2, sizeof(buf2), UNIT_NONE); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", + buf1, + buf3, + sacct.max_rss_id.taskid, + buf2); + } + printf("%-34s", outbuf); + break; + case JOBSTEP: + sacct = step->sacct; + nodes = step->nodes; + pos = sacct.min_cpu_id.nodeid; + convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1), + UNIT_NONE); + 
convert_num_unit((float)sacct.ave_rss, buf2, sizeof(buf2), + UNIT_NONE); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", + buf1, + buf3, + sacct.max_rss_id.taskid, + buf2); + printf("%-34s", outbuf); + break; + default: + printf("%-34s", "n/a"); + break; + } +} + +extern void print_state(type_t type, void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + + switch(type) { + case HEADLINE: + printf("%-20s", "State"); + break; + case UNDERSCORE: + printf("%-20s", "--------------------"); + break; + case JOB: + if ( job->state == JOB_CANCELLED) { + printf ("%-10s by %6d", + job_state_string(job->state), job->requid); + } + else { + printf("%-20s", job_state_string(job->state)); + } + break; + case JOBCOMP: + printf("%-20s", jobcomp->state); + break; + case JOBSTEP: + if ( step->state == JOB_CANCELLED) { + printf ("%-10s by %6d", + job_state_string(step->state), step->requid); + } + else { + printf("%-20s", job_state_string(step->state)); + } + break; + default: + printf("%-20s", "n/a"); + break; + } +} + +extern void print_vsize(type_t type, void *object) +{ + jobacct_job_rec_t *job = (jobacct_job_rec_t *)object; + jobacct_step_rec_t *step = (jobacct_step_rec_t *)object; + char outbuf[FORMAT_STRING_SIZE]; + char buf1[FORMAT_STRING_SIZE]; + char buf2[FORMAT_STRING_SIZE]; + char buf3[FORMAT_STRING_SIZE]; + sacct_t sacct; + char *nodes = NULL; + uint32_t pos; + + switch(type) { + case HEADLINE: + printf("%-34s", "MaxVSIZE/Node:Task - Ave"); + break; + case UNDERSCORE: + printf("%-34s", "----------------------------------"); + break; + case JOB: + sacct = job->sacct; + nodes = job->nodes; + pos = sacct.min_cpu_id.nodeid; + convert_num_unit((float)sacct.max_vsize, + buf1, sizeof(buf1),UNIT_NONE); + if(job->track_steps) + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1); + else { + convert_num_unit((float)sacct.ave_vsize, + buf2, sizeof(buf2), UNIT_NONE); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", + buf1, + buf3, + sacct.max_vsize_id.taskid, + buf2); + } + printf("%-34s", outbuf); + break; + case JOBSTEP: + sacct = step->sacct; + nodes = step->nodes; + pos = sacct.min_cpu_id.nodeid; + convert_num_unit((float)sacct.max_vsize, buf1, sizeof(buf1), + UNIT_NONE); + convert_num_unit((float)sacct.ave_vsize, buf2, sizeof(buf2), + UNIT_NONE); + find_hostname(pos, nodes, buf3); + snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", + buf1, + buf3, + sacct.max_vsize_id.taskid, + buf2); + printf("%-34s", outbuf); + break; + default: + printf("%-34s", "n/a"); + break; + } +} diff --git a/src/sstat/process.c b/src/sstat/process.c new file mode 100644 index 000000000..433433519 --- /dev/null +++ b/src/sstat/process.c @@ -0,0 +1,90 @@ +/*****************************************************************************\ + * process.c - process functions for sacct + * + * $Id: process.c 7541 2006-03-18 01:44:58Z da $ + ***************************************************************************** + * Copyright (C) 2006 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Danny Auble <da@llnl.gov>. + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. 
+ * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "sstat.h" + + +void find_hostname(uint32_t pos, char *hosts, char *host) +{ + hostlist_t hostlist = NULL; + char *temp = NULL; + + if(pos == (uint32_t)NO_VAL) { + snprintf(host, 50, "'N/A'"); + return; + } + hostlist = hostlist_create(hosts); + temp = hostlist_nth(hostlist, pos); + if(temp) { + snprintf(host, 50, "%s", temp); + free(temp); + } else { + snprintf(host, 50, "'N/A'"); + } + hostlist_destroy(hostlist); + return; +} + +void aggregate_sacct(sacct_t *dest, sacct_t *from) +{ + if(dest->max_vsize < from->max_vsize) { + dest->max_vsize = from->max_vsize; + dest->max_vsize_id = from->max_vsize_id; + } + dest->ave_vsize += from->ave_vsize; + + if(dest->max_rss < from->max_rss) { + dest->max_rss = from->max_rss; + dest->max_rss_id = from->max_rss_id; + } + dest->ave_rss += from->ave_rss; + + if(dest->max_pages < from->max_pages) { + dest->max_pages = from->max_pages; + dest->max_pages_id = from->max_pages_id; + } + dest->ave_pages += from->ave_pages; + + if((dest->min_cpu > from->min_cpu) + || (dest->min_cpu == (float)NO_VAL)) { + dest->min_cpu = from->min_cpu; + dest->min_cpu_id = from->min_cpu_id; + } + dest->ave_cpu += from->ave_cpu; +} diff --git a/src/sstat/sstat.c b/src/sstat/sstat.c new file mode 100644 index 000000000..58f75d55d --- /dev/null +++ b/src/sstat/sstat.c @@ -0,0 +1,273 @@ +/*****************************************************************************\ + * sstat.c - job accounting reports for SLURM's jobacct/log plugin + ***************************************************************************** + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. 
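aggregate_sacct() above folds per-node statistics into one record: each maximum (vsize, RSS, pages) is kept together with the task id that produced it, the minimum CPU time is tracked the same way, and the ave_* fields are accumulated as plain sums that _sstat_query() later divides by the total task count. A minimal standalone sketch of that pattern follows; the stat_rec struct is a simplified stand-in for sacct_t (one RSS field instead of the full set), not the real type.

/* Standalone sketch of max-with-id plus sum-then-average aggregation. */
#include <stdint.h>
#include <stdio.h>

struct stat_rec {
	double   max_rss;       /* peak RSS seen so far */
	uint32_t max_rss_task;  /* task that produced it */
	double   ave_rss;       /* running sum until divided by ntasks */
};

static void aggregate(struct stat_rec *dest, const struct stat_rec *from)
{
	if (dest->max_rss < from->max_rss) {
		dest->max_rss = from->max_rss;
		dest->max_rss_task = from->max_rss_task;
	}
	dest->ave_rss += from->ave_rss;      /* sum now, average later */
}

int main(void)
{
	struct stat_rec total = { 0, 0, 0 };
	struct stat_rec node[2] = {
		{ 1024.0, 3,  900.0 },
		{ 2048.0, 7, 1500.0 },
	};
	int i, ntasks = 2;

	for (i = 0; i < 2; i++)
		aggregate(&total, &node[i]);
	total.ave_rss /= ntasks;             /* as _sstat_query() does */

	printf("max_rss=%.0f (task %u), ave_rss=%.0f\n",
	       total.max_rss, (unsigned)total.max_rss_task, total.ave_rss);
	return 0;
}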
+ * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +\*****************************************************************************/ + +#include "sstat.h" + +void _destroy_steps(void *object); +void _print_header(void); +void *_stat_thread(void *args); +int _sstat_query(slurm_step_layout_t *step_layout, uint32_t job_id, + uint32_t step_id); +int _process_results(); +int _do_stat(uint32_t jobid, uint32_t stepid); + +/* + * Globals + */ + sacct_parameters_t params; +fields_t fields[] = {{"cputime", print_cputime}, + {"jobid", print_jobid}, + {"ntasks", print_ntasks}, + {"pages", print_pages}, + {"rss", print_rss}, + {"state", print_state}, + {"vsize", print_vsize}, + {NULL, NULL}}; + +List jobs = NULL; +jobacct_step_rec_t step; + +int printfields[MAX_PRINTFIELDS], /* Indexed into fields[] */ + nprintfields = 0; + +void _destroy_steps(void *object) +{ + jobacct_selected_step_t *step = (jobacct_selected_step_t *)object; + if(step) { + xfree(step->job); + xfree(step->step); + xfree(step); + } +} + +void _print_header(void) +{ + int i,j; + for (i=0; i<nprintfields; i++) { + if (i) + printf(" "); + j=printfields[i]; + (fields[j].print_routine)(HEADLINE, 0); + } + printf("\n"); + for (i=0; i<nprintfields; i++) { + if (i) + printf(" "); + j=printfields[i]; + (fields[j].print_routine)(UNDERSCORE, 0); + } + printf("\n"); +} + +int _sstat_query(slurm_step_layout_t *step_layout, uint32_t job_id, + uint32_t step_id) +{ + slurm_msg_t msg; + stat_jobacct_msg_t r; + stat_jobacct_msg_t *jobacct_msg = NULL; + ListIterator itr; + List ret_list = NULL; + sacct_t temp_sacct; + ret_data_info_t *ret_data_info = NULL; + int rc = SLURM_SUCCESS; + int ntasks = 0; + int tot_tasks = 0; + debug("getting the stat of job %d on %d nodes", + job_id, step_layout->node_cnt); + + memset(&temp_sacct, 0, sizeof(sacct_t)); + temp_sacct.min_cpu = (float)NO_VAL; + memset(&step.sacct, 0, sizeof(sacct_t)); + step.sacct.min_cpu = (float)NO_VAL; + + step.jobid = job_id; + step.stepid = step_id; + step.nodes = step_layout->node_list; + step.stepname = NULL; + step.state = JOB_RUNNING; + slurm_msg_t_init(&msg); + /* Common message contents */ + r.job_id = 
job_id; + r.step_id = step_id; + r.jobacct = jobacct_gather_g_create(NULL); + msg.msg_type = MESSAGE_STAT_JOBACCT; + msg.data = &r; + + ret_list = slurm_send_recv_msgs(step_layout->node_list, &msg, 0); + if (!ret_list) { + error("got an error no list returned"); + goto cleanup; + } + + itr = list_iterator_create(ret_list); + while((ret_data_info = list_next(itr))) { + switch (ret_data_info->type) { + case MESSAGE_STAT_JOBACCT: + jobacct_msg = (stat_jobacct_msg_t *) + ret_data_info->data; + if(jobacct_msg) { + debug2("got it back for job %d", + jobacct_msg->job_id); + jobacct_gather_g_2_sacct( + &temp_sacct, + jobacct_msg->jobacct); + ntasks += jobacct_msg->num_tasks; + aggregate_sacct(&step.sacct, &temp_sacct); + } + break; + case RESPONSE_SLURM_RC: + rc = slurm_get_return_code(ret_data_info->type, + ret_data_info->data); + error("there was an error with the request rc = %s", + slurm_strerror(rc)); + break; + default: + rc = slurm_get_return_code(ret_data_info->type, + ret_data_info->data); + error("unknown return given %d rc = %s", + ret_data_info->type, slurm_strerror(rc)); + break; + } + } + list_iterator_destroy(itr); + list_destroy(ret_list); + + tot_tasks += ntasks; +cleanup: + + if(tot_tasks) { + step.sacct.ave_rss *= 1024; + step.sacct.max_rss *= 1024; + step.sacct.ave_vsize *= 1024; + step.sacct.max_vsize *= 1024; + + step.sacct.ave_cpu /= tot_tasks; + step.sacct.ave_cpu /= 100; + step.sacct.min_cpu /= 100; + step.sacct.ave_rss /= tot_tasks; + step.sacct.ave_vsize /= tot_tasks; + step.sacct.ave_pages /= tot_tasks; + } + jobacct_gather_g_destroy(r.jobacct); + return SLURM_SUCCESS; +} + +int _process_results() +{ + print_fields(JOBSTEP, &step); + return SLURM_SUCCESS; +} + +int _do_stat(uint32_t jobid, uint32_t stepid) +{ + slurm_msg_t req_msg; + slurm_msg_t resp_msg; + job_step_id_msg_t req; + slurm_step_layout_t *step_layout = NULL; + int rc = SLURM_SUCCESS; + + slurm_msg_t_init(&req_msg); + slurm_msg_t_init(&resp_msg); + debug("requesting info for job %u.%u", jobid, stepid); + req.job_id = jobid; + req.step_id = stepid; + req_msg.msg_type = REQUEST_STEP_LAYOUT; + req_msg.data = &req; + + if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0) { + return SLURM_ERROR; + } + + switch (resp_msg.msg_type) { + case RESPONSE_STEP_LAYOUT: + step_layout = (slurm_step_layout_t *)resp_msg.data; + break; + case RESPONSE_SLURM_RC: + rc = ((return_code_msg_t *) resp_msg.data)->return_code; + slurm_free_return_code_msg(resp_msg.data); + printf("problem getting job: %s\n", slurm_strerror(rc)); + slurm_seterrno_ret(rc); + break; + default: + slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR); + break; + } + + if(!step_layout) { + error("didn't get the job record rc = %s", slurm_strerror(rc)); + return rc; + } + + _sstat_query(step_layout, jobid, stepid); + + _process_results(); + + slurm_step_layout_destroy(step_layout); + + return rc; +} + +int main(int argc, char **argv) +{ + ListIterator itr = NULL; + uint32_t jobid = 0; + uint32_t stepid = 0; + jobacct_selected_step_t *selected_step = NULL; + + List selected_steps = list_create(_destroy_steps); + + parse_command_line(argc, argv, selected_steps); + + if (params.opt_header) /* give them something to look */ + _print_header();/* at while we think... 
*/ + itr = list_iterator_create(selected_steps); + while((selected_step = list_next(itr))) { + jobid = atoi(selected_step->job); + if(selected_step->step) + stepid = atoi(selected_step->step); + else + stepid = 0; + _do_stat(jobid, stepid); + } + list_iterator_destroy(itr); + + list_destroy(selected_steps); + + return 0; +} + + diff --git a/src/sstat/sstat.h b/src/sstat/sstat.h new file mode 100644 index 000000000..94b971980 --- /dev/null +++ b/src/sstat/sstat.h @@ -0,0 +1,119 @@ +/*****************************************************************************\ + * sstat.h - header file for sstat + * + * $Id: sstat.h 7541 2006-03-18 01:44:58Z da $ + ***************************************************************************** + * Copyright (C) 2002-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * In addition, as a special exception, the copyright holders give permission + * to link the code of portions of this program with the OpenSSL library under + * certain conditions as described in each individual source file, and + * distribute linked combinations including the two. You must obey the GNU + * General Public License in all respects for all of the code used other than + * OpenSSL. If you modify file(s) with this exception, you may extend this + * exception to your version of the file(s), but you are not obligated to do + * so. If you do not wish to do so, delete this exception statement from your + * version. If you delete this exception statement from all source files in + * the program, then also delete it here. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ +#ifndef _SSTAT_H +#define _SSTAT_H + +#include <ctype.h> +#include <errno.h> +#include <grp.h> +#include <pwd.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/wait.h> +#include <string.h> +#include <time.h> +#include <unistd.h> + +#include "src/common/getopt.h" +#include "src/common/xmalloc.h" +#include "src/common/xstring.h" +#include "src/common/list.h" +#include "src/common/hostlist.h" +#include "src/common/slurm_jobacct_gather.h" +#include "src/common/slurm_accounting_storage.h" +#include "src/common/slurm_jobcomp.h" + +#define ERROR 2 + +#define STAT_FIELDS "jobid,vsize,rss,pages,cputime,ntasks,state" + +#define BUFFER_SIZE 4096 +#define STATE_COUNT 10 + +#define MAX_PRINTFIELDS 100 + +#define SECONDS_IN_MINUTE 60 +#define SECONDS_IN_HOUR (60*SECONDS_IN_MINUTE) +#define SECONDS_IN_DAY (24*SECONDS_IN_HOUR) + +/* On output, use fields 12-37 from JOB_STEP */ + +typedef enum { HEADLINE, + UNDERSCORE, + JOB, + JOBSTEP, + JOBCOMP +} type_t; + + +typedef struct fields { + char *name; /* Specified in --fields= */ + void (*print_routine) (); /* Who gets to print it? */ +} fields_t; + +extern fields_t fields[]; +extern sacct_parameters_t params; + +extern List jobs; + +extern int printfields[MAX_PRINTFIELDS], /* Indexed into fields[] */ + nprintfields; + +/* process.c */ +void find_hostname(uint32_t pos, char *hosts, char *host); +void aggregate_sacct(sacct_t *dest, sacct_t *from); + +/* print.c */ +void print_cputime(type_t type, void *object); +void print_fields(type_t type, void *object); +void print_jobid(type_t type, void *object); +void print_ntasks(type_t type, void *object); +void print_pages(type_t type, void *object); +void print_rss(type_t type, void *object); +void print_state(type_t type, void *object); +void print_vsize(type_t type, void *object); + + +/* options.c */ +void parse_command_line(int argc, char **argv, List selected_steps); + +#endif /* !_SACCT_H */ diff --git a/src/strigger/Makefile.in b/src/strigger/Makefile.in index ffb5dbbea..1755ca5fa 100644 --- a/src/strigger/Makefile.in +++ b/src/strigger/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -48,6 +48,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -77,7 +79,7 @@ strigger_DEPENDENCIES = $(top_builddir)/src/api/libslurmhelper.la strigger_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(strigger_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -118,6 +120,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -131,10 +134,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -154,7 +160,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -165,6 +174,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -180,6 +191,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -195,6 +207,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -299,8 +312,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -363,8 +376,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +389,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: 
$(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/strigger/opts.c b/src/strigger/opts.c index 987d3cfc9..499e3e60f 100644 --- a/src/strigger/opts.c +++ b/src/strigger/opts.c @@ -2,9 +2,10 @@ * opts.c - strigger command line option processing functions ***************************************************************************** * Copyright (C) 2006-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -92,6 +93,8 @@ extern void parse_command_line(int argc, char *argv[]) static struct option long_options[] = { {"block_err", no_argument, 0, OPT_LONG_BLOCK_ERR}, {"down", no_argument, 0, 'd'}, + {"drained", no_argument, 0, 'D'}, + {"fail", no_argument, 0, 'F'}, {"fini", no_argument, 0, 'f'}, {"id", required_argument, 0, 'i'}, {"idle", no_argument, 0, 'I'}, @@ -117,7 +120,7 @@ extern void parse_command_line(int argc, char *argv[]) _init_options(); optind = 0; - while((opt_char = getopt_long(argc, argv, "dfi:Ij:no:p:qrtuvV", + while((opt_char = getopt_long(argc, argv, "dDFfi:Ij:no:p:qrtuvV", long_options, &option_index)) != -1) { switch (opt_char) { case (int)'?': @@ -131,6 +134,12 @@ extern void parse_command_line(int argc, char *argv[]) case (int)'d': params.node_down = true; break; + case (int)'D': + params.node_drained = true; + break; + case (int)'F': + params.node_fail = true; + break; case (int)'f': params.job_fini = true; break; @@ -221,49 +230,53 @@ extern void parse_command_line(int argc, char *argv[]) /* initialize the parameters */ static void _init_options( void ) { - params.mode_set = false; - params.mode_get = false; - params.mode_clear = false; + params.mode_set = false; + params.mode_get = false; + params.mode_clear = false; - params.block_err = false; - params.node_down = false; - params.node_idle = false; - params.trigger_id = 0; - params.job_fini = false; - params.job_id = 0; - params.node_id = NULL; - params.offset = 0; - params.program = NULL; - params.quiet = false; - params.reconfig = false; - params.time_limit = false; - params.node_up = false; - params.user_id = 0; - params.verbose = 0; + params.block_err = false; + params.node_down = false; + params.node_drained = false; + params.node_fail = false; + params.node_idle = false; + params.trigger_id = 0; + params.job_fini = false; + params.job_id = 0; + params.node_id = NULL; + params.offset = 0; + params.program = NULL; + params.quiet = false; + params.reconfig = false; + params.time_limit = false; + params.node_up = false; + params.user_id = 0; + params.verbose = 0; } /* print the parameters specified */ static void _print_options( void ) { verbose("-----------------------------"); - verbose("set = %s", params.mode_set ? "true" : "false"); - verbose("get = %s", params.mode_get ? 
"true" : "false"); - verbose("clear = %s", params.mode_clear ? "true" : "false"); - verbose("block_err = %s", params.block_err ? "true" : "false"); - verbose("job_id = %u", params.job_id); - verbose("job_fini = %s", params.job_fini ? "true" : "false"); - verbose("node_down = %s", params.node_down ? "true" : "false"); - verbose("node_idle = %s", params.node_idle ? "true" : "false"); - verbose("node_up = %s", params.node_up ? "true" : "false"); - verbose("node = %s", params.node_id); - verbose("offset = %d secs", params.offset); - verbose("program = %s", params.program); - verbose("quiet = %s", params.quiet ? "true" : "false"); - verbose("reconfig = %s", params.reconfig ? "true" : "false"); - verbose("time_limit = %s", params.time_limit ? "true" : "false"); - verbose("trigger_id = %u", params.trigger_id); - verbose("user_id = %u", params.user_id); - verbose("verbose = %d", params.verbose); + verbose("set = %s", params.mode_set ? "true" : "false"); + verbose("get = %s", params.mode_get ? "true" : "false"); + verbose("clear = %s", params.mode_clear ? "true" : "false"); + verbose("block_err = %s", params.block_err ? "true" : "false"); + verbose("job_id = %u", params.job_id); + verbose("job_fini = %s", params.job_fini ? "true" : "false"); + verbose("node_down = %s", params.node_down ? "true" : "false"); + verbose("node_drained = %s", params.node_drained ? "true" : "false"); + verbose("node_fail = %s", params.node_fail ? "true" : "false"); + verbose("node_idle = %s", params.node_idle ? "true" : "false"); + verbose("node_up = %s", params.node_up ? "true" : "false"); + verbose("node = %s", params.node_id); + verbose("offset = %d secs", params.offset); + verbose("program = %s", params.program); + verbose("quiet = %s", params.quiet ? "true" : "false"); + verbose("reconfig = %s", params.reconfig ? "true" : "false"); + verbose("time_limit = %s", params.time_limit ? 
"true" : "false"); + verbose("trigger_id = %u", params.trigger_id); + verbose("user_id = %u", params.user_id); + verbose("verbose = %d", params.verbose); verbose("-----------------------------"); } @@ -282,7 +295,8 @@ static void _validate_options( void ) } if (params.mode_set - && ((params.node_down + params.node_idle + params.node_up + params.reconfig + + && ((params.node_down + params.node_drained + params.node_fail + + params.node_idle + params.node_up + params.reconfig + params.job_fini + params.time_limit + params.block_err) == 0)) { error("You must specify a trigger (--block_err, --down, --up, " "--reconfig, --time or --fini)"); @@ -330,7 +344,7 @@ static void _print_version(void) static void _usage( void ) { - printf("Usage: strigger [--set | --get | --clear | --version] [-dfiIjnoptuv]\n"); + printf("Usage: strigger [--set | --get | --clear | --version] [-dDfiIjnoptuv]\n"); } static void _help( void ) @@ -342,6 +356,8 @@ Usage: strigger [--set | --get | --clear] [OPTIONS]\n\ --clear delete a trigger\n\n\ --block_err trigger event on BlueGene block error\n\ -d, --down trigger event when node goes DOWN\n\ + -D, --drained trigger event when node becomes DRAINED\n\ + -F, --fail trigger event when node is expected to FAIL\n\ -f, --fini trigger event when job finishes\n\ -i, --id=# a trigger's ID number\n\ -I, --idle trigger event when node remains IDLE\n\ diff --git a/src/strigger/strigger.c b/src/strigger/strigger.c index c8789e689..6371787d5 100644 --- a/src/strigger/strigger.c +++ b/src/strigger/strigger.c @@ -2,9 +2,10 @@ * strigger.c - Manage slurm event triggers ***************************************************************************** * Copyright (C) 2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -147,6 +148,10 @@ static int _set_trigger(void) ti.trig_type |= TRIGGER_TYPE_BLOCK_ERR; if (params.node_down) ti.trig_type |= TRIGGER_TYPE_DOWN; + if (params.node_drained) + ti.trig_type |= TRIGGER_TYPE_DRAINED; + if (params.node_fail) + ti.trig_type |= TRIGGER_TYPE_FAIL; if (params.node_idle) ti.trig_type |= TRIGGER_TYPE_IDLE; if (params.node_up) @@ -208,6 +213,20 @@ static int _get_trigger(void) != TRIGGER_TYPE_DOWN)) continue; } + if (params.node_drained) { + if ((trig_msg->trigger_array[i].res_type + != TRIGGER_RES_TYPE_NODE) + || (trig_msg->trigger_array[i].trig_type + != TRIGGER_TYPE_DRAINED)) + continue; + } + if (params.node_fail) { + if ((trig_msg->trigger_array[i].res_type + != TRIGGER_RES_TYPE_NODE) + || (trig_msg->trigger_array[i].trig_type + != TRIGGER_TYPE_FAIL)) + continue; + } if (params.node_id) { if (trig_msg->trigger_array[i].res_type != TRIGGER_RES_TYPE_NODE) @@ -281,6 +300,10 @@ static char *_trig_type(uint16_t trig_type) return "up"; else if (trig_type == TRIGGER_TYPE_DOWN) return "down"; + else if (trig_type == TRIGGER_TYPE_DRAINED) + return "drained"; + else if (trig_type == TRIGGER_TYPE_FAIL) + return "fail"; else if (trig_type == TRIGGER_TYPE_IDLE) return "idle"; else if (trig_type == TRIGGER_TYPE_TIME) diff --git a/src/strigger/strigger.h b/src/strigger/strigger.h index 8aa26706a..a91c08b0f 100644 --- a/src/strigger/strigger.h +++ b/src/strigger/strigger.h @@ -2,9 +2,10 @@ * strigger.h - definitions used for strigger functions ***************************************************************************** * Copyright (C) 2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -54,7 +55,9 @@ struct strigger_parameters { bool mode_get; bool mode_clear; bool node_down; + bool node_drained; char * node_id; + bool node_fail; bool node_idle; bool node_up; int offset; diff --git a/src/sview/Makefile.am b/src/sview/Makefile.am index 7d5345844..bf2e360c9 100644 --- a/src/sview/Makefile.am +++ b/src/sview/Makefile.am @@ -10,7 +10,9 @@ if HAVE_GTK bin_PROGRAMS = sview sview_LDADD = \ - $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la + $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \ + $(top_builddir)/src/api/libslurmhelper.la + noinst_HEADERS = sview.h sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \ diff --git a/src/sview/Makefile.in b/src/sview/Makefile.in index f72b149de..c46c9bfb4 100644 --- a/src/sview/Makefile.in +++ b/src/sview/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -49,6 +49,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -88,11 +90,12 @@ am__EXTRA_sview_SOURCES_DIST = sview.h sview.c popups.c grid.c \ part_info.c job_info.c block_info.c node_info.c submit_info.c \ admin_info.c common.c sview_OBJECTS = $(am_sview_OBJECTS) -@HAVE_GTK_TRUE@sview_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la +@HAVE_GTK_TRUE@sview_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \ +@HAVE_GTK_TRUE@ $(top_builddir)/src/api/libslurmhelper.la sview_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ --mode=link $(CCLD) $(sview_CFLAGS) $(CFLAGS) $(sview_LDFLAGS) \ $(LDFLAGS) -o $@ -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -135,6 +138,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -148,10 +152,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -171,7 +178,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -182,6 +192,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -197,6 +209,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -212,6 +225,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -271,7 +285,8 @@ top_srcdir = @top_srcdir@ AUTOMAKE_OPTIONS = foreign INCLUDES = -I$(top_srcdir) $(BG_INCLUDES) @HAVE_GTK_TRUE@sview_LDADD = \ -@HAVE_GTK_TRUE@ $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la +@HAVE_GTK_TRUE@ $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \ +@HAVE_GTK_TRUE@ $(top_builddir)/src/api/libslurmhelper.la @HAVE_GTK_TRUE@noinst_HEADERS = sview.h @HAVE_GTK_TRUE@sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \ @@ -326,8 
+341,8 @@ install-binPROGRAMS: $(bin_PROGRAMS) || test -f $$p1 \ ; then \ f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \ - echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ - $(INSTALL_PROGRAM_ENV) $(LIBTOOL) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ + echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \ + $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \ else :; fi; \ done @@ -538,8 +553,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -551,8 +566,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -562,13 +577,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/src/sview/admin_info.c b/src/sview/admin_info.c index 908fffa1f..a73189f4c 100644 --- a/src/sview/admin_info.c +++ b/src/sview/admin_info.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sview/block_info.c b/src/sview/block_info.c index ee1afa88e..cc8e26892 100644 --- a/src/sview/block_info.c +++ b/src/sview/block_info.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sview/common.c b/src/sview/common.c index 62330728b..aecce533c 100644 --- a/src/sview/common.c +++ b/src/sview/common.c @@ -5,7 +5,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/src/sview/grid.c b/src/sview/grid.c index 4d0fa0566..1ebdcc20e 100644 --- a/src/sview/grid.c +++ b/src/sview/grid.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -384,7 +384,6 @@ extern void get_button_list_from_main(List *button_list, int start, int end, *button_list = list_create(destroy_grid_button); color_inx %= sview_colors_cnt; - itr = list_iterator_create(grid_button_list); while((grid_button = list_next(itr))) { if ((grid_button->inx < start) diff --git a/src/sview/job_info.c b/src/sview/job_info.c index 2bbc43be2..7155dc4b4 100644 --- a/src/sview/job_info.c +++ b/src/sview/job_info.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -87,7 +87,6 @@ enum { SORTID_LINUXIMAGE, #endif SORTID_MAX_CORES, - SORTID_MAX_MEM, SORTID_MAX_NODES, SORTID_MAX_SOCKETS, #ifdef HAVE_BG @@ -263,8 +262,6 @@ static display_data_t display_data_job[] = { FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, {G_TYPE_STRING, SORTID_MIN_MEM, "Min Memory", FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job}, - {G_TYPE_STRING, SORTID_MAX_MEM, "Max Memory", - FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job}, {G_TYPE_STRING, SORTID_TMP_DISK, "Tmp Disk", FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job}, {G_TYPE_STRING, SORTID_NICE, "Nice", @@ -696,12 +693,8 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text, type = "account"; break; case SORTID_DEPENDENCY: - temp_int = strtol(new_text, (char **)NULL, 10); - + job_msg->dependency = xstrdup(new_text); type = "dependency"; - if(temp_int <= 0) - goto return_error; - job_msg->dependency = (uint32_t)temp_int; break; #ifdef HAVE_BG case SORTID_GEOMETRY: @@ -1396,14 +1389,10 @@ static void _layout_job_record(GtkTreeView *treeview, SORTID_FEATURES), job_ptr->features); - if(job_ptr->dependency > 0) - sprintf(tmp_char, "%u", job_ptr->dependency); - else - sprintf(tmp_char, " "); add_display_treestore_line(update, treestore, &iter, find_col_name(display_data_job, SORTID_DEPENDENCY), - tmp_char); + job_ptr->dependency); add_display_treestore_line(update, treestore, &iter, find_col_name(display_data_job, @@ -1679,21 +1668,16 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr, gtk_tree_store_set(treestore, iter, SORTID_MIN_MEM, tmp_char, -1); - sprintf(tmp_char, "%u", job_ptr->job_max_memory); - gtk_tree_store_set(treestore, iter, - SORTID_MAX_MEM, tmp_char, -1); - sprintf(tmp_char, "%u", job_ptr->job_min_tmp_disk); gtk_tree_store_set(treestore, iter, SORTID_TMP_DISK, tmp_char, -1); gtk_tree_store_set(treestore, iter, SORTID_ACCOUNT, job_ptr->account, -1); - if(job_ptr->dependency > 0) { - sprintf(tmp_char, "%u", job_ptr->dependency); - gtk_tree_store_set(treestore, iter, - SORTID_DEPENDENCY, tmp_char, -1); - } + + gtk_tree_store_set(treestore, iter, + SORTID_DEPENDENCY, job_ptr->dependency, -1); + sprintf(tmp_char, "%u", job_ptr->priority); gtk_tree_store_set(treestore, iter, SORTID_PRIORITY, 
tmp_char, -1); @@ -2151,6 +2135,7 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr, count = 0; while(job_ptr->node_inx[count] != -1) count++; + count++; // for the -1; #endif for(j = 0; j < step_info_ptr->job_step_count; j++) { @@ -2214,6 +2199,7 @@ void _display_info_job(List info_list, popup_info_t *popup_win) } if(!list_count(popup_win->grid_button_list)) first_time = 1; + need_refresh: if(!spec_info->display_widget) { treeview = create_treeview_2cols_attach_to_table( diff --git a/src/sview/node_info.c b/src/sview/node_info.c index 2330f0f8f..53889f8ef 100644 --- a/src/sview/node_info.c +++ b/src/sview/node_info.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sview/part_info.c b/src/sview/part_info.c index bf53ddb10..ad0fb43e4 100644 --- a/src/sview/part_info.c +++ b/src/sview/part_info.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -88,6 +88,7 @@ enum { #endif SORTID_NODES, SORTID_ONLY_LINE, + SORTID_PRIORITY, SORTID_REASON, SORTID_ROOT, SORTID_SHARE, @@ -125,6 +126,8 @@ static display_data_t display_data_part[] = { #endif {G_TYPE_STRING, SORTID_JOB_SIZE, "Job Size", FALSE, EDIT_NONE, refresh_part, create_model_part, admin_edit_part}, + {G_TYPE_STRING, SORTID_PRIORITY, "Priority", FALSE, + EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part}, {G_TYPE_STRING, SORTID_MIN_NODES, "Min Nodes", FALSE, EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part}, {G_TYPE_STRING, SORTID_MAX_NODES, "Max Nodes", FALSE, @@ -332,6 +335,8 @@ static void _set_active_combo_part(GtkComboBox *combo, action = 1; else if(!strcmp(temp_char, "force")) action = 2; + else if(!strcmp(temp_char, "exclusive")) + action = 3; else action = 0; break; @@ -411,6 +416,11 @@ static const char *_set_part_msg(update_part_msg_t *part_msg, goto return_error; part_msg->max_time = (uint32_t)temp_int; break; + case SORTID_PRIORITY: + temp_int = strtol(new_text, (char **)NULL, 10); + type = "priority"; + part_msg->priority = (uint16_t)temp_int; + break; case SORTID_MIN_NODES: temp_int = strtol(new_text, (char **)NULL, 10); type = "min_nodes"; @@ -442,11 +452,13 @@ static const char *_set_part_msg(update_part_msg_t *part_msg, break; case SORTID_SHARE: if (!strcasecmp(new_text, "yes")) { - part_msg->shared = SHARED_YES; - } else if (!strcasecmp(new_text, "no")) { - part_msg->shared = SHARED_NO; - } else { - part_msg->shared = SHARED_FORCE; + part_msg->max_share = 4; + } else if (!strcasecmp(new_text, "exclusive")) { + part_msg->max_share = 0; + } else if (!strcasecmp(new_text, "force")) { + part_msg->max_share = SHARED_FORCE | 4; + } else { /* "no" */ + part_msg->max_share = 1; } type = "share"; break; @@ -722,7 +734,7 @@ static void _layout_part_record(GtkTreeView *treeview, { GtkTreeIter iter; ListIterator itr = NULL; - char time_buf[20]; + char time_buf[20], tmp_buf[20]; char tmp_cnt[8]; char tmp_cnt1[8]; char tmp_cnt2[8]; @@ -794,7 +806,14 @@ static void _layout_part_record(GtkTreeView *treeview, find_col_name(display_data_part, SORTID_JOB_SIZE), time_buf); - + + 
convert_num_unit((float)part_ptr->priority, + time_buf, sizeof(time_buf), UNIT_NONE); + add_display_treestore_line(update, treestore, &iter, + find_col_name(display_data_part, + SORTID_PRIORITY), + time_buf); + if (part_ptr->min_nodes == (uint32_t) INFINITE) snprintf(time_buf, sizeof(time_buf), "infinite"); else { @@ -825,11 +844,17 @@ static void _layout_part_record(GtkTreeView *treeview, SORTID_ROOT), temp_char); - if(part_ptr->shared > 1) - temp_char = "force"; - else if(part_ptr->shared) - temp_char = "yes"; - else + if(part_ptr->max_share & SHARED_FORCE) { + snprintf(tmp_buf, sizeof(tmp_buf), "force:%u", + (part_ptr->max_share & ~(SHARED_FORCE))); + temp_char = tmp_buf; + } else if(part_ptr->max_share == 0) + temp_char = "exclusive"; + else if(part_ptr->max_share > 1) { + snprintf(tmp_buf, sizeof(tmp_buf), "yes:%u", + part_ptr->max_share); + temp_char = tmp_buf; + } else temp_char = "no"; add_display_treestore_line(update, treestore, &iter, find_col_name(display_data_part, @@ -921,7 +946,7 @@ static void _update_part_record(sview_part_info_t *sview_part_info, GtkTreeStore *treestore, GtkTreeIter *iter) { - char time_buf[20]; + char time_buf[20], tmp_buf[20]; char tmp_cnt[8]; char *temp_char = NULL; partition_info_t *part_ptr = sview_part_info->part_ptr; @@ -962,6 +987,11 @@ static void _update_part_record(sview_part_info_t *sview_part_info, part_ptr->max_nodes, true); gtk_tree_store_set(treestore, iter, SORTID_JOB_SIZE, time_buf, -1); + convert_num_unit((float)part_ptr->priority, + time_buf, sizeof(time_buf), UNIT_NONE); + gtk_tree_store_set(treestore, iter, SORTID_PRIORITY, + time_buf, -1); + if (part_ptr->min_nodes == (uint32_t) INFINITE) snprintf(time_buf, sizeof(time_buf), "infinite"); else { @@ -985,11 +1015,17 @@ static void _update_part_record(sview_part_info_t *sview_part_info, temp_char = "no"; gtk_tree_store_set(treestore, iter, SORTID_ROOT, temp_char, -1); - if(part_ptr->shared > 1) - temp_char = "force"; - else if(part_ptr->shared) - temp_char = "yes"; - else + if(part_ptr->max_share & SHARED_FORCE) { + snprintf(tmp_buf, sizeof(tmp_buf), "force:%u", + (part_ptr->max_share & ~(SHARED_FORCE))); + temp_char = tmp_buf; + } else if(part_ptr->max_share == 0) + temp_char = "exclusive"; + else if(part_ptr->max_share > 1) { + snprintf(tmp_buf, sizeof(tmp_buf), "yes:%u", + part_ptr->max_share); + temp_char = tmp_buf; + } else temp_char = "no"; gtk_tree_store_set(treestore, iter, SORTID_SHARE, temp_char, -1); @@ -1566,7 +1602,7 @@ need_refresh: treeview = GTK_TREE_VIEW(spec_info->display_widget); update = 1; } - + itr = list_iterator_create(info_list); while ((sview_part_info = (sview_part_info_t*) list_next(itr))) { part_ptr = sview_part_info->part_ptr; @@ -1709,6 +1745,7 @@ extern GtkListStore *create_model_part(int type) -1); break; + case SORTID_PRIORITY: case SORTID_TIMELIMIT: case SORTID_MIN_NODES: case SORTID_MAX_NODES: @@ -1730,9 +1767,9 @@ extern GtkListStore *create_model_part(int type) model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT); gtk_list_store_append(model, &iter); gtk_list_store_set(model, &iter, - 0, "yes", + 0, "force", 1, SORTID_SHARE, - -1); + -1); gtk_list_store_append(model, &iter); gtk_list_store_set(model, &iter, 0, "no", @@ -1740,9 +1777,14 @@ extern GtkListStore *create_model_part(int type) -1); gtk_list_store_append(model, &iter); gtk_list_store_set(model, &iter, - 0, "force", + 0, "yes", 1, SORTID_SHARE, - -1); + -1); + gtk_list_store_append(model, &iter); + gtk_list_store_set(model, &iter, + 0, "exclusive", + 1, SORTID_SHARE, + -1); break; case 
SORTID_GROUPS: break; diff --git a/src/sview/popups.c b/src/sview/popups.c index a2f1cefb6..f48612022 100644 --- a/src/sview/popups.c +++ b/src/sview/popups.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -222,235 +222,326 @@ static void _layout_ctl_conf(GtkTreeStore *treestore, slurm_make_time_str((time_t *)&slurm_ctl_conf_ptr->last_update, temp_str, sizeof(temp_str)); add_display_treestore_line(update, treestore, &iter, - "Configuration data as of", temp_str); + "Configuration data as of", temp_str); add_display_treestore_line(update, treestore, &iter, - "AuthType", slurm_ctl_conf_ptr->authtype); + "AccountingStorageHost", + slurm_ctl_conf_ptr->accounting_storage_host); add_display_treestore_line(update, treestore, &iter, - "BackupAddr", slurm_ctl_conf_ptr->backup_addr); + "AccountingStorageType", + slurm_ctl_conf_ptr->accounting_storage_type); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->accounting_storage_port); + add_display_treestore_line(update, treestore, &iter, + "AccountingStoragePort", + temp_str); + add_display_treestore_line(update, treestore, &iter, + "AccountingStorageUser", + slurm_ctl_conf_ptr->accounting_storage_user); + add_display_treestore_line(update, treestore, &iter, + "AuthType", slurm_ctl_conf_ptr->authtype); add_display_treestore_line(update, treestore, &iter, - "BackupController", - slurm_ctl_conf_ptr->backup_controller); + "BackupAddr", + slurm_ctl_conf_ptr->backup_addr); + add_display_treestore_line(update, treestore, &iter, + "BackupController", + slurm_ctl_conf_ptr->backup_controller); + slurm_make_time_str ((time_t *)&slurm_ctl_conf_ptr->boot_time, + temp_str, sizeof(temp_str)); + add_display_treestore_line(update, treestore, &iter, + "BOOT_TIME", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->cache_groups); add_display_treestore_line(update, treestore, &iter, - "CacheGroups", temp_str); + "CacheGroups", temp_str); + add_display_treestore_line(update, treestore, &iter, + "CheckpointType", + slurm_ctl_conf_ptr->checkpoint_type); + add_display_treestore_line(update, treestore, &iter, + "ControlAddr", + slurm_ctl_conf_ptr->control_addr); add_display_treestore_line(update, treestore, &iter, - "CheckpointType", - slurm_ctl_conf_ptr->checkpoint_type); + "ControlMachine", + slurm_ctl_conf_ptr->control_machine); add_display_treestore_line(update, treestore, &iter, - "ControlAddr", - slurm_ctl_conf_ptr->control_addr); + "CryptoType", + slurm_ctl_conf_ptr->crypto_type); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->def_mem_per_task); add_display_treestore_line(update, treestore, &iter, - "ControlMachine", - slurm_ctl_conf_ptr->control_machine); + "DefMemPerTask", + temp_str); add_display_treestore_line(update, treestore, &iter, - "Epilog", - slurm_ctl_conf_ptr->epilog); + "Epilog", + slurm_ctl_conf_ptr->epilog); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->fast_schedule); add_display_treestore_line(update, treestore, &iter, - "FastSchedule", - temp_str); + "FastSchedule", + temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->first_job_id); add_display_treestore_line(update, treestore, &iter, - "FirstJobId", - temp_str); + "FirstJobId", + 
temp_str); #ifdef HAVE_XCPU add_display_treestore_line(update, treestore, &iter, - "HAVE_XCPU", "1"); + "HAVE_XCPU", "1"); #endif snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->inactive_limit); add_display_treestore_line(update, treestore, &iter, - "InactiveLimit", - temp_str); + "InactiveLimit", + temp_str); + + add_display_treestore_line(update, treestore, &iter, + "JobAcctGatherType", + slurm_ctl_conf_ptr->job_acct_gather_type); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->job_acct_gather_freq); + add_display_treestore_line(update, treestore, &iter, + "JobAcctGatherFrequency", + temp_str); + + add_display_treestore_line(update, treestore, &iter, + "JobCompHost", + slurm_ctl_conf_ptr->job_comp_host); add_display_treestore_line(update, treestore, &iter, - "JobAcctLogFile", - slurm_ctl_conf_ptr->job_acct_logfile); + "JobCompLoc", + slurm_ctl_conf_ptr->job_comp_loc); snprintf(temp_str, sizeof(temp_str), "%u", - slurm_ctl_conf_ptr->job_acct_freq); + slurm_ctl_conf_ptr->job_comp_port); add_display_treestore_line(update, treestore, &iter, - "JobAcctFrequency", - temp_str); + "JobCompPort", + temp_str); add_display_treestore_line(update, treestore, &iter, - "JobAcctType", - slurm_ctl_conf_ptr->job_acct_type); + "JobCompType", + slurm_ctl_conf_ptr->job_comp_type); add_display_treestore_line(update, treestore, &iter, - "JobCompLoc", - slurm_ctl_conf_ptr->job_comp_loc); + "JobCompUser", + slurm_ctl_conf_ptr->job_comp_user); + add_display_treestore_line(update, treestore, &iter, - "JobCompType", - slurm_ctl_conf_ptr->job_comp_type); + "JobCredentialPrivateKey", + slurm_ctl_conf_ptr->job_credential_private_key); add_display_treestore_line(update, treestore, &iter, - "JobCredentialPrivateKey", - slurm_ctl_conf_ptr->job_credential_private_key); + "JobCredentialPublicCertificate", + slurm_ctl_conf_ptr-> + job_credential_public_certificate); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->job_file_append); add_display_treestore_line(update, treestore, &iter, - "JobCredentialPublicCertificate", - slurm_ctl_conf_ptr-> - job_credential_public_certificate); + "JobFileAppend", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->kill_wait); add_display_treestore_line(update, treestore, &iter, - "KillWait", - temp_str); + "KillWait", + temp_str); add_display_treestore_line(update, treestore, &iter, - "MailProg", - slurm_ctl_conf_ptr->mail_prog); + "MailProg", + slurm_ctl_conf_ptr->mail_prog); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->max_job_cnt); add_display_treestore_line(update, treestore, &iter, - "MaxJobCount", - temp_str); + "MaxJobCount", + temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->max_mem_per_task); + add_display_treestore_line(update, treestore, &iter, + "MaxMemPerTask", + temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->msg_timeout); add_display_treestore_line(update, treestore, &iter, - "MessageTimeout", - temp_str); + "MessageTimeout", + temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->min_job_age); add_display_treestore_line(update, treestore, &iter, - "MinJobAge", - temp_str); + "MinJobAge", + temp_str); add_display_treestore_line(update, treestore, &iter, - "MpiDefault", - slurm_ctl_conf_ptr->mpi_default); + "MpiDefault", + slurm_ctl_conf_ptr->mpi_default); #ifdef MULTIPLE_SLURMD add_display_treestore_line(update, treestore, &iter, - "MULTIPLE_SLURMD", "1"); + "MULTIPLE_SLURMD", "1"); #endif snprintf(temp_str, 
sizeof(temp_str), "%u", slurm_ctl_conf_ptr->next_job_id); add_display_treestore_line(update, treestore, &iter, - "NEXT_JOB_ID", - temp_str); + "NEXT_JOB_ID", + temp_str); add_display_treestore_line(update, treestore, &iter, - "PluginDir", - slurm_ctl_conf_ptr->plugindir); + "PluginDir", + slurm_ctl_conf_ptr->plugindir); + add_display_treestore_line(update, treestore, &iter, + "PlugStackConfig", + slurm_ctl_conf_ptr->plugstack); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->private_data); add_display_treestore_line(update, treestore, &iter, - "PlugStackConfig", - slurm_ctl_conf_ptr->plugstack); + "PrivateData", + temp_str); add_display_treestore_line(update, treestore, &iter, - "ProctrackType", - slurm_ctl_conf_ptr->proctrack_type); + "ProctrackType", + slurm_ctl_conf_ptr->proctrack_type); add_display_treestore_line(update, treestore, &iter, - "Prolog", - slurm_ctl_conf_ptr->prolog); + "Prolog", + slurm_ctl_conf_ptr->prolog); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->propagate_prio_process); add_display_treestore_line(update, treestore, &iter, - "PropagatePrioProcess", temp_str); + "PropagatePrioProcess", temp_str); add_display_treestore_line(update, treestore, &iter, - "PropagateResourceLimits", - slurm_ctl_conf_ptr->propagate_rlimits); + "PropagateResourceLimits", + slurm_ctl_conf_ptr->propagate_rlimits); add_display_treestore_line(update, treestore, &iter, - "PropagateResourceLimitsExcept", - slurm_ctl_conf_ptr->propagate_rlimits_except); + "PropagateResourceLimitsExcept", + slurm_ctl_conf_ptr-> + propagate_rlimits_except); + add_display_treestore_line(update, treestore, &iter, + "ResumeProgram", temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->resume_rate); + add_display_treestore_line(update, treestore, &iter, + "ResumeRate", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->ret2service); add_display_treestore_line(update, treestore, &iter, - "ReturnToService", temp_str); + "ReturnToService", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->schedport); add_display_treestore_line(update, treestore, &iter, - "SchedulerPort", temp_str); + "SchedulerPort", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->schedrootfltr); add_display_treestore_line(update, treestore, &iter, - "SchedulerRootFilter", temp_str); + "SchedulerRootFilter", temp_str); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->sched_time_slice); + add_display_treestore_line(update, treestore, &iter, + "SchedulerTimeSlice", temp_str); add_display_treestore_line(update, treestore, &iter, - "SchedulerType", - slurm_ctl_conf_ptr->schedtype); + "SchedulerType", + slurm_ctl_conf_ptr->schedtype); add_display_treestore_line(update, treestore, &iter, - "SelectType", - slurm_ctl_conf_ptr->select_type); + "SelectType", + slurm_ctl_conf_ptr->select_type); snprintf(temp_str, sizeof(temp_str), "%s(%u)", slurm_ctl_conf_ptr->slurm_user_name, slurm_ctl_conf_ptr->slurm_user_id); add_display_treestore_line(update, treestore, &iter, - "SlurmUser", temp_str); + "SlurmUser", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->slurmctld_debug); add_display_treestore_line(update, treestore, &iter, - "SlurmctldDebug", temp_str); + "SlurmctldDebug", temp_str); add_display_treestore_line(update, treestore, &iter, - "SlurmctldLogFile", - slurm_ctl_conf_ptr->slurmctld_logfile); + "SlurmctldLogFile", + slurm_ctl_conf_ptr->slurmctld_logfile); 
add_display_treestore_line(update, treestore, &iter, - "SlurmctldPidFile", - slurm_ctl_conf_ptr->slurmctld_pidfile); + "SlurmctldPidFile", + slurm_ctl_conf_ptr->slurmctld_pidfile); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->slurmctld_port); add_display_treestore_line(update, treestore, &iter, - "SlurmctldPort", temp_str); + "SlurmctldPort", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->slurmctld_timeout); add_display_treestore_line(update, treestore, &iter, - "SlurmctldTimeout", temp_str); + "SlurmctldTimeout", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->slurmd_debug); add_display_treestore_line(update, treestore, &iter, - "SlurmdDebug", temp_str); + "SlurmdDebug", temp_str); add_display_treestore_line(update, treestore, &iter, - "SlurmdLogFile", - slurm_ctl_conf_ptr->slurmd_logfile); + "SlurmdLogFile", + slurm_ctl_conf_ptr->slurmd_logfile); add_display_treestore_line(update, treestore, &iter, - "SlurmdPidFile", - slurm_ctl_conf_ptr->slurmd_pidfile); + "SlurmdPidFile", + slurm_ctl_conf_ptr->slurmd_pidfile); #ifndef MULTIPLE_SLURMD snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->slurmd_port); add_display_treestore_line(update, treestore, &iter, - "SlurmdPort", temp_str); + "SlurmdPort", temp_str); #endif add_display_treestore_line(update, treestore, &iter, - "SlurmdSpoolDir", - slurm_ctl_conf_ptr->slurmd_spooldir); + "SlurmdSpoolDir", + slurm_ctl_conf_ptr->slurmd_spooldir); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->slurmd_timeout); add_display_treestore_line(update, treestore, &iter, - "SlurmdTimeout", temp_str); + "SlurmdTimeout", temp_str); + add_display_treestore_line(update, treestore, &iter, + "SLURM_CONFIG_FILE", + slurm_ctl_conf_ptr->slurm_conf); + add_display_treestore_line(update, treestore, &iter, + "SLURM_VERSION", SLURM_VERSION); + add_display_treestore_line(update, treestore, &iter, + "SrunEpilog", + slurm_ctl_conf_ptr->srun_epilog); add_display_treestore_line(update, treestore, &iter, - "SLURM_CONFIG_FILE", - slurm_ctl_conf_ptr->slurm_conf); + "SrunProlog", + slurm_ctl_conf_ptr->srun_prolog); add_display_treestore_line(update, treestore, &iter, - "SLURM_VERSION", SLURM_VERSION); + "StateSaveLocation", + slurm_ctl_conf_ptr->state_save_location); + add_display_treestore_line(update, treestore, &iter, + "SuspendExcNodes", + slurm_ctl_conf_ptr->suspend_exc_nodes); + add_display_treestore_line(update, treestore, &iter, + "SuspendExcParts", + slurm_ctl_conf_ptr->suspend_exc_parts); + add_display_treestore_line(update, treestore, &iter, + "SuspendProgram", + slurm_ctl_conf_ptr->suspend_program); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->suspend_rate); add_display_treestore_line(update, treestore, &iter, - "SrunProlog", - slurm_ctl_conf_ptr->srun_prolog); + "SuspendRate", temp_str); + snprintf(temp_str, sizeof(temp_str), "%d", + ((int)slurm_ctl_conf_ptr->suspend_time - 1)); add_display_treestore_line(update, treestore, &iter, - "SrunEpilog", - slurm_ctl_conf_ptr->srun_epilog); + "SuspendTime", temp_str); add_display_treestore_line(update, treestore, &iter, - "StateSaveLocation", - slurm_ctl_conf_ptr->state_save_location); + "SwitchType", + slurm_ctl_conf_ptr->switch_type); add_display_treestore_line(update, treestore, &iter, - "SwitchType", - slurm_ctl_conf_ptr->switch_type); + "TaskEpilog", + slurm_ctl_conf_ptr->task_epilog); add_display_treestore_line(update, treestore, &iter, - "TaskEpilog", - slurm_ctl_conf_ptr->task_epilog); + "TaskPlugin", + 
slurm_ctl_conf_ptr->task_plugin); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->task_plugin_param); add_display_treestore_line(update, treestore, &iter, - "TaskPlugin", - slurm_ctl_conf_ptr->task_plugin); + "TaskPluginParam", temp_str); add_display_treestore_line(update, treestore, &iter, - "TaskProlog", - slurm_ctl_conf_ptr->task_prolog); + "TaskProlog", + slurm_ctl_conf_ptr->task_prolog); add_display_treestore_line(update, treestore, &iter, - "TmpFS", - slurm_ctl_conf_ptr->tmp_fs); + "TmpFS", + slurm_ctl_conf_ptr->tmp_fs); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->tree_width); add_display_treestore_line(update, treestore, &iter, - "TreeWidth", temp_str); + "TreeWidth", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->use_pam); add_display_treestore_line(update, treestore, &iter, - "UsePam", temp_str); + "UsePam", temp_str); + add_display_treestore_line(update, treestore, &iter, + "UnkillableStepProgram", + slurm_ctl_conf_ptr->unkillable_program); + snprintf(temp_str, sizeof(temp_str), "%u", + slurm_ctl_conf_ptr->unkillable_timeout); + add_display_treestore_line(update, treestore, &iter, + "UnkillableStepTimeout", temp_str); snprintf(temp_str, sizeof(temp_str), "%u", slurm_ctl_conf_ptr->wait_time); add_display_treestore_line(update, treestore, &iter, - "WaitTime", temp_str); + "WaitTime", temp_str); } extern void create_config_popup(GtkAction *action, gpointer user_data) diff --git a/src/sview/submit_info.c b/src/sview/submit_info.c index 5a07e1463..19543917b 100644 --- a/src/sview/submit_info.c +++ b/src/sview/submit_info.c @@ -6,7 +6,7 @@ * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> * - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sview/sview.c b/src/sview/sview.c index 5ddc8d502..f277da115 100644 --- a/src/sview/sview.c +++ b/src/sview/sview.c @@ -4,7 +4,7 @@ * Copyright (C) 2002-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov>, et. al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/src/sview/sview.h b/src/sview/sview.h index 5a57eefc8..9d70257ed 100644 --- a/src/sview/sview.h +++ b/src/sview/sview.h @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Danny Auble <da@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/Makefile.in b/testsuite/Makefile.in index ac957c0b0..e0e5c2fca 100644 --- a/testsuite/Makefile.in +++ b/testsuite/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -106,6 +108,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -119,10 +122,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -142,7 +148,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -153,6 +162,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -168,6 +179,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -183,6 +195,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -360,8 +373,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -386,8 +399,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -397,13 +410,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + 
END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am index 2d4cd215b..61bf0d5f8 100644 --- a/testsuite/expect/Makefile.am +++ b/testsuite/expect/Makefile.am @@ -9,7 +9,7 @@ EXTRA_DIST = \ mpi-testscripts/barrier_timed.c \ mpi-testscripts/Makefile \ mpi-testscripts/script.slurm.sh \ - globals.example \ + globals \ pkill \ README \ regression \ @@ -29,9 +29,6 @@ EXTRA_DIST = \ test1.14 \ test1.15 \ test1.16 \ - test1.17 \ - test1.18 \ - test1.18.prog.c \ test1.19 \ test1.20 \ test1.21 \ @@ -49,27 +46,21 @@ EXTRA_DIST = \ test1.32 \ test1.32.prog.c \ test1.33 \ - test1.34 \ test1.35 \ test1.36 \ - test1.37 \ test1.38 \ test1.39 \ test1.39.prog.c \ - test1.40 \ test1.41 \ test1.42 \ test1.43 \ test1.44 \ - test1.45 \ test1.46 \ - test1.47 \ test1.48 \ test1.49 \ test1.50 \ test1.51 \ test1.52 \ - test1.53 \ test1.54 \ test1.55 \ test1.56 \ @@ -79,7 +70,6 @@ EXTRA_DIST = \ test1.82 \ test1.83 \ test1.84 \ - test1.85 \ test1.86 \ test1.87 \ test1.88 \ @@ -102,6 +92,7 @@ EXTRA_DIST = \ test2.8 \ test2.9 \ test2.10 \ + test2.11 \ test3.1 \ test3.2 \ test3.3 \ @@ -111,6 +102,8 @@ EXTRA_DIST = \ test3.7 \ test3.7.prog.c \ test3.8 \ + test3.9 \ + test3.10 \ test4.1 \ test4.2 \ test4.3 \ @@ -137,11 +130,14 @@ EXTRA_DIST = \ test6.5 \ test6.6 \ test6.7 \ + test6.7.prog.c \ test6.8 \ test6.9 \ test6.10 \ test6.11 \ test6.12 \ + test6.13 \ + test6.13.prog.c \ test7.1 \ test7.2 \ test7.2.prog.c \ @@ -150,8 +146,6 @@ EXTRA_DIST = \ test7.3.prog.c \ test7.4 \ test7.4.prog.c \ - test7.5 \ - test7.5.prog.c \ test7.6 \ test7.6.prog.c \ test7.7 \ @@ -161,11 +155,17 @@ EXTRA_DIST = \ test7.8.prog.c \ test7.9 \ test7.9.prog.c \ + test7.10 \ test8.1 \ test8.2 \ test8.3 \ test8.4 \ test8.4.prog.c \ + test8.5 \ + test8.6 \ + test8.7 \ + test8.7.crypto.c \ + test8.7.prog.c \ test9.1 \ test9.2 \ test9.3 \ @@ -266,51 +266,15 @@ EXTRA_DIST = \ test17.27 \ test17.28 \ test17.29 \ - test17.30 \ test17.31 \ test17.32 \ - test18.1 \ - test18.2 \ - test18.3 \ - test18.4 \ - test18.5 \ - test18.6 \ - test18.7 \ - test18.8 \ - test18.9 \ - test18.10 \ - test18.11 \ - test18.12 \ - test18.13 \ - test18.14 \ - test18.15 \ - test18.16 \ - test18.16.prog.c \ - test18.17 \ - test18.18 \ - test18.19 \ - test18.19.prog.c \ - test18.20 \ - test18.21 \ - test18.22 \ - test18.23 \ - test18.24 \ - test18.25 \ - test18.26 \ - test18.27 \ - test18.28 \ - test18.29 \ - test18.30 \ - test18.31 \ - test18.32 \ - test18.32.prog.c \ - test18.33 \ - test18.34 \ - test18.35 \ - test18.36 \ - test18.36.prog.c \ - test18.37 \ - test18.37.prog.c \ + test19.1 \ + test19.2 \ + test19.3 \ + test19.4 \ + test19.5 \ + test19.6 \ + test19.7 \ usleep distclean-local: diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in index 2f48840aa..e8c11f705 100644 --- a/testsuite/expect/Makefile.in +++ b/testsuite/expect/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. 
# This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -87,6 +89,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -100,10 +103,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -123,7 +129,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -134,6 +143,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -149,6 +160,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -164,6 +176,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -229,7 +242,7 @@ EXTRA_DIST = \ mpi-testscripts/barrier_timed.c \ mpi-testscripts/Makefile \ mpi-testscripts/script.slurm.sh \ - globals.example \ + globals \ pkill \ README \ regression \ @@ -249,9 +262,6 @@ EXTRA_DIST = \ test1.14 \ test1.15 \ test1.16 \ - test1.17 \ - test1.18 \ - test1.18.prog.c \ test1.19 \ test1.20 \ test1.21 \ @@ -269,27 +279,21 @@ EXTRA_DIST = \ test1.32 \ test1.32.prog.c \ test1.33 \ - test1.34 \ test1.35 \ test1.36 \ - test1.37 \ test1.38 \ test1.39 \ test1.39.prog.c \ - test1.40 \ test1.41 \ test1.42 \ test1.43 \ test1.44 \ - test1.45 \ test1.46 \ - test1.47 \ test1.48 \ test1.49 \ test1.50 \ test1.51 \ test1.52 \ - test1.53 \ test1.54 \ test1.55 \ test1.56 \ @@ -299,7 +303,6 @@ EXTRA_DIST = \ test1.82 \ test1.83 \ test1.84 \ - test1.85 \ test1.86 \ test1.87 \ test1.88 \ @@ -322,6 +325,7 @@ EXTRA_DIST = \ test2.8 \ test2.9 \ test2.10 \ + test2.11 \ test3.1 \ test3.2 \ test3.3 \ @@ -331,6 +335,8 @@ EXTRA_DIST = \ test3.7 \ test3.7.prog.c \ test3.8 \ + test3.9 \ + test3.10 \ test4.1 \ test4.2 \ test4.3 \ @@ -357,11 +363,14 @@ EXTRA_DIST = \ test6.5 \ test6.6 \ test6.7 \ + test6.7.prog.c \ test6.8 \ test6.9 \ test6.10 \ test6.11 \ test6.12 \ + test6.13 \ + test6.13.prog.c \ test7.1 \ test7.2 \ test7.2.prog.c \ @@ -370,8 +379,6 @@ EXTRA_DIST = \ test7.3.prog.c \ test7.4 \ test7.4.prog.c \ - 
test7.5 \ - test7.5.prog.c \ test7.6 \ test7.6.prog.c \ test7.7 \ @@ -381,11 +388,17 @@ EXTRA_DIST = \ test7.8.prog.c \ test7.9 \ test7.9.prog.c \ + test7.10 \ test8.1 \ test8.2 \ test8.3 \ test8.4 \ test8.4.prog.c \ + test8.5 \ + test8.6 \ + test8.7 \ + test8.7.crypto.c \ + test8.7.prog.c \ test9.1 \ test9.2 \ test9.3 \ @@ -486,51 +499,15 @@ EXTRA_DIST = \ test17.27 \ test17.28 \ test17.29 \ - test17.30 \ test17.31 \ test17.32 \ - test18.1 \ - test18.2 \ - test18.3 \ - test18.4 \ - test18.5 \ - test18.6 \ - test18.7 \ - test18.8 \ - test18.9 \ - test18.10 \ - test18.11 \ - test18.12 \ - test18.13 \ - test18.14 \ - test18.15 \ - test18.16 \ - test18.16.prog.c \ - test18.17 \ - test18.18 \ - test18.19 \ - test18.19.prog.c \ - test18.20 \ - test18.21 \ - test18.22 \ - test18.23 \ - test18.24 \ - test18.25 \ - test18.26 \ - test18.27 \ - test18.28 \ - test18.29 \ - test18.30 \ - test18.31 \ - test18.32 \ - test18.32.prog.c \ - test18.33 \ - test18.34 \ - test18.35 \ - test18.36 \ - test18.36.prog.c \ - test18.37 \ - test18.37.prog.c \ + test19.1 \ + test19.2 \ + test19.3 \ + test19.4 \ + test19.5 \ + test19.6 \ + test19.7 \ usleep all: all-am diff --git a/testsuite/expect/README b/testsuite/expect/README index 6d1ae1ad0..102284a02 100644 --- a/testsuite/expect/README +++ b/testsuite/expect/README @@ -1,8 +1,10 @@ ############################################################################ +# Copyright (C) 2008 Lawrence Livermore National Security. # Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# Additionals by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -92,18 +94,15 @@ test1.8 Confirm that basic srun stdin, stdout, and stderr options work (--input, --output, and --error option respectively). test1.9 Test of srun verbose mode (--verbose option). test1.10 Test of srun/slurmd debug mode (--debug option). -test1.11 Test of batch job and job name options (--batch and --job-name - options). -test1.12 Test of processors, memory, and temporary disk space - constraints options (--mincpus, --mem, and --tmp options). - Also test that priority zero job is not started (--hold option). +test1.11 Test job name option (--job-name). +test1.12 Test of --checkpoint option. This does not validate the + checkpoint file itself. test1.13 Test of immediate allocation option (--immediate option). -test1.14 Test of shared and contiguous options (--shared and --contiguous). - Also uses --batch and --hold options. Also see test1.53. +test1.14 Test exclusive resource allocation for a step (--exclusive option). test1.15 Test of wait option (--wait option). test1.16 Confirm that srun buffering can be disabled (--unbuffered option). -test1.17 Confirm that srun allocation mode (--allocate option). -test1.18 Test of srun attach to existing job (--attach and --join options). +test1.17 Test of srun --open-mode (truncate or append) option. +test1.18 Test of --licenses option test1.19 Test srun stdout/err file name formatting (--output and --error options with %j, %J, %n, %s and %t specifications). test1.20 Test srun stdout/err disabling (--output and --error options with @@ -128,31 +127,28 @@ test1.31 Verify that SLURM directed environment variables are processed: SLURM_STDOUTMODE. 
test1.32 Test of srun signal forwarding test1.33 Test of srun application exit code reporting -test1.34 Verify that command arguments get forwarded to job script - (--batch option). +test1.34 REMOVED test1.35 Test of batch job with multiple concurrent job steps test1.36 Test parallel launch of srun (e.g. "srun srun hostname") -test1.37 Confirm that node sharing flags are respected (--nodelist and - --share options). +test1.37 REMOVED test1.38 Test srun handling of SIGINT to get task status or kill the job (--quit-on-interrupt option). test1.39 Test of linux light-weight core files. -test1.40 Test of stand-alone srun resource allocation (--uid and --no-shell - options). +test1.40 REMOVED test1.41 Validate SLURM debugger infrastructure (--debugger-test option). -test1.42 Test of account number and job dependencies (--account, --begin +test1.42 Test of account number and job dependencies (--account, and --depedency options). test1.43 Test of slurm_job_will_run API, (srun --test-only option). test1.44 Read srun's stdout slowly and test for lost data. -test1.45 Test the launch of a batch job within an existing job allocation. +test1.45 REMOVED test1.46 Test srun option --kill-on-bad-exit -test1.47 Tests #SLURM entry functionality in a batch script. +test1.47 REMOVED test1.48 Test of srun mail options (--mail-type and --mail-user options). test1.49 Test of srun task-prolog and task-epilog options. test1.50 Test of running non-existant job, confirm timely termination. test1.51 Test propagation of umask to spawned tasks. test1.52 Test of hostfile logic -test1.53 Test of nice value specification (--nice option). +test1.53 REMOVED test1.54 Test of running different executables with different arguments for each task (--multi-prog option). test1.55 Make certain that srun behaves when its controlling terminal @@ -175,8 +171,7 @@ test1.83 Test of contiguous option with multiple nodes (--contiguous option). Also see test1.14. test1.84 Test of cpus-per-task option on a single node (--cpus-per-task option). -test1.85 Test of partition specification on job submission (--partition - option). +test1.85 REMOVED test1.86 Confirm node selection from within a job step on existing allocation (--nodelist, --exclude, --nodes and --nprocs options). test1.87 Confirm node selection from within a job step on existing allocation @@ -186,6 +181,7 @@ test1.89 Test of CPU affinity support. test1.90 Test of memory affinity support for NUMA systems. test1.91 Test of CPU affinity for multi-core systems. test1.92 Test of task distribution support on multi-core systems. +test1.93 Test of LAM-MPI functionality **NOTE** The above tests for mutliple processor/partition systems only test2.# Testing of scontrol options (to be run as unprivileged user). @@ -215,6 +211,7 @@ test3.6 Testing of hidden partitions. test3.7 Test of job suspend/resume. test3.8 Test of batch job requeue. test3.9 Test of "scontrol show slurmd" +test3.10 Test of "scontrol notify <jobid> <message>" UNTESTED "scontrol abort" would stop slurm UNTESTED "scontrol shutdown" would stop slurm @@ -276,14 +273,13 @@ test7.# Testing of other functionality. ========================================== test7.1 Test priorities slurmctld assigns to jobs. Uses srun --hold and --batch options. -test7.2 Test of PMI functions available via API library. Uses srun and - slaunch. Tests --pmi-threads option in both commands. +test7.2 Test of PMI functions available via API library. Tests + --pmi-threads option in srun command. 
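The test7.2 entry just above exercises the PMI interface that SLURM provides through its libpmi library. As a point of reference, a minimal C sketch of the basic PMI-1 calls such a test builds on is shown below; it is an illustration, not the actual test7.2.prog.c, and the <slurm/pmi.h> header path plus the -lpmi link flag are assumptions about the local installation.

/* Sketch only: the elementary PMI-1 calls underlying test7.2.
 * Compile against SLURM's PMI library (header path and -lpmi assumed)
 * and launch under srun, e.g. "srun -n4 ./pmi_demo" (name hypothetical). */
#include <stdio.h>
#include <slurm/pmi.h>

int main(void)
{
	int spawned = 0, rank = -1, size = -1;

	if (PMI_Init(&spawned) != PMI_SUCCESS) {
		fprintf(stderr, "PMI_Init failed\n");
		return 1;
	}
	PMI_Get_rank(&rank);
	PMI_Get_size(&size);
	printf("task %d of %d (spawned=%d)\n", rank, size, spawned);
	PMI_Finalize();
	return 0;
}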
test7.3 Test of slurm_step_launch API with spawn_io=true (needed by poe on IBM AIX systems). test7.4 Test of TotalView operation with srun, with and without bulk transfer. -test7.5 Test of TotalView operation with slaunch, with and without bulk - transfer. +test7.5 REMOVED test7.6 Test of TotalView operation with sattach test7.7 Test of sched/wiki2 plugin. This is intended to execute in the place of Moab or Maui and emulate its actions to confirm proper @@ -298,13 +294,13 @@ test7.10 Test if we can trick SLURM into using the wrong user ID test8.# Test of Blue Gene specific functionality. ================================================= -test8.1 Test of Blue Gene specific srun command line options -test8.2 Test of Blue Gene specific srun environment variables +test8.1 Test of Blue Gene specific sbatch command line options +test8.2 Test of Blue Gene specific sbatch environment variables test8.3 Test of Blue Gene specific job geometry support test8.4 Test of Blue Gene MPI job execution test8.5 Confirm we can make a 32, 128, and 512 cnode block. test8.6 Stress test Dynamic mode block creation. - +test8.7 Test of Blue Gene scheduling with sched/wik2 plugin. test9.# System stress testing. Exercises all commands and daemons. ===================================================================== @@ -475,67 +471,9 @@ test17.29 Verify that command arguments get forwarded to job script. test17.30 Test of comment field specification (--comment option). test17.31 Tests #PBS entry functionality in a batch script. test17.32 Test of --overcommit option. +test17.33 Test of --open-mode option. -test18.# Testing of slaunch options. -====================================== -test18.1 Confirm slaunch usage option works (--usage option). -test18.2 Confirm slaunch help option works (--help option). -test18.3 Confirm that slaunch reports a proper version number - (--version option). -test18.4 Confirm that a job executes with the proper task count (--tasks - and --overcommit options). -test18.5 Confirm that slauch local stdin, stdout, and stderr options work - (options --slaunch-input, --slaunch-output and --slaunch-error - respectively). -test18.6 Test of slaunch verbose mode (--verbose option). -test18.7 Test of slaunch/slurmd debug mode (--slurmd_debug option). -test18.8 Confirm that slaunch buffering can be disabled (--unbuffered option). -test18.9 Test of wait option (--wait option). -test18.10 Test slaunch task stdout/err disabling (--task-output and - --task-error options). -test18.11 Test slaunch stdout/err file name formatting (--task-output and - --task-error options with %j, %J, %n, %s and %t specifications). -test18.12 Test slaunch stdin routing to specific task (--slaunch-input-filter - option with numeric argument). -test18.13 Confirm that slaunch sets appropriate working directory (--workdir - option). -test18.14 Verify the appropriate job environment variables are set -test18.15 Verify that user environment variables are propagated to the job -test18.16 Verify that user limits are propagated to the job -test18.17 Test of salloc and slaunch exit code reporting -test18.18 Test of parallel launch of slaunch ("slaunch slaunch id"). -test18.19 Test of slaunch signal forwarding (actually using scancel, for - now anyway) -test18.20 Run "slaunch cat" and read slaunch's stdout SLOWLY, creating - stdout back pressure in slaunch. -test18.21 Test of slaunch's --kill-on-bad-exit option. -test18.22 Test of slaunch task-prolog and task-epilog option. 
-test18.23 Test of running non-existant job, confirm timely termination. -test18.24 Test propagation of umask to spawned tasks. -test18.25 Test of --task-layout-file option. -test18.26 Test of running different executables with different arguments - for each task (--multi-prog option). -test18.27 Confirm that a job executes with the proper task distribution - (--nodes and --distribution options). -test18.28 Confirm that a job executes with the proper node count - (--nodes option). -test18.29 Test of slaunch --cpus-per-task option. -test18.30 Confirm that a job executes with the proper node count - (--nodes option). -test18.31 Confirm that a job executes with the specified nodes - (--relative, --nodelist-byname and --nodelist-byid). -test18.32 Basic MPI functionality tests via slaunch. -test18.33 Verify environment variables controlling slaunch are processed: - SLAUNCH_DEBUG, SLAUNCH_DISTRIBUTION, SLAUNCH_LABELIO and - SLAUNCH_OVERCOMMIT -test18.34 Test slaunch's ability to set the job step's name (--name option) -test18.35 Test of task layout controls (--task-layout-byid, - --task-layout-byname and --task-layout-file options). -test18.36 Test of CPU affinity support (--cpu-bind option). -test18.37 Test of memory affinity support for NUMA systems (--mem-bind option). -test18.38 Test of slaunch --jobid - test19.# Testing of strigger options. ======================================= test19.1 strigger --help @@ -554,3 +492,12 @@ test20.2 qstat command tests test20.3 qdel command tests test20.4 pbsnodes command tests + +test21.# Testing of sacctmgr commands and options. +================================================= +test21.1 sacctmgr --usage +test21.2 sacctmgr --help +test21.3 sacctmgr -V +test21.4 sacctmgr version +test21.5 sacctmgr add, list, and delete a cluster +test21.6 sacctmgr add, list, and delete multiple cluster diff --git a/testsuite/expect/globals b/testsuite/expect/globals index 4e249c946..8fc3abccb 100755 --- a/testsuite/expect/globals +++ b/testsuite/expect/globals @@ -11,10 +11,12 @@ # set mpicc "/usr/local/bin/mpicc" # ############################################################################ +# Copyright (C) 2008 Lawrence Livermore National Security. # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# Additions by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -34,7 +36,7 @@ # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################ -global sacct salloc sattach sbatch sbcast scancel scontrol sinfo slaunch smap squeue srun +global sacctmgr sacct salloc sattach sbatch sbcast scancel scontrol sinfo smap squeue srun # Conditional set. Only set variable if variable does not yet exist. proc cset {name value} { @@ -53,6 +55,8 @@ if [file exists globals.local] { # Used to locate binaries, libraries, and header files. 
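The globals hunk above defines cset, a conditional set: a value such as the new sacctmgr path is assigned only if the variable is not already defined, which is what lets a site-local globals.local override the defaults listed just below. A rough C analog of that idea, using POSIX setenv(3) with its no-overwrite flag, is sketched here; the variable names are illustrative only, and the test suite itself of course stays in Tcl.

/* Sketch only: a C analog of the expect globals' cset proc.  setenv()'s
 * third argument of 0 keeps any value that is already present, mirroring
 * "only set variable if variable does not yet exist". */
#include <stdio.h>
#include <stdlib.h>

static void cset(const char *name, const char *value)
{
	setenv(name, value, 0);		/* 0 = do not overwrite */
}

int main(void)
{
	cset("SLURM_DIR", "/usr");	/* default kept only if not already set */
	printf("SLURM_DIR=%s\n", getenv("SLURM_DIR"));
	return 0;
}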
# cset slurm_dir "/usr" +cset build_dir "../../" +cset sacctmgr "${slurm_dir}/bin/sacctmgr" cset sacct "${slurm_dir}/bin/sacct" cset salloc "${slurm_dir}/bin/salloc" cset sattach "${slurm_dir}/bin/sattach" @@ -61,7 +65,6 @@ cset sbcast "${slurm_dir}/bin/sbcast" cset scancel "${slurm_dir}/bin/scancel" cset scontrol "${slurm_dir}/bin/scontrol" cset sinfo "${slurm_dir}/bin/sinfo" -cset slaunch "${slurm_dir}/bin/slaunch" cset smap "${slurm_dir}/bin/smap" cset squeue "${slurm_dir}/bin/squeue" cset srun "${slurm_dir}/bin/srun" @@ -100,7 +103,7 @@ cset prompt "(%|#|\\\$|]|\[^>]>) *(|\[^ ]* *)$" # Only the shell names (e.g. bin_bash) must be full pathnames # cset bin_awk "awk" -cset bin_bash [exec which bash | tail -1] +cset bin_bash [exec which bash | tail -n 1] cset bin_cat "cat" cset bin_cc "gcc" cset bin_chmod "chmod" @@ -162,7 +165,7 @@ cset sleep_error_message "(invalid time interval)|(bad character in argument)" set alpha "\[a-zA-Z\]+" set alpha_cap "\[A-Z\]+" set alpha_numeric "\[a-zA-Z0-9\]+" -set alpha_numeric_under "\[a-zA-Z0-9_\]+" +set alpha_numeric_under "\[a-zA-Z0-9_\-\]+" set alpha_under "\[A-Z_\]+" set end_of_line "\[\r\n\]" set number "\[0-9\]+" diff --git a/testsuite/expect/pkill b/testsuite/expect/pkill index 58b263065..8c648f22c 100755 --- a/testsuite/expect/pkill +++ b/testsuite/expect/pkill @@ -7,7 +7,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/regression b/testsuite/expect/regression index 4a386b89c..b59af8525 100755 --- a/testsuite/expect/regression +++ b/testsuite/expect/regression @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -54,9 +54,9 @@ BEGIN_TIME=`date +%s` for major in `seq 1 20`; do for minor in `seq 1 100`; do TEST=test${major}.${minor} - if [ ! -f $TEST ]; then continue; fi + if [ ! -f ./$TEST ]; then continue; fi - $TEST + ./$TEST if [ $? -eq 0 ] then COMPLETIONS=$((COMPLETIONS+1)) diff --git a/testsuite/expect/regression.py b/testsuite/expect/regression.py index 66de8f8a5..a1e706430 100755 --- a/testsuite/expect/regression.py +++ b/testsuite/expect/regression.py @@ -3,7 +3,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Christopher J. Morrone <morrone2@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -140,19 +140,23 @@ def main(argv=None): sys.stdout.flush() def test_cmp(testA, testB): - if testA[0] < testB[0]: - return -1 - elif testA[0] > testB[0]: - return 1 - else: - if testA[1] < testB[1]: - return -1 - elif testA[1] > testB[1]: - return 1 - else: - return 0 + rc = cmp(testA[0], testB[0]) + if rc != 0: + return rc + return cmp(testA[1], testB[1]) def test_in_list(major, minor, test_list): + '''Test for whether a test numbered major.minor is in test_list. 
+ + "major" and "minor" must be integers. "test_list" is a list of + tuples, each tuple representing one test. The tuples are of the + form: + + (major, minor, filename) + + Returns True if the test is in the list, and False otherwise. + ''' + if not test_list: return False for test in test_list: @@ -162,15 +166,38 @@ def test_in_list(major, minor, test_list): return False def test_parser(option, opt_str, value, parser): - splitter = re.compile('[,\s]+') - # On error: raise OptionValueError - # parser.values.exclude - # setattr(parser.values, option.dest, 1) + '''Option callback function for the optparse.OptionParser class. + + Will take a string representing one or more test names and append + a tuple representing the test into a list in the options's destination + variable. + + A string representing test names must patch the regular expression + named "test_re" below. Some examples of exceptable options are: + + '1.5' + 'test9.8' + '2.6 test3.1 14.2' + '3.4,6.7,8.3' + '1.*' + '*.2' + '1.*,3.8,9.2' + + Raises OptionValueError on error. + ''' + + # Initialize the option's destination array, if is does not already exist. if not hasattr(parser.values, option.dest): setattr(parser.values, option.dest, []) if getattr(parser.values, option.dest) is None: setattr(parser.values, option.dest, []) + + # Get a pointer to the option's destination array. l = getattr(parser.values, option.dest) + + # Split the user's option string into a series of tuples that represent + # each test, and add each tuple to the destination array. + splitter = re.compile('[,\s]+') val = splitter.split(value) test_re = re.compile('(test)?((\d+)|\*)\.((\d+)|\*)$') for v in val: @@ -186,6 +213,13 @@ def test_parser(option, opt_str, value, parser): l.append((major, minor)) class poor_Popen_substitute: + '''subprocess.Popen work-alike function. + + The subprocess module and its subprocess.Popen class were + added in Python 2.4. This function is provided to supply the + subset of Popen functionality need by this program if run under + older python interpreters. + ''' def __init__(self, args, shell=False, stdout=None, stderr=None): if shell is not False: raise Exception("This substitute Popen only supports shell=True") diff --git a/testsuite/expect/test1.1 b/testsuite/expect/test1.1 index 06f2ef541..19516084d 100755 --- a/testsuite/expect/test1.1 +++ b/testsuite/expect/test1.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.10 b/testsuite/expect/test1.10 index 7fbeac42f..f8268d040 100755 --- a/testsuite/expect/test1.10 +++ b/testsuite/expect/test1.10 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.11 b/testsuite/expect/test1.11 index e4844c231..b27adc5a7 100755 --- a/testsuite/expect/test1.11 +++ b/testsuite/expect/test1.11 @@ -14,7 +14,7 @@ # Copyright (C) 2002 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -36,10 +36,6 @@ source ./globals set test_id "1.11" -set file_in "test$test_id.input" -set file_out "test$test_id.output" -set file_err "test$test_id.error" -set job_name "jobname$test_id" set name_read "" set complete_flag 0 @@ -53,125 +49,6 @@ set got_login_grps 0 print_header $test_id -# -# Delete left-over input script plus stdout/err files -# Build input script file that runs two job steps -# -exec $bin_rm -f $file_in $file_out $file_err -make_bash_script $file_in " - $bin_id - $srun $bin_sleep 1 - $srun $bin_sleep 1 -" - -# -# Spawn a srun batch job that uses stdout/err and confirm their contents -# -set timeout $max_job_delay -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err --job-name=$job_name -t1 $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} - -if {$job_id == 0} { - send_user "\nFAILURE: batch submit failure\n" - exit 1 -} - -# -# Wait for job to complete -# -if {[wait_for_job $job_id "DONE"] != 0} { - send_user "\nFAILURE: waiting for job to complete\n" - set exit_code 1 -} - -spawn $scontrol show job $job_id -expect { - -re "Name=$job_name" { - set name_flag 1 - exp_continue - } - -re "JobState=COMPLETE" { - set complete_flag 1 - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } -} - -if {$name_flag == 0} { - send_user "\nFAILURE: batch job name failure\n" - set exit_code 1 -} - -if {$complete_flag == 0} { - send_user "\nFAILURE: batch job termination failure\n" - set exit_code 1 -} - -# -# Check user id and group id in stdout -# -spawn $bin_id -expect { - -re "(uid=.*\n)" { - set login_grp_info $expect_out(1,string) - set got_login_grps 1 - exp_continue - } - eof { - wait - } -} - -if {[wait_for_file $file_out] == 0} { - spawn $bin_cat $file_out - expect { - -re "(uid=.*\n)" { - set job_grp_info $expect_out(1,string) - set got_job_grps 1 - exp_continue - } - eof { - wait - } - } -} - -if {$got_login_grps == 0} { - send_user "\nFAILURE: Unable to get user and group ID info\n" - set exit_code 1 -} -if {$got_job_grps == 0} { - send_user "\nFAILURE: User and group ID info missing from stdout\n" - set exit_code 1 -} -if {[string compare $login_grp_info $job_grp_info] != 0} { - send_user "\nFAILURE: Login and slurm user info mismatch\n" - set exit_code 1 -} - -if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_out $file_err -} - # # Spawn a srun job with a really long name and confirm it is accepted or truncated # @@ -180,7 +57,7 @@ set job_id 0 set srun_pid [spawn $srun --verbose --job-name=$job_name -t1 $bin_id] expect { - -re "launching ($number).0 on host" { + -re "jobid ($number):" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test1.12 b/testsuite/expect/test1.12 index 2d64951bc..a0c173d50 100755 --- a/testsuite/expect/test1.12 +++ b/testsuite/expect/test1.12 @@ -1,19 +1,17 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of processors, memory, and temporary disk space -# constraints options 
(--mincpus, --mem, and --tmp options). -# Also test that priority zero job is not started (--hold -# option). +# Test of --checkpoint option. This does not validate the +# checkpoint file itself. # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. +# Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -30,40 +28,36 @@ # # You should have received a copy of the GNU General Public License along # with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. ############################################################################ source ./globals -set test_id "1.12" -set exit_code 0 -set file_in "test$test_id.input" -set job_id 0 - -set cpu_cnt 1 -set mem_size 13 -set tmp_size 2 -set matches 0 +set test_id "1.12" +set exit_code 0 +set file_in "test$test_id.input" +set ckpt_in 12 +set ckpt_out -1 print_header $test_id # -# Delete left-over input script -# Build input script file +# Submit a slurm job that print it's info from scontrol # exec $bin_rm -f $file_in make_bash_script $file_in " - $bin_sleep 10 + $scontrol show step \$SLURM_JOBID.\$SLURM_STEPID " -# -# Spawn a srun batch job with constraints and stdout/err -# -set srun_pid [spawn $srun --batch --output=none --error=none --mincpus=$cpu_cnt --mem=$mem_size --tmp=$tmp_size --hold -t1 $file_in] +set srun_pid [spawn $srun -N1 -t1 --checkpoint=$ckpt_in $file_in] expect { - -re "jobid ($number) submitted" { - set job_id $expect_out(1,string) + -re "Checkpoint=($number)" { + set ckpt_out $expect_out(1,string) exp_continue } + -re "Unable to contact" { + send_user "\nFAILURE: slurm appears to be down\n" + exit 1 + } timeout { send_user "\nFAILURE: srun not responding\n" slow_kill $srun_pid @@ -74,67 +68,14 @@ expect { } } -if {$job_id == 0} { - send_user "\nFAILURE: batch submit failure\n" - exit 1 -} - -# -# Confirm constraints are registered and wait for job completion -# -spawn $scontrol show job $job_id -expect { - -re "Priority=($number)" { - set read_prio $expect_out(1,string) - if {$read_prio == 0} { - incr matches - send_user "match of Priority\n" - } - exp_continue - } - -re "JobState=PENDING" { - incr matches - send_user "match of JobState\n" - exp_continue - } - -re "MinProcs=($number)" { - set read_proc $expect_out(1,string) - if {$read_proc == $cpu_cnt} { - incr matches - send_user "match of MinProcs\n" - } - exp_continue - } - -re "MinMemory=($number)" { - set read_mem $expect_out(1,string) - if {$read_mem == $mem_size} { - incr matches - send_user "match of MinMemory\n" - } - exp_continue - } - -re "MinTmpDisk=($number)" { - set read_disk $expect_out(1,string) - if {$read_disk == $tmp_size} { - incr matches - send_user "match of MinTmpDisk\n" - } - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } -} -cancel_job $job_id - -if {$matches != 5} { - 
send_user "\nFAILURE: Did not get proper constraints\n" +if {$ckpt_out == -1} { + send_user "\nFAILURE: No Checkpoint time reported for job step\n" + set exit_code 1 +} elseif {$ckpt_in != $ckpt_out} { + send_user "\nFAILURE: No Checkpoint time not set properly\n" set exit_code 1 } + if {$exit_code == 0} { exec $bin_rm -f $file_in send_user "\nSUCCESS\n" diff --git a/testsuite/expect/test1.13 b/testsuite/expect/test1.13 index f509a1402..2d31d61bf 100755 --- a/testsuite/expect/test1.13 +++ b/testsuite/expect/test1.13 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -45,7 +45,7 @@ set timeout 10 set srun_pid [spawn $srun --immediate --hold -t1 $bin_pwd] expect { -re "Unable to allocate resources" { - send_user "This error is expected, no worries\n" + send_user "\nThis error is expected, no worries\n" incr matches exp_continue } @@ -72,11 +72,11 @@ set matches 0 set srun_pid [spawn $srun --immediate -v -t1 $bin_pwd] expect { -re "error" { - send_user "This error is not unexpected, no worries\n" + send_user "\nThis error is not unexpected, no worries\n" incr matches exp_continue } - -re "launching" { + -re "tasks started" { incr matches exp_continue } diff --git a/testsuite/expect/test1.14 b/testsuite/expect/test1.14 index 114ef40c7..86f8f9dfc 100755 --- a/testsuite/expect/test1.14 +++ b/testsuite/expect/test1.14 @@ -1,17 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of shared and contiguous options (--shared and --contiguous). -# Also uses --batch and --hold options. Also see test1.53. +# Test exclusive resource allocation for a step (--exclusive option). # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. +# Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -28,127 +27,194 @@ # # You should have received a copy of the GNU General Public License along # with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
############################################################################ source ./globals -set test_id "1.14" -set exit_code 0 -set job_id 0 -set matches 0 +set test_id "1.14" +set exit_code 0 +set file_in "test$test_id.input" +set file_out "test$test_id.output" +set job_id 0 +set sleep_secs 10 print_header $test_id +if {[test_bluegene]} { + send_user "\nWARNING: This test is incompatable with bluegene systems\n" + exit $exit_code +} + # -# Spawn a srun batch job with shared option only +# Delete left-over input script +# Build input script file +# Run one more step than allocated CPUs and make sure it waits +# The "sleep 2" is meant to insure the earlier job steps start first +# +exec $bin_rm -f $file_in $file_out +make_bash_script $file_in " + inx=0 + while \[ \$inx -lt \$SLURM_TASKS_PER_NODE \] + do + $srun --exclusive -n1 sleep $sleep_secs & + inx=\$((inx+1)) + done + $bin_sleep 2 + $srun -v --exclusive -n1 $bin_printenv SLURMD_NODENAME & + wait +" + # -set srun_pid [spawn $srun --share --hold --batch -t1 $bin_pwd] +# Spawn a job via sbatch +# +spawn $sbatch -N1 -t1 --output=$file_out $file_in expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" set exit_code 1 + exp_continue } eof { wait } } +if { $job_id == 0 } { + send_user "\nFAILURE: failed to submit job\n" + exit 1 +} # -# Confirm shared and contiguous flag values -# -if {$job_id != 0} { - spawn $scontrol show job $job_id - expect { - -re "Shared=($number)" { - set shared_val $expect_out(1,string) - if {$shared_val == 1} { - incr matches - } - exp_continue - } - -re "Contiguous=($number)" { - set cont_val $expect_out(1,string) - if {$cont_val == 0} { - incr matches - } - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } - } +# Wait for job to complete +# +if {[wait_for_job $job_id "DONE"] != 0} { + send_user "\nFAILURE: waiting for job to complete\n" cancel_job $job_id - set job_id 0 + set exit_code 1 +} + +# +# Check for desired output +# +if {[wait_for_file $file_out] != 0} { + send_user "\nFAILURE: Output file $file_out is missing\n" + exit 1 +} +set match1 0 +set match2 0 +spawn $bin_cat $file_out +expect { + -re "Job step creation temporarily disabled, retrying" { + incr match1 + exp_continue + } + -re "Job step created" { + incr match2 + exp_continue + } + eof { + wait + } +} + +if { $match1 != 1 || $match2 != 1 } { + send_user "\nFAILURE: Problem with exclusive resource allocation " + send_user "for step ($match1, $match2)\n" + set exit_code 1 +} + +if {$exit_code == 0} { + send_user "\nSo far, so good. 
Trying with --imediate option\n\n" } else { - set exit_code 1 + exit $exit_code } # -# Spawn a srun batch job with contiguous option only +# Delete left-over input script +# Build another input script file +# Run one more step than allocated CPUs with immediate option and make aborts +# The "sleep 2" is meant to insure the earlier job steps start first +# +exec $bin_rm -f $file_in $file_out +make_bash_script $file_in " + inx=0 + while \[ \$inx -lt \$SLURM_TASKS_PER_NODE \] + do + $srun --exclusive -n1 sleep $sleep_secs & + inx=\$((inx+1)) + done + $bin_sleep 2 + $srun --exclusive -n1 --immediate hostname & + wait +" + # -set job_id 0 -spawn $srun --contiguous --hold --batch -t1 $bin_pwd +# Spawn a job via sbatch +# +spawn $sbatch -N1 -t1 --output=$file_out $file_in expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - exit 1 + send_user "\nFAILURE: sbatch not responding\n" + set exit_code 1 + exp_continue } eof { wait } } +if { $job_id == 0 } { + send_user "\nFAILURE: failed to submit job\n" + exit 1 +} # -# Confirm shared and contiguous flag values -# -if {$job_id != 0} { - spawn $scontrol show job $job_id - expect { - -re "Shared=($alpha)" { - set shared_val $expect_out(1,string) - if {[string compare $shared_val OK] == 0} { - incr matches - } - exp_continue - } - -re "Contiguous=($number)" { - set cont_val $expect_out(1,string) - if {$cont_val == 1} { - incr matches - } - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } - } +# Wait for job to complete +# +if {[wait_for_job $job_id "DONE"] != 0} { + send_user "\nFAILURE: waiting for job to complete\n" cancel_job $job_id -} else { set exit_code 1 } -if {$matches != 4} { - send_user "\nFAILURE: Did not properly set shared and contiguous flags\n" +# +# Check for desired output +# +if {[wait_for_file $file_out] != 0} { + send_user "\nFAILURE: Output file $file_out is missing\n" + exit 1 +} +set match1 0 +spawn $bin_cat $file_out +expect { + -re "Job step creation temporarily disabled, retrying" { + send_user "\nFAILURE: Problem --exclusive and --immediate option for step\n" + set exit_code 1 + exp_continue + } + -re "Unable to create job step" { + send_user "This error was expected, no worries\n" + incr match1 + exp_continue + } + eof { + wait + } +} + +if { $match1 != 1 } { + send_user "\nFAILURE: Problem --exclusive and --immediate option for step\n" set exit_code 1 } + if {$exit_code == 0} { + exec $bin_rm -f $file_in $file_out send_user "\nSUCCESS\n" } exit $exit_code diff --git a/testsuite/expect/test1.15 b/testsuite/expect/test1.15 index 682b55700..4237f3afa 100755 --- a/testsuite/expect/test1.15 +++ b/testsuite/expect/test1.15 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -65,6 +65,10 @@ expect { incr matches exp_continue } + -re "exited with exit code" { + send_user "This error is expected, no worries\n" + exp_continue + } timeout { send_user "\nFAILURE: srun not responding\n" slow_kill $srun_pid diff --git a/testsuite/expect/test1.16 b/testsuite/expect/test1.16 index 98e6b2726..5d7465a95 100755 --- a/testsuite/expect/test1.16 +++ b/testsuite/expect/test1.16 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.17 b/testsuite/expect/test1.17 index c92cbce48..e1adfa1ae 100755 --- a/testsuite/expect/test1.17 +++ b/testsuite/expect/test1.17 @@ -1,16 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Confirm that srun allocation mode (--allocate option). +# Test of srun --open-mode (truncate or append) option. # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2002 The Regents of the University of California. +# Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -31,40 +31,55 @@ ############################################################################ source ./globals -set test_id "1.17" -set exit_code 0 -set job_id 0 -set slurm_jobid 0 +set test_id "1.17" +set file_in "test$test_id.input" +set file_out "test$test_id.output" +set file_err "test$test_id.error" +set exit_code 0 +set login_grp_info "" +set got_job_grps 0 +set got_login_grps 0 +set got_sleep_err 0 print_header $test_id # -# Submit a slurm allocate job -# Interactively print $SLURM_JOBID +# Delete left-over stdin/out/err files +# Build stdin file # -set timeout $max_job_delay -set match 0 -set srun_pid [spawn $srun --allocate --verbose -t1] +exec $bin_rm -f $file_in $file_out $file_err +make_bash_script $file_in " + $bin_id + $bin_sleep aaa + exit 0" +exec echo "$bin_echo INITIAL_VALUE" >$file_err +exec echo "$bin_echo INITIAL_VALUE" >$file_out + +# +# Get user id and group id for comparison with stdout +# +spawn $bin_id expect { - -re "jobid ($number).*" { - set job_id $expect_out(1,string) - send "$bin_echo MY_ID=\$SLURM_JOBID \n" + -re "(uid=$number)" { + set login_grp_info $expect_out(1,string) + set got_login_grps 1 exp_continue } - -re "MY_ID=($number)" { - set slurm_jobid $expect_out(1,string) - send "exit 2\n" - exp_continue - } - -re "MY_ID=" { -# no environment variable - send "exit 2\n" - exp_continue + eof { + wait } - -re "\[Ee\]xit 2" { - send_user "This error was expected, no worries\n" - set match 1 - exp_continue +} + +# +# Spawn a shell via srun that uses stdin/out/err in truncate mode +# and confirm their contents +# +set timeout $max_job_delay +set srun_pid [spawn $srun --input=$file_in --output=$file_out --error=$file_err --open-mode=t -t1 $bin_bash] +expect { + -re "Unable to contact" { + send_user "\nFAILURE: slurm appears to be down\n" + exit 1 } timeout { send_user "\nFAILURE: srun not responding\n" @@ -76,29 +91,134 @@ expect { } } +if {[wait_for_file $file_out] == 0} { + spawn $bin_cat $file_out + expect { + -re "INITIAL_VALUE" { + send_user "\nFAILURE: stdout file not truncated\n" + set exit_code 1 + } + -re "$login_grp_info" { + incr got_job_grps + exp_continue + } + eof { + wait + } + } +} + +if {$got_login_grps == 0} { + send_user "\nFAILURE: Unable to get user and group ID info\n" + set exit_code 1 +} +if {$got_job_grps != 1} { + send_user "\nFAILURE: User and group ID info missing from stdout\n" + set exit_code 1 +} # -# Confirm the job_ids match. 
+# Check for sleep input specification error in stderr # -if { $job_id == 0 } { - send_user "\nFAILURE: srun --allocate failure\n" +if {[wait_for_file $file_err] == 0} { + spawn $bin_cat $file_err + expect { + -re "INITIAL_VALUE" { + send_user "\nFAILURE: stderr file not truncated\n" + set exit_code 1 + } + -re "$sleep_error_message" { + send_user "\nNo worries, this error is expected\n" + incr got_sleep_err + exp_continue + } + -re "Specify time as a positive integer.*\n" { + incr got_sleep_err + exp_continue + } + eof { + wait + } + } +} +if {$got_sleep_err != 1} { + send_user "\nFAILURE: Unexpected stderr contents\n" set exit_code 1 -} else { - if { $job_id != $slurm_jobid } { - send_user "\nFAILURE: srun job_id mis-match\n" +} + +if {$exit_code != 0} { + exit $exit_code +} + + +# +# Spawn a shell via srun that uses stdin/out/err in append mode +# and confirm their contents +# +set timeout $max_job_delay +set srun_pid [spawn $srun --input=$file_in --output=$file_out --error=$file_err --open-mode=a -t1 $bin_bash] +expect { + -re "Unable to contact" { + send_user "\nFAILURE: slurm appears to be down\n" + exit 1 + } + timeout { + send_user "\nFAILURE: srun not responding\n" + slow_kill $srun_pid set exit_code 1 } + eof { + wait + } +} + +set got_job_grps 0 +if {[wait_for_file $file_out] == 0} { + spawn $bin_cat $file_out + expect { + -re "$login_grp_info" { + incr got_job_grps + exp_continue + } + eof { + wait + } + } +} +if {$got_job_grps != 2} { + send_user "\nFAILURE: User and group ID info missing from stdout\n" + set exit_code 1 } # -# Confirm exit code is propogated +# Check for sleep input specification error in stderr # -if { $match != 1 } { - send_user "\nFAILURE: srun exit code not reported\n" +set got_sleep_err 0 +if {[wait_for_file $file_err] == 0} { + spawn $bin_cat $file_err + expect { + -re "$sleep_error_message" { + send_user "\nNo worries, this error is expected\n" + incr got_sleep_err + exp_continue + } + -re "Specify time as a positive integer.*\n" { + incr got_sleep_err + exp_continue + } + eof { + wait + } + } +} +if {$got_sleep_err != 2} { + send_user "\nFAILURE: Unexpected stderr contents\n" set exit_code 1 } -if { $exit_code == 0 } { + +if {$exit_code == 0} { + exec $bin_rm -f $file_in $file_out $file_err send_user "\nSUCCESS\n" } exit $exit_code diff --git a/testsuite/expect/test1.18 b/testsuite/expect/test1.18 index 5419a058e..38b442039 100755 --- a/testsuite/expect/test1.18 +++ b/testsuite/expect/test1.18 @@ -1,16 +1,17 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of srun attach to existing job (--attach and --join options). +# Test of --licenses option # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "WARNING: ..." with an explanation of why the test can't be made, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. +# Copyright (C) 2008 Lawrence Livermore National Security. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -33,110 +34,77 @@ source ./globals set test_id "1.18" set exit_code 0 -set file_prog "test$test_id.prog" -set job_id 0 -set matches 0 -set tasks 8 print_header $test_id -if { [test_bluegene] } { - set tasks 1 - set node_cnt 1-512 -} else { - set tasks 8 - set node_cnt 1-$tasks -} - -# -# Delete left-over program and rebuild it -# -exec $bin_rm -f $file_prog -exec $bin_make -f /dev/null $file_prog -exec $bin_chmod 700 $file_prog # -# Spawn initial program via srun +# Run a job attempting to get some dummy license name # -set timeout $max_job_delay -set srun_pid [spawn $srun -N$node_cnt -n$tasks -O -v -t5 -l $file_prog] -set init_id $spawn_id +set match 0 +set srun_pid [spawn $srun --licenses=DUMMY_FOR_TESTING $bin_hostname] expect { - -i $init_id - -re "launching ($number).0 on host" { - set job_id $expect_out(1,string) + -re "invalid license" { + set match 1 + send_user "This error was expected, no worries\n\n" exp_continue } - -re "WAITING" { - incr matches - if {$matches < $tasks} { - exp_continue - } - } timeout { - send_user "\nFAILURE: srun (launch) not responding\n" + send_user "\nFAILURE: srun not responding\n" slow_kill $srun_pid - set exit_code 1 + exit 1 } eof { wait } } -if {$job_id == 0} { - send_user "\nFAILURE: job submit failure\n" - exit 1 -} -if {$matches == 0} { - send_user "\nFAILURE: job run time failure\n" - exit 1 +if {$match == 0} { + send_user "\nFAILURE: No error on with bad license name\n" + set exit_code 1 } # -# Attach to initial program via srun +# Test if any licenses are configured. # -set matches 0 -set timeout 10 -set attach_pid [spawn $srun -vv -l --attach=$job_id --join] -set attach_id $spawn_id +log_user 0 +set licenses "" +spawn $scontrol show config expect { - -i $attach_id - -re "WAITING" { - incr matches - send_user "\nsending exit message\n" - send -i $attach_id "exit\n" - } - timeout { - send_user "\nFAILURE: srun (attach) not responding\n" - slow_kill $attach_pid - set exit_code 1 + -re "Licenses *= ($alpha_numeric_under)" { + set licenses $expect_out(1,string) + exp_continue } eof { wait } } -if {$matches == 0} { - send_user "\nFAILURE: job run time failure\n" - set exit_code 1 +log_user 1 + +if {[string compare $licenses ""] != 0} { + set srun_pid [spawn $srun --licenses=$licenses $bin_hostname] + expect { + -re "invalid license" { + send_user "\nFAILURE: Error getting license\n" + set exit_code 1 + exp_continue + } + timeout { + send_user "\nNOTE: srun not responding\n" + send_user " This test can fail if the selected " + send_user "license is in use.\n" + set exit_code 1 + } + eof { + wait + } + } } # -# Make sure initial program terminates too +# Run a job attempting to get a legitimate license name # -# Explicitly reset spawn_id for wait call -set spawn_id $init_id -expect { - timeout { - send_user "\nFAILURE: srun (terminate) not responding\n" - set exit_code 1 - } - eof { - wait - } -} if {$exit_code == 0} { - exec $bin_rm -f $file_prog send_user "\nSUCCESS\n" -} else { - cancel_job $job_id } exit $exit_code + diff --git a/testsuite/expect/test1.18.prog.c b/testsuite/expect/test1.18.prog.c deleted file mode 100644 index ca25b635f..000000000 --- a/testsuite/expect/test1.18.prog.c +++ /dev/null @@ -1,53 +0,0 @@ - /*****************************************************************************\ - * test1.18.proc.c - Simple I/O test program for SLURM regression test1.18. - * Print "waiting\n" to stdout and wait for "exit" as stdin. 
- ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <unistd.h> - -main (int argc, char **argv) -{ - char in_line[10]; - int i; - - fprintf(stdout, "WAITING\n"); - fflush(stdout); - - for (i=0; i<sizeof(in_line); ) { - in_line[i] = getc(stdin); - if ((in_line[i] < 'a') || - (in_line[i] > 'z')) - i = 0; - else if (strncmp(in_line, "exit", 4) == 0) - exit(0); - else - i++; - } - - fprintf(stderr, "Invalid input\n"); - exit(1); -} diff --git a/testsuite/expect/test1.19 b/testsuite/expect/test1.19 index e34b769c4..b4e33b966 100755 --- a/testsuite/expect/test1.19 +++ b/testsuite/expect/test1.19 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -240,10 +240,12 @@ if { [test_bluegene] } { set task_cnt 4 } +# Note: Task count is in the script for each srun +# There is not oversubscribe option for sbatch. set job_id 0 -set srun_pid [spawn $srun --batch --output=/dev/null -N$node_cnt -n$task_cnt -O -t1 $file_in] +set srun_pid [spawn $sbatch --output=/dev/null -N$node_cnt -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test1.2 b/testsuite/expect/test1.2 index e5098fd3e..f49b9ee0f 100755 --- a/testsuite/expect/test1.2 +++ b/testsuite/expect/test1.2 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.20 b/testsuite/expect/test1.20 index ade317dfb..3f10914c1 100755 --- a/testsuite/expect/test1.20 +++ b/testsuite/expect/test1.20 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
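Several hunks in this patch (test1.19, test1.29, test1.35, test1.42, ...) replace `srun --batch` submissions with sbatch, which changes the line the tests parse for the job id from "jobid N submitted" to "Submitted batch job N". A minimal sketch of the new submission idiom, assuming the globals file provides $sbatch, $number, $max_job_delay and slow_kill, and using a hypothetical script name:

#!/usr/bin/expect
source ./globals                          ;# provides $sbatch, $number, $max_job_delay, slow_kill, ...

set job_id  0
set timeout $max_job_delay
set sbatch_pid [spawn $sbatch --output=/dev/null -t1 ./myscript.sh]   ;# myscript.sh is hypothetical
expect {
    -re "Submitted batch job ($number)" {
        set job_id $expect_out(1,string)
        exp_continue
    }
    timeout {
        send_user "\nFAILURE: sbatch not responding\n"
        slow_kill $sbatch_pid
        exit 1
    }
    eof {
        wait
    }
}
if {$job_id == 0} {
    send_user "\nFAILURE: batch submit failure\n"
    exit 1
}
send_user "\nSubmitted job $job_id\n"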
# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.21 b/testsuite/expect/test1.21 index e920d7766..9b258cae2 100755 --- a/testsuite/expect/test1.21 +++ b/testsuite/expect/test1.21 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -79,6 +79,14 @@ if {$matches != 1} { send_user "\nFAILURE: stdin to specific task_id failed\n" set exit_code 1 } + +if {[test_bluegene]} { + if {$exit_code == 0} { + send_user "\nSUCCESS\n" + } + exit $exit_code +} + if {$exit_code == 0} { send_user "\nSo far... stdin to specific task worked fine\n\n\n" } diff --git a/testsuite/expect/test1.22 b/testsuite/expect/test1.22 index 7cc93f666..69f73ee1b 100755 --- a/testsuite/expect/test1.22 +++ b/testsuite/expect/test1.22 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -84,6 +84,10 @@ set host_0 "" set timeout $max_job_delay set srun_pid [spawn $srun -N$node_cnt -n32 -O -l --threads=32 -t1 $bin_hostname] expect { + -re "Memory required by task is not available" { + send_user "\nWARNING: DefMemPerTask is configured too high for this test\n" + set host_0 "warn" + } -re "0: ($alpha_numeric)" { set host_0 $expect_out(1,string) exp_continue diff --git a/testsuite/expect/test1.23 b/testsuite/expect/test1.23 index d4de7f1c9..ab47df669 100755 --- a/testsuite/expect/test1.23 +++ b/testsuite/expect/test1.23 @@ -12,7 +12,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.24 b/testsuite/expect/test1.24 index 572e5535d..096b698a2 100755 --- a/testsuite/expect/test1.24 +++ b/testsuite/expect/test1.24 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.25 b/testsuite/expect/test1.25 index 321fd7788..35ec82df5 100755 --- a/testsuite/expect/test1.25 +++ b/testsuite/expect/test1.25 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
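Two of the hunks above soften hard failures into graceful skips: test1.21 now ends early on BlueGene systems, and test1.22 downgrades "Memory required by task is not available" to a WARNING about DefMemPerTask. A rough sketch of the early-exit pattern the testsuite uses for incompatible configurations, assuming the test_bluegene and print_header helpers from globals and a hypothetical test id:

#!/usr/bin/expect
source ./globals                          ;# provides test_bluegene, print_header, ...

set test_id   "1.x"                       ;# hypothetical test number
set exit_code 0
print_header $test_id

if {[test_bluegene]} {
    send_user "\nWARNING: remaining checks incompatible with BlueGene systems\n"
    exit $exit_code                       ;# skip the remainder rather than report FAILURE
}

# ... system-specific checks would follow here ...
send_user "\nSUCCESS\n"
exit $exit_code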
diff --git a/testsuite/expect/test1.26 b/testsuite/expect/test1.26 index 6a225fb74..51744936a 100755 --- a/testsuite/expect/test1.26 +++ b/testsuite/expect/test1.26 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -61,15 +61,15 @@ set nodelist_name "" set timeout $max_job_delay set srun_pid [spawn $srun -v -N1 -l $bin_printenv SLURMD_NODENAME] expect { - -re "on host ($alpha_numeric)," { + -re "on host ($alpha_numeric_under)," { set nodelist_name $expect_out(1,string) exp_continue } - -re "^0: ($alpha_numeric)" { + -re "^0: ($alpha_numeric_under)" { set host_0 $expect_out(1,string) exp_continue } - -re "\n0: ($alpha_numeric)" { + -re "\n0: ($alpha_numeric_under)" { set host_0 $expect_out(1,string) exp_continue } @@ -122,7 +122,7 @@ expect { -re "error: .*exit code 1" { exp_continue } - -re "0: ($alpha_numeric)" { + -re "0: ($alpha_numeric_under)" { set host_1 $expect_out(1,string) exp_continue } @@ -276,7 +276,7 @@ for {set inx 0} {$inx < $interations} {incr inx} { set failures 1 exp_continue } - -re "0: ($alpha_numeric)" { + -re "0: ($alpha_numeric_under)" { set host_0 $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test1.27 b/testsuite/expect/test1.27 index 73d96ac4a..636aa59fe 100755 --- a/testsuite/expect/test1.27 +++ b/testsuite/expect/test1.27 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.28 b/testsuite/expect/test1.28 index f4b780c4d..4dc41c5c4 100755 --- a/testsuite/expect/test1.28 +++ b/testsuite/expect/test1.28 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.29 b/testsuite/expect/test1.29 index f8d1878f0..c055e47dc 100755 --- a/testsuite/expect/test1.29 +++ b/testsuite/expect/test1.29 @@ -13,7 +13,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
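The test1.26 hunks that follow switch the hostname captures from $alpha_numeric to $alpha_numeric_under so that node names containing underscores are matched in full. The effect of the wider character class can be seen with plain Tcl; the two pattern strings below are assumptions meant to mirror what the testsuite's globals file defines:

#!/usr/bin/tclsh
# Assumed to mirror the testsuite globals; the real definitions live in ./globals.
set alpha_numeric       "\[a-zA-Z0-9\]+"
set alpha_numeric_under "\[a-zA-Z0-9_\]+"

set line "0: compute_node_123"

regexp "0: ($alpha_numeric)" $line -> short
regexp "0: ($alpha_numeric_under)" $line -> full

puts "old pattern captured: $short"   ;# "compute" - stops at the first underscore
puts "new pattern captured: $full"    ;# "compute_node_123" - the whole node name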
@@ -133,9 +133,9 @@ make_bash_script $file_in " set timeout $max_job_delay -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err -t1 ./$file_in] +set srun_pid [spawn $sbatch --output=$file_out --error=$file_err -t1 ./$file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test1.29.prog.c b/testsuite/expect/test1.29.prog.c index d47f5b78b..d6e231700 100644 --- a/testsuite/expect/test1.29.prog.c +++ b/testsuite/expect/test1.29.prog.c @@ -6,7 +6,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.3 b/testsuite/expect/test1.3 index 74623b7f8..a0f68f471 100755 --- a/testsuite/expect/test1.3 +++ b/testsuite/expect/test1.3 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.30 b/testsuite/expect/test1.30 index edb502734..c49166785 100755 --- a/testsuite/expect/test1.30 +++ b/testsuite/expect/test1.30 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.31 b/testsuite/expect/test1.31 index b1316242a..58e0e0932 100755 --- a/testsuite/expect/test1.31 +++ b/testsuite/expect/test1.31 @@ -12,7 +12,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.32 b/testsuite/expect/test1.32 index f833cb4ec..9275a8bee 100755 --- a/testsuite/expect/test1.32 +++ b/testsuite/expect/test1.32 @@ -13,7 +13,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -38,6 +38,7 @@ set test_id "1.32" set exit_code 0 set file_prog "test$test_id.prog" set matches 0 +set fini_cnt 0 set usr1cnt 0 set usr2cnt 0 @@ -100,6 +101,10 @@ expect { send_user "\nDon't worry about the error...\n" exp_continue } + -re "FINI" { + incr fini_cnt + exp_continue + } timeout { send_user "\nFAILURE: srun not responding\n" slow_kill $srun_pid @@ -122,6 +127,11 @@ if {$usr2cnt != 1} { send_user "\nFAILURE: $file_prog received $usr2cnt SIGUSR2 (not 1)\n" set exit_code 1 } +if {$fini_cnt != 1} { + send_user "\nFAILURE: srun failed to terminate properly\n" + set exit_code 1 +} + # # Post-processing diff --git a/testsuite/expect/test1.32.prog.c b/testsuite/expect/test1.32.prog.c index f5e315dbd..2f206dab8 100644 --- a/testsuite/expect/test1.32.prog.c +++ b/testsuite/expect/test1.32.prog.c @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -77,5 +77,6 @@ main (int argc, char **argv) sleep(1); } + printf("FINI\n"); exit(0); } diff --git a/testsuite/expect/test1.33 b/testsuite/expect/test1.33 index 543eed115..30511eef1 100755 --- a/testsuite/expect/test1.33 +++ b/testsuite/expect/test1.33 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.34 b/testsuite/expect/test1.34 deleted file mode 100755 index b2e6bb76f..000000000 --- a/testsuite/expect/test1.34 +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Verify that arguments get forwarded to job script (--batch option). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -# -# Note: This script generates and then deletes files in the working directory -# named test1.34.input, test1.34.output, and test1.34.error -############################################################################ -# Copyright (C) 2002 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. 
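In the test1.32 hunks above, the signal-handling test program now prints a FINI sentinel before exiting, and the Expect script counts it to confirm that srun shut down cleanly after SIGUSR1/SIGUSR2 were delivered. A reduced sketch of that sentinel-counting idiom, with a hypothetical program name and the usual globals assumed:

#!/usr/bin/expect
source ./globals                          ;# provides $srun, $max_job_delay, slow_kill, ...

set file_prog "./test1.x.prog"            ;# hypothetical binary that prints "FINI" before exiting
set fini_cnt  0
set exit_code 0

set timeout $max_job_delay
set srun_pid [spawn $srun -N1 -t1 $file_prog]
expect {
    -re "FINI" {
        incr fini_cnt
        exp_continue
    }
    timeout {
        send_user "\nFAILURE: srun not responding\n"
        slow_kill $srun_pid
        set exit_code 1
    }
    eof {
        wait
    }
}
if {$fini_cnt != 1} {
    send_user "\nFAILURE: srun failed to terminate properly ($fini_cnt FINI lines seen)\n"
    set exit_code 1
}
exit $exit_code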
-# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -############################################################################ -source ./globals - -set test_id "1.34" -set file_in "test$test_id.input" -set file_out "test$test_id.output" -set file_err "test$test_id.error" - -set arg1 "arg_one" -set arg2 "arg_two" -set arg_match 0 -set exit_code 0 -set job_id 0 - -print_header $test_id - -# -# Delete left-over input script plus stdout/err files -# Build input script file -# -exec $bin_rm -f $file_in $file_out $file_err -make_bash_script $file_in "$bin_echo \$1,\$2" - -# -# Spawn a srun batch job with arguments -# -set timeout $max_job_delay -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err -t1 $file_in $arg1 $arg2] -expect { - -re "jobid ($number) submitted" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} - -if {$job_id == 0} { - send_user "\nFAILURE: batch submit failure\n" - exit 1 -} - -# -# Wait for job to complete -# -if {[wait_for_job $job_id "DONE"] != 0} { - send_user "\nFAILURE: waiting for job to complete\n" - cancel_job $job_id - set exit_code 1 -} - -# -# Check arguments returned in stdout -# -if {[wait_for_file $file_out] == 0} { - spawn $bin_cat $file_out - expect { - -re "$arg1,$arg2" { - set arg_match 1 - exp_continue - } - eof { - wait - } - } -} - -if {$arg_match == 0} { - send_user "\nFAILURE: Failed to pass script arguments\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" - exec $bin_rm -f $file_in $file_out $file_err -} -exit $exit_code diff --git a/testsuite/expect/test1.35 b/testsuite/expect/test1.35 index b19e81a00..298c223fa 100755 --- a/testsuite/expect/test1.35 +++ b/testsuite/expect/test1.35 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -52,7 +52,6 @@ make_bash_script $file_in " for ((i = 0; i < $steps_started; i++)); do j=`expr $steps_started + 10 - \$i` $srun $bin_sleep \$j & -# $slaunch $bin_sleep \$j & $bin_sleep 1 done $bin_sleep 2 @@ -75,9 +74,9 @@ if { [test_bluegene] } { } } -set srun_pid [spawn $srun --batch -N$node_cnt --output=$file_out --error=$file_err -t1 $file_in] +set srun_pid [spawn $sbatch -N$node_cnt --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test1.36 b/testsuite/expect/test1.36 index 3adb14a9c..a5570a7be 100755 --- a/testsuite/expect/test1.36 +++ b/testsuite/expect/test1.36 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test1.37 b/testsuite/expect/test1.37 deleted file mode 100755 index a505b0a1a..000000000 --- a/testsuite/expect/test1.37 +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Confirm that node sharing flags are respected (--nodelist and -# --share options). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -############################################################################ -source ./globals - -set test_id "1.37" -set exit_code 0 -set file_err "test$test_id.error" -set file_in "test$test_id.input" -set file_out "test$test_id.output" -set job_id1 0 -set host_name "" -set nodelist_name "" - -print_header $test_id - -# -# Submit a job and get the node's NodeName from the nodelist -# -set timeout $max_job_delay -set srun_pid [spawn $srun -v -N1 -l -t1 $bin_hostname] -expect { - -re "on host ($alpha_numeric)," { - set nodelist_name $expect_out(1,string) - exp_continue - } - -re "0: ($alpha_numeric)" { - set host_name $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} -if {[string compare $nodelist_name ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - exit 1 -} -if {[test_front_end] != 0} { - send_user "\nWARNING: Additional testing is incompatable with front-end systems\n" - exit $exit_code -} - -# -# Delete left-over input script -# Build input script file -# -exec $bin_rm -f $file_in -make_bash_script $file_in "$srun $bin_sleep 5" - -# -# Submit two jobs to the same node, one with no sharing, the other -# with sharing permitted. Insure the first job completes before the -# second job is started. 
-# -set srun_pid [spawn $srun --batch -N1 --exclusive --nodelist=$nodelist_name -t1 --output=$file_out --error=$file_err $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id1 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - exit 1 - } - eof { - wait - } -} -if {$job_id1 == 0} { - send_user "\nFAILURE: srun failed to report jobid\n" - exit 1 -} - -set partition "dummy" -set waited 1 -set timeout [expr $timeout + 5] -set srun_pid [spawn $srun -N1 --nodelist=$nodelist_name -t1 --share $scontrol -o show job $job_id1] -expect { - -re "Partition=($alpha_numeric)" { - set partition $expect_out(1,string) - exp_continue - } - -re "JobState=RUN" { - set waited 0 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - exit 1 - } - eof { - wait - } -} -if {$waited == 0} { - spawn $scontrol show partition - expect { - -re "Shared=FORCE" { - send_user "\nWARNING: Test incompatable with Shared=FORCE\n" - set waited 1 - exp_continue - } - eof { - wait - } - } -} -if {$waited == 0} { - send_user "\nFAILURE: srun failed to wait for non-sharing job to complete\n" - set exit_code 1 -} - -if {$exit_code == 0} { - exec $bin_rm -f $file_err $file_in $file_out - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test1.38 b/testsuite/expect/test1.38 index 0477ac897..074f9a67a 100755 --- a/testsuite/expect/test1.38 +++ b/testsuite/expect/test1.38 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -147,6 +147,7 @@ expect { wait } } +cancel_job $job_id if {$matches != 2} { send_user "\nFAILURE: srun failed to properly process SIGINT\n" set exit_code 1 diff --git a/testsuite/expect/test1.39 b/testsuite/expect/test1.39 index dd51d690c..2a2a15cf9 100755 --- a/testsuite/expect/test1.39 +++ b/testsuite/expect/test1.39 @@ -10,7 +10,7 @@ # Copyright (C) 2004-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.39.prog.c b/testsuite/expect/test1.39.prog.c index ac36ef967..fccca258a 100644 --- a/testsuite/expect/test1.39.prog.c +++ b/testsuite/expect/test1.39.prog.c @@ -5,7 +5,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.4 b/testsuite/expect/test1.4 index eb77f9915..85389cc91 100755 --- a/testsuite/expect/test1.4 +++ b/testsuite/expect/test1.4 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.40 b/testsuite/expect/test1.40 deleted file mode 100755 index 89e870ae8..000000000 --- a/testsuite/expect/test1.40 +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of stand-alone srun resource allocation (--uid and --no-shell -# options). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-############################################################################ -source ./globals - -set test_id "1.40" -set exit_code 0 -set job_id 0 - -print_header $test_id - -# -# Submit a slurm allocate job -# -set timeout $max_job_delay -set srun_pid [spawn $srun --allocate -t1 --no-shell] -expect { - -re "SLURM_JOBID=($number).*" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} -if { $job_id == 0 } { - send_user "\nFAILURE: job_id not captured\n" - exit 1 -} - -# -# The srun should have completed, confirm the job is active -# -set found_job 0 -spawn $squeue --states=running --jobs=$job_id -expect { - -re $job_id { - set found_job 1 - exp_continue - } - timeout { - send_user "\nFAILURE: squeue not responding\n" - set exit_code 1 - } - eof { - wait - } -} -if { $found_job == 0 } { - send_user "\nFAILURE: job $job_id not in run state\n" - exit 1 -} - -# -# Kill the job -# -cancel_job $job_id - -# -# Create a job allocation as some other user, namely root -# -set job_id 0 -set srun_pid [spawn $srun --allocate -t1 --no-shell --uid=0] -expect { - -re "SLURM_JOBID=($number).*" { - set job_id $expect_out(1,string) - exp_continue - } - -re "Invalid user id" { - set job_id -1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} -if { $job_id == 0 } { - send_user "\nFAILURE: job_id not captured\n" - exit 1 -} - -if { $job_id == -1 } { - send_user "\nNo worries, this is expected for non-privileged users\n" -} else { -# -# The srun should have completed, confirm the job is active -# - set found_job 0 - spawn $squeue --states=running --jobs=$job_id --user=root - expect { - -re $job_id { - set found_job 1 - exp_continue - } - timeout { - send_user "\nFAILURE: squeue not responding\n" - set exit_code 1 - } - eof { - wait - } - } - if { $found_job == 0 } { - send_user "\nFAILURE: job $job_id not in run state\n" - exit 1 - } - cancel_job $job_id -} - -if { $exit_code == 0 } { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test1.41 b/testsuite/expect/test1.41 index 2bef5df23..ee367ee27 100755 --- a/testsuite/expect/test1.41 +++ b/testsuite/expect/test1.41 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.42 b/testsuite/expect/test1.42 index 38a01776b..430d7d5af 100755 --- a/testsuite/expect/test1.42 +++ b/testsuite/expect/test1.42 @@ -1,17 +1,17 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of account number and job dependencies (--account, --begin -# and --depedency options). +# Test of account number and job dependencies (--account, and +# --depedency options). # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2004 The Regents of the University of California. 
+# Copyright (C) 2004-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -52,9 +52,9 @@ make_bash_script $file_in "$bin_sleep 5" # Spawn a srun batch job that just sleeps for a while # set timeout $max_job_delay -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --account=MY_ACCT -t1 $file_in] +set srun_pid [spawn $sbatch --output=/dev/null --error=/dev/null --account=MY_ACCT -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } @@ -77,7 +77,7 @@ if {$job_id1 == 0} { # set match_acct 0 set match_state 0 -set srun_pid [spawn $srun -v --dependency=$job_id1 $scontrol show job $job_id1] +set srun_pid [spawn $srun -v --dependency=afterany:$job_id1 $scontrol show job $job_id1] expect { -re "launching ($number).0" { set job_id2 $expect_out(1,string) @@ -117,7 +117,7 @@ set match_acct 0 set match_jobid 0 spawn $scontrol show job $job_id2 expect { - -re "Dependency=($number)" { + -re "Dependency=afterany:($number)" { set match_jobid $expect_out(1,string) exp_continue } @@ -142,100 +142,6 @@ if {$match_jobid != $job_id1} { set exit_code 1 } -# -# Submit a job to run at noon tomorrow -# -set job_id1 0 -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --begin=noon-tomorrow $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id1 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} -if {$job_id1 == 0} { - send_user "\nFAILURE: batch submit failure\n" - exit 1 -} -exec $bin_sleep 5 -set match 0 -spawn $scontrol show job $job_id1 -expect { - -re "JobState=PENDING" { - incr match - exp_continue - } - -re "StartTime=($number)/($number)-12:00:00" { - incr match - exp_continue - } - -re "StartTime=($number)-($number)-($number)T12:00:00" { - incr match - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - exp_continue - } - eof { - wait - } -} -if {$match != 2} { - send_user "\nFAILURE: unexpected JobState or StartTime\n" - set exit_code 1 -} -# Reset start time and test for completion -spawn $scontrol update JobId=$job_id1 StartTime=now -expect { - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - exp_continue - } - eof { - wait - } -} -set delayed 0 -set is_done 0 -while { $delayed < $max_job_delay } { - exec $bin_sleep 10 - incr delayed +10 - spawn $scontrol show job $job_id1 - expect { - -re "JobState=COMPLETED" { - set is_done 1 - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - exp_continue - } - eof { - wait - } - } - if {$is_done == 1} { - break - } -} -if {$is_done == 0} { - send_user "\nFAILURE: unexpected JobState\n" - cancel_job $job_id1 - set exit_code 1 -} - if {$exit_code == 0} { exec $bin_rm -f $file_in diff --git a/testsuite/expect/test1.43 b/testsuite/expect/test1.43 index 2dfb3e417..c88b1d9ea 100755 --- a/testsuite/expect/test1.43 +++ b/testsuite/expect/test1.43 @@ -10,7 +10,7 @@ # Copyright (C) 2005 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
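The test1.42 hunks above adopt the new dependency syntax: the dependent step is launched with --dependency=afterany:<jobid> and scontrol is now expected to report Dependency=afterany:<jobid>. A small sketch of submitting a pair of jobs that way and checking the recorded field, assuming the usual globals ($sbatch, $scontrol, $number), that sbatch accepts the same --dependency syntax, and a hypothetical sleep script:

#!/usr/bin/expect
source ./globals                          ;# provides $sbatch, $scontrol, $number, ...

set file_in "sleep5.sh"                   ;# hypothetical script containing "sleep 5"
set job_id1 0
set job_id2 0
set dep     ""

spawn $sbatch --output=/dev/null -t1 ./$file_in
expect {
    -re "Submitted batch job ($number)" {
        set job_id1 $expect_out(1,string)
        exp_continue
    }
    eof { wait }
}
if {$job_id1 == 0} {
    send_user "\nFAILURE: batch submit failure\n"
    exit 1
}

# Second job must wait for the first to end in any state (afterany).
spawn $sbatch --output=/dev/null -t1 --dependency=afterany:$job_id1 ./$file_in
expect {
    -re "Submitted batch job ($number)" {
        set job_id2 $expect_out(1,string)
        exp_continue
    }
    eof { wait }
}
if {$job_id2 == 0} {
    send_user "\nFAILURE: dependent submit failure\n"
    exit 1
}

spawn $scontrol show job $job_id2
expect {
    -re "Dependency=afterany:($number)" {
        set dep $expect_out(1,string)
        exp_continue
    }
    eof { wait }
}
if {$dep != $job_id1} {
    send_user "\nFAILURE: dependency not recorded\n"
    exit 1
}
send_user "\nSUCCESS\n"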
# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.44 b/testsuite/expect/test1.44 index 1c29b0219..e66c46909 100755 --- a/testsuite/expect/test1.44 +++ b/testsuite/expect/test1.44 @@ -11,7 +11,7 @@ # Copyright (C) 2005 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Chris Morrone <morrone2@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.45 b/testsuite/expect/test1.45 deleted file mode 100755 index d6cbe2a1b..000000000 --- a/testsuite/expect/test1.45 +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test the launch of a batch job within an existing job allocation. -# This logic is used by LSF -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2005-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-############################################################################ -source ./globals - -set test_id "1.45" -set file_in "test$test_id.input" -set file_out1 "test$test_id.output1" -set file_out2 "test$test_id.output2" -set exit_code 0 -set job_id_0 0 -set job_id_1 0 -set job_id_2 0 - -print_header $test_id - -# -# Delete left-over stdout/err files -# -exec $bin_rm -f $file_in $file_out1 $file_out2 - -# -# Build input script file -# -make_bash_script $file_in " - $bin_id - $bin_sleep 20" - -# -# Spawn a srun batch job that uses stdout/err and confirm their contents -# -if { [test_bluegene] } { - set node_cnt 1-2048 -} else { - if { [test_xcpu] } { - set node_cnt 1-1 - } else { - set node_cnt 1-4 - } -} - -set timeout $max_job_delay -set srun_pid [spawn $srun -N$node_cnt -A -v -t1] -expect { - -re "jobid ($number):" { - set job_id_0 $expect_out(1,string) - send "$srun -b --jobid=$job_id_0 -o $file_out1 $file_in \n" - exp_continue - } - -re "jobid ($number).0 submitted" { - set job_id_1 $expect_out(1,string) - send "$srun -b --jobid=$job_id_0 -o $file_out2 $bin_id \n" - exp_continue - } - -re "jobid ($number).1 submitted" { - set job_id_2 $expect_out(1,string) - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - exp_continue - } - eof { - wait - } -} - -if {$job_id_0 == 0} { - send_user "\nFAILURE: job allocation failure\n" - slow_kill $srun_pid - exit 1 -} -if {($job_id_1 == 0) || ($job_id_2 == 0)} { - send_user "\nFAILURE: batch job submit failure\n" - cancel_job $job_id_0 - exit 1 -} - -if {($job_id_0 != $job_id_2) || ($job_id_1 != $job_id_2)} { - send_user "\nFAILURE: batch job did not run in existing allocation\n" - cancel_job $job_id_0 - cancel_job $job_id_1 - cancel_job $job_id_2 - exit 1 -} - -# -# Check that the job step is reported -# -set matches 0 -spawn $scontrol show step $job_id_0.0 -expect { - -re "Invalid" { - send_user "\nFAILURE: batch step not found\n" - set matches 1 - set exit_code 1 - exp_continue - } - -re "$job_id_0.0" { - set matches 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - set exit_code 1 - } - eof { - wait - } -} -if {$matches == 0} { - send_user "\nFAILURE: batch step not found\n" - set exit_code 1 -} - -# -# Check batch job step output -# -if {[wait_for_file $file_out1] == 0} { - set matches 0 - spawn $bin_cat $file_out1 - expect { - -re "uid=" { - set matches 1 - exp_continue - } - eof { - wait - } - } - if {$matches == 0} { - send_user "\nFAILURE: Job output missing\n" - set exit_code 1 - } -} -if {[wait_for_file $file_out2] == 0} { - set matches 0 - spawn $bin_cat $file_out2 - expect { - -re "uid=" { - set matches 1 - exp_continue - } - -re "srun.*command not found" { - send_user "\nWARNING: srun is not installed on this computer\n" - set matches 1 - exp_continue - } - eof { - wait - } - } - if {$matches == 0} { - send_user "\nFAILURE: Job output missing\n" - set exit_code 1 - } -} - -# -# Make sure job is still active, then cancel it -# -set matches 0 -spawn $scontrol -o show job $job_id_0 -expect { - -re "JobState=RUNNING" { - set matches 1 - exp_continue - } - eof { - wait - } -} -if {$matches == 0} { - send_user "\nFAILURE: Job not still running\n" - set exit_code 1 -} -cancel_job $job_id_0 - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" - exec $bin_rm -f $file_in $file_out1 $file_out2 -} -exit $exit_code diff --git a/testsuite/expect/test1.46 b/testsuite/expect/test1.46 index 4a2c66c2b..57abc964a 100755 --- a/testsuite/expect/test1.46 +++ 
b/testsuite/expect/test1.46 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.47 b/testsuite/expect/test1.47 deleted file mode 100755 index 7c6b8bfe1..000000000 --- a/testsuite/expect/test1.47 +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Tests #SLURM entry functionality in a batch script. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2005-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -############################################################################ -source ./globals - -set test_id "1.47" -set exit_code 0 -set file_in "test$test_id.input" -set file_out "test$test_id.output" -set job_acct "TEST_ACCT" -set job_name "TEST_NAME" -set delay 1 - -print_header $test_id - -make_bash_script $file_in " - #SLURM --job-name=$job_name - #SLURM --account=$job_acct - $bin_sleep $delay -" - -set timeout $max_job_delay -set job_id 0 -set srun_pid [spawn $srun -o $file_out -b $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - exp_continue - } - eof { - wait - } -} -if {$job_id == 0} { - send_user "\nFAILURE: batch submit failure\n" - exit 1 -} -set matches 0 -spawn $scontrol show job $job_id -expect { - -re "Name=$job_name" { - incr matches - exp_continue - } - -re "Account=$job_acct" { - incr matches - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - exp_continue - } - eof { - wait - } -} -if {$matches != 2} { - send_user "\nFAILURE: did not set job name and account from batch script\n" - set exit_code 1 -} - -# -# Build input script file -# NOTE: The initial sleep is so that all of the submissions have time -# to occur before contending with a multitude of job step creations. 
-# This is especially important on very slow systems (e.g. AIX). -# -make_bash_script $file_in " - #SLURM -N1000000k - $bin_sleep $delay -" - -set matches 0 -set srun_pid [spawn $srun -o $file_out -b $file_in] -expect { - -re "More .* requested than permitted" { - send_user "This error was expected, no worries\n\n" - incr matches - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} -if {$matches != 1} { - send_user "\nFAILURE: srun didn't read the correct options from batch file\n" - set exit_code 1 -} - -make_bash_script $file_in " - #SLURM -N650000 - $bin_sleep $delay -" - -set srun_pid [spawn $srun -N1 -o $file_out -b $file_in] -expect { - -re "More nodes requested than permitted" { - send_user "\nFAILURE: srun read from the batch file options" - send_user "over writing the commandline options\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} - -# -# Post-processing -# -if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_out - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test1.48 b/testsuite/expect/test1.48 index c2b70ef51..ac41d2995 100755 --- a/testsuite/expect/test1.48 +++ b/testsuite/expect/test1.48 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.49 b/testsuite/expect/test1.49 index a6c77fed5..fdbc7cfef 100755 --- a/testsuite/expect/test1.49 +++ b/testsuite/expect/test1.49 @@ -10,7 +10,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.5 b/testsuite/expect/test1.5 index 32ef1a90d..79427066b 100755 --- a/testsuite/expect/test1.5 +++ b/testsuite/expect/test1.5 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.50 b/testsuite/expect/test1.50 index 68fb338a8..d04076e92 100755 --- a/testsuite/expect/test1.50 +++ b/testsuite/expect/test1.50 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.51 b/testsuite/expect/test1.51 index 0a39e33c1..34528e12e 100755 --- a/testsuite/expect/test1.51 +++ b/testsuite/expect/test1.51 @@ -11,7 +11,7 @@ # Copyright (C) 2005 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.52 b/testsuite/expect/test1.52 index e67eb9cfc..b488a13df 100755 --- a/testsuite/expect/test1.52 +++ b/testsuite/expect/test1.52 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.53 b/testsuite/expect/test1.53 deleted file mode 100755 index 8f033ed3f..000000000 --- a/testsuite/expect/test1.53 +++ /dev/null @@ -1,189 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of nice value specification (--nice option). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2005 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-############################################################################ -source ./globals - -set test_id "1.53" -set exit_code 0 -set file_in "test$test_id.input" -set job_id1 0 -set job_id2 0 -set job_id3 0 -set job_prio1 0 -set job_prio2 0 -set job_prio3 0 - -print_header $test_id - -# -# Build input script file -# -make_bash_script $file_in "$bin_sleep 60" - -# -# Submit three jobs with differing nice values -# -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t2 $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id1 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - exit 1 - } - eof { - wait - } -} -if {$job_id1 == 0} { - send_user "\nFAILURE: srun submit failed\n" - exit 1 -} - -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t2 --nice $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id2 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - cancel_job $job_id1 - slow_kill $srun_pid - exit 1 - } - eof { - wait - } -} -if {$job_id2 == 0} { - send_user "\nFAILURE: srun submit failed\n" - cancel_job $job_id1 - exit 1 -} -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t2 --nice=200 $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id3 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - cancel_job $job_id1 - cancel_job $job_id2 - slow_kill $srun_pid - exit 1 - } - eof { - wait - } -} - -exec $bin_rm -f $file_in - -# -# Get the priority of each job job with scontrol -# -spawn $scontrol show job $job_id1 -expect { - -re "Priority=($number)" { - set job_prio1 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } -} -spawn $scontrol show job $job_id2 -expect { - -re "Priority=($number)" { - set job_prio2 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } -} -spawn $scontrol show job $job_id3 -expect { - -re "Priority=($number)" { - set job_prio3 $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } -} - -# -# Make sure the job priorities are as expected -# -if {$job_prio1 == 0 || $job_prio2 == 0 || $job_prio3 == 0} { - send_user "\nFAILURE: failed to job priorities of each submitted job\n" - set exit_code 1 -} else { - set diff2 [expr $job_prio1 - $job_prio2] - set diff3 [expr $job_prio1 - $job_prio3] -# Target for diff2 is 101 - if {$diff2 < 91 || $diff2 > 111} { - send_user "\nFAILURE: job2 priority delta bad $diff2\n" - set exit_code 1 - } -# Target for diff3 is 202 - if {$diff3 < 192 || $diff3 > 212} { - send_user "\nFAILURE: job3 priority delta bad $diff3\n" - set exit_code 1 - } -} - -cancel_job $job_id1 -cancel_job $job_id2 -cancel_job $job_id3 -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code - diff --git a/testsuite/expect/test1.54 b/testsuite/expect/test1.54 index 21a153bfb..cd235cf73 100755 --- a/testsuite/expect/test1.54 +++ b/testsuite/expect/test1.54 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -119,9 +119,14 @@ puts $file "# multi-program configuration file close $file exec $bin_chmod 700 $file_in +set job_id - set matches 0 -set srun_pid [spawn $srun -N1 -n4 --overcommit -l -t1 --multi-prog --debugger-test ./$file_in] +set srun_pid [spawn $srun -N1 -n4 --overcommit -l -t1 --multi-prog --debugger-test -v ./$file_in] expect { + -re "launching ($number)" { + set job_id $expect_out(1,string) + exp_continue + } -re "executable:(/bin/)($alpha)" { if {[string compare $expect_out(2,string) "date"] != 0} { incr matches @@ -147,6 +152,12 @@ if {$matches != 4} { send_user "\nFAILURE: did not generate full list of executables.\n" set exit_code 1 } +if {$job_id == 0} { + send_user "\nFAILURE: failed to get job id\n" + set exit_code 1 +} else { + cancel_job $job_id +} if {$exit_code != 0} { exit $exit_code diff --git a/testsuite/expect/test1.55 b/testsuite/expect/test1.55 index f2239f0a9..6879d7563 100755 --- a/testsuite/expect/test1.55 +++ b/testsuite/expect/test1.55 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Christopher J. Morrone <morrone2@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -57,7 +57,7 @@ make_bash_script $test_script { set timeout $max_job_delay set jobid 0 set stepid 0 -set srun_pid [spawn $srun -u -v -n1 $test_script] +set srun_pid [spawn $srun -u -v -n1 -t1 $test_script] expect { -re "launching (($number)\.0)" { set stepid $expect_out(1,string) @@ -105,14 +105,37 @@ send_user "Test sees step is gone, srun should have exited as well.\n" # # And finally check to see if srun is still hanging around (it should -# have exited by now). +# have exited by now) and job has completed # if [catch {exec kill -0 $srun_pid}] { + send_user "\nsrun command is terminated, as desired\n" + set exit_code 0 +} else { + send_user "\nFAILURE: srun is still running after job exits!\n" + set exit_code 1 +} +spawn $squeue --noheader --jobs=$jobid +expect { + -re "R" { + send_user "\nFAILURE: job not completed!\n" + set exit_code 1 + exp_continue + } + timeout { + send_user "\nFAILURE: squeue not responding\n" + set exit_code 1 + exit 1 + } + eof { + wait + } +} + +if {$exit_code == 0} { send_user "\nSUCCESS\n" exec $bin_rm -f $test_script exit 0 } else { - send_user "\nFAILURE: srun is still running after job exits!\n" - exec kill -9 $srun_pid + cancel_job $jobid exit 1 } diff --git a/testsuite/expect/test1.56 b/testsuite/expect/test1.56 index 58af95735..67e539c97 100755 --- a/testsuite/expect/test1.56 +++ b/testsuite/expect/test1.56 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Christopher J. Morrone <morrone2@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
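The test1.55 hunk in the next chunk decides whether the srun process is still alive after its job step disappears by probing it with kill -0, then confirms via squeue that the job is no longer running. In Tcl, exec kill -0 $pid raises an error when the PID no longer exists, which is why the probe is wrapped in catch; a tiny sketch with a hypothetical PID:

#!/usr/bin/tclsh
# kill -0 delivers no signal; it only reports (via exit status) whether the
# target process still exists, so a failing exec means the PID is gone.
set srun_pid 12345                        ;# hypothetical PID of a previously spawned srun

if {[catch {exec kill -0 $srun_pid}]} {
    puts "srun (pid $srun_pid) has exited, as desired"
} else {
    puts "FAILURE: srun (pid $srun_pid) is still running"
}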
@@ -59,7 +59,7 @@ close $file # Launch the test script # set timeout $max_job_delay -set srun_pid [spawn $srun -v -n1 --input=$file_in --output=$file_out --error=- cat] +set srun_pid [spawn $srun -v -n1 -t1 --input=$file_in --output=$file_out --error=- cat] expect { -re {launching ($number)\.($number)} { set jobid $expect_out(1,string) diff --git a/testsuite/expect/test1.57 b/testsuite/expect/test1.57 index 4c6916acd..e11760001 100755 --- a/testsuite/expect/test1.57 +++ b/testsuite/expect/test1.57 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.58 b/testsuite/expect/test1.58 index 917b97820..6d467811c 100755 --- a/testsuite/expect/test1.58 +++ b/testsuite/expect/test1.58 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Christopher J. Morrone <morrone2@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -42,10 +42,10 @@ set timeout $max_job_delay # Run an srun to grab a single node allocation, but not start any # job steps. # -set srun_alloc_pid [spawn $srun -v -N1 -n1 -A $bin_sleep 600] +set srun_alloc_pid [spawn $salloc -v -N1 -n1 $bin_sleep 600] set srun_alloc_sid $spawn_id expect { - -re "srun: jobid ($number)" { + -re "salloc: Granted job allocation ($number)" { set jobid $expect_out(1,string) } timeout { @@ -90,7 +90,7 @@ if {$got_pattern == 0} { } # -# Release the allocation by killing the first srun (really it kills the "sleep") +# Release the allocation by killing salloc (really it kills the "sleep") # cancel_job $jobid set spawn_id $srun_alloc_sid diff --git a/testsuite/expect/test1.59 b/testsuite/expect/test1.59 index 8796b0218..a6f7256e5 100755 --- a/testsuite/expect/test1.59 +++ b/testsuite/expect/test1.59 @@ -84,9 +84,9 @@ set node3 0 set node4 0 set timeout $max_job_delay -spawn $srun -N$num_nodes -A -v bash +spawn $salloc -N$num_nodes -v bash expect { - -re "jobid ($number):" { + -re "salloc: Granted job allocation ($number):" { set job_id $expect_out(1,string) exp_continue } @@ -173,7 +173,7 @@ for {set i 0} {$i<4} {incr i} { # send "$srun -l $extra -O $bin_printenv SLURMD_NODENAME\n" expect { - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { set task_id $expect_out(1,string) if {$task_id == 0} { set node0 $expect_out(2,string) @@ -326,7 +326,7 @@ for {set i 0} {$i<5} {incr i} { send "$srun -l $extra -O $bin_printenv SLURMD_NODENAME\n" expect { - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { set task_id $expect_out(1,string) if {$task_id == 0} { set node0 $expect_out(2,string) diff --git a/testsuite/expect/test1.6 b/testsuite/expect/test1.6 index d854161d0..5b6ffcb9f 100755 --- a/testsuite/expect/test1.6 +++ b/testsuite/expect/test1.6 @@ -13,7 +13,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.7 b/testsuite/expect/test1.7 index af3f663cf..9bca53f85 100755 --- a/testsuite/expect/test1.7 +++ b/testsuite/expect/test1.7 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.8 b/testsuite/expect/test1.8 index 270ee7b56..e46a26558 100755 --- a/testsuite/expect/test1.8 +++ b/testsuite/expect/test1.8 @@ -14,7 +14,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.80 b/testsuite/expect/test1.80 index 1cf18dfdb..404781af5 100755 --- a/testsuite/expect/test1.80 +++ b/testsuite/expect/test1.80 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.81 b/testsuite/expect/test1.81 index 9bddb103b..bd48d432e 100755 --- a/testsuite/expect/test1.81 +++ b/testsuite/expect/test1.81 @@ -12,7 +12,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.82 b/testsuite/expect/test1.82 index 84616961b..e91a3f388 100755 --- a/testsuite/expect/test1.82 +++ b/testsuite/expect/test1.82 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.83 b/testsuite/expect/test1.83 index 6da63e7b0..a5ec1a9b4 100755 --- a/testsuite/expect/test1.83 +++ b/testsuite/expect/test1.83 @@ -16,7 +16,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -74,7 +74,7 @@ expect { send_user "\nWARNING: can't test srun task distribution\n" exit $exit_code } - -re "($number): ($alpha)($number)" { + -re "($number): ($alpha_numeric_under)($number)" { set task_id $expect_out(1,string) if {$task_id == 0} { set host_0_name $expect_out(2,string) @@ -90,7 +90,7 @@ expect { } exp_continue } - -re "($number): ($alpha)" { + -re "($number): ($alpha_numeric_under)" { set task_id $expect_out(1,string) if {$task_id == 0} { set host_0_name $expect_out(2,string) diff --git a/testsuite/expect/test1.84 b/testsuite/expect/test1.84 index 15051b731..e23d88f01 100755 --- a/testsuite/expect/test1.84 +++ b/testsuite/expect/test1.84 @@ -16,7 +16,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.85 b/testsuite/expect/test1.85 deleted file mode 100755 index dbf0fc2fa..000000000 --- a/testsuite/expect/test1.85 +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of partition specification on job submission (--partition -# option). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-############################################################################ -source ./globals - -set test_id "1.85" -set def_part_name "" -set exit_code 0 -set file_in "test$test_id.input" -set job_id 0 -set other_part_name "" - -print_header $test_id - -# -# Identify the partitions in the cluster, identifying the default -# -spawn $sinfo --summarize -expect { - -re "($end_of_line)($alpha_numeric)(\[ \*\]) *up" { - if (![string compare $expect_out(3,string) "*"]) { - set def_part_name $expect_out(2,string) - } else { - set other_part_name $expect_out(2,string) - } - exp_continue - } - -re "Unable to contact" { - send_user "\nFAILURE: slurm appears to be down\n" - exit 1 - } - timeout { - send_user "\nFAILURE: sinfo not responding\n" - set exit_code 1 - } - eof { - wait - } -} - -# -# Build input script file -# -make_bash_script $file_in "$srun $bin_sleep $max_job_delay" - -# -# Submit a batch job explicitly to the default partition -# -set job_id 0 -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --hold --partition=$def_part_name -t1 $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} -# Confirm the job's partition -if {$job_id == 0} { - send_user "\nFAILURE: batch submit failure\n" - set exit_code 1 -} else { - set read_part "" - spawn $scontrol show job $job_id - expect { - -re "Partition=($alpha_numeric)" { - set read_part $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } - } - if ([string compare $read_part $def_part_name]) { - send_user "\nFAILURE: Improper partition selected\n" - set exit_code 1 - } - cancel_job $job_id -} - -# -# Test if a non-default partition exists, terminate if none -# -if (![string compare $other_part_name ""]) { - send_user "\nWARNING: can't test srun partition option" - send_user " only the default partition exists\n" - exec $bin_rm -f $file_in - exit $exit_code -} - -# -# Submit job explicitly to a non-default partition -# -set job_id 0 -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --hold --partition=$other_part_name -t1 $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - set exit_code 1 - } - eof { - wait - } -} -exec $bin_rm -f $file_in -# Confirm the job's partition -if {$job_id == 0} { - send_user "\nFAILURE: batch submit failure\n" - set exit_code 1 -} else { - set read_part "" - spawn $scontrol show job $job_id - expect { - -re "Partition=($alpha_numeric)" { - set read_part $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: scontrol not responding\n" - set exit_code 1 - } - eof { - wait - } - } - if ([string compare $read_part $other_part_name]) { - send_user "\nFAILURE: Improper partition selected\n" - set exit_code 1 - } - cancel_job $job_id -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test1.86 b/testsuite/expect/test1.86 index 434858e24..380e80997 100755 --- a/testsuite/expect/test1.86 +++ b/testsuite/expect/test1.86 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -36,7 +36,7 @@ source ./globals set test_id "1.86" set exit_code 0 set file_in "test$test_id.input" -set prompt "SLURM_QA_PROMPT: " +set prompt "PROMPT: " print_header $test_id @@ -62,7 +62,7 @@ make_bash_script $file_in " # Submit a 2 node job # set timeout $max_job_delay -set srun_pid [spawn $srun -N2 -A $file_in] +set salloc_pid [spawn $salloc -N2 -t1 ./$file_in] expect { -re "More ($alpha) requested than permitted" { send_user "\nWARNING: can't test srun task distribution\n" @@ -78,7 +78,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -115,7 +115,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -163,7 +163,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -200,7 +200,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -244,7 +244,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -281,7 +281,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -318,7 +318,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -352,7 +352,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -391,7 +391,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -423,7 +423,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -448,7 +448,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { @@ -474,7 +474,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } eof { diff --git a/testsuite/expect/test1.87 b/testsuite/expect/test1.87 index be5f10717..9442883ed 100755 --- a/testsuite/expect/test1.87 +++ b/testsuite/expect/test1.87 @@ -12,7 +12,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -57,7 +57,7 @@ make_bash_script $file_in " # Submit a 4 node job # set timeout $max_job_delay -set srun_pid [spawn $srun -N4 -A $file_in] +set srun_pid [spawn $salloc -N4 ./$file_in] expect { -re "More ($alpha) requested than permitted" { send_user "\nWARNING: can't test srun task distribution\n" diff --git a/testsuite/expect/test1.88 b/testsuite/expect/test1.88 index b23295b91..d10421f8f 100755 --- a/testsuite/expect/test1.88 +++ b/testsuite/expect/test1.88 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -102,13 +102,13 @@ make_bash_script $file_in " " # -# Spawn a srun batch job that uses stdout/err and confirm their contents +# Spawn an sbatch job that uses stdout/err and confirm their contents # set timeout $max_job_delay set no_start 0 -set srun_pid [spawn $srun -N3 -n6 --batch --output=$file_out --error=$file_err -t1 $file_in] +set sbatch_pid [spawn $sbatch -N3 -n6 --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } @@ -126,7 +126,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { @@ -176,7 +176,7 @@ if {[wait_for_file $file_out] == 0} { } } if {$matches == 0} { - send_user "\nFAILURE: No MPI communications occured\n" + send_user "\nFAILURE: No MPI communications occurred\n" send_user " The version of MPI you are using may be incompatible " send_user "with the configured switch\n" send_user " Core files may be present from failed MPI tasks\n\n" diff --git a/testsuite/expect/test1.88.prog.c b/testsuite/expect/test1.88.prog.c index 09ab2a7e7..e56cb74be 100644 --- a/testsuite/expect/test1.88.prog.c +++ b/testsuite/expect/test1.88.prog.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Dong Ang <dahn@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.89 b/testsuite/expect/test1.89 index c314a4f41..7c561700a 100755 --- a/testsuite/expect/test1.89 +++ b/testsuite/expect/test1.89 @@ -11,7 +11,7 @@ # Copyright (C) 2005 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -64,13 +64,13 @@ send_user "\ntask affinity plugin installed\n" # Build a test program to report affinity by task # exec $bin_rm -f $file_prog -exec $bin_make -f /dev/null $file_prog +exec $bin_cc -I$build_dir $file_prog.c -o $file_prog exec $bin_chmod 700 $file_prog # # Create an allocation # -set srun_pid [spawn $srun --allocate -N1 --verbose -t2] +set salloc_pid [spawn $salloc -N1 --verbose -t2 $bin_bash] # # Run a job step to get allocated processor count and affinity @@ -91,9 +91,9 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" - slow_kill $srun_pid + slow_kill $salloc_pid exit 1 } -re $prompt @@ -116,7 +116,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 } @@ -143,7 +143,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 } @@ -166,7 +166,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 } @@ -196,7 +196,7 @@ while {$cpu_cnt < $task_cnt} { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not " + send_user "\nFAILURE: salloc not " send_user "responding or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -230,7 +230,7 @@ while {$cpu_cnt < $task_cnt} { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not " + send_user "\nFAILURE: salloc not " send_user "responding or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -306,7 +306,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -334,7 +334,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -362,7 +362,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -390,7 +390,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -418,7 +418,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -446,7 +446,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -468,9 +468,9 @@ expect { set exit_code 1 } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " 
send_user "or failure to recognize prompt\n" - slow_kill $srun_pid + slow_kill $salloc_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test1.89.prog.c b/testsuite/expect/test1.89.prog.c index bcd2c40ec..2a1824921 100644 --- a/testsuite/expect/test1.89.prog.c +++ b/testsuite/expect/test1.89.prog.c @@ -6,7 +6,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -32,7 +32,7 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> -#include "../../config.h" +#include "config.h" static void _load_mask(cpu_set_t *mask) { diff --git a/testsuite/expect/test1.9 b/testsuite/expect/test1.9 index a16f027c7..f6002c958 100755 --- a/testsuite/expect/test1.9 +++ b/testsuite/expect/test1.9 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.90 b/testsuite/expect/test1.90 index 374978e9f..e7b6538e4 100755 --- a/testsuite/expect/test1.90 +++ b/testsuite/expect/test1.90 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -89,7 +89,7 @@ exec $bin_chmod 700 $file_prog # # Create an allocation # -set srun_pid [spawn $srun --allocate -N1 --exclusive --verbose -t2] +set salloc_pid [spawn $salloc -N1 --exclusive --verbose -t2 $bin_bash] # # Run a job step to get allocated processor count and affinity @@ -110,7 +110,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 } @@ -135,7 +135,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 } @@ -163,7 +163,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 } @@ -186,7 +186,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 } @@ -215,7 +215,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -242,7 +242,7 @@ while {$cpu_cnt < $task_cnt} { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not " + send_user "\nFAILURE: salloc not " send_user "responding or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -276,7 +276,7 @@ while {$cpu_cnt < $task_cnt} { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not " + send_user "\nFAILURE: salloc not " send_user "responding or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -352,7 +352,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -380,7 +380,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -408,7 +408,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -436,7 +436,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -464,7 +464,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -492,7 +492,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " send_user "or failure to recognize prompt\n" set exit_code 1 exp_continue @@ -514,9 +514,9 @@ expect { set exit_code 1 } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding " + send_user "\nFAILURE: salloc not responding " 
send_user "or failure to recognize prompt\n" - slow_kill $srun_pid + slow_kill $salloc_pid set exit_code 1 } eof { @@ -530,7 +530,6 @@ if {$exit_code == 0} { } else { send_user "\nNOTE: This test can fail if the node configuration in slurm.conf \n" send_user " (sockets, cores, threads) differs from the actual configuration\n" - } exit $exit_code diff --git a/testsuite/expect/test1.90.prog.c b/testsuite/expect/test1.90.prog.c index b24730c3f..b6f9a2c12 100644 --- a/testsuite/expect/test1.90.prog.c +++ b/testsuite/expect/test1.90.prog.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.91 b/testsuite/expect/test1.91 index a8f375a6a..71c5a832a 100755 --- a/testsuite/expect/test1.91 +++ b/testsuite/expect/test1.91 @@ -11,7 +11,7 @@ # Copyright (C) 2005-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -93,7 +93,7 @@ send_user "Node config: Sockets=$num_sockets Cores=$num_cores Threads=$num_threa # Build a test program to report affinity by task # exec $bin_rm -f $file_prog -exec $bin_make -f /dev/null $file_prog +exec $bin_cc -I$build_dir $file_prog.c -o $file_prog exec $bin_chmod 700 $file_prog # @@ -411,6 +411,9 @@ expect { if {$exit_code == 0} { exec $bin_rm -f $file_prog send_user "\nSUCCESS\n" +} else { + send_user "\nNOTE: This test can fail if the node configuration in slurm.conf \n" + send_user " (sockets, cores, threads) differs from the actual configuration\n" } exit $exit_code diff --git a/testsuite/expect/test1.91.prog.c b/testsuite/expect/test1.91.prog.c index bcd2c40ec..ad98f3ec8 100644 --- a/testsuite/expect/test1.91.prog.c +++ b/testsuite/expect/test1.91.prog.c @@ -1,12 +1,12 @@ /*****************************************************************************\ - * test1.89.prog.c - Simple test program for SLURM regression test1.89. + * test1.91.prog.c - Simple test program for SLURM regression test1.91. * Reports SLURM task ID and the CPU mask, * similar functionality to "taskset" command ***************************************************************************** * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -32,7 +32,7 @@ #include <stdio.h> #include <stdlib.h> #include <string.h> -#include "../../config.h" +#include "config.h" static void _load_mask(cpu_set_t *mask) { diff --git a/testsuite/expect/test1.92 b/testsuite/expect/test1.92 index 1abef4fa8..a4f1719a9 100755 --- a/testsuite/expect/test1.92 +++ b/testsuite/expect/test1.92 @@ -11,7 +11,7 @@ # Copyright (C) 2005 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test1.93 b/testsuite/expect/test1.93 new file mode 100755 index 000000000..6ff305d4c --- /dev/null +++ b/testsuite/expect/test1.93 @@ -0,0 +1,156 @@ +#!/usr/bin/expect +############################################################################ +# Purpose: Test of SLURM functionality +# Test of LAM-MPI functionality +# +# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "WARNING: ..." with an explanation of why the test can't be made, OR +# "FAILURE: ..." otherwise with an explanation of the failure, OR +# anything else indicates a failure mode that must be investigated. +############################################################################ +# Copyright (C) 2008 Lawrence Livermore National Security. +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# Written by Morris Jette <jette1@llnl.gov> +# LLNL-CODE-402394. +# +# This file is part of SLURM, a resource management program. +# For details, see <http://www.llnl.gov/linux/slurm/>. +# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+############################################################################ +source ./globals + +set test_id "1.93" +set exit_code 0 +set file_in "test$test_id.input" +set prompt "PROMPT: " + +print_header $test_id + +if {[test_front_end] != 0} { + send_user "\nWARNING: This test is incompatible with front-end systems\n" + exit 0 +} + +# +# Build input script file +# +make_bash_script $file_in " + export PS1=\"$prompt\" + $bin_bash --norc +" + +# +# Submit a 2 node job +# +set timeout $max_job_delay +set salloc_pid [spawn $salloc -t1 -n2 ./$file_in] +expect { + -re "More ($alpha) requested than permitted" { + send_user "\nWARNING: can't test srun task distribution\n" + exec $bin_rm -f $file_in + exit $exit_code + } + -re "Unable to contact" { + send_user "\nFAILURE: slurm appears to be down\n" + exit 1 + } + "$prompt" { + send_user "Job initiated\n" + } + timeout { + send_user "\nFAILURE: srun not responding\n" + slow_kill $salloc_pid + exit 1 + } + eof { + wait + send_user "\nFAILURE: srun terminated\n" + exit 1 + } +} +exec $bin_rm -f $file_in + +# +# Get node names +# +set host_0 "" +set host_1 "" +send "$srun -l --mpi=lam $bin_hostname\n" +expect { + -re "($number): ($alpha_numeric)" { + set host_inx $expect_out(1,string) + if {$host_inx == 0} { + set host_0 $expect_out(2,string) + } + if {$host_inx == 1} { + set host_1 $expect_out(2,string) + } + exp_continue + } + -re "Unable to contact" { + send_user "\nFAILURE: slurm appears to be down\n" + exit 1 + } + "$prompt" { + send_user "srun completed\n\n" + } + timeout { + send_user "\nFAILURE: srun not responding\n" + slow_kill $salloc_pid + exit 1 + } + eof { + send_user "\nFAILURE: srun EOF\n" + exit 1 + } +} + +# +# Verify node count +# +if {[string compare $host_0 ""] == 0} { + send_user "\nFAILURE: no response from task zero\n" + set exit_code 1 +} +if {[string compare $host_0 $host_1] == 0} { + send_user "\nFAILURE: multiple tasks ran on a single node with --mpi=lam option\n" + set exit_code 1 +} + +# +# Post-processing +# +send "exit\n" +expect { + -re "error" { + send_user "\nFAILURE: Some error occurred\n" + set exit_code 1 + exp_continue + } + timeout { + send_user "\nFAILURE: srun not responding\n" + slow_kill $salloc_pid + exit 1 + } + eof { + wait + } +} + +if {$exit_code == 0} { + send_user "\nSUCCESS\n" +} +exit $exit_code diff --git a/testsuite/expect/test10.1 b/testsuite/expect/test10.1 index 27941c819..cabb50915 100755 --- a/testsuite/expect/test10.1 +++ b/testsuite/expect/test10.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.10 b/testsuite/expect/test10.10 index 3eb202c65..6124c5ed8 100755 --- a/testsuite/expect/test10.10 +++ b/testsuite/expect/test10.10 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test10.11 b/testsuite/expect/test10.11 index 42adfd023..290638010 100755 --- a/testsuite/expect/test10.11 +++ b/testsuite/expect/test10.11 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.12 b/testsuite/expect/test10.12 index 3ce11585b..09dabf14b 100755 --- a/testsuite/expect/test10.12 +++ b/testsuite/expect/test10.12 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -49,7 +49,7 @@ if { [test_bluegene] == 0 } { # spawn $smap --resolve 000 expect { - -re "must be on BG SN to resolve." { + -re "Must be on BG" { set non_bg 1 exp_continue; } diff --git a/testsuite/expect/test10.13 b/testsuite/expect/test10.13 index 5f5739f0c..4f3a5d38a 100755 --- a/testsuite/expect/test10.13 +++ b/testsuite/expect/test10.13 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.2 b/testsuite/expect/test10.2 index 0cd448d34..85106edd7 100755 --- a/testsuite/expect/test10.2 +++ b/testsuite/expect/test10.2 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.3 b/testsuite/expect/test10.3 index 6fd734719..fe21a8dc4 100755 --- a/testsuite/expect/test10.3 +++ b/testsuite/expect/test10.3 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.4 b/testsuite/expect/test10.4 index bc03bbd5c..fbf8b1990 100755 --- a/testsuite/expect/test10.4 +++ b/testsuite/expect/test10.4 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test10.5 b/testsuite/expect/test10.5 index d48da0b53..563e009a3 100755 --- a/testsuite/expect/test10.5 +++ b/testsuite/expect/test10.5 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.6 b/testsuite/expect/test10.6 index 038e9331b..8e42cf2cd 100755 --- a/testsuite/expect/test10.6 +++ b/testsuite/expect/test10.6 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.7 b/testsuite/expect/test10.7 index 1434afaad..143edfd6c 100755 --- a/testsuite/expect/test10.7 +++ b/testsuite/expect/test10.7 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.8 b/testsuite/expect/test10.8 index ba3d6e920..7a53961ac 100755 --- a/testsuite/expect/test10.8 +++ b/testsuite/expect/test10.8 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test10.9 b/testsuite/expect/test10.9 index d10cd780f..9cb315ca9 100755 --- a/testsuite/expect/test10.9 +++ b/testsuite/expect/test10.9 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test11.1 b/testsuite/expect/test11.1 index 80521b7a8..a67bf6692 100755 --- a/testsuite/expect/test11.1 +++ b/testsuite/expect/test11.1 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test11.2 b/testsuite/expect/test11.2 index 75b668198..9012250c4 100755 --- a/testsuite/expect/test11.2 +++ b/testsuite/expect/test11.2 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test11.3 b/testsuite/expect/test11.3 index 71edfa253..af784f1ed 100755 --- a/testsuite/expect/test11.3 +++ b/testsuite/expect/test11.3 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test11.4 b/testsuite/expect/test11.4 index cf5ccef64..10e119cff 100755 --- a/testsuite/expect/test11.4 +++ b/testsuite/expect/test11.4 @@ -12,7 +12,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test11.5 b/testsuite/expect/test11.5 index 243487b30..ae5c79c62 100755 --- a/testsuite/expect/test11.5 +++ b/testsuite/expect/test11.5 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -50,16 +50,16 @@ make_bash_script $file_in " # # Submit a job so we have something to work with # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t2 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t2 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" if {$job_id == 0} { - slow_kill $srun_pid + slow_kill $sbatch_pid } else { cancel_job $job_id } diff --git a/testsuite/expect/test11.6 b/testsuite/expect/test11.6 index 4bb373f8d..afa4414df 100755 --- a/testsuite/expect/test11.6 +++ b/testsuite/expect/test11.6 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test11.7 b/testsuite/expect/test11.7 index df544a280..abd36558f 100755 --- a/testsuite/expect/test11.7 +++ b/testsuite/expect/test11.7 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test12.1 b/testsuite/expect/test12.1 index 5c9f538c9..4e704b3d2 100755 --- a/testsuite/expect/test12.1 +++ b/testsuite/expect/test12.1 @@ -10,7 +10,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -44,7 +44,7 @@ print_header $test_id spawn $sacct --help expect { - -re "SLURM accounting is disabled" { + -re "SLURM accounting storage is disabled" { set not_support 1 exp_continue } diff --git a/testsuite/expect/test12.2 b/testsuite/expect/test12.2 index e931af639..30f006010 100755 --- a/testsuite/expect/test12.2 +++ b/testsuite/expect/test12.2 @@ -10,7 +10,7 @@ # Copyright (C) 2005 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -55,17 +55,27 @@ if {[test_bluegene] != 0} { # # Check if accounting is enabled # -set supported 0 +set supported_gather 0 +set supported_storage 1 log_user 0 spawn $scontrol show config expect { - -re "JobAcctType *= jobacct/linux" { - set supported 1 + -re "AccountingStorageType *= accounting_storage/none" { + set supported_storage 0 exp_continue } - -re "JobAcctType *= jobacct/aix" { - set supported 1 + -re "JobAcctGatherType *= jobacct_gather/linux" { + set supported_gather 1 + exp_continue + } + -re "JobAcctGatherType *= jobacct_gather/aix" { + set supported_gather 1 set aix 1 + send_user "AIX doesn't do a great job tracking memory usage,\n" + send_user "as long as some memory number was\n" + send_user "returned we are ok since users are looking for\n" + send_user "mostly abnormalities between tasks instead\n" + send_user "of actual memory usage.\n" exp_continue } eof { @@ -73,8 +83,12 @@ expect { } } log_user 1 -if {$supported == 0} { - send_user "\nWARNING: job accounting not configured on this system\n" +if {$supported_gather == 0} { + send_user "\nWARNING: Job accounting information not gathered on this system\n" + exit 0 +} +if {$supported_storage == 0} { + send_user "\nWARNING: Job accounting information not stored on this system\n" exit 0 } @@ -95,15 +109,15 @@ make_bash_script $file_in " # Usage: test12.2.prog <exit_code> <sleep_secs> <mem_kb> # set timeout [expr $max_job_delay + $sleep_time] -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err -t2 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -255,7 +269,7 @@ set diff_time [expr $elapsed_time - $sleep_time] set error_time [expr abs($diff_time)] if {$error_time > 5} { send_user "\nFAILURE: sacct elapsed time discrepancy of $error_time secs\n" - send_user " Wanted $sleep_time secs, got $error_time secs\n" + send_user " Wanted $sleep_time secs, got $elapsed_time secs\n" set exit_code 1 } else { send_user "\nSUCCESS: sacct elapsed time discrepancy of $error_time secs\n" diff --git a/testsuite/expect/test12.2.prog.c b/testsuite/expect/test12.2.prog.c index 1f1550422..a87085124 100644 --- a/testsuite/expect/test12.2.prog.c +++ b/testsuite/expect/test12.2.prog.c @@ -5,7 +5,7 @@ * Copyright (C) 2005 The Regents of the University of California. 
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -31,7 +31,7 @@ main (int argc, char **argv) { - int exit_code, i, sleep_time, mem_kb; + int exit_code, sleep_time, mem_kb; char *mem; if (argc != 4) { @@ -46,10 +46,11 @@ main (int argc, char **argv) mem_kb = atoi(argv[3]); mem = malloc(mem_kb * 1024); - /* We need to actually use the memory for - * AIX to the allocation */ - for (i=0; i<(mem_kb * 1024); i++) - mem[i] = i%64; + /* need to do a memset on the memory or AIX will not give it + * to me! + */ + memset(mem, 0, (mem_kb * 1024)); sleep(sleep_time); + free(mem); exit(exit_code); } diff --git a/testsuite/expect/test13.1 b/testsuite/expect/test13.1 index 0e9299632..dfe37a3cb 100755 --- a/testsuite/expect/test13.1 +++ b/testsuite/expect/test13.1 @@ -10,7 +10,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -92,8 +92,8 @@ set timeout $max_job_delay set job_id 0 set matches 0 -set srun_pid [spawn $srun --allocate -N1 --verbose -t2] -expect -re "jobid ($number).*" +set srun_pid [spawn $salloc -N1 --verbose -t2 $bin_bash] +expect -re "Granted job allocation ($number)" set job_id $expect_out(1,string) # start initial job step to claim some switch windows @@ -113,7 +113,7 @@ for {set inx 0} {$inx < $windows_iterations} {incr inx} { send "$srun -N1 -O -n$windows_used true\n" } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding\n" + send_user "\nFAILURE: salloc not responding\n" slow_kill $srun_pid exit 1 } @@ -138,7 +138,7 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun (from --allocate) not responding\n" + send_user "\nFAILURE: salloc not responding\n" slow_kill $srun_pid exit 1 } diff --git a/testsuite/expect/test14.1 b/testsuite/expect/test14.1 index 7122f3202..fed1099e9 100755 --- a/testsuite/expect/test14.1 +++ b/testsuite/expect/test14.1 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test14.2 b/testsuite/expect/test14.2 index 24d5f0d3a..564f091e3 100755 --- a/testsuite/expect/test14.2 +++ b/testsuite/expect/test14.2 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test14.3 b/testsuite/expect/test14.3 index f8d985032..1037ca76d 100755 --- a/testsuite/expect/test14.3 +++ b/testsuite/expect/test14.3 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test14.4 b/testsuite/expect/test14.4 index 9af3b58ae..8c7b3636c 100755 --- a/testsuite/expect/test14.4 +++ b/testsuite/expect/test14.4 @@ -13,7 +13,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -77,18 +77,18 @@ make_bash_script $file_in " " # -# Spawn a srun batch job that uses stdout/err and confirm their contents +# Spawn an sbatch job that uses stdout/err and confirm their contents # set timeout $max_job_delay -set srun_pid [spawn $srun -N1-4 --batch --output=$file_out --error=$file_err -t1 $file_in] +set sbatch_pid [spawn $sbatch -N1-4 --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test14.5 b/testsuite/expect/test14.5 index 20a6a9d25..4cdc56176 100755 --- a/testsuite/expect/test14.5 +++ b/testsuite/expect/test14.5 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -69,18 +69,18 @@ make_bash_script $file_in " " # -# Spawn a srun batch job that uses stdout/err and confirm their contents +# Spawn an sbatch job that uses stdout/err and confirm their contents # set timeout $max_job_delay -set srun_pid [spawn $srun -N1-1 --batch --output=$file_out --error=$file_err -t1 $file_in] +set sbatch_pid [spawn $sbatch -N1 --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test14.6 b/testsuite/expect/test14.6 index 90f64d18d..3f5e82c96 100755 --- a/testsuite/expect/test14.6 +++ b/testsuite/expect/test14.6 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -61,18 +61,18 @@ make_bash_script $file_in " " # -# Spawn a srun batch job that uses stdout/err and confirm their contents +# Spawn an sbatch job that uses stdout/err and confirm their contents # set timeout $max_job_delay -set srun_pid [spawn $srun -N1-1 --batch --output=$file_out --error=$file_err -t1 $file_in] +set sbatch_pid [spawn $sbatch -N1 --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test14.7 b/testsuite/expect/test14.7 index a296a63e4..a66eb24da 100755 --- a/testsuite/expect/test14.7 +++ b/testsuite/expect/test14.7 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -74,18 +74,18 @@ make_bash_script $file_in " " # -# Spawn a srun batch job that uses stdout/err and confirm their contents +# Spawn an sbatch job that uses stdout/err and confirm their contents # set timeout $max_job_delay -set srun_pid [spawn $srun -N1 --batch --output=$file_out --error=$file_err -t1 $file_in] +set sbatch_pid [spawn $sbatch -N1 --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test14.8 b/testsuite/expect/test14.8 index 9fe39f60b..967c95717 100755 --- a/testsuite/expect/test14.8 +++ b/testsuite/expect/test14.8 @@ -14,7 +14,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -75,18 +75,18 @@ make_bash_script $file_in " " # -# Spawn a srun batch job that uses stdout and confirm its contents +# Spawn an sbatch job that uses stdout and confirm its contents # set timeout $max_job_delay -set srun_pid [spawn $srun -N1-4 --batch --output=$file_out -t1 $file_in] +set sbatch_pid [spawn $sbatch -N1-4 --output=$file_out -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test15.1 b/testsuite/expect/test15.1 index 97085562e..e94ca8302 100755 --- a/testsuite/expect/test15.1 +++ b/testsuite/expect/test15.1 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test15.10 b/testsuite/expect/test15.10 index 8261a4461..511b2020e 100755 --- a/testsuite/expect/test15.10 +++ b/testsuite/expect/test15.10 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.11 b/testsuite/expect/test15.11 index d0833377f..845f0b2e6 100755 --- a/testsuite/expect/test15.11 +++ b/testsuite/expect/test15.11 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.12 b/testsuite/expect/test15.12 index e16f9e97b..2b95da58a 100755 --- a/testsuite/expect/test15.12 +++ b/testsuite/expect/test15.12 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.13 b/testsuite/expect/test15.13 index 033091378..3015134dc 100755 --- a/testsuite/expect/test15.13 +++ b/testsuite/expect/test15.13 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.14 b/testsuite/expect/test15.14 index e28c4ba7d..b59f3867a 100755 --- a/testsuite/expect/test15.14 +++ b/testsuite/expect/test15.14 @@ -8,10 +8,10 @@ # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2004-2006 The Regents of the University of California. +# Copyright (C) 2004-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -76,7 +76,7 @@ if {$job_id1 == 0} { # set match_acct 0 set match_state 0 -set salloc_pid [spawn $salloc --dependency=$job_id1 $slaunch $scontrol show job $job_id1] +set salloc_pid [spawn $salloc --dependency=afterany:$job_id1 $srun $scontrol show job $job_id1] expect { -re "Granted job allocation ($number)" { set job_id2 $expect_out(1,string) @@ -119,7 +119,7 @@ set match_acct 0 set match_jobid 0 spawn $scontrol show job $job_id2 expect { - -re "Dependency=($number)" { + -re "Dependency=afterany:($number)" { set match_jobid $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test15.15 b/testsuite/expect/test15.15 index 2a90c1d8d..bb310ec97 100755 --- a/testsuite/expect/test15.15 +++ b/testsuite/expect/test15.15 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.16 b/testsuite/expect/test15.16 index f2f08d16e..8c03be953 100755 --- a/testsuite/expect/test15.16 +++ b/testsuite/expect/test15.16 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.17 b/testsuite/expect/test15.17 index 1f47dc50e..2adfadf81 100755 --- a/testsuite/expect/test15.17 +++ b/testsuite/expect/test15.17 @@ -11,7 +11,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.18 b/testsuite/expect/test15.18 index ef6dbbe86..7c40c9fde 100755 --- a/testsuite/expect/test15.18 +++ b/testsuite/expect/test15.18 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.19 b/testsuite/expect/test15.19 index 4a2c9ae94..54c93c518 100755 --- a/testsuite/expect/test15.19 +++ b/testsuite/expect/test15.19 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -46,7 +46,7 @@ set job_id 0 set host_0 "" set task_cnt 0 set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1-1 -t1 $slaunch -c 1 -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N1-1 -t1 $srun -c 1 -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) @@ -59,7 +59,7 @@ expect { } exp_continue } - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$task_cnt == 0} { set host_0 $expect_out(1,string) } @@ -90,7 +90,7 @@ if {[string compare $host_0 ""] == 0} { set alloc_fail 0 set job_id 0 set task_cnt2 0 -set salloc_pid [spawn $salloc -N1-1 -w $host_0 -t1 $slaunch -n [expr $task_cnt + 1] -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N1-1 -w $host_0 -t1 $srun -n [expr $task_cnt + 1] -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) @@ -133,13 +133,13 @@ set job_id 0 set host_0 "" set host_1 "" -set salloc_pid [spawn $salloc -N1-1 -t1 $slaunch -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N1-1 -t1 $srun -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) exp_continue } - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } @@ -185,13 +185,13 @@ set host_1 "" set host_2 "" set host_3 "" set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1-3 -t1 $slaunch -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N1-3 -t1 $srun -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) exp_continue } - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } @@ -254,7 +254,7 @@ set host_1 "" set host_2 "" set host_3 "" set timeout $max_job_delay -set salloc_pid [spawn $salloc -N2-3 -t1 $slaunch -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N2-3 -t1 $srun -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) @@ -264,7 +264,7 @@ expect { send_user "\nWARNING: can't test salloc task distribution\n" exit $exit_code } - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } diff --git a/testsuite/expect/test15.2 b/testsuite/expect/test15.2 index c61c60a2e..fd397cbf8 100755 --- a/testsuite/expect/test15.2 +++ b/testsuite/expect/test15.2 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.20 b/testsuite/expect/test15.20 index 567ff0508..171387509 100755 --- a/testsuite/expect/test15.20 +++ b/testsuite/expect/test15.20 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. 
# For details, see <http://www.llnl.gov/linux/slurm/>. @@ -65,7 +65,7 @@ if {$available < $node_cnt} { exit $exit_code } -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N$node_cnt -t1 $srun -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) @@ -80,7 +80,7 @@ expect { send_user "WARNING: partition too small for test\n" exit 0 } - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } @@ -155,7 +155,7 @@ set host_1 "" set host_2 "" set job_id 0 set timeout $max_job_delay -set salloc_pid [spawn $salloc -N2 -t1 --exclude=$exclude_node $slaunch -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N2 -t1 --exclude=$exclude_node $srun -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) @@ -168,7 +168,7 @@ expect { exit 0 } - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } @@ -227,13 +227,13 @@ set host_0 "" set host_1 "" set job_id 0 set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 --nodelist=$include_node $slaunch -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N1 -t1 --nodelist=$include_node $srun -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) exp_continue } - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } diff --git a/testsuite/expect/test15.21 b/testsuite/expect/test15.21 index 37d47f7d2..05cd8bce8 100755 --- a/testsuite/expect/test15.21 +++ b/testsuite/expect/test15.21 @@ -15,7 +15,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -63,7 +63,7 @@ set host_1_num 0 set host_2_num 0 set job_id 0 set timeout $max_job_delay -set salloc_pid [spawn $salloc -N3-3 --contiguous --immediate -t1 $slaunch -l $bin_printenv SLURMD_NODENAME] +set salloc_pid [spawn $salloc -N3-3 --contiguous --immediate -t1 $srun -l $bin_printenv SLURMD_NODENAME] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) @@ -73,7 +73,7 @@ expect { send_user "\nWARNING: can't test salloc task distribution\n" exit $exit_code } - -re "($number): ($alpha)($number)" { + -re "($number): ($alpha_numeric_under)($number)" { set task_id $expect_out(1,string) if {$task_id == 0} { set host_0_name $expect_out(2,string) @@ -89,7 +89,7 @@ expect { } exp_continue } - -re "($number): ($alpha)" { + -re "($number): ($alpha_numeric_under)" { set task_id $expect_out(1,string) if {$task_id == 0} { set host_0_name $expect_out(2,string) @@ -107,11 +107,7 @@ expect { cancel_job $job_id } slow_kill [expr 0 - $salloc_pid] - if {$waiting == 1} { - send_user "\nWARNING: salloc still not responding, quiting\n" - } else { - send_user "\nFAILURE: salloc not responding\n" - } + send_user "\nFAILURE: salloc not responding\n" exit 1 } eof { diff --git a/testsuite/expect/test15.22 b/testsuite/expect/test15.22 index 74823cb37..63aaaa4b4 100755 --- a/testsuite/expect/test15.22 +++ b/testsuite/expect/test15.22 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.23 b/testsuite/expect/test15.23 index 5e81434a5..3ef5b70a6 100755 --- a/testsuite/expect/test15.23 +++ b/testsuite/expect/test15.23 @@ -1,7 +1,7 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Verify environment variables controlling sallocare processed: +# Verify environment variables controlling salloc are processed: # SALLOC_ACCOUNT, SALLOC_DEBUG and SALLOC_TIMELIMIT # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -48,12 +48,12 @@ set env(SALLOC_DEBUG) 2 set env(SALLOC_TIMELIMIT) 2 # -# Spawn a job via slaunch using these environment variables +# Spawn a job via salloc using these environment variables # set matches 0 set salloc_pid [spawn $salloc -N1 $bin_bash] expect { - -re "debug: Entering _msg_thr_create" { + -re "debug: Entering _msg_thr_internal" { incr matches exp_continue } @@ -62,8 +62,9 @@ expect { send "$scontrol show job $job_id\n" exp_continue } - -re "TimeLimit=($number)" { - if {$expect_out(1,string) == 2} { + -re "TimeLimit=($number):($number):" { + set time_limit [expr $expect_out(1,string) * 60 + $expect_out(2,string)] + if {$time_limit == 2} { incr matches } else { send_user "FAILURE: TimeLimit not set\n" diff --git a/testsuite/expect/test15.24 b/testsuite/expect/test15.24 index 4f64f8509..21ba71c8e 100755 --- a/testsuite/expect/test15.24 +++ b/testsuite/expect/test15.24 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -54,7 +54,7 @@ make_bash_script $file_in " set timeout $max_job_delay set matches 0 set tasks 0 -spawn $salloc --tasks=$task_cnt --overcommit -N1 -t1 $file_in +spawn $salloc --ntasks=$task_cnt --overcommit -N1 -t1 ./$file_in expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) diff --git a/testsuite/expect/test15.3 b/testsuite/expect/test15.3 index 0a04fc078..9598d575a 100755 --- a/testsuite/expect/test15.3 +++ b/testsuite/expect/test15.3 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.4 b/testsuite/expect/test15.4 index 584459b3b..515a1f132 100755 --- a/testsuite/expect/test15.4 +++ b/testsuite/expect/test15.4 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -71,7 +71,7 @@ if {$got_login_grps == 0} { # set timeout $max_job_delay set job_id 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch $bin_id] +set salloc_pid [spawn $salloc -N1 -t1 $srun $bin_id] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) diff --git a/testsuite/expect/test15.5 b/testsuite/expect/test15.5 index a9fe64296..84a5f0d86 100755 --- a/testsuite/expect/test15.5 +++ b/testsuite/expect/test15.5 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test15.6 b/testsuite/expect/test15.6 index 2ce58a43f..9c002d753 100755 --- a/testsuite/expect/test15.6 +++ b/testsuite/expect/test15.6 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.7 b/testsuite/expect/test15.7 index 862b7b3e5..ee9a6e126 100755 --- a/testsuite/expect/test15.7 +++ b/testsuite/expect/test15.7 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.8 b/testsuite/expect/test15.8 index 1060aa71b..077d3694e 100755 --- a/testsuite/expect/test15.8 +++ b/testsuite/expect/test15.8 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test15.9 b/testsuite/expect/test15.9 index 74899a386..f7cc37cff 100755 --- a/testsuite/expect/test15.9 +++ b/testsuite/expect/test15.9 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -61,10 +61,10 @@ expect { break } } - send "exit 0\n" + send "exit 4\n" exp_continue } - -re "\[Ee\]xit 0" { + -re "\[Ee\]xit 4" { set match 1 exp_continue } @@ -86,7 +86,7 @@ expect { # Confirm the job_ids match. # if { $job_id == 0 } { - send_user "\nFAILURE: salloc --allocate failure\n" + send_user "\nFAILURE: salloc failure\n" set exit_code 1 } else { if { $job_id != $slurm_jobid } { diff --git a/testsuite/expect/test16.1 b/testsuite/expect/test16.1 index dba33284a..ad2a8d1eb 100755 --- a/testsuite/expect/test16.1 +++ b/testsuite/expect/test16.1 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test16.2 b/testsuite/expect/test16.2 index 22f2445c4..bcc989381 100755 --- a/testsuite/expect/test16.2 +++ b/testsuite/expect/test16.2 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. 
# For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test16.3 b/testsuite/expect/test16.3 index a3a2693fd..91dc0efaa 100755 --- a/testsuite/expect/test16.3 +++ b/testsuite/expect/test16.3 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test16.4 b/testsuite/expect/test16.4 index 5be31b491..eede3faef 100755 --- a/testsuite/expect/test16.4 +++ b/testsuite/expect/test16.4 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -56,7 +56,7 @@ exec $bin_chmod 700 $file_prog # Spawn initial program via srun # set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1-4 -t2 $slaunch -n4 --overcommit $file_prog] +set salloc_pid [spawn $salloc -N1-4 -t2 $srun -n4 --overcommit $file_prog] set init_id $spawn_id expect { -i $init_id diff --git a/testsuite/expect/test16.4.prog.c b/testsuite/expect/test16.4.prog.c index ca25b635f..323926345 100644 --- a/testsuite/expect/test16.4.prog.c +++ b/testsuite/expect/test16.4.prog.c @@ -5,7 +5,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.1 b/testsuite/expect/test17.1 index f16bedf0f..f9c4dde8d 100755 --- a/testsuite/expect/test17.1 +++ b/testsuite/expect/test17.1 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.10 b/testsuite/expect/test17.10 index c8d0c72c6..e50a538e2 100755 --- a/testsuite/expect/test17.10 +++ b/testsuite/expect/test17.10 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.11 b/testsuite/expect/test17.11 index 61f8b5597..2978fbb93 100755 --- a/testsuite/expect/test17.11 +++ b/testsuite/expect/test17.11 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. 
# For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.12 b/testsuite/expect/test17.12 index ca4f20a2c..da5499ef6 100755 --- a/testsuite/expect/test17.12 +++ b/testsuite/expect/test17.12 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.13 b/testsuite/expect/test17.13 index ae34b3346..30f7b2952 100755 --- a/testsuite/expect/test17.13 +++ b/testsuite/expect/test17.13 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.14 b/testsuite/expect/test17.14 index d7c1b581c..c13b6ce7d 100755 --- a/testsuite/expect/test17.14 +++ b/testsuite/expect/test17.14 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.15 b/testsuite/expect/test17.15 index 963088281..2bfa3b89a 100755 --- a/testsuite/expect/test17.15 +++ b/testsuite/expect/test17.15 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -44,7 +44,7 @@ set job_id 0 set limit_core 943 set limit_fsize 274515 set limit_nofile 1016 -set limit_nproc 123 +set limit_nproc 1230 set limit_stack 2021 set matches 0 @@ -117,11 +117,11 @@ if {$cur_stack != -1} { # Spawn a job via sbatch to print environment variables and user limits # make_bash_script $file_in " - ulimit -c $limit_core - ulimit -f $limit_fsize - ulimit -n $limit_nofile - ulimit -u $limit_nproc - ulimit -s $limit_stack + ulimit -S -c $limit_core + ulimit -S -f $limit_fsize + ulimit -S -n $limit_nofile + ulimit -S -u $limit_nproc + ulimit -S -s $limit_stack ./$file_prog_get " diff --git a/testsuite/expect/test17.15.prog.c b/testsuite/expect/test17.15.prog.c index f49966329..520d92799 100644 --- a/testsuite/expect/test17.15.prog.c +++ b/testsuite/expect/test17.15.prog.c @@ -6,7 +6,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test17.16 b/testsuite/expect/test17.16 index 6a2345802..63d9498a8 100755 --- a/testsuite/expect/test17.16 +++ b/testsuite/expect/test17.16 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.17 b/testsuite/expect/test17.17 index 5f99e433d..105e0b642 100755 --- a/testsuite/expect/test17.17 +++ b/testsuite/expect/test17.17 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -50,11 +50,11 @@ print_header $test_id set timeout $max_job_delay set srun_pid [spawn $srun -v -N1 -l -t1 $bin_hostname] expect { - -re "on host ($alpha_numeric)," { + -re "on host ($alpha_numeric_under)," { set nodelist_name $expect_out(1,string) exp_continue } - -re "0: ($alpha_numeric)" { + -re "0: ($alpha_numeric_under)" { set host_name $expect_out(1,string) exp_continue } @@ -113,7 +113,7 @@ set waited 1 set timeout [expr $timeout + 5] set srun_pid [spawn $srun -N1 --nodelist=$nodelist_name -t1 --share $scontrol -o show job $job_id1] expect { - -re "Partition=($alpha_numeric)" { + -re "Partition=($alpha_numeric_under)" { set partition $expect_out(1,string) exp_continue } @@ -124,6 +124,7 @@ expect { timeout { send_user "\nFAILURE: srun not responding\n" slow_kill $srun_pid + cancel_job $job_id1 exit 1 } eof { @@ -148,6 +149,7 @@ if {$waited == 0} { set exit_code 1 } +cancel_job $job_id1 if {$exit_code == 0} { exec $bin_rm -f $file_err $file_in $file_out send_user "\nSUCCESS\n" diff --git a/testsuite/expect/test17.18 b/testsuite/expect/test17.18 index 3a23344af..f6c03097b 100755 --- a/testsuite/expect/test17.18 +++ b/testsuite/expect/test17.18 @@ -8,10 +8,10 @@ # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2004-2006 The Regents of the University of California. +# Copyright (C) 2004-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -82,7 +82,7 @@ make_bash_script $file_in "$scontrol show job $job_id1" set match_acct 0 set match_state 0 set timeout 30 -spawn $sbatch --dependency=$job_id1 --output=$file_out $file_in +spawn $sbatch --dependency=afterany:$job_id1 --output=$file_out $file_in expect { -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) @@ -101,8 +101,11 @@ if {$job_id2 == 0} { # if {[wait_for_job $job_id2 "DONE"] != 0} { send_user "\nFAILURE: waiting for job to complete\n" + cancel_job $job_id2 + cancel_job $job_id1 exit 1 } +cancel_job $job_id1 # # Inspect the job's output file @@ -113,7 +116,8 @@ if {[wait_for_file $file_out] != 0} { spawn $bin_cat $file_out expect { - -re "JobState=COMPLETED" { +# Could be COMPLETED or COMPLETING + -re "JobState=COMPLET" { set match_state 1 exp_continue } @@ -142,7 +146,7 @@ set match_acct 0 set match_jobid 0 spawn $scontrol show job $job_id2 expect { - -re "Dependency=($number)" { + -re "Dependency=afterany:($number)" { set match_jobid $expect_out(1,string) exp_continue } @@ -185,6 +189,7 @@ if {$job_id1 == 0} { send_user "\nFAILURE: batch submit failure\n" exit 1 } + exec $bin_sleep 5 set match 0 spawn $scontrol show job $job_id1 @@ -214,6 +219,7 @@ if {$match != 2} { send_user "\nFAILURE: unexpected JobState or StartTime\n" set exit_code 1 } + # Reset start time and test for completion spawn $scontrol update JobId=$job_id1 StartTime=now expect { diff --git a/testsuite/expect/test17.19 b/testsuite/expect/test17.19 index 5edc0b5b9..1be95bb18 100755 --- a/testsuite/expect/test17.19 +++ b/testsuite/expect/test17.19 @@ -11,7 +11,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -47,7 +47,7 @@ exec $bin_rm -f $file_in make_bash_script $file_in "$bin_hostname" # -# Spawn a srun batch job that uses stdout/err and confirm their contents +# Spawn a batch job that uses stdout/err and confirm their contents # set timeout $max_job_delay if { [test_bluegene] } { @@ -60,9 +60,9 @@ if { [test_bluegene] } { } } -set srun_pid [spawn $srun -N$node_cnt -A -v -t1] +set salloc_pid [spawn $salloc -N$node_cnt -v -t1 $bin_bash] expect { - -re "jobid ($number):" { + -re "Granted job allocation ($number)" { set job_id_1 $expect_out(1,string) send "$sbatch --jobid=$job_id_1 -o none -e none $file_in \n" exp_continue @@ -79,7 +79,7 @@ expect { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $salloc_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test17.2 b/testsuite/expect/test17.2 index 88979f6e4..c10ca2aad 100755 --- a/testsuite/expect/test17.2 +++ b/testsuite/expect/test17.2 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.20 b/testsuite/expect/test17.20 index 934e92836..f95f02a23 100755 --- a/testsuite/expect/test17.20 +++ b/testsuite/expect/test17.20 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -96,8 +96,9 @@ if {$job_id == 0} { # Wait for job to complete # if {[wait_for_job $job_id "DONE"] != 0} { - send_user "\nFAILURE: waiting for job to complete\n" - set exit_code 1 + send_user "\nFAILURE: waiting for job to complete\n" + cancel_job $job_id + exit 1 } if {[wait_for_file $file_out] != 0} { diff --git a/testsuite/expect/test17.21 b/testsuite/expect/test17.21 index e4a7a005f..099e10fa2 100755 --- a/testsuite/expect/test17.21 +++ b/testsuite/expect/test17.21 @@ -10,7 +10,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.22 b/testsuite/expect/test17.22 index 6c50acfe5..b97ef0ce0 100755 --- a/testsuite/expect/test17.22 +++ b/testsuite/expect/test17.22 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.23 b/testsuite/expect/test17.23 index 6db886574..9eb4646f8 100755 --- a/testsuite/expect/test17.23 +++ b/testsuite/expect/test17.23 @@ -10,7 +10,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.24 b/testsuite/expect/test17.24 index baad68234..7a1a57d3d 100755 --- a/testsuite/expect/test17.24 +++ b/testsuite/expect/test17.24 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.25 b/testsuite/expect/test17.25 index cc81950cd..f09f79ca7 100755 --- a/testsuite/expect/test17.25 +++ b/testsuite/expect/test17.25 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -56,7 +56,7 @@ make_bash_script $file_in " " # -# Spawn a job via slaunch using these environment variables +# Spawn a job via sbatch using these environment variables # set matches 0 spawn $sbatch -N1 --output=none $file_in @@ -99,4 +99,5 @@ if {$exit_code == 0} { exec $bin_rm -f $file_in send_user "\nSUCCESS\n" } +cancel_job $job_id exit $exit_code diff --git a/testsuite/expect/test17.26 b/testsuite/expect/test17.26 index 5898ee19f..99fb08cbb 100755 --- a/testsuite/expect/test17.26 +++ b/testsuite/expect/test17.26 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -51,7 +51,7 @@ close $fd make_bash_script $file_script "$bin_cat" # -# Spawn a job via slaunch using these environment variables +# Spawn a job via sbatch using these environment variables # set matches 0 spawn $sbatch -N1 --input=$file_in --output=$file_out $file_script diff --git a/testsuite/expect/test17.27 b/testsuite/expect/test17.27 index f936989e2..97b7e48bc 100755 --- a/testsuite/expect/test17.27 +++ b/testsuite/expect/test17.27 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -69,7 +69,7 @@ if {$available < $node_cnt} { # # Build input script file # -make_bash_script $file_in "$slaunch -l $bin_printenv SLURMD_NODENAME" +make_bash_script $file_in "$srun -l $bin_printenv SLURMD_NODENAME" # # Run job to determine what nodes are available @@ -95,9 +95,14 @@ expect { wait } } +if {$job_id == 0} { + send_user "\nFAILURE: job not submitted\n" + exit 1 +} if {[wait_for_job $job_id "DONE"] != 0} { send_user "\nFAILURE: job did not complete\n" - set exit_code 1 + cancel_job $job_id + exit 1 } if {[wait_for_file $file_out] != 0} { send_user "\nFAILURE: no output file\n" @@ -105,7 +110,7 @@ if {[wait_for_file $file_out] != 0} { } spawn $bin_cat $file_out expect { - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } @@ -196,7 +201,7 @@ if {[wait_for_file $file_out] != 0} { } spawn $bin_cat $file_out expect { - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } @@ -272,7 +277,7 @@ if {[wait_for_file $file_out] != 0} { spawn $bin_cat $file_out expect { - -re "($number): ($alpha_numeric)" { + -re "($number): ($alpha_numeric_under)" { if {$expect_out(1,string) == 0} { set host_0 $expect_out(2,string) } diff --git a/testsuite/expect/test17.28 b/testsuite/expect/test17.28 index 8921309be..ca9c67733 100755 --- a/testsuite/expect/test17.28 +++ b/testsuite/expect/test17.28 @@ -10,7 +10,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. 
# For details, see <http://www.llnl.gov/linux/slurm/>. @@ -91,6 +91,7 @@ if {$matches != 2} { send_user "\nFAILURE: did not set job name and account from batch script\n" set exit_code 1 } +cancel_job $job_id # # Build input script file @@ -102,7 +103,7 @@ make_bash_script $file_in " #SBATCH -N1000000k $bin_sleep $delay " - +set job_id 0 set matches 0 spawn $sbatch -o $file_out $file_in expect { @@ -111,6 +112,10 @@ expect { incr matches exp_continue } + -re "Submitted batch job ($number)" { + set job_id $expect_out(1,string) + exp_continue + } timeout { send_user "\nFAILURE: sbatch not responding\n" set exit_code 1 @@ -124,12 +129,18 @@ if {$matches != 1} { send_user "\nFAILURE: sbatch didn't read the correct options from batch file\n" set exit_code 1 } +if {$job_id != 0} { + send_user "\nFAILURE: sbatch didn't reject job with invalid size\n" + cancel_job $job_id + set exit_code 1 +} make_bash_script $file_in " #SBATCH -N650000 $bin_sleep $delay " +set job_id 0 spawn $sbatch -N1 -o $file_out $file_in expect { -re "More nodes requested than permitted" { @@ -138,6 +149,10 @@ expect { set exit_code 1 exp_continue } + -re "Submitted batch job ($number)" { + set job_id $expect_out(1,string) + exp_continue + } timeout { send_user "\nFAILURE: sbatch not responding\n" set exit_code 1 @@ -147,6 +162,11 @@ expect { wait } } +if {$job_id == 0} { + send_user "\nFAILURE: sbatch didn't reject job with invalid size\n" + cancel_job $job_id + set exit_code 1 +} # # Post-processing diff --git a/testsuite/expect/test17.29 b/testsuite/expect/test17.29 index 9d4d783a8..ae7f97446 100755 --- a/testsuite/expect/test17.29 +++ b/testsuite/expect/test17.29 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -80,6 +80,7 @@ if {$job_id == 0} { # if {[wait_for_job $job_id "DONE"] != 0} { send_user "\nFAILURE: waiting for job to complete\n" + cancel_job $job_id set exit_code 1 } diff --git a/testsuite/expect/test17.3 b/testsuite/expect/test17.3 index ff88f3c37..9455d10ce 100755 --- a/testsuite/expect/test17.3 +++ b/testsuite/expect/test17.3 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.31 b/testsuite/expect/test17.31 index 681f2e6b7..4f9daf676 100755 --- a/testsuite/expect/test17.31 +++ b/testsuite/expect/test17.31 @@ -10,7 +10,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -146,12 +146,16 @@ expect { wait } } - - if {$matches != 3} { send_user "\nFAILURE: sbatch didn't read the correct options from batch file\n" set exit_code 1 } +if {$job_id == 0} { + send_user "\nFAILURE: sbatch failed to submit the job\n" + set exit_code 1 +} else { + cancel_job $job_id +} # # Post-processing diff --git a/testsuite/expect/test17.32 b/testsuite/expect/test17.32 index 0a408b019..22e0c1726 100755 --- a/testsuite/expect/test17.32 +++ b/testsuite/expect/test17.32 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -76,6 +76,7 @@ expect { # if {[wait_for_job $job_id "DONE"] != 0} { send_user "\nFAILURE: waiting for job to complete\n" + cancel_job $job_id set exit_code 1 } set matches 0 diff --git a/testsuite/expect/test18.5 b/testsuite/expect/test17.33 similarity index 54% rename from testsuite/expect/test18.5 rename to testsuite/expect/test17.33 index d1606bf0f..c26cc08f9 100755 --- a/testsuite/expect/test18.5 +++ b/testsuite/expect/test17.33 @@ -1,18 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Confirm that slauch local stdin, stdout, and stderr options work -# (options --slaunch-input, --slaunch-output and --slaunch-error -# respectively). +# Test of sbatch --open-mode (truncate or append) option. # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2002 The Regents of the University of California. +# Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -29,18 +27,16 @@ # # You should have received a copy of the GNU General Public License along # with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
############################################################################ source ./globals -set test_id "18.5" +set test_id "17.33" set file_in "test$test_id.input" set file_out "test$test_id.output" set file_err "test$test_id.error" - set exit_code 0 set login_grp_info "" -set job_grp_info "" set got_job_grps 0 set got_login_grps 0 set got_sleep_err 0 @@ -52,60 +48,62 @@ print_header $test_id # Build stdin file # exec $bin_rm -f $file_in $file_out $file_err -exec echo "$bin_id" >$file_in -exec echo "$bin_sleep aaa" >>$file_in -exec echo "exit 0" >>$file_in -exec $bin_chmod 700 $file_in +make_bash_script $file_in " + $bin_id + $bin_sleep aaa + exit 0" +exec echo "$bin_echo INITIAL_VALUE" >$file_err +exec echo "$bin_echo INITIAL_VALUE" >$file_out # -# Spawn a shell via slaunch that uses stdin/out/err and confirm their contents +# Get user id and group id for comparison with stdout # -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --slaunch-input=$file_in --slaunch-output=$file_out --slaunch-error=$file_err $bin_bash] +spawn $bin_id expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) + -re "(uid=$number)" { + set login_grp_info $expect_out(1,string) + set got_login_grps 1 exp_continue } - -re "Unable to contact" { - send_user "\nFAILURE: slurm appears to be down\n" - exit 1 - } - timeout { - send_user "\nFAILURE: salloc/slaunch not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } eof { wait } } # -# Check user id and group id in stdout +# Spawn a shell via srun that uses stdin/out/err in truncate mode +# and confirm their contents # -spawn $bin_id +set job_id 0 +set srun_pid [spawn $sbatch --output=$file_out --error=$file_err --open-mode=t -t1 $file_in] expect { - -re "(uid=.*\n)" { - set login_grp_info $expect_out(1,string) - set got_login_grps 1 + -re "Submitted batch job ($number)" { + set job_id $expect_out(1,string) exp_continue } eof { wait } } +if {$job_id == 0} { + send_user "\nFAILURE: sbatch failed to submit job\n" + exit 1 +} +if {[wait_for_job $job_id "DONE"] != 0} { + send_user "\nFAILURE: waiting for job to complete\n" + cancel_job $job_id + exit 1 +} if {[wait_for_file $file_out] == 0} { spawn $bin_cat $file_out expect { - -re "(uid=.*\n)" { - set job_grp_info $expect_out(1,string) - set got_job_grps 1 + -re "INITIAL_VALUE" { + send_user "\nFAILURE: stdout file not truncated\n" + set exit_code 1 + } + -re "$login_grp_info" { + incr got_job_grps exp_continue } eof { @@ -118,14 +116,10 @@ if {$got_login_grps == 0} { send_user "\nFAILURE: Unable to get user and group ID info\n" set exit_code 1 } -if {$got_job_grps == 0} { +if {$got_job_grps != 1} { send_user "\nFAILURE: User and group ID info missing from stdout\n" set exit_code 1 } -if {[string compare $login_grp_info $job_grp_info] != 0} { - send_user "\nFAILURE: Login and slurm user info mismatch\n" - set exit_code 1 -} # # Check for sleep input specification error in stderr @@ -133,13 +127,17 @@ if {[string compare $login_grp_info $job_grp_info] != 0} { if {[wait_for_file $file_err] == 0} { spawn $bin_cat $file_err expect { + -re "INITIAL_VALUE" { + send_user "\nFAILURE: stderr file not truncated\n" + set exit_code 1 + } -re "$sleep_error_message" { send_user "\nNo worries, this error is expected\n" - set got_sleep_err 1 + incr got_sleep_err exp_continue } -re "Specify time as a positive integer.*\n" { - set got_sleep_err 1 + incr got_sleep_err exp_continue } eof { @@ -147,47 
+145,85 @@ if {[wait_for_file $file_err] == 0} { } } } -if {$got_sleep_err == 0} { +if {$got_sleep_err != 1} { send_user "\nFAILURE: Unexpected stderr contents\n" set exit_code 1 } +if {$exit_code != 0} { + exit $exit_code +} + + # -# Spawn a program to run for a while with no input, output or error +# Spawn a shell via srun that uses stdin/out/err in append mode +# and confirm their contents # set job_id 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --slaunch-input=none --slaunch-output=none --slaunch-error=none $bin_od -c $srun] +set srun_pid [spawn $sbatch --output=$file_out --error=$file_err --open-mode=a -t1 $file_in] expect { - -re "Granted job allocation ($number)" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } - -re "Unable to contact" { - send_user "\nFAILURE: slurm appears to be down\n" - exit 1 - } - -re "Terminated" { - send_user "\nFAILURE: srun failed to complete\n" - set exit_code 1 - exp_continue - } - -re "job exceeded timelimit" { - send_user "\nFAILURE: srun failed to complete\n" - set exit_code 1 - exp_continue + eof { + wait } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id +} +if {$job_id == 0} { + send_user "\nFAILURE: sbatch failed to submit job\n" + exit 1 +} +if {[wait_for_job $job_id "DONE"] != 0} { + send_user "\nFAILURE: waiting for job to complete\n" + cancel_job $job_id + exit 1 +} + +set got_job_grps 0 +if {[wait_for_file $file_out] == 0} { + spawn $bin_cat $file_out + expect { + -re "$login_grp_info" { + incr got_job_grps + exp_continue + } + eof { + wait } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 } - eof { - wait +} +if {$got_job_grps != 2} { + send_user "\nFAILURE: User and group ID info missing from stdout\n" + set exit_code 1 +} + +# +# Check for sleep input specification error in stderr +# +set got_sleep_err 0 +if {[wait_for_file $file_err] == 0} { + spawn $bin_cat $file_err + expect { + -re "$sleep_error_message" { + send_user "\nNo worries, this error is expected\n" + incr got_sleep_err + exp_continue + } + -re "Specify time as a positive integer.*\n" { + incr got_sleep_err + exp_continue + } + eof { + wait + } } } +if {$got_sleep_err != 2} { + send_user "\nFAILURE: Unexpected stderr contents\n" + set exit_code 1 +} + if {$exit_code == 0} { exec $bin_rm -f $file_in $file_out $file_err diff --git a/testsuite/expect/test17.4 b/testsuite/expect/test17.4 index 77006a48a..1c65f51f4 100755 --- a/testsuite/expect/test17.4 +++ b/testsuite/expect/test17.4 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test17.5 b/testsuite/expect/test17.5 index 5dce3b47d..e8a72451a 100755 --- a/testsuite/expect/test17.5 +++ b/testsuite/expect/test17.5 @@ -11,7 +11,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -34,6 +34,7 @@ source ./globals set test_id "17.5" set file_in "test$test_id.input" +set file_script "test$test_id.bash" set file_out "test$test_id.output" set file_err "test$test_id.error" set file_out_j "test$test_id.j.%j.output" @@ -46,13 +47,66 @@ print_header $test_id # Delete left-over stdin/out/err files # Build stdin file # -exec $bin_rm -f $file_in $file_out $file_err +exec $bin_rm -f $file_in $file_script $file_out $file_err make_bash_script $file_in " $bin_id $bin_sleep aaa exit 0 " +set cwd "[$bin_pwd]" +make_bash_script $file_script " + cd /tmp + $sbatch -N1 --workdir=$cwd --output=$file_out --error=$file_err -t1 $cwd/$file_in + exit 0 +" + +# +# Spawn a shell via sbatch that submits from a different directory and uses stdout/err +# and confirm their contents +# +set job_id 0 +spawn $file_script +expect { + -re "Submitted batch job ($number)" { + set job_id $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: sbatch not responding\n" + set exit_code 1 + exp_continue + } + eof { + wait + } +} +if { $job_id == 0 } { + send_user "\nFAILURE: failed to submit job\n" + exit 1 +} + +# +# Wait for job to complete +# +if {[wait_for_job $job_id "DONE"] != 0} { + send_user "\nFAILURE: waiting for job to complete\n" + set exit_code 1 +} + +# +# Check for desired output +# +if {[wait_for_file $file_out] != 0} { + send_user "\nFAILURE: Missing stdout\n" + set exit_code 1 +} +if {[wait_for_file $file_err] != 0} { + send_user "\nFAILURE: Missing stderr\n" + set exit_code 1 +} +exec $bin_rm -f $file_script $file_out $file_err + # # Spawn a shell via sbatch that uses stdout/err and confirm their contents # @@ -227,7 +281,7 @@ if { [file exists $file_err2] } { } if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_out $file_err + exec $bin_rm -f $file_in $file_script $file_out $file_err send_user "\nSUCCESS\n" } exit $exit_code diff --git a/testsuite/expect/test17.6 b/testsuite/expect/test17.6 index ef1df747e..29b9f0b13 100755 --- a/testsuite/expect/test17.6 +++ b/testsuite/expect/test17.6 @@ -1,7 +1,7 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Confirm that a job executes with the proper task count (--tasks +# Confirm that a job executes with the proper task count (--ntasks # option). # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -45,7 +45,7 @@ print_header $test_id # Submit a slurm job that will execute 'id' on $task_cnt tasks (or try anyway) # file delete $file_in $file_out -make_bash_script $file_in "$slaunch $bin_id" +make_bash_script $file_in "$srun $bin_id" set job_id 0 set no_run 0 spawn $sbatch --ntasks=$task_cnt --output=$file_out -t1 $file_in diff --git a/testsuite/expect/test17.7 b/testsuite/expect/test17.7 index f2a66d8eb..1e68235fc 100755 --- a/testsuite/expect/test17.7 +++ b/testsuite/expect/test17.7 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -33,9 +33,11 @@ source ./globals set test_id "17.7" +set cwd "[$bin_pwd]" set exit_code 0 +set file_err "test$test_id.error" set file_in "test$test_id.input" -set file_out "test$test_id.output" +set file_out "$cwd/test$test_id.output" set tmp_dir "/tmp" print_header $test_id @@ -44,16 +46,17 @@ print_header $test_id # Delete left-over stdin/out/err files # Build stdin file # -exec $bin_rm -f $file_in $file_out +exec $bin_rm -f $file_in $file_out $file_err make_bash_script $file_in " $bin_pwd + $bin_cat /no/such/file " # # Submit a slurm job that will execute 'pwd' # set job_id 0 -spawn $sbatch -N1 --output=$file_out --workdir=$tmp_dir -t1 $file_in +spawn $sbatch -N1 --error=$file_err --output=$file_out --workdir=$tmp_dir -t1 $file_in expect { -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) @@ -89,6 +92,10 @@ if {[wait_for_file $file_out] == 0} { set matches 1 exp_continue } + -re "No such" { + send_user "\nFAILURE: sbatch stderr in stdout file\n" + set exit_code 1 + } eof { wait } @@ -99,8 +106,13 @@ if {$matches != 1} { set exit_code 1 } +if [file exists $file_err] { + send_user "\nFAILURE: sbatch failed to create error file in working directory\n" + set exit_code 1 +} + if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_out + exec $bin_rm -f $file_in $file_out $file_err send_user "\nSUCCESS\n" } exit $exit_code diff --git a/testsuite/expect/test17.8 b/testsuite/expect/test17.8 index 10349c324..d17db4023 100755 --- a/testsuite/expect/test17.8 +++ b/testsuite/expect/test17.8 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -77,8 +77,8 @@ if { $job_id == 0 } { spawn $scontrol show job $job_id expect { - -re "TimeLimit=($number)" { - set time_get $expect_out(1,string) + -re "TimeLimit=($number):($number):" { + set time_get [expr $expect_out(1,string) * 60 + $expect_out(2,string)] exp_continue } eof { @@ -121,8 +121,8 @@ if { $job_id == 0 } { spawn $scontrol show job $job_id expect { - -re "TimeLimit=($number)" { - set time_get $expect_out(1,string) + -re "TimeLimit=($number):($number):" { + set time_get [expr $expect_out(1,string) * 60 + $expect_out(2,string)] exp_continue } eof { @@ -165,8 +165,8 @@ if { $job_id == 0 } { spawn $scontrol show job $job_id expect { - -re "TimeLimit=($number)" { - set time_get $expect_out(1,string) + -re "TimeLimit=($number)-($number):($number):" { + set time_get [expr $expect_out(1,string) * 1440 + $expect_out(2,string) * 60 + $expect_out(3,string)] exp_continue } eof { diff --git a/testsuite/expect/test17.9 b/testsuite/expect/test17.9 index 54f779ac4..f3aa6c12f 100755 --- a/testsuite/expect/test17.9 +++ b/testsuite/expect/test17.9 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test18.10 b/testsuite/expect/test18.10 deleted file mode 100755 index 5889f4862..000000000 --- a/testsuite/expect/test18.10 +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test slaunch task stdout/err disabling (--task-output and -# --task-error options). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.10" -set exit_code 0 -set job_id 0 - -print_header $test_id - -# -# Spawn a program via slaunch with stdout forwarding disabled -# -set timeout $max_job_delay -set salloc_pid [spawn $salloc -t1 $slaunch --task-output=none $bin_id] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "uid=" { - send_user "\nFAILURE: slaunch improperly forwarded stdout\n" - set exit_code 1 - exp_continue - } - -re "groups=" { - send_user "\nFAILURE: slaunch improperly forwarded stdout\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Spawn a program via slaunch with stderr forwarding disabled -# -set job_id 0 -set matches 0 -set salloc_pid [spawn $salloc -t1 -v $slaunch --task-error=/dev/null $bin_sleep aaa] -expect { - -re "Pending job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "invalid" { - send_user "\nFAILURE: slaunch improperly forwarded stderr\n" - set exit_code 1 - exp_continue - } - -re "exit code $number" { - send_user "This error is expected, no worries\n" - set matches 1 - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$matches != 1} { - send_user "\nFAILURE: slaunch exit code does not indicate error\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.11 
b/testsuite/expect/test18.11 deleted file mode 100755 index 7bb996177..000000000 --- a/testsuite/expect/test18.11 +++ /dev/null @@ -1,304 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test slaunch stdout/err file name formatting (--task-output and -# --task-error options with %j, %J, %n, %s and %t specifications). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.11" -set exit_code 0 -set file_err_j "test$test_id.j.%j.error" -set file_in "test$test_id.input" -set file_out_J "test$test_id.J.%J.output" -set file_out_n "test$test_id.n.%n.output" -set file_out_s "test$test_id.s.%s.output" -set file_out_t "test$test_id.t.%t.output" -set job_id 0 -set task_cnt 5 - -print_header $test_id - -# -# Spawn a program that includes "task_id" (%t) in stdout file names -# and confirm they are created -# -file delete [glob -nocomplain "test$test_id.s.*.output"] -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --task-output=$file_out_t -n$task_cnt --overcommit $bin_id] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} -if {$job_id == 0} { - send_user "\nFAILURE: job initiation failed\n" - exit 1 -} - -set file_cnt 0 -for {set task_id 0} {$task_id < $task_cnt} {incr task_id} { - set file_out_t_glob "test$test_id.t.$task_id.output" - if {[wait_for_file $file_out_t_glob] != 0} { - set exit_code 1 - } else { - file delete $file_out_t_glob - incr file_cnt - } -} -if {$file_cnt != $task_cnt} { - send_user "\nFAILURE: file format of %t in stdout failed\n" - set exit_code 1 -} - -# -# Spawn a program that includes "jobid" (%j) in stderr file name -# and confirm it is created -# -set job_id 0 -set matches 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --task-output=/dev/null --task-error=$file_err_j -n$task_cnt --overcommit $bin_sleep aaa] -expect { - -re "Granted job allocation ($number)" { - 
set job_id $expect_out(1,string) - exp_continue - } - -re "exit code $number" { - send_user "This error is expected, no worries\n" - set matches 1 - exp_continue; - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} -if {$job_id == 0} { - send_user "\nFAILURE: job initiation failed\n" - exit 1 -} -if {$matches == 0} { - send_user "\nFAILURE: exit code failed to indicate an error\n" - set exit_code 1 -} - -set file_err_j_glob "test$test_id.j.$job_id.error" -if {[wait_for_file $file_err_j_glob] == 0} { - file delete $file_err_j_glob -} else { - send_user "\nFAILURE: file format of %j in stderr failed\n" - set exit_code 1 -} - -# -# Spawn a program that includes "job_id.step_id" (%J) in stdout -# file name and confirm it is created -# -set job_id 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --task-output=$file_out_J $bin_hostname] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} -if {$job_id == 0} { - send_user "\nFAILURE: job initiation failed\n" - exit 1 -} -set task_id 0 -set file_out_J_glob "test$test_id.J.$job_id.$task_id.output" -if {[wait_for_file $file_out_J_glob] != 0} { - send_user "\nFAILURE: file format of %J in stdout failed\n" - set exit_code 1 -} else { - file delete $file_out_J_glob -} - -# -# Spawn a program that includes "node_id" (%n) in stdout -# file name and confirm it is created -# -set node_id 0 -set file_out_n_glob "test$test_id.n.$node_id.output" -file delete $file_out_n_glob - -set job_id 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --task-output=$file_out_n -n2 --overcommit $bin_hostname] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} -if {$job_id == 0} { - send_user "\nFAILURE: job initiation failed\n" - exit 1 -} -set file_cnt 0 -for {set node_id 0} {$node_id < 2} {incr node_id} { - set file_out_n_glob "test$test_id.n.$node_id.output" - - if {($node_id == 0) && ([wait_for_file $file_out_n_glob] != 0)} { - send_user "\nFAILURE: Missing file $file_out_n_glob\n" - set exit_code 1 - } else { - exec $bin_sleep 1 - } - if [file exists $file_out_n_glob] { - file delete $file_out_n_glob - incr file_cnt - } -} - -if {$file_cnt != 1} { - send_user "\nFAILURE: file format of %n in stdout failed\n" - set exit_code 1 -} - -# -# Spawn a program that includes "step_id" (%s) in stdout -# file name and confirm it is created. Use two step batch job. 
-# -make_bash_script $file_in " - $slaunch -n4 --overcommit --task-output=$file_out_s $bin_hostname - $slaunch -n4 --overcommit --task-output=$file_out_s $bin_hostname -" - -file delete [glob -nocomplain "test$test_id.s.\[0-4].output"] - -if { [test_bluegene] } { - set node_cnt 32-2048 - set task_cnt 32 -} else { - if { [test_xcpu] } { - set node_cnt 1-1 - } else { - set node_cnt 1-4 - } - set task_cnt 4 -} - -set job_id 0 -set sbatch_pid [spawn $sbatch --output=/dev/null -N$node_cnt -t1 $file_in] -expect { - -re "Submitted batch job ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: sbatch not responding\n" - slow_kill $sbatch_pid - exit 1 - } - eof { - wait - } -} -if {$job_id == 0} { - send_user "\nFAILURE: job initiation failed\n" - exit 1 -} else { - if {[wait_for_job $job_id DONE] != 0} { - send_user "\nFAILURE: error completing job $job_id\n" - cancel_job $job_id - set exit_code 1 - } -} - -set file_cnt 0 -for {set step_id 0} {$step_id < 3} {incr step_id} { - set file_out_s_glob "test$test_id.s.$step_id.output" - - if {($step_id < 2) && ([wait_for_file $file_out_s_glob] != 0)} { - send_user "\nFAILURE: Missing file $file_out_s_glob\n" - set exit_code 1 - } else { - exec $bin_sleep 1 - } - if [file exists $file_out_s_glob] { - file delete $file_out_s_glob - incr file_cnt - } -} - -if {$file_cnt != 2} { - send_user "\nFAILURE: file format of %s in stdout failed($file_cnt)\n" - set exit_code 1 -} - -# -# Post-processing -# -if {$exit_code == 0} { - file delete $file_in - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.12 b/testsuite/expect/test18.12 deleted file mode 100755 index 48cecadf0..000000000 --- a/testsuite/expect/test18.12 +++ /dev/null @@ -1,90 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test slaunch stdin routing to specific task (--slaunch-input-filter -# option with numeric argument). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.12" -set exit_code 0 -set job_id 0 -set matches 0 -set task_id 3 - -print_header $test_id - -# -# Spawn a shell via slaunch -# -set timeout $max_job_delay -set salloc_pid [spawn $salloc -v -N1 -t1 $slaunch -n10 --overcommit --wait=2 --slaunch-input-filter=3 $bin_bash] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - send "$bin_env\n" - exp_continue - } - -re "SLURM_PROCID=($number)" { - set proc_id $expect_out(1,string) - if {$task_id == $proc_id} { - incr matches - } else { - send_user "\nFAILURE: wrong task_id responded\n" - set exit_code 1 - } - send "exit\n" - exp_continue - } - -re "error: First task exited" { - send_user "This error is expected, no worries\n" - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$matches != 1} { - send_user "\nFAILURE: specific task_id failed to respond\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.14 b/testsuite/expect/test18.14 deleted file mode 100755 index cf2bbceac..000000000 --- a/testsuite/expect/test18.14 +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Verify the appropriate job environment variables are set -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.14" -set exit_code 0 -set job_id 0 -set matches 0 -set timeout $max_job_delay - -print_header $test_id - -# These are the variables for which we are checking existence. 
-# If the number following a variable name is 1, then we check to make -# certain that the value of the env variable is greater than 0 -array set good_vars { - SLURM_JOB_ID 1 - SLURM_JOB_NUM_NODES 1 - SLURM_JOB_NODELIST 0 - SLURM_JOB_CPUS_PER_NODE 1 - - SLURM_CPUS_ON_NODE 1 - SLURM_CPUS_PER_TASK 1 - SLURM_JOBID 1 - SLURM_LAUNCH_NODE_IPADDR 0 - SLURM_LOCALID 0 - SLURM_NNODES 0 - SLURM_NODEID 0 - SLURM_NODELIST 0 - SLURM_NPROCS 1 - SLURM_PROCID 0 - SLURM_SRUN_COMM_HOST 0 - SLURM_SRUN_COMM_PORT 1 - SLURM_STEPID 0 - SLURM_TASKS_PER_NODE 1 - SLURM_TASK_PID 1 - SLURM_UMASK 0 -} - -# -# Spawn a job via salloc to print environment variables -# -set salloc_pid [spawn $salloc -N1 -t1 $slaunch $bin_env] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "(SLURM_$alpha_under)=($alpha_numeric)" { - set found_vars($expect_out(1,string)) "$expect_out(2,string)" - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -set total 0 -set good 0 -send_user "\n" -foreach {slurm_var check_flag} [array get good_vars] { - incr total - if {[info exists found_vars($slurm_var)]} { - if { $check_flag == 1 && $found_vars($slurm_var) <= 0 } { - send_user "FAILURE: Found $slurm_var, " - send_user "but $found_vars($slurm_var) <= 0\n" - } else { - incr good - } - } else { - send_user "FAILURE: Variable $slurm_var not found\n" - } -} - -if {$good < $total} { - send_user "\nFAILURE: Only $good of $total SLURM environment variables set\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.15 b/testsuite/expect/test18.15 deleted file mode 100755 index 7d8161cef..000000000 --- a/testsuite/expect/test18.15 +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Verify that user environment variables are propagated to the job -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.15" -set test_env_name "TEST_ENV_$test_id" -set test_env_val 123 -set exit_code 0 -set job_id 0 -set matches 0 -set timeout $max_job_delay - -global env -set env($test_env_name) $test_env_val - -print_header $test_id - -# -# Spawn a job via sbatch to print environment variables -# -set env($test_env_name) $test_env_val -set salloc_pid [spawn $salloc -N1 -t1 $slaunch $bin_env] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "$test_env_name=($number)" { - if {$expect_out(1,string) == $test_env_val} { - incr matches - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$matches != 1} { - send_user "\nFAILURE: Environment variables not propagated\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.16 b/testsuite/expect/test18.16 deleted file mode 100755 index 160773903..000000000 --- a/testsuite/expect/test18.16 +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Verify that user limits are propagated to the job -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -# -# Note: AIX does not support the NPROC limit, but this test should -# otherwise succeed -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.16" -set exit_code 0 -set job_id 0 -set limit_core 943 -set limit_fsize 274515 -set limit_nofile 1016 -set limit_nproc 345 -set limit_stack 2021 -set matches 0 -set file_in "test$test_id.input" -set file_prog_get "test$test_id.prog" -set timeout $max_job_delay - -print_header $test_id - -# -# Delete left-over programs and rebuild them. -# We use our own program to get ulimit values since the output -# of the ulimit program is inconsistent across systems. 
-# -exec $bin_rm -f $file_prog_get -exec $bin_make -f /dev/null $file_prog_get - -# -# Get our current limits and adjust targets accordingly -# -set cur_core -1 -set cur_fsize -1 -set cur_nofile -1 -set cur_nproc -1 -set cur_stack -1 -spawn ./$file_prog_get -expect { - -re "USER_CORE=($number)" { - set cur_core $expect_out(1,string) - exp_continue - } - -re "USER_FSIZE=($number)" { - set cur_fsize $expect_out(1,string) - exp_continue - } - -re "USER_NOFILE=($number)" { - set cur_nofile $expect_out(1,string) - exp_continue - } - -re "USER_NPROC=($number)" { - set cur_nproc $expect_out(1,string) - exp_continue - } - -re "USER_STACK=($number)" { - set cur_stack $expect_out(1,string) - exp_continue - } -} -if {$cur_core != -1} { - if {$cur_core == 0} { - set limit_core 0 - } else { - set limit_core [expr ($cur_core / 1024) - 2] - } -} -if {$cur_fsize != -1} { - if {$cur_fsize == 0} { - set limit_fsize 0 - } else { - set limit_fsize [expr ($cur_fsize / 1024) - 2] - } -} -if {$cur_nofile != -1} { - set limit_nofile [expr $cur_nofile - 2] -} -if {$cur_nproc != -1} { - set limit_nproc [expr $cur_nproc - 2] -} -if {$cur_stack != -1} { - set limit_stack [expr ($cur_stack / 1024) - 2] -} - -# -# Spawn a job via salloc/slaunch to print environment variables and -# user limits -# -make_bash_script $file_in " - ulimit -c $limit_core - ulimit -f $limit_fsize - ulimit -n $limit_nofile - ulimit -u $limit_nproc - ulimit -s $limit_stack - ./$file_prog_get -" - -set salloc_pid [spawn $salloc -N1 -t1 $slaunch ./$file_in] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "USER_CORE=($number)" { - if {$expect_out(1,string) == [expr $limit_core * 1024]} { - incr matches - } - exp_continue - } - -re "USER_FSIZE=($number)" { - if {$expect_out(1,string) == [expr $limit_fsize * 1024]} { - incr matches - } - exp_continue - } - -re "USER_NOFILE=($number)" { - if {$expect_out(1,string) == $limit_nofile} { - incr matches - } - exp_continue - } - -re "USER_NPROC=($number)" { - if {$expect_out(1,string) == $limit_nproc} { - incr matches - } - exp_continue - } - -re "USER_NPROC unsupported" { - incr matches - exp_continue - } - -re "USER_STACK=($number)" { - if {$expect_out(1,string) == [expr $limit_stack * 1024]} { - incr matches - } - exp_continue - } - -re "USER_STACK unsupported" { - incr matches - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$matches != 5} { - send_user "\nFAILURE: User limits not propagated\n" - set exit_code 1 -} - -if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_prog_get - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.16.prog.c b/testsuite/expect/test18.16.prog.c deleted file mode 100644 index f49966329..000000000 --- a/testsuite/expect/test18.16.prog.c +++ /dev/null @@ -1,59 +0,0 @@ -/*****************************************************************************\ - * proc1.29.proc.c - Simple user limit set program for SLURM regression - * test1.29. Get the core, fsize, nofile, and nproc limits and print their - * values in the same format as SLURM environment variables. - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
- * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ -#include <stdio.h> -#include <stdlib.h> -#include <sys/time.h> -#include <sys/resource.h> -#include <unistd.h> - -main (int argc, char **argv) -{ - struct rlimit u_limit; - int exit_code = 0; - - (void) getrlimit(RLIMIT_CORE, &u_limit); - printf("USER_CORE=%d\n", u_limit.rlim_cur); - (void) getrlimit(RLIMIT_FSIZE, &u_limit); - printf("USER_FSIZE=%d\n", u_limit.rlim_cur); - (void) getrlimit(RLIMIT_NOFILE, &u_limit); - printf("USER_NOFILE=%d\n", u_limit.rlim_cur); -#ifdef RLIMIT_NPROC - (void) getrlimit(RLIMIT_NPROC, &u_limit); - printf("USER_NPROC=%d\n", u_limit.rlim_cur); -#else - printf("USER_NPROC unsupported\n"); -#endif -#ifdef RLIMIT_STACK - (void) getrlimit(RLIMIT_STACK, &u_limit); - printf("USER_STACK=%d\n", u_limit.rlim_cur); -#else - printf("USER_STACK unsupported\n"); -#endif - - exit(exit_code); -} diff --git a/testsuite/expect/test18.17 b/testsuite/expect/test18.17 deleted file mode 100755 index 7ecf97965..000000000 --- a/testsuite/expect/test18.17 +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of salloc and slaunch exit code reporting -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.17" -set exit_code 0 -set exit_script "./test$test_id.exit.bash" -set test_script "./test$test_id.bash" -print_header $test_id - -set timeout $max_job_delay - -# -# Delete left-over scripts and build new ones -# -make_bash_script $exit_script {exit $((SLURM_PROCID + 10))} - -# -# Check the return code of slaunch. To do so, we spawn -# salloc and slaunch command seperately. -# -set job_id 0 -set matches 0 - -# First start salloc and wait for the allocation -set salloc_pid [spawn $salloc -N1 -t1 --kill-command sleep 300] -set salloc_spawn_id $spawn_id -set spawn_id $salloc_spawn_id -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - } - timeout { - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - send_user "\nFAILURE: salloc not responding\n" - exit 1 - } -} - -# Then spawn slaunch using the job allocation from the previous salloc -set slaunch_pid [spawn $slaunch --jobid=$job_id -n2 --overcommit $exit_script] -set slaunch_spawn_id $spawn_id -set spawn_id $slaunch_spawn_id -set sum 0 -expect { - -re "exit code ($number)" { - send_user "\nThis error is expected, no worries\n" - incr sum $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding\n" - slow_kill $slaunch_pid - set exit_code 1 - exp_continue - } - eof { - set slaunch_rc [lindex [wait] 3] - } -} -if {$sum != 21} { - send_user "\nFAILURE: slaunch failed to report individual task error codes\n" - set exit_code 1 -} -send_user "slaunch rc = $slaunch_rc\n" -if {$slaunch_rc != 11} { - send_user "\nFAILURE: slaunch's return code was not the maximum task return code\n" - set exit_code 1 -} - -# signal the entire process group of salloc to kill the "sleep 300" -exec $bin_kill -s INT "-$salloc_pid" -set salloc_rc [lindex [wait -i $salloc_spawn_id] 3] -# We don't care about salloc's return code (it is probably 1 because -# we are killing the sleep command with SIGINT. -#send_user "salloc rc = $salloc_rc\n" - -# -# Post-processing -# -if {$exit_code == 0} { - exec $bin_rm -f $exit_script $test_script - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.18 b/testsuite/expect/test18.18 deleted file mode 100755 index aa27a9106..000000000 --- a/testsuite/expect/test18.18 +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test parallel launch of slaunch ("slaunch slaunch id"). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. 
-# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.18" - -set exit_code 0 -set mult 4 -set task_output 0 -if {[test_aix]} { - set task_cnt 3 -} else { - set task_cnt 4 -} -print_header $test_id - -# -# Spawn slaunch with $task_cnt tasks each of which runs a $mult way /bin/id -# -set timeout $max_job_delay - -if { [test_bluegene] } { - set node_cnt 1-2048 -} else { - if { [test_xcpu] } { - set node_cnt 1-1 - } else { - set node_cnt 1-4 - } -} - -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -n$task_cnt --overcommit $slaunch -l -n$mult --overcommit $bin_id] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "($number): uid=" { - incr task_output - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$task_output != [expr $task_cnt * $mult]} { - send_user "\nFAILURE: failed to get output from all tasks\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.19 b/testsuite/expect/test18.19 deleted file mode 100755 index 757ee908b..000000000 --- a/testsuite/expect/test18.19 +++ /dev/null @@ -1,148 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of slaunch signal forwarding (actually using scancel, for -# now anyway) -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.19" -set exit_code 0 -set file_prog "test$test_id.prog" -set matches 0 -set usr1cnt 0 -set usr2cnt 0 - -print_header $test_id - -if { [test_xcpu] } { - send_user "\nWARNING: This test is incompatable with XCPU systems\n" - exit $exit_code -} - -# -# Delete left-over program and rebuild it -# -exec $bin_rm -f $file_prog -exec $bin_make -f /dev/null $file_prog -exec $bin_chmod 700 $file_prog - -# -# Get uid -# -spawn $bin_id -u -expect { - -re "($number)" { - set uid $expect_out(1,string) - } - eof { - wait - } -} - -# -# Spawn initial program via salloc/slaunch -# Note: For systems supporting proper pthreads, instead use -# exec $bin_kill -USR1 $salloc_pid, otherwise we need pkill -# and can get multiple signals delivered -# Note: We send the signals right after task startup rather than -# interspersed with messages because some versions of -# Expect have difficulties handling unbuffered slaunch output -# -set timeout $max_job_delay -set job_id 0 -set slaunch_pid 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch -vv --unbuffered $file_prog] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "slaunch pid ($number)" { - set slaunch_pid $expect_out(1,string) - exp_continue - } - -re "WAITING" { - incr matches - if {$slaunch_pid != 0} { -# exec $bin_kill -USR1 $slaunch_pid -# exec $bin_kill -USR2 $slaunch_pid - exec $scancel --signal=USR1 $job_id.0 - exec $scancel --signal=USR2 $job_id.0 - exp_continue - } - } - -re "SIGUSR1" { - set usr1cnt [expr $usr1cnt + 1] - exp_continue - } - -re "SIGUSR2" { - set usr2cnt [expr $usr2cnt + 1] - if {$job_id != 0} { - cancel_job $job_id - } - } - -re "error.*not running" { - send_user "\nDon't worry about the error...\n" - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - send_user "\nEOF\n" - wait - } -} -if {$matches != 1} { - send_user "\nFAILURE: salloc failed to initialize properly\n" - set exit_code 1 -} -if {$usr1cnt != 1} { - send_user "\nFAILURE: $file_prog received $usr1cnt SIGUSR1 (not 1)\n" - set exit_code 1 -} -if {$usr2cnt != 1} { - send_user "\nFAILURE: $file_prog received $usr2cnt SIGUSR2 (not 1)\n" - set exit_code 1 -} - -# -# Post-processing -# -if {$exit_code == 0} { - exec $bin_rm -f $file_prog - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.19.prog.c b/testsuite/expect/test18.19.prog.c deleted file mode 100644 index bbf1d3b07..000000000 --- a/testsuite/expect/test18.19.prog.c +++ /dev/null @@ -1,81 +0,0 @@ -/*****************************************************************************\ - * prog18.19.prog.c - Simple signal catching test program for SLURM regression - * test18.19. Report caught signals. Exit after SIGUSR1 and SIGUSR2 received. - ***************************************************************************** - * Copyright (C) 2002 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. 
- * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -\*****************************************************************************/ -#include <signal.h> -#include <stdio.h> -#include <stdlib.h> -#include <errno.h> -#include <sys/types.h> -#include <unistd.h> - - -int sigusr1_cnt = 0, sigusr2_cnt = 0; - -void sig_handler(int sig) -{ - switch (sig) - { - case SIGUSR1: - printf("Received SIGUSR1\n"); - fflush(stdout); - sigusr1_cnt++; - break; - case SIGUSR2: - printf("Received SIGUSR2\n"); - fflush(stdout); - sigusr2_cnt++; - break; - default: - printf("Received signal %d\n", sig); - fflush(stdout); - } -} - -main (int argc, char **argv) -{ - struct sigaction act; - - act.sa_handler = sig_handler; - sigemptyset(&act.sa_mask); - act.sa_flags = 0; - if (sigaction(SIGUSR1, &act, NULL) < 0) { - perror("setting SIGUSR1 handler"); - exit(2); - } - if (sigaction(SIGUSR2, &act, NULL) < 0) { - perror("setting SIGUSR2 handler"); - exit(2); - } - - printf("WAITING\n"); - fflush(stdout); - - while (!sigusr1_cnt || !sigusr2_cnt) { - sleep(1); - } - - exit(0); -} diff --git a/testsuite/expect/test18.20 b/testsuite/expect/test18.20 deleted file mode 100755 index b10f11255..000000000 --- a/testsuite/expect/test18.20 +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Run "slaunch cat" and read slaunch's stdout SLOWLY, creating -# stdout back pressure in slaunch. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -# -############################################################################ -# Copyright (C) 2005-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Chris Morrone <morrone2@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.20" -set cycle_count 3 -set target_lines 1000 -set exit_code 0 -set file_in "test$test_id.input" -set file_out "test$test_id.output" -set file_err "test$test_id.error" -set task_cnt 1 - -print_header $test_id - -# Execute an slaunch job to cat input_file. Read slaunch's stdout SLOWLY -# (1k/sec) to create back pressure which used to reveal a stdout data-loss bug. -# Write the slaunch's stdout to a file so that the caller can compare the sizes -# of input_file and output_file. -proc run_cat_backpressure { input_file output_file } { - global bin_cat bin_sleep bin_rm salloc slaunch - global node_cnt task_cnt timeout file_err - - send_user "Running run_cat_backpressure (This is slow)." - exec $bin_rm -f $file_err $output_file - set iter 0 - - set output [open $output_file w] - set slaunch_output [open "| $salloc -N1 -t2 $slaunch --task-error=$file_err $bin_cat $input_file" r] - while {![eof $slaunch_output]} { - puts -nonewline $output [read $slaunch_output 1] - incr iter - if {[expr $iter % 1000] == 0} { - send_user "." - exec $bin_sleep 1 - } - } - - send_user "\n" - flush $output - close $output -} - -# -# Create a sizable text file -# -exec $bin_rm -f $file_in $file_out -for {set inx 0} {$inx < 10} {incr inx} { - exec $bin_cat /etc/hosts >>$file_out - exec $bin_cat /etc/passwd >>$file_out -} -exec head -n $target_lines <$file_out >$file_in -exec $bin_rm -f $file_out - -set stdin_lines [get_line_cnt $file_in] -set stdout_target [expr $stdin_lines * $task_cnt] - -# -# Run cycle_count jobs to copy job input to job output and compare sizes -# -set success_cnt 0 -set timeout $max_job_delay -for {set inx 0} {$inx < $cycle_count} {incr inx} { - run_cat_backpressure $file_in $file_out - set stdout_lines [get_line_cnt $file_out] - if {$stdout_lines != $stdout_target} { - exec $bin_sleep 1 - set stdout_lines [get_line_cnt $file_out] - } - if {$stdout_lines != $stdout_target} { - send_user "\nFAILURE: stdout is incomplete\n" - set exit_code 1 - break - } else { - incr success_cnt - } -} -exec $bin_rm -f $file_in $file_out $file_err - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" - exec $bin_rm -f $file_err $file_in $file_out -} else { - send_user "\nFAILURE\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.21 b/testsuite/expect/test18.21 deleted file mode 100755 index 6b78a7c13..000000000 --- a/testsuite/expect/test18.21 +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of slaunch's --kill-on-bad-exit option. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. 
-# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.21" -set exit_code 0 -set file_in "test$test_id.prog" -set matches 0 - -print_header $test_id - -# -# Delete left-over input script -# Build input script file -# -# We have the last of the tasks exit. proctrack/linuxproc will not -# signal processes named "slurmstepd" and later user tasks could still -# be named "slurmstepd" when the termination signal comes in. -# -exec $bin_rm -f $file_in -set fd [open "$file_in.c" w] -puts $fd { -#include <stdio.h> -#include <stdlib.h> -#include <unistd.h> - -main() -{ - char *id = getenv("SLURM_PROCID"); - if (atoi(id) == 9) { - sleep(1); /* helps with proctrack/linuxproc problem */ - exit(2); - } - sleep(15); - printf("SHOULD_NOT_BE_HERE\n"); -} -} -close $fd -# Add delay due to sporatic error "Clock skew detected" -exec $bin_sleep 1 -exec $bin_make -f /dev/null $file_in -exec $bin_chmod 700 $file_in - -# -# Spawn a shell via sattach/slaunch and send exit command to task 1 only -# -set timeout $max_job_delay -set job_id 0 -set salloc_pid [spawn $salloc -t1 -N1 $slaunch -n10 --overcommit --kill-on-bad-exit $file_in] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "exit code 2" { - send_user "This error is expected, no worries\n" - incr matches - exp_continue - } - -re "SHOULD_NOT_BE_HERE" { - set matches -10 - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id_1 - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$matches != 1} { - send_user "\nFAILURE: problem with --kill-on-bad-exit option\n" - set exit_code 1 -} -if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_in.c - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.22 b/testsuite/expect/test18.22 deleted file mode 100755 index d4398c40e..000000000 --- a/testsuite/expect/test18.22 +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of slaunch task-prolog and task-epilog option. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2005-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. 
-# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.22" -set exit_code 0 -set cwd "[$bin_pwd]" -set tasks 4 -set file_in "$cwd/test$test_id.in" -set task_prolog "$cwd/test$test_id.prolog" -set task_epilog "$cwd/test$test_id.epilog" -set file_out_pre "$cwd/test$test_id.output_pre" -set file_out_post "$cwd/test$test_id.output_post" - -print_header $test_id - -# -# Delete left-over scripts and rebuild, -# The sleep command just forces slurmd to kill the user's (long running) epilog -# -file delete $task_prolog $task_epilog -file delete $file_out_pre $file_out_post -exec $bin_touch $file_out_pre -exec $bin_touch $file_out_post - -make_bash_script $task_prolog " - $bin_id >> $file_out_pre - echo export TEST=prolog_qa -" - -make_bash_script $task_epilog " - $bin_id >> $file_out_post - $bin_sleep 200 -" - -make_bash_script $file_in " - echo TEST==\$TEST - #env -" - -# -# Submit a slurm job that will execute $tasks tasks -# Note: If running on more than one node and writing to an -# NFS file, overwritting of data has been observed, this -# causes the test to fail -# -set job_id 0 -set matches 0 -set timeout $max_job_delay -if { [test_bluegene] } { - set node_cnt 1-1024 -} else { - set node_cnt 1 -} - -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -n$tasks --overcommit --task-prolog=$task_prolog --task-epilog=$task_epilog $file_in] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "TEST==prolog_qa" { - incr matches - exp_continue - } - -re "error" { - send_user "\nFAILURE: Error running salloc/slaunch\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} -if {$exit_code != 0} { - exit $exit_code -} -if {$matches != $tasks} { - send_user "\nFAILURE: prolog exported env var failure ($matches != $tasks)\n" - set exit_code 1 -} - -# -# Get my id to compare with output -# -set my_uid -1 -spawn $bin_id -expect { - -re "uid=($number)" { - set my_uid $expect_out(1,string) - exp_continue - } - eof { - wait - } -} - -# -# Make sure we have two records in both prolog and epilog output (one for each task) -# Wait a few seconds for various delays -# -set matches 0 -if {[wait_for_file $file_out_pre] == 0} { - set timeout 5 - spawn $bin_cat $file_out_pre - expect { - "uid=$my_uid" { - incr matches - exp_continue - } - eof { - wait - } - } -} -if {$matches != $tasks} { - send_user "\nFAILURE: task prolog output is missing or uid mismatch ($matches:$tasks)\n" - set exit_code 1 -} - - -set matches 0 -spawn $bin_cat $file_out_post -expect { - "uid=$my_uid" { - incr matches - 
exp_continue - } - eof { - wait - } -} -if {$matches != $tasks} { - send_user "\nFAILURE: task epilog output is missing or uid mismatch ($matches:$tasks)\n" - set exit_code 1 -} - - -if {$exit_code == 0} { - file delete $task_prolog $task_epilog $file_in - file delete $file_out_pre $file_out_post - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.23 b/testsuite/expect/test18.23 deleted file mode 100755 index 45eb00430..000000000 --- a/testsuite/expect/test18.23 +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of running non-existant job, confirm timely termination. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.23" -set exit_code 0 - -print_header $test_id - -if {[test_front_end] != 0 && [test_super_user] == 0} { - send_user "\nWARNING: This test is incompatable with front-end systems\n" - exit $exit_code -} - -# -# Submit a slurm job that will execute bogus job name -# -set job_id 0 -set matches 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch /bad/bad/bad] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "No such file" { - send_user "\nNo worries, this error is expected\n" - set matches 1 - exp_continue - } - -re "Unable to run executable" { - send_user "\nNo worries, this error is expected\n" - set matches 1 - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} -if {$matches == 0} { - send_user "\nFAILURE: unexpected output from bad job name\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.24 b/testsuite/expect/test18.24 deleted file mode 100755 index f88c728fb..000000000 --- a/testsuite/expect/test18.24 +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test propagation of umask to spawned tasks. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2005 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.24" -set exit_code 0 -set file_in "test$test_id.input" -set file_script "test$test_id.script" - -print_header $test_id - -if {[test_front_end] != 0} { - send_user "\nWARNING: This test is incompatable with front-end systems\n" - exit $exit_code -} - -# -# Build input script files -# -make_bash_script $file_in "umask" - -make_bash_script $file_script " - umask 0123 - $salloc -N1 -t1 $slaunch ./$file_in -" - -# -# Set umask and confirm it is propagated -# -set job_id 0 -set matches 0 -set salloc_pid [spawn ./$file_script] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re (0123|123) { - set matches 1 - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} -if {$matches != 1} { - send_user "\nFAILURE: umask not propagated\n" - set exit_code 1 -} - -if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_script - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.25 b/testsuite/expect/test18.25 deleted file mode 100755 index f9f7c7998..000000000 --- a/testsuite/expect/test18.25 +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of slaunch functionality -# Test of HOSTFILE environment variable. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.25" -set exit_code 0 -set node_count 0 -set job_id 0 -set hostfile "test$test_id.hostfile" - -print_header $test_id - -exec $bin_rm -f $hostfile -if { [test_front_end] } { - send_user "\nWARNING: This test incompatable with front-end systems\n" - exit $exit_code -} - -if {[string compare [switch_type] "elan"] == 0} { - send_user "\nWARNING: This test incompatable with elan switch\n" - exit $exit_code -} - -#find out if we have enough nodes to test functionality -set node_count [available_nodes [default_partition]] -if { $node_count < 2 } { - send_user "WARNING: system must have at least 2 nodes to run this test.\n" - exit $exit_code -} - -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N2 sleep 30] -set salloc_spawn_id $spawn_id -expect { - -re "Granted job allocation ($number)" { - set jobid $expect_out(1,string) - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } -} - -# -# First launch 2 tasks on 2 nodes, and find out the default task layout order -# -set node0 0 -set node1 0 -spawn $slaunch --jobid $jobid -l $bin_printenv SLURMD_NODENAME -expect { - -re "($number): ($alpha_numeric)" { - set task_id $expect_out(1,string) - if {$task_id == 0} { - set node0 $expect_out(2,string) - } else { - set node1 $expect_out(2,string) - } - exp_continue - } - eof { - wait - } -} - - -# -# Then create a hostfile laying out the tasks in the opposite order of -# the default. -# -if { $node0 == 0 || $node1 == 0 } { - send_user "\nFAILURE: node names not set from previous srun\n" - exit 1 -} -set env(SLURM_HOSTFILE) $hostfile -set 1node0 $node0 -set 1node1 $node1 -set file [open $hostfile "w"] -puts $file "$node1" -puts $file "$node0" -close $file - -# -# slaunch the tasks using the hostfile -# -spawn $slaunch --jobid $jobid -l --task-layout-file $hostfile $bin_printenv SLURMD_NODENAME -expect { - -re "($number): ($alpha_numeric)" { - set task_id $expect_out(1,string) - if {$task_id == 0} { - set node0 $expect_out(2,string) - } else { - set node1 $expect_out(2,string) - } - exp_continue - } - eof { - wait - } -} - -# -# Kill salloc (by signalling its process group and killing the sleep 30) -# -exec $bin_kill -s SIGINT -$salloc_pid -set spawn_id $salloc_spawn_id -expect { - eof { - wait - } -} - -if { [string compare $node0 $1node1] } { - send_user "\n FAILURE: tasks not distributed by hostfile\n" - set exit_code 1 -} elseif { [string compare $node1 $1node0] } { - send_user "\nFAILURE: tasks not distributed by hostfile\n" - set exit_code 1 -} -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.26 b/testsuite/expect/test18.26 deleted file mode 100755 index 55497a2cb..000000000 --- a/testsuite/expect/test18.26 +++ /dev/null @@ -1,173 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of running different executables with different arguments -# for each task (--multi-prog option). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. 
-############################################################################ -# Copyright (C) 2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.26" -set file_in "test$test_id.input" -set exit_code 0 - -print_header $test_id - -if {[test_front_end] != 0} { - send_user "\nThis test is incompatable with front-end systems\n" -} - -# -# Delete left-over input script -# Build input script file -# -exec $bin_rm -f $file_in -set file [open $file_in "w"] -puts $file "# multi-program configuration file -1-2 /bin/echo task:%t:offset:%o -0,3 echo task:%t:offset:%o -" -close $file -exec $bin_chmod 700 $file_in - -# -# Submit a slurm job that will execute different programs and arguments by task number -# -set job_id 0 -set matches 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch -n4 --overcommit -l --multi-prog $file_in] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "($number): task:($number):offset:($number)" { - set label $expect_out(1,string) - set task_id $expect_out(2,string) - set task_offset $expect_out(3,string) - if {$label == 0 && $task_id == 0 && $task_offset == 0} { - incr matches - } - if {$label == 1 && $task_id == 1 && $task_offset == 0} { - incr matches - } - if {$label == 2 && $task_id == 2 && $task_offset == 1} { - incr matches - } - if {$label == 3 && $task_id == 3 && $task_offset == 1} { - incr matches - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} -if {$matches != 4} { - send_user "\nFAILURE: Did not get expected multi-program output\n" - set exit_code 1 -} -if {$exit_code != 0} { - exit $exit_code -} - -# -# Submit a slurm job that will execute different executables and check debug info -# -# Timeout is max_job_delay (to spawn task) + -# 60 (job time limit) + -# 60 (slurmctld time limit check poll interval) + -# KillWait -# -set timeout [expr $max_job_delay + 60 + 60 + 60] - -file delete $file_in -set file [open $file_in "w"] -puts $file "# multi-program configuration file -1-2 /bin/hostname -0,3 /bin/date -" -close $file -exec $bin_chmod 700 $file_in - -set job_id 0 -set matches 0 -set timed_out 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch -n4 --overcommit -l --multi-prog --debugger-test $file_in] -expect { - -re "Granted job allocation ($number)" { - set job_id 
$expect_out(1,string) - exp_continue - } - -re "executable:(/bin/)($alpha)" { - if {[string compare $expect_out(2,string) "date"] == 0} { - incr matches - } - if {[string compare $expect_out(2,string) "hostname"] == 0} { - incr matches - } - if { $matches == 4} { - # set the debugged processes running again - exec $scancel -s CONT $job_id.0 - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} -if {$timed_out == 1} { - send_user "\nEarly termination is expected, no worries.\n" -} -if {$matches != 4} { - send_user "\nFAILURE: did not generate full list of executables.\n" - set exit_code 1 -} - -if {$exit_code == 0} { - file delete $file_in - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.27 b/testsuite/expect/test18.27 deleted file mode 100755 index a0542642a..000000000 --- a/testsuite/expect/test18.27 +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Confirm that a job executes with the proper task distribution -# (--nodes and --distribution options). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################
-source ./globals
-
-set test_id "18.27"
-set exit_code 0
-
-print_header $test_id
-
-if {[test_front_end] != 0} {
-	send_user "\nWARNING: Additional testing is incompatible with front-end systems\n"
-	exit $exit_code
-}
-
-#
-# Submit a two node job with block distribution
-#
-set job_id 0
-set timeout $max_job_delay
-set expected_layout [list 0 0 1 1]
-set tested_layout [list -1 -1 -1 -1]
-set salloc_pid [spawn $salloc -N2 -t1 $slaunch -n4 --overcommit -l --distribution=block $bin_printenv SLURM_NODEID]
-expect {
-	-re "Granted job allocation ($number)" {
-		set job_id $expect_out(1,string)
-		exp_continue
-	}
-	-re "More ($alpha) requested than permitted" {
-		send_user "\nWARNING: can't test slaunch task distribution\n"
-		exit 0
-	}
-	-re "($number): ($number)" {
-		set index $expect_out(1,string)
-		set value $expect_out(2,string)
-		lset tested_layout $index $value
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: salloc not responding\n"
-		if {$job_id != 0} {
-			cancel_job $job_id
-		}
-		slow_kill [expr 0 - $salloc_pid]
-		set exit_code 1
-	}
-	eof {
-		wait
-	}
-}
-
-#
-# Verify block distribution of tasks
-#
-send_user "Expected layout was: $expected_layout\n"
-send_user "Tested layout was : $tested_layout\n"
-if {[string compare $expected_layout $tested_layout] != 0} {
-	send_user "\nFAILURE: failed to distribute tasks in block fashion\n"
-	set exit_code 1
-}
-
-#
-# Submit a two node job with cyclic distribution
-#
-set job_id 0
-set timeout $max_job_delay
-set expected_layout [list 0 1 0 1]
-set tested_layout [list -1 -1 -1 -1]
-set salloc_pid [spawn $salloc -N2 -t1 $slaunch -n4 --overcommit -l --distribution=cyclic $bin_printenv SLURM_NODEID]
-expect {
-	-re "Granted job allocation ($number)" {
-		set job_id $expect_out(1,string)
-		exp_continue
-	}
-	-re "More ($alpha) requested than permitted" {
-		send_user "\nWARNING: can't test slaunch task distribution\n"
-		exit 0
-	}
-	-re "($number): ($number)" {
-		set index $expect_out(1,string)
-		set value $expect_out(2,string)
-		lset tested_layout $index $value
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: salloc not responding\n"
-		if {$job_id != 0} {
-			cancel_job $job_id
-		}
-		slow_kill [expr 0 - $salloc_pid]
-		set exit_code 1
-	}
-	eof {
-		wait
-	}
-}
-
-#
-# Verify cyclic distribution of tasks
-#
-send_user "Expected layout was: $expected_layout\n"
-send_user "Tested layout was : $tested_layout\n"
-if {[string compare $expected_layout $tested_layout] != 0} {
-	send_user "\nFAILURE: failed to distribute tasks in cyclic fashion\n"
-	set exit_code 1
-}
-
-if {$exit_code == 0} {
-	send_user "\nSUCCESS\n"
-}
-exit $exit_code
diff --git a/testsuite/expect/test18.28 b/testsuite/expect/test18.28
deleted file mode 100755
index d7e0a9558..000000000
--- a/testsuite/expect/test18.28
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/usr/bin/expect
-############################################################################
-# Purpose: Test of SLURM functionality
-# Confirm that a job executes with the proper node count
-# (--nodes option).
-#
-# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR
-# "WARNING: ..." with an explanation of why the test can't be made, OR
-# "FAILURE: ..." otherwise with an explanation of the failure, OR
-# anything else indicates a failure mode that must be investigated.
-############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
-# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.28" -set exit_code 0 - -print_header $test_id - -if { [test_xcpu] } { - send_user "\nWARNING: This test is incompatable with XCPU systems\n" - exit 0 -} - -# -# Submit jobs of various node counts and validate task count -# -for {set inx 1} {$inx <= 4} {set inx [expr $inx * 2]} { - set job_id 0 - set host_0 "" - set host_1 "" - set host_2 "" - set host_3 "" - set no_fit 0 - set task_cnt 0 - set timeout $max_job_delay - set salloc_pid [spawn $salloc -N1-4 -t1 $slaunch -N$inx -l $bin_printenv SLURM_NODEID] - expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re " Requested node configuration is not available" { - set no_fit 1 - exp_continue - } - -re "($number): ($number)" { - incr task_cnt - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - if {$expect_out(1,string) == 2} { - set host_2 $expect_out(2,string) - } - if {$expect_out(1,string) == 3} { - set host_3 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } - } - if { $no_fit == 0 && $task_cnt != $inx } { - send_user "\nFAILURE: slaunch failed to launch specified tasks count\n" - set exit_code 1 - } - if {$no_fit == 0 && $task_cnt == 2} { - if {[string compare $host_0 $host_1] == 0} { - send_user "\nFAILURE: Re-used a node in the allocation\n" - set exit_code 1 - } - } - if {$no_fit == 0 && $task_cnt == 4} { - set dup_host 0 - if {[string compare $host_0 $host_1] == 0} { - set dup_host 1 - } - if {[string compare $host_0 $host_2] == 0} { - set dup_host 1 - } - if {[string compare $host_0 $host_3] == 0} { - set dup_host 1 - } - if {[string compare $host_1 $host_2] == 0} { - set dup_host 1 - } - if {[string compare $host_1 $host_3] == 0} { - set dup_host 1 - } - if {[string compare $host_2 $host_3] == 0} { - set dup_host 1 - } - if {$dup_host == 1} { - send_user "\nFAILURE: Re-used a node in the allocation\n" - set exit_code 1 - } - } -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.29 b/testsuite/expect/test18.29 deleted file mode 100755 index 94a105c1c..000000000 --- a/testsuite/expect/test18.29 +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/expect 
-############################################################################ -# Purpose: Test of SLURM functionality -# Test of slaunch --cpus-per-task option. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.29" -set exit_code 0 -set file_in "test$test_id.input" - -print_header $test_id - -if { [test_front_end] } { - send_user "\nWARNING: This test is incompatable with FRONT_END systems\n" - exit 0 -} -if {[test_multiple_slurmd] != 0} { - send_user "\nWARNING: This test is incompatable with multiple slurmd systems\n" - exit 0 -} - -set cpu_cnt 0 -set job_id 0 -set timeout $max_job_delay -set node_cnt 2 - -set available [available_nodes [default_partition]] -if {$available < 2} { - send_user "\nWARNING: not enough nodes currently available" - send_user " ($available avail, 2)\n" - exit $exit_code -} - -make_bash_script $file_in " - env | grep SLURM_JOB_CPUS_PER_NODE - $bin_hostname" - - -# Script will print SLURM_JOB_CPUS_PER_NODE, then hold the allocation -# for a long time. -set script_name test18.28.sh -make_bash_script $script_name { - printenv SLURM_JOB_CPUS_PER_NODE - sleep 600 -} -set salloc_pid [spawn $salloc -N $node_cnt $slaunch -n 1 $script_name] -set salloc_spawn_id $spawn_id -expect { - -re "Granted job allocation ($number)" { - set jobid $expect_out(1,string) - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } -} -expect { - -re "($number)" { - set cpu_cnt $expect_out(1,string) - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - cancel_job $job_id - slow_kill [expr 0 - $salloc_pid] - exit 1 - } -} -file delete $script_name - -send_user "cpu count is $cpu_cnt\n" - -# Check CPU count -if {$cpu_cnt < 2} { - send_user "\nWARNING: The node only has one CPU\n" - cancel_job $jobid - exec $bin_rm -f $file_in - exit $exit_code -} - -# -# Now start the real testing of --cpus-per-task. 
-#
-
-#
-# Test A
-# Should run: --cpus-per-task=1, -n=(node count * cpu count)
-#
-set task_cnt 0
-set pid [spawn $slaunch --jobid $jobid -l --cpus-per-task 1 -n [expr $node_cnt * $cpu_cnt] $bin_printenv SLURMD_NODENAME]
-expect {
-	-re "($number): ($alpha_numeric)" {
-		incr task_cnt
-		exp_continue
-	}
-	eof {
-		wait
-	}
-}
-set expected_task_cnt [expr $node_cnt * $cpu_cnt]
-if {$task_cnt < $expected_task_cnt} {
-	send_user "\nFAILURE Test A: Fewer tasks ($task_cnt) than expected ($expected_task_cnt)\n"
-	set exit_code 1
-}
-if {$task_cnt > $expected_task_cnt} {
-	send_user "\nFAILURE Test A: More tasks ($task_cnt) than "
-	send_user "expected ($expected_task_cnt)\n"
-	set exit_code 1
-}
-
-#
-# Test B
-# Should NOT run: --cpus-per-task=(cpu count), -n=(node count * cpu count)
-#
-set task_cnt 0
-set pid [spawn $slaunch --jobid $jobid -l --cpus-per-task $cpu_cnt -n [expr $node_cnt * $cpu_cnt] $bin_printenv SLURMD_NODENAME]
-expect {
-	-re "($number): ($alpha_numeric)" {
-		incr task_cnt
-		exp_continue
-	}
-	"error: Failed creating job step context" {
-		send_user "This error was expected!\n"
-	}
-	eof {
-		wait
-	}
-}
-if {$task_cnt > 0} {
-	send_user "\nFAILURE Test B: Test should NOT have run.\n"
-	set exit_code 1
-}
-
-
-#
-# Test C
-# Should run: --cpus-per-task=(cpu count), -n=(node count)
-#
-set task_cnt 0
-set pid [spawn $slaunch --jobid $jobid -l --cpus-per-task $cpu_cnt -n $node_cnt $bin_printenv SLURMD_NODENAME]
-expect {
-	-re "($number): ($alpha_numeric)" {
-		incr task_cnt
-		exp_continue
-	}
-	eof {
-		wait
-	}
-}
-set expected_task_cnt $node_cnt
-if {$task_cnt < $expected_task_cnt} {
-	send_user "\nFAILURE Test C: Fewer tasks ($task_cnt) than expected ($expected_task_cnt)\n"
-	set exit_code 1
-}
-if {$task_cnt > $expected_task_cnt} {
-	send_user "\nFAILURE Test C: More tasks ($task_cnt) than expected ($expected_task_cnt)\n"
-	set exit_code 1
-}
-
-#
-# Clean up the job allocation.
-#
-cancel_job $jobid
-set spawn_id $salloc_spawn_id
-expect {
-	eof {
-		wait
-	}
-}
-
-
-if {$exit_code == 0} {
-	exec $bin_rm -f $file_in
-	send_user "\nSUCCESS\n"
-}
-exit $exit_code
diff --git a/testsuite/expect/test18.30 b/testsuite/expect/test18.30
deleted file mode 100755
index bdaea5b4d..000000000
--- a/testsuite/expect/test18.30
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/expect
-############################################################################
-# Purpose: Test of SLURM functionality
-# Confirm that a job executes with the proper node count
-# (--nodes option).
-#
-# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR
-# "WARNING: ..." with an explanation of why the test can't be made, OR
-# "FAILURE: ..." otherwise with an explanation of the failure, OR
-# anything else indicates a failure mode that must be investigated.
-############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
-# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-# Written by Morris Jette <jette1@llnl.gov>
-# UCRL-CODE-226842.
-#
-# This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
-#
-# SLURM is free software; you can redistribute it and/or modify it under
-# the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or (at your option)
-# any later version.
-# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.30" -set exit_code 0 - -print_header $test_id - -if { [test_xcpu] } { - send_user "\nWARNING: This test is incompatable with XCPU systems\n" - exit 0 -} - -# -# Submit a 3+ node job -# -set host_0 "" -set host_1 "" -set host_2 "" -set host_3 "" -set job_id 0 -set timeout $max_job_delay - -if { [test_bluegene] } { - set node_cnt 1536 -} else { - set node_cnt 3 -} -set available [available_nodes [default_partition]] -if {$available < $node_cnt} { - send_user "\nWARNING: not enough nodes currently available" - send_user " ($available avail, $node_cnt needed)\n" - exit $exit_code -} - -# -# Allocate a 3+ node job, launch 1 node task -# -set job_id 0 -set host_0 "" -set host_1 "" -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -l -N1 $bin_printenv SLURM_NODEID] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "More ($alpha) requested than permitted" { - send_user "\nWARNING: can't test salloc task distribution\n" - exit $exit_code - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Verify node count -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get SLURM_NODEID of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] != 0} { - send_user "\nFAILURE: Started more than one task\n" - set exit_code 1 -} - -# -# Allocate a 3+ node job, launch 3 node task -# -set job_id 0 -set host_0 "" -set host_1 "" -set host_2 "" -set host_3 "" -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -l -N3 $bin_printenv SLURM_NODEID] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "More ($alpha) requested than permitted" { - send_user "\nWARNING: can't test salloc task distribution\n" - exit $exit_code - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - if {$expect_out(1,string) == 2} { - set host_2 $expect_out(2,string) - } - if {$expect_out(1,string) == 3} { - set host_3 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Verify node count -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get SLURM_NODEID of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] == 0} { - send_user "\nFAILURE: Did not get SLURM_NODEID of task 1\n" 
- set exit_code 1 -} -if {[string compare $host_2 ""] == 0} { - send_user "\nFAILURE: Did not get SLURM_NODEID of task 2\n" - set exit_code 1 -} -if {[string compare $host_3 ""] != 0} { - send_user "\nFAILURE: Started more than three task\n" - set exit_code 1 -} - -set dup_host 0 -if {[string compare $host_0 $host_1] == 0} { - set dup_host 1 -} -if {[string compare $host_0 $host_2] == 0} { - set dup_host 1 -} -if {[string compare $host_0 $host_3] == 0} { - set dup_host 1 -} -if {$dup_host == 1} { - send_user "\nFAILURE: Re-used a node in the allocation\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.31 b/testsuite/expect/test18.31 deleted file mode 100755 index c7088231b..000000000 --- a/testsuite/expect/test18.31 +++ /dev/null @@ -1,333 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Confirm that a job executes with the specified nodes -# (--relative, --nodelist-byname and --nodelist-byid). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.31" -set exit_code 0 - -print_header $test_id - -if { [test_xcpu] } { - send_user "\nWARNING: This test is incompatable with XCPU systems\n" - exit 0 -} - -if { [test_bluegene] } { - send_user "\nWARNING: test incompatable with Blue Gene systems\n" - exit $exit_code -} else { - set node_cnt 3 -} - -set available [available_nodes [default_partition]] -if {$available < $node_cnt} { - send_user "\nWARNING: not enough nodes currently available" - send_user " ($available avail, $node_cnt needed)\n" - exit $exit_code -} - -# -# Submit a 3 node job -# -set host_0 "" -set host_1 "" -set host_2 "" -set host_3 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -l $bin_printenv SLURMD_NODENAME] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - send "$bin_echo MY_ID=\$SLURM_JOBID \n" - exp_continue - } - -re "More ($alpha) requested than permitted" { - send_user "\nWARNING: can't test salloc task distribution\n" - exit $exit_code - } - -re "configuration not available" { - send_user "WARNING: partition too small for test\n" - exit 0 - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - if {$expect_out(1,string) == 2} { - set host_2 $expect_out(2,string) - } - if {$expect_out(1,string) == 3} { - set host_3 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} - -# -# Verify node count -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 1\n" - set exit_code 1 -} -if {[string compare $host_2 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 2\n" - set exit_code 1 -} -if {[string compare $host_3 ""] != 0} { - send_user "\nFAILURE: Started more than three tasks\n" - set exit_code 1 -} - -set dup_hostname 0 -if {[string compare $host_0 $host_1] == 0} { - incr dup_hostname -} -if {[string compare $host_0 $host_2] == 0} { - incr dup_hostname 1 -} -if {$dup_hostname == 1} { - send_user "\nFAILURE: Re-used a node in the allocation\n" - set exit_code 1 -} -set all_nodes $host_0,$host_1,$host_2 -set exclude_node $host_0 -set include_node $host_2 - -# -# Submit a job explicitly skip node 0 -# -set host_0 "" -set host_1 "" -set host_2 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt -t1 --nodelist=$all_nodes $slaunch -N2 --relative=1 -l $bin_printenv SLURMD_NODENAME] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "Invalid node name specified" { - send_user "\nWARNING: Appears you are using " - send_user "multiple slurmd testing.\n" - send_user "This test won't work in that fashion.\n" - exit 0 - } - - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - if {$expect_out(1,string) == 2} { - set host_2 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not 
responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} - -# -# Verify node count and names -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 1\n" - set exit_code 1 -} -if {[string compare $host_2 ""] != 0} { - send_user "\nFAILURE: Started more than two tasks\n" - set exit_code 1 -} -set dup_hostname 0 -if {[string compare $host_0 $exclude_node] == 0} { - set dup_hostname 1 -} -if {[string compare $host_1 $exclude_node] == 0} { - set dup_hostname 1 -} -if {$dup_hostname == 1} { - send_user "\nFAILURE: Allocated an excluded node\n" - set exit_code 1 -} - -# -# Submit a job explicitly including a node by name -# -set host_0 "" -set host_1 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt --nodelist=$all_nodes -t1 $slaunch -N1 --nodelist-byname=$include_node -l $bin_printenv SLURMD_NODENAME] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Verify node count and names -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] != 0} { - send_user "\nFAILURE: Started more than one task\n" - set exit_code 1 -} -set dup_hostname 0 -if {[string compare $host_0 $include_node] == 0} { - set dup_hostname 1 -} -if {$dup_hostname == 0} { - send_user "\nFAILURE: Allocation lacked an included node\n" - set exit_code 1 -} - -# -# Submit a job explicitly including a node by id -# -set host_0 "" -set host_1 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt --nodelist=$all_nodes -t1 $slaunch -N1 --nodelist-byid=2 -l $bin_printenv SLURMD_NODENAME] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Verify node count and names -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] != 0} { - send_user "\nFAILURE: Started more than one task\n" - set exit_code 1 -} -set dup_hostname 0 -if {[string compare $host_0 $include_node] == 0} { - set dup_hostname 1 -} -if {$dup_hostname == 0} { - send_user "\nFAILURE: Allocation lacked an included node\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.32 b/testsuite/expect/test18.32 deleted file mode 100755 index 19fb1c1ab..000000000 --- a/testsuite/expect/test18.32 +++ /dev/null @@ 
-1,218 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Basic test of MPI functionality via slaunch -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -############################################################################ -source ./globals - -set test_id "18.32" -set exit_code 0 -set file_in "test$test_id.input" -set file_out "test$test_id.output" -set file_err "test$test_id.error" -set test_prog "test$test_id.prog" -set job_id 0 - -print_header $test_id - -# -# Test for existence of mpi compiler -# -if {[info exists mpicc] == 0} { - send_user "\nWARNING: mpicc not defined, can't perform mpi testing\n" - exit 0 -} -if {[file executable $mpicc] == 0} { - send_user "\nWARNING: $mpicc does not exists\n" - exit 0 -} -if {[test_front_end] != 0} { - send_user "\nWARNING: This test is incompatable with front-end systems\n" - exit 0 -} - -# -# Delete left-over program and rebuild it -# -exec $bin_rm -f $test_prog ${test_prog}.o -if {$use_pmi} { - set pmi_link "-rpath $slurm_dir/lib -L $slurm_dir/lib -lpmi" - exec $mpicc -Xlinker $pmi_link -o $test_prog ${test_prog}.c -} else { - exec $mpicc -o $test_prog ${test_prog}.c -} - - -# Delete left-over stdout/err files -file delete $file_out $file_err - -# -# Build input script file -# -make_bash_script $file_in " - $bin_date - $bin_echo test1_N3_n6_cyclic - $slaunch -n6 --distribution=cyclic $test_prog - - $bin_date - $bin_echo test2_n6_block - $slaunch -n6 --distribution=block $test_prog - - $bin_date - $bin_echo test3_n4_cyclic - $slaunch -n4 --distribution=cyclic $test_prog - - $bin_date - $bin_echo test4_n4_block - $slaunch -n4 --distribution=block $test_prog - - $bin_date - $bin_echo test5_N2_n4_with_hole - $slaunch -N2 -n4 --distribution=cyclic --nodelist-byid=0,2 $test_prog - - $bin_date - $bin_echo TEST_COMPLETE -" - -# -# Spawn a srun batch job that uses stdout/err and confirm their contents -# -set timeout $max_job_delay -set no_start 0 -set srun_pid [spawn $srun -N3 -n6 --batch --output=$file_out --error=$file_err -t1 $file_in] -expect { - -re "jobid ($number) submitted" { - set job_id $expect_out(1,string) - exp_continue - } 
- -re "configuration not available" { - set no_start 1 - exp_continue - } - -re "Unable to submit batch job" { - set no_start 1 - exp_continue - } - -re "Unable to contact" { - send_user "\nFAILURE: slurm appears to be down\n" - exit 1 - } - timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid - exit 1 - } - eof { - wait - } -} - -if {$no_start != 0} { - send_user "\nWARNING: partition too small for test\n" - if {$job_id != 0} { - cancel_job $job_id - } - exit 0 -} -if {$job_id == 0} { - send_user "\nFAILURE: batch submit failure\n" - exit 1 -} - -# -# Wait for job to complete -# -if {[wait_for_job $job_id "DONE"] != 0} { - send_user "\nFAILURE: waiting for job to complete\n" - set exit_code 1 -} - -# -# Check for desired output in stdout -# -set expected [expr 6 + 6 + 4 + 4 + 4] -if {[wait_for_file $file_out] == 0} { - set matches 0 - set complete 0 - spawn $bin_cat $file_out - expect { - -re "I just received msg from Rank" { - incr matches - exp_continue - } - -re "TEST_COMPLETE" { - incr complete - exp_continue - } - eof { - wait - } - } - if {$matches == 0} { - send_user "\nFAILURE: No MPI communications occured\n" - send_user " The version of MPI you are using may be incompatible " - send_user "with the configured switch\n" - send_user " Core files may be present from failed MPI tasks\n\n" - set exit_code 1 - } elseif {$matches != $expected} { - send_user "\nFAILURE: unexpected output ($matches of $expected)\n" - set exit_code 1 - } elseif {$complete == 0} { - send_user "\nFAILURE: test failed to complete\n" - set exit_code 1 - } -} else { - set exit_code 1 -} - -if {$exit_code == 0} { - exec $bin_rm -f $file_in $file_out $file_err $test_prog ${test_prog}.o - send_user "\nSUCCESS\n" -} else { - set matches - spawn head $file_err - expect { - -re "Error creating CQ" { - incr matches - exp_continue - } - eof { - wait - } - } - if {$matches != 0} { - send_user "WARNING: If using MVAPICH then\n" - send_user " Configure \"PropagateResourceLimitsExcept=MEMLOCK\"\n" - send_user " Also start slurmd with \"ulimit -l unlimited\"\n" - } else { - send_user "Check contents of $file_err\n" - } -} -exit $exit_code diff --git a/testsuite/expect/test18.32.prog.c b/testsuite/expect/test18.32.prog.c deleted file mode 100644 index 52e37db06..000000000 --- a/testsuite/expect/test18.32.prog.c +++ /dev/null @@ -1,62 +0,0 @@ -/*****************************************************************************\ - * test18.32.prog.c - Simple ping test of operation with SLURM. - ***************************************************************************** - * Copyright (C) 2004 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Dong Ang <dahn@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. 
- * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ - -#include <stdio.h> -#include <mpi.h> - -#define COMM_TAG 1000 - -static void pass_its_neighbor(const int rank, const int size, const int* buf) -{ - MPI_Request request[2]; - MPI_Status status[2]; - - MPI_Irecv((void *)buf, 1, MPI_INT, ((rank+size-1)%size), COMM_TAG, - MPI_COMM_WORLD, &request[0]); - MPI_Isend((void *)&rank, 1, MPI_INT, ((rank+1)%size), COMM_TAG, - MPI_COMM_WORLD, &request[1]); - MPI_Waitall(2, request, status); - - fprintf(stdout, "Rank[%d] I just received msg from Rank %d\n", - rank, *buf); -} - -int main(int argc, char * argv[]) -{ - int size, rank,buf; - - MPI_Init(&argc, &argv); - MPI_Comm_size(MPI_COMM_WORLD, &size); - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - - buf = rank; /* we only pass rank */ - - pass_its_neighbor(rank, size, &buf); - - MPI_Finalize(); - return 0; -} - diff --git a/testsuite/expect/test18.33 b/testsuite/expect/test18.33 deleted file mode 100755 index d4b44d8cb..000000000 --- a/testsuite/expect/test18.33 +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Verify environment variables controlling slaunch are processed: -# SLAUNCH_DEBUG, SLAUNCH_DISTRIBUTION, SLAUNCH_LABELIO and -# SLAUNCH_OVERCOMMIT -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-############################################################################ -source ./globals - -set test_id "18.33" -set exit_code 0 -set job_id 0 -set timeout $max_job_delay - -print_header $test_id - -# -# Set target environment variables -# -global env -set env(SLAUNCH_DEBUG) 2 -set env(SLAUNCH_DISTRIBUTION) block -set env(SLAUNCH_LABELIO) 1 -set env(SLAUNCH_OVERCOMMIT) 1 -# -# Spawn a job via slaunch using these environment variables -# -set matches 0 -set salloc_pid [spawn $salloc -N1 -t1 $slaunch $bin_hostname] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "distribution *: block" { - incr matches - exp_continue - } - -re "verbose *: 2" { - incr matches - exp_continue - } - -re "label output *: true" { - incr matches - exp_continue - } - -re "overcommit *: true" { - incr matches - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$job_id == 0} { - send_user "\nFAILURE: did not get job_id\n" - exit 1 -} -if {$matches != 4} { - send_user "\nFAILURE: processed $matches of 4 environment variables\n" - set exit_code 1 -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.34 b/testsuite/expect/test18.34 deleted file mode 100755 index 5bead7daa..000000000 --- a/testsuite/expect/test18.34 +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test slaunch's ability to set the job step's name (--name option) -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -# -# NOTE: Due to symbolic links, we only look to match the last component -# of the directory name (e.g "cd /tmp; pwd" might return "/var/tmp"). -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-############################################################################ -source ./globals - -set test_id "18.34" -set exit_code 0 -set job_id 0 -set matches 0 -set step_name "aaa.$test_id" - -print_header $test_id - -# -# Change working directory on execute line and then print where jobs runs -# -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --name=$step_name $scontrol show step] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "Name=$step_name" { - incr matches - exp_continue - } - -re "Unable to contact" { - send_user "\nFAILURE: slurm appears to be down\n" - exit 1 - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$matches != 1} { - send_user "\nFAILURE: slaunch failed to set name\n" - set exit_code 1 -} -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.35 b/testsuite/expect/test18.35 deleted file mode 100755 index 5eb9a5e84..000000000 --- a/testsuite/expect/test18.35 +++ /dev/null @@ -1,352 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of task layout controls (--task-layout-byid, -# --task-layout-byname and --task-layout-file options). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.35" -set hostfile "test$test_id.hostfile" -set exit_code 0 - -print_header $test_id - -if { [test_xcpu] } { - send_user "\nWARNING: This test is incompatable with XCPU systems\n" - exit 0 -} - -if { [test_bluegene] } { - send_user "\nWARNING: test incompatable with Blue Gene systems\n" - exit $exit_code -} else { - set node_cnt 3 -} - -set available [available_nodes [default_partition]] -if {$available < $node_cnt} { - send_user "\nWARNING: not enough nodes currently available" - send_user " ($available avail, $node_cnt needed)\n" - exit $exit_code -} - -# -# Submit a 3 node job -# -set host_0 "" -set host_1 "" -set host_2 "" -set host_3 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -l $bin_printenv SLURMD_NODENAME] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - send "$bin_echo MY_ID=\$SLURM_JOBID \n" - exp_continue - } - -re "More ($alpha) requested than permitted" { - send_user "\nWARNING: can't test salloc task distribution\n" - exit $exit_code - } - -re "configuration not available" { - exec $bin_rm -f $hostfile - send_user "WARNING: partition too small for test\n" - exit 0 - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - if {$expect_out(1,string) == 2} { - set host_2 $expect_out(2,string) - } - if {$expect_out(1,string) == 3} { - set host_3 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} - -# -# Verify node count -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 1\n" - set exit_code 1 -} -if {[string compare $host_2 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 2\n" - set exit_code 1 -} -if {[string compare $host_3 ""] != 0} { - send_user "\nFAILURE: Started more than three tasks\n" - set exit_code 1 -} - -set dup_hostname 0 -if {[string compare $host_0 $host_1] == 0} { - incr dup_hostname -} -if {[string compare $host_0 $host_2] == 0} { - incr dup_hostname 1 -} -if {$dup_hostname == 1} { - send_user "\nFAILURE: Re-used a node in the allocation\n" - set exit_code 1 -} -set all_nodes $host_0,$host_1,$host_2 -set exclude_node $host_0 -set include_node $host_2 - -# -# Submit a job explicitly skip node 0 via --task-layout-byid -# -set invalid 0 -set host_0 "" -set host_1 "" -set host_2 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt -t1 --nodelist=$all_nodes $slaunch -n2 --task-layout-byid=1,2 -l $bin_hostname] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "Invalid node name specified" { - send_user "\nWARNING: Appears you are using " - send_user "multiple slurmd testing.\n" - send_user "This test won't work in that fashion.\n" - set invalid 1 - exp_continue - } - -re "SwitchType does not permit" { - send_user "\nNo worries, just stop the test now\n" - set invalid 1 - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - 
} - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - if {$expect_out(1,string) == 2} { - set host_2 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } - eof { - wait - } -} -if {$invalid != 0} { - if {$exit_code == 0} { - exec $bin_rm -f $hostfile - send_user "\nSUCCESS\n" - } - exit $exit_code -} - -# -# Verify node count and names -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 1\n" - set exit_code 1 -} -if {[string compare $host_2 ""] != 0} { - send_user "\nFAILURE: Started more than two tasks\n" - set exit_code 1 -} -set dup_hostname 0 -if {[string compare $host_0 $exclude_node] == 0} { - set dup_hostname 1 -} -if {[string compare $host_1 $exclude_node] == 0} { - set dup_hostname 1 -} -if {$dup_hostname == 1} { - send_user "\nFAILURE: Allocated an excluded node\n" - set exit_code 1 -} - -# -# Submit a job step using --task-layout-byname -# -set host_0 "" -set host_1 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt --nodelist=$all_nodes -t1 $slaunch -n1 --task-layout-byname=$include_node -l $bin_printenv SLURMD_NODENAME] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Verify node count and names -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] != 0} { - send_user "\nFAILURE: Started more than one task\n" - set exit_code 1 -} -set dup_hostname 0 -if {[string compare $host_0 $include_node] == 0} { - set dup_hostname 1 -} -if {$dup_hostname == 0} { - send_user "\nFAILURE: Allocation lacked an included node\n" - set exit_code 1 -} - -# -# Submit a job explicitly including a node by id -# -set file [open $hostfile "w"] -puts $file "$include_node" -close $file - -set host_0 "" -set host_1 "" -set job_id 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N$node_cnt --nodelist=$all_nodes -t1 $slaunch -n1 --task-layout-file=$hostfile -l $bin_printenv SLURMD_NODENAME] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "($number): ($alpha_numeric)" { - if {$expect_out(1,string) == 0} { - set host_0 $expect_out(2,string) - } - if {$expect_out(1,string) == 1} { - set host_1 $expect_out(2,string) - } - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Verify node count and names -# -if {[string compare $host_0 ""] == 0} { - send_user "\nFAILURE: Did not get hostname of task 0\n" - set exit_code 1 -} -if {[string compare $host_1 ""] != 0} { - send_user "\nFAILURE: Started more than one task\n" - set exit_code 1 -} -set dup_hostname 0 -if 
{[string compare $host_0 $include_node] == 0} { - set dup_hostname 1 -} -if {$dup_hostname == 0} { - send_user "\nFAILURE: Allocation lacked an included node\n" - set exit_code 1 -} - -if {$exit_code == 0} { - exec $bin_rm -f $hostfile - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.36 b/testsuite/expect/test18.36 deleted file mode 100755 index 8f7a6be43..000000000 --- a/testsuite/expect/test18.36 +++ /dev/null @@ -1,499 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of CPU affinity support. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2005-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -############################################################################ -source ./globals - -set test_id "18.36" -set exit_code 0 -set job_id 0 -set file_prog "test$test_id.prog" - -print_header $test_id - -# -# Test if task affinity support is supported. 
-# -set affinity 0 -log_user 0 -spawn $scontrol show config -expect { - -re "task/affinity" { - set affinity 1 - exp_continue - } - eof { - wait - } -} -log_user 1 -if {$affinity == 0} { - send_user "\nWARNING: task affinity not supported on this system\n" - exit 0 -} -send_user "\ntask affinity plugin installed\n" - -# -# Build a test program to report affinity by task -# -exec $bin_rm -f $file_prog -exec $bin_make -f /dev/null $file_prog -exec $bin_chmod 700 $file_prog - -# -# Create an allocation -# -set salloc_pid [spawn $salloc -N1 --verbose -t2 $bin_bash] -expect { - -re "Granted job allocation ($number)" { - set jobid $expect_out(1,string) - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } -} - -send "env |grep SLURM_JOB_CPUS_PER_NODE\n" -expect { - -re "SLURM_JOB_CPUS_PER_NODE=($number)" { - set available_cpus $expect_out(1,string) - } -} - -# -# Run a job step to get allocated processor count and affinity -# -expect -re $prompt -set mask 0 -set task_cnt 0 -send "$slaunch -n $available_cpus $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_cnt - set mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} - -# -# Run a job step with affinity -# -set expected_mask [ expr ((1 << $task_cnt) - 1) ] -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=rank $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} -if {$task_mask != $expected_mask} { - send_user "\nFAILURE: affinity mask inconsistency ($task_mask,$mask)\n" - set exit_code 1 -} - -# -# Run a job step with verbosity and all tasks on CPU 0 -# -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=verbose,map_cpu:0 $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} -if {$task_mask != $task_cnt} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$task_cnt)\n" - set exit_code 1 -} -set verbose_cnt 0 -send "$slaunch -n $available_cpus --cpu_bind=verbose,map_cpu:0 $file_prog\n" -expect { - -re "cpu_bind=MAP" { - incr verbose_cnt - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} -if {$verbose_cnt != $task_cnt} { - send_user "\nFAILURE: verbose messages count inconsisent ($verbose_cnt,$task_cnt)\n" - set exit_code 1 -} - -# -# Run all tasks all bound to the same CPU by specifying a map (for each CPU) -# -set cpu_cnt 0 -while {$cpu_cnt < $task_cnt} { - set mask_sum 0 - set mask [ expr 1 << $cpu_cnt ] - send "$slaunch -n $available_cpus 
--cpu_bind=map_cpu:$cpu_cnt $file_prog\n" - expect { - -re "TASK_ID:($number),MASK:($number)" { - incr mask_sum $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt - } - if {$mask_sum != $task_cnt * $mask} { - send_user "\nFAILURE: affinity mask inconsistent ($mask_sum,$task_cnt)\n" - set exit_code 1 - } - incr cpu_cnt 1 -} - -# -# Run all tasks all bound to the same CPU by specifying a mask (for each CPU) -# -set cpu_cnt 0 -while {$cpu_cnt < $task_cnt} { - set mask_sum 0 - set mask [ expr 1 << $cpu_cnt ] - set mstr [ dec2hex16 $mask] - send "$slaunch -n $available_cpus --cpu_bind=mask_cpu:$mstr $file_prog\n" - expect { - -re "TASK_ID:($number),MASK:($number)" { - incr mask_sum $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt - } - if {$mask_sum != $task_cnt * $mask} { - send_user "\nFAILURE: affinity mask inconsistent ($mask_sum,$task_cnt)\n" - set exit_code 1 - } - incr cpu_cnt 1 -} - -# -# Generate foward and reverse masks and maps -# -set cpu_cnt 0 -set fwd_mask "" -set fwd_map "" -set rev_mask "" -set rev_map "" -set alt_mask "" -set alt_map "" -set full_mask [ expr (1 << $task_cnt) - 1 ] -while {$cpu_cnt < $task_cnt} { - set mask_sum 0 - set mask [ expr 1 << $cpu_cnt ] - set mstr [ dec2hex16 $mask] - set fwd_mask "$fwd_mask,$mstr" - set fwd_map "$fwd_map,$cpu_cnt" - set rev_mask "$mstr,$rev_mask" - set rev_map "$cpu_cnt,$rev_map" - if { $cpu_cnt % 2 } { - set alt_mask "$mstr,$alt_mask" - set alt_map "$cpu_cnt,$alt_map" - } else { - set alt_mask "$alt_mask,$mstr" - set alt_map "$alt_map,$cpu_cnt" - } - if { $cpu_cnt == 0 } { - set fwd_mask "$mstr" - set fwd_map "$cpu_cnt" - set rev_mask "$mstr" - set rev_map "$cpu_cnt" - set alt_mask "$mstr" - set alt_map "$cpu_cnt" - } - incr cpu_cnt 1 -} - -send_user "\n" -send_user "full_mask: $full_mask\n" -send_user "fwd_map: $fwd_map\n" -send_user "fwd_mask: $fwd_mask\n" -send_user "rev_map: $rev_map\n" -send_user "rev_mask: $rev_mask\n" -send_user "alt_map: $alt_map\n" -send_user "alt_mask: $alt_mask\n" - -# -# Run all tasks bound to a different CPU by specifying a forward map -# -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=map_cpu:$fwd_map $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU by specifying a reverse map -# -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=map_cpu:$rev_map $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not 
responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU by specifying an alternating map -# -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=map_cpu:$alt_map $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU by specifying a forward mask -# -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=mask_cpu:$fwd_mask $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU by specifying a reverse mask -# -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=mask_cpu:$rev_mask $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU by specifying an alternating mask -# -set task_mask 0 -send "$slaunch -n $available_cpus --cpu_bind=mask_cpu:$alt_mask $file_prog\n" -expect { - -re "TASK_ID:($number),MASK:($number)" { - incr task_mask $expect_out(2,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Terminate the job, free the allocation -# -send "exit\n" -expect { - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - } - timeout { - send_user "\nFAILURE: salloc not responding or failure to recognize prompt\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$exit_code == 0} { - exec $bin_rm -f $file_prog - send_user "\nSUCCESS\n" -} else { - send_user "\nNOTE: This test can fail if the node configuration in slurm.conf \n" - send_user " (sockets, cores, threads) differs from the actual configuration\n" -} -exit $exit_code - 
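The pass/fail logic in test18.36 above, and in the companion test18.36.prog.c removed below, reduces to bit-mask arithmetic on the TASK_ID:<n>,MASK:<m> values each task prints: --cpu_bind=rank should yield a combined mask of (1 << task_cnt) - 1, and binding every task to one CPU via map_cpu or mask_cpu should make the reported masks sum to task_cnt * (1 << cpu). A minimal sketch of that arithmetic, illustrative only and not part of the test suite (the task count of 4 is an assumption):

/* Illustrative sketch of the mask bookkeeping behind test18.36's checks.
 * Each task reports its binding as an integer bit mask (bit i set means
 * CPU i is allowed), exactly as test18.36.prog.c below encodes it. */
#include <stdio.h>

int main(void)
{
	int task_cnt = 4;                     /* assumed number of CPUs/tasks */
	int full_mask = (1 << task_cnt) - 1;  /* expected for --cpu_bind=rank */
	int cpu, task, sum;

	for (cpu = 0; cpu < task_cnt; cpu++) {
		sum = 0;
		for (task = 0; task < task_cnt; task++)
			sum += (1 << cpu);    /* every task reports the same mask */
		printf("map_cpu:%d  reported-mask sum %d, expected %d\n",
		       cpu, sum, task_cnt * (1 << cpu));
	}
	printf("cpu_bind=rank expected combined mask 0x%x\n", full_mask);
	return 0;
}

The identity sum == task_cnt * (1 << cpu) is exactly what the expect script compares against after each map_cpu and mask_cpu run above.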
diff --git a/testsuite/expect/test18.36.prog.c b/testsuite/expect/test18.36.prog.c deleted file mode 100644 index bcd2c40ec..000000000 --- a/testsuite/expect/test18.36.prog.c +++ /dev/null @@ -1,79 +0,0 @@ -/*****************************************************************************\ - * test1.89.prog.c - Simple test program for SLURM regression test1.89. - * Reports SLURM task ID and the CPU mask, - * similar functionality to "taskset" command - ***************************************************************************** - * Copyright (C) 2005 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ -#define _GNU_SOURCE -#define __USE_GNU -#include <errno.h> -#include <sched.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include "../../config.h" - -static void _load_mask(cpu_set_t *mask) -{ - int rc; - -#ifdef SCHED_GETAFFINITY_THREE_ARGS - rc = sched_getaffinity((pid_t) 0, (unsigned int) sizeof(cpu_set_t), - mask); -#else - rc = sched_getaffinity((pid_t) 0, mask); -#endif - if (rc != 0) { - fprintf(stderr, "ERROR: sched_getaffinity: %s\n", - strerror(errno)); - exit(1); - } -} - -static int _mask_to_int(cpu_set_t *mask) -{ - int i, rc = 0; - for (i=0; i<CPU_SETSIZE; i++) { - if (CPU_ISSET(i, mask)) - rc += (1 << i); - } - return rc; -} - - -main (int argc, char **argv) -{ - char *task_str; - cpu_set_t mask; - int task_id; - - _load_mask(&mask); - if ((task_str = getenv("SLURM_PROCID")) == NULL) { - fprintf(stderr, "ERROR: getenv(SLURM_PROCID) failed\n"); - exit(1); - } - task_id = atoi(task_str); - printf("TASK_ID:%d,MASK:%u\n", task_id, _mask_to_int(&mask)); - exit(0); -} diff --git a/testsuite/expect/test18.37 b/testsuite/expect/test18.37 deleted file mode 100755 index ccdcfb461..000000000 --- a/testsuite/expect/test18.37 +++ /dev/null @@ -1,538 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of memory affinity support for NUMA systems. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
-# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -############################################################################ -source ./globals - -set test_id "18.37" -set exit_code 0 -set job_id 0 -set file_prog "test$test_id.prog" - -print_header $test_id - -# -# Test if memory affinity support is supported. -# -set affinity 0 -set fast_sched 0 -log_user 0 -spawn $scontrol show config -expect { - -re "FastSchedule *= ($number)" { - set fast_sched $expect_out(1,string) - exp_continue - } - -re "task/affinity" { - set affinity 1 - exp_continue - } - eof { - wait - } -} -if {$fast_sched > 1} { - send_user "\nWARNING: FastSchedule > 1 not compatable with this test\n" - exit 0 -} -spawn ls /usr/include/numa.h -expect { - -nocase "no such file" { - set affinity 0 - exp_continue - } - eof { - wait - } -} -log_user 1 -if {$affinity == 0} { - send_user "\nWARNING: memory affinity not supported on this system\n" - exit 0 -} -send_user "\ntask affinity plugin installed with numa support\n" - -# -# Build a test program to report affinity by task -# -exec $bin_rm -f $file_prog -exec $bin_cc $file_prog.c -o $file_prog -lnuma -exec $bin_chmod 700 $file_prog - -# -# Create an allocation -# -set salloc_pid [spawn $salloc -N1 --exclusive --verbose -t2 $bin_bash] -expect { - -re "Granted job allocation ($number)" { - set jobid $expect_out(1,string) - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } -} - -# -# Run a job step to get allocated processor count and affinity -# -expect -re $prompt -set task_cnt 0 -set full_mask 0 -send "$srun -c1 $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - set full_mask $expect_out(2,string) - incr task_cnt - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} - -# -# Run a job step with memory affinity -# -set cpu_mask 0 -set mem_mask 0 -send "$srun -c1 --mem_bind=rank $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr cpu_mask $expect_out(2,string) - incr mem_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} -if {$mem_mask != $full_mask} { - send_user "\nFAILURE: memory affinity mask inconsistency ($mem_mask,$full_mask)\n" - set exit_code 1 -} - 
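The check above compares the integer each task prints as MEM_MASK:<m> against the mask observed without binding; bit i of that integer stands for NUMA node i, which is how the companion test18.37.prog.c (removed further below) encodes numa_get_membind(). A minimal decoding sketch, illustrative only and not part of the test suite (the sample value 3, meaning nodes 0 and 1, is an assumption):

/* Illustrative only: decode the integer MEM_MASK reported by the test
 * program, where bit i set means NUMA node i is in the memory mask. */
#include <stdio.h>

int main(void)
{
	unsigned long mem_mask = 3;   /* assumed sample: NUMA nodes 0 and 1 */
	int node;

	printf("MEM_MASK:%lu ->", mem_mask);
	for (node = 0; node < (int)(8 * sizeof(mem_mask)); node++) {
		if (mem_mask & (1UL << node))
			printf(" node%d", node);
	}
	printf("\n");
	return 0;
}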
-# -# Run a job step with verbosity and all tasks using memory of CPU 0 -# -set task_mask 0 -set verbose_cnt 0 -send "$srun -c1 --mem_bind=verbose,map_mem:0 $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr task_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} -if {$task_mask != $task_cnt} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$task_cnt)\n" - set exit_code 1 -} -set verbose_cnt 0 -send "$srun -c1 --mem_bind=verbose,map_mem:0 $file_prog\n" -expect { - -re "mem_bind=MAP" { - incr verbose_cnt - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - } - -re $prompt -} -if {$verbose_cnt != $task_cnt} { - send_user "\nFAILURE: verbose messages count inconsisent ($verbose_cnt,$task_cnt)\n" - set exit_code 1 -} - -# -# Run all tasks all bound to the same CPU's memory (local CPU) -# -send "$srun -c1 --cpu_bind=rank --mem_bind=local $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - if {$expect_out(2,string) != $expect_out(3,string)} { - send_user "\nFAILURE: failed to use local memory for a task\n" - set exit_code 1 - } - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} - -# -# Run all tasks all bound to the same CPU's memory by specifying a map (for each CPU) -# -set cpu_cnt 0 -while {$cpu_cnt < $task_cnt} { - set mask_sum 0 - set mask [ expr 1 << $cpu_cnt ] - send "$srun -c1 --mem_bind=map_mem:$cpu_cnt $file_prog\n" - expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr mask_sum $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt - } - if {$mask_sum != $task_cnt * $mask} { - send_user "\nFAILURE: affinity mask inconsistent ($mask_sum,$task_cnt)\n" - set exit_code 1 - } - incr cpu_cnt 1 -} - -# -# Run all tasks all bound to the same CPU's memory by specifying a mask (for each CPU) -# -set cpu_cnt 0 -while {$cpu_cnt < $task_cnt} { - set mask_sum 0 - set mask [ expr 1 << $cpu_cnt ] - set mstr [ dec2hex16 $mask] - send "$srun -c1 --mem_bind=mask_mem:$mstr $file_prog\n" - expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr mask_sum $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt - } - if {$mask_sum != $task_cnt * $mask} { - send_user "\nFAILURE: affinity mask inconsistent ($mask_sum,$task_cnt)\n" - set exit_code 1 - } - incr cpu_cnt 1 -} - -# -# Generate foward and reverse masks and maps -# -set cpu_cnt 0 -set fwd_mask "" -set fwd_map "" -set rev_mask "" -set rev_map "" -set 
alt_mask "" -set alt_map "" -set full_mask [ expr (1 << $task_cnt) - 1 ] -while {$cpu_cnt < $task_cnt} { - set mask_sum 0 - set mask [ expr 1 << $cpu_cnt ] - set mstr [ dec2hex16 $mask] - set fwd_mask "$fwd_mask,$mstr" - set fwd_map "$fwd_map,$cpu_cnt" - set rev_mask "$mstr,$rev_mask" - set rev_map "$cpu_cnt,$rev_map" - if { $cpu_cnt % 2 } { - set alt_mask "$mstr,$alt_mask" - set alt_map "$cpu_cnt,$alt_map" - } else { - set alt_mask "$alt_mask,$mstr" - set alt_map "$alt_map,$cpu_cnt" - } - if { $cpu_cnt == 0 } { - set fwd_mask "$mstr" - set fwd_map "$cpu_cnt" - set rev_mask "$mstr" - set rev_map "$cpu_cnt" - set alt_mask "$mstr" - set alt_map "$cpu_cnt" - } - incr cpu_cnt 1 -} - -send_user "\n" -send_user "full_mask: $full_mask\n" -send_user "fwd_map: $fwd_map\n" -send_user "fwd_mask: $fwd_mask\n" -send_user "rev_map: $rev_map\n" -send_user "rev_mask: $rev_mask\n" -send_user "alt_map: $alt_map\n" -send_user "alt_mask: $alt_mask\n" - -# -# Run all tasks bound to a different CPU's memory by specifying a forward map -# -set task_mask 0 -send "$srun -c1 --mem_bind=map_mem:$fwd_map $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr task_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU's memory by specifying a reverse map -# -set task_mask 0 -send "$srun -c1 --mem_bind=map_mem:$rev_map $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr task_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU's memroy by specifying an alternating map -# -set task_mask 0 -send "$srun -c1 --mem_bind=map_mem:$alt_map $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr task_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU's memory by specifying a forward mask -# -set task_mask 0 -send "$srun -c1 --mem_bind=mask_mem:$fwd_mask $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr task_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - 
send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU's memory by specifying a reverse mask -# -set task_mask 0 -send "$srun -c1 --mem_bind=mask_mem:$rev_mask $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr task_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Run all tasks bound to a different CPU's memory by specifying an alternating mask -# -set task_mask 0 -send "$srun -c1 --mem_bind=mask_mem:$alt_mask $file_prog\n" -expect { - -re "TASK_ID:($number),CPU_MASK:($number),MEM_MASK:($number)" { - incr task_mask $expect_out(3,string) - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: srun not responding or failure to recognize prompt\n" - set exit_code 1 - exp_continue - } - -re $prompt -} -if {$task_mask != $full_mask} { - send_user "\nFAILURE: affinity mask inconsistent ($task_mask,$full_mask)\n" - set exit_code 1 -} - -# -# Terminate the job, free the allocation -# -send "exit\n" -expect { - -re "error" { - send_user "\nFAILURE: some error occurred\n" - set exit_code 1 - } - timeout { - send_user "\nFAILURE: salloc not responding or failure to recognize prompt\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$exit_code == 0} { - exec $bin_rm -f $file_prog - send_user "\nSUCCESS\n" -} else { - send_user "\nNOTE: This test can fail if the node configuration in slurm.conf \n" - send_user " (sockets, cores, threads) differs from the actual configuration\n" -} -exit $exit_code - diff --git a/testsuite/expect/test18.37.prog.c b/testsuite/expect/test18.37.prog.c deleted file mode 100644 index b24730c3f..000000000 --- a/testsuite/expect/test18.37.prog.c +++ /dev/null @@ -1,75 +0,0 @@ -/*****************************************************************************\ - * test1.90.prog.c - Simple test program for SLURM regression test1.90. - * Reports SLURM task ID, the CPU mask, and memory mask, - * similar functionality to "taskset" command - ***************************************************************************** - * Copyright (C) 2006 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. 
- * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -\*****************************************************************************/ -#define _GNU_SOURCE -#include <numa.h> -#include <stdio.h> -#include <stdlib.h> - -static void _load_cpu_mask(nodemask_t *cpu_mask) -{ - *cpu_mask = numa_get_run_node_mask(); -} - -static void _load_mem_mask(nodemask_t *mem_mask) -{ - *mem_mask = numa_get_membind(); -} - -static unsigned long _mask_to_int(nodemask_t *mask) -{ - int i; - unsigned long rc = 0; - for (i=0; i<NUMA_NUM_NODES; i++) { - if (nodemask_isset(mask, i)) - rc += (1 << i); - } - return rc; -} - -main (int argc, char **argv) -{ - char *task_str; - nodemask_t cpu_mask, mem_mask; - int task_id; - - if (numa_available() < 0) { - fprintf(stderr, "ERROR: numa support not available\n"); - exit(1); - } - - if ((task_str = getenv("SLURM_PROCID")) == NULL) { - fprintf(stderr, "ERROR: getenv(SLURM_PROCID) failed\n"); - exit(1); - } - task_id = atoi(task_str); - _load_cpu_mask(&cpu_mask); - _load_mem_mask(&mem_mask); - printf("TASK_ID:%d,CPU_MASK:%lu,MEM_MASK:%lu\n", - task_id, _mask_to_int(&cpu_mask), _mask_to_int(&mem_mask)); - exit(0); -} diff --git a/testsuite/expect/test18.38 b/testsuite/expect/test18.38 deleted file mode 100755 index a036d0dc2..000000000 --- a/testsuite/expect/test18.38 +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of slaunch --jobid option -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Christopher J. Morrone <morrone2@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -############################################################################ -source ./globals - -set test_id "18.38" -set exit_code 0 -set jobid 0 - -print_header $test_id -set timeout $max_job_delay - -# -# Run an salloc to grab a single node allocation. 
-# -set salloc_pid [spawn $salloc -N1 $slaunch $bin_sleep 600] -set salloc_sid $spawn_id -expect { - -re "Granted job allocation ($number)" { - set jobid $expect_out(1,string) - } - timeout { - send_user "\nFAILURE: salloc failed to grab an allocation " - send_user "in a timely manner.\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - exit 1 - } -} -if {$jobid == 0} { - send_user "\nFAILURE: salloc failed to allocate resources\n" - exit 1 -} - -# -# Now try to use the slaunch --jobid option to run in the already existing -# allocation. -# -set pattern "xcoetn46398vnk" -set got_pattern 0 -set slaunch_pid [spawn $slaunch --jobid $jobid echo $pattern] -expect { - -re "error" { - send_user "\nFAILURE: unexpected error occurred\n" - set exit_code 1 - } - -re $pattern { - set got_pattern 1 - exp_continue - } - timeout { - exec kill $slaunch_pid - exec kill -9 $slaunch_pid - } - eof { - wait - } -} - -if {$got_pattern == 0} { - send_user "\nFAILURE: unable to srun step under existing job allocation\n" - set exit_code 1 -} - -# -# Release the allocation by killing the first srun (really it kills the "sleep") -# -cancel_job $jobid -exec kill $salloc_pid -set spawn_id $salloc_sid -expect { - -re "error.*already completed" { - send_user "The \"already completed\" error is normal.\n" - exp_continue - } - timeout { - exec kill -9 $salloc_pid - } - eof { - wait - } -} - -if {$got_pattern == 1} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.4 b/testsuite/expect/test18.4 deleted file mode 100755 index ad913915e..000000000 --- a/testsuite/expect/test18.4 +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Confirm that a job executes with the proper task count (--tasks -# and --overcommit options). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.4" -set exit_code 0 -set job_id 0 - -print_header $test_id - -if { [test_xcpu] } { - set max_tasks 1 -} else { - set max_tasks 8 -} - -for {set inx 1} {$inx <= $max_tasks} {set inx [expr $inx * 2]} { - set tasks_set $inx - set tasks_get 0 - set timeout $max_job_delay - set salloc_pid [spawn $salloc -N1-4 -t1 $slaunch -n$tasks_set --overcommit $bin_id] - expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "uid=" { - incr tasks_get - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - if {$tasks_get < $tasks_set} { - send_user "\nFAILURE: Did not get proper number of tasks: " - send_user "$tasks_get < $tasks_set\n" - set exit_code 1 - } else { - send_user "test of $tasks_get tasks good\n\n" - } - wait - } - } -} - -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.7 b/testsuite/expect/test18.7 deleted file mode 100755 index 9673ce291..000000000 --- a/testsuite/expect/test18.7 +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of slaunch/slurmd debug mode (--slurmd-debug option). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.7" -set debug_get 0 -set debug_set 4 -set exit_code 0 -set job_id 0 - -print_header $test_id - -# -# Submit a slurm job that will bogus executable -# Debug debug level is 2, value set with --debug has that offset -# -set debug_offset [expr $debug_set - 2] -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch -d$debug_offset /invalid_executable ] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "debug level = ($number)" { - set debug_get $expect_out(1,string) - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } -} - -if {$debug_get != $debug_set} { - send_user "\nFAILURE: Did not log at proper level ($debug_get != $debug_set)\n" - set exit_code 1 -} -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.8 b/testsuite/expect/test18.8 deleted file mode 100755 index eaf972a76..000000000 --- a/testsuite/expect/test18.8 +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Confirm that slaunch buffering can be disabled (--unbuffered option). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. -############################################################################ -source ./globals - -set test_id "18.8" -set exit_code 0 -set job_id 0 -set matches 0 -set scratch_file "scratch.$test_id" - -print_header $test_id - -# -# Create a scratch file -# -exec $bin_touch $scratch_file - -# -# Submit a slurm job that will execute 'rm -i' -# The --unbuffered option will send the message which lacks a '\n' -# -set timeout $max_job_delay -set salloc_pid [spawn $salloc -t1 $slaunch --unbuffered $bin_rm -i $scratch_file] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "\[Rr\]emove .*\?" 
{ - set matches 1 - send "y\n" - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -# -# Confirm message send before '\n' (unbuffered). -# - -if {$matches != 1} { - send_user "\nFAILURE: slaunch --unbuffered option failure\n" - exec $bin_rm -f $scratch_file - set exit_code 1 -} -if {$exit_code == 0} { - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test18.9 b/testsuite/expect/test18.9 deleted file mode 100755 index 97ff034f1..000000000 --- a/testsuite/expect/test18.9 +++ /dev/null @@ -1,94 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of wait option (--wait option). -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
-############################################################################ -source ./globals - -set test_id "18.9" -set exit_code 0 -set file_in "test$test_id.input" -set job_id 0 -set matches 0 - -print_header $test_id - -# -# Delete left-over input script -# Build input script file -# -exec $bin_rm -f $file_in -make_bash_script $file_in " - if ((\$SLURM_PROCID == 1)) - then exit - fi - $bin_sleep 300 -" - -# -# Spawn tasks via slaunch and immediately exit task 1 only -# -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t2 $slaunch -n10 --overcommit -W2 $file_in] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "First task exited" { - send_user "This error is expected, no worries\n" - incr matches - exp_continue - } - -re "task\\\[0,2-9]: running" { - incr matches - exp_continue - } - timeout { - send_user "\nFAILURE: slaunch not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} - -if {$matches != 2} { - send_user "\nFAILURE: problem with slaunch wait option\n" - set exit_code 1 -} -if {$exit_code == 0} { - exec $bin_rm -f $file_in - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test19.1 b/testsuite/expect/test19.1 index 9d1b507a3..de04f2465 100755 --- a/testsuite/expect/test19.1 +++ b/testsuite/expect/test19.1 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test19.2 b/testsuite/expect/test19.2 index d34a086b0..c897eecc3 100755 --- a/testsuite/expect/test19.2 +++ b/testsuite/expect/test19.2 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test19.3 b/testsuite/expect/test19.3 index e0d1e95ce..41b700718 100755 --- a/testsuite/expect/test19.3 +++ b/testsuite/expect/test19.3 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test19.4 b/testsuite/expect/test19.4 index 0bc05190d..789e4a9aa 100755 --- a/testsuite/expect/test19.4 +++ b/testsuite/expect/test19.4 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
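The test18.x deletions above remove checks that exercised job steps through the slaunch command under an salloc allocation. The rest of this patch suggests slaunch is gone from the 1.3 command set, with srun covering direct step launch; under that assumption, a rough srun-based equivalent of the simplest of these checks (confirming debug output at increased verbosity) would look like the sketch below, reusing the suite's usual globals ($srun, $bin_printenv, slow_kill).

    set verbosity 0
    # launch one task with double verbosity and watch for debug output
    set srun_pid [spawn $srun -N1 -t1 -vv $bin_printenv SLURMD_NODENAME]
    expect {
        -re "debug:" { set verbosity 1; exp_continue }
        timeout { slow_kill $srun_pid; set exit_code 1 }
        eof { wait }
    }
    if {$verbosity != 1} {
        send_user "\nFAILURE: no debug output seen at -vv\n"
        set exit_code 1
    }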
diff --git a/testsuite/expect/test19.5 b/testsuite/expect/test19.5 index d0c45dcea..609541be0 100755 --- a/testsuite/expect/test19.5 +++ b/testsuite/expect/test19.5 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -67,9 +67,9 @@ exec $strigger --clear --quiet --user=$uid make_bash_script $file_in "sleep 60" set job_id 0 -spawn $srun --batch --output=/dev/null -t2 $file_in +spawn $sbatch --output=/dev/null -t2 $file_in expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test19.6 b/testsuite/expect/test19.6 index 8c264c87c..b2d17013b 100755 --- a/testsuite/expect/test19.6 +++ b/testsuite/expect/test19.6 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -74,9 +74,9 @@ make_bash_script $file_in_down "sleep 1" make_bash_script $file_in_reconfig "sleep 1" set job_id 0 -spawn $srun --batch --output=/dev/null -t1 $file_in +spawn $sbatch --output=/dev/null -t1 $file_in expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test19.7 b/testsuite/expect/test19.7 index 336cd0627..8dd3ddee3 100755 --- a/testsuite/expect/test19.7 +++ b/testsuite/expect/test19.7 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -68,9 +68,9 @@ make_bash_script $file_in "sleep 6" make_bash_script $file_in_idle "echo \$1 >$cwd/$file_out" set job_id 0 -spawn $srun --batch --output=/dev/null -t1 $file_in +spawn $sbatch --output=/dev/null -t1 $file_in expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test2.1 b/testsuite/expect/test2.1 index 986cf4d34..044ac6e1b 100755 --- a/testsuite/expect/test2.1 +++ b/testsuite/expect/test2.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test2.10 b/testsuite/expect/test2.10 index cf62a6e76..ac754a01b 100755 --- a/testsuite/expect/test2.10 +++ b/testsuite/expect/test2.10 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test2.11 b/testsuite/expect/test2.11 index 9457b7f2f..3ae4cdfdc 100755 --- a/testsuite/expect/test2.11 +++ b/testsuite/expect/test2.11 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -57,15 +57,15 @@ make_bash_script $file_in " # # Spawn a srun batch job that uses stdout/err and confirm their contents # -set srun_pid [spawn $srun --batch --output=$file_out -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test2.2 b/testsuite/expect/test2.2 index 52b9c479b..fd894a5e0 100755 --- a/testsuite/expect/test2.2 +++ b/testsuite/expect/test2.2 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test2.3 b/testsuite/expect/test2.3 index 6cfe92713..6d069fdce 100755 --- a/testsuite/expect/test2.3 +++ b/testsuite/expect/test2.3 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test2.4 b/testsuite/expect/test2.4 index 6a122d7f6..12142fac7 100755 --- a/testsuite/expect/test2.4 +++ b/testsuite/expect/test2.4 @@ -14,7 +14,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test2.5 b/testsuite/expect/test2.5 index e2c520bf8..b378fdd4a 100755 --- a/testsuite/expect/test2.5 +++ b/testsuite/expect/test2.5 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -80,7 +80,7 @@ set matches 0 set part_name "" spawn $scontrol show partition expect { - -re "PartitionName=($alpha_numeric)" { + -re "PartitionName=($alpha_numeric_under)" { set part_name $expect_out(1,string) incr matches exp_continue @@ -104,7 +104,7 @@ if {$matches < 1} { set matches 0 spawn $scontrol show partition $part_name expect { - -re "PartitionName=($alpha_numeric)" { + -re "PartitionName=($alpha_numeric_under)" { if {[string compare $expect_out(1,string) $part_name] == 0} { incr matches } @@ -131,7 +131,7 @@ set matches 0 set node_name "" spawn $scontrol show node expect { - -re "NodeName=($alpha_numeric)" { + -re "NodeName=($alpha_numeric_under)" { set node_name $expect_out(1,string) incr matches exp_continue @@ -155,7 +155,7 @@ if {$matches < 1} { set matches 0 spawn $scontrol show node $node_name expect { - -re "NodeName=($alpha_numeric)" { + -re "NodeName=($alpha_numeric_under)" { if {[string compare $expect_out(1,string) $node_name] == 0} { incr matches } diff --git a/testsuite/expect/test2.6 b/testsuite/expect/test2.6 index 877e2980c..49e3ab2aa 100755 --- a/testsuite/expect/test2.6 +++ b/testsuite/expect/test2.6 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test2.7 b/testsuite/expect/test2.7 index 62b163988..4bea531a7 100755 --- a/testsuite/expect/test2.7 +++ b/testsuite/expect/test2.7 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -52,9 +52,9 @@ make_bash_script $file_in "$scontrol pidinfo \$\$" # # Spawn a srun batch job that uses stdout/err and confirm their contents # -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err -t1 $file_in] +set srun_pid [spawn $sbatch --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } @@ -107,7 +107,7 @@ if {$rem_time > 60} { send_user "\nFAILURE: job remaining time is wrong $rem_time\n" set exit_code 1 } -if {$rem_time < 59} { +if {$rem_time < 58} { send_user "\nFAILURE: job remaining time seems too small\n" set exit_code 1 } diff --git a/testsuite/expect/test2.8 b/testsuite/expect/test2.8 index 182dc8127..39bf4363b 100755 --- a/testsuite/expect/test2.8 +++ b/testsuite/expect/test2.8 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -50,15 +50,15 @@ make_bash_script $file_in " # # Submit a couple jobs so we have something to work with # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t5 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -70,16 +70,16 @@ if {$job_id1 == 0} { exit 1 } -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t5 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" cancel_job $job_id1 - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test2.9 b/testsuite/expect/test2.9 index 411d040af..fc317de68 100755 --- a/testsuite/expect/test2.9 +++ b/testsuite/expect/test2.9 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test20.1 b/testsuite/expect/test20.1 index 70d7f1e6d..201f01815 100755 --- a/testsuite/expect/test20.1 +++ b/testsuite/expect/test20.1 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test20.2 b/testsuite/expect/test20.2 index 9caca3a3a..f07bdac76 100755 --- a/testsuite/expect/test20.2 +++ b/testsuite/expect/test20.2 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test20.3 b/testsuite/expect/test20.3 index e564f6f5c..50679cab7 100755 --- a/testsuite/expect/test20.3 +++ b/testsuite/expect/test20.3 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test20.4 b/testsuite/expect/test20.4 index 133ce152b..4a8acc783 100755 --- a/testsuite/expect/test20.4 +++ b/testsuite/expect/test20.4 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test18.6 b/testsuite/expect/test21.1 similarity index 63% rename from testsuite/expect/test18.6 rename to testsuite/expect/test21.1 index 2534fe058..e2e217a3e 100755 --- a/testsuite/expect/test18.6 +++ b/testsuite/expect/test21.1 @@ -1,16 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of slaunch verbose mode (-v option). +# Test sacctmgr --usage option. (initially same as --help) # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. +# Copyright (C) 2008 Lawrence Livermore National Security. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# Written by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -27,51 +27,49 @@ # # You should have received a copy of the GNU General Public License along # with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################ source ./globals -set test_id "18.6" +set test_id "21.1" set exit_code 0 -set job_id 0 -set verbosity 0 +set matches 0 +set not_support 0 print_header $test_id # -# Submit a slurm job that will execute 'id' on 1 node and over task_cnt tasks +# Report the sacctmgr usage format # -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch -vv $bin_printenv SLURMD_NODENAME] + +spawn $sacctmgr --usage expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) + -re "commit changes immediately" { + incr matches exp_continue } - -re "debug:" { - set verbosity 1 + -re "display tool version number" { + incr matches exp_continue } - -re "Unable to contact" { - send_user "\nFAILURE: slurm appears to be down\n" - exit 1 + -re "add cluster" { + incr matches + exp_continue } timeout { - send_user "\nFAILURE: slurm not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] + send_user "\nFAILURE: sacctmgr not responding\n" set exit_code 1 } eof { wait } } -if {$verbosity != 1} { - send_user "\nFAILURE: Did not report debug message verbosity on\n" + +if {$matches != 3} { + send_user "\nFAILURE: sacctmgr --usage failed ($matches)\n" set exit_code 1 } + if {$exit_code == 0} { send_user "\nSUCCESS\n" } diff --git a/testsuite/expect/test18.3 b/testsuite/expect/test21.2 similarity index 71% rename from testsuite/expect/test18.3 rename to testsuite/expect/test21.2 index 4311290c7..90fa0218a 100755 --- a/testsuite/expect/test18.3 +++ b/testsuite/expect/test21.2 @@ -1,17 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Confirm that slaunch reports a proper version number (--version -# option). +# Test sacctmgr --help option. 
# # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2006 The Regents of the University of California. +# Copyright (C) 2008 Lawrence Livermore National Security. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# Written by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -28,39 +27,49 @@ # # You should have received a copy of the GNU General Public License along # with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################ source ./globals -set test_id "18.3" +set test_id "21.2" set exit_code 0 -set version 0 +set matches 0 +set not_support 0 print_header $test_id # -# Report the slaunch version number +# Report the sacctmgr help format # -spawn $slaunch --version + +spawn $sacctmgr --help expect { - -re "(slurm ($number)\.($number)\.($number).*\n)" { - incr version + -re "commit changes immediately" { + incr matches + exp_continue + } + -re "display tool version number" { + incr matches + exp_continue + } + -re "add cluster" { + incr matches exp_continue } timeout { - send_user "\nFAILURE: slaunch not responding\n" + send_user "\nFAILURE: sacctmgr not responding\n" set exit_code 1 - exp_continue } eof { wait } } -if {$version != 1} { - send_user "\nFAILURE: Did not get proper slaunch version number\n" +if {$matches != 3} { + send_user "\nFAILURE: sacctmgr --help failed ($matches)\n" set exit_code 1 } + if {$exit_code == 0} { send_user "\nSUCCESS\n" } diff --git a/testsuite/expect/test18.1 b/testsuite/expect/test21.3 similarity index 77% rename from testsuite/expect/test18.1 rename to testsuite/expect/test21.3 index 884eb1118..55086164a 100755 --- a/testsuite/expect/test18.1 +++ b/testsuite/expect/test21.3 @@ -1,16 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Confirm that slaunch usage option works (--usage option). +# Test sacctmgr -V (display version) # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2006 The Regents of the University of California. +# Copyright (C) 2008 Lawrence Livermore National Security. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# Written by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -27,28 +27,29 @@ # # You should have received a copy of the GNU General Public License along # with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ############################################################################ source ./globals -set test_id "18.1" +set test_id "21.3" set exit_code 0 set matches 0 +set not_support 0 print_header $test_id # -# Report the slaunch usage format +# Report the sacctmgr version format # -spawn $slaunch --usage +spawn $sacctmgr -V expect { - -re "Usage: .*slaunch.*" { + -re "slurm \[0-9]*.\[0-9]*.\[0-9]*" { incr matches exp_continue } timeout { - send_user "\nFAILURE: slaunch not responding\n" + send_user "\nFAILURE: sacctmgr not responding\n" set exit_code 1 } eof { @@ -57,9 +58,10 @@ expect { } if {$matches != 1} { - send_user "\nFAILURE: slaunch failed to report usage format\n" + send_user "\nFAILURE: sacctmgr -V failed ($matches)\n" set exit_code 1 } + if {$exit_code == 0} { send_user "\nSUCCESS\n" } diff --git a/testsuite/expect/test18.2 b/testsuite/expect/test21.4 similarity index 74% rename from testsuite/expect/test18.2 rename to testsuite/expect/test21.4 index d56c73da0..fc088c5d2 100755 --- a/testsuite/expect/test18.2 +++ b/testsuite/expect/test21.4 @@ -1,16 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Confirm that slaunch help option works (--help option). +# Test sacctmgr version (display version) # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. ############################################################################ -# Copyright (C) 2006 The Regents of the University of California. +# Copyright (C) 2008 Lawrence Livermore National Security. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# Written by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -27,32 +27,29 @@ # # You should have received a copy of the GNU General Public License along # with SLURM; if not, write to the Free Software Foundation, Inc., -# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
############################################################################ source ./globals -set test_id "18.2" +set test_id "21.4" set exit_code 0 set matches 0 +set not_support 0 print_header $test_id # -# Report the slaunch help message +# Report the sacctmgr version keyword # -spawn $slaunch --help +spawn $sacctmgr version expect { - -re "Usage:" { - incr matches - exp_continue - } - -re "Help options" { + -re "slurm \[0-9]*.\[0-9]*.\[0-9]*" { incr matches exp_continue } timeout { - send_user "\nFAILURE: slaunch not responding\n" + send_user "\nFAILURE: sacctmgr not responding\n" set exit_code 1 } eof { @@ -60,10 +57,11 @@ expect { } } -if {$matches != 2} { - send_user "\nFAILURE: slaunch failed to report help message\n" +if {$matches != 1} { + send_user "\nFAILURE: sacctmgr version failed ($matches)\n" set exit_code 1 } + if {$exit_code == 0} { send_user "\nSUCCESS\n" } diff --git a/testsuite/expect/test21.5 b/testsuite/expect/test21.5 new file mode 100755 index 000000000..cee55d8f8 --- /dev/null +++ b/testsuite/expect/test21.5 @@ -0,0 +1,157 @@ +#!/usr/bin/expect +############################################################################ +# Purpose: Test of SLURM functionality +# sacctmgr add, list, and delete a cluster +# +# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "FAILURE: ..." otherwise with an explanation of the failure, OR +# anything else indicates a failure mode that must be investigated. +############################################################################ +# Copyright (C) 2008 Lawrence Livermore National Security. +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# Written by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. +# +# This file is part of SLURM, a resource management program. +# For details, see <http://www.llnl.gov/linux/slurm/>. +# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +############################################################################ +source ./globals + +set test_id "21.5" +set exit_code 0 +set amatches 0 +set lmatches 0 +set dmatches 0 +set not_support 0 +set add add +set lis list +set del delete +set mod modify +set clu cluster +set tc1 tCluster1 + + +print_header $test_id + +# +# Use sacctmgr to create a cluster +# +set sadd_pid [spawn $sacctmgr $add $clu $tc1] +expect { + -re "Adding Cluster" { + incr amatches + exp_continue + } + -re "Name *= $tc1" { + incr amatches + exp_continue + } + -re "Would you like to commit changes\\\? 
\\\(You have 30 seconds to decide\\\)" { + incr amatches + exp_continue + } + -re "\\\(N\\\/y\\\):" { + incr amatches + send "Y\r" + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr add not responding\n" + slow_kill $sadd_pid + set exit_code 1 + } + eof { + wait + } +} + +if {$amatches != 4} { + send_user "\nFAILURE: sacctmgr had a problem adding clusters\n" + set exit_code 1 +} + +# +# Use sacctmgr to list the addition of cluster +# +set slist_pid [spawn $sacctmgr $lis $clu $tc1] +expect { + -re "Name" { + incr lmatches + exp_continue + } + -re "$tc1" { + incr lmatches + exp_continue + send_user "\nFound $tc1 in database\n" + } + timeout { + send_user "\nFAILURE: sacctmgr list not responding\n" + slow_kill $slist_pid + set exit_code 1 + } + eof { + wait + } +} + +if {$lmatches != 2} { + send_user "\nFAILURE: sacctmgr had a problem listing clusters\n" + set exit_code 1 +} + +# +# Use sacctmgr to delete the test cluster +# +set sadel_pid [spawn $sacctmgr $del $clu $tc1] +expect { + -re "Deleting clusters" { + incr dmatches + exp_continue + } + -re "$tc1" { + incr dmatches + exp_continue + } + -re "Would you like to commit changes\\\? \\\(You have 30 seconds to decide\\\)" { + incr dmatches + exp_continue + } + -re "\\\(N\\\/y\\\):" { + incr dmatches + send "Y\r" + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr delete not responding\n" + slow_kill $sadel_pid + set exit_code 1 + } + eof { + wait + } +} + +if {$dmatches != 4} { + send_user "\nFAILURE: sacctmgr had a problem deleting cluster\n" + set exit_code 1 +} + +if {$exit_code == 0} { + send_user "\nSUCCESS\n" +} + +exit $exit_code diff --git a/testsuite/expect/test21.6 b/testsuite/expect/test21.6 new file mode 100755 index 000000000..8cf05b9e7 --- /dev/null +++ b/testsuite/expect/test21.6 @@ -0,0 +1,169 @@ +#!/usr/bin/expect +############################################################################ +# Purpose: Test of SLURM functionality +# sacctmgr add, list, and delete multiple clusters +# +# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "FAILURE: ..." otherwise with an explanation of the failure, OR +# anything else indicates a failure mode that must be investigated. +############################################################################ +# Copyright (C) 2008 Lawrence Livermore National Security. +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# Written by Joseph Donaghy <donaghy1@llnl.gov> +# LLNL-CODE-402394. +# +# This file is part of SLURM, a resource management program. +# For details, see <http://www.llnl.gov/linux/slurm/>. +# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+############################################################################ +source ./globals + +set test_id "21.6" +set exit_code 0 +set amatches 0 +set lmatches 0 +set dmatches 0 +set not_support 0 +set add add +set lis list +set del delete +set mod modify +set clu cluster +set tc1 tCluster1 +set tc2 tCluster2 +set tc3 tCluster3 + + +print_header $test_id + +# +# Use sacctmgr to create a cluster +# +set sadd_pid [spawn $sacctmgr $add $clu $tc1,$tc2,$tc3] +expect { + -re "Adding Cluster" { + incr amatches + exp_continue + } + -re "Name *= $tc1" { + incr amatches + exp_continue + } + -re "Would you like to commit changes\\\? \\\(You have 30 seconds to decide\\\)" { + incr amatches + exp_continue + } + -re "\\\(N\\\/y\\\):" { + incr amatches + send "Y\r" + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr add not responding\n" + slow_kill $sadd_pid + set exit_code 1 + } + eof { + wait + } +} + +if {$amatches != 4} { + send_user "\nFAILURE: sacctmgr had a problem adding clusters\n" + set exit_code 1 +} + +# +# Use sacctmgr to list the addition of cluster +# +set slist_pid [spawn $sacctmgr $lis $clu $tc1,$tc2,$tc3] +expect { + -re "Name" { + incr lmatches + exp_continue + } + -re "$tc1" { + incr lmatches + exp_continue + send_user "\nFound $tc1 in database\n" + } + -re "$tc2" { + incr lmatches + exp_continue + send_user "\nFound $tc1 in database\n" + } + -re "$tc3" { + incr lmatches + exp_continue + send_user "\nFound $tc1 in database\n" + } + timeout { + send_user "\nFAILURE: sacctmgr list not responding\n" + slow_kill $slist_pid + set exit_code 1 + } + eof { + wait + } +} + +if {$lmatches != 4} { + send_user "\nFAILURE: sacctmgr had a problem listing clusters\n" + set exit_code 1 +} + +# +# Use sacctmgr to delete the test cluster +# +set sadel_pid [spawn $sacctmgr $del $clu $tc1,$tc2,$tc3] +expect { + -re "Deleting clusters" { + incr dmatches + exp_continue + } + -re "$tc1" { + incr dmatches + exp_continue + } + -re "Would you like to commit changes\\\? \\\(You have 30 seconds to decide\\\)" { + incr dmatches + exp_continue + } + -re "\\\(N\\\/y\\\):" { + incr dmatches + send "Y\r" + exp_continue + } + timeout { + send_user "\nFAILURE: sacctmgr delete not responding\n" + slow_kill $sadel_pid + set exit_code 1 + } + eof { + wait + } +} + +if {$dmatches != 4} { + send_user "\nFAILURE: sacctmgr had a problem deleting cluster\n" + set exit_code 1 +} + +if {$exit_code == 0} { + send_user "\nSUCCESS\n" +} + +exit $exit_code diff --git a/testsuite/expect/test3.1 b/testsuite/expect/test3.1 index 14685b539..10d286e32 100755 --- a/testsuite/expect/test3.1 +++ b/testsuite/expect/test3.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test18.13 b/testsuite/expect/test3.10 similarity index 61% rename from testsuite/expect/test18.13 rename to testsuite/expect/test3.10 index ea0d78a53..4197f9dd9 100755 --- a/testsuite/expect/test18.13 +++ b/testsuite/expect/test3.10 @@ -1,20 +1,16 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Confirm that slaunch sets appropriate working directory (--workdir -# option). 
+# Test of "scontrol notify <jobid> <message>" # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR # anything else indicates a failure mode that must be investigated. -# -# NOTE: Due to symbolic links, we only look to match the last component -# of the directory name (e.g "cd /tmp; pwd" might return "/var/tmp"). ############################################################################ -# Copyright (C) 2002-2006 The Regents of the University of California. +# Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -35,49 +31,78 @@ ############################################################################ source ./globals -set test_id "18.13" -set exit_code 0 -set job_id 0 -set matches 0 -set tmp_dir "/tmp" +set test_id "3.10" +set authorized 1 +set exit_code 0 +set job_id 0 print_header $test_id # -# Change working directory on execute line and then print where jobs runs +# Start a test program # set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1 -t1 $slaunch --workdir=$tmp_dir $bin_pwd] +set srun_pid [spawn $srun -N1 -t1 -v $bin_sleep 10] +set srun_spawn_id $spawn_id expect { - -re "Granted job allocation ($number)" { + -re "launching ($number).0" { set job_id $expect_out(1,string) + } + timeout { + send_user "\nFAILURE: srun not responding\n" + slow_kill $srun_pid + exit 1 + } +} +if {$job_id == 0} { + send_user "\nFAILURE: srun failed to create job\n" + exit 1 +} + +# +# Send the program a message +# +spawn $scontrol notify $job_id TEST_NOTIFY +expect { + -re "Invalid user id" { + set authorized 0 exp_continue } - -re "$tmp_dir$end_of_line" { + eof { + wait + } +} +if {$authorized == 0} { + send_user "\nWARNING: You are not authorized to run this test\n" + cancel_job $job_id + exit 0 +} + +# +# Look for the message +# +set matches 0 +set spawn_id $srun_spawn_id +expect { + -re "TEST_NOTIFY" { set matches 1 exp_continue } - -re "Unable to contact" { - send_user "\nFAILURE: slurm appears to be down\n" - exit 1 - } timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 + send_user "\nFAILURE: srun not responding\n" + slow_kill $srun_pid + exit 1 } eof { wait } } - -if {$matches != 1} { - send_user "\nFAILURE: slaunch failed to change working directory\n" +if {$matches == 0} { + send_user "\nFAILURE: notification not received by srun\n" set exit_code 1 } + +cancel_job $job_id if {$exit_code == 0} { send_user "\nSUCCESS\n" } diff --git a/testsuite/expect/test3.2 b/testsuite/expect/test3.2 index 417a97459..8696ca456 100755 --- a/testsuite/expect/test3.2 +++ b/testsuite/expect/test3.2 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test3.3 b/testsuite/expect/test3.3 index ce6f8af16..9ccee66e7 100755 --- a/testsuite/expect/test3.3 +++ b/testsuite/expect/test3.3 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test3.4 b/testsuite/expect/test3.4 index d968f266d..dda1237d0 100755 --- a/testsuite/expect/test3.4 +++ b/testsuite/expect/test3.4 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -49,9 +49,9 @@ make_bash_script $file_in "$srun $bin_sleep 60" # # Submit a job so we have something to work with # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t1 --hold $file_in] +set srun_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 --hold $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test3.5 b/testsuite/expect/test3.5 index 1d2412a9d..3a41159da 100755 --- a/testsuite/expect/test3.5 +++ b/testsuite/expect/test3.5 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test3.6 b/testsuite/expect/test3.6 index a64073c4b..9b4896c65 100755 --- a/testsuite/expect/test3.6 +++ b/testsuite/expect/test3.6 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test3.7 b/testsuite/expect/test3.7 index 1e0d425c4..9d8b4f884 100755 --- a/testsuite/expect/test3.7 +++ b/testsuite/expect/test3.7 @@ -11,7 +11,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -122,9 +122,9 @@ exec $bin_chmod 700 $file_prog # # Submit two jobs to the same node # -set srun_pid [spawn $srun --batch -N1 -t1 --output=$file_out1 $file_prog_sh1] +set srun_pid [spawn $sbatch -N1 -t1 --output=$file_out1 $file_prog_sh1] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } @@ -171,9 +171,9 @@ if {[string compare $host_name ""] == 0} { exit 1 } # Submit another job to that same node -set srun_pid [spawn $srun --batch -N1 -t1 --output=$file_out2 $file_prog_sh2] +set srun_pid [spawn $sbatch -N1 -t1 --output=$file_out2 $file_prog_sh2] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } diff --git a/testsuite/expect/test3.7.prog.c b/testsuite/expect/test3.7.prog.c index a5e0280d3..489c34564 100644 --- a/testsuite/expect/test3.7.prog.c +++ b/testsuite/expect/test3.7.prog.c @@ -7,7 +7,7 @@ * Copyright (C) 2005 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test3.8 b/testsuite/expect/test3.8 index 0bd675e1f..1d5b4e949 100755 --- a/testsuite/expect/test3.8 +++ b/testsuite/expect/test3.8 @@ -14,7 +14,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -84,15 +84,15 @@ if { [test_bluegene] } { # Spawn a srun batch job that uses stdout/err and confirm their contents # set timeout $max_job_delay -set srun_pid [spawn $srun -N$node_cnt --batch --output=$file_out --error=$file_err -t1 $file_in] +set sbatch_pid [spawn $sbatch -N$node_cnt --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { @@ -184,15 +184,15 @@ set job_id 0 exec $bin_rm -f $file_flag_1 $file_flag_2 $file_flag_3 $file_flag_4 -set srun_pid [spawn $srun --no-requeue --batch --output=$file_out --error=$file_err -t1 $file_in] +set sbatch_pid [spawn $sbatch --no-requeue --output=$file_out --error=$file_err -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test3.9 b/testsuite/expect/test3.9 index e6822c606..5f9cc1f4c 100755 --- a/testsuite/expect/test3.9 +++ b/testsuite/expect/test3.9 @@ -10,7 +10,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
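The change repeated throughout these hunks replaces srun --batch submissions with sbatch and updates the matched output from "jobid N submitted" to "Submitted batch job N". The resulting submit-and-capture idiom, assuming the suite's usual globals ($sbatch, $number, slow_kill) and an existing batch script in $file_in, is:

    set job_id 0
    set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in]
    expect {
        -re "Submitted batch job ($number)" {
            set job_id $expect_out(1,string)
            exp_continue
        }
        timeout {
            send_user "\nFAILURE: sbatch not responding\n"
            slow_kill $sbatch_pid
            set exit_code 1
        }
        eof {
            wait
        }
    }
    if {$job_id == 0} {
        send_user "\nFAILURE: batch submission failed\n"
        set exit_code 1
    }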
diff --git a/testsuite/expect/test4.1 b/testsuite/expect/test4.1 index d3cfad3e2..3fd1144df 100755 --- a/testsuite/expect/test4.1 +++ b/testsuite/expect/test4.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.10 b/testsuite/expect/test4.10 index 6ce5f806e..9e9affad6 100755 --- a/testsuite/expect/test4.10 +++ b/testsuite/expect/test4.10 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.11 b/testsuite/expect/test4.11 index 83ac4fe7d..ca4f488c3 100755 --- a/testsuite/expect/test4.11 +++ b/testsuite/expect/test4.11 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.2 b/testsuite/expect/test4.2 index 4a078f4d8..b5727a15f 100755 --- a/testsuite/expect/test4.2 +++ b/testsuite/expect/test4.2 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.3 b/testsuite/expect/test4.3 index bc90494e8..43bd867f6 100755 --- a/testsuite/expect/test4.3 +++ b/testsuite/expect/test4.3 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.4 b/testsuite/expect/test4.4 index 414f455aa..49e714054 100755 --- a/testsuite/expect/test4.4 +++ b/testsuite/expect/test4.4 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.5 b/testsuite/expect/test4.5 index 8245c81dd..2c1764035 100755 --- a/testsuite/expect/test4.5 +++ b/testsuite/expect/test4.5 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. 
# # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.6 b/testsuite/expect/test4.6 index e129e28fd..357ed3570 100755 --- a/testsuite/expect/test4.6 +++ b/testsuite/expect/test4.6 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.7 b/testsuite/expect/test4.7 index 3489ee636..31e7e3be6 100755 --- a/testsuite/expect/test4.7 +++ b/testsuite/expect/test4.7 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.8 b/testsuite/expect/test4.8 index 240106feb..e6c9452bd 100755 --- a/testsuite/expect/test4.8 +++ b/testsuite/expect/test4.8 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test4.9 b/testsuite/expect/test4.9 index 00b4dc85f..c8599cd04 100755 --- a/testsuite/expect/test4.9 +++ b/testsuite/expect/test4.9 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test5.1 b/testsuite/expect/test5.1 index 0d542231a..df2e4c4b3 100755 --- a/testsuite/expect/test5.1 +++ b/testsuite/expect/test5.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test5.2 b/testsuite/expect/test5.2 index 1b1f31348..a4c7cd945 100755 --- a/testsuite/expect/test5.2 +++ b/testsuite/expect/test5.2 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test5.3 b/testsuite/expect/test5.3 index 10f8a41d5..85bc7d468 100755 --- a/testsuite/expect/test5.3 +++ b/testsuite/expect/test5.3 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). 
# Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test5.4 b/testsuite/expect/test5.4 index 3bb4112c6..9258116b3 100755 --- a/testsuite/expect/test5.4 +++ b/testsuite/expect/test5.4 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -48,15 +48,15 @@ make_bash_script $file_in "$srun $bin_sleep 90" # # Submit a couple of job so we have something to look at # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -68,15 +68,15 @@ if {$job_id1 == 0} { exit 1 } -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --hold -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --hold -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test5.5 b/testsuite/expect/test5.5 index 0a33aa2b2..34d224e23 100755 --- a/testsuite/expect/test5.5 +++ b/testsuite/expect/test5.5 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -49,15 +49,15 @@ make_bash_script $file_in "$srun $bin_sleep 10" # # Submit a couple of jobs so we have something to look at # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --job-name=$job_name1 -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=$job_name1 -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -69,15 +69,15 @@ if {$job_id1 == 0} { exit 1 } -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --job-name=$job_name2 --hold -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=$job_name2 --hold -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test5.6 b/testsuite/expect/test5.6 index c9d9c8805..b6b74ac73 100755 --- a/testsuite/expect/test5.6 +++ b/testsuite/expect/test5.6 @@ -11,7 +11,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -52,15 +52,15 @@ if { [test_bluegene] } { # # Submit a couple of job so we have something to look at # -set srun_pid [spawn $srun --batch -N$node_cnt --output=/dev/null --error=/dev/null -t5 $file_in] +set sbatch_pid [spawn $sbatch -N$node_cnt --output=/dev/null --error=/dev/null -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -72,15 +72,15 @@ if {$job_id1 == 0} { exit 1 } -set srun_pid [spawn $srun --batch -N$node_cnt --output=/dev/null --error=/dev/null --hold -t5 $file_in] +set sbatch_pid [spawn $sbatch -N$node_cnt --output=/dev/null --error=/dev/null --hold -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid cancel_job $job_id1 exit 1 } @@ -149,7 +149,7 @@ expect { # spawn $bin_id -un expect { - -re "($alpha_numeric)" { + -re "($alpha_numeric_under)" { set this_user $expect_out(1,string) exp_continue } @@ -159,7 +159,7 @@ expect { } spawn $squeue --format=%u --noheader --user=$this_user expect { - -re "($alpha_numeric)" { + -re "($alpha_numeric_under)" { if {[string compare $expect_out(1,string) $this_user]} { send_user "\nFAILURE: squeue user filter failure\n" set exit_code 1 @@ -190,7 +190,7 @@ expect { } spawn $squeue --format=%u --noheader --user=$this_uid expect { - -re "($alpha_numeric)" { + -re "($alpha_numeric_under)" { if {[string compare $expect_out(1,string) $this_user]} { send_user "\nFAILURE: squeue user filter failure\n" set exit_code 1 @@ -211,7 +211,7 @@ expect { # spawn $squeue --format=%u --noheader --user=$this_uid --node=dummy_name expect { - -re 
"($alpha_numeric)" { + -re "($alpha_numeric_under)" { send_user "\nFAILURE: squeue node filter failure\n" set exit_code 1 } @@ -227,7 +227,7 @@ expect { set node_name_set 0 spawn $squeue --format=%N --noheader --jobs=$job_id1 --states=RUNNING expect { - -re "($alpha_numeric)" { + -re "($alpha_numeric_under)" { set node_name $expect_out(1,string) set node_name_set 1 exp_continue @@ -243,7 +243,7 @@ expect { if {$node_name_set == 1} { spawn $squeue --format=%u --noheader --user=$this_uid --node=$node_name expect { - -re "($alpha_numeric)" { + -re "($alpha_numeric_under)" { set node_name_set 0 exp_continue } @@ -269,11 +269,12 @@ set partition1 "" set partition2 "" spawn $squeue --format=%P --noheader expect { - -re "($alpha_numeric)" { + -re "($alpha_numeric_under)" { if {![string compare $partition1 ""]} { set partition1 $expect_out(1,string) + exp_continue } - if {[string compare $expect_out(1,string) $partition1]} { + if {![string compare $expect_out(1,string) $partition1]} { set partition2 $expect_out(1,string) } exp_continue @@ -287,11 +288,11 @@ expect { } } if {[string compare partition2 ""]} { - set partition1 $partition2 + set partition2 $partition1 } spawn $squeue --format=%P --noheader --partitions=$partition1 expect { - -re "($alpha_numeric)" { + -re "($alpha_numeric_under)" { if {[string compare $expect_out(1,string) $partition1]} { send_user "\nFAILURE: squeue partition filter error\n" set exit_code 1 diff --git a/testsuite/expect/test5.7 b/testsuite/expect/test5.7 index 2a7ff3b5a..f5e0bb32c 100755 --- a/testsuite/expect/test5.7 +++ b/testsuite/expect/test5.7 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test5.8 b/testsuite/expect/test5.8 index d1fb63f7d..51ca309f2 100755 --- a/testsuite/expect/test5.8 +++ b/testsuite/expect/test5.8 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test6.1 b/testsuite/expect/test6.1 index c37b6bd94..b983bc731 100755 --- a/testsuite/expect/test6.1 +++ b/testsuite/expect/test6.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test6.10 b/testsuite/expect/test6.10 index f5efd0942..ccbac2166 100755 --- a/testsuite/expect/test6.10 +++ b/testsuite/expect/test6.10 @@ -14,7 +14,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -71,17 +71,17 @@ if { $got_login == 0 } { make_bash_script $file_in "$srun $bin_sleep $max_job_delay" # -# Spawn a srun batch job +# Spawn a sbatch job # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --hold -t5 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --hold -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test6.11 b/testsuite/expect/test6.11 index 64b30d755..c32482491 100755 --- a/testsuite/expect/test6.11 +++ b/testsuite/expect/test6.11 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -38,18 +38,20 @@ set job_id 0 print_header $test_id +make_bash_script "id_script" { $bin_id } + # # Submit a job so we have something to work with # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --hold -t1 $bin_id] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --hold -t1 id_script] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { @@ -121,6 +123,7 @@ expect { } if {$exit_code == 0} { + exec $bin_rm -f id_script send_user "\nSUCCESS\n" } exit $exit_code diff --git a/testsuite/expect/test6.12 b/testsuite/expect/test6.12 index bb10a827a..191eef1b8 100755 --- a/testsuite/expect/test6.12 +++ b/testsuite/expect/test6.12 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -73,18 +73,18 @@ done " # -# Spawn a srun batch job with arguments +# Spawn a sbatch job with arguments # set timeout $max_job_delay -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err -t2 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -158,15 +158,15 @@ file delete $file_out $file_err make_bash_script $file_in "$bin_sleep 500" set job_id 0 -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err -t2 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -236,15 +236,15 @@ set job_id 0 file delete $file_out $file_err make_bash_script $file_in "$srun $bin_sleep 500" -set srun_pid [spawn $srun --batch --output=$file_out --error=$file_err -t2 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test6.13 b/testsuite/expect/test6.13 index b93941685..9f41d43af 100755 --- a/testsuite/expect/test6.13 +++ b/testsuite/expect/test6.13 @@ -55,15 +55,15 @@ make_bash_script $file_in " # Submit a job so we have something to work with # set job_id 0 -set srun_pid [spawn $srun --batch --output=$file_out -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 exp_continue } @@ -71,7 +71,10 @@ expect { wait } } - +if {$job_id == 0} { + send_user "\nFAILURE: job submission failed\n" + exit 1 +} if {[wait_for_job $job_id RUNNING] != 0} { send_user "\nFAILURE: error starting job $job_id\n" cancel_job $job_id @@ -161,15 +164,15 @@ make_bash_script $file_in " # Submit a job so we have something to work with # set job_id 0 -set srun_pid [spawn $srun --batch --output=$file_out -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 exp_continue } diff --git a/testsuite/expect/test6.2 b/testsuite/expect/test6.2 index 68a95184d..770dccd3b 100755 --- a/testsuite/expect/test6.2 +++ b/testsuite/expect/test6.2 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test6.3 b/testsuite/expect/test6.3 index 3ee399fd0..f9146f72c 100755 --- a/testsuite/expect/test6.3 +++ b/testsuite/expect/test6.3 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -47,15 +47,15 @@ make_bash_script $file_in "$srun $bin_sleep 600" # # Submit a job so we have something to work with # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test6.4 b/testsuite/expect/test6.4 index 8065dd1f6..a8eae405e 100755 --- a/testsuite/expect/test6.4 +++ b/testsuite/expect/test6.4 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -47,15 +47,15 @@ make_bash_script $file_in "$srun $bin_sleep 10" # # Submit a couple of jobs so we have something to work with # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --job-name=job1 -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=job1 -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { @@ -67,15 +67,15 @@ if {$job_id1 == 0} { exit 1 } -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --job-name=job2 -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=job2 -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test6.5 b/testsuite/expect/test6.5 index 17d819808..52675a609 100755 --- a/testsuite/expect/test6.5 +++ b/testsuite/expect/test6.5 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -49,15 +49,15 @@ make_bash_script $file_in "$srun $bin_sleep 10" # Submit a couple of jobs so we have something to work with # set timeout 10 -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -69,15 +69,15 @@ if {$job_id1 == 0} { exit 1 } -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test6.6 b/testsuite/expect/test6.6 index eb800069d..ff9a2aa6c 100755 --- a/testsuite/expect/test6.6 +++ b/testsuite/expect/test6.6 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test6.7 b/testsuite/expect/test6.7 index 269e76c38..bac8f26fe 100755 --- a/testsuite/expect/test6.7 +++ b/testsuite/expect/test6.7 @@ -10,7 +10,7 @@ # Copyright (C) 2002-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -54,22 +54,25 @@ make_bash_script $file_in " # Submit a job so we have something to work with # set job_id 0 -set srun_pid [spawn $srun --batch --output=$file_out -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { wait } } - +if {$job_id == 0} { + send_user "\nFAILURE: error submitting job\n" + exit 1 +} if {[wait_for_job $job_id RUNNING] != 0} { send_user "\nFAILURE: error starting job $job_id\n" cancel_job $job_id @@ -160,15 +163,15 @@ make_bash_script $file_in " # Submit a job so we have something to work with # set job_id 0 -set srun_pid [spawn $srun --batch --output=$file_out -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill srun_pid + slow_kill sbatch_pid set exit_code 1 exp_continue } diff --git a/testsuite/expect/test6.8 b/testsuite/expect/test6.8 index 006285d45..ec2bb097f 100755 --- a/testsuite/expect/test6.8 +++ b/testsuite/expect/test6.8 @@ -13,7 +13,7 @@ # Copyright (C) 2002 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -48,17 +48,17 @@ print_header $test_id make_bash_script $file_in "$srun $bin_sleep $max_job_delay" # -# Spawn a couple of srun batch jobs +# Spawn a couple of sbatch jobs # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --job-name=job.$test_id --hold -t5 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=job.$test_id --hold -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { @@ -70,15 +70,15 @@ if {$job_id1 == 0} { exit 1 } -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --job-name=job.$test_id -t5 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=job.$test_id -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test6.9 b/testsuite/expect/test6.9 index b9c7c699a..c1651e765 100755 --- a/testsuite/expect/test6.9 +++ b/testsuite/expect/test6.9 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -52,17 +52,17 @@ make_bash_script $file_in " " # -# Spawn srun batch job +# Spawn sbatch job # -set srun_pid [spawn $srun --batch --output=/dev/null --error=/dev/null --job-name=job.$test_id -t5 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --job-name=job.$test_id -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test7.1 b/testsuite/expect/test7.1 index b4313b88f..ab4923b0f 100755 --- a/testsuite/expect/test7.1 +++ b/testsuite/expect/test7.1 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -42,48 +42,50 @@ set prio3 -1 print_header $test_id +make_bash_script "pwd_script" { $bin_pwd } + # -# Spawn three srun batch job, one held +# Spawn three sbatch job, one held # -set srun_pid [spawn $srun --batch -t1 --output=/dev/null --error=/dev/null $bin_pwd] +set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null pwd_script] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { wait } } -set srun_pid [spawn $srun --batch -t1 --output=/dev/null --error=/dev/null $bin_pwd] +set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null pwd_script] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { wait } } -set srun_pid [spawn $srun --batch -t1 --output=/dev/null --error=/dev/null --hold $bin_pwd] +set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null --hold pwd_script] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id3 $expect_out(1,string) exp_continue } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { @@ -169,6 +171,7 @@ if {$prio3 != 0} { } if {$exit_code == 0} { + exec $bin_rm -f pwd_script send_user "\nSUCCESS\n" } exit $exit_code diff --git a/testsuite/expect/test7.10 b/testsuite/expect/test7.10 index 67df1ba83..bcfb40eb2 100755 --- a/testsuite/expect/test7.10 +++ b/testsuite/expect/test7.10 @@ -11,7 +11,7 @@ # Copyright (C) 2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test7.2 b/testsuite/expect/test7.2 index 0002e1e7b..6827f4075 100755 --- a/testsuite/expect/test7.2 +++ b/testsuite/expect/test7.2 @@ -1,8 +1,8 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of PMI functions available via API library. Uses srun and -# slaunch. Tests --pmi-threads option in both commands. +# Test of PMI functions available via API library. Tests +# --pmi-threads option in srun command. # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR @@ -11,7 +11,7 @@ # Copyright (C) 2005-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
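Unlike "srun --batch", sbatch submits a batch script rather than an arbitrary executable, which is why test6.11 and test7.1 above now wrap $bin_id and $bin_pwd with the suite's make_bash_script helper and delete the wrapper script on success. The real helper is defined in the suite's globals file; the version below is only a rough stand-in, under the assumption that it does nothing more than write a one-line bash script and make it executable.

# Rough stand-in for the suite's make_bash_script helper (assumed behavior)
proc make_bash_script { script_name script_body } {
	set fd [open $script_name w]
	puts $fd "#!/bin/bash"
	puts $fd $script_body
	close $fd
	exec chmod 700 $script_name
}

# Used as in test7.1 above; /bin/pwd stands in for the $bin_pwd global
make_bash_script "pwd_script" "/bin/pwd"
# ...then: spawn $sbatch -t1 --output=/dev/null --error=/dev/null pwd_script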
@@ -66,19 +66,19 @@ if { [test_bluegene] } { } } -# First test uses srun based task launch +# Test uses srun based task launch # Adjust time limits as needed for large task counts */ # times are here vv set timeout [expr $max_job_delay + 60] set srun_pid [spawn $srun -l -N$node_cnt -n$task_cnt -O -t1 --threads=1 $file_prog_get] expect { -re "FAILURE" { - send_user "\nFAILURE: some error occured\n" + send_user "\nFAILURE: some error occurred\n" set exit_code 1 exp_continue } -re "error" { - send_user "\nFAILURE: some error occured\n" + send_user "\nFAILURE: some error occurred\n" set exit_code 1 exp_continue } @@ -92,39 +92,6 @@ expect { } } -# Second test uses slaunch based task launch -# Adjust time limits as needed for large task counts */ -# times are here vv -set timeout [expr $max_job_delay + 60] -set job_id 0 -set salloc_pid [spawn $salloc -N$node_cnt -t1 $slaunch -n8 --overcommit -l --pmi-threads=3 $file_prog_get] -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - exp_continue - } - -re "FAILURE" { - send_user "\nFAILURE: some error occured\n" - set exit_code 1 - exp_continue - } - -re "error" { - send_user "\nFAILURE: some error occured\n" - set exit_code 1 - exp_continue - } - timeout { - send_user "\nFAILURE: salloc not responding\n" - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - set exit_code 1 - } - eof { - wait - } -} if {$exit_code == 0} { send_user "\nSUCCESS\n" exec $bin_rm -f $file_prog_get diff --git a/testsuite/expect/test7.2.prog.c b/testsuite/expect/test7.2.prog.c index 9c18cca82..e3b2e34df 100644 --- a/testsuite/expect/test7.2.prog.c +++ b/testsuite/expect/test7.2.prog.c @@ -4,7 +4,7 @@ * Copyright (C) 2005-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test7.3 b/testsuite/expect/test7.3 index 340e33280..2fe04f5df 100755 --- a/testsuite/expect/test7.3 +++ b/testsuite/expect/test7.3 @@ -12,7 +12,7 @@ # Copyright (C) 2004 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -55,7 +55,8 @@ exec $bin_chmod 700 $io_prog send_user "slurm_dir is $slurm_dir\n" if {![test_aix]} { - exec $bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${slurm_dir}/include -Wl,--rpath=${slurm_dir}/lib -L${slurm_dir}/lib -lslurm + send_user "$bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${slurm_dir}/include -Wl,--rpath=${slurm_dir}/lib -L${slurm_dir}/lib -lslurm\n" + exec $bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${slurm_dir}/include -Wl,--rpath=${slurm_dir}/lib -L${slurm_dir}/lib -lslurm } else { send_user "$bin_cc ${test_prog}.c -Wl,-brtl -g -pthread -o ${test_prog} -I${slurm_dir}/include -L${slurm_dir}/lib -lslurm -lntbl\n" exec $bin_cc ${test_prog}.c -Wl,-brtl -g -pthread -o ${test_prog} -I${slurm_dir}/include -L${slurm_dir}/lib -lslurm -lntbl diff --git a/testsuite/expect/test7.3.io.c b/testsuite/expect/test7.3.io.c index e42c80a18..0f3917aea 100644 --- a/testsuite/expect/test7.3.io.c +++ b/testsuite/expect/test7.3.io.c @@ -9,7 +9,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test7.3.prog.c b/testsuite/expect/test7.3.prog.c index a29698bb3..0089321c1 100644 --- a/testsuite/expect/test7.3.prog.c +++ b/testsuite/expect/test7.3.prog.c @@ -8,7 +8,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. @@ -144,7 +144,7 @@ int main (int argc, char *argv[]) launch->user_managed_io = true; /* This is the key to using "user managed" IO */ - if (slurm_step_launch(ctx, launch, NULL) != SLURM_SUCCESS) { + if (slurm_step_launch(ctx, "", launch, NULL) != SLURM_SUCCESS) { slurm_perror("slurm_step_launch"); rc = 1; goto done; diff --git a/testsuite/expect/test7.4 b/testsuite/expect/test7.4 index 72f130d30..6ffda3dd6 100755 --- a/testsuite/expect/test7.4 +++ b/testsuite/expect/test7.4 @@ -12,7 +12,7 @@ # Copyright (C) 2004-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> and Dong Ang <dahn@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test7.4.prog.c b/testsuite/expect/test7.4.prog.c index d836d51e1..fc6881546 100644 --- a/testsuite/expect/test7.4.prog.c +++ b/testsuite/expect/test7.4.prog.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Dong Ang <dahn@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
diff --git a/testsuite/expect/test7.5 b/testsuite/expect/test7.5 deleted file mode 100755 index 44a87f73c..000000000 --- a/testsuite/expect/test7.5 +++ /dev/null @@ -1,304 +0,0 @@ -#!/usr/bin/expect -############################################################################ -# Purpose: Test of SLURM functionality -# Test of TotalView operation with slaunch, with and without bulk -# transfer. -# -# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR -# "WARNING: ..." with an explanation of why the test can't be made, OR -# "FAILURE: ..." otherwise with an explanation of the failure, OR -# anything else indicates a failure mode that must be investigated. -############################################################################ -# Copyright (C) 2004-2006 The Regents of the University of California. -# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). -# Written by Morris Jette <jette1@llnl.gov> and Dong Ang <dahn@llnl.gov> -# UCRL-CODE-226842. -# -# This file is part of SLURM, a resource management program. -# For details, see <http://www.llnl.gov/linux/slurm/>. -# -# SLURM is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free -# Software Foundation; either version 2 of the License, or (at your option) -# any later version. -# -# In addition, as a special exception, the copyright holders give permission -# to link the code of portions of this program with the OpenSSL library under -# certain conditions as described in each individual source file, and -# distribute linked combinations including the two. You must obey the GNU -# General Public License in all respects for all of the code used other than -# OpenSSL. If you modify file(s) with this exception, you may extend this -# exception to your version of the file(s), but you are not obligated to do -# so. If you do not wish to do so, delete this exception statement from your -# version. If you delete this exception statement from all source files in -# the program, then also delete it here. -# -# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS -# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more -# details. -# -# You should have received a copy of the GNU General Public License along -# with SLURM; if not, write to the Free Software Foundation, Inc., -# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-############################################################################ -source ./globals - -set test_id "7.5" -set exit_code 0 -set test_prog "test$test_id.prog" - -print_header $test_id - -# -# Test for existence of mpi compiler and totalview -# -if {[info exists mpicc] == 0} { - send_user "\nWARNING: mpicc not defined, can't perform mpi testing\n" - exit 0 -} -if {[file executable $mpicc] == 0} { - send_user "\nWARNING: $mpicc does not exists\n" - exit 0 -} -if {[info exists totalviewcli] == 0} { - send_user "\nWARNING: totalviewcli not defined, can't perform mpi testing\n" - exit 0 -} -if {[file executable $totalviewcli] == 0} { - send_user "\nWARNING: $totalviewcli does not exists\n" - exit 0 -} -if {[test_front_end] != 0} { - send_user "\nWARNING: This test is incompatable with front-end systems\n" - exit 0 -} -if {[test_aix] == 1} { - send_user "WARNING: Test is incompatible with AIX\n" - exit 0 -} - -# -# Put desired SLURM install directory at head of search path for bulk launch -# command to work (runs "slaunch" without path) -# -global env -set env(PATH) "$slurm_dir/bin:$env(PATH)" -send_user "\n $env(PATH)\n" - -# -# Delete left-over program and rebuild it -# -exec $bin_rm -f $test_prog ${test_prog}.o ${test_prog}.TVD.v3breakpoints -exec $mpicc -o $test_prog ${test_prog}.c - -# -# Create the slurm job allocation -# -set job_id 0 -set matches 0 -set no_capability 0 -set timeout $max_job_delay -set salloc_pid [spawn $salloc -N1-2 -t1 $bin_bash] -set salloc_spawn_id $spawn_id -expect { - -re "Granted job allocation ($number)" { - set job_id $expect_out(1,string) - } - timeout { - if {$job_id != 0} { - cancel_job $job_id - } - slow_kill [expr 0 - $salloc_pid] - } -} -if {$job_id == 0} { - send_user "\nFAILURE: failed to create job allocation\n" - exit 1 -} - -# Note this appears as a single argv value to totalviewcli -set bulk "set issue_dgo false; dset TV::bulk_launch_enabled true; dset TV::bulk_launch_string {$slaunch --jobid=%J -N%N -n%N -w`awk -F. 
\'BEGIN {ORS=\",\"} {if (NR==%N) ORS=\"\"; print \$1}\' %t1` -l -i /dev/null %B/tvdsvr%K -callback_host %H -callback_ports %L -set_pws %P -verbosity %V -working_directory %D %F}" -set no_bulk "set issue_dgo false; dset TV::bulk_launch_enabled false" - -# -# Now run totalviewcli (command line interface to TotalView) -# -send_user "======================================================================\n" -send_user "======================= Run without bulk transfer ===================\n" -send_user "======================================================================\n" -spawn $totalviewcli -verbosity info -e $no_bulk $slaunch -a --jobid $job_id -v -i /dev/null -n4 --overcommit $test_prog -expect { - -re "d1.<>" { - if {$matches == 0} { - incr matches - send "G\n" - } - if {$no_capability != 0} { - send "quit\n" - } - exp_continue - } - -re "cannot open shared object" { - send_user "\nWARNING: Set LD_LIBRARY_PATH environment variable " - send_user "to include this object's directory\n" - exp_continue - } - -re "Do you want to stop the job now?.*:" { - incr matches - send "yes\n" - exp_continue - } - -re "Attached to parallel task ($number)" { - incr matches - exp_continue - } - -re "Loaded MPI support.*" { - exec sleep 2 - send "G\n" - exp_continue - } - -re "I just received msg from Rank" { - incr matches - exp_continue - } - -re "elan_init: No capability, can't continue" { - incr no_capability - exp_continue - } - -re "Process 1 has exited.*" { - incr matches - exec sleep 2 - send "quit\n" - exp_continue - } - -re "Do you really wish to exit TotalView?" { - incr matches - send "yes\n" - exp_continue - } - timeout { - send_user "\nFAILURE: totalviewcli not responding\n" - set exit_code 1 - } - eof { - wait - } -} -if {$no_capability != 0} { - send_user "\nWARNING: Unable to run test with present configuration\n" - exit 0 -} -if {$matches != 12} { - send_user "\nFAILURE: totalviewcli operation matches $matches of 12\n" - send_user "Remove your ~/.totalview directory and try again\n" - set exit_code 1 -} else { - send_user "\nSo far, so good...\n\n\n" -} - -# -# Now run totalviewcli (command line interface to TotalView) -# -set matches 0 -set no_capability 0 -send_user "======================================================================\n" -send_user "===================== Run with bulk transfer ========================\n" -send_user "======================================================================\n" -spawn $totalviewcli -verbosity info -e $bulk $slaunch -a --jobid $job_id -i /dev/null -n4 --overcommit $test_prog -expect { - -re "d1.<>" { - if {$matches == 0} { - incr matches - send "G\n" - } - if {$no_capability != 0} { - send "quit\n" - } - exp_continue - } - -re "cannot open shared object" { - send_user "\nWARNING: Set LD_LIBRARY_PATH environment variable " - send_user "to include this object's directory\n" - exp_continue - } - -re "Do you want to stop the job now?.*:" { - incr matches - send "yes\n" - exp_continue - } - -re "Attached to parallel task ($number)" { - incr matches - exp_continue - } - -re "Loaded MPI support.*" { - exec sleep 2 - send "G\n" - exp_continue - } - -re "I just received msg from Rank" { - incr matches - exp_continue - } - -re "elan_init: No capability, can't continue" { - incr no_capability - exp_continue - } - -re "Process 1 has exited.*" { - incr matches - exec sleep 2 - send "quit\n" - exp_continue - } - -re "Do you really wish to exit TotalView?" 
{ - incr matches - send "yes\n" - exp_continue - } - timeout { - send_user "\nFAILURE: totalviewcli not responding\n" - set exit_code 1 - } - eof { - wait - } -} - -# -# Check the results of bulk transfer test above -# -if {$no_capability != 0} { - send_user "\nWARNING: Unable to run test with present configuration\n" -} else { - if {$matches != 12} { - send_user "\nFAILURE: totalviewcli operation matches $matches of 12\n" - send_user "Remove your ~/.totalview directory and try again\n" - set exit_code 1 - } -} - -# -# terminate the job -# -cancel_job $job_id -set spawn_id $salloc_spawn_id -expect { - -re "Job allocation $job_id has been revoked." { - send "exit\n" - exp_continue - } - timeout { - send_user "\nFAILURE: salloc did not terminate as expected\n" - set exit_code 1 - } - eof { - wait - } -} - - -if {$exit_code == 0} { - exec $bin_rm -f $test_prog ${test_prog}.o ${test_prog}.TVD.b3breakpoints - send_user "\nSUCCESS\n" -} -exit $exit_code diff --git a/testsuite/expect/test7.5.prog.c b/testsuite/expect/test7.5.prog.c deleted file mode 100644 index fb7d5323b..000000000 --- a/testsuite/expect/test7.5.prog.c +++ /dev/null @@ -1,62 +0,0 @@ -/*****************************************************************************\ - * test7.5.prog.c - Test of TotalView operation with SLURM. - ***************************************************************************** - * Copyright (C) 2004 The Regents of the University of California. - * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). - * Written by Dong Ang <dahn@llnl.gov> - * UCRL-CODE-226842. - * - * This file is part of SLURM, a resource management program. - * For details, see <http://www.llnl.gov/linux/slurm/>. - * - * SLURM is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY - * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along - * with SLURM; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-\*****************************************************************************/ - -#include <stdio.h> -#include <mpi.h> - -#define COMM_TAG 1000 - -static void pass_its_neighbor(const int rank, const int size, const int* buf) -{ - MPI_Request request[2]; - MPI_Status status[2]; - - MPI_Irecv((void *)buf, 1, MPI_INT, ((rank+size-1)%size), COMM_TAG, - MPI_COMM_WORLD, &request[0]); - MPI_Isend((void *)&rank, 1, MPI_INT, ((rank+1)%size), COMM_TAG, - MPI_COMM_WORLD, &request[1]); - MPI_Waitall(2, request, status); - - fprintf(stdout, "Rank[%d] I just received msg from Rank %d\n", - rank, *buf); -} - -int main(int argc, char * argv[]) -{ - int size, rank,buf; - - MPI_Init(&argc, &argv); - MPI_Comm_size(MPI_COMM_WORLD, &size); - MPI_Comm_rank(MPI_COMM_WORLD, &rank); - - buf = rank; /* we only pass rank */ - - pass_its_neighbor(rank, size, &buf); - - MPI_Finalize(); - return 0; -} - diff --git a/testsuite/expect/test7.6 b/testsuite/expect/test7.6 index 84eb5b7af..dee627741 100755 --- a/testsuite/expect/test7.6 +++ b/testsuite/expect/test7.6 @@ -11,7 +11,7 @@ # Copyright (C) 2004-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> and Dong Ang <dahn@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -98,7 +98,7 @@ exec $mpicc -o $test_prog ${test_prog}.c # # Create a slurm job step # -set salloc_pid [spawn $salloc -N1-2 -t1 $slaunch --input=none -n4 --overcommit $test_prog] +set salloc_pid [spawn $salloc -N1-2 -t1 $srun --input=none -n4 --overcommit ./$test_prog] expect { -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) diff --git a/testsuite/expect/test7.6.prog.c b/testsuite/expect/test7.6.prog.c index 6f6d7ef93..f2afcc281 100644 --- a/testsuite/expect/test7.6.prog.c +++ b/testsuite/expect/test7.6.prog.c @@ -4,7 +4,7 @@ * Copyright (C) 2004-2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Dong Ang <dahn@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test7.7 b/testsuite/expect/test7.7 index a594d00c1..c9e2969a5 100755 --- a/testsuite/expect/test7.7 +++ b/testsuite/expect/test7.7 @@ -12,7 +12,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -136,6 +136,12 @@ if {$agg_time != 0} { } #send_user "\nAuthKey=$auth_key\nEPort=$e_port\n" +if { [test_bluegene] } { + set is_bluegene 1 +} else { + set is_bluegene 0 +} + # # Submit a job to work with # @@ -147,15 +153,15 @@ make_bash_script $file_in " exit 123" set job_id1 0 set job_id2 0 -set srun_pid [spawn $srun -N1-6 --output=$file_out --comment=test --batch -t1 $file_in] +set sbatch_pid [spawn $sbatch -N1-1024 --output=$file_out --comment=test -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id1 $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - catch {exec $bin_kill -KILL $srun_pid} + send_user "\nFAILURE: sbatch not responding\n" + catch {exec $bin_kill -KILL $sbatch_pid} set exit_code 1 } eof { @@ -174,7 +180,7 @@ set timeout 60 exec $bin_rm -f $test_prog exec $bin_make -f /dev/null $test_prog set success 0 -set moab_pid [spawn $test_prog $auth_key $control_addr $e_port $job_id1 $sched_port] +set moab_pid [spawn $test_prog $auth_key $control_addr $e_port $job_id1 $sched_port $is_bluegene] set master_id $spawn_id expect { -re "READY" { @@ -192,15 +198,15 @@ expect { } } -set srun_pid [spawn $srun --output=/dev/null --batch -t1 $file_in] +set sbatch_pid [spawn $sbatch --output=/dev/null -t1 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id2 $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - catch {exec $bin_kill -KILL $srun_pid} + send_user "\nFAILURE: sbatch not responding\n" + catch {exec $bin_kill -KILL $sbatch_pid} set exit_code 1 } eof { diff --git a/testsuite/expect/test7.7.prog.c b/testsuite/expect/test7.7.prog.c index 438a76100..cc8a53685 100644 --- a/testsuite/expect/test7.7.prog.c +++ b/testsuite/expect/test7.7.prog.c @@ -2,9 +2,10 @@ * test7.7.prog.c - Test of sched/wiki2 plugin ***************************************************************************** * Copyright (C) 2006-2007 The Regents of the University of California. + * Copyright (C) 2008 Lawrence Livermore National Security. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -33,9 +34,11 @@ #include "./test7.7.crypto.c" +#define _DEBUG 0 + /* global variables */ char *auth_key, *control_addr; -int e_port, sched_port; +int e_port, is_bluegene, sched_port; long job_id; static int _conn_wiki_port(char *host, int port) @@ -333,6 +336,7 @@ static void _modify_job(long my_job_id) "TS=%u AUTH=root DT=CMD=MODIFYJOB ARG=%ld " /* "MINSTARTTIME=55555 " */ /* "JOBNAME=foo " */ + /* "RFEATURES=big " */ /* "PARTITION=pdebug " */ /* "NODES=2 " */ /* "DEPEND=afterany:3 " */ @@ -349,7 +353,7 @@ static void _notify_job(long my_job_id) snprintf(out_msg, sizeof(out_msg), "TS=%u AUTH=root DT=CMD=NOTIFYJOB ARG=%ld " - "MSG=this is a test", + "MSG=this_is_a_test", (uint32_t) now, my_job_id); _xmit(out_msg); } @@ -382,9 +386,9 @@ static void _job_will_run(long my_job_id) char out_msg[128]; snprintf(out_msg, sizeof(out_msg), - "TS=%u AUTH=root DT=CMD=JOBWILLRUN ARG=%ld %s", + "TS=%u AUTH=root DT=CMD=JOBWILLRUN ARG=JOBID=%ld,%s", (uint32_t) now, my_job_id, - "TASKLIST="); /* put desired node list here */ + ""); /* put available node list here */ _xmit(out_msg); } @@ -394,16 +398,28 @@ static void _initialize(void) char out_msg[128]; snprintf(out_msg, sizeof(out_msg), - "TS=%u AUTH=root DT=CMD=INITIALIZE ARG=USEHOSTEXP=T EPORT=%u", + "TS=%u AUTH=root DT=CMD=INITIALIZE ARG=USEHOSTEXP=N EPORT=%u", (uint32_t) now, e_port); _xmit(out_msg); } +static void _single_msg(void) +{ + time_t now = time(NULL); + char out_msg[1024]; + + snprintf(out_msg, sizeof(out_msg), + "TS=%u AUTH=root DT=CMD=%s", + (uint32_t) now, + "JOBWILLRUN ARG=JOBID=65537,bgl[000x733] JOBID=65539,bgl[000x733] JOBID=65538,bgl[000x733]"); + _xmit(out_msg); +} + int main(int argc, char * argv[]) { - if (argc < 4) { + if (argc < 6) { printf("Usage: %s, auth_key control_addr e_port " - "job_id sched_port\n", argv[0]); + "job_id sched_port is_bluegene\n", argv[0]); exit(1); } @@ -412,20 +428,27 @@ int main(int argc, char * argv[]) e_port = atoi(argv[3]); job_id = atoi(argv[4]); sched_port = atoi(argv[5]); - printf("auth_key=%s control_addr=%s e_port=%d job_id=%d sched_port=%d\n", - auth_key, control_addr, e_port, job_id, sched_port); - + is_bluegene = atoi(argv[6]); + printf("auth_key=%s control_addr=%s e_port=%d job_id=%d sched_port=%d " + "is_bluegene=%d\n", + auth_key, control_addr, e_port, job_id, sched_port, is_bluegene); + +#if _DEBUG + _single_msg(); +#else _initialize(); _get_jobs(); _get_nodes(); _job_will_run(job_id); _modify_job(job_id); - /* _notify_job(65544); */ _get_jobs(); _start_job(job_id); _get_jobs(); - _suspend_job(job_id); - _resume_job(job_id); + if (!is_bluegene) { + _suspend_job(job_id); + _resume_job(job_id); + } + _notify_job(job_id); _signal_job(job_id); if (e_port) _event_mgr(); @@ -438,7 +461,7 @@ int main(int argc, char * argv[]) sleep(15); _start_job(job_id); _get_jobs(); - +#endif printf("SUCCESS\n"); exit(0); } diff --git a/testsuite/expect/test7.8 b/testsuite/expect/test7.8 index 8f5c0a134..17c66fa5e 100755 --- a/testsuite/expect/test7.8 +++ b/testsuite/expect/test7.8 @@ -12,7 +12,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
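The test7.7.prog.c hunks above adjust the plain-text commands the sched/wiki2 test sends to slurmctld: JOBWILLRUN now passes ARG=JOBID=<id>,<nodelist>, NOTIFYJOB's MSG value avoids embedded spaces, INITIALIZE sends USEHOSTEXP=N, and suspend/resume are skipped on Blue Gene systems. Each command is a single line of key=value text handed to the program's _xmit() helper; the sketch below mirrors the C snprintf() calls with a placeholder job id and node list (both values borrowed from the _single_msg example above), without attempting to reproduce _xmit()'s framing.

# Compose the command portion of a wiki2 JOBWILLRUN message (values are placeholders)
set job_id   65537
set nodelist "bgl\[000x733\]"
set now      [clock seconds]
set msg "TS=$now AUTH=root DT=CMD=JOBWILLRUN ARG=JOBID=$job_id,$nodelist"
puts $msg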
diff --git a/testsuite/expect/test7.8.prog.c b/testsuite/expect/test7.8.prog.c index d88ca1245..4f4011966 100644 --- a/testsuite/expect/test7.8.prog.c +++ b/testsuite/expect/test7.8.prog.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test7.9 b/testsuite/expect/test7.9 index f8eb960b2..e58d5243a 100755 --- a/testsuite/expect/test7.9 +++ b/testsuite/expect/test7.9 @@ -11,7 +11,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -102,6 +102,7 @@ make_bash_script $file_in " for ((i=0; i<$iterations; i++)) ; do $srun $file_prog done + $bin_echo 'fini' " set job_id 0 @@ -134,6 +135,7 @@ if {[wait_for_job $job_id "DONE"] != 0} { if {[wait_for_file $file_out] != 0} { exit 1 } +set fini 0 set matches 0 spawn $bin_cat $file_out expect { @@ -141,6 +143,10 @@ expect { incr matches exp_continue } + -re "fini" { + set fini 1 + exp_continue + } timeout { send_user "\nFAILURE: /bin/cat not responding\n" set exit_code 1 @@ -149,6 +155,10 @@ expect { wait } } +if {$fini != 1} { + send_user "\nFAILURE: script never completed\n" + set exit_code 1 +} if {$matches != 0} { set tot [expr $iterations + 1] if {$matches <= 1} { diff --git a/testsuite/expect/test7.9.prog.c b/testsuite/expect/test7.9.prog.c index 11e844c78..dc01a5e24 100644 --- a/testsuite/expect/test7.9.prog.c +++ b/testsuite/expect/test7.9.prog.c @@ -4,7 +4,7 @@ * Copyright (C) 2006 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -32,7 +32,8 @@ #include <time.h> #include <unistd.h> -#define _DEBUG 0 +#define _DEBUG 0 +#define _EXTREME_DEBUG 0 main (int argc, char **argv) { @@ -51,16 +52,17 @@ main (int argc, char **argv) printf("FAILED: File descriptor %d is open\n", i); #if _DEBUG { - char data[64]; - int j; - size_t data_size; - printf(" st_mode: 0%o\n",(int) buf.st_mode); printf(" st_uid: %d\n", (int) buf.st_uid); printf(" st_gid: %d\n", (int) buf.st_gid); printf(" st_size: %d\n", (int) buf.st_size); printf(" st_ino: %d\n", (int) buf.st_ino); printf(" st_dev: %d\n", (int) buf.st_dev); +#if _EXTREME_DEBUG + { + char data[64]; + int j; + size_t data_size; lseek(i, 0, SEEK_SET); data_size = read(i, data, 64); @@ -71,6 +73,8 @@ main (int argc, char **argv) for (j=0; j<data_size; j++) printf(" data[%d]:0x%x\n", j, data[j]); } + } +#endif } #endif } diff --git a/testsuite/expect/test8.1 b/testsuite/expect/test8.1 index ea2c05e4f..30ef18be3 100755 --- a/testsuite/expect/test8.1 +++ b/testsuite/expect/test8.1 @@ -1,7 +1,7 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of Blue Gene specific srun command line options +# Test of Blue Gene specific sbatch command line options # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR @@ -10,7 +10,7 @@ # Copyright (C) 2004 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -58,14 +58,14 @@ exec echo "$bin_sleep 1 &" >>$file_in exec $bin_chmod 700 $file_in # -# Submit a slurm job using various srun options for blue gene +# Submit a slurm job using various sbatch options for blue gene # set timeout $max_job_delay for {set inx 0} {$inx < $cycle_count} {incr inx} { - set srun_pid [spawn $srun -N$num_nodes --geometry=$geometry --no-rotate --conn-type=$connection --batch --output=/dev/null --error=/dev/null $file_in] + set sbatch_pid [spawn $sbatch -N$num_nodes --geometry=$geometry --no-rotate --conn-type=$connection --output=/dev/null --error=/dev/null $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) set inx $cycle_count exp_continue @@ -76,8 +76,8 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} { exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { @@ -133,15 +133,15 @@ cancel_job $job_id # set job_id 0 set timeout $max_job_delay -set srun_pid [spawn $srun -N$num_nodes --geometry=$geometry --no-rotate --conn-type=$connection --batch --output=/dev/null --error=/dev/null $file_in] +set sbatch_pid [spawn $sbatch -N$num_nodes --geometry=$geometry --no-rotate --conn-type=$connection --output=/dev/null --error=/dev/null $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test8.2 b/testsuite/expect/test8.2 index bcb8488af..64f630a12 100755 --- 
a/testsuite/expect/test8.2 +++ b/testsuite/expect/test8.2 @@ -1,7 +1,7 @@ #!/usr/bin/expect ############################################################################ # Purpose: Test of SLURM functionality -# Test of Blue Gene specific srun environment variables +# Test of Blue Gene specific sbatch environment variables # # Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR # "FAILURE: ..." otherwise with an explanation of the failure, OR @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -48,9 +48,9 @@ if {[test_bluegene] == 0} { # Set target environment variables # global env -set env(SLURM_CONN_TYPE) torus -set env(SLURM_GEOMETRY) 1x1x1 -set env(SLURM_NO_ROTATE) 1 +set env(SBATCH_CONN_TYPE) torus +set env(SBATCH_GEOMETRY) 1x1x1 +set env(SBATCH_NO_ROTATE) 1 # # Delete left-over input script files @@ -65,15 +65,15 @@ exec $bin_chmod 700 $file_in # Spawn a job via srun using these environment variables # set timeout $max_job_delay -set srun_pid [spawn $srun --batch --output=$file_out --error=/dev/null $file_in] +set sbatch_pid [spawn $sbatch --output=$file_out --error=/dev/null $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test8.3 b/testsuite/expect/test8.3 index 54d30905f..f5ab3ebbe 100755 --- a/testsuite/expect/test8.3 +++ b/testsuite/expect/test8.3 @@ -13,7 +13,7 @@ # Copyright (C) 2004 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -59,9 +59,9 @@ exec $bin_chmod 700 $file_in # Submit a slurm job for blue gene with geometry of 2x1x1 # set timeout $max_job_delay -set srun_pid [spawn $srun --geometry=2x1x1 --no-rotate --nodes=1k-1k --batch --output=/dev/null --error=/dev/null $file_in] +set sbatch_pid [spawn $sbatch --geometry=2x1x1 --no-rotate --nodes=1k-1k --output=/dev/null --error=/dev/null $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } @@ -70,8 +70,8 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { @@ -114,9 +114,9 @@ if {$job_id != 0} { # Submit a slurm job for blue gene with geometry of 1x2x1 # set jobid 0 -set srun_pid [spawn $srun --geometry=1x2x1 --no-rotate --nodes=1k-1k --batch --output=/dev/null --error=/dev/null $file_in] +set sbatch_pid [spawn $sbatch --geometry=1x2x1 --no-rotate --nodes=1k-1k --output=/dev/null --error=/dev/null $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } @@ -125,8 +125,8 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { @@ -166,9 +166,9 @@ if {$job_id != 0} { # Submit a slurm job for blue gene with geometry of 1x1x2 # set jobid 0 -set srun_pid [spawn $srun --geometry=1x1x2 --no-rotate --nodes=1k-1k --batch --output=/dev/null --error=/dev/null $file_in] +set sbatch_pid [spawn $sbatch --geometry=1x1x2 --no-rotate --nodes=1k-1k --output=/dev/null --error=/dev/null $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } @@ -177,8 +177,8 @@ expect { exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/test8.4 b/testsuite/expect/test8.4 index 720398524..e96045fd0 100755 --- a/testsuite/expect/test8.4 +++ b/testsuite/expect/test8.4 @@ -14,7 +14,7 @@ # Copyright (C) 2004 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
@@ -85,13 +85,13 @@ exec echo "$bin_echo TEST_COMPLETE" >>$file_in exec $bin_chmod 700 $file_in # -# Spawn a srun batch job that uses stdout/err and confirm their contents +# Spawn an sbatch job that uses stdout/err and confirm their contents # set timeout $max_job_delay set no_start 0 -set srun_pid [spawn $srun -N1-1 --batch --output=$file_out --error=$file_err -t4 $file_in] +set sbatch_pid [spawn $sbatch -N1-1 --output=$file_out --error=$file_err -t4 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } @@ -104,8 +104,8 @@ expect { exit 1 } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid set exit_code 1 } eof { diff --git a/testsuite/expect/test8.4.prog.c b/testsuite/expect/test8.4.prog.c index 25ae9da0f..c5b704cc8 100644 --- a/testsuite/expect/test8.4.prog.c +++ b/testsuite/expect/test8.4.prog.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Dong Ang <dahn@llnl.gov> - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test8.5 b/testsuite/expect/test8.5 index 6db642750..b86b03569 100755 --- a/testsuite/expect/test8.5 +++ b/testsuite/expect/test8.5 @@ -10,7 +10,7 @@ # Copyright (C) 2006-2007 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
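test8.4 above submits a batch job whose script appends a TEST_COMPLETE marker and then verifies the stdout/stderr files. A rough, standalone C equivalent of waiting for that marker to appear in the job's --output file is sketched below; the file name, marker and timeout are placeholders.

    /* Sketch: poll a batch job's --output file until a completion marker
     * appears, the same idea test8.4 uses when it checks the job's
     * stdout/stderr contents.  Placeholders throughout. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int wait_for_marker(const char *path, const char *marker, int max_secs)
    {
        for (int i = 0; i < max_secs; i++) {
            FILE *fp = fopen(path, "r");
            if (fp) {
                char line[512];
                while (fgets(line, sizeof(line), fp)) {
                    if (strstr(line, marker)) {
                        fclose(fp);
                        return 0;    /* marker found */
                    }
                }
                fclose(fp);
            }
            sleep(1);
        }
        return -1;    /* timed out */
    }

    int main(void)
    {
        if (wait_for_marker("test8.4.output", "TEST_COMPLETE", 60))
            fprintf(stderr, "output file incomplete\n");
        return 0;
    }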
@@ -62,15 +62,15 @@ set timeout [expr $max_job_delay + $sleep_time] # make a 512 cnode block -spawn $srun --batch --output=$file_out --error=$file_err -t2 -N512-512 $file_in +set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 -N512-512 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - kill_srun + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { @@ -127,15 +127,15 @@ if {$exit_code != 0} { # make a 128 cnode block -spawn $srun --batch --output=$file_out --error=$file_err -t2 -N128-128 $file_in +set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 -N128-128 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - kill_srun + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { @@ -195,15 +195,15 @@ if {$matches != 1} { # make a 32 cnode block -spawn $srun --batch --output=$file_out --error=$file_err -t2 -N32-32 $file_in +set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 -N32-32 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { set job_id $expect_out(1,string) exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - kill_srun + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid exit 1 } eof { @@ -226,7 +226,8 @@ if {[wait_for_job $job_id "DONE"] != 0} { set matches 0 spawn $scontrol show job $job_id expect { - -re "BP_List=$alpha_numeric\[$number-$number\]" { +# Could be one or multiple node cards (e.g. "bgl000[0]" or "bgl000[0-1]") + -re "BP_List=$alpha_numeric\[$number" { incr matches exp_continue } diff --git a/testsuite/expect/test8.6 b/testsuite/expect/test8.6 index 1834f6d98..91c0ef46e 100755 --- a/testsuite/expect/test8.6 +++ b/testsuite/expect/test8.6 @@ -10,7 +10,7 @@ # Copyright (C) 2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Danny Auble <da@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
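test8.5 above waits for each block job to reach DONE before inspecting scontrol output, and the test8.6 hunks that follow poll squeue for the number of jobs still queued under the test's job name. A rough C counterpart of that squeue polling loop is sketched below; the job name is a placeholder and error handling is minimal.

    /* Sketch of the polling done by wait_for_all_jobs in test8.6 (below):
     * count the jobs squeue still lists under a given name and wait for
     * the count to reach zero.  Illustrative only. */
    #include <stdio.h>
    #include <unistd.h>

    static int jobs_remaining(const char *job_name)
    {
        char cmd[256], line[256];
        int count = 0;
        FILE *fp;

        /* -h suppresses the header, -o %j prints only the job name */
        snprintf(cmd, sizeof(cmd), "squeue -h -o %%j -n %s", job_name);
        fp = popen(cmd, "r");
        if (fp == NULL)
            return -1;
        while (fgets(line, sizeof(line), fp))
            count++;
        pclose(fp);
        return count;
    }

    int main(void)
    {
        int remaining;

        while ((remaining = jobs_remaining("test8.6.input")) > 0) {
            printf("%d jobs remaining\n", remaining);
            sleep(15);
        }
        return (remaining < 0) ? 1 : 0;
    }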
@@ -39,9 +39,9 @@ set matches 0 set 32node_block_cnt 16 set 128node_block_cnt 8 set 512node_block_cnt 8 -set 1knode_block_cnt 8 -set 4knode_block_cnt 8 -set 8knode_block_cnt 8 +set 1knode_block_cnt 8 +set 4knode_block_cnt 8 +set 8knode_block_cnt 8 set 16knode_block_cnt 8 set 32knode_block_cnt 8 @@ -55,33 +55,25 @@ if {[test_bluegene] == 0} { exit $exit_code } -# -# Delete left-over input script files -# Build input script file -# -exec $bin_rm -f $file_in -exec echo "#!$bin_bash" >$file_in -exec echo "$bin_sleep $sleep_time " >>$file_in -exec $bin_chmod 700 $file_in - -# +make_bash_script $file_in "$bin_sleep $sleep_time" set timeout [expr $max_job_delay + $sleep_time] -# make a bunch of 32 cnode blocks - +# make a bunch of blocks of the specified size (node_cnt) proc run_batch_jobs { node_cnt job_cnt file_in } { - global srun number kill_srun + global sbatch srun number kill_srun set start_cnt 0 for {set inx 0} {$inx < $job_cnt} {incr inx} { - set srun_pid [spawn $srun --batch --output=/dev/null -t2 -N$node_cnt-$node_cnt $file_in] + set sbatch_pid [spawn $sbatch --output=/dev/null -t5 -N$node_cnt-$node_cnt $file_in] expect { - -re "More processors requested than permitted" - { + -re "More processors requested than permitted" { + send_user "This error was expected, no worries\n" + return -1 + } + -re "Requested node configuration is not available" { send_user "This error was expected, no worries\n" return -1 - } - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { incr start_cnt exp_continue } @@ -90,8 +82,8 @@ proc run_batch_jobs { node_cnt job_cnt file_in } { exp_continue } timeout { - send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + send_user "\nFAILURE: sbatch not responding\n" + slow_kill $sbatch_pid return 0 } eof { @@ -109,12 +101,11 @@ proc run_batch_jobs { node_cnt job_cnt file_in } { # Wait up to 900 seconds for all jobs to terminate # Return 0 if all jobs done, remainin job count otherwise proc wait_for_all_jobs { job_name } { - global squeue bin_sleep + global scancel squeue bin_sleep file_in set last_matches 0 send_user "Waiting for all jobs to terminate\n" - for {set inx 0} {$inx < 60} {incr inx} { - exec $bin_sleep 15 + for {set inx 0} {$inx < 600} {incr inx} { set matches 0 log_user 0 spawn $squeue -o %j @@ -123,6 +114,13 @@ proc wait_for_all_jobs { job_name } { incr matches exp_continue } + -re "error" { + set matches -1 + } + timeout { + send_user "No response from squeue\n" + set matches -1 + } eof { wait } @@ -130,23 +128,35 @@ proc wait_for_all_jobs { job_name } { log_user 1 if {$matches == 0} { send_user "All jobs complete\n" - return 0 + break } - send_user " $matches jobs remaining\n" - if {$matches == $last_matches} { - return $matches + if {$matches > 0} { + send_user " $matches jobs remaining\n" +# Moab can slow throughput down a lot, +# so don't return here +# if {$matches == $last_matches} { +# send_user "Running jobs hung\n" +# break +# } +# set last_matches $matches + exec $bin_sleep 15 } - set last_matches $matches } + exec $scancel -n $file_in return $matches } -if {[run_batch_jobs 32 $32node_block_cnt $file_in ] != 1} { +# +# Run the jobs here. We can be reasonably sure of running +# 512 cnode jobs (one midplane). 
Other sizes may not be supported +# due to architecture (I/O node count) and/or slurm configuration +# +if {[run_batch_jobs 32 $32node_block_cnt $file_in ] == 0} { send_user "\nFAILURE: 32 cnodes can't be created\n" set exit_code 1 } -if {[run_batch_jobs 128 $32node_block_cnt $file_in ] != 1} { +if {[run_batch_jobs 128 $128node_block_cnt $file_in ] == 0} { send_user "\nFAILURE: 128 cnodes can't be created\n" set exit_code 1 } @@ -156,7 +166,7 @@ if {[run_batch_jobs 512 $512node_block_cnt $file_in ] != 1} { set exit_code 1 } -if {[run_batch_jobs 1k $1knode_block_cnt $file_in ] != 1} { +if {[run_batch_jobs 1k $1knode_block_cnt $file_in ] == 0} { send_user "\nFAILURE: 1k cnodes can't be created\n" set exit_code 1 } @@ -181,7 +191,7 @@ if {[run_batch_jobs 32k $32knode_block_cnt $file_in ] == 0} { set exit_code 1 } -if {[run_batch_jobs 1k $1knode_block_cnt $file_in ] != 1} { +if {[run_batch_jobs 1k $1knode_block_cnt $file_in ] == 0} { send_user "\nFAILURE: 1k cnodes can't be created\n" set exit_code 1 } @@ -191,17 +201,17 @@ if {[run_batch_jobs 512 $512node_block_cnt $file_in ] != 1} { set exit_code 1 } -if {[run_batch_jobs 128 $128node_block_cnt $file_in ] != 1} { +if {[run_batch_jobs 128 $128node_block_cnt $file_in ] == 0} { send_user "\nFAILURE: 128 cnodes can't be created\n" set exit_code 1 } -if {[run_batch_jobs 32 $32node_block_cnt $file_in ] != 1} { +if {[run_batch_jobs 32 $32node_block_cnt $file_in ] == 0} { send_user "\nFAILURE: 32 cnodes can't be created\n" set exit_code 1 } -if {[run_batch_jobs 512 $512node_block_cnt $file_in ] != 1} { +if {[run_batch_jobs 512 $512node_block_cnt $file_in ] == 0} { send_user "\nFAILURE: 512 cnodes can't be created\n" set exit_code 1 } diff --git a/testsuite/expect/test8.7 b/testsuite/expect/test8.7 new file mode 100755 index 000000000..060a3598e --- /dev/null +++ b/testsuite/expect/test8.7 @@ -0,0 +1,256 @@ +#!/usr/bin/expect +############################################################################ +# Purpose: Test of SLURM functionality +# Test of Blue Gene scheduling with sched/wik2 plugin. +# +# Output: "TEST: #.#" followed by "SUCCESS" if test was successful, OR +# "FAILURE: ..." otherwise with an explanation of the failure, OR +# anything else indicates a failure mode that must be investigated. +############################################################################ +# Copyright (C) 2006-2007 The Regents of the University of California. +# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). +# Written by Morris Jette <jette1@llnl.gov> +# LLNL-CODE-402394. +# +# This file is part of SLURM, a resource management program. +# For details, see <http://www.llnl.gov/linux/slurm/>. +# +# SLURM is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License along +# with SLURM; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+############################################################################ +source ./globals + +set test_id "8.7" +set exit_code 0 +set file_in "test$test_id.in" +set test_prog "test$test_id.prog" + +print_header $test_id + +# +# Submit a batch job to slurm +# Input is node count and script name +# Output is job_id or zero on failure +# +proc submit_job { size script } { + global number sbatch + + set job_id 0 + set sbatch_pid [spawn $sbatch -N $size --output=/dev/null --comment=test -t1 $script] + expect { + -re "Submitted batch job ($number)" { + set job_id $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: sbatch not responding\n" + catch {exec $bin_kill -KILL $sbatch_pid} + if {job_id != 0} { + cancel_job $job_id + set job_id 0 + } + } + eof { + wait + } + } + + return $job_id +} + +# +# Check if we have sched/wiki2 configured +# +log_user 0 +set conf_dir "" +set control_addr "" +set is_bluegene 0 +set sched_port 0 +set sched_wiki 0 +exec $bin_rm -f $file_in $test_prog +spawn $scontrol show config +expect { + -re "ControlAddr *= ($alpha_numeric)" { + set control_addr $expect_out(1,string) + exp_continue + } + -re "SchedulerPort *= ($number)" { + set sched_port $expect_out(1,string) + exp_continue + } + -re "sched/wiki2" { + set sched_wiki 1 + exp_continue + } + -re "select/bluegene" { + set is_bluegene 1 + exp_continue + } + -re "SLURM_CONFIG_FILE *= (.*)/slurm.conf" { + set conf_dir $expect_out(1,string) + exp_continue + } + timeout { + send_user "\nFAILURE: scontrol not responding\n" + set exit_code 1 + } + eof { + wait + } +} +log_user 1 +if {$sched_wiki == 0} { + send_user "\nWARNING: not running sched/wiki2, test is not applicable\n" + exit $exit_code +} +if {$is_bluegene == 0} { + send_user "\nWARNING: not running select/bluegene, test is not applicable\n" + exit $exit_code +} +if {[string compare $control_addr ""] == 0} { + send_user "\nFAILURE: ControlAddr not set\n" + exit 1 +} +if {$sched_port == 0} { + send_user "\nFAILURE: SchedulerPort = 0\n" + exit 1 +} + +# +# Try to get the AuthKey from wiki.conf +# +set auth_key 0 +set e_port 0 +set agg_time 999 +set wiki_file "$conf_dir/wiki.conf" +if {[file readable $wiki_file] == 0} { + send_user "\nWARNING: Unable to read $wiki_file\n" + exit $exit_code +} +log_user 0 +spawn $bin_sort $wiki_file +expect { + -re "AuthKey=($number)" { + set auth_key $expect_out(1,string) + exp_continue + } + -re "EPort=($number)" { + set e_port $expect_out(1,string) + exp_continue + } + -re "JobAggregationTime=($number)" { + set agg_time $expect_out(1,string) + exp_continue + } + -re + eof { + wait + } +} +log_user 1 +if {$auth_key == 0} { + send_user "\nWARNING: AuthKey not found in $wiki_file\n" + exit 1 +} +if {$e_port == 0} { + send_user "\nWARNING: EPort not found in $wiki_file\n" + exit 1 +} +if {$agg_time != 0} { + send_user "\nWARNING: JobAggregationTime != 0 in $wiki_file\n" + exit 1 +} +#send_user "\nAuthKey=$auth_key\nEPort=$e_port\n" + +# +# Submit four jobs to work with +# +make_bash_script $file_in " + echo BEGIN + $bin_sleep 30 + echo FINI + exit 123" +set job_id1 [submit_job 1024 $file_in] +if {$job_id1 == 0} { + send_user "\nFAILURE: srun job submit failed\n" + exit 1 +} + +set job_id2 [submit_job 512 $file_in] +if {$job_id2 == 0} { + send_user "\nFAILURE: srun job submit failed\n" + [cancel_job $job_id1] + exit 1 +} + +set job_id3 [submit_job 256 $file_in] +if {$job_id3 == 0} { + send_user "\nFAILURE: srun job submit failed\n" + [cancel_job $job_id1] + [cancel_job $job_id2] + exit 1 +} + +set job_id4 
[submit_job 32 $file_in] +if {$job_id4 == 0} { + send_user "\nFAILURE: srun job submit failed\n" + [cancel_job $job_id1] + [cancel_job $job_id2] + [cancel_job $job_id4] + exit 1 +} + +# +# Start our Moab emulator ($test_prog) to manage the job +# +set timeout 60 +exec $bin_rm -f $test_prog +exec $bin_make -f /dev/null $test_prog +set success 0 +set moab_pid [spawn $test_prog $auth_key $control_addr $e_port $sched_port $job_id1 $job_id2 $job_id3 $job_id4] +set master_id $spawn_id +expect { + -re "FAILURE" { + set exit_code 1 + exp_continue + } + -re "SUCCESS" { + set success 1 + exp_continue + } + timeout { + send_user "\nFAILURE: Moab emulator not responding\n" + catch {exec $bin_kill -KILL $moab_pid} + set exit_code 1 + } + eof { + wait + } +} +if {$success == 0} { + send_user "\nFAILURE: Moab emulator failed\n" + set exit_code 1 +} + +cancel_job $job_id1 +cancel_job $job_id2 +cancel_job $job_id3 +cancel_job $job_id4 + +if { $exit_code == 0 } { + exec $bin_rm -f $file_in $test_prog + send_user "\nSUCCESS\n" +} +exit $exit_code + diff --git a/testsuite/expect/test8.7.crypto.c b/testsuite/expect/test8.7.crypto.c new file mode 100644 index 000000000..153c2ef57 --- /dev/null +++ b/testsuite/expect/test8.7.crypto.c @@ -0,0 +1,134 @@ +/*****************************************************************************\ + * crypto.c - DES cryptographic routines. + ***************************************************************************** + * Produced by Cluster Resources, Inc., no rights reserved. +\*****************************************************************************/ +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#define MAX_ITERATION 4 + +/************************************************************** + * des + ************************************************************** + * DESCRIPTION + * Compute a DES digest for a CRC according to a particular + * key. + * + * ARGUMENTS + * lword (in/out) - The CRC to encode, which becomes the first + * lexical segment of the checksum. + * irword (in/out ) - The key with which to encode the CRC, + * which becomes the second lexical segment of + * the checksum. + * + * RETURNS + * None. + * + * SOURCE + * Cluster Resources, Inc., no rights reserved. + **************************************************************/ +static void des( uint32_t *lword, uint32_t *irword ) +{ + int idx; + uint32_t ia, ib, iswap, itmph, itmpl; + + static uint32_t c1[ MAX_ITERATION ] = { + 0xcba4e531, + 0x537158eb, + 0x145cdc3c, + 0x0d3fdeb2 + }; + static uint32_t c2[ MAX_ITERATION ] = { + 0x12be4590, + 0xab54ce58, + 0x6954c7a6, + 0x15a2ca46 + }; + + itmph = 0; + itmpl = 0; + + for ( idx = 0; idx < MAX_ITERATION; ++idx ) { + iswap = *irword; + ia = iswap ^ c1[ idx ]; + itmpl = ia & 0xffff; + itmph = ia >> 16; + ib = itmpl * itmpl + ~( itmph * itmph ); + ia = (ib >> 16) | ( (ib & 0xffff) << 16 ); + *irword = (*lword) ^ ( (ia ^c2[ idx ]) + (itmpl * itmph) ); + *lword = iswap; + } +} + +/************************************************************** + * compute_crc + ************************************************************** + * DESCRIPTION + * Compute a cyclic redundancy check (CRC) character-wise. + * + * ARGUMENTS + * crc (in) - The CRC computed thus far. + * onech (in) - The character to be added to the CRC. + * + * RETURNS + * The new CRC value. + * + * SOURCE + * Cluster Resources, Inc., no rights reserved. 
+ **************************************************************/ +static uint16_t compute_crc( uint16_t crc, uint8_t onech ) +{ + int idx; + uint32_t ans = ( crc ^ onech << 8 ); + + for ( idx = 0; idx < 8; ++idx ) { + if ( ans & 0x8000 ) { + ans <<= 1; + ans = ans ^ 4129; + } else { + ans <<= 1; + } + } + + return ans; +} + +/************************************************************** + * checksum + ************************************************************** + * DESCRIPTION + * Compute a Wiki checksum for the current message contents + * and return the result as a Wiki name-value pair. + * + * ARGUMENTS + * sum (out) - The string in which to store the resulting + * checksum. + * key(in) - The seed value for the checksum. This must be + * coordinated with the scheduler so that they + * both use the same value. It is a string of + * ASCII decimal digits. + * + * RETURNS + * None. + **************************************************************/ +extern void checksum( char *sum, const char * key, const char * buf ) +{ + uint32_t crc = 0; + uint32_t lword, irword; + int idx, buf_len = strlen(buf); + uint32_t seed = (uint32_t) strtoul( key, NULL, 0 ); + + for ( idx = 0; idx < buf_len; ++idx ) { + crc = (uint32_t) compute_crc( crc, buf[idx] ); + } + + lword = crc; + irword = seed; + + des( &lword, &irword ); + + sprintf(sum, "CK=%08x%08x", lword, irword); +} diff --git a/testsuite/expect/test8.7.prog.c b/testsuite/expect/test8.7.prog.c new file mode 100644 index 000000000..58b6c373a --- /dev/null +++ b/testsuite/expect/test8.7.prog.c @@ -0,0 +1,329 @@ +/*****************************************************************************\ + * test8.7.prog.c - Test of Blue Gene scheduling with sched/wik2 plugin. + ***************************************************************************** + * Copyright (C) 2006-2007 The Regents of the University of California. + * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). + * Written by Morris Jette <jette1@llnl.gov> + * LLNL-CODE-402394. + * + * This file is part of SLURM, a resource management program. + * For details, see <http://www.llnl.gov/linux/slurm/>. + * + * SLURM is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY + * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along + * with SLURM; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+\*****************************************************************************/ + +#include <netdb.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/socket.h> + +#include "./test7.7.crypto.c" + +/* global variables */ +char *auth_key, *control_addr; +int e_port, sched_port; +long job_id1, job_id2, job_id3, job_id4; + +static int _conn_wiki_port(char *host, int port) +{ + int sock_fd; + struct sockaddr_in wiki_addr; + struct hostent *hptr; + + hptr = gethostbyname(host); + if (hptr == NULL) { + perror("gethostbyname"); + exit(1); + } + if ((sock_fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + perror("socket"); + exit(1); + } + bzero((char *) &wiki_addr, sizeof(wiki_addr)); + wiki_addr.sin_family = AF_INET; + wiki_addr.sin_port = htons(port); + memcpy(&wiki_addr.sin_addr.s_addr, hptr->h_addr, hptr->h_length); + sock_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + if (connect(sock_fd, (struct sockaddr *) &wiki_addr, + sizeof(wiki_addr))) { + perror("connect"); + exit(1); + } + return sock_fd; +} + +static int _conn_event_port(char *host, int port) +{ + int sock_fd; + struct sockaddr_in wiki_addr; + struct hostent *hptr; + + hptr = gethostbyname(host); + if (hptr == NULL) { + perror("gethostbyname"); + exit(1); + } + if ((sock_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { + perror("socket"); + exit(1); + } + bzero((char *) &wiki_addr, sizeof(wiki_addr)); + wiki_addr.sin_family = AF_INET; + wiki_addr.sin_port = htons(port); + memcpy(&wiki_addr.sin_addr.s_addr, hptr->h_addr, hptr->h_length); + if (bind(sock_fd, (struct sockaddr *) &wiki_addr, + sizeof(wiki_addr))) { + printf("WARNING: bind to port %i failed, may not be real error\n", + port); + return -1; + } + listen(sock_fd, 1); + return sock_fd; +} + +static size_t _read_bytes(int fd, char *buf, const size_t size) +{ + size_t bytes_remaining, bytes_read; + char *ptr; + + bytes_remaining = size; + ptr = buf; + while (bytes_remaining > 0) { + bytes_read = read(fd, ptr, bytes_remaining); + if (bytes_read <= 0) + return 0; + bytes_remaining -= bytes_read; + ptr += bytes_read; + } + + return size; +} + +static size_t _write_bytes(int fd, char *buf, const size_t size) +{ + size_t bytes_remaining, bytes_written; + char *ptr; + + bytes_remaining = size; + ptr = buf; + while (bytes_remaining > 0) { + bytes_written = write(fd, ptr, bytes_remaining); + if (bytes_written < 0) + return 0; + bytes_remaining -= bytes_written; + ptr += bytes_written; + } + return size; +} + +static size_t _send_msg(int fd, char *buf, size_t size) +{ + char header[10]; + size_t data_sent; + + (void) sprintf(header, "%08lu\n", (uint32_t) size); + if (_write_bytes(fd, header, 9) != 9) { + perror("writing message header"); + exit(1); + } + + data_sent = _write_bytes(fd, buf, size); + if (data_sent != size) { + perror("writing message"); + exit(1); + } + + return data_sent; +} + +static char *_recv_msg(int fd) +{ + char header[10]; + uint32_t size; + char *buf; + + if (_read_bytes(fd, header, 9) != 9) { + perror("reading message header"); + exit(1); + } + if (sscanf(header, "%ul", &size) != 1) { + perror("parsing message header"); + exit(1); + } + buf = calloc(1, (size+1)); /* need '\0' on end to print */ + if (buf == NULL) { + perror("malloc"); + exit(1); + } + if (_read_bytes(fd, buf, size) != size) { + perror("reading message"); + exit(1); + } + return buf; +} + +static void _xmit(char *msg) +{ + int msg_len = strlen(msg); + char *out_msg, *in_msg, sum[20], *sc_ptr; + int wiki_fd = _conn_wiki_port(control_addr, 
sched_port); + int sc; + + out_msg = calloc(1, (msg_len+100)); + if (out_msg == NULL) { + perror("malloc"); + exit(1); + } + checksum(sum, auth_key, msg); + sprintf(out_msg, "%s %s", sum, msg); + printf("send:%s\n", out_msg); + _send_msg(wiki_fd, out_msg, strlen(out_msg)); + in_msg = _recv_msg(wiki_fd); + printf("recv:%s\n\n", in_msg); + sc_ptr = strstr(in_msg, "SC="); + sc = atoi(sc_ptr+3); + if (sc != 0) { + fprintf(stderr, "RPC FAILURE\n"); + exit(1); + } + free(in_msg); + close(wiki_fd); +} + +static void _event_mgr(void) +{ + int accept_fd, event_fd; + int accept_addr_len = sizeof(struct sockaddr); + size_t cnt; + char in_msg[5]; + struct sockaddr_in accept_addr; + + if ((event_fd = _conn_event_port(control_addr, e_port)) < 0) + return; + printf("READY_FOR_EVENT\n"); + if ((accept_fd = accept(event_fd, (struct sockaddr *) &accept_addr, + &accept_addr_len)) < 0) { + perror("accept"); + exit(1); + } + close(event_fd); + + cnt = _read_bytes(accept_fd, in_msg, sizeof(in_msg)); + if (cnt > 0) + printf("event recv:%s\n\n", in_msg); + close(accept_fd); +} + +static void _get_jobs(void) +{ + time_t now = time(NULL); + char out_msg[128]; + + /* Dump all data */ + snprintf(out_msg, sizeof(out_msg), + "TS=%u AUTH=root DT=%s", + (uint32_t) now, "CMD=GETJOBS ARG=0:ALL"); + _xmit(out_msg); +} + +static void _get_nodes(void) +{ + time_t now = time(NULL); + char out_msg[128]; + + /* Dump all data */ + snprintf(out_msg, sizeof(out_msg), + "TS=%u AUTH=root DT=%s", + (uint32_t) now, "CMD=GETNODES ARG=0:ALL"); + _xmit(out_msg); +} + +static void _cancel_job(long my_job_id) +{ + time_t now = time(NULL); + char out_msg[128]; + + snprintf(out_msg, sizeof(out_msg), + "TS=%u AUTH=root DT=CMD=CANCELJOB ARG=%ld TYPE=ADMIN", + (uint32_t) now, my_job_id); + _xmit(out_msg); +} + +static void _start_job(long my_job_id) +{ + time_t now = time(NULL); + char out_msg[128]; + + snprintf(out_msg, sizeof(out_msg), + "TS=%u AUTH=root DT=CMD=STARTJOB ARG=%ld TASKLIST=", + /* Empty TASKLIST means we don't care */ + (uint32_t) now, my_job_id); + _xmit(out_msg); +} + +static void _initialize(void) +{ + time_t now = time(NULL); + char out_msg[128]; + + snprintf(out_msg, sizeof(out_msg), + "TS=%u AUTH=root DT=CMD=INITIALIZE ARG=USEHOSTEXP=T EPORT=%u", + (uint32_t) now, e_port); + _xmit(out_msg); +} + +int main(int argc, char * argv[]) +{ + if (argc < 8) { + printf("Usage: %s, auth_key control_addr e_port " + "sched_port job_id1 job_id2 job_id3 job_id4\n", argv[0]); + exit(1); + } + + auth_key = argv[1]; + control_addr = argv[2]; + e_port = atoi(argv[3]); + sched_port = atoi(argv[4]); + job_id1 = atoi(argv[5]); + job_id2 = atoi(argv[6]); + job_id3 = atoi(argv[7]); + job_id4 = atoi(argv[8]); + printf("auth_key=%s control_addr=%s e_port=%d sched_port=%d\n", + auth_key, control_addr, e_port, sched_port); + printf("job_id1=%d job_id2=%d job_id3=%d job_id4=%d \n", + job_id1, job_id2, job_id3, job_id4); + + _initialize(); + _get_jobs(); + _get_nodes(); + + /* FIXME: Replace with new RPC containing multiple jobs and reserved nodes */ + _start_job(job_id1); + _start_job(job_id2); + _start_job(job_id3); + _start_job(job_id4); + _get_jobs(); + + _cancel_job(job_id1); + _cancel_job(job_id2); + _cancel_job(job_id3); + _cancel_job(job_id4); + + printf("SUCCESS\n"); + exit(0); +} + diff --git a/testsuite/expect/test9.1 b/testsuite/expect/test9.1 index 035f39b7e..b2e1f3bb4 100755 --- a/testsuite/expect/test9.1 +++ b/testsuite/expect/test9.1 @@ -12,7 +12,7 @@ # Copyright (C) 2002 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -106,14 +106,16 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} { } if {$stdout_lines != $stdout_target} { send_user "\nFAILURE: stdout is incomplete\n" + set cycle_count [expr $inx + 1] set exit_code 1 + break } else { incr success_cnt } } -exec $bin_rm -f $file_in $file_out if {$exit_code == 0} { + exec $bin_rm -f $file_in $file_out send_user "\nSUCCESS\n" } else { send_user "\nFAILURE: Only $success_cnt of $cycle_count" diff --git a/testsuite/expect/test9.2 b/testsuite/expect/test9.2 index a2fcb12a2..34a71af12 100755 --- a/testsuite/expect/test9.2 +++ b/testsuite/expect/test9.2 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -96,14 +96,16 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} { } if {$stdout_lines != $task_cnt} { send_user "\nFAILURE:stdout is incomplete\n" + set cycle_count [expr $inx + 1] set exit_code 1 + break } else { incr success_cnt } } -exec $bin_rm -f $file_out if {$exit_code == 0} { + exec $bin_rm -f $file_out send_user "\nSUCCESS\n" } else { send_user "\nFAILURE: Only $success_cnt of $cycle_count" diff --git a/testsuite/expect/test9.3 b/testsuite/expect/test9.3 index 021b82d6f..7a392520b 100755 --- a/testsuite/expect/test9.3 +++ b/testsuite/expect/test9.3 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -106,17 +106,19 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} { } if {$stdout_lines != $stdout_target} { send_user "\nFAILURE:stdout is incomplete\n" + set cycle_count [expr $inx + 1] set exit_code 1 + break } else { incr success_cnt } } -exec $bin_rm -f $file_in $file_out if {$exit_code == 0} { + exec $bin_rm -f $file_in $file_out send_user "\nSUCCESS\n" } else { - send_user "\nFAILURE: Only $success_cnt of $cycle_count hostname" - send_user " output tests passed\n" + send_user "\nFAILURE: Only $success_cnt of $cycle_count I/O" + send_user " tests passed\n" } exit $exit_code diff --git a/testsuite/expect/test9.4 b/testsuite/expect/test9.4 index 66c01d440..0b7d95750 100755 --- a/testsuite/expect/test9.4 +++ b/testsuite/expect/test9.4 @@ -16,7 +16,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test9.5 b/testsuite/expect/test9.5 index 8bd4c003f..fb8a46b9b 100755 --- a/testsuite/expect/test9.5 +++ b/testsuite/expect/test9.5 @@ -9,7 +9,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. 
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test9.6 b/testsuite/expect/test9.6 index 1f88f07e0..a057883f1 100755 --- a/testsuite/expect/test9.6 +++ b/testsuite/expect/test9.6 @@ -12,7 +12,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. @@ -107,14 +107,16 @@ for {set inx 0} {$inx < $cycle_count} {incr inx} { } if {$stdout_lines != $stdout_target} { send_user "\nFAILURE:stdout is missing output\n" + set cycle_count [expr $inx + 1] set exit_code 1 + break } else { incr success_cnt } } -exec $bin_rm -f $file_in $file_out if {$exit_code == 0} { + exec $bin_rm -f $file_in $file_out send_user "\nSUCCESS\n" } else { send_user "\nFAILURE: Only $success_cnt of $cycle_count hostname" diff --git a/testsuite/expect/test9.7 b/testsuite/expect/test9.7 index cf4362510..fee330413 100755 --- a/testsuite/expect/test9.7 +++ b/testsuite/expect/test9.7 @@ -10,7 +10,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test9.7.bash b/testsuite/expect/test9.7.bash index 1056eadc6..15d53995b 100755 --- a/testsuite/expect/test9.7.bash +++ b/testsuite/expect/test9.7.bash @@ -7,7 +7,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/expect/test9.8 b/testsuite/expect/test9.8 index c1f9d44e8..494e51ab4 100755 --- a/testsuite/expect/test9.8 +++ b/testsuite/expect/test9.8 @@ -13,7 +13,7 @@ # Copyright (C) 2002-2006 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. 
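The sched/wiki2 emulator added above (test8.7.prog.c) frames every Wiki request as an 8-digit, zero-padded decimal length plus a newline, followed by a payload that carries a CK= checksum computed by test8.7.crypto.c. A standalone sketch of just the framing step follows; the payload below is an example and omits the checksum.

    /* Sketch of the Wiki message framing used by _send_msg() in
     * test8.7.prog.c: a nine-byte length header ("%08u" plus newline)
     * followed by the payload.  The payload here is only an example;
     * real messages are prefixed with a "CK=..." checksum. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int send_wiki_msg(int fd, const char *msg)
    {
        char header[10];
        size_t len = strlen(msg);

        snprintf(header, sizeof(header), "%08zu\n", len);
        if (write(fd, header, 9) != 9)
            return -1;
        if (write(fd, msg, len) != (ssize_t) len)
            return -1;
        return 0;
    }

    int main(void)
    {
        /* write to stdout just to show the wire format */
        return send_wiki_msg(STDOUT_FILENO, "CMD=GETJOBS ARG=0:ALL");
    }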
@@ -62,9 +62,9 @@ if {[test_front_end] != 0} { make_bash_script $file_in " $bin_sleep $delay for ((inx=0; inx < $task_cnt; inx++)) ; do - $srun $bin_sleep $sleep_time & + $srun -N1 -n1 $bin_sleep $sleep_time & done -$srun $bin_sleep $sleep_time +$srun -N1 -n1 $bin_sleep $sleep_time " # @@ -73,9 +73,9 @@ $srun $bin_sleep $sleep_time set start_cnt 0 set timeout $delay for {set inx 0} {$inx < $job_cnt} {incr inx} { - set srun_pid [spawn $srun --job-name=$job_name --batch --output=/dev/null --error=/dev/null -t5 $file_in] + set sbatch_pid [spawn $sbatch --job-name=$job_name --output=/dev/null --error=/dev/null -t5 $file_in] expect { - -re "jobid ($number) submitted" { + -re "Submitted batch job ($number)" { incr start_cnt exp_continue } @@ -85,7 +85,7 @@ for {set inx 0} {$inx < $job_cnt} {incr inx} { } timeout { send_user "\nFAILURE: srun not responding\n" - slow_kill $srun_pid + slow_kill $sbatch_pid exit 1 } eof { diff --git a/testsuite/expect/usleep b/testsuite/expect/usleep index 3af6bd6be..5759a4602 100755 --- a/testsuite/expect/usleep +++ b/testsuite/expect/usleep @@ -7,7 +7,7 @@ # Copyright (C) 2002 The Regents of the University of California. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # Written by Morris Jette <jette1@llnl.gov> -# UCRL-CODE-226842. +# LLNL-CODE-402394. # # This file is part of SLURM, a resource management program. # For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/Makefile.in b/testsuite/slurm_unit/Makefile.in index 48c72e40c..e4e4fe3c7 100644 --- a/testsuite/slurm_unit/Makefile.in +++ b/testsuite/slurm_unit/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. 
@@ -41,6 +41,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -99,6 +101,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -112,10 +115,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -135,7 +141,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -146,6 +155,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -161,6 +172,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -176,6 +188,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -350,8 +363,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -376,8 +389,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -387,13 +400,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git 
a/testsuite/slurm_unit/api/Makefile.in b/testsuite/slurm_unit/api/Makefile.in index bd6610ab2..25a05ccff 100644 --- a/testsuite/slurm_unit/api/Makefile.in +++ b/testsuite/slurm_unit/api/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -69,7 +71,7 @@ api_test_OBJECTS = api-test.$(OBJEXT) api_test_LDADD = $(LDADD) api_test_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ $(top_builddir)/src/api/libslurm.la -DEFAULT_INCLUDES = -I. -I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -119,6 +121,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -132,10 +135,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -155,7 +161,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -166,6 +175,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -181,6 +192,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -196,6 +208,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -412,8 +425,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END 
{ if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -438,8 +451,8 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -449,13 +462,12 @@ ctags: CTAGS CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/testsuite/slurm_unit/api/manual/Makefile.in b/testsuite/slurm_unit/api/manual/Makefile.in index 5e77e3dca..9f721d2e5 100644 --- a/testsuite/slurm_unit/api/manual/Makefile.in +++ b/testsuite/slurm_unit/api/manual/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -45,6 +45,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -97,7 +99,7 @@ update_config_tst_SOURCES = update_config-tst.c update_config_tst_OBJECTS = update_config-tst.$(OBJEXT) update_config_tst_LDADD = $(LDADD) update_config_tst_DEPENDENCIES = $(top_builddir)/src/api/libslurm.la -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -141,6 +143,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -154,10 +157,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -177,7 +183,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -188,6 +197,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -203,6 +214,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -218,6 +230,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -389,8 +402,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -402,8 +415,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -413,13 +426,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/testsuite/slurm_unit/api/manual/cancel-tst.c b/testsuite/slurm_unit/api/manual/cancel-tst.c index 1562fe539..8fce7808d 100644 --- 
a/testsuite/slurm_unit/api/manual/cancel-tst.c +++ b/testsuite/slurm_unit/api/manual/cancel-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/api/manual/complete-tst.c b/testsuite/slurm_unit/api/manual/complete-tst.c index 93f8ddaaa..06926437d 100644 --- a/testsuite/slurm_unit/api/manual/complete-tst.c +++ b/testsuite/slurm_unit/api/manual/complete-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/api/manual/job_info-tst.c b/testsuite/slurm_unit/api/manual/job_info-tst.c index 59ee9bed4..967e42088 100644 --- a/testsuite/slurm_unit/api/manual/job_info-tst.c +++ b/testsuite/slurm_unit/api/manual/job_info-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/api/manual/node_info-tst.c b/testsuite/slurm_unit/api/manual/node_info-tst.c index 1065673f9..9db08b49a 100644 --- a/testsuite/slurm_unit/api/manual/node_info-tst.c +++ b/testsuite/slurm_unit/api/manual/node_info-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/api/manual/partition_info-tst.c b/testsuite/slurm_unit/api/manual/partition_info-tst.c index 4749ac35b..c75d98a26 100644 --- a/testsuite/slurm_unit/api/manual/partition_info-tst.c +++ b/testsuite/slurm_unit/api/manual/partition_info-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/api/manual/reconfigure-tst.c b/testsuite/slurm_unit/api/manual/reconfigure-tst.c index f8c1c2225..2064b77fd 100644 --- a/testsuite/slurm_unit/api/manual/reconfigure-tst.c +++ b/testsuite/slurm_unit/api/manual/reconfigure-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. 
* For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/api/manual/submit-tst.c b/testsuite/slurm_unit/api/manual/submit-tst.c index c1079e327..f4b54bb32 100644 --- a/testsuite/slurm_unit/api/manual/submit-tst.c +++ b/testsuite/slurm_unit/api/manual/submit-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/api/manual/update_config-tst.c b/testsuite/slurm_unit/api/manual/update_config-tst.c index 2fa8d5355..637622a8f 100644 --- a/testsuite/slurm_unit/api/manual/update_config-tst.c +++ b/testsuite/slurm_unit/api/manual/update_config-tst.c @@ -4,7 +4,7 @@ * Copyright (C) 2002 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Morris Jette <jette1@llnl.gov> et.al. - * UCRL-CODE-226842. + * LLNL-CODE-402394. * * This file is part of SLURM, a resource management program. * For details, see <http://www.llnl.gov/linux/slurm/>. diff --git a/testsuite/slurm_unit/common/Makefile.in b/testsuite/slurm_unit/common/Makefile.in index 6dd0bbce7..b56d8fd59 100644 --- a/testsuite/slurm_unit/common/Makefile.in +++ b/testsuite/slurm_unit/common/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -87,7 +89,7 @@ runqsw_OBJECTS = runqsw.$(OBJEXT) runqsw_LDADD = $(LDADD) runqsw_DEPENDENCIES = $(top_builddir)/src/common/libcommon.la \ $(am__DEPENDENCIES_1) -DEFAULT_INCLUDES = -I. 
-I$(top_builddir) -I$(top_builddir)/slurm@am__isrc@ +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp am__depfiles_maybe = depfiles COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ @@ -127,6 +129,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -140,10 +143,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -163,7 +169,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -174,6 +183,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -189,6 +200,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -204,6 +216,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ @@ -365,8 +378,8 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonemtpy = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ mkid -fID $$unique tags: TAGS @@ -378,8 +391,8 @@ TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \ test -n "$$unique" || unique=$$empty_fix; \ $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ @@ -389,13 +402,12 @@ ctags: CTAGS CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ $(TAGS_FILES) $(LISP) tags=; \ - here=`pwd`; \ list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ unique=`for i in $$list; do \ if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ done | \ - $(AWK) ' { files[$$0] = 1; } \ - END { for (i in files) print i; }'`; \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ test -z "$(CTAGS_ARGS)$$tags$$unique" \ || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ $$tags $$unique diff --git a/testsuite/slurm_unit/common/pack-test.c b/testsuite/slurm_unit/common/pack-test.c index e883df4cc..bf1b7c0e2 100644 --- 
a/testsuite/slurm_unit/common/pack-test.c +++ b/testsuite/slurm_unit/common/pack-test.c @@ -1,4 +1,4 @@ -/* $Id: pack-test.c 1528 2002-10-31 15:56:56Z grondo $ */ +/* $Id: pack-test.c 13654 2008-03-18 23:00:20Z jette $ */ #if HAVE_CONFIG_H # include <config.h> @@ -32,8 +32,8 @@ int main (int argc, char *argv[]) { Buf buffer; - uint16_t test16 = 1234, out16, byte_cnt; - uint32_t test32 = 5678, out32; + uint16_t test16 = 1234, out16; + uint32_t test32 = 5678, out32, byte_cnt; char testbytes[] = "TEST BYTES", *outbytes; char teststring[] = "TEST STRING", *outstring = NULL; char *nullstr = NULL; diff --git a/testsuite/slurm_unit/slurmctld/Makefile.in b/testsuite/slurm_unit/slurmctld/Makefile.in index 83f241777..aae4a7566 100644 --- a/testsuite/slurm_unit/slurmctld/Makefile.in +++ b/testsuite/slurm_unit/slurmctld/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -90,6 +92,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -103,10 +106,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -126,7 +132,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -137,6 +146,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -152,6 +163,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -167,6 +179,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ diff --git a/testsuite/slurm_unit/slurmctld/security_2_1.py b/testsuite/slurm_unit/slurmctld/security_2_1.py index 6a2b573f4..ee82500a5 100755 --- 
a/testsuite/slurm_unit/slurmctld/security_2_1.py +++ b/testsuite/slurm_unit/slurmctld/security_2_1.py @@ -54,6 +54,8 @@ def main(argv=None): # print print "NOTE: slurm_epilog and slurm_prolog only exist on BlueGene systems" + print "NOTE: federation.conf only exists on AIX systems" + print "NOTE: sview, slurmdbd and slurmdbd.conf exists only on selected systems" print "NOTE: JobCredentialPrivateKey, SlurmctldLogFile, and StateSaveLocation only on control host" print "NOTE: SlurmdLogFile and SlurmdSpoolDir only exist on compute servers" print @@ -63,9 +65,12 @@ def main(argv=None): files.append(options.conf) files.append(options.sysconfdir+'/bluegene.conf') files.append(options.sysconfdir+'/federation.conf') + files.append(options.sysconfdir+'/slurm.conf') + files.append(options.sysconfdir+'/slurmdbd.conf') files.append(options.sysconfdir+'/wiki.conf') - files.append(options.prefix+'/bin/srun') + files.append(options.prefix+'/bin/mpiexec') files.append(options.prefix+'/bin/sacct') + files.append(options.prefix+'/bin/sacctmgr') files.append(options.prefix+'/bin/salloc') files.append(options.prefix+'/bin/sattach') files.append(options.prefix+'/bin/sbatch') @@ -73,13 +78,14 @@ def main(argv=None): files.append(options.prefix+'/bin/scancel') files.append(options.prefix+'/bin/scontrol') files.append(options.prefix+'/bin/sinfo') - files.append(options.prefix+'/bin/slaunch') files.append(options.prefix+'/bin/smap') files.append(options.prefix+'/bin/squeue') + files.append(options.prefix+'/bin/srun') files.append(options.prefix+'/bin/strigger') files.append(options.prefix+'/bin/sview') files.append(options.prefix+'/sbin/slurmctld') files.append(options.prefix+'/sbin/slurmd') + files.append(options.prefix+'/sbin/slurmdbd') files.append(options.prefix+'/sbin/slurmstepd') files.append(options.prefix+'/sbin/slurm_epilog') files.append(options.prefix+'/sbin/slurm_prolog') @@ -108,6 +114,7 @@ def main(argv=None): print "Ensuring the following are not world readable:" files = [] append_file(files, confpairs, 'JobCredentialPrivateKey') + files.append(options.sysconfdir+'/slurmdbd.conf') files.append(options.sysconfdir+'/wiki.conf') for fname in files: diff --git a/testsuite/slurm_unit/slurmctld/security_2_2a.sh b/testsuite/slurm_unit/slurmctld/security_2_2a.sh new file mode 100755 index 000000000..946aadc46 --- /dev/null +++ b/testsuite/slurm_unit/slurmctld/security_2_2a.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +echo "#!/bin/sh" >tmp2.$$ +echo "id" >>tmp2.$$ + +# Run srun using this config file +${slurm_bin}sbatch tmp2.$$ + +# Clean up +rm -f tmp2.$$ diff --git a/testsuite/slurm_unit/slurmctld/security_2_2.sh b/testsuite/slurm_unit/slurmctld/security_2_2b.sh similarity index 79% rename from testsuite/slurm_unit/slurmctld/security_2_2.sh rename to testsuite/slurm_unit/slurmctld/security_2_2b.sh index 02e8a44b3..cf429a869 100755 --- a/testsuite/slurm_unit/slurmctld/security_2_2.sh +++ b/testsuite/slurm_unit/slurmctld/security_2_2b.sh @@ -8,9 +8,12 @@ file_orig=`${slurm_bin}scontrol show config | awk '{ if ( $1 ~ /SLURM_CONFIG_FIL grep -iv AuthType <$file_orig >tmp.$$ echo "AuthType=auth/dummy" >>tmp.$$ +echo "#!/bin/sh" >tmp2.$$ +echo "id" >>tmp2.$$ + # Run srun using this config file export SLURM_CONF=tmp.$$ -${slurm_bin}srun /usr/bin/id +${slurm_bin}sbatch tmp2.$$ # Clean up -rm tmp.$$ +rm -f tmp.$$ tmp2.$$ diff --git a/testsuite/slurm_unit/slurmd/Makefile.in b/testsuite/slurm_unit/slurmd/Makefile.in index 2be30e7ad..c78fa4e54 100644 --- a/testsuite/slurm_unit/slurmd/Makefile.in +++ 
b/testsuite/slurm_unit/slurmd/Makefile.in @@ -1,8 +1,8 @@ -# Makefile.in generated by automake 1.10 from Makefile.am. +# Makefile.in generated by automake 1.10.1 from Makefile.am. # @configure_input@ # Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, -# 2003, 2004, 2005, 2006 Free Software Foundation, Inc. +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This Makefile.in is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. @@ -43,6 +43,8 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \ $(top_srcdir)/auxdir/x_ac_affinity.m4 \ $(top_srcdir)/auxdir/x_ac_aix.m4 \ $(top_srcdir)/auxdir/x_ac_bluegene.m4 \ + $(top_srcdir)/auxdir/x_ac_cflags.m4 \ + $(top_srcdir)/auxdir/x_ac_databases.m4 \ $(top_srcdir)/auxdir/x_ac_debug.m4 \ $(top_srcdir)/auxdir/x_ac_elan.m4 \ $(top_srcdir)/auxdir/x_ac_federation.m4 \ @@ -90,6 +92,7 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = @DEFS@ DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ ECHO = @ECHO@ ECHO_C = @ECHO_C@ ECHO_N = @ECHO_N@ @@ -103,10 +106,13 @@ FFLAGS = @FFLAGS@ GREP = @GREP@ GTK2_CFLAGS = @GTK2_CFLAGS@ GTK2_LIBS = @GTK2_LIBS@ +HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@ +HAVEPGCONFIG = @HAVEPGCONFIG@ HAVEPKGCONFIG = @HAVEPKGCONFIG@ HAVE_AIX = @HAVE_AIX@ HAVE_ELAN = @HAVE_ELAN@ HAVE_FEDERATION = @HAVE_FEDERATION@ +HAVE_OPENSSL = @HAVE_OPENSSL@ HAVE_SOME_CURSES = @HAVE_SOME_CURSES@ INSTALL = @INSTALL@ INSTALL_DATA = @INSTALL_DATA@ @@ -126,7 +132,10 @@ MKDIR_P = @MKDIR_P@ MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@ MUNGE_LDFLAGS = @MUNGE_LDFLAGS@ MUNGE_LIBS = @MUNGE_LIBS@ +MYSQL_CFLAGS = @MYSQL_CFLAGS@ +MYSQL_LIBS = @MYSQL_LIBS@ NCURSES = @NCURSES@ +NMEDIT = @NMEDIT@ NUMA_LIBS = @NUMA_LIBS@ OBJEXT = @OBJEXT@ PACKAGE = @PACKAGE@ @@ -137,6 +146,8 @@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PAM_LIBS = @PAM_LIBS@ PATH_SEPARATOR = @PATH_SEPARATOR@ +PGSQL_CFLAGS = @PGSQL_CFLAGS@ +PGSQL_LIBS = @PGSQL_LIBS@ PLPA_LIBS = @PLPA_LIBS@ PROCTRACKDIR = @PROCTRACKDIR@ PROJECT = @PROJECT@ @@ -152,6 +163,7 @@ SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@ SET_MAKE = @SET_MAKE@ SHELL = @SHELL@ SLURMCTLD_PORT = @SLURMCTLD_PORT@ +SLURMDBD_PORT = @SLURMDBD_PORT@ SLURMD_PORT = @SLURMD_PORT@ SLURM_API_AGE = @SLURM_API_AGE@ SLURM_API_CURRENT = @SLURM_API_CURRENT@ @@ -167,6 +179,7 @@ SSL_CPPFLAGS = @SSL_CPPFLAGS@ SSL_LDFLAGS = @SSL_LDFLAGS@ SSL_LIBS = @SSL_LIBS@ STRIP = @STRIP@ +UTIL_LIBS = @UTIL_LIBS@ VERSION = @VERSION@ abs_builddir = @abs_builddir@ abs_srcdir = @abs_srcdir@ -- GitLab